diff --git a/install-packages/deb/usr/share/sdfs/lib/sdfs.jar b/install-packages/deb/usr/share/sdfs/lib/sdfs.jar index 77762433f..850adba5a 100644 Binary files a/install-packages/deb/usr/share/sdfs/lib/sdfs.jar and b/install-packages/deb/usr/share/sdfs/lib/sdfs.jar differ diff --git a/src/fuse/SDFS/SDFSFileSystem.java b/src/fuse/SDFS/SDFSFileSystem.java index b54fc8f07..07720f4d2 100644 --- a/src/fuse/SDFS/SDFSFileSystem.java +++ b/src/fuse/SDFS/SDFSFileSystem.java @@ -842,7 +842,7 @@ else if (_f.isFile()) } private int getFtype(String path) throws FuseException { - SDFSLogger.getLog().info("Path=" + path); + //SDFSLogger.getLog().info("Path=" + path); String pt = mountedVolume + path; File _f = new File(pt); diff --git a/src/org/opendedup/buse/driver/BUSE.java b/src/org/opendedup/buse/driver/BUSE.java index 57cf442ca..05b69b660 100644 --- a/src/org/opendedup/buse/driver/BUSE.java +++ b/src/org/opendedup/buse/driver/BUSE.java @@ -1,18 +1,36 @@ -package org.opendedup.buse.driver; - -import java.nio.ByteBuffer; - -public interface BUSE { - public int read(ByteBuffer data, int len, long offset); - - public int write(ByteBuffer buff, int len, long offset); - - public void disconnect(); - - public int flush(); - - public int trim(long from, int len); - - public void close(); - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.buse.driver; + +import java.nio.ByteBuffer; + +public interface BUSE { + public int read(ByteBuffer data, int len, long offset); + + public int write(ByteBuffer buff, int len, long offset); + + public void disconnect(); + + public int flush(); + + public int trim(long from, int len); + + public void close(); + +} diff --git a/src/org/opendedup/buse/driver/BUSEMkDev.java b/src/org/opendedup/buse/driver/BUSEMkDev.java index 1898d9ae2..751103a11 100644 --- a/src/org/opendedup/buse/driver/BUSEMkDev.java +++ b/src/org/opendedup/buse/driver/BUSEMkDev.java @@ -1,47 +1,65 @@ -package org.opendedup.buse.driver; - -import java.util.logging.Logger; - -public class BUSEMkDev { - - private static Logger log = Logger.getLogger(BUSEMkDev.class.getName()); - - static { - System.loadLibrary("jbuse"); - } - - public static int startdev(final String dev, long sz, int blksz, BUSE buse, - boolean readonly) throws Exception { - - log.info("Mounted filesystem"); - // ShutdownHook t = new ShutdownHook(dev,buse); - // Runtime.getRuntime().addShutdownHook(t); - int z = mkdev(dev, sz, blksz, buse, readonly); - - log.info("Filesystem is unmounted"); - return z; - } - - public static void closeDev(final String dev) throws Exception { - closedev(dev); - } - - public static void init() { - ThreadGroup threadGroup = new ThreadGroup(Thread.currentThread() - .getThreadGroup(), "BUSE Threads"); - threadGroup.setDaemon(true); - init(threadGroup); - } - - private static native int mkdev(String dev, long sz, int blksz, BUSE buse, - boolean readonly) throws Exception; - - private static native void closedev(String dev) throws Exception; - - private static native void init(ThreadGroup threadGroup); - - public static native void release(); - - public static native void setSize(String dev, long sz); - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.buse.driver; + +import java.util.logging.Logger; + +public class BUSEMkDev { + + private static Logger log = Logger.getLogger(BUSEMkDev.class.getName()); + + static { + System.loadLibrary("jbuse"); + } + + public static int startdev(final String dev, long sz, int blksz, BUSE buse, + boolean readonly) throws Exception { + + log.info("Mounted filesystem"); + // ShutdownHook t = new ShutdownHook(dev,buse); + // Runtime.getRuntime().addShutdownHook(t); + int z = mkdev(dev, sz, blksz, buse, readonly); + + log.info("Filesystem is unmounted"); + return z; + } + + public static void closeDev(final String dev) throws Exception { + closedev(dev); + } + + public static void init() { + ThreadGroup threadGroup = new ThreadGroup(Thread.currentThread() + .getThreadGroup(), "BUSE Threads"); + threadGroup.setDaemon(true); + init(threadGroup); + } + + private static native int mkdev(String dev, long sz, int blksz, BUSE buse, + boolean readonly) throws Exception; + + private static native void closedev(String dev) throws Exception; + + private static native void init(ThreadGroup threadGroup); + + public static native void release(); + + public static native void setSize(String dev, long sz); + +} diff --git a/src/org/opendedup/buse/driver/Errno.java b/src/org/opendedup/buse/driver/Errno.java index 489ebb33d..b5527b714 100644 --- a/src/org/opendedup/buse/driver/Errno.java +++ b/src/org/opendedup/buse/driver/Errno.java @@ -1,190 +1,190 @@ -/** - * FUSE-J: Java bindings for FUSE (Filesystem in Userspace by Miklos Szeredi (mszeredi@inf.bme.hu)) - * - * Copyright (C) 2003 Peter Levart (peter@select-tech.si) - * - * This program can be distributed under the terms of the GNU LGPL. - * See the file COPYING.LIB +/** + * FUSE-J: Java bindings for FUSE (Filesystem in Userspace by Miklos Szeredi (mszeredi@inf.bme.hu)) + * + * Copyright (C) 2003, 2013 Peter Levart (peter@select-tech.si) + * + * This program can be distributed under the terms of the GNU LGPL. 
+ * See the file COPYING.LIB */ - -package org.opendedup.buse.driver; - -/** - * This is an enumeration of error return values - */ -public interface Errno { - // - // generated from - - public static final int EPERM = 1; /* Operation not permitted */ - public static final int ENOENT = 2; /* No such file or directory */ - public static final int ESRCH = 3; /* No such process */ - public static final int EINTR = 4; /* Interrupted system call */ - public static final int EIO = 5; /* I/O error */ - public static final int ENXIO = 6; /* No such device or address */ - public static final int E2BIG = 7; /* Arg list too long */ - public static final int ENOEXEC = 8; /* Exec format error */ - public static final int EBADF = 9; /* Bad file number */ - public static final int ECHILD = 10; /* No child processes */ - public static final int EAGAIN = 11; /* Try again */ - public static final int ENOMEM = 12; /* Out of memory */ - public static final int EACCES = 13; /* Permission denied */ - public static final int EFAULT = 14; /* Bad address */ - public static final int ENOTBLK = 15; /* Block device required */ - public static final int EBUSY = 16; /* Device or resource busy */ - public static final int EEXIST = 17; /* File exists */ - public static final int EXDEV = 18; /* Cross-device link */ - public static final int ENODEV = 19; /* No such device */ - public static final int ENOTDIR = 20; /* Not a directory */ - public static final int EISDIR = 21; /* Is a directory */ - public static final int EINVAL = 22; /* Invalid argument */ - public static final int ENFILE = 23; /* File table overflow */ - public static final int EMFILE = 24; /* Too many open files */ - public static final int ENOTTY = 25; /* Not a typewriter */ - public static final int ETXTBSY = 26; /* Text file busy */ - public static final int EFBIG = 27; /* File too large */ - public static final int ENOSPC = 28; /* No space left on device */ - public static final int ESPIPE = 29; /* Illegal seek */ - public static final int EROFS = 30; /* Read-only file system */ - public static final int EMLINK = 31; /* Too many links */ - public static final int EPIPE = 32; /* Broken pipe */ - public static final int EDOM = 33; /* Math argument out of domain of func */ - public static final int ERANGE = 34; /* Math result not representable */ - public static final int EDEADLK = 35; /* Resource deadlock would occur */ - public static final int ENAMETOOLONG = 36; /* File name too long */ - public static final int ENOLCK = 37; /* No record locks available */ - public static final int ENOSYS = 38; /* Function not implemented */ - public static final int ENOTEMPTY = 39; /* Directory not empty */ - public static final int ELOOP = 40; /* Too many symbolic links encountered */ - public static final int EWOULDBLOCK = EAGAIN; /* Operation would block */ - public static final int ENOMSG = 42; /* No message of desired type */ - public static final int EIDRM = 43; /* Identifier removed */ - public static final int ECHRNG = 44; /* Channel number out of range */ - public static final int EL2NSYNC = 45; /* Level 2 not synchronized */ - public static final int EL3HLT = 46; /* Level 3 halted */ - public static final int EL3RST = 47; /* Level 3 reset */ - public static final int ELNRNG = 48; /* Link number out of range */ - public static final int EUNATCH = 49; /* Protocol driver not attached */ - public static final int ENOCSI = 50; /* No CSI structure available */ - public static final int EL2HLT = 51; /* Level 2 halted */ - public static final int EBADE = 52; /* 
Invalid exchange */ - public static final int EBADR = 53; /* Invalid request descriptor */ - public static final int EXFULL = 54; /* Exchange full */ - public static final int ENOANO = 55; /* No anode */ - public static final int EBADRQC = 56; /* Invalid request code */ - public static final int EBADSLT = 57; /* Invalid slot */ - public static final int EDEADLOCK = EDEADLK; - public static final int EBFONT = 59; /* Bad font file format */ - public static final int ENOSTR = 60; /* Device not a stream */ - public static final int ENODATA = 61; /* No data available */ - public static final int ETIME = 62; /* Timer expired */ - public static final int ENOSR = 63; /* Out of streams resources */ - public static final int ENONET = 64; /* Machine is not on the network */ - public static final int ENOPKG = 65; /* Package not installed */ - public static final int EREMOTE = 66; /* Object is remote */ - public static final int ENOLINK = 67; /* Link has been severed */ - public static final int EADV = 68; /* Advertise error */ - public static final int ESRMNT = 69; /* Srmount error */ - public static final int ECOMM = 70; /* Communication error on send */ - public static final int EPROTO = 71; /* Protocol error */ - public static final int EMULTIHOP = 72; /* Multihop attempted */ - public static final int EDOTDOT = 73; /* RFS specific error */ - public static final int EBADMSG = 74; /* Not a data message */ - public static final int EOVERFLOW = 75; /* - * Value too large for defined data - * type - */ - public static final int ENOTUNIQ = 76; /* Name not unique on network */ - public static final int EBADFD = 77; /* File descriptor in bad state */ - public static final int EREMCHG = 78; /* Remote address changed */ - public static final int ELIBACC = 79; /* - * Can not access a needed shared - * library - */ - public static final int ELIBBAD = 80; /* Accessing a corrupted shared library */ - public static final int ELIBSCN = 81; /* .lib section in a.out corrupted */ - public static final int ELIBMAX = 82; /* - * Attempting to link in too many shared - * libraries - */ - public static final int ELIBEXEC = 83; /* - * Cannot exec a shared library - * directly - */ - public static final int EILSEQ = 84; /* Illegal byte sequence */ - public static final int ERESTART = 85; /* - * Interrupted system call should be - * restarted - */ - public static final int ESTRPIPE = 86; /* Streams pipe error */ - public static final int EUSERS = 87; /* Too many users */ - public static final int ENOTSOCK = 88; /* Socket operation on non-socket */ - public static final int EDESTADDRREQ = 89; /* Destination address required */ - public static final int EMSGSIZE = 90; /* Message too long */ - public static final int EPROTOTYPE = 91; /* Protocol wrong type for socket */ - public static final int ENOPROTOOPT = 92; /* Protocol not available */ - public static final int EPROTONOSUPPORT = 93; /* Protocol not supported */ - public static final int ESOCKTNOSUPPORT = 94; /* Socket type not supported */ - public static final int EOPNOTSUPP = 95; /* - * Operation not supported on - * transport endpoint - */ - public static final int EPFNOSUPPORT = 96; /* Protocol family not supported */ - public static final int EAFNOSUPPORT = 97; /* - * Address family not supported - * by protocol - */ - public static final int EADDRINUSE = 98; /* Address already in use */ - public static final int EADDRNOTAVAIL = 99; /* - * Cannot assign requested - * address - */ - public static final int ENETDOWN = 100; /* Network is down */ - public static final 
int ENETUNREACH = 101; /* Network is unreachable */ - public static final int ENETRESET = 102; /* - * Network dropped connection - * because of reset - */ - public static final int ECONNABORTED = 103; /* - * Software caused connection - * abort - */ - public static final int ECONNRESET = 104; /* Connection reset by peer */ - public static final int ENOBUFS = 105; /* No buffer space available */ - public static final int EISCONN = 106; /* - * Transport endpoint is already - * connected - */ - public static final int ENOTCONN = 107; /* - * Transport endpoint is not - * connected - */ - public static final int ESHUTDOWN = 108; /* - * Cannot send after transport - * endpoint shutdown - */ - public static final int ETOOMANYREFS = 109; /* - * Too many references: cannot - * splice - */ - public static final int ETIMEDOUT = 110; /* Connection timed out */ - public static final int ECONNREFUSED = 111; /* Connection refused */ - public static final int EHOSTDOWN = 112; /* Host is down */ - public static final int EHOSTUNREACH = 113; /* No route to host */ - public static final int EALREADY = 114; /* Operation already in progress */ - public static final int EINPROGRESS = 115; /* Operation now in progress */ - public static final int ESTALE = 116; /* Stale NFS file handle */ - public static final int EUCLEAN = 117; /* Structure needs cleaning */ - public static final int ENOTNAM = 118; /* Not a XENIX named type file */ - public static final int ENAVAIL = 119; /* No XENIX semaphores available */ - public static final int EISNAM = 120; /* Is a named type file */ - public static final int EREMOTEIO = 121; /* Remote I/O error */ - public static final int EDQUOT = 122; /* Quota exceeded */ - public static final int ENOMEDIUM = 123; /* No medium found */ - public static final int EMEDIUMTYPE = 124; /* Wrong medium type */ - - // extended attributes support needs these... 
- - public static final int ENOATTR = ENODATA; /* No such attribute */ - public static final int ENOTSUPP = 524; /* Operation is not supported */ -} + +package org.opendedup.buse.driver; + +/** + * This is an enumeration of error return values + */ +public interface Errno { + // + // generated from + + public static final int EPERM = 1; /* Operation not permitted */ + public static final int ENOENT = 2; /* No such file or directory */ + public static final int ESRCH = 3; /* No such process */ + public static final int EINTR = 4; /* Interrupted system call */ + public static final int EIO = 5; /* I/O error */ + public static final int ENXIO = 6; /* No such device or address */ + public static final int E2BIG = 7; /* Arg list too long */ + public static final int ENOEXEC = 8; /* Exec format error */ + public static final int EBADF = 9; /* Bad file number */ + public static final int ECHILD = 10; /* No child processes */ + public static final int EAGAIN = 11; /* Try again */ + public static final int ENOMEM = 12; /* Out of memory */ + public static final int EACCES = 13; /* Permission denied */ + public static final int EFAULT = 14; /* Bad address */ + public static final int ENOTBLK = 15; /* Block device required */ + public static final int EBUSY = 16; /* Device or resource busy */ + public static final int EEXIST = 17; /* File exists */ + public static final int EXDEV = 18; /* Cross-device link */ + public static final int ENODEV = 19; /* No such device */ + public static final int ENOTDIR = 20; /* Not a directory */ + public static final int EISDIR = 21; /* Is a directory */ + public static final int EINVAL = 22; /* Invalid argument */ + public static final int ENFILE = 23; /* File table overflow */ + public static final int EMFILE = 24; /* Too many open files */ + public static final int ENOTTY = 25; /* Not a typewriter */ + public static final int ETXTBSY = 26; /* Text file busy */ + public static final int EFBIG = 27; /* File too large */ + public static final int ENOSPC = 28; /* No space left on device */ + public static final int ESPIPE = 29; /* Illegal seek */ + public static final int EROFS = 30; /* Read-only file system */ + public static final int EMLINK = 31; /* Too many links */ + public static final int EPIPE = 32; /* Broken pipe */ + public static final int EDOM = 33; /* Math argument out of domain of func */ + public static final int ERANGE = 34; /* Math result not representable */ + public static final int EDEADLK = 35; /* Resource deadlock would occur */ + public static final int ENAMETOOLONG = 36; /* File name too long */ + public static final int ENOLCK = 37; /* No record locks available */ + public static final int ENOSYS = 38; /* Function not implemented */ + public static final int ENOTEMPTY = 39; /* Directory not empty */ + public static final int ELOOP = 40; /* Too many symbolic links encountered */ + public static final int EWOULDBLOCK = EAGAIN; /* Operation would block */ + public static final int ENOMSG = 42; /* No message of desired type */ + public static final int EIDRM = 43; /* Identifier removed */ + public static final int ECHRNG = 44; /* Channel number out of range */ + public static final int EL2NSYNC = 45; /* Level 2 not synchronized */ + public static final int EL3HLT = 46; /* Level 3 halted */ + public static final int EL3RST = 47; /* Level 3 reset */ + public static final int ELNRNG = 48; /* Link number out of range */ + public static final int EUNATCH = 49; /* Protocol driver not attached */ + public static final int ENOCSI = 50; /* No CSI structure 
available */ + public static final int EL2HLT = 51; /* Level 2 halted */ + public static final int EBADE = 52; /* Invalid exchange */ + public static final int EBADR = 53; /* Invalid request descriptor */ + public static final int EXFULL = 54; /* Exchange full */ + public static final int ENOANO = 55; /* No anode */ + public static final int EBADRQC = 56; /* Invalid request code */ + public static final int EBADSLT = 57; /* Invalid slot */ + public static final int EDEADLOCK = EDEADLK; + public static final int EBFONT = 59; /* Bad font file format */ + public static final int ENOSTR = 60; /* Device not a stream */ + public static final int ENODATA = 61; /* No data available */ + public static final int ETIME = 62; /* Timer expired */ + public static final int ENOSR = 63; /* Out of streams resources */ + public static final int ENONET = 64; /* Machine is not on the network */ + public static final int ENOPKG = 65; /* Package not installed */ + public static final int EREMOTE = 66; /* Object is remote */ + public static final int ENOLINK = 67; /* Link has been severed */ + public static final int EADV = 68; /* Advertise error */ + public static final int ESRMNT = 69; /* Srmount error */ + public static final int ECOMM = 70; /* Communication error on send */ + public static final int EPROTO = 71; /* Protocol error */ + public static final int EMULTIHOP = 72; /* Multihop attempted */ + public static final int EDOTDOT = 73; /* RFS specific error */ + public static final int EBADMSG = 74; /* Not a data message */ + public static final int EOVERFLOW = 75; /* + * Value too large for defined data + * type + */ + public static final int ENOTUNIQ = 76; /* Name not unique on network */ + public static final int EBADFD = 77; /* File descriptor in bad state */ + public static final int EREMCHG = 78; /* Remote address changed */ + public static final int ELIBACC = 79; /* + * Can not access a needed shared + * library + */ + public static final int ELIBBAD = 80; /* Accessing a corrupted shared library */ + public static final int ELIBSCN = 81; /* .lib section in a.out corrupted */ + public static final int ELIBMAX = 82; /* + * Attempting to link in too many shared + * libraries + */ + public static final int ELIBEXEC = 83; /* + * Cannot exec a shared library + * directly + */ + public static final int EILSEQ = 84; /* Illegal byte sequence */ + public static final int ERESTART = 85; /* + * Interrupted system call should be + * restarted + */ + public static final int ESTRPIPE = 86; /* Streams pipe error */ + public static final int EUSERS = 87; /* Too many users */ + public static final int ENOTSOCK = 88; /* Socket operation on non-socket */ + public static final int EDESTADDRREQ = 89; /* Destination address required */ + public static final int EMSGSIZE = 90; /* Message too long */ + public static final int EPROTOTYPE = 91; /* Protocol wrong type for socket */ + public static final int ENOPROTOOPT = 92; /* Protocol not available */ + public static final int EPROTONOSUPPORT = 93; /* Protocol not supported */ + public static final int ESOCKTNOSUPPORT = 94; /* Socket type not supported */ + public static final int EOPNOTSUPP = 95; /* + * Operation not supported on + * transport endpoint + */ + public static final int EPFNOSUPPORT = 96; /* Protocol family not supported */ + public static final int EAFNOSUPPORT = 97; /* + * Address family not supported + * by protocol + */ + public static final int EADDRINUSE = 98; /* Address already in use */ + public static final int EADDRNOTAVAIL = 99; /* + * Cannot assign 
requested + * address + */ + public static final int ENETDOWN = 100; /* Network is down */ + public static final int ENETUNREACH = 101; /* Network is unreachable */ + public static final int ENETRESET = 102; /* + * Network dropped connection + * because of reset + */ + public static final int ECONNABORTED = 103; /* + * Software caused connection + * abort + */ + public static final int ECONNRESET = 104; /* Connection reset by peer */ + public static final int ENOBUFS = 105; /* No buffer space available */ + public static final int EISCONN = 106; /* + * Transport endpoint is already + * connected + */ + public static final int ENOTCONN = 107; /* + * Transport endpoint is not + * connected + */ + public static final int ESHUTDOWN = 108; /* + * Cannot send after transport + * endpoint shutdown + */ + public static final int ETOOMANYREFS = 109; /* + * Too many references: cannot + * splice + */ + public static final int ETIMEDOUT = 110; /* Connection timed out */ + public static final int ECONNREFUSED = 111; /* Connection refused */ + public static final int EHOSTDOWN = 112; /* Host is down */ + public static final int EHOSTUNREACH = 113; /* No route to host */ + public static final int EALREADY = 114; /* Operation already in progress */ + public static final int EINPROGRESS = 115; /* Operation now in progress */ + public static final int ESTALE = 116; /* Stale NFS file handle */ + public static final int EUCLEAN = 117; /* Structure needs cleaning */ + public static final int ENOTNAM = 118; /* Not a XENIX named type file */ + public static final int ENAVAIL = 119; /* No XENIX semaphores available */ + public static final int EISNAM = 120; /* Is a named type file */ + public static final int EREMOTEIO = 121; /* Remote I/O error */ + public static final int EDQUOT = 122; /* Quota exceeded */ + public static final int ENOMEDIUM = 123; /* No medium found */ + public static final int EMEDIUMTYPE = 124; /* Wrong medium type */ + + // extended attributes support needs these... + + public static final int ENOATTR = ENODATA; /* No such attribute */ + public static final int ENOTSUPP = 524; /* Operation is not supported */ +} diff --git a/src/org/opendedup/buse/driver/ShutdownHook.java b/src/org/opendedup/buse/driver/ShutdownHook.java index e21fbc738..3c3a29506 100644 --- a/src/org/opendedup/buse/driver/ShutdownHook.java +++ b/src/org/opendedup/buse/driver/ShutdownHook.java @@ -1,26 +1,44 @@ -package org.opendedup.buse.driver; - -public class ShutdownHook extends Thread { - public String dev; - public BUSE bClass; - - ShutdownHook(String dev, BUSE bClass) { - this.bClass = bClass; - this.dev = dev; - System.out.println("Registered shutdown hook for " + dev); - } - - @Override - public void run() { - System.out.println("#### Shutting down dev " + dev + " ####"); - - try { - BUSEMkDev.closeDev(dev); - } catch (Exception e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - bClass.close(); - System.out.println("Shut Down " + dev); - } -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.buse.driver; + +public class ShutdownHook extends Thread { + public String dev; + public BUSE bClass; + + ShutdownHook(String dev, BUSE bClass) { + this.bClass = bClass; + this.dev = dev; + System.out.println("Registered shutdown hook for " + dev); + } + + @Override + public void run() { + System.out.println("#### Shutting down dev " + dev + " ####"); + + try { + BUSEMkDev.closeDev(dev); + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + bClass.close(); + System.out.println("Shut Down " + dev); + } +} diff --git a/src/org/opendedup/buse/sdfsdev/BlockDeviceBeforeClosedEvent.java b/src/org/opendedup/buse/sdfsdev/BlockDeviceBeforeClosedEvent.java index 7edb931f1..5bcc2c3d2 100644 --- a/src/org/opendedup/buse/sdfsdev/BlockDeviceBeforeClosedEvent.java +++ b/src/org/opendedup/buse/sdfsdev/BlockDeviceBeforeClosedEvent.java @@ -1,12 +1,30 @@ -package org.opendedup.buse.sdfsdev; - -import org.opendedup.sdfs.io.BlockDev; - -public class BlockDeviceBeforeClosedEvent { - BlockDev dev; - - public BlockDeviceBeforeClosedEvent(BlockDev dev) { - this.dev = dev; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.buse.sdfsdev; + +import org.opendedup.sdfs.io.BlockDev; + +public class BlockDeviceBeforeClosedEvent { + BlockDev dev; + + public BlockDeviceBeforeClosedEvent(BlockDev dev) { + this.dev = dev; + } + +} diff --git a/src/org/opendedup/buse/sdfsdev/BlockDeviceClosedEvent.java b/src/org/opendedup/buse/sdfsdev/BlockDeviceClosedEvent.java index a90cecf55..cdd109f44 100644 --- a/src/org/opendedup/buse/sdfsdev/BlockDeviceClosedEvent.java +++ b/src/org/opendedup/buse/sdfsdev/BlockDeviceClosedEvent.java @@ -1,12 +1,30 @@ -package org.opendedup.buse.sdfsdev; - -import org.opendedup.sdfs.io.BlockDev; - -public class BlockDeviceClosedEvent { - BlockDev dev; - - public BlockDeviceClosedEvent(BlockDev dev) { - this.dev = dev; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.buse.sdfsdev; + +import org.opendedup.sdfs.io.BlockDev; + +public class BlockDeviceClosedEvent { + BlockDev dev; + + public BlockDeviceClosedEvent(BlockDev dev) { + this.dev = dev; + } + +} diff --git a/src/org/opendedup/buse/sdfsdev/BlockDeviceOpenEvent.java b/src/org/opendedup/buse/sdfsdev/BlockDeviceOpenEvent.java index 651cf0348..4cdb0cfaf 100644 --- a/src/org/opendedup/buse/sdfsdev/BlockDeviceOpenEvent.java +++ b/src/org/opendedup/buse/sdfsdev/BlockDeviceOpenEvent.java @@ -1,12 +1,30 @@ -package org.opendedup.buse.sdfsdev; - -import org.opendedup.sdfs.io.BlockDev; - -public class BlockDeviceOpenEvent { - BlockDev dev; - - public BlockDeviceOpenEvent(BlockDev dev) { - this.dev = dev; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.buse.sdfsdev; + +import org.opendedup.sdfs.io.BlockDev; + +public class BlockDeviceOpenEvent { + BlockDev dev; + + public BlockDeviceOpenEvent(BlockDev dev) { + this.dev = dev; + } + +} diff --git a/src/org/opendedup/buse/sdfsdev/BlockDeviceSmallWriteEvent.java b/src/org/opendedup/buse/sdfsdev/BlockDeviceSmallWriteEvent.java index 1f8a199b0..a95333cd0 100644 --- a/src/org/opendedup/buse/sdfsdev/BlockDeviceSmallWriteEvent.java +++ b/src/org/opendedup/buse/sdfsdev/BlockDeviceSmallWriteEvent.java @@ -1,51 +1,69 @@ -package org.opendedup.buse.sdfsdev; - -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import java.nio.ByteBuffer; - -import org.opendedup.sdfs.io.BlockDev; - -public class BlockDeviceSmallWriteEvent implements Externalizable { - public BlockDev dev; - public ByteBuffer buf; - public long pos; - public int len; - - public BlockDeviceSmallWriteEvent() { - - } - - public BlockDeviceSmallWriteEvent(BlockDev dev, ByteBuffer buf, long pos, - int len) { - this.dev = dev; - this.buf = buf; - this.pos = pos; - this.len = len; - } - - @Override - public void readExternal(ObjectInput in) throws IOException, - ClassNotFoundException { - len = in.readInt(); - byte[] b = new byte[len]; - in.readFully(b); - buf = ByteBuffer.wrap(b); - pos = in.readLong(); - - } - - @Override - public void writeExternal(ObjectOutput out) throws IOException { - byte[] b = new byte[len]; - buf.position(0); - buf.get(b); - buf.position(0); - out.writeInt(len); - out.write(b); - out.writeLong(pos); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.buse.sdfsdev; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.nio.ByteBuffer; + +import org.opendedup.sdfs.io.BlockDev; + +public class BlockDeviceSmallWriteEvent implements Externalizable { + public BlockDev dev; + public ByteBuffer buf; + public long pos; + public int len; + + public BlockDeviceSmallWriteEvent() { + + } + + public BlockDeviceSmallWriteEvent(BlockDev dev, ByteBuffer buf, long pos, + int len) { + this.dev = dev; + this.buf = buf; + this.pos = pos; + this.len = len; + } + + @Override + public void readExternal(ObjectInput in) throws IOException, + ClassNotFoundException { + len = in.readInt(); + byte[] b = new byte[len]; + in.readFully(b); + buf = ByteBuffer.wrap(b); + pos = in.readLong(); + + } + + @Override + public void writeExternal(ObjectOutput out) throws IOException { + byte[] b = new byte[len]; + buf.position(0); + buf.get(b); + buf.position(0); + out.writeInt(len); + out.write(b); + out.writeLong(pos); + } + +} diff --git a/src/org/opendedup/buse/sdfsdev/SDFSBlockDev.java b/src/org/opendedup/buse/sdfsdev/SDFSBlockDev.java index 0b9c69fca..5c1120d87 100644 --- a/src/org/opendedup/buse/sdfsdev/SDFSBlockDev.java +++ b/src/org/opendedup/buse/sdfsdev/SDFSBlockDev.java @@ -1,187 +1,205 @@ -package org.opendedup.buse.sdfsdev; - -import java.io.File; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import org.opendedup.buse.driver.*; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.io.BlockDev; -import org.opendedup.sdfs.io.DedupFileChannel; -import org.opendedup.sdfs.io.MetaDataDedupFile; - -import com.google.common.eventbus.EventBus; - -public class SDFSBlockDev implements BUSE, Runnable { - - ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - long sz; - String devicePath; - String internalFPath; - public DedupFileChannel ch; - EventBus eventBus = new EventBus(); - BlockDev dev; - private boolean closed = true; - - public SDFSBlockDev(BlockDev dev) throws IOException { - this.dev = dev; - eventBus.register(dev); - this.devicePath = dev.getDevPath(); - this.sz = dev.getSize(); - File f = new File(devicePath); - if (!f.exists()) { - Process p = Runtime.getRuntime().exec("modprobe nbd"); - try { - p.waitFor(); - } catch (InterruptedException e) { - SDFSLogger.getLog().debug("unable to wait for modprobe", e); - } - } - if (!f.exists()) - throw new IOException("device " + devicePath + " not found."); - MetaDataDedupFile mf = dev.getMF(); - this.ch = mf.getDedupFile(true).getChannel(0); - } - - @Override - public int read(ByteBuffer data, int len, long offset) { - /* - * if(len >= Main.CHUNK_LENGTH) - * SDFSLogger.getLog().info("read request len=" + len + " offset=" + - * offset + " databuflen=" + data.capacity() + " databufpos=" + - * data.position()); - */ - try { - ch.read(data, 0, len, offset); - } catch (Throwable e) { - SDFSLogger.getLog().error("unable to read file " + this.devicePath, - e); - return Errno.ENODATA; - } - return 0; - } - - @Override - public int write(ByteBuffer buff, int len, long offset) { - /* - * if(len >= Main.CHUNK_LENGTH) - * SDFSLogger.getLog().info("write request len=" + len + " offset=" + - * offset + " databuflen=" + buff.capacity() + " databufpos=" + - * buff.position()); - */ - try { - if (Main.volume.isFull()) { - 
SDFSLogger.getLog().error("Volume is full"); - ; - return Errno.ENOSPC; - - } - try { - ch.writeFile(buff, len, 0, offset, true); - - } catch (Throwable e) { - SDFSLogger.getLog().error( - "unable to write to block device" + this.devicePath, e); - return Errno.EACCES; - } - } finally { - } - return 0; - } - - @Override - public void disconnect() { - if (ch != null) { - try { - SDFSLogger.getLog().warn( - "disconnect called for " + this.devicePath); - ch.getDedupFile().unRegisterChannel(ch, 0); - ch.getDedupFile().forceClose(); - } catch (Throwable e) { - SDFSLogger.getLog().error("unable to close " + this.devicePath, - e); - } - } - } - - @Override - public int flush() { - /* - * SDFSLogger.getLog().info("flush request"); - */ - try { - ch.force(true); - } catch (Exception e) { - SDFSLogger.getLog().error( - "unable to sync file [" + this.devicePath + "]", e); - return Errno.EACCES; - } - return 0; - } - - @Override - public int trim(long from, int len) { - /* - * SDFSLogger.getLog().debug("trim request from=" + from + " len=" + - * len); - */ - try { - ch.trim(from, len); - } catch (IOException e) { - SDFSLogger.getLog().error( - "unable to trim file [" + this.devicePath + "]", e); - return Errno.EACCES; - } - return 0; - } - - private void startBlockDev() throws Exception { - BUSEMkDev.startdev(this.devicePath, this.sz, 4096, this, false); - - } - - @Override - public void close() { - - try { - Process p = Runtime.getRuntime().exec("umount " + this.devicePath); - p.waitFor(); - } catch (Exception e) { - SDFSLogger.getLog().error( - "unable to unmount vols for " + this.devicePath, e); - } - eventBus.post(new BlockDeviceBeforeClosedEvent(this.dev)); - try { - BUSEMkDev.closeDev(devicePath); - for (int i = 0; i < 300; i++) { - if (this.closed) - return; - else - Thread.sleep(100); - } - SDFSLogger.getLog().warn("timed out waiting for close"); - } catch (Exception e) { - SDFSLogger.getLog().error("unable to close " + this.devicePath, e); - } - } - - @Override - public void run() { - try { - this.closed = false; - this.eventBus.post(new BlockDeviceOpenEvent(this.dev)); - this.startBlockDev(); - } catch (Exception e) { - SDFSLogger.getLog().warn( - "Block Device Stopping " + this.devicePath, e); - } finally { - this.closed = true; - this.eventBus.post(new BlockDeviceClosedEvent(this.dev)); - SDFSLogger.getLog().warn("Block Device Stopped " + this.devicePath); - } - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.buse.sdfsdev; + +import java.io.File; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import org.opendedup.buse.driver.*; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.io.BlockDev; +import org.opendedup.sdfs.io.DedupFileChannel; +import org.opendedup.sdfs.io.MetaDataDedupFile; + +import com.google.common.eventbus.EventBus; + +public class SDFSBlockDev implements BUSE, Runnable { + + ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + long sz; + String devicePath; + String internalFPath; + public DedupFileChannel ch; + EventBus eventBus = new EventBus(); + BlockDev dev; + private boolean closed = true; + + public SDFSBlockDev(BlockDev dev) throws IOException { + this.dev = dev; + eventBus.register(dev); + this.devicePath = dev.getDevPath(); + this.sz = dev.getSize(); + File f = new File(devicePath); + if (!f.exists()) { + Process p = Runtime.getRuntime().exec("modprobe nbd"); + try { + p.waitFor(); + } catch (InterruptedException e) { + SDFSLogger.getLog().debug("unable to wait for modprobe", e); + } + } + if (!f.exists()) + throw new IOException("device " + devicePath + " not found."); + MetaDataDedupFile mf = dev.getMF(); + this.ch = mf.getDedupFile(true).getChannel(0); + } + + @Override + public int read(ByteBuffer data, int len, long offset) { + /* + * if(len >= Main.CHUNK_LENGTH) + * SDFSLogger.getLog().info("read request len=" + len + " offset=" + + * offset + " databuflen=" + data.capacity() + " databufpos=" + + * data.position()); + */ + try { + ch.read(data, 0, len, offset); + } catch (Throwable e) { + SDFSLogger.getLog().error("unable to read file " + this.devicePath, + e); + return Errno.ENODATA; + } + return 0; + } + + @Override + public int write(ByteBuffer buff, int len, long offset) { + /* + * if(len >= Main.CHUNK_LENGTH) + * SDFSLogger.getLog().info("write request len=" + len + " offset=" + + * offset + " databuflen=" + buff.capacity() + " databufpos=" + + * buff.position()); + */ + try { + if (Main.volume.isFull()) { + SDFSLogger.getLog().error("Volume is full"); + ; + return Errno.ENOSPC; + + } + try { + ch.writeFile(buff, len, 0, offset, true); + + } catch (Throwable e) { + SDFSLogger.getLog().error( + "unable to write to block device" + this.devicePath, e); + return Errno.EACCES; + } + } finally { + } + return 0; + } + + @Override + public void disconnect() { + if (ch != null) { + try { + SDFSLogger.getLog().warn( + "disconnect called for " + this.devicePath); + ch.getDedupFile().unRegisterChannel(ch, 0); + ch.getDedupFile().forceClose(); + } catch (Throwable e) { + SDFSLogger.getLog().error("unable to close " + this.devicePath, + e); + } + } + } + + @Override + public int flush() { + /* + * SDFSLogger.getLog().info("flush request"); + */ + try { + ch.force(true); + } catch (Exception e) { + SDFSLogger.getLog().error( + "unable to sync file [" + this.devicePath + "]", e); + return Errno.EACCES; + } + return 0; + } + + @Override + public int trim(long from, int len) { + /* + * SDFSLogger.getLog().debug("trim request from=" + from + " len=" + + * len); + */ + try { + ch.trim(from, len); + } catch (IOException e) { + SDFSLogger.getLog().error( + "unable to trim file [" + this.devicePath + "]", e); + return Errno.EACCES; + } + return 0; + } + + private void startBlockDev() throws Exception { + BUSEMkDev.startdev(this.devicePath, 
this.sz, 4096, this, false); + + } + + @Override + public void close() { + + try { + Process p = Runtime.getRuntime().exec("umount " + this.devicePath); + p.waitFor(); + } catch (Exception e) { + SDFSLogger.getLog().error( + "unable to unmount vols for " + this.devicePath, e); + } + eventBus.post(new BlockDeviceBeforeClosedEvent(this.dev)); + try { + BUSEMkDev.closeDev(devicePath); + for (int i = 0; i < 300; i++) { + if (this.closed) + return; + else + Thread.sleep(100); + } + SDFSLogger.getLog().warn("timed out waiting for close"); + } catch (Exception e) { + SDFSLogger.getLog().error("unable to close " + this.devicePath, e); + } + } + + @Override + public void run() { + try { + this.closed = false; + this.eventBus.post(new BlockDeviceOpenEvent(this.dev)); + this.startBlockDev(); + } catch (Exception e) { + SDFSLogger.getLog().warn( + "Block Device Stopping " + this.devicePath, e); + } finally { + this.closed = true; + this.eventBus.post(new BlockDeviceClosedEvent(this.dev)); + SDFSLogger.getLog().warn("Block Device Stopped " + this.devicePath); + } + } + +} diff --git a/src/org/opendedup/buse/sdfsdev/SDFSVolMgr.java b/src/org/opendedup/buse/sdfsdev/SDFSVolMgr.java index e8f85eac0..91e699d2e 100644 --- a/src/org/opendedup/buse/sdfsdev/SDFSVolMgr.java +++ b/src/org/opendedup/buse/sdfsdev/SDFSVolMgr.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.buse.sdfsdev; import java.io.File; diff --git a/src/org/opendedup/buse/sdfsdev/VolumeShutdownHook.java b/src/org/opendedup/buse/sdfsdev/VolumeShutdownHook.java index 9c1e21d25..9c6ae22d0 100644 --- a/src/org/opendedup/buse/sdfsdev/VolumeShutdownHook.java +++ b/src/org/opendedup/buse/sdfsdev/VolumeShutdownHook.java @@ -1,38 +1,56 @@ -package org.opendedup.buse.sdfsdev; - -import org.opendedup.buse.driver.BUSEMkDev; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.servers.SDFSService; - -public class VolumeShutdownHook extends Thread { - public static SDFSService service; - private static boolean stopped; - - public VolumeShutdownHook() { - } - - @Override - public void run() { - - shutdown(); - } - - public static synchronized void shutdown() { - if (!stopped) { - stopped = true; - SDFSLogger.getLog().info("Please Wait while shutting down SDFS"); - SDFSLogger.getLog().info("Data Can be lost if this is interrupted"); - try { - Main.volume.closeAllDevices(); - Thread.sleep(1000); - BUSEMkDev.release(); - service.stop(); - } catch (Throwable e) { - e.printStackTrace(); - } - SDFSLogger.getLog().info("SDFS Shut Down Cleanly"); - } - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.buse.sdfsdev; + +import org.opendedup.buse.driver.BUSEMkDev; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.servers.SDFSService; + +public class VolumeShutdownHook extends Thread { + public static SDFSService service; + private static boolean stopped; + + public VolumeShutdownHook() { + } + + @Override + public void run() { + + shutdown(); + } + + public static synchronized void shutdown() { + if (!stopped) { + stopped = true; + SDFSLogger.getLog().info("Please Wait while shutting down SDFS"); + SDFSLogger.getLog().info("Data Can be lost if this is interrupted"); + try { + Main.volume.closeAllDevices(); + Thread.sleep(1000); + BUSEMkDev.release(); + service.stop(); + } catch (Throwable e) { + e.printStackTrace(); + } + SDFSLogger.getLog().info("SDFS Shut Down Cleanly"); + } + } + +} diff --git a/src/org/opendedup/cloud/Utils.java b/src/org/opendedup/cloud/Utils.java index b66265c55..f3df53a6a 100644 --- a/src/org/opendedup/cloud/Utils.java +++ b/src/org/opendedup/cloud/Utils.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.cloud; import java.util.ArrayList; diff --git a/src/org/opendedup/collections/AbstractHashesMap.java b/src/org/opendedup/collections/AbstractHashesMap.java index 501338b14..f7722e8ac 100644 --- a/src/org/opendedup/collections/AbstractHashesMap.java +++ b/src/org/opendedup/collections/AbstractHashesMap.java @@ -1,70 +1,88 @@ -package org.opendedup.collections; - -import java.io.IOException; - -import org.opendedup.hashing.LargeFileBloomFilter; -import org.opendedup.sdfs.filestore.ChunkData; -import org.opendedup.sdfs.notification.SDFSEvent; - -public interface AbstractHashesMap { - - public abstract long endStartingPosition(); - - public abstract long getSize(); - - public abstract long getUsedSize(); - - public abstract long getMaxSize(); - - public abstract void claimRecords(SDFSEvent evt) throws IOException; - - public abstract long claimRecords(SDFSEvent evt, LargeFileBloomFilter bf) - throws IOException; - - /** - * Searches the set for obj - * - * @param obj - * an Object value - * @return a boolean value - * @throws IOException - */ - public abstract boolean containsKey(byte[] key) throws IOException; - - public abstract InsertRecord put(ChunkData cm) throws IOException, - HashtableFullException; - - public abstract InsertRecord put(ChunkData cm, boolean persist) - throws IOException, HashtableFullException; - - public abstract boolean update(ChunkData cm) throws IOException; - - public abstract void cache(byte[] key,long pos) throws IOException; - - public abstract long get(byte[] key) throws IOException; - - public abstract byte[] getData(byte[] key) throws IOException, - DataArchivedException; - - public abstract byte[] getData(byte[] key,long pos) throws IOException, - DataArchivedException; - - public abstract boolean remove(ChunkData cm) throws IOException; - - public abstract boolean isClaimed(ChunkData cm) - throws KeyNotFoundException, IOException; - - public abstract void sync() throws IOException; - - public abstract void close(); - - public abstract void initCompact() throws IOException; - - public abstract void commitCompact(boolean force) throws IOException; - - public abstract void rollbackCompact() throws IOException; - - public abstract void init(long maxSize, String fileName) - throws IOException, HashtableFullException; - +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.collections; + +import java.io.IOException; + +import org.opendedup.hashing.LargeFileBloomFilter; +import org.opendedup.sdfs.filestore.ChunkData; +import org.opendedup.sdfs.notification.SDFSEvent; + +public interface AbstractHashesMap { + + public abstract long endStartingPosition(); + + public abstract long getSize(); + + public abstract long getUsedSize(); + + public abstract long getMaxSize(); + + public abstract void claimRecords(SDFSEvent evt) throws IOException; + + public abstract long claimRecords(SDFSEvent evt, LargeFileBloomFilter bf) + throws IOException; + + /** + * Searches the set for obj + * + * @param obj + * an Object value + * @return a boolean value + * @throws IOException + */ + public abstract boolean containsKey(byte[] key) throws IOException; + + public abstract InsertRecord put(ChunkData cm) throws IOException, + HashtableFullException; + + public abstract InsertRecord put(ChunkData cm, boolean persist) + throws IOException, HashtableFullException; + + public abstract boolean update(ChunkData cm) throws IOException; + + public abstract void cache(byte[] key,long pos) throws IOException; + + public abstract long get(byte[] key) throws IOException; + + public abstract byte[] getData(byte[] key) throws IOException, + DataArchivedException; + + public abstract byte[] getData(byte[] key,long pos) throws IOException, + DataArchivedException; + + public abstract boolean remove(ChunkData cm) throws IOException; + + public abstract boolean isClaimed(ChunkData cm) + throws KeyNotFoundException, IOException; + + public abstract void sync() throws IOException; + + public abstract void close(); + + public abstract void initCompact() throws IOException; + + public abstract void commitCompact(boolean force) throws IOException; + + public abstract void rollbackCompact() throws IOException; + + public abstract void init(long maxSize, String fileName) + throws IOException, HashtableFullException; + } \ No newline at end of file diff --git a/src/org/opendedup/collections/AbstractMap.java b/src/org/opendedup/collections/AbstractMap.java index 757d6d55f..f2a5e6200 100644 --- a/src/org/opendedup/collections/AbstractMap.java +++ b/src/org/opendedup/collections/AbstractMap.java @@ -1,15 +1,33 @@ -package org.opendedup.collections; - -import java.io.IOException; - -public interface AbstractMap { - - public abstract boolean isClosed(); - - public abstract void sync() throws IOException; - - public abstract void vanish() throws IOException; - - public abstract void close(); - +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.collections; + +import java.io.IOException; + +public interface AbstractMap { + + public abstract boolean isClosed(); + + public abstract void sync() throws IOException; + + public abstract void vanish() throws IOException; + + public abstract void close(); + } \ No newline at end of file diff --git a/src/org/opendedup/collections/AbstractShard.java b/src/org/opendedup/collections/AbstractShard.java index 766ef61f5..efdeb246e 100644 --- a/src/org/opendedup/collections/AbstractShard.java +++ b/src/org/opendedup/collections/AbstractShard.java @@ -1,73 +1,91 @@ -package org.opendedup.collections; - -import java.io.IOException; -import java.io.SyncFailedException; - -import org.opendedup.hashing.LargeFileBloomFilter; -import org.opendedup.sdfs.filestore.ChunkData; - -public interface AbstractShard { - - public abstract void iterInit(); - - public abstract byte[] nextKey() throws IOException; - - public abstract long getBigestKey() throws IOException; - - /** - * initializes the Object set of this hash table. - * - * @param initialCapacity - * an int value - * @return an int value - * @throws IOException - */ - public abstract long setUp() throws IOException; - - /** - * Searches the set for obj - * - * @param obj - * an Object value - * @return a boolean value - */ - public abstract boolean containsKey(byte[] key); - - /** - * Searches the set for obj - * - * @param obj - * an Object value - * @return a boolean value - * @throws KeyNotFoundException - */ - public abstract boolean isClaimed(byte[] key) throws KeyNotFoundException, - IOException; - - public abstract boolean update(byte[] key, long value) throws IOException; - - public abstract boolean remove(byte[] key) throws IOException; - - public abstract InsertRecord put(ChunkData cm) - throws HashtableFullException, IOException; - - public abstract InsertRecord put(byte[] key, long val) - throws HashtableFullException, IOException; - - public abstract int getEntries(); - - public abstract long get(byte[] key); - - public abstract long get(byte[] key, boolean claim); - - public abstract int size(); - - public abstract void close(); - - public abstract long claimRecords() throws IOException; - - public abstract long claimRecords(LargeFileBloomFilter bf) throws IOException; - - public abstract void sync() throws SyncFailedException, IOException; - +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.collections; + +import java.io.IOException; +import java.io.SyncFailedException; + +import org.opendedup.hashing.LargeFileBloomFilter; +import org.opendedup.sdfs.filestore.ChunkData; + +public interface AbstractShard { + + public abstract void iterInit(); + + public abstract byte[] nextKey() throws IOException; + + public abstract long getBigestKey() throws IOException; + + /** + * initializes the Object set of this hash table. + * + * @param initialCapacity + * an int value + * @return an int value + * @throws IOException + */ + public abstract long setUp() throws IOException; + + /** + * Searches the set for obj + * + * @param obj + * an Object value + * @return a boolean value + */ + public abstract boolean containsKey(byte[] key); + + /** + * Searches the set for obj + * + * @param obj + * an Object value + * @return a boolean value + * @throws KeyNotFoundException + */ + public abstract boolean isClaimed(byte[] key) throws KeyNotFoundException, + IOException; + + public abstract boolean update(byte[] key, long value) throws IOException; + + public abstract boolean remove(byte[] key) throws IOException; + + public abstract InsertRecord put(ChunkData cm) + throws HashtableFullException, IOException; + + public abstract InsertRecord put(byte[] key, long val) + throws HashtableFullException, IOException; + + public abstract int getEntries(); + + public abstract long get(byte[] key); + + public abstract long get(byte[] key, boolean claim); + + public abstract int size(); + + public abstract void close(); + + public abstract long claimRecords() throws IOException; + + public abstract long claimRecords(LargeFileBloomFilter bf) throws IOException; + + public abstract void sync() throws SyncFailedException, IOException; + } \ No newline at end of file diff --git a/src/org/opendedup/collections/ByteArrayLongMap.java b/src/org/opendedup/collections/ByteArrayLongMap.java index 2b622f0d1..9a3544625 100644 --- a/src/org/opendedup/collections/ByteArrayLongMap.java +++ b/src/org/opendedup/collections/ByteArrayLongMap.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.collections; import java.io.IOException; diff --git a/src/org/opendedup/collections/ConcurrentHopscotchHashMap.java b/src/org/opendedup/collections/ConcurrentHopscotchHashMap.java index fa8037b13..35875f7e6 100644 --- a/src/org/opendedup/collections/ConcurrentHopscotchHashMap.java +++ b/src/org/opendedup/collections/ConcurrentHopscotchHashMap.java @@ -1,729 +1,747 @@ -package org.opendedup.collections; - -//////////////////////////////////////////////////////////////////////////////// -//ConcurrentHopscotchHashMap Class -// -//////////////////////////////////////////////////////////////////////////////// -//TERMS OF USAGE -//---------------------------------------------------------------------- -// -//Permission to use, copy, modify and distribute this software and -//its documentation for any purpose is hereby granted without fee, -//provided that due acknowledgments to the authors are provided and -//this permission notice appears in all copies of the software. -//The software is provided "as is". There is no warranty of any kind. -// -//Authors: -//Maurice Herlihy -//Brown University -//and -//Nir Shavit -//Tel-Aviv University -//and -//Moran Tzafrir -//Tel-Aviv University -// -//Date: Dec 2, 2008. -// -//////////////////////////////////////////////////////////////////////////////// -//Programmer : Moran Tzafrir (MoranTza@gmail.com) -// -//////////////////////////////////////////////////////////////////////////////// -//package xbird.util.concurrent.map; - -import java.util.concurrent.locks.ReentrantLock; - -public class ConcurrentHopscotchHashMap { - - // constants ----------------------------------- - static final short _NULL_DELTA_SHORT = Short.MIN_VALUE; - static final int _NULL_DELTA_INT = (int) (Short.MIN_VALUE); - static final long _NULL_DELTA_FIRST_LONG = (long) (Short.MIN_VALUE & 0xFFFFL); - static final long _NULL_DELTA_NEXT_LONG = (long) ((Short.MIN_VALUE & 0xFFFFL) << 16); - static final long _NULL_HASH_DELTA = 0x0000000080008000L; - static final int _NULL_HASH = 0; - static final int _SEGMENT_SHIFT = 0; // choosed by empirical tests - - static final long _FIRST_MASK = 0x000000000000FFFFL; - static final long _NEXT_MASK = 0x00000000FFFF0000L; - static final long _HASH_MASK = 0xFFFFFFFF00000000L; - static final long _NOT_NEXT_MASK = ~(_NEXT_MASK); - static final long _NOT_FIRST_MASK = ~(_FIRST_MASK); - static final long _NOT_HASH_MASK = ~(_HASH_MASK); - static final int _NEXT_SHIFT = 16; - static final int _HASH_SHIFT = 32; - - static final int _RETRIES_BEFORE_LOCK = 2; - static final int _MAX_DELTA_BUCKET = Short.MAX_VALUE; - - static final int _CACHE_MASK = 4 - 1; - static final int _NOT_CACHE_MASK = ~_CACHE_MASK; - static final int _NULL_INDX = -1; - - // inner classes ------------------------------- - - static final class Segment extends ReentrantLock { - - /** - * - */ - private static final long serialVersionUID = 4476725515973126431L; - volatile int _timestamp; - int _bucketk_mask; - long[] _table_hash_delta; - Object[] _table_key_value; - // AtomicInteger _lock; - int _count; - - public Segment(final int initialCapacity) { - init(initialCapacity); - } - - private void init(final int initialCapacity) { - // _lock = new AtomicInteger(); - // _lock.set(0); - _timestamp = 0; - _bucketk_mask = initialCapacity - 1; - _count = 0; - - _table_hash_delta = new long[initialCapacity]; - _table_key_value = new Object[initialCapacity << 1]; - - // create the blocks of buckets - 
for (int iData = 0; iData < initialCapacity; ++iData) { - _table_hash_delta[iData] = _NULL_HASH_DELTA; - } - } - - /* - * void lock() { while(!_lock.compareAndSet(0, 0xFFFF)) { } } - * - * void unlock() { _lock.set(0); } - */ - - @SuppressWarnings("unchecked") - static final Segment[] newArray(final int numSegments) { - return new Segment[numSegments]; - } - - boolean containsKey(final K key, final int hash) { - // go over the list and look for key - int start_timestamp = _timestamp; - int iBucket = hash & _bucketk_mask; - long data = _table_hash_delta[iBucket]; - final int first_delta = (short) data; - if (0 != first_delta) { - if (_NULL_DELTA_SHORT == first_delta) - return false; - iBucket += first_delta; - data = _table_hash_delta[iBucket]; - } - - do { - if (hash == (data >> _HASH_SHIFT) - && key.equals(_table_key_value[iBucket << 1])) - return true; - final int nextDelta = (int) data >> _NEXT_SHIFT; - if (_NULL_DELTA_INT != nextDelta) { - iBucket += nextDelta; - data = _table_hash_delta[iBucket]; - continue; - } else { - final int curr_timestamp = _timestamp; - if (curr_timestamp == start_timestamp) - return false; - start_timestamp = curr_timestamp; - iBucket = hash & _bucketk_mask; - data = _table_hash_delta[iBucket]; - final int first_delta2 = (short) data; - if (0 != first_delta2) { - if (_NULL_DELTA_SHORT == first_delta2) - return false; - iBucket += first_delta2; - data = _table_hash_delta[iBucket]; - } - continue; - } - } while (true); - } - - @SuppressWarnings("unchecked") - V get(final K key, final int hash) { - // go over the list and look for key - int start_timestamp = 0; - int iBucket = 0; - long data = 0; - - boolean is_need_init = true; - do { - if (is_need_init) { - is_need_init = false; - start_timestamp = _timestamp; - iBucket = hash & _bucketk_mask; - data = _table_hash_delta[iBucket]; - final int first_delta = (short) data; - if (0 != first_delta) { - if (_NULL_DELTA_SHORT == first_delta) - return null; - iBucket += first_delta; - data = _table_hash_delta[iBucket]; - } - } - - final int iRef; - if (hash == (data >> _HASH_SHIFT) - && key.equals(_table_key_value[iRef = iBucket << 1])) { - final V value = (V) _table_key_value[iRef + 1]; - if (_timestamp == start_timestamp) - return value; - is_need_init = true; - continue; - } - final int nextDelta = (int) data >> _NEXT_SHIFT; - if (_NULL_DELTA_INT != nextDelta) { - iBucket += nextDelta; - data = _table_hash_delta[iBucket]; - continue; - } else { - if (_timestamp == start_timestamp) - return null; - is_need_init = true; - continue; - } - } while (true); - } - - @SuppressWarnings("unchecked") - V put(final K key, final int hash, final V value) { - lock(); - try { - // look for key in hash-map - // ..................................... - final int i_start_bucket = hash & _bucketk_mask; - int iBucket = i_start_bucket; - long data = _table_hash_delta[i_start_bucket]; - final short first_delta = (short) data; - if (_NULL_DELTA_SHORT != first_delta) { - if (0 != first_delta) { - iBucket += first_delta; - data = _table_hash_delta[iBucket]; - } - - do { - final int iRef; - if (hash == (data >> _HASH_SHIFT) - && key.equals(_table_key_value[iRef = (iBucket << 1)])) - return (V) _table_key_value[iRef + 1]; - final int next_delta = (int) data >> _NEXT_SHIFT; - if (_NULL_DELTA_INT == next_delta) - break; - else { - iBucket += next_delta; - data = _table_hash_delta[iBucket]; - } - } while (true); - } - - // try to place the key in the same cache-line - // .................. 
- final int i_start_cacheline = i_start_bucket & _NOT_CACHE_MASK; - final int i_end_cacheline = i_start_cacheline + _CACHE_MASK; - int i_free_bucket = i_start_bucket; - do { - long free_data = _table_hash_delta[i_free_bucket]; - if (_NULL_HASH == (free_data >> _HASH_SHIFT)) { - // we found a free bucket at the cahce-line, so - // add the new bucket to the begining of the list - - int i_ref_bucket = i_free_bucket << 1; - _table_key_value[i_ref_bucket] = key; - _table_key_value[++i_ref_bucket] = value; - free_data &= _NOT_HASH_MASK; - free_data |= ((long) hash << _HASH_SHIFT); - - if (0 == first_delta) { - final long start_data = _table_hash_delta[i_start_bucket]; - final int start_next = (int) start_data >> _NEXT_SHIFT; - if (_NULL_DELTA_INT != start_next) { - final long new_free_next = i_start_bucket - + start_next - i_free_bucket; - _table_hash_delta[i_free_bucket] = (free_data & _NOT_NEXT_MASK) - | ((new_free_next << _NEXT_SHIFT) & _NEXT_MASK); - } else - _table_hash_delta[i_free_bucket] = free_data; - final long new_start_next = i_free_bucket - - i_start_bucket; - _table_hash_delta[i_start_bucket] = (start_data & _NOT_NEXT_MASK) - | ((new_start_next << _NEXT_SHIFT) & _NEXT_MASK); - } else {// 0 != first_delta - if (_NULL_DELTA_SHORT != first_delta) { - final long new_free_next = i_start_bucket - + first_delta - i_free_bucket; - free_data &= _NOT_NEXT_MASK; - free_data |= ((new_free_next << _NEXT_SHIFT) & _NEXT_MASK); - } - final long start_data; - if (i_free_bucket != i_start_bucket) { - start_data = _table_hash_delta[i_start_bucket]; - _table_hash_delta[i_free_bucket] = free_data; - } else - start_data = free_data; - final long new_start_first = i_free_bucket - - i_start_bucket; - _table_hash_delta[i_start_bucket] = (start_data & _NOT_FIRST_MASK) - | (new_start_first & _FIRST_MASK); - } - - ++_count; - ++_timestamp; - return null; - } - - ++i_free_bucket; - if (i_free_bucket > i_end_cacheline) - i_free_bucket = i_start_cacheline; - } while (i_start_bucket != i_free_bucket); - - // place key in arbitrary free forward bucket - // ................... - int i_max_bucket = i_start_bucket + _MAX_DELTA_BUCKET; - if (i_max_bucket > _bucketk_mask) - i_max_bucket = _bucketk_mask; - i_free_bucket = i_end_cacheline + 1; - - while (i_free_bucket <= i_max_bucket) { - long free_data = _table_hash_delta[i_free_bucket]; - if (_NULL_HASH == (free_data >> _HASH_SHIFT)) { - // we found a free bucket outside of the cahce-line, so - // add the new bucket to the end of the list - - int i_ref_bucket = i_free_bucket << 1; - _table_key_value[i_ref_bucket] = key; - _table_key_value[++i_ref_bucket] = value; - free_data &= _NOT_HASH_MASK; - free_data |= ((long) hash << _HASH_SHIFT); - _table_hash_delta[i_free_bucket] = free_data; - - if (_NULL_DELTA_SHORT == first_delta) { - long new_start_first = (i_free_bucket - i_start_bucket) - & _FIRST_MASK; - long start_data = (_table_hash_delta[i_start_bucket] & _NOT_FIRST_MASK) - | new_start_first; - _table_hash_delta[i_start_bucket] = start_data; - } else { - long new_last_next = ((i_free_bucket - iBucket) << _NEXT_SHIFT) - & _NEXT_MASK; - long last_data = (_table_hash_delta[iBucket] & _NOT_NEXT_MASK) - | new_last_next; - _table_hash_delta[iBucket] = last_data; - } - - ++_count; - ++_timestamp; - return null; - } - - i_free_bucket += 2; - } - - // place key in arbitrary free backward bucket - // ................... 
- int i_min_bucket = i_start_bucket - _MAX_DELTA_BUCKET; - if (i_min_bucket < 0) - i_min_bucket = 0; - i_free_bucket = i_start_cacheline - 1; - - while (i_free_bucket >= i_min_bucket) { - long free_data = _table_hash_delta[i_free_bucket]; - if (_NULL_HASH == (free_data >> _HASH_SHIFT)) { - // we found a free bucket outside of the cahce-line, so - // add the new bucket to the end of the list - - int i_ref_bucket = i_free_bucket << 1; - _table_key_value[i_ref_bucket] = key; - _table_key_value[++i_ref_bucket] = value; - free_data &= _NOT_HASH_MASK; - free_data |= ((long) hash << _HASH_SHIFT); - _table_hash_delta[i_free_bucket] = free_data; - - if (_NULL_DELTA_SHORT == first_delta) { - long new_start_first = (i_free_bucket - i_start_bucket) - & _FIRST_MASK; - long start_data = (_table_hash_delta[i_start_bucket] & _NOT_FIRST_MASK) - | new_start_first; - _table_hash_delta[i_start_bucket] = start_data; - } else { - long new_last_next = ((i_free_bucket - iBucket) << _NEXT_SHIFT) - & _NEXT_MASK; - long last_data = (_table_hash_delta[iBucket] & _NOT_NEXT_MASK) - | new_last_next; - _table_hash_delta[iBucket] = last_data; - } - - ++_count; - ++_timestamp; - return null; - } - - i_free_bucket -= 2; - } - - } finally { - unlock(); - } - - return null; - } - - private void optimize_cacheline_use(final int i_free_bucket) { - final int i_start_cacheline = i_free_bucket & _NOT_CACHE_MASK; - final int i_end_cacheline = i_start_cacheline + _CACHE_MASK; - - // go over the buckets that reside in the cacheline of the free - // bucket - for (int i_cacheline = i_start_cacheline; i_cacheline <= i_end_cacheline; ++i_cacheline) { - - // check if current bucket has keys - final long data = _table_hash_delta[i_cacheline]; - final short first_delta = (short) data; - if (_NULL_DELTA_INT != first_delta) { - - int last_i_relocate = _NULL_INDX; - int i_relocate = i_cacheline + first_delta; - int curr_delta = first_delta; - - // go over the keys in the bucket-list - do { - // if the key reside outside the cahceline - if (curr_delta < 0 || curr_delta > _CACHE_MASK) { - - // copy key, value, & hash to the free bucket - final int i_key_value = i_free_bucket << 1; - final int i_rel_key_value = i_relocate << 1; - _table_key_value[i_key_value] = _table_key_value[i_rel_key_value]; - _table_key_value[i_key_value + 1] = _table_key_value[i_rel_key_value + 1]; - long relocate_data = _table_hash_delta[i_relocate]; - long free_data = _table_hash_delta[i_free_bucket]; - free_data &= _NOT_HASH_MASK; - free_data |= (relocate_data & _HASH_MASK); - - // update the next-field of the free-bucket - free_data &= _NOT_NEXT_MASK; - final int relocate_next_delta = (int) relocate_data >> _NEXT_SHIFT; - if (_NULL_DELTA_INT == relocate_next_delta) { - free_data |= _NULL_DELTA_NEXT_LONG; - } else { - final long new_next = (((i_relocate + relocate_next_delta) - i_free_bucket) & 0xFFFFL) << 16; - free_data |= new_next; - } - _table_hash_delta[i_free_bucket] = free_data; - - // update the "first" or "next" field of the last - if (_NULL_INDX == last_i_relocate) { - long start_data = _table_hash_delta[i_cacheline] - & _NOT_FIRST_MASK; - start_data |= ((i_free_bucket - i_cacheline) & 0xFFFFL); - _table_hash_delta[i_cacheline] = start_data; - } else { - long last_data = _table_hash_delta[last_i_relocate] - & _NOT_NEXT_MASK; - last_data |= (((i_free_bucket - last_i_relocate) & 0xFFFFL) << 16); - _table_hash_delta[last_i_relocate] = last_data; - } - - // - ++_timestamp; - relocate_data &= _NOT_HASH_MASK;// hash=null - relocate_data &= _NOT_NEXT_MASK; - 
relocate_data |= _NULL_DELTA_NEXT_LONG;// next = - // null - _table_hash_delta[i_relocate] = relocate_data; - _table_key_value[i_rel_key_value] = null;// key=null - _table_key_value[i_rel_key_value + 1] = null;// value=null - return; - } - - final long relocate_data = _table_hash_delta[i_relocate]; - final int next_delta = (int) relocate_data >> _NEXT_SHIFT; - if (_NULL_DELTA_INT == next_delta) - break; - last_i_relocate = i_relocate; - curr_delta += next_delta; - i_relocate += next_delta; - } while (true);// for on list - }// if list exists - }// for on list - } - - @SuppressWarnings("unchecked") - V remove(final K key, final int hash) { - lock(); - try { - // go over the list and look for key - final int i_start_bucket = hash & _bucketk_mask; - int iBucket = i_start_bucket; - long data = _table_hash_delta[iBucket]; - final short first_delta = (short) data; - if (0 != first_delta) { - if (_NULL_DELTA_SHORT == first_delta) - return null; - iBucket += first_delta; - data = _table_hash_delta[iBucket]; - } - - int i_last_bucket = -1; - do { - final int iRef; - if (hash == (data >> _HASH_SHIFT) - && key.equals(_table_key_value[iRef = (iBucket << 1)])) { - - data &= _NOT_HASH_MASK; - final int next_delta = (int) data >> _NEXT_SHIFT; - _table_hash_delta[iBucket] = data; // hash = null - _table_key_value[iRef] = null; // key = null; - - final int iRef2 = iRef + 1; - final V key_value = (V) _table_key_value[iRef2]; - _table_key_value[iRef2] = null; // value = null; - - if (-1 == i_last_bucket) { - long start_data = _table_hash_delta[i_start_bucket] - & _NOT_FIRST_MASK; - if (_NULL_DELTA_INT == next_delta) { - start_data |= _NULL_DELTA_FIRST_LONG; - } else { - final long new_first = (first_delta + next_delta) & 0xFFFFL; - start_data |= new_first; - } - if (i_start_bucket == iBucket) { - start_data &= _NOT_NEXT_MASK; - start_data |= _NULL_DELTA_NEXT_LONG; - --_count; - ++_timestamp; - _table_hash_delta[i_start_bucket] = start_data; - // return key_value; - } else - _table_hash_delta[i_start_bucket] = start_data; - } else { - long last_data = _table_hash_delta[i_last_bucket]; - final int last_next_delta = (int) last_data >> _NEXT_SHIFT; - last_data &= _NOT_NEXT_MASK; - if (_NULL_DELTA_INT == next_delta) { - last_data |= _NULL_DELTA_NEXT_LONG; - } else { - final long new_next = ((last_next_delta + next_delta) & 0xFFFFL) << 16; - last_data |= new_next; - } - _table_hash_delta[i_last_bucket] = last_data; - } - - if (i_start_bucket != iBucket) { - --_count; - ++_timestamp; - data &= _NOT_NEXT_MASK; - data |= _NULL_DELTA_NEXT_LONG; - _table_hash_delta[iBucket] = data; // next = null - } - - optimize_cacheline_use(iBucket); - - return key_value; - } - final int nextDelta = (int) data >> _NEXT_SHIFT; - if (_NULL_DELTA_INT != nextDelta) { - i_last_bucket = iBucket; - iBucket += nextDelta; - data = _table_hash_delta[iBucket]; - continue; - } else - return null; - } while (true); - - } finally { - unlock(); - } - } - - void clear() { - } - } - - // fields -------------------------------------- - final int _segment_shift; - final int _segment_mask; - final Segment[] _segments; - - // small utilities ----------------------------- - - private static int nearestPowerOfTwo(long value) { - int rc = 1; - while (rc < value) { - rc <<= 1; - } - return rc; - } - - private static final int hash(int h) { - // Spread bits to regularize both segment and index locations, - // using variant of single-word Wang/Jenkins hash. 
- h += (h << 15) ^ 0xffffcd7d; - h ^= (h >>> 10); - h += (h << 3); - h ^= (h >>> 6); - h += (h << 2) + (h << 14); - return h ^ (h >>> 16); - } - - private final Segment segmentFor(int hash) { - return _segments[(hash >>> _segment_shift) & _segment_mask]; - // return _segments[(hash >>> 8) & _segment_mask]; - // return _segments[hash & _segment_mask]; - } - - // public operations --------------------------- - - @SuppressWarnings({ "unchecked", "rawtypes" }) - public ConcurrentHopscotchHashMap(final long initialCapacity, - final int concurrencyLevel) { - // check for the validity of the algorithems - if (initialCapacity < 0 || concurrencyLevel <= 0 /* - * || - * machineCachelineSize - * <= 0 - */) - throw new IllegalArgumentException(); - - // set the user preference, should we force cache-line alignment - // _is_cacheline_alignment = isCachelineAlignment; - - // calculate cache-line mask - // final int bucketSize = Math.max(8, 2*machinePointerSize); - // _cache_mask = ( (machineCachelineSize / bucketSize) - 1 ); - - // allocate the segments array - final int numSegments = nearestPowerOfTwo(concurrencyLevel); - _segment_mask = (numSegments - 1); - _segments = Segment. newArray(numSegments); - - // Find power-of-two sizes best matching arguments - int sshift = 0; - int ssize = 1; - while (ssize < numSegments) { - ++sshift; - ssize <<= 1; - } - _segment_shift = 32 - sshift; - - // initialize the segmens - final long initCapacity = nearestPowerOfTwo(initialCapacity); - final int segmentCapacity = (int) (initCapacity / numSegments); - for (int iSeg = 0; iSeg < numSegments; ++iSeg) { - _segments[iSeg] = new Segment(segmentCapacity); - } - } - - public boolean isEmpty() { - final Segment[] segments = this._segments; - /* - * We keep track of per-segment "timestamp" to avoid ABA problems in - * which an element in one segment was added and in another removed - * during traversal, in which case the table was never actually empty at - * any point. Note the similar use of "timestamp" in the size() and - * containsValue() methods, which are the only other methods also - * susceptible to ABA problems. - */ - int[] mc = new int[segments.length]; - int mcsum = 0; - for (int i = 0; i < segments.length; ++i) { - if (0 != segments[i]._count) - return false; - else - mcsum += mc[i] = segments[i]._timestamp; - } - // If mcsum happens to be zero, then we know we got a snapshot - // before any modifications at all were made. This is - // probably common enough to bother tracking. - if (mcsum != 0) { - for (int i = 0; i < segments.length; ++i) { - if (0 != segments[i]._count || mc[i] != segments[i]._timestamp) - return false; - } - } - return true; - } - - public int size() { - final Segment[] segments = this._segments; - long sum = 0; - long check = 0; - int[] mc = new int[segments.length]; - - // Try a few times to get accurate count. On failure due to - // continuous async changes in table, resort to locking. 
- for (int iTry = 0; iTry < _RETRIES_BEFORE_LOCK; ++iTry) { - check = 0; - sum = 0; - int mcsum = 0; - for (int i = 0; i < segments.length; ++i) { - sum += segments[i]._count; - mcsum += mc[i] = segments[i]._timestamp; - } - if (mcsum != 0) { - for (int i = 0; i < segments.length; ++i) { - check += segments[i]._count; - if (mc[i] != segments[i]._timestamp) { - check = -1; // force retry - break; - } - } - } - if (check == sum) - break; - } - - if (check != sum) { // Resort to locking all segments - sum = 0; - for (int i = 0; i < segments.length; ++i) - segments[i].lock(); - for (int i = 0; i < segments.length; ++i) - sum += segments[i]._count; - for (int i = 0; i < segments.length; ++i) - segments[i].unlock(); - } - if (sum > Integer.MAX_VALUE) - return Integer.MAX_VALUE; - else - return (int) sum; - } - - // contains - - public boolean containsKey(final K key) { - final int hash = hash(key.hashCode()); - return segmentFor(hash).containsKey(key, hash); - } - - public V get(final K key) { - final int hash = hash(key.hashCode()); - return segmentFor(hash).get(key, hash); - } - - // add - public V put(K key, V value) { - if (value == null) - throw new NullPointerException(); - final int hash = hash(key.hashCode()); - return segmentFor(hash).put(key, hash, value); - } - - // remove - public V remove(final K key) { - final int hash = hash(key.hashCode()); - return segmentFor(hash).remove(key, hash); - } - - // general - public void clear() { - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.collections; + +//////////////////////////////////////////////////////////////////////////////// +//ConcurrentHopscotchHashMap Class +// +//////////////////////////////////////////////////////////////////////////////// +//TERMS OF USAGE +//---------------------------------------------------------------------- +// +//Permission to use, copy, modify and distribute this software and +//its documentation for any purpose is hereby granted without fee, +//provided that due acknowledgments to the authors are provided and +//this permission notice appears in all copies of the software. +//The software is provided "as is". There is no warranty of any kind. +// +//Authors: +//Maurice Herlihy +//Brown University +//and +//Nir Shavit +//Tel-Aviv University +//and +//Moran Tzafrir +//Tel-Aviv University +// +//Date: Dec 2, 2008. 
+// +//////////////////////////////////////////////////////////////////////////////// +//Programmer : Moran Tzafrir (MoranTza@gmail.com) +// +//////////////////////////////////////////////////////////////////////////////// +//package xbird.util.concurrent.map; + +import java.util.concurrent.locks.ReentrantLock; + +public class ConcurrentHopscotchHashMap { + + // constants ----------------------------------- + static final short _NULL_DELTA_SHORT = Short.MIN_VALUE; + static final int _NULL_DELTA_INT = (int) (Short.MIN_VALUE); + static final long _NULL_DELTA_FIRST_LONG = (long) (Short.MIN_VALUE & 0xFFFFL); + static final long _NULL_DELTA_NEXT_LONG = (long) ((Short.MIN_VALUE & 0xFFFFL) << 16); + static final long _NULL_HASH_DELTA = 0x0000000080008000L; + static final int _NULL_HASH = 0; + static final int _SEGMENT_SHIFT = 0; // choosed by empirical tests + + static final long _FIRST_MASK = 0x000000000000FFFFL; + static final long _NEXT_MASK = 0x00000000FFFF0000L; + static final long _HASH_MASK = 0xFFFFFFFF00000000L; + static final long _NOT_NEXT_MASK = ~(_NEXT_MASK); + static final long _NOT_FIRST_MASK = ~(_FIRST_MASK); + static final long _NOT_HASH_MASK = ~(_HASH_MASK); + static final int _NEXT_SHIFT = 16; + static final int _HASH_SHIFT = 32; + + static final int _RETRIES_BEFORE_LOCK = 2; + static final int _MAX_DELTA_BUCKET = Short.MAX_VALUE; + + static final int _CACHE_MASK = 4 - 1; + static final int _NOT_CACHE_MASK = ~_CACHE_MASK; + static final int _NULL_INDX = -1; + + // inner classes ------------------------------- + + static final class Segment extends ReentrantLock { + + /** + * + */ + private static final long serialVersionUID = 4476725515973126431L; + volatile int _timestamp; + int _bucketk_mask; + long[] _table_hash_delta; + Object[] _table_key_value; + // AtomicInteger _lock; + int _count; + + public Segment(final int initialCapacity) { + init(initialCapacity); + } + + private void init(final int initialCapacity) { + // _lock = new AtomicInteger(); + // _lock.set(0); + _timestamp = 0; + _bucketk_mask = initialCapacity - 1; + _count = 0; + + _table_hash_delta = new long[initialCapacity]; + _table_key_value = new Object[initialCapacity << 1]; + + // create the blocks of buckets + for (int iData = 0; iData < initialCapacity; ++iData) { + _table_hash_delta[iData] = _NULL_HASH_DELTA; + } + } + + /* + * void lock() { while(!_lock.compareAndSet(0, 0xFFFF)) { } } + * + * void unlock() { _lock.set(0); } + */ + + @SuppressWarnings("unchecked") + static final Segment[] newArray(final int numSegments) { + return new Segment[numSegments]; + } + + boolean containsKey(final K key, final int hash) { + // go over the list and look for key + int start_timestamp = _timestamp; + int iBucket = hash & _bucketk_mask; + long data = _table_hash_delta[iBucket]; + final int first_delta = (short) data; + if (0 != first_delta) { + if (_NULL_DELTA_SHORT == first_delta) + return false; + iBucket += first_delta; + data = _table_hash_delta[iBucket]; + } + + do { + if (hash == (data >> _HASH_SHIFT) + && key.equals(_table_key_value[iBucket << 1])) + return true; + final int nextDelta = (int) data >> _NEXT_SHIFT; + if (_NULL_DELTA_INT != nextDelta) { + iBucket += nextDelta; + data = _table_hash_delta[iBucket]; + continue; + } else { + final int curr_timestamp = _timestamp; + if (curr_timestamp == start_timestamp) + return false; + start_timestamp = curr_timestamp; + iBucket = hash & _bucketk_mask; + data = _table_hash_delta[iBucket]; + final int first_delta2 = (short) data; + if (0 != first_delta2) { + if 
(_NULL_DELTA_SHORT == first_delta2) + return false; + iBucket += first_delta2; + data = _table_hash_delta[iBucket]; + } + continue; + } + } while (true); + } + + @SuppressWarnings("unchecked") + V get(final K key, final int hash) { + // go over the list and look for key + int start_timestamp = 0; + int iBucket = 0; + long data = 0; + + boolean is_need_init = true; + do { + if (is_need_init) { + is_need_init = false; + start_timestamp = _timestamp; + iBucket = hash & _bucketk_mask; + data = _table_hash_delta[iBucket]; + final int first_delta = (short) data; + if (0 != first_delta) { + if (_NULL_DELTA_SHORT == first_delta) + return null; + iBucket += first_delta; + data = _table_hash_delta[iBucket]; + } + } + + final int iRef; + if (hash == (data >> _HASH_SHIFT) + && key.equals(_table_key_value[iRef = iBucket << 1])) { + final V value = (V) _table_key_value[iRef + 1]; + if (_timestamp == start_timestamp) + return value; + is_need_init = true; + continue; + } + final int nextDelta = (int) data >> _NEXT_SHIFT; + if (_NULL_DELTA_INT != nextDelta) { + iBucket += nextDelta; + data = _table_hash_delta[iBucket]; + continue; + } else { + if (_timestamp == start_timestamp) + return null; + is_need_init = true; + continue; + } + } while (true); + } + + @SuppressWarnings("unchecked") + V put(final K key, final int hash, final V value) { + lock(); + try { + // look for key in hash-map + // ..................................... + final int i_start_bucket = hash & _bucketk_mask; + int iBucket = i_start_bucket; + long data = _table_hash_delta[i_start_bucket]; + final short first_delta = (short) data; + if (_NULL_DELTA_SHORT != first_delta) { + if (0 != first_delta) { + iBucket += first_delta; + data = _table_hash_delta[iBucket]; + } + + do { + final int iRef; + if (hash == (data >> _HASH_SHIFT) + && key.equals(_table_key_value[iRef = (iBucket << 1)])) + return (V) _table_key_value[iRef + 1]; + final int next_delta = (int) data >> _NEXT_SHIFT; + if (_NULL_DELTA_INT == next_delta) + break; + else { + iBucket += next_delta; + data = _table_hash_delta[iBucket]; + } + } while (true); + } + + // try to place the key in the same cache-line + // .................. 
+ final int i_start_cacheline = i_start_bucket & _NOT_CACHE_MASK; + final int i_end_cacheline = i_start_cacheline + _CACHE_MASK; + int i_free_bucket = i_start_bucket; + do { + long free_data = _table_hash_delta[i_free_bucket]; + if (_NULL_HASH == (free_data >> _HASH_SHIFT)) { + // we found a free bucket at the cahce-line, so + // add the new bucket to the begining of the list + + int i_ref_bucket = i_free_bucket << 1; + _table_key_value[i_ref_bucket] = key; + _table_key_value[++i_ref_bucket] = value; + free_data &= _NOT_HASH_MASK; + free_data |= ((long) hash << _HASH_SHIFT); + + if (0 == first_delta) { + final long start_data = _table_hash_delta[i_start_bucket]; + final int start_next = (int) start_data >> _NEXT_SHIFT; + if (_NULL_DELTA_INT != start_next) { + final long new_free_next = i_start_bucket + + start_next - i_free_bucket; + _table_hash_delta[i_free_bucket] = (free_data & _NOT_NEXT_MASK) + | ((new_free_next << _NEXT_SHIFT) & _NEXT_MASK); + } else + _table_hash_delta[i_free_bucket] = free_data; + final long new_start_next = i_free_bucket + - i_start_bucket; + _table_hash_delta[i_start_bucket] = (start_data & _NOT_NEXT_MASK) + | ((new_start_next << _NEXT_SHIFT) & _NEXT_MASK); + } else {// 0 != first_delta + if (_NULL_DELTA_SHORT != first_delta) { + final long new_free_next = i_start_bucket + + first_delta - i_free_bucket; + free_data &= _NOT_NEXT_MASK; + free_data |= ((new_free_next << _NEXT_SHIFT) & _NEXT_MASK); + } + final long start_data; + if (i_free_bucket != i_start_bucket) { + start_data = _table_hash_delta[i_start_bucket]; + _table_hash_delta[i_free_bucket] = free_data; + } else + start_data = free_data; + final long new_start_first = i_free_bucket + - i_start_bucket; + _table_hash_delta[i_start_bucket] = (start_data & _NOT_FIRST_MASK) + | (new_start_first & _FIRST_MASK); + } + + ++_count; + ++_timestamp; + return null; + } + + ++i_free_bucket; + if (i_free_bucket > i_end_cacheline) + i_free_bucket = i_start_cacheline; + } while (i_start_bucket != i_free_bucket); + + // place key in arbitrary free forward bucket + // ................... + int i_max_bucket = i_start_bucket + _MAX_DELTA_BUCKET; + if (i_max_bucket > _bucketk_mask) + i_max_bucket = _bucketk_mask; + i_free_bucket = i_end_cacheline + 1; + + while (i_free_bucket <= i_max_bucket) { + long free_data = _table_hash_delta[i_free_bucket]; + if (_NULL_HASH == (free_data >> _HASH_SHIFT)) { + // we found a free bucket outside of the cahce-line, so + // add the new bucket to the end of the list + + int i_ref_bucket = i_free_bucket << 1; + _table_key_value[i_ref_bucket] = key; + _table_key_value[++i_ref_bucket] = value; + free_data &= _NOT_HASH_MASK; + free_data |= ((long) hash << _HASH_SHIFT); + _table_hash_delta[i_free_bucket] = free_data; + + if (_NULL_DELTA_SHORT == first_delta) { + long new_start_first = (i_free_bucket - i_start_bucket) + & _FIRST_MASK; + long start_data = (_table_hash_delta[i_start_bucket] & _NOT_FIRST_MASK) + | new_start_first; + _table_hash_delta[i_start_bucket] = start_data; + } else { + long new_last_next = ((i_free_bucket - iBucket) << _NEXT_SHIFT) + & _NEXT_MASK; + long last_data = (_table_hash_delta[iBucket] & _NOT_NEXT_MASK) + | new_last_next; + _table_hash_delta[iBucket] = last_data; + } + + ++_count; + ++_timestamp; + return null; + } + + i_free_bucket += 2; + } + + // place key in arbitrary free backward bucket + // ................... 
+ int i_min_bucket = i_start_bucket - _MAX_DELTA_BUCKET; + if (i_min_bucket < 0) + i_min_bucket = 0; + i_free_bucket = i_start_cacheline - 1; + + while (i_free_bucket >= i_min_bucket) { + long free_data = _table_hash_delta[i_free_bucket]; + if (_NULL_HASH == (free_data >> _HASH_SHIFT)) { + // we found a free bucket outside of the cahce-line, so + // add the new bucket to the end of the list + + int i_ref_bucket = i_free_bucket << 1; + _table_key_value[i_ref_bucket] = key; + _table_key_value[++i_ref_bucket] = value; + free_data &= _NOT_HASH_MASK; + free_data |= ((long) hash << _HASH_SHIFT); + _table_hash_delta[i_free_bucket] = free_data; + + if (_NULL_DELTA_SHORT == first_delta) { + long new_start_first = (i_free_bucket - i_start_bucket) + & _FIRST_MASK; + long start_data = (_table_hash_delta[i_start_bucket] & _NOT_FIRST_MASK) + | new_start_first; + _table_hash_delta[i_start_bucket] = start_data; + } else { + long new_last_next = ((i_free_bucket - iBucket) << _NEXT_SHIFT) + & _NEXT_MASK; + long last_data = (_table_hash_delta[iBucket] & _NOT_NEXT_MASK) + | new_last_next; + _table_hash_delta[iBucket] = last_data; + } + + ++_count; + ++_timestamp; + return null; + } + + i_free_bucket -= 2; + } + + } finally { + unlock(); + } + + return null; + } + + private void optimize_cacheline_use(final int i_free_bucket) { + final int i_start_cacheline = i_free_bucket & _NOT_CACHE_MASK; + final int i_end_cacheline = i_start_cacheline + _CACHE_MASK; + + // go over the buckets that reside in the cacheline of the free + // bucket + for (int i_cacheline = i_start_cacheline; i_cacheline <= i_end_cacheline; ++i_cacheline) { + + // check if current bucket has keys + final long data = _table_hash_delta[i_cacheline]; + final short first_delta = (short) data; + if (_NULL_DELTA_INT != first_delta) { + + int last_i_relocate = _NULL_INDX; + int i_relocate = i_cacheline + first_delta; + int curr_delta = first_delta; + + // go over the keys in the bucket-list + do { + // if the key reside outside the cahceline + if (curr_delta < 0 || curr_delta > _CACHE_MASK) { + + // copy key, value, & hash to the free bucket + final int i_key_value = i_free_bucket << 1; + final int i_rel_key_value = i_relocate << 1; + _table_key_value[i_key_value] = _table_key_value[i_rel_key_value]; + _table_key_value[i_key_value + 1] = _table_key_value[i_rel_key_value + 1]; + long relocate_data = _table_hash_delta[i_relocate]; + long free_data = _table_hash_delta[i_free_bucket]; + free_data &= _NOT_HASH_MASK; + free_data |= (relocate_data & _HASH_MASK); + + // update the next-field of the free-bucket + free_data &= _NOT_NEXT_MASK; + final int relocate_next_delta = (int) relocate_data >> _NEXT_SHIFT; + if (_NULL_DELTA_INT == relocate_next_delta) { + free_data |= _NULL_DELTA_NEXT_LONG; + } else { + final long new_next = (((i_relocate + relocate_next_delta) - i_free_bucket) & 0xFFFFL) << 16; + free_data |= new_next; + } + _table_hash_delta[i_free_bucket] = free_data; + + // update the "first" or "next" field of the last + if (_NULL_INDX == last_i_relocate) { + long start_data = _table_hash_delta[i_cacheline] + & _NOT_FIRST_MASK; + start_data |= ((i_free_bucket - i_cacheline) & 0xFFFFL); + _table_hash_delta[i_cacheline] = start_data; + } else { + long last_data = _table_hash_delta[last_i_relocate] + & _NOT_NEXT_MASK; + last_data |= (((i_free_bucket - last_i_relocate) & 0xFFFFL) << 16); + _table_hash_delta[last_i_relocate] = last_data; + } + + // + ++_timestamp; + relocate_data &= _NOT_HASH_MASK;// hash=null + relocate_data &= _NOT_NEXT_MASK; + 
relocate_data |= _NULL_DELTA_NEXT_LONG;// next = + // null + _table_hash_delta[i_relocate] = relocate_data; + _table_key_value[i_rel_key_value] = null;// key=null + _table_key_value[i_rel_key_value + 1] = null;// value=null + return; + } + + final long relocate_data = _table_hash_delta[i_relocate]; + final int next_delta = (int) relocate_data >> _NEXT_SHIFT; + if (_NULL_DELTA_INT == next_delta) + break; + last_i_relocate = i_relocate; + curr_delta += next_delta; + i_relocate += next_delta; + } while (true);// for on list + }// if list exists + }// for on list + } + + @SuppressWarnings("unchecked") + V remove(final K key, final int hash) { + lock(); + try { + // go over the list and look for key + final int i_start_bucket = hash & _bucketk_mask; + int iBucket = i_start_bucket; + long data = _table_hash_delta[iBucket]; + final short first_delta = (short) data; + if (0 != first_delta) { + if (_NULL_DELTA_SHORT == first_delta) + return null; + iBucket += first_delta; + data = _table_hash_delta[iBucket]; + } + + int i_last_bucket = -1; + do { + final int iRef; + if (hash == (data >> _HASH_SHIFT) + && key.equals(_table_key_value[iRef = (iBucket << 1)])) { + + data &= _NOT_HASH_MASK; + final int next_delta = (int) data >> _NEXT_SHIFT; + _table_hash_delta[iBucket] = data; // hash = null + _table_key_value[iRef] = null; // key = null; + + final int iRef2 = iRef + 1; + final V key_value = (V) _table_key_value[iRef2]; + _table_key_value[iRef2] = null; // value = null; + + if (-1 == i_last_bucket) { + long start_data = _table_hash_delta[i_start_bucket] + & _NOT_FIRST_MASK; + if (_NULL_DELTA_INT == next_delta) { + start_data |= _NULL_DELTA_FIRST_LONG; + } else { + final long new_first = (first_delta + next_delta) & 0xFFFFL; + start_data |= new_first; + } + if (i_start_bucket == iBucket) { + start_data &= _NOT_NEXT_MASK; + start_data |= _NULL_DELTA_NEXT_LONG; + --_count; + ++_timestamp; + _table_hash_delta[i_start_bucket] = start_data; + // return key_value; + } else + _table_hash_delta[i_start_bucket] = start_data; + } else { + long last_data = _table_hash_delta[i_last_bucket]; + final int last_next_delta = (int) last_data >> _NEXT_SHIFT; + last_data &= _NOT_NEXT_MASK; + if (_NULL_DELTA_INT == next_delta) { + last_data |= _NULL_DELTA_NEXT_LONG; + } else { + final long new_next = ((last_next_delta + next_delta) & 0xFFFFL) << 16; + last_data |= new_next; + } + _table_hash_delta[i_last_bucket] = last_data; + } + + if (i_start_bucket != iBucket) { + --_count; + ++_timestamp; + data &= _NOT_NEXT_MASK; + data |= _NULL_DELTA_NEXT_LONG; + _table_hash_delta[iBucket] = data; // next = null + } + + optimize_cacheline_use(iBucket); + + return key_value; + } + final int nextDelta = (int) data >> _NEXT_SHIFT; + if (_NULL_DELTA_INT != nextDelta) { + i_last_bucket = iBucket; + iBucket += nextDelta; + data = _table_hash_delta[iBucket]; + continue; + } else + return null; + } while (true); + + } finally { + unlock(); + } + } + + void clear() { + } + } + + // fields -------------------------------------- + final int _segment_shift; + final int _segment_mask; + final Segment[] _segments; + + // small utilities ----------------------------- + + private static int nearestPowerOfTwo(long value) { + int rc = 1; + while (rc < value) { + rc <<= 1; + } + return rc; + } + + private static final int hash(int h) { + // Spread bits to regularize both segment and index locations, + // using variant of single-word Wang/Jenkins hash. 
+ h += (h << 15) ^ 0xffffcd7d; + h ^= (h >>> 10); + h += (h << 3); + h ^= (h >>> 6); + h += (h << 2) + (h << 14); + return h ^ (h >>> 16); + } + + private final Segment segmentFor(int hash) { + return _segments[(hash >>> _segment_shift) & _segment_mask]; + // return _segments[(hash >>> 8) & _segment_mask]; + // return _segments[hash & _segment_mask]; + } + + // public operations --------------------------- + + @SuppressWarnings({ "unchecked", "rawtypes" }) + public ConcurrentHopscotchHashMap(final long initialCapacity, + final int concurrencyLevel) { + // check for the validity of the algorithems + if (initialCapacity < 0 || concurrencyLevel <= 0 /* + * || + * machineCachelineSize + * <= 0 + */) + throw new IllegalArgumentException(); + + // set the user preference, should we force cache-line alignment + // _is_cacheline_alignment = isCachelineAlignment; + + // calculate cache-line mask + // final int bucketSize = Math.max(8, 2*machinePointerSize); + // _cache_mask = ( (machineCachelineSize / bucketSize) - 1 ); + + // allocate the segments array + final int numSegments = nearestPowerOfTwo(concurrencyLevel); + _segment_mask = (numSegments - 1); + _segments = Segment. newArray(numSegments); + + // Find power-of-two sizes best matching arguments + int sshift = 0; + int ssize = 1; + while (ssize < numSegments) { + ++sshift; + ssize <<= 1; + } + _segment_shift = 32 - sshift; + + // initialize the segmens + final long initCapacity = nearestPowerOfTwo(initialCapacity); + final int segmentCapacity = (int) (initCapacity / numSegments); + for (int iSeg = 0; iSeg < numSegments; ++iSeg) { + _segments[iSeg] = new Segment(segmentCapacity); + } + } + + public boolean isEmpty() { + final Segment[] segments = this._segments; + /* + * We keep track of per-segment "timestamp" to avoid ABA problems in + * which an element in one segment was added and in another removed + * during traversal, in which case the table was never actually empty at + * any point. Note the similar use of "timestamp" in the size() and + * containsValue() methods, which are the only other methods also + * susceptible to ABA problems. + */ + int[] mc = new int[segments.length]; + int mcsum = 0; + for (int i = 0; i < segments.length; ++i) { + if (0 != segments[i]._count) + return false; + else + mcsum += mc[i] = segments[i]._timestamp; + } + // If mcsum happens to be zero, then we know we got a snapshot + // before any modifications at all were made. This is + // probably common enough to bother tracking. + if (mcsum != 0) { + for (int i = 0; i < segments.length; ++i) { + if (0 != segments[i]._count || mc[i] != segments[i]._timestamp) + return false; + } + } + return true; + } + + public int size() { + final Segment[] segments = this._segments; + long sum = 0; + long check = 0; + int[] mc = new int[segments.length]; + + // Try a few times to get accurate count. On failure due to + // continuous async changes in table, resort to locking. 
+ for (int iTry = 0; iTry < _RETRIES_BEFORE_LOCK; ++iTry) { + check = 0; + sum = 0; + int mcsum = 0; + for (int i = 0; i < segments.length; ++i) { + sum += segments[i]._count; + mcsum += mc[i] = segments[i]._timestamp; + } + if (mcsum != 0) { + for (int i = 0; i < segments.length; ++i) { + check += segments[i]._count; + if (mc[i] != segments[i]._timestamp) { + check = -1; // force retry + break; + } + } + } + if (check == sum) + break; + } + + if (check != sum) { // Resort to locking all segments + sum = 0; + for (int i = 0; i < segments.length; ++i) + segments[i].lock(); + for (int i = 0; i < segments.length; ++i) + sum += segments[i]._count; + for (int i = 0; i < segments.length; ++i) + segments[i].unlock(); + } + if (sum > Integer.MAX_VALUE) + return Integer.MAX_VALUE; + else + return (int) sum; + } + + // contains + + public boolean containsKey(final K key) { + final int hash = hash(key.hashCode()); + return segmentFor(hash).containsKey(key, hash); + } + + public V get(final K key) { + final int hash = hash(key.hashCode()); + return segmentFor(hash).get(key, hash); + } + + // add + public V put(K key, V value) { + if (value == null) + throw new NullPointerException(); + final int hash = hash(key.hashCode()); + return segmentFor(hash).put(key, hash, value); + } + + // remove + public V remove(final K key) { + final int hash = hash(key.hashCode()); + return segmentFor(hash).remove(key, hash); + } + + // general + public void clear() { + } + +} diff --git a/src/org/opendedup/collections/DataArchivedException.java b/src/org/opendedup/collections/DataArchivedException.java index 5d9789cbd..a6de69dea 100644 --- a/src/org/opendedup/collections/DataArchivedException.java +++ b/src/org/opendedup/collections/DataArchivedException.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.collections; public class DataArchivedException extends Exception { diff --git a/src/org/opendedup/collections/DataMapInterface.java b/src/org/opendedup/collections/DataMapInterface.java index fd0827872..61e60f8f4 100644 --- a/src/org/opendedup/collections/DataMapInterface.java +++ b/src/org/opendedup/collections/DataMapInterface.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.collections; import java.io.IOException; diff --git a/src/org/opendedup/collections/HashExistsException.java b/src/org/opendedup/collections/HashExistsException.java index 800594b77..c95a97209 100644 --- a/src/org/opendedup/collections/HashExistsException.java +++ b/src/org/opendedup/collections/HashExistsException.java @@ -1,24 +1,42 @@ -package org.opendedup.collections; - -import java.io.IOException; - -public class HashExistsException extends IOException { - - private static final long serialVersionUID = 2207515169199626140L; - private long pos; - private byte[] hash; - - public HashExistsException(long pos, byte[] hash) { - this.pos = pos; - this.hash = hash; - } - - public long getPos() { - return this.pos; - } - - public byte[] getHash() { - return this.hash; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.collections; + +import java.io.IOException; + +public class HashExistsException extends IOException { + + private static final long serialVersionUID = 2207515169199626140L; + private long pos; + private byte[] hash; + + public HashExistsException(long pos, byte[] hash) { + this.pos = pos; + this.hash = hash; + } + + public long getPos() { + return this.pos; + } + + public byte[] getHash() { + return this.hash; + } + +} diff --git a/src/org/opendedup/collections/HashtableFullException.java b/src/org/opendedup/collections/HashtableFullException.java index 127e0db3b..678155d17 100644 --- a/src/org/opendedup/collections/HashtableFullException.java +++ b/src/org/opendedup/collections/HashtableFullException.java @@ -1,14 +1,32 @@ -package org.opendedup.collections; - -public class HashtableFullException extends Exception { - - /** - * - */ - private static final long serialVersionUID = 1L; - - public HashtableFullException(String msg) { - super(msg); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.collections; + +public class HashtableFullException extends Exception { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public HashtableFullException(String msg) { + super(msg); + } + +} diff --git a/src/org/opendedup/collections/InsertRecord.java b/src/org/opendedup/collections/InsertRecord.java index b43c882c9..dc6e911ac 100644 --- a/src/org/opendedup/collections/InsertRecord.java +++ b/src/org/opendedup/collections/InsertRecord.java @@ -1,31 +1,49 @@ -package org.opendedup.collections; - -import com.google.common.primitives.Longs; - -public class InsertRecord { - private boolean inserted; - private byte[] hashlocs; - - public InsertRecord(boolean inserted, long pos) { - this.inserted = inserted; - this.hashlocs = Longs.toByteArray(pos); - } - - public InsertRecord(boolean inserted, byte[] locs) { - this.inserted = inserted; - this.hashlocs = locs; - } - - public boolean getInserted() { - return this.inserted; - } - - public void setHashLocs(byte[] hashlocs) { - this.hashlocs = hashlocs; - } - - public byte[] getHashLocs() { - return this.hashlocs; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.collections; + +import com.google.common.primitives.Longs; + +public class InsertRecord { + private boolean inserted; + private byte[] hashlocs; + + public InsertRecord(boolean inserted, long pos) { + this.inserted = inserted; + this.hashlocs = Longs.toByteArray(pos); + } + + public InsertRecord(boolean inserted, byte[] locs) { + this.inserted = inserted; + this.hashlocs = locs; + } + + public boolean getInserted() { + return this.inserted; + } + + public void setHashLocs(byte[] hashlocs) { + this.hashlocs = hashlocs; + } + + public byte[] getHashLocs() { + return this.hashlocs; + } + +} diff --git a/src/org/opendedup/collections/KVPair.java b/src/org/opendedup/collections/KVPair.java index 9bfe68154..6148f0e63 100644 --- a/src/org/opendedup/collections/KVPair.java +++ b/src/org/opendedup/collections/KVPair.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.collections; public class KVPair { diff --git a/src/org/opendedup/collections/KeyNotFoundException.java b/src/org/opendedup/collections/KeyNotFoundException.java index c3d4539fd..998e46822 100644 --- a/src/org/opendedup/collections/KeyNotFoundException.java +++ b/src/org/opendedup/collections/KeyNotFoundException.java @@ -1,20 +1,38 @@ -package org.opendedup.collections; - -import org.opendedup.util.StringUtils; - -public class KeyNotFoundException extends Exception { - - /** - * - */ - private static final long serialVersionUID = 3838655007053133611L; - - public KeyNotFoundException() { - super(); - } - - public KeyNotFoundException(byte[] key) { - super("Key [" + StringUtils.getHexString(key) + "] not found"); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.collections; + +import org.opendedup.util.StringUtils; + +public class KeyNotFoundException extends Exception { + + /** + * + */ + private static final long serialVersionUID = 3838655007053133611L; + + public KeyNotFoundException() { + super(); + } + + public KeyNotFoundException(byte[] key) { + super("Key [" + StringUtils.getHexString(key) + "] not found"); + } + +} diff --git a/src/org/opendedup/collections/LBFReconstructThread.java b/src/org/opendedup/collections/LBFReconstructThread.java index 73393c640..4c04ff760 100644 --- a/src/org/opendedup/collections/LBFReconstructThread.java +++ b/src/org/opendedup/collections/LBFReconstructThread.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.collections; import org.opendedup.hashing.LargeBloomFilter; diff --git a/src/org/opendedup/collections/LargeLongByteArrayMap.java b/src/org/opendedup/collections/LargeLongByteArrayMap.java index 040c71cdb..2245e2771 100644 --- a/src/org/opendedup/collections/LargeLongByteArrayMap.java +++ b/src/org/opendedup/collections/LargeLongByteArrayMap.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.collections; import java.io.File; diff --git a/src/org/opendedup/collections/LongByteArrayMap.java b/src/org/opendedup/collections/LongByteArrayMap.java index efd00a76b..7eb897dfa 100644 --- a/src/org/opendedup/collections/LongByteArrayMap.java +++ b/src/org/opendedup/collections/LongByteArrayMap.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.collections; import java.io.File; diff --git a/src/org/opendedup/collections/LongKeyValue.java b/src/org/opendedup/collections/LongKeyValue.java index 8ed397e58..624400bdf 100644 --- a/src/org/opendedup/collections/LongKeyValue.java +++ b/src/org/opendedup/collections/LongKeyValue.java @@ -1,44 +1,62 @@ -package org.opendedup.collections; - -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; - -public class LongKeyValue implements Externalizable { - private byte[] value; - private long key; - - public LongKeyValue(long key, byte[] value) { - this.key = key; - this.value = value; - } - - public LongKeyValue() { - - } - - @Override - public void readExternal(ObjectInput out) throws IOException, - ClassNotFoundException { - this.key = out.readLong(); - this.value = new byte[out.readInt()]; - out.readFully(value); - } - - @Override - public void writeExternal(ObjectOutput in) throws IOException { - in.writeLong(this.key); - in.writeInt(value.length); - in.write(value); - } - - public byte[] getValue() { - return value; - } - - public long getKey() { - return key; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.collections; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; + +public class LongKeyValue implements Externalizable { + private byte[] value; + private long key; + + public LongKeyValue(long key, byte[] value) { + this.key = key; + this.value = value; + } + + public LongKeyValue() { + + } + + @Override + public void readExternal(ObjectInput out) throws IOException, + ClassNotFoundException { + this.key = out.readLong(); + this.value = new byte[out.readInt()]; + out.readFully(value); + } + + @Override + public void writeExternal(ObjectOutput in) throws IOException { + in.writeLong(this.key); + in.writeInt(value.length); + in.write(value); + } + + public byte[] getValue() { + return value; + } + + public long getKey() { + return key; + } + +} diff --git a/src/org/opendedup/collections/MapClosedException.java b/src/org/opendedup/collections/MapClosedException.java index 208154ac2..481840fd8 100644 --- a/src/org/opendedup/collections/MapClosedException.java +++ b/src/org/opendedup/collections/MapClosedException.java @@ -1,10 +1,28 @@ -package org.opendedup.collections; - -public class MapClosedException extends Exception { - - /** - * - */ - private static final long serialVersionUID = 1L; - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.collections; + +public class MapClosedException extends Exception { + + /** + * + */ + private static final long serialVersionUID = 1L; + +} diff --git a/src/org/opendedup/collections/ProgressiveFileBasedCSMap.java b/src/org/opendedup/collections/ProgressiveFileBasedCSMap.java index 5b3a9a636..26512d888 100644 --- a/src/org/opendedup/collections/ProgressiveFileBasedCSMap.java +++ b/src/org/opendedup/collections/ProgressiveFileBasedCSMap.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.collections; import java.io.File; diff --git a/src/org/opendedup/collections/ProgressiveFileByteArrayLongMap.java b/src/org/opendedup/collections/ProgressiveFileByteArrayLongMap.java index 2e0c94d16..d1cfd6e6e 100644 --- a/src/org/opendedup/collections/ProgressiveFileByteArrayLongMap.java +++ b/src/org/opendedup/collections/ProgressiveFileByteArrayLongMap.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.collections; import java.io.File; diff --git a/src/org/opendedup/collections/QuickList.java b/src/org/opendedup/collections/QuickList.java index 09d653737..825ff80dd 100644 --- a/src/org/opendedup/collections/QuickList.java +++ b/src/org/opendedup/collections/QuickList.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.collections; import java.io.Externalizable; diff --git a/src/org/opendedup/collections/SimpleByteArrayLongMap.java b/src/org/opendedup/collections/SimpleByteArrayLongMap.java index 9cca20d8d..a81d31382 100644 --- a/src/org/opendedup/collections/SimpleByteArrayLongMap.java +++ b/src/org/opendedup/collections/SimpleByteArrayLongMap.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.collections; import java.io.File; diff --git a/src/org/opendedup/collections/SortedReadMapList.java b/src/org/opendedup/collections/SortedReadMapList.java index 73cac8541..7dd6acdbb 100644 --- a/src/org/opendedup/collections/SortedReadMapList.java +++ b/src/org/opendedup/collections/SortedReadMapList.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.collections; import java.util.Collections; diff --git a/src/org/opendedup/collections/threads/SyncThread.java b/src/org/opendedup/collections/threads/SyncThread.java index 247cb96c1..990763eb4 100644 --- a/src/org/opendedup/collections/threads/SyncThread.java +++ b/src/org/opendedup/collections/threads/SyncThread.java @@ -1,37 +1,55 @@ -package org.opendedup.collections.threads; - -import org.opendedup.collections.AbstractMap; - -public class SyncThread implements Runnable { - AbstractMap map; - Thread th = null; - - public SyncThread(AbstractMap m) { - map = m; - th = new Thread(this); - th.start(); - } - - @Override - public void run() { - while (!map.isClosed()) { - try { - Thread.sleep(2000); - } catch (InterruptedException e) { - break; - } - try { - map.sync(); - } catch (Exception e) { - } - } - } - - public void close() { - try { - th.interrupt(); - } catch (Exception e) { - } - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.collections.threads; + +import org.opendedup.collections.AbstractMap; + +public class SyncThread implements Runnable { + AbstractMap map; + Thread th = null; + + public SyncThread(AbstractMap m) { + map = m; + th = new Thread(this); + th.start(); + } + + @Override + public void run() { + while (!map.isClosed()) { + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + break; + } + try { + map.sync(); + } catch (Exception e) { + } + } + } + + public void close() { + try { + th.interrupt(); + } catch (Exception e) { + } + } + +} diff --git a/src/org/opendedup/fsync/GCJob.java b/src/org/opendedup/fsync/GCJob.java index 40849c882..993d9ad76 100644 --- a/src/org/opendedup/fsync/GCJob.java +++ b/src/org/opendedup/fsync/GCJob.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.fsync; import org.opendedup.logging.SDFSLogger; diff --git a/src/org/opendedup/fsync/SyncFSScheduler.java b/src/org/opendedup/fsync/SyncFSScheduler.java index d37661a5b..1e993cb46 100644 --- a/src/org/opendedup/fsync/SyncFSScheduler.java +++ b/src/org/opendedup/fsync/SyncFSScheduler.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.fsync; import java.util.Properties; diff --git a/src/org/opendedup/hashing/AbstractHashEngine.java b/src/org/opendedup/hashing/AbstractHashEngine.java index 6acd6a689..f04ca8ce7 100644 --- a/src/org/opendedup/hashing/AbstractHashEngine.java +++ b/src/org/opendedup/hashing/AbstractHashEngine.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.hashing; public interface AbstractHashEngine { diff --git a/src/org/opendedup/hashing/AbstractPoolThread.java b/src/org/opendedup/hashing/AbstractPoolThread.java index c94221fe4..8a6eab9bf 100644 --- a/src/org/opendedup/hashing/AbstractPoolThread.java +++ b/src/org/opendedup/hashing/AbstractPoolThread.java @@ -1,10 +1,28 @@ -package org.opendedup.hashing; - -public interface AbstractPoolThread { - public void start(); - - public void exit(); - - public boolean isStopped(); - +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.hashing; + +public interface AbstractPoolThread { + public void start(); + + public void exit(); + + public boolean isStopped(); + } \ No newline at end of file diff --git a/src/org/opendedup/hashing/FLBF.java b/src/org/opendedup/hashing/FLBF.java index f4580c83a..6f2d07490 100644 --- a/src/org/opendedup/hashing/FLBF.java +++ b/src/org/opendedup/hashing/FLBF.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.hashing; import java.io.ByteArrayOutputStream; diff --git a/src/org/opendedup/hashing/Finger.java b/src/org/opendedup/hashing/Finger.java index bd683661c..bd4f38539 100644 --- a/src/org/opendedup/hashing/Finger.java +++ b/src/org/opendedup/hashing/Finger.java @@ -1,70 +1,88 @@ -package org.opendedup.hashing; - -import java.io.IOException; -import java.util.List; - -import org.opendedup.collections.HashtableFullException; -import org.opendedup.collections.InsertRecord; -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.io.AsyncChunkWriteActionListener; -import org.opendedup.sdfs.servers.HCServiceProxy; - -public class Finger implements Runnable { - public byte[] chunk; - public byte[] hash; - public InsertRecord hl; - public int start; - public int len; - public int ap; - public AsyncChunkWriteActionListener l; - - public void run() { - try { - if (Main.chunkStoreLocal) - this.hl = HCServiceProxy.writeChunk(this.hash, this.chunk); - else - this.hl = HCServiceProxy.writeChunk(this.hash, this.chunk, - this.hl.getHashLocs()); - l.commandResponse(this); - - } catch (Throwable e) { - l.commandException(this, e); - } - } - - public static class FingerPersister implements Runnable { - public AsyncChunkWriteActionListener l; - public List fingers; - public boolean dedup; - - @Override - public void run() { - for (Finger f : fingers) { - try { - if (Main.chunkStoreLocal) - f.hl = HCServiceProxy.writeChunk(f.hash, f.chunk); - else - f.hl = HCServiceProxy.writeChunk(f.hash, f.chunk, - f.hl.getHashLocs()); - l.commandResponse(f); - - } catch (Throwable e) { - l.commandException(f, e); - } - } - - } - - public void persist() throws IOException, HashtableFullException { - for (Finger f : fingers) { - f.hl = HCServiceProxy.writeChunk(f.hash, f.chunk); - - } - } - - } - - public void persist() { - - } -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.hashing; + +import java.io.IOException; +import java.util.List; + +import org.opendedup.collections.HashtableFullException; +import org.opendedup.collections.InsertRecord; +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.io.AsyncChunkWriteActionListener; +import org.opendedup.sdfs.servers.HCServiceProxy; + +public class Finger implements Runnable { + public byte[] chunk; + public byte[] hash; + public InsertRecord hl; + public int start; + public int len; + public int ap; + public AsyncChunkWriteActionListener l; + + public void run() { + try { + if (Main.chunkStoreLocal) + this.hl = HCServiceProxy.writeChunk(this.hash, this.chunk); + else + this.hl = HCServiceProxy.writeChunk(this.hash, this.chunk, + this.hl.getHashLocs()); + l.commandResponse(this); + + } catch (Throwable e) { + l.commandException(this, e); + } + } + + public static class FingerPersister implements Runnable { + public AsyncChunkWriteActionListener l; + public List fingers; + public boolean dedup; + + @Override + public void run() { + for (Finger f : fingers) { + try { + if (Main.chunkStoreLocal) + f.hl = HCServiceProxy.writeChunk(f.hash, f.chunk); + else + f.hl = HCServiceProxy.writeChunk(f.hash, f.chunk, + f.hl.getHashLocs()); + l.commandResponse(f); + + } catch (Throwable e) { + l.commandException(f, e); + } + } + + } + + public void persist() throws IOException, HashtableFullException { + for (Finger f : fingers) { + f.hl = HCServiceProxy.writeChunk(f.hash, f.chunk); + + } + } + + } + + public void persist() { + + } +} diff --git a/src/org/opendedup/hashing/HashFunctionPool.java b/src/org/opendedup/hashing/HashFunctionPool.java index 11f96e3e5..3e06e70a9 100755 --- a/src/org/opendedup/hashing/HashFunctionPool.java +++ b/src/org/opendedup/hashing/HashFunctionPool.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.hashing; import java.io.IOException; diff --git a/src/org/opendedup/hashing/HashTest.java b/src/org/opendedup/hashing/HashTest.java index 2acf413dd..400484a52 100644 --- a/src/org/opendedup/hashing/HashTest.java +++ b/src/org/opendedup/hashing/HashTest.java @@ -1,50 +1,68 @@ -package org.opendedup.hashing; - -import java.util.Arrays; -import java.util.Random; - -import com.google.common.primitives.UnsignedBytes; - - - - -public class HashTest { - - - public static void main(String [] args) { - int bs = 1024*1024; - int runs = 10000; - Random rnd = new Random(); - byte[] b = new byte[bs]; - rnd.nextBytes(b); - long time = System.currentTimeMillis(); - byte [] z = null; - for(int i = 0;i<1;i++) { - //rnd.nextBytes(b); - z = MurMurHash3.murmurhash3_x64_128(b, 6442); - } - long nt = System.currentTimeMillis() - time; - System.out.println("took " + nt); - time = System.currentTimeMillis(); - for(int i = 0;i. + *******************************************************************************/ +package org.opendedup.hashing; + +import java.util.Arrays; +import java.util.Random; + +import com.google.common.primitives.UnsignedBytes; + + + + +public class HashTest { + + + public static void main(String [] args) { + int bs = 1024*1024; + int runs = 10000; + Random rnd = new Random(); + byte[] b = new byte[bs]; + rnd.nextBytes(b); + long time = System.currentTimeMillis(); + byte [] z = null; + for(int i = 0;i<1;i++) { + //rnd.nextBytes(b); + z = MurMurHash3.murmurhash3_x64_128(b, 6442); + } + long nt = System.currentTimeMillis() - time; + System.out.println("took " + nt); + time = System.currentTimeMillis(); + for(int i = 0;i. + *******************************************************************************/ package org.opendedup.hashing; import java.io.ByteArrayOutputStream; diff --git a/src/org/opendedup/hashing/LargeBloomFilter.java b/src/org/opendedup/hashing/LargeBloomFilter.java index b35d64828..1fe5e0189 100644 --- a/src/org/opendedup/hashing/LargeBloomFilter.java +++ b/src/org/opendedup/hashing/LargeBloomFilter.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.hashing; import java.io.File; diff --git a/src/org/opendedup/hashing/LargeFileBloomFilter.java b/src/org/opendedup/hashing/LargeFileBloomFilter.java index 2ade81c40..baa56259a 100644 --- a/src/org/opendedup/hashing/LargeFileBloomFilter.java +++ b/src/org/opendedup/hashing/LargeFileBloomFilter.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.hashing; import java.io.File; diff --git a/src/org/opendedup/hashing/Murmur3HashEngine.java b/src/org/opendedup/hashing/Murmur3HashEngine.java index b21f0af30..9b5aca84b 100644 --- a/src/org/opendedup/hashing/Murmur3HashEngine.java +++ b/src/org/opendedup/hashing/Murmur3HashEngine.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.hashing; import org.opendedup.sdfs.Main; diff --git a/src/org/opendedup/hashing/PoolThread.java b/src/org/opendedup/hashing/PoolThread.java index d8f8567da..42da5fc4d 100644 --- a/src/org/opendedup/hashing/PoolThread.java +++ b/src/org/opendedup/hashing/PoolThread.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.hashing; import java.security.NoSuchAlgorithmException; diff --git a/src/org/opendedup/hashing/ThreadPool.java b/src/org/opendedup/hashing/ThreadPool.java index 8e5c30ae6..090ee4bc7 100644 --- a/src/org/opendedup/hashing/ThreadPool.java +++ b/src/org/opendedup/hashing/ThreadPool.java @@ -1,60 +1,78 @@ -package org.opendedup.hashing; - -import java.util.ArrayList; - -import java.util.List; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.LinkedBlockingQueue; - -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.io.WritableCacheBuffer; - -public class ThreadPool { - - private BlockingQueue taskQueue = null; - private List threads = new ArrayList(); - private boolean isStopped = false; - - public ThreadPool(int noOfThreads, int maxNoOfTasks) { - taskQueue = new LinkedBlockingQueue(maxNoOfTasks); - - for (int i = 0; i < noOfThreads; i++) { - threads.add(new PoolThread(taskQueue)); - } - for (AbstractPoolThread thread : threads) { - thread.start(); - } - } - - public void execute(WritableCacheBuffer task) { - if (this.isStopped) { - SDFSLogger.getLog().warn( - "threadpool is stopped will not execute task"); - return; - } - - try { - this.taskQueue.put(task); - } catch (InterruptedException e) { - SDFSLogger.getLog().warn("thread interrupted", e); - } - } - - public synchronized void flush() { - while (!this.taskQueue.isEmpty()) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - break; - } - } - } - - public synchronized void stops() { - this.isStopped = true; - for (AbstractPoolThread thread : threads) { - thread.exit(); - } - } - +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.hashing; + +import java.util.ArrayList; + +import java.util.List; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; + +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.io.WritableCacheBuffer; + +public class ThreadPool { + + private BlockingQueue taskQueue = null; + private List threads = new ArrayList(); + private boolean isStopped = false; + + public ThreadPool(int noOfThreads, int maxNoOfTasks) { + taskQueue = new LinkedBlockingQueue(maxNoOfTasks); + + for (int i = 0; i < noOfThreads; i++) { + threads.add(new PoolThread(taskQueue)); + } + for (AbstractPoolThread thread : threads) { + thread.start(); + } + } + + public void execute(WritableCacheBuffer task) { + if (this.isStopped) { + SDFSLogger.getLog().warn( + "threadpool is stopped will not execute task"); + return; + } + + try { + this.taskQueue.put(task); + } catch (InterruptedException e) { + SDFSLogger.getLog().warn("thread interrupted", e); + } + } + + public synchronized void flush() { + while (!this.taskQueue.isEmpty()) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + break; + } + } + } + + public synchronized void stops() { + this.isStopped = true; + for (AbstractPoolThread thread : threads) { + thread.exit(); + } + } + } \ No newline at end of file diff --git a/src/org/opendedup/hashing/Tiger16HashEngine.java b/src/org/opendedup/hashing/Tiger16HashEngine.java index 123501245..1d8a78319 100644 --- a/src/org/opendedup/hashing/Tiger16HashEngine.java +++ b/src/org/opendedup/hashing/Tiger16HashEngine.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.hashing; import java.security.NoSuchAlgorithmException; diff --git a/src/org/opendedup/hashing/VariableHashEngine.java b/src/org/opendedup/hashing/VariableHashEngine.java index dc84a562f..8886bc219 100644 --- a/src/org/opendedup/hashing/VariableHashEngine.java +++ b/src/org/opendedup/hashing/VariableHashEngine.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.hashing; import java.io.IOException; diff --git a/src/org/opendedup/io/benchmarks/RandomFileIntegrityTest.java b/src/org/opendedup/io/benchmarks/RandomFileIntegrityTest.java index 029cacc75..f573c89f9 100644 --- a/src/org/opendedup/io/benchmarks/RandomFileIntegrityTest.java +++ b/src/org/opendedup/io/benchmarks/RandomFileIntegrityTest.java @@ -1,118 +1,136 @@ -package org.opendedup.io.benchmarks; - -import java.io.File; -import java.io.IOException; -import java.util.Arrays; -import java.util.Random; - -import com.google.common.hash.HashFunction; -import com.google.common.hash.Hashing; -import com.google.common.io.BaseEncoding; -import com.google.common.io.Files; - -public class RandomFileIntegrityTest implements Runnable { - File path; - int size; - boolean finished = false; - boolean passed = false; - String hashcode = null; - - public RandomFileIntegrityTest(File path, int size) { - this.path = path; - this.size = size; - Thread th = new Thread(this); - th.start(); - } - - @Override - public void run() { - try { - int len = 1024 * size; - - Random rnd = new Random(); - byte[] b = new byte[len]; - rnd.nextBytes(b); - HashFunction hf = Hashing.murmur3_128(); - byte[] nhc = hf.hashBytes(b).asBytes(); - hashcode = BaseEncoding.base16().encode(nhc); - path = new File(path.getPath() + File.separator + hashcode); - Files.write(b, path); - byte[] hc = Files.hash(path, Hashing.murmur3_128()).asBytes(); - passed = Arrays.equals(hc, nhc); - hashcode = BaseEncoding.base16().encode(hc); - } catch (Exception e) { - e.printStackTrace(); - } - this.finished = true; - } - - public boolean isFinished() { - return this.finished; - } - - public File getPath() { - return this.path; - } - - public static int test(String path, int size, int runs) throws IOException { - RandomFileIntegrityTest[] tests = new RandomFileIntegrityTest[runs]; - for (int i = 0; i < tests.length; i++) { - RandomFileIntegrityTest test = new RandomFileIntegrityTest( - new File(path), size); - tests[i] = test; - } - boolean finished = false; - int passed = 0; - ; - while (!finished) { - int nf = 0; - for (int i = 0; i < tests.length; i++) { - RandomFileIntegrityTest test = tests[i]; - if (test.isFinished()) { - nf++; - if (test.passed) - passed++; - } - if (nf == tests.length) - finished = true; - } - try { - Thread.sleep(10); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } - return passed; - } - - public static void main(String[] args) throws IOException, - InterruptedException { - if (args.length != 4) { - System.out - .println("RandomFileIntegrityTest "); - System.exit(0); - } - int r = Integer.parseInt(args[3]); - for (int i = 0; i < r; i++) { - test(args[0], Integer.parseInt(args[1]), Integer.parseInt(args[2])); - } - Process p = Runtime.getRuntime().exec("sync"); - p.waitFor(); - p = Runtime.getRuntime().exec("echo 3 > /proc/sys/vm/drop_caches"); - p.waitFor(); - File f = new File(args[0]); - File[] fs = f.listFiles(); - System.out.println("Checking " + fs.length); - int passed = 0; - for (File hf : fs) { - 
byte[] hc = BaseEncoding.base16().decode(hf.getName()); - byte[] nhc = Files.hash(hf, Hashing.murmur3_128()).asBytes(); - if (Arrays.equals(hc, nhc)) { - passed++; - } - } - System.out.println("Files=" + fs.length + " Passed=" + passed); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.io.benchmarks; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Random; + +import com.google.common.hash.HashFunction; +import com.google.common.hash.Hashing; +import com.google.common.io.BaseEncoding; +import com.google.common.io.Files; + +public class RandomFileIntegrityTest implements Runnable { + File path; + int size; + boolean finished = false; + boolean passed = false; + String hashcode = null; + + public RandomFileIntegrityTest(File path, int size) { + this.path = path; + this.size = size; + Thread th = new Thread(this); + th.start(); + } + + @Override + public void run() { + try { + int len = 1024 * size; + + Random rnd = new Random(); + byte[] b = new byte[len]; + rnd.nextBytes(b); + HashFunction hf = Hashing.murmur3_128(); + byte[] nhc = hf.hashBytes(b).asBytes(); + hashcode = BaseEncoding.base16().encode(nhc); + path = new File(path.getPath() + File.separator + hashcode); + Files.write(b, path); + byte[] hc = Files.hash(path, Hashing.murmur3_128()).asBytes(); + passed = Arrays.equals(hc, nhc); + hashcode = BaseEncoding.base16().encode(hc); + } catch (Exception e) { + e.printStackTrace(); + } + this.finished = true; + } + + public boolean isFinished() { + return this.finished; + } + + public File getPath() { + return this.path; + } + + public static int test(String path, int size, int runs) throws IOException { + RandomFileIntegrityTest[] tests = new RandomFileIntegrityTest[runs]; + for (int i = 0; i < tests.length; i++) { + RandomFileIntegrityTest test = new RandomFileIntegrityTest( + new File(path), size); + tests[i] = test; + } + boolean finished = false; + int passed = 0; + ; + while (!finished) { + int nf = 0; + for (int i = 0; i < tests.length; i++) { + RandomFileIntegrityTest test = tests[i]; + if (test.isFinished()) { + nf++; + if (test.passed) + passed++; + } + if (nf == tests.length) + finished = true; + } + try { + Thread.sleep(10); + } catch (InterruptedException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + return passed; + } + + public static void main(String[] args) throws IOException, + InterruptedException { + if (args.length != 4) { + System.out + .println("RandomFileIntegrityTest "); + System.exit(0); + } + int r = Integer.parseInt(args[3]); + for (int i = 0; i < r; i++) { + test(args[0], Integer.parseInt(args[1]), 
Integer.parseInt(args[2])); + } + Process p = Runtime.getRuntime().exec("sync"); + p.waitFor(); + p = Runtime.getRuntime().exec("echo 3 > /proc/sys/vm/drop_caches"); + p.waitFor(); + File f = new File(args[0]); + File[] fs = f.listFiles(); + System.out.println("Checking " + fs.length); + int passed = 0; + for (File hf : fs) { + byte[] hc = BaseEncoding.base16().decode(hf.getName()); + byte[] nhc = Files.hash(hf, Hashing.murmur3_128()).asBytes(); + if (Arrays.equals(hc, nhc)) { + passed++; + } + } + System.out.println("Files=" + fs.length + " Passed=" + passed); + } + +} diff --git a/src/org/opendedup/io/benchmarks/ReadTest.java b/src/org/opendedup/io/benchmarks/ReadTest.java index 64273c7a9..15b4f166d 100644 --- a/src/org/opendedup/io/benchmarks/ReadTest.java +++ b/src/org/opendedup/io/benchmarks/ReadTest.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.io.benchmarks; import java.io.File; diff --git a/src/org/opendedup/io/benchmarks/WriteTest.java b/src/org/opendedup/io/benchmarks/WriteTest.java index e12c194c0..2b72f8cbb 100644 --- a/src/org/opendedup/io/benchmarks/WriteTest.java +++ b/src/org/opendedup/io/benchmarks/WriteTest.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.io.benchmarks; import java.io.File; diff --git a/src/org/opendedup/logging/JSONLayout.java b/src/org/opendedup/logging/JSONLayout.java index e1ed46843..9112e4e1c 100644 --- a/src/org/opendedup/logging/JSONLayout.java +++ b/src/org/opendedup/logging/JSONLayout.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.logging; import java.io.IOException; diff --git a/src/org/opendedup/logging/JSONLayoutException.java b/src/org/opendedup/logging/JSONLayoutException.java index d41a0ca6e..db5814624 100644 --- a/src/org/opendedup/logging/JSONLayoutException.java +++ b/src/org/opendedup/logging/JSONLayoutException.java @@ -1,17 +1,35 @@ -package org.opendedup.logging; - -public class JSONLayoutException extends RuntimeException { - - /** - * - */ - private static final long serialVersionUID = 1L; - - public JSONLayoutException(String message, Throwable t) { - super(message, t); - } - - public JSONLayoutException(Throwable t) { - super(t); - } -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.logging; + +public class JSONLayoutException extends RuntimeException { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public JSONLayoutException(String message, Throwable t) { + super(message, t); + } + + public JSONLayoutException(Throwable t) { + super(t); + } +} diff --git a/src/org/opendedup/logging/JSONVolPerfLayout.java b/src/org/opendedup/logging/JSONVolPerfLayout.java index c85a27cf2..35b92f3ec 100644 --- a/src/org/opendedup/logging/JSONVolPerfLayout.java +++ b/src/org/opendedup/logging/JSONVolPerfLayout.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.logging; import java.io.IOException; diff --git a/src/org/opendedup/logging/SDFSEventLogger.java b/src/org/opendedup/logging/SDFSEventLogger.java index 1c9a478df..b8d52ccfb 100644 --- a/src/org/opendedup/logging/SDFSEventLogger.java +++ b/src/org/opendedup/logging/SDFSEventLogger.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.logging; import java.io.IOException; diff --git a/src/org/opendedup/logging/SDFSLogger.java b/src/org/opendedup/logging/SDFSLogger.java index 6877e8ea4..06922b933 100644 --- a/src/org/opendedup/logging/SDFSLogger.java +++ b/src/org/opendedup/logging/SDFSLogger.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.logging; import java.io.IOException; diff --git a/src/org/opendedup/mtools/BloomFDisk.java b/src/org/opendedup/mtools/BloomFDisk.java index 198483577..aaef359e3 100644 --- a/src/org/opendedup/mtools/BloomFDisk.java +++ b/src/org/opendedup/mtools/BloomFDisk.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.mtools; import java.io.File; diff --git a/src/org/opendedup/mtools/ClusterRedundancyCheck.java b/src/org/opendedup/mtools/ClusterRedundancyCheck.java index 7028bf825..6139dfc2b 100644 --- a/src/org/opendedup/mtools/ClusterRedundancyCheck.java +++ b/src/org/opendedup/mtools/ClusterRedundancyCheck.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.mtools; import java.io.File; diff --git a/src/org/opendedup/mtools/FDiskException.java b/src/org/opendedup/mtools/FDiskException.java index 6df9ffcab..d252cc7a5 100644 --- a/src/org/opendedup/mtools/FDiskException.java +++ b/src/org/opendedup/mtools/FDiskException.java @@ -1,15 +1,33 @@ -package org.opendedup.mtools; - -public class FDiskException extends Exception { - - private static final long serialVersionUID = 1L; - - public FDiskException(Exception e) { - super(e); - } - - public FDiskException(String e) { - super(e); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.mtools; + +public class FDiskException extends Exception { + + private static final long serialVersionUID = 1L; + + public FDiskException(Exception e) { + super(e); + } + + public FDiskException(String e) { + super(e); + } + +} diff --git a/src/org/opendedup/mtools/Partition.java b/src/org/opendedup/mtools/Partition.java index 459358414..c8077919a 100644 --- a/src/org/opendedup/mtools/Partition.java +++ b/src/org/opendedup/mtools/Partition.java @@ -1,74 +1,92 @@ -package org.opendedup.mtools; - -public class Partition { - String device; - boolean boot; - long start; - long end; - long blocks; - int System; - String type; - - public String getDevice() { - return device; - } - - public void setDevice(String device) { - this.device = device; - } - - public boolean isBoot() { - return boot; - } - - public void setBoot(boolean boot) { - this.boot = boot; - } - - public long getStart() { - return start; - } - - public void setStart(long start) { - this.start = start; - } - - public long getEnd() { - return end; - } - - public void setEnd(long end) { - this.end = end; - } - - public long getBlocks() { - return blocks; - } - - public void setBlocks(long blocks) { - this.blocks = blocks; - } - - public int getSystem() { - return System; - } - - public void setSystem(int system) { - System = system; - } - - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - - @Override - public String toString() { - return this.device + " " + this.blocks + " " + this.start + " " - + this.end + " " + this.type + " " + this.System; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.mtools; + +public class Partition { + String device; + boolean boot; + long start; + long end; + long blocks; + int System; + String type; + + public String getDevice() { + return device; + } + + public void setDevice(String device) { + this.device = device; + } + + public boolean isBoot() { + return boot; + } + + public void setBoot(boolean boot) { + this.boot = boot; + } + + public long getStart() { + return start; + } + + public void setStart(long start) { + this.start = start; + } + + public long getEnd() { + return end; + } + + public void setEnd(long end) { + this.end = end; + } + + public long getBlocks() { + return blocks; + } + + public void setBlocks(long blocks) { + this.blocks = blocks; + } + + public int getSystem() { + return System; + } + + public void setSystem(int system) { + System = system; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + @Override + public String toString() { + return this.device + " " + this.blocks + " " + this.start + " " + + this.end + " " + this.type + " " + this.System; + } + +} diff --git a/src/org/opendedup/mtools/RestoreArchive.java b/src/org/opendedup/mtools/RestoreArchive.java index 31e073b0a..54338ced6 100644 --- a/src/org/opendedup/mtools/RestoreArchive.java +++ b/src/org/opendedup/mtools/RestoreArchive.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.mtools; import java.io.File; diff --git a/src/org/opendedup/mtools/SyncFS.java b/src/org/opendedup/mtools/SyncFS.java index 1eae56aab..061274bf8 100644 --- a/src/org/opendedup/mtools/SyncFS.java +++ b/src/org/opendedup/mtools/SyncFS.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.mtools; import java.io.File; diff --git a/src/org/opendedup/mtools/VMDKMountPoint.java b/src/org/opendedup/mtools/VMDKMountPoint.java index 12ab230c8..eb4872358 100644 --- a/src/org/opendedup/mtools/VMDKMountPoint.java +++ b/src/org/opendedup/mtools/VMDKMountPoint.java @@ -1,28 +1,46 @@ -package org.opendedup.mtools; - -public class VMDKMountPoint { - String loopBack; - String mountPoint; - - public VMDKMountPoint(String loopBack, String mountPoint) { - this.loopBack = loopBack; - this.mountPoint = mountPoint; - } - - public String getLoopBack() { - return loopBack; - } - - public void setLoopBack(String loopBack) { - this.loopBack = loopBack; - } - - public String getMountPoint() { - return mountPoint; - } - - public void setMountPoint(String mountPoint) { - this.mountPoint = mountPoint; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.mtools; + +public class VMDKMountPoint { + String loopBack; + String mountPoint; + + public VMDKMountPoint(String loopBack, String mountPoint) { + this.loopBack = loopBack; + this.mountPoint = mountPoint; + } + + public String getLoopBack() { + return loopBack; + } + + public void setLoopBack(String loopBack) { + this.loopBack = loopBack; + } + + public String getMountPoint() { + return mountPoint; + } + + public void setMountPoint(String mountPoint) { + this.mountPoint = mountPoint; + } + +} diff --git a/src/org/opendedup/sdfs/cluster/BlockDevSocket.java b/src/org/opendedup/sdfs/cluster/BlockDevSocket.java index 68f19fb6b..d68014f94 100644 --- a/src/org/opendedup/sdfs/cluster/BlockDevSocket.java +++ b/src/org/opendedup/sdfs/cluster/BlockDevSocket.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.sdfs.cluster; import java.io.DataInputStream; diff --git a/src/org/opendedup/sdfs/cluster/ClusterMain.java b/src/org/opendedup/sdfs/cluster/ClusterMain.java index 768cf4536..10ef3919c 100644 --- a/src/org/opendedup/sdfs/cluster/ClusterMain.java +++ b/src/org/opendedup/sdfs/cluster/ClusterMain.java @@ -1,7 +1,25 @@ -package org.opendedup.sdfs.cluster; - -public class ClusterMain { - public static DSEServer clusterServer; - public static ClusterSocket clusterState; - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster; + +public class ClusterMain { + public static DSEServer clusterServer; + public static ClusterSocket clusterState; + +} diff --git a/src/org/opendedup/sdfs/cluster/ClusterSocket.java b/src/org/opendedup/sdfs/cluster/ClusterSocket.java index 9f4e5be1d..ec5adfcd8 100644 --- a/src/org/opendedup/sdfs/cluster/ClusterSocket.java +++ b/src/org/opendedup/sdfs/cluster/ClusterSocket.java @@ -1,25 +1,43 @@ -package org.opendedup.sdfs.cluster; - -import java.util.List; -import java.util.concurrent.locks.Lock; - -import org.jgroups.Address; -import org.jgroups.blocks.MessageDispatcher; - -public interface ClusterSocket { - public abstract List getStorageNodes(); - - public abstract DSEServer getServer(); - - public abstract List getNameNodes(); - - public abstract List getVolumes(); - - public abstract Address getAddressForVol(String volumeName); - - public abstract Lock getLock(String name); - - public abstract boolean isPeerMaster(); - - public abstract MessageDispatcher getDispatcher(); -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.cluster; + +import java.util.List; +import java.util.concurrent.locks.Lock; + +import org.jgroups.Address; +import org.jgroups.blocks.MessageDispatcher; + +public interface ClusterSocket { + public abstract List getStorageNodes(); + + public abstract DSEServer getServer(); + + public abstract List getNameNodes(); + + public abstract List getVolumes(); + + public abstract Address getAddressForVol(String volumeName); + + public abstract Lock getLock(String name); + + public abstract boolean isPeerMaster(); + + public abstract MessageDispatcher getDispatcher(); +} diff --git a/src/org/opendedup/sdfs/cluster/DSEClientSocket.java b/src/org/opendedup/sdfs/cluster/DSEClientSocket.java index cfa4fb3cd..6f89c6658 100644 --- a/src/org/opendedup/sdfs/cluster/DSEClientSocket.java +++ b/src/org/opendedup/sdfs/cluster/DSEClientSocket.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.sdfs.cluster; import java.io.DataInputStream; diff --git a/src/org/opendedup/sdfs/cluster/DSEServer.java b/src/org/opendedup/sdfs/cluster/DSEServer.java index c6532e90f..42c9ad9f3 100644 --- a/src/org/opendedup/sdfs/cluster/DSEServer.java +++ b/src/org/opendedup/sdfs/cluster/DSEServer.java @@ -1,189 +1,207 @@ -package org.opendedup.sdfs.cluster; - -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import java.nio.ByteBuffer; - -import org.jgroups.Address; -import org.jgroups.util.Util; -import org.opendedup.sdfs.io.Volume; -import org.opendedup.sdfs.network.HashClientPool; -import org.opendedup.sdfs.network.NetworkCMDS; -import org.opendedup.sdfs.servers.HCServer; - -public class DSEServer implements Externalizable { - public int serverType; - public byte id; - public String hostName; - public int dseport; - public boolean useSSL; - public Address address; - public long currentSize; - public long dseSize; - public long dseMaxSize; - public long dseCompressedSize; - public long maxSize; - public long freeBlocks; - public int pageSize; - public String location; - public String rack; - public Volume volume; - public transient int weight; - public static final int SERVER = 0; - public static final int CLIENT = 1; - public static final int LISTENER = 2; - public boolean readOnly = false; - - public DSEServer() { - - } - - public DSEServer(String hostName, byte id, int serverType) { - this.serverType = serverType; - this.id = id; - this.hostName = hostName; - } - - public HashClientPool createPool() throws IOException { - HCServer _server = new HCServer(this.hostName, this.dseport, false, - false, this.useSSL); - return new HashClientPool(_server, this.address.toString(), 20, this.id); - } - - @Override - public void readExternal(ObjectInput in) throws IOException, - ClassNotFoundException { - in.readByte(); - this.hostName = (String) in.readObject(); - this.id = in.readByte(); - this.serverType = in.readInt(); - this.address = (Address) in.readObject(); - this.maxSize = in.readLong(); - this.currentSize = in.readLong(); - this.freeBlocks = in.readLong(); - this.pageSize = in.readInt(); - this.dseport = in.readInt(); - this.useSSL = in.readBoolean(); - this.location = (String) in.readObject(); - this.rack = (String) in.readObject(); - this.readOnly = in.readBoolean(); - this.dseSize = in.readLong(); - this.dseMaxSize = in.readLong(); - this.dseCompressedSize = in.readLong(); - } - - @Override - public void writeExternal(ObjectOutput out) throws IOException { - out.writeByte(NetworkCMDS.UPDATE_DSE); - out.writeObject(hostName); - out.writeByte(id); - out.writeInt(serverType); - out.writeObject(address); - out.writeLong(this.maxSize); - out.writeLong(this.currentSize); - out.writeLong(this.freeBlocks); - out.writeInt(pageSize); - out.writeInt(this.dseport); - out.writeBoolean(useSSL); - out.writeObject(location); - out.writeObject(rack); - out.writeBoolean(this.readOnly); - out.writeLong(this.dseSize); - out.writeLong(this.dseMaxSize); - out.writeLong(this.dseCompressedSize); - } - - public byte[] getBytes() throws Exception { - byte[] b = hostName.getBytes(); - byte[] addr = Util.objectToByteBuffer(address); - byte[] lb = this.location.getBytes(); - byte[] rb = this.rack.getBytes(); - byte[] bz = new byte[1 + 4 + b.length + 1 + 4 + 4 + addr.length + 8 + 8 - + 8 + 4 + 4 + 1 + 4 + lb.length + 4 + rb.length + 1 + 8 + 8 + 8]; - 
- ByteBuffer buf = ByteBuffer.wrap(bz); - buf.put(NetworkCMDS.UPDATE_DSE); - buf.putInt(b.length); - buf.put(b); - buf.put(id); - buf.putInt(serverType); - buf.putInt(addr.length); - buf.put(addr); - buf.putLong(maxSize); - buf.putLong(currentSize); - buf.putLong(freeBlocks); - buf.putInt(pageSize); - buf.putInt(this.dseport); - if (this.useSSL) - buf.put((byte) 1); - else - buf.put((byte) 0); - - buf.putInt(lb.length); - buf.put(lb); - buf.putInt(rb.length); - buf.put(rb); - if (this.readOnly) - buf.put((byte) 1); - else - buf.put((byte) 0); - buf.putLong(this.dseSize); - buf.putLong(this.dseMaxSize); - buf.putLong(this.dseCompressedSize); - return buf.array(); - } - - public void fromByte(byte[] bz) throws Exception { - ByteBuffer buf = ByteBuffer.wrap(bz); - buf.get(); - byte[] bs = new byte[buf.getInt()]; - buf.get(bs); - this.hostName = new String(bs); - this.id = buf.get(); - this.serverType = buf.getInt(); - byte[] addr = new byte[buf.getInt()]; - buf.get(addr); - this.address = (Address) Util.objectFromByteBuffer(addr); - this.maxSize = buf.getLong(); - this.currentSize = buf.getLong(); - this.freeBlocks = buf.getLong(); - this.pageSize = buf.getInt(); - this.dseport = buf.getInt(); - this.useSSL = false; - if (buf.get() == 1) - this.useSSL = true; - byte[] lb = new byte[buf.getInt()]; - buf.get(lb); - byte[] rb = new byte[buf.getInt()]; - buf.get(rb); - this.location = new String(lb); - this.rack = new String(rb); - if (buf.get() == 1) - this.readOnly = true; - this.dseSize = buf.getLong(); - this.dseMaxSize = buf.getLong(); - this.dseCompressedSize = buf.getLong(); - } - - public String toString() { - return this.hostName + " id=" + this.id + " serverType=" - + this.serverType + " address=[" + this.address + "] maxsz=" - + this.maxSize + " currentsize=" + this.currentSize - + " freeblocks=" + this.freeBlocks + " dseport=" + this.dseport - + " usessl=" + this.useSSL + " dseSize=" + this.dseSize - + " dseMaxSize=" + this.dseMaxSize + " dseCompressedSize=" - + this.dseCompressedSize; - } - - public int hashCode() { - return this.id; - } - - public boolean equals(Object obj) { - DSEServer s = (DSEServer) obj; - return (s.id == this.id); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.cluster; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.nio.ByteBuffer; + +import org.jgroups.Address; +import org.jgroups.util.Util; +import org.opendedup.sdfs.io.Volume; +import org.opendedup.sdfs.network.HashClientPool; +import org.opendedup.sdfs.network.NetworkCMDS; +import org.opendedup.sdfs.servers.HCServer; + +public class DSEServer implements Externalizable { + public int serverType; + public byte id; + public String hostName; + public int dseport; + public boolean useSSL; + public Address address; + public long currentSize; + public long dseSize; + public long dseMaxSize; + public long dseCompressedSize; + public long maxSize; + public long freeBlocks; + public int pageSize; + public String location; + public String rack; + public Volume volume; + public transient int weight; + public static final int SERVER = 0; + public static final int CLIENT = 1; + public static final int LISTENER = 2; + public boolean readOnly = false; + + public DSEServer() { + + } + + public DSEServer(String hostName, byte id, int serverType) { + this.serverType = serverType; + this.id = id; + this.hostName = hostName; + } + + public HashClientPool createPool() throws IOException { + HCServer _server = new HCServer(this.hostName, this.dseport, false, + false, this.useSSL); + return new HashClientPool(_server, this.address.toString(), 20, this.id); + } + + @Override + public void readExternal(ObjectInput in) throws IOException, + ClassNotFoundException { + in.readByte(); + this.hostName = (String) in.readObject(); + this.id = in.readByte(); + this.serverType = in.readInt(); + this.address = (Address) in.readObject(); + this.maxSize = in.readLong(); + this.currentSize = in.readLong(); + this.freeBlocks = in.readLong(); + this.pageSize = in.readInt(); + this.dseport = in.readInt(); + this.useSSL = in.readBoolean(); + this.location = (String) in.readObject(); + this.rack = (String) in.readObject(); + this.readOnly = in.readBoolean(); + this.dseSize = in.readLong(); + this.dseMaxSize = in.readLong(); + this.dseCompressedSize = in.readLong(); + } + + @Override + public void writeExternal(ObjectOutput out) throws IOException { + out.writeByte(NetworkCMDS.UPDATE_DSE); + out.writeObject(hostName); + out.writeByte(id); + out.writeInt(serverType); + out.writeObject(address); + out.writeLong(this.maxSize); + out.writeLong(this.currentSize); + out.writeLong(this.freeBlocks); + out.writeInt(pageSize); + out.writeInt(this.dseport); + out.writeBoolean(useSSL); + out.writeObject(location); + out.writeObject(rack); + out.writeBoolean(this.readOnly); + out.writeLong(this.dseSize); + out.writeLong(this.dseMaxSize); + out.writeLong(this.dseCompressedSize); + } + + public byte[] getBytes() throws Exception { + byte[] b = hostName.getBytes(); + byte[] addr = Util.objectToByteBuffer(address); + byte[] lb = this.location.getBytes(); + byte[] rb = this.rack.getBytes(); + byte[] bz = new byte[1 + 4 + b.length + 1 + 4 + 4 + addr.length + 8 + 8 + + 8 + 4 + 4 + 1 + 4 + lb.length + 4 + rb.length + 1 + 8 + 8 + 8]; + + ByteBuffer buf = ByteBuffer.wrap(bz); + buf.put(NetworkCMDS.UPDATE_DSE); + buf.putInt(b.length); + buf.put(b); + buf.put(id); + buf.putInt(serverType); + buf.putInt(addr.length); + buf.put(addr); + buf.putLong(maxSize); + buf.putLong(currentSize); + buf.putLong(freeBlocks); + buf.putInt(pageSize); + buf.putInt(this.dseport); + if 
(this.useSSL) + buf.put((byte) 1); + else + buf.put((byte) 0); + + buf.putInt(lb.length); + buf.put(lb); + buf.putInt(rb.length); + buf.put(rb); + if (this.readOnly) + buf.put((byte) 1); + else + buf.put((byte) 0); + buf.putLong(this.dseSize); + buf.putLong(this.dseMaxSize); + buf.putLong(this.dseCompressedSize); + return buf.array(); + } + + public void fromByte(byte[] bz) throws Exception { + ByteBuffer buf = ByteBuffer.wrap(bz); + buf.get(); + byte[] bs = new byte[buf.getInt()]; + buf.get(bs); + this.hostName = new String(bs); + this.id = buf.get(); + this.serverType = buf.getInt(); + byte[] addr = new byte[buf.getInt()]; + buf.get(addr); + this.address = (Address) Util.objectFromByteBuffer(addr); + this.maxSize = buf.getLong(); + this.currentSize = buf.getLong(); + this.freeBlocks = buf.getLong(); + this.pageSize = buf.getInt(); + this.dseport = buf.getInt(); + this.useSSL = false; + if (buf.get() == 1) + this.useSSL = true; + byte[] lb = new byte[buf.getInt()]; + buf.get(lb); + byte[] rb = new byte[buf.getInt()]; + buf.get(rb); + this.location = new String(lb); + this.rack = new String(rb); + if (buf.get() == 1) + this.readOnly = true; + this.dseSize = buf.getLong(); + this.dseMaxSize = buf.getLong(); + this.dseCompressedSize = buf.getLong(); + } + + public String toString() { + return this.hostName + " id=" + this.id + " serverType=" + + this.serverType + " address=[" + this.address + "] maxsz=" + + this.maxSize + " currentsize=" + this.currentSize + + " freeblocks=" + this.freeBlocks + " dseport=" + this.dseport + + " usessl=" + this.useSSL + " dseSize=" + this.dseSize + + " dseMaxSize=" + this.dseMaxSize + " dseCompressedSize=" + + this.dseCompressedSize; + } + + public int hashCode() { + return this.id; + } + + public boolean equals(Object obj) { + DSEServer s = (DSEServer) obj; + return (s.id == this.id); + } + +} diff --git a/src/org/opendedup/sdfs/cluster/DSEServerSocket.java b/src/org/opendedup/sdfs/cluster/DSEServerSocket.java index adaad55a1..e5bbaf78e 100644 --- a/src/org/opendedup/sdfs/cluster/DSEServerSocket.java +++ b/src/org/opendedup/sdfs/cluster/DSEServerSocket.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.sdfs.cluster; import java.io.ByteArrayInputStream; diff --git a/src/org/opendedup/sdfs/cluster/DSEServerWeighting.java b/src/org/opendedup/sdfs/cluster/DSEServerWeighting.java index 59f390c7f..40864013e 100644 --- a/src/org/opendedup/sdfs/cluster/DSEServerWeighting.java +++ b/src/org/opendedup/sdfs/cluster/DSEServerWeighting.java @@ -1,13 +1,31 @@ -package org.opendedup.sdfs.cluster; - -class DSEServerWeighting { - - DSEServer server; - int weighting; - - public DSEServerWeighting(DSEServer s, int w) { - this.server = s; - this.weighting = w; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster; + +class DSEServerWeighting { + + DSEServer server; + int weighting; + + public DSEServerWeighting(DSEServer s, int w) { + this.server = s; + this.weighting = w; + } + +} diff --git a/src/org/opendedup/sdfs/cluster/NetworkUnicastServer.java b/src/org/opendedup/sdfs/cluster/NetworkUnicastServer.java index 9ed5e98d3..5110eebe3 100755 --- a/src/org/opendedup/sdfs/cluster/NetworkUnicastServer.java +++ b/src/org/opendedup/sdfs/cluster/NetworkUnicastServer.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.cluster; import java.io.File; diff --git a/src/org/opendedup/sdfs/cluster/ServerWeighting.java b/src/org/opendedup/sdfs/cluster/ServerWeighting.java index 12c75bb1e..c342c5e98 100644 --- a/src/org/opendedup/sdfs/cluster/ServerWeighting.java +++ b/src/org/opendedup/sdfs/cluster/ServerWeighting.java @@ -1,16 +1,34 @@ -package org.opendedup.sdfs.cluster; - -import java.util.List; - -import org.jgroups.Address; - -interface ServerWeighting { - public void init(List servers); - - public Address getAddress(byte[] ignoredHosts); - - public List
<Address>
getAddresses(int sz, byte[] ignoredHosts); - - public List getServers(int sz, byte[] ignoredHosts); - +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster; + +import java.util.List; + +import org.jgroups.Address; + +interface ServerWeighting { + public void init(List servers); + + public Address getAddress(byte[] ignoredHosts); + + public List
<Address>
getAddresses(int sz, byte[] ignoredHosts); + + public List getServers(int sz, byte[] ignoredHosts); + } \ No newline at end of file diff --git a/src/org/opendedup/sdfs/cluster/VolumeSocket.java b/src/org/opendedup/sdfs/cluster/VolumeSocket.java index 62f8a8aed..38ee5ef2a 100644 --- a/src/org/opendedup/sdfs/cluster/VolumeSocket.java +++ b/src/org/opendedup/sdfs/cluster/VolumeSocket.java @@ -1,163 +1,181 @@ -package org.opendedup.sdfs.cluster; - -import java.io.DataInputStream; - -import java.io.DataOutputStream; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.util.List; - -import org.jgroups.Address; -import org.jgroups.JChannel; -import org.jgroups.MembershipListener; -import org.jgroups.MergeView; -import org.jgroups.Message; -import org.jgroups.MessageListener; -import org.jgroups.View; -import org.jgroups.blocks.MessageDispatcher; -import org.jgroups.blocks.RequestHandler; -import org.jgroups.util.Util; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.io.BlockDev; -import org.opendedup.sdfs.io.Volume; - -public class VolumeSocket implements RequestHandler, MembershipListener, - MessageListener { - private static final byte ADDDEV = 0; - private static final byte RMDEV = 1; - private static final byte SETDEVSZ = 2; - private static final byte SETDEVAUTO = 4; - - private Volume vol; - private String cfg; - protected JChannel channel; - - private MessageDispatcher disp; - private boolean peermaster = false; - private Address pmAddr = null; - - public VolumeSocket(Volume vol, String config) throws Exception { - SDFSLogger.getLog().info("Starting Volume Socket for " + vol.getName()); - this.vol = vol; - this.cfg = config; - channel = new JChannel(this.cfg); - disp = new MessageDispatcher(channel, null, null, this); - disp.setMembershipListener(this); - disp.setMessageListener(this); - channel.connect(this.vol.getName()); - channel.getState(null, 10000); - SDFSLogger.getLog().info("Started Volume Socket for " + vol.getName()); - } - - @Override - public void getState(OutputStream output) throws Exception { - synchronized (vol) { - try { - Util.objectToStream(vol, new DataOutputStream(output)); - } catch (Exception e) { - SDFSLogger.getLog().error("unable to get state", e); - } - } - - } - - @Override - public void receive(Message arg0) { - - } - - @Override - public void setState(InputStream input) throws Exception { - synchronized (vol) { - Volume vl = (Volume) Util.objectFromStream(new DataInputStream( - input)); - List devices = vl.devices; - vol.devices.clear(); - for (BlockDev dev : devices) { - vol.addBlockDev(dev); - } - } - } - - @Override - public void block() { - // TODO Auto-generated method stub - - } - - @Override - public void suspect(Address arg0) { - // TODO Auto-generated method stub - - } - - @Override - public void unblock() { - // TODO Auto-generated method stub - - } - - @Override - public void viewAccepted(View new_view) { - if (new_view instanceof MergeView) { - // TODO add split brain algo - SDFSLogger.getLog().info("split brain suspected!!!"); - } - this.pmAddr = new_view.getMembers().get(0); - if (pmAddr.equals(this.channel.getAddress())) { - this.peermaster = true; - } else { - this.peermaster = false; - } - - } - - @Override - public Object handle(Message msg) throws Exception { - byte[] buffer = msg.getBuffer(); - ByteBuffer buf = ByteBuffer.wrap(buffer); - buf.position(msg.getOffset()); - byte cmd = buf.get(); - byte[] arb; - BlockDev dev; - arb = new byte[buf.getInt()]; - buf.get(arb); - 
dev = (BlockDev) Util.objectFromByteBuffer(arb); - switch (cmd) { - case ADDDEV: - synchronized (vol) { - vol.addBlockDev(dev); - } - break; - case RMDEV: - synchronized (vol) { - vol.removeBlockDev(dev.getDevName()); - } - break; - case SETDEVAUTO: - synchronized (vol) { - BlockDev _dev = vol.getBlockDev(dev.getDevName()); - _dev.setStartOnInit(dev.isStartOnInit()); - } - break; - case SETDEVSZ: - synchronized (vol) { - BlockDev _dev = vol.getBlockDev(dev.getDevName()); - _dev.setSize(dev.getSize()); - } - break; - - } - return null; - } - - public boolean isPeermaster() { - return peermaster; - } - - public Address getPmAddr() { - return pmAddr; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster; + +import java.io.DataInputStream; + +import java.io.DataOutputStream; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.List; + +import org.jgroups.Address; +import org.jgroups.JChannel; +import org.jgroups.MembershipListener; +import org.jgroups.MergeView; +import org.jgroups.Message; +import org.jgroups.MessageListener; +import org.jgroups.View; +import org.jgroups.blocks.MessageDispatcher; +import org.jgroups.blocks.RequestHandler; +import org.jgroups.util.Util; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.io.BlockDev; +import org.opendedup.sdfs.io.Volume; + +public class VolumeSocket implements RequestHandler, MembershipListener, + MessageListener { + private static final byte ADDDEV = 0; + private static final byte RMDEV = 1; + private static final byte SETDEVSZ = 2; + private static final byte SETDEVAUTO = 4; + + private Volume vol; + private String cfg; + protected JChannel channel; + + private MessageDispatcher disp; + private boolean peermaster = false; + private Address pmAddr = null; + + public VolumeSocket(Volume vol, String config) throws Exception { + SDFSLogger.getLog().info("Starting Volume Socket for " + vol.getName()); + this.vol = vol; + this.cfg = config; + channel = new JChannel(this.cfg); + disp = new MessageDispatcher(channel, null, null, this); + disp.setMembershipListener(this); + disp.setMessageListener(this); + channel.connect(this.vol.getName()); + channel.getState(null, 10000); + SDFSLogger.getLog().info("Started Volume Socket for " + vol.getName()); + } + + @Override + public void getState(OutputStream output) throws Exception { + synchronized (vol) { + try { + Util.objectToStream(vol, new DataOutputStream(output)); + } catch (Exception e) { + SDFSLogger.getLog().error("unable to get state", e); + } + } + + } + + @Override + public void receive(Message arg0) { + + } + + @Override + public void 
setState(InputStream input) throws Exception { + synchronized (vol) { + Volume vl = (Volume) Util.objectFromStream(new DataInputStream( + input)); + List devices = vl.devices; + vol.devices.clear(); + for (BlockDev dev : devices) { + vol.addBlockDev(dev); + } + } + } + + @Override + public void block() { + // TODO Auto-generated method stub + + } + + @Override + public void suspect(Address arg0) { + // TODO Auto-generated method stub + + } + + @Override + public void unblock() { + // TODO Auto-generated method stub + + } + + @Override + public void viewAccepted(View new_view) { + if (new_view instanceof MergeView) { + // TODO add split brain algo + SDFSLogger.getLog().info("split brain suspected!!!"); + } + this.pmAddr = new_view.getMembers().get(0); + if (pmAddr.equals(this.channel.getAddress())) { + this.peermaster = true; + } else { + this.peermaster = false; + } + + } + + @Override + public Object handle(Message msg) throws Exception { + byte[] buffer = msg.getBuffer(); + ByteBuffer buf = ByteBuffer.wrap(buffer); + buf.position(msg.getOffset()); + byte cmd = buf.get(); + byte[] arb; + BlockDev dev; + arb = new byte[buf.getInt()]; + buf.get(arb); + dev = (BlockDev) Util.objectFromByteBuffer(arb); + switch (cmd) { + case ADDDEV: + synchronized (vol) { + vol.addBlockDev(dev); + } + break; + case RMDEV: + synchronized (vol) { + vol.removeBlockDev(dev.getDevName()); + } + break; + case SETDEVAUTO: + synchronized (vol) { + BlockDev _dev = vol.getBlockDev(dev.getDevName()); + _dev.setStartOnInit(dev.isStartOnInit()); + } + break; + case SETDEVSZ: + synchronized (vol) { + BlockDev _dev = vol.getBlockDev(dev.getDevName()); + _dev.setSize(dev.getSize()); + } + break; + + } + return null; + } + + public boolean isPeermaster() { + return peermaster; + } + + public Address getPmAddr() { + return pmAddr; + } + +} diff --git a/src/org/opendedup/sdfs/cluster/WeightedRandomServer.java b/src/org/opendedup/sdfs/cluster/WeightedRandomServer.java index f67524983..6b00c5e16 100644 --- a/src/org/opendedup/sdfs/cluster/WeightedRandomServer.java +++ b/src/org/opendedup/sdfs/cluster/WeightedRandomServer.java @@ -1,181 +1,199 @@ -package org.opendedup.sdfs.cluster; - -import java.util.ArrayList; - -import java.util.Collections; -import java.util.Comparator; -import java.util.List; -import java.util.Random; - -import org.jgroups.Address; - -public class WeightedRandomServer implements ServerWeighting { - List servers = null; - int arsz = 0; - private int total = 0; - Random rnd = new Random(); - - @Override - public void init(List servers) { - this.servers = servers; - double totsz = 0; - for (DSEServer w : servers) { - totsz += w.maxSize - w.currentSize; - } - if (totsz > 0) { - for (DSEServer w : servers) { - long avail = w.maxSize - w.currentSize; - if (avail > 0) { - double pt = (double) avail / totsz; - w.weight = (int) Math.ceil((pt) * 100); - this.total += w.weight; - } - } - Collections.sort(servers, new CustomComparator()); - arsz = servers.size(); - } - } - - @Override - public Address getAddress(byte[] ignoredHosts) { - int random = rnd.nextInt(total); - Address s = null; - // loop thru our weightings until we arrive at the correct one - int current = 0; - if (servers.size() == 0) - return null; - while (s == null) { - for (DSEServer w : servers) { - current += w.weight; - if (random < current) { - if (!this.ignoreHost(w, ignoredHosts)) { - s = w.address; - } - break; - } - } - } - return s; - } - - @Override - public List
<Address>
getAddresses(int sz, byte[] ignoredHosts) { - int random = rnd.nextInt(total); - ArrayList
<Address>
lst = new ArrayList
<Address>
(); - if (sz >= arsz) { - for (DSEServer w : servers) { - if (!this.ignoreHost(w, ignoredHosts)) - lst.add(w.address); - } - } else { - // loop thru our weightings until we arrive at the correct one - int current = 0; - int pos = 0; - for (DSEServer w : servers) { - current += w.weight; - if (random < current) - break; - else - pos++; - } - - int added = 0; - for (int i = pos; i < arsz; i++) { - DSEServer s = servers.get(i); - if (!this.ignoreHost(s, ignoredHosts)) { - lst.add(s.address); - added++; - if (added >= sz) - break; - } - } - if (added < sz) { - for (int i = 0; i < arsz; i++) { - DSEServer s = servers.get(i); - if (!this.ignoreHost(s, ignoredHosts)) { - lst.add(s.address); - added++; - if (added >= sz) - break; - } - } - - } - } - return lst; - } - - @Override - public List getServers(int sz, byte[] ignoredHosts) { - int random = rnd.nextInt(total); - ArrayList lst = new ArrayList(); - if (sz >= arsz) { - for (DSEServer w : servers) { - if (!this.ignoreHost(w, ignoredHosts)) - lst.add(w); - } - } else { - // loop thru our weightings until we arrive at the correct one - int current = 0; - int pos = 0; - for (DSEServer w : servers) { - current += w.weight; - if (random < current) - break; - else - pos++; - } - - int added = 0; - for (int i = pos; i < arsz; i++) { - DSEServer s = servers.get(i); - if (!this.ignoreHost(s, ignoredHosts)) { - lst.add(s); - added++; - if (added >= sz) - break; - } - } - if (added < sz) { - for (int i = 0; i < arsz; i++) { - DSEServer s = servers.get(i); - if (!this.ignoreHost(s, ignoredHosts)) { - lst.add(s); - added++; - if (added >= sz) - break; - } - } - - } - } - return lst; - } - - private boolean ignoreHost(DSEServer s, byte[] ignoredHosts) { - if (ignoredHosts == null) - return false; - else { - for (byte b : ignoredHosts) { - if (b == s.id) - return true; - } - return false; - } - } - - private class CustomComparator implements Comparator { - @Override - public int compare(DSEServer o1, DSEServer o2) { - int fs1 = o1.weight; - int fs2 = o2.weight; - if (fs1 > fs2) - return 1; - else if (fs1 < fs2) - return -1; - else - return 0; - } - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.cluster; + +import java.util.ArrayList; + +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Random; + +import org.jgroups.Address; + +public class WeightedRandomServer implements ServerWeighting { + List servers = null; + int arsz = 0; + private int total = 0; + Random rnd = new Random(); + + @Override + public void init(List servers) { + this.servers = servers; + double totsz = 0; + for (DSEServer w : servers) { + totsz += w.maxSize - w.currentSize; + } + if (totsz > 0) { + for (DSEServer w : servers) { + long avail = w.maxSize - w.currentSize; + if (avail > 0) { + double pt = (double) avail / totsz; + w.weight = (int) Math.ceil((pt) * 100); + this.total += w.weight; + } + } + Collections.sort(servers, new CustomComparator()); + arsz = servers.size(); + } + } + + @Override + public Address getAddress(byte[] ignoredHosts) { + int random = rnd.nextInt(total); + Address s = null; + // loop thru our weightings until we arrive at the correct one + int current = 0; + if (servers.size() == 0) + return null; + while (s == null) { + for (DSEServer w : servers) { + current += w.weight; + if (random < current) { + if (!this.ignoreHost(w, ignoredHosts)) { + s = w.address; + } + break; + } + } + } + return s; + } + + @Override + public List
<Address>
getAddresses(int sz, byte[] ignoredHosts) { + int random = rnd.nextInt(total); + ArrayList
<Address>
lst = new ArrayList
<Address>
(); + if (sz >= arsz) { + for (DSEServer w : servers) { + if (!this.ignoreHost(w, ignoredHosts)) + lst.add(w.address); + } + } else { + // loop thru our weightings until we arrive at the correct one + int current = 0; + int pos = 0; + for (DSEServer w : servers) { + current += w.weight; + if (random < current) + break; + else + pos++; + } + + int added = 0; + for (int i = pos; i < arsz; i++) { + DSEServer s = servers.get(i); + if (!this.ignoreHost(s, ignoredHosts)) { + lst.add(s.address); + added++; + if (added >= sz) + break; + } + } + if (added < sz) { + for (int i = 0; i < arsz; i++) { + DSEServer s = servers.get(i); + if (!this.ignoreHost(s, ignoredHosts)) { + lst.add(s.address); + added++; + if (added >= sz) + break; + } + } + + } + } + return lst; + } + + @Override + public List getServers(int sz, byte[] ignoredHosts) { + int random = rnd.nextInt(total); + ArrayList lst = new ArrayList(); + if (sz >= arsz) { + for (DSEServer w : servers) { + if (!this.ignoreHost(w, ignoredHosts)) + lst.add(w); + } + } else { + // loop thru our weightings until we arrive at the correct one + int current = 0; + int pos = 0; + for (DSEServer w : servers) { + current += w.weight; + if (random < current) + break; + else + pos++; + } + + int added = 0; + for (int i = pos; i < arsz; i++) { + DSEServer s = servers.get(i); + if (!this.ignoreHost(s, ignoredHosts)) { + lst.add(s); + added++; + if (added >= sz) + break; + } + } + if (added < sz) { + for (int i = 0; i < arsz; i++) { + DSEServer s = servers.get(i); + if (!this.ignoreHost(s, ignoredHosts)) { + lst.add(s); + added++; + if (added >= sz) + break; + } + } + + } + } + return lst; + } + + private boolean ignoreHost(DSEServer s, byte[] ignoredHosts) { + if (ignoredHosts == null) + return false; + else { + for (byte b : ignoredHosts) { + if (b == s.id) + return true; + } + return false; + } + } + + private class CustomComparator implements Comparator { + @Override + public int compare(DSEServer o1, DSEServer o2) { + int fs1 = o1.weight; + int fs2 = o2.weight; + if (fs1 > fs2) + return 1; + else if (fs1 < fs2) + return -1; + else + return 0; + } + } + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/AddVolCmd.java b/src/org/opendedup/sdfs/cluster/cmds/AddVolCmd.java index c36e3b282..b5eac0035 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/AddVolCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/AddVolCmd.java @@ -1,62 +1,80 @@ -package org.opendedup.sdfs.cluster.cmds; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import org.jgroups.Message; -import org.jgroups.blocks.RequestOptions; -import org.jgroups.blocks.ResponseMode; -import org.jgroups.util.Rsp; -import org.jgroups.util.RspList; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.cluster.ClusterSocket; - -public class AddVolCmd implements IOPeerCmd { - boolean exists = false; - RequestOptions opts = null; - // private ArrayList results = new ArrayList(); - private String volume; - - public AddVolCmd(String volume) { - this.volume = volume; - opts = new RequestOptions(ResponseMode.GET_ALL, Main.ClusterRSPTimeout); - - } - - @Override - public void executeCmd(final ClusterSocket soc) throws IOException { - byte[] vb = this.volume.getBytes(); - byte[] b = new byte[1 + 4 + vb.length]; - ByteBuffer buf = ByteBuffer.wrap(b); - buf.put(NetworkCMDS.ADD_VOLUME); - buf.putInt(vb.length); - buf.put(vb); - try { - RspList lst = soc.getDispatcher().castMessage(null, - new Message(null, null, buf.array()), opts); - for (Rsp rsp : lst) { - if 
(rsp.hasException()) { - SDFSLogger.getLog().error( - "Add Volume to Cache Exception thrown for " - + rsp.getSender()); - throw rsp.getException(); - } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { - SDFSLogger.getLog().error( - "Add Volume to Cache Host unreachable for " - + rsp.getSender()); - } - - } - } catch (Throwable e) { - SDFSLogger.getLog().error("error while running fdisk", e); - throw new IOException(e); - } - } - - @Override - public byte getCmdID() { - return NetworkCMDS.ADD_VOLUME; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.jgroups.Message; +import org.jgroups.blocks.RequestOptions; +import org.jgroups.blocks.ResponseMode; +import org.jgroups.util.Rsp; +import org.jgroups.util.RspList; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.cluster.ClusterSocket; + +public class AddVolCmd implements IOPeerCmd { + boolean exists = false; + RequestOptions opts = null; + // private ArrayList results = new ArrayList(); + private String volume; + + public AddVolCmd(String volume) { + this.volume = volume; + opts = new RequestOptions(ResponseMode.GET_ALL, Main.ClusterRSPTimeout); + + } + + @Override + public void executeCmd(final ClusterSocket soc) throws IOException { + byte[] vb = this.volume.getBytes(); + byte[] b = new byte[1 + 4 + vb.length]; + ByteBuffer buf = ByteBuffer.wrap(b); + buf.put(NetworkCMDS.ADD_VOLUME); + buf.putInt(vb.length); + buf.put(vb); + try { + RspList lst = soc.getDispatcher().castMessage(null, + new Message(null, null, buf.array()), opts); + for (Rsp rsp : lst) { + if (rsp.hasException()) { + SDFSLogger.getLog().error( + "Add Volume to Cache Exception thrown for " + + rsp.getSender()); + throw rsp.getException(); + } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { + SDFSLogger.getLog().error( + "Add Volume to Cache Host unreachable for " + + rsp.getSender()); + } + + } + } catch (Throwable e) { + SDFSLogger.getLog().error("error while running fdisk", e); + throw new IOException(e); + } + } + + @Override + public byte getCmdID() { + return NetworkCMDS.ADD_VOLUME; + } + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/BFClaimHashesCmd.java b/src/org/opendedup/sdfs/cluster/cmds/BFClaimHashesCmd.java index 04cb173ef..2b244c203 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/BFClaimHashesCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/BFClaimHashesCmd.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * 
This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.cluster.cmds; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/cluster/cmds/BatchHashExistsCmd.java b/src/org/opendedup/sdfs/cluster/cmds/BatchHashExistsCmd.java index e5c4c71f7..a4b57e14c 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/BatchHashExistsCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/BatchHashExistsCmd.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.cluster.cmds; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/cluster/cmds/BatchWriteHashCmd.java b/src/org/opendedup/sdfs/cluster/cmds/BatchWriteHashCmd.java index cf5f8ae1e..fa17587be 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/BatchWriteHashCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/BatchWriteHashCmd.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
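The weighted getServers(...) selection at the top of this hunk walks the cumulative weights until a random value in [0, total) is passed, treats that index as a starting point, and keeps adding servers until it has sz of them. A minimal standalone sketch of that strategy, assuming a hypothetical Server class with only a name and a weight (not the project's DSEServer), dropping the ignoredHosts filtering, and wrapping with modular arithmetic instead of the second pass used above:

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

class WeightedPicker {
    // Hypothetical stand-in for DSEServer: just a name and a positive weight.
    static class Server {
        final String name;
        final int weight;
        Server(String name, int weight) { this.name = name; this.weight = weight; }
    }

    private final List<Server> servers;
    private final int total;
    private final Random rnd = new Random();

    WeightedPicker(List<Server> servers) {
        this.servers = servers;
        int t = 0;
        for (Server s : servers)
            t += s.weight;
        this.total = t; // assumes at least one server with weight > 0
    }

    // Return up to sz servers, starting at a weight-biased index and wrapping once.
    List<Server> pick(int sz) {
        if (sz >= servers.size())
            return new ArrayList<Server>(servers);
        int random = rnd.nextInt(total);
        int current = 0, pos = 0;
        for (Server s : servers) {
            current += s.weight;
            if (random < current)
                break;
            pos++;
        }
        List<Server> out = new ArrayList<Server>();
        for (int i = 0; i < servers.size() && out.size() < sz; i++)
            out.add(servers.get((pos + i) % servers.size()));
        return out;
    }
}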
+ *******************************************************************************/ package org.opendedup.sdfs.cluster.cmds; import java.io.ByteArrayOutputStream; diff --git a/src/org/opendedup/sdfs/cluster/cmds/ClaimHashesCmd.java b/src/org/opendedup/sdfs/cluster/cmds/ClaimHashesCmd.java index ee4c19d92..866be4c54 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/ClaimHashesCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/ClaimHashesCmd.java @@ -1,84 +1,102 @@ -package org.opendedup.sdfs.cluster.cmds; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; - -import org.jgroups.Address; -import org.jgroups.Message; -import org.jgroups.blocks.RequestOptions; -import org.jgroups.blocks.ResponseMode; -import org.jgroups.util.Rsp; -import org.jgroups.util.RspList; -import org.jgroups.util.Util; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.cluster.ClusterSocket; -import org.opendedup.sdfs.cluster.DSEServer; -import org.opendedup.sdfs.notification.SDFSEvent; - -public class ClaimHashesCmd implements IOPeerCmd { - boolean exists = false; - RequestOptions opts = null; - SDFSEvent evt; - - public ClaimHashesCmd(SDFSEvent evt) { - opts = new RequestOptions(ResponseMode.GET_ALL, 0); - this.evt = evt; - - } - - @Override - public void executeCmd(final ClusterSocket soc) throws IOException { - byte[] ob = null; - try { - ob = Util.objectToByteBuffer(evt); - } catch (Exception e1) { - throw new IOException(e1); - } - byte[] b = new byte[1 + 4 + ob.length]; - ByteBuffer buf = ByteBuffer.wrap(b); - buf.put(NetworkCMDS.RUN_CLAIM); - buf.putInt(ob.length); - buf.put(ob); - try { - List
addrs = new ArrayList<Address>
(); - List servers = soc.getStorageNodes(); - for (DSEServer server : servers) { - addrs.add(server.address); - } - RspList lst = soc.getDispatcher().castMessage(addrs, - new Message(null, null, buf.array()), opts); - for (Rsp rsp : lst) { - if (rsp.hasException()) { - SDFSLogger.getLog().error( - "Claim Exception thrown for " + rsp.getSender()); - throw rsp.getException(); - } else { - if (rsp.getValue() != null) { - SDFSLogger.getLog().debug( - "Claim completed for " + rsp.getSender() - + " returned=" + rsp.getValue()); - SDFSEvent sevt = (SDFSEvent) rsp.getValue(); - ArrayList children = sevt.getChildren(); - for (SDFSEvent cevt : children) { - evt.addChild(cevt); - } - } else { - SDFSLogger.getLog().debug( - "recieved null from " + rsp.getSender()); - } - } - } - } catch (Throwable e) { - SDFSLogger.getLog().error("error while running fdisk", e); - throw new IOException(e); - } - } - - @Override - public byte getCmdID() { - return NetworkCMDS.RUN_CLAIM; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +import org.jgroups.Address; +import org.jgroups.Message; +import org.jgroups.blocks.RequestOptions; +import org.jgroups.blocks.ResponseMode; +import org.jgroups.util.Rsp; +import org.jgroups.util.RspList; +import org.jgroups.util.Util; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.cluster.ClusterSocket; +import org.opendedup.sdfs.cluster.DSEServer; +import org.opendedup.sdfs.notification.SDFSEvent; + +public class ClaimHashesCmd implements IOPeerCmd { + boolean exists = false; + RequestOptions opts = null; + SDFSEvent evt; + + public ClaimHashesCmd(SDFSEvent evt) { + opts = new RequestOptions(ResponseMode.GET_ALL, 0); + this.evt = evt; + + } + + @Override + public void executeCmd(final ClusterSocket soc) throws IOException { + byte[] ob = null; + try { + ob = Util.objectToByteBuffer(evt); + } catch (Exception e1) { + throw new IOException(e1); + } + byte[] b = new byte[1 + 4 + ob.length]; + ByteBuffer buf = ByteBuffer.wrap(b); + buf.put(NetworkCMDS.RUN_CLAIM); + buf.putInt(ob.length); + buf.put(ob); + try { + List
addrs = new ArrayList<Address>
(); + List servers = soc.getStorageNodes(); + for (DSEServer server : servers) { + addrs.add(server.address); + } + RspList lst = soc.getDispatcher().castMessage(addrs, + new Message(null, null, buf.array()), opts); + for (Rsp rsp : lst) { + if (rsp.hasException()) { + SDFSLogger.getLog().error( + "Claim Exception thrown for " + rsp.getSender()); + throw rsp.getException(); + } else { + if (rsp.getValue() != null) { + SDFSLogger.getLog().debug( + "Claim completed for " + rsp.getSender() + + " returned=" + rsp.getValue()); + SDFSEvent sevt = (SDFSEvent) rsp.getValue(); + ArrayList children = sevt.getChildren(); + for (SDFSEvent cevt : children) { + evt.addChild(cevt); + } + } else { + SDFSLogger.getLog().debug( + "recieved null from " + rsp.getSender()); + } + } + } + } catch (Throwable e) { + SDFSLogger.getLog().error("error while running fdisk", e); + throw new IOException(e); + } + } + + @Override + public byte getCmdID() { + return NetworkCMDS.RUN_CLAIM; + } + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/ClusterCmdException.java b/src/org/opendedup/sdfs/cluster/cmds/ClusterCmdException.java index d70a9cd5a..58edc2e52 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/ClusterCmdException.java +++ b/src/org/opendedup/sdfs/cluster/cmds/ClusterCmdException.java @@ -1,14 +1,32 @@ -package org.opendedup.sdfs.cluster.cmds; - -public class ClusterCmdException extends Exception { - - /** - * - */ - private static final long serialVersionUID = -2523934501123942317L; - - public ClusterCmdException(String msg) { - super(msg); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +public class ClusterCmdException extends Exception { + + /** + * + */ + private static final long serialVersionUID = -2523934501123942317L; + + public ClusterCmdException(String msg) { + super(msg); + } + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/DirectBatchWriteHashCmd.java b/src/org/opendedup/sdfs/cluster/cmds/DirectBatchWriteHashCmd.java index 8e8be775b..df8a76eb4 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/DirectBatchWriteHashCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/DirectBatchWriteHashCmd.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
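ClaimHashesCmd, a little above, serializes the SDFSEvent with Util.objectToByteBuffer and frames it as one command byte, a 4-byte payload length, and the payload before multicasting it to the storage nodes. A small sketch of writing and reading that frame with plain java.nio; the class name and the placeholder payload are illustrative, and only the RUN_CLAIM value (16) comes from NetworkCMDS later in this diff:

import java.nio.ByteBuffer;

public class ClaimFrameSketch {
    static final byte RUN_CLAIM = 16; // value from NetworkCMDS later in this diff

    // Build |cmd (1b)|payload length (4b)|payload|, the same shape executeCmd uses above.
    static byte[] build(byte cmd, byte[] payload) {
        ByteBuffer buf = ByteBuffer.allocate(1 + 4 + payload.length);
        buf.put(cmd);
        buf.putInt(payload.length);
        buf.put(payload);
        return buf.array();
    }

    // Read the same frame back; the receiver would deserialize the payload into an SDFSEvent.
    static byte[] parse(byte[] frame) {
        ByteBuffer buf = ByteBuffer.wrap(frame);
        byte cmd = buf.get();
        if (cmd != RUN_CLAIM)
            throw new IllegalArgumentException("unexpected command " + cmd);
        byte[] payload = new byte[buf.getInt()];
        buf.get(payload);
        return payload;
    }

    public static void main(String[] args) {
        byte[] payload = "serialized-SDFSEvent-bytes".getBytes(); // placeholder payload
        byte[] frame = build(RUN_CLAIM, payload);
        System.out.println(parse(frame).length == payload.length); // true
    }
}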
+ * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.cluster.cmds; import java.io.ByteArrayOutputStream; diff --git a/src/org/opendedup/sdfs/cluster/cmds/DirectFetchChunkCmd.java b/src/org/opendedup/sdfs/cluster/cmds/DirectFetchChunkCmd.java index 489046cf8..4a0ee5b98 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/DirectFetchChunkCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/DirectFetchChunkCmd.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.cluster.cmds; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/cluster/cmds/DirectWriteHashCmd.java b/src/org/opendedup/sdfs/cluster/cmds/DirectWriteHashCmd.java index d4d794e2f..b9fb37440 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/DirectWriteHashCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/DirectWriteHashCmd.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.sdfs.cluster.cmds; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/cluster/cmds/FDiskCmd.java b/src/org/opendedup/sdfs/cluster/cmds/FDiskCmd.java index 1dfe957ca..cb15c4bf1 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/FDiskCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/FDiskCmd.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.cluster.cmds; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/cluster/cmds/FetchChunkCmd.java b/src/org/opendedup/sdfs/cluster/cmds/FetchChunkCmd.java index 12e2f7e64..52b0406ae 100755 --- a/src/org/opendedup/sdfs/cluster/cmds/FetchChunkCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/FetchChunkCmd.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.sdfs.cluster.cmds; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/cluster/cmds/FindGCMasterCmd.java b/src/org/opendedup/sdfs/cluster/cmds/FindGCMasterCmd.java index 9d88262ca..f9bca292f 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/FindGCMasterCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/FindGCMasterCmd.java @@ -1,84 +1,102 @@ -package org.opendedup.sdfs.cluster.cmds; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import org.jgroups.Address; -import org.jgroups.Message; -import org.jgroups.blocks.RequestOptions; -import org.jgroups.blocks.ResponseMode; -import org.jgroups.util.Rsp; -import org.jgroups.util.RspList; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.cluster.ClusterSocket; - -public class FindGCMasterCmd implements IOPeerCmd { - boolean exists = false; - RequestOptions opts = null; - private Address gcMaster = null; - - public FindGCMasterCmd() { - opts = new RequestOptions(ResponseMode.GET_ALL, 0); - - } - - @Override - public void executeCmd(final ClusterSocket soc) throws IOException { - byte[] b = new byte[1]; - ByteBuffer buf = ByteBuffer.wrap(b); - buf.put(NetworkCMDS.FIND_GC_MASTER_CMD); - try { - RspList lst = soc.getDispatcher().castMessage(null, - new Message(null, null, buf.array()), opts); - for (Rsp rsp : lst) { - if (rsp.hasException()) { - SDFSLogger.getLog().error( - "FIND_GC_MASTER_CMD Exception thrown for " - + rsp.getSender()); - // throw rsp.getException(); - } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { - SDFSLogger.getLog().error( - "FIND_GC_MASTER_CMD Host unreachable Exception thrown for " - + rsp.getSender()); - // throw new - // IOException("FIND_GC_MASTER_CMD Host unreachable Exception thrown for " - // + rsp.getSender()); - } else { - if (rsp.getValue() != null) { - SDFSLogger.getLog().debug( - "FIND_GC_MASTER_CMD completed for " - + rsp.getSender() + " returned=" - + rsp.getValue()); - Boolean bm = (Boolean) rsp.getValue(); - boolean m = bm.booleanValue(); - if (m) { - if (this.gcMaster != null) - throw new IOException( - "FIND_GC_MASTER_CMD already identified at [" - + gcMaster.toString() - + "] but has also been identified at [" - + rsp.getSender() + "]."); - else - this.gcMaster = rsp.getSender(); - } - - } - } - } - } catch (Throwable e) { - SDFSLogger.getLog().error("error while running FIND_GC_MASTER_CMD", - e); - throw new IOException(e); - } - } - - @Override - public byte getCmdID() { - return NetworkCMDS.FIND_GC_MASTER_CMD; - } - - public Address getResults() { - return this.gcMaster; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.jgroups.Address; +import org.jgroups.Message; +import org.jgroups.blocks.RequestOptions; +import org.jgroups.blocks.ResponseMode; +import org.jgroups.util.Rsp; +import org.jgroups.util.RspList; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.cluster.ClusterSocket; + +public class FindGCMasterCmd implements IOPeerCmd { + boolean exists = false; + RequestOptions opts = null; + private Address gcMaster = null; + + public FindGCMasterCmd() { + opts = new RequestOptions(ResponseMode.GET_ALL, 0); + + } + + @Override + public void executeCmd(final ClusterSocket soc) throws IOException { + byte[] b = new byte[1]; + ByteBuffer buf = ByteBuffer.wrap(b); + buf.put(NetworkCMDS.FIND_GC_MASTER_CMD); + try { + RspList lst = soc.getDispatcher().castMessage(null, + new Message(null, null, buf.array()), opts); + for (Rsp rsp : lst) { + if (rsp.hasException()) { + SDFSLogger.getLog().error( + "FIND_GC_MASTER_CMD Exception thrown for " + + rsp.getSender()); + // throw rsp.getException(); + } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { + SDFSLogger.getLog().error( + "FIND_GC_MASTER_CMD Host unreachable Exception thrown for " + + rsp.getSender()); + // throw new + // IOException("FIND_GC_MASTER_CMD Host unreachable Exception thrown for " + // + rsp.getSender()); + } else { + if (rsp.getValue() != null) { + SDFSLogger.getLog().debug( + "FIND_GC_MASTER_CMD completed for " + + rsp.getSender() + " returned=" + + rsp.getValue()); + Boolean bm = (Boolean) rsp.getValue(); + boolean m = bm.booleanValue(); + if (m) { + if (this.gcMaster != null) + throw new IOException( + "FIND_GC_MASTER_CMD already identified at [" + + gcMaster.toString() + + "] but has also been identified at [" + + rsp.getSender() + "]."); + else + this.gcMaster = rsp.getSender(); + } + + } + } + } + } catch (Throwable e) { + SDFSLogger.getLog().error("error while running FIND_GC_MASTER_CMD", + e); + throw new IOException(e); + } + } + + @Override + public byte getCmdID() { + return NetworkCMDS.FIND_GC_MASTER_CMD; + } + + public Address getResults() { + return this.gcMaster; + } + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/FindVolOwnerCmd.java b/src/org/opendedup/sdfs/cluster/cmds/FindVolOwnerCmd.java index 4559632d5..9cf7b0927 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/FindVolOwnerCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/FindVolOwnerCmd.java @@ -1,73 +1,91 @@ -package org.opendedup.sdfs.cluster.cmds; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import org.jgroups.Message; -import org.jgroups.blocks.RequestOptions; -import org.jgroups.blocks.ResponseMode; -import org.jgroups.util.Rsp; -import org.jgroups.util.RspList; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.cluster.ClusterSocket; -import org.opendedup.sdfs.io.Volume; - -public class FindVolOwnerCmd implements IOPeerCmd { - boolean exists = false; - RequestOptions opts = null; - // private ArrayList results = new ArrayList(); - private String volumeStr; - private Volume vol = null; - - public FindVolOwnerCmd(String volumeStr) { - this.volumeStr = volumeStr; - opts = new RequestOptions(ResponseMode.GET_ALL, Main.ClusterRSPTimeout); - } - - @Override - public void executeCmd(final ClusterSocket soc) throws IOException { - byte[] vb = this.volumeStr.getBytes(); - byte[] b = 
new byte[1 + 4 + vb.length]; - ByteBuffer buf = ByteBuffer.wrap(b); - buf.put(NetworkCMDS.FIND_VOLUME_OWNER); - buf.putInt(vb.length); - buf.put(vb); - try { - RspList lst = soc.getDispatcher().castMessage(null, - new Message(null, null, buf.array()), opts); - for (Rsp rsp : lst) { - if (rsp.hasException()) { - SDFSLogger.getLog().error( - "FIND_VOLUME_OWNER Exception thrown for " - + rsp.getSender(), rsp.getException()); - } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { - SDFSLogger.getLog().error( - "FIND_VOLUME_OWNER Host unreachable for " - + rsp.getSender()); - } else if (rsp.getValue() != null) { - SDFSLogger.getLog().debug( - "FIND_VOLUME_OWNER completed for " - + rsp.getSender() + " returned=" - + rsp.getValue()); - vol = (Volume) rsp.getValue(); - vol.host = rsp.getSender(); - - } - } - } catch (Throwable e) { - SDFSLogger.getLog().error("error while running fdisk", e); - throw new IOException(e); - } - } - - public Volume getResults() { - return this.vol; - } - - @Override - public byte getCmdID() { - return NetworkCMDS.FIND_VOLUME_OWNER; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.jgroups.Message; +import org.jgroups.blocks.RequestOptions; +import org.jgroups.blocks.ResponseMode; +import org.jgroups.util.Rsp; +import org.jgroups.util.RspList; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.cluster.ClusterSocket; +import org.opendedup.sdfs.io.Volume; + +public class FindVolOwnerCmd implements IOPeerCmd { + boolean exists = false; + RequestOptions opts = null; + // private ArrayList results = new ArrayList(); + private String volumeStr; + private Volume vol = null; + + public FindVolOwnerCmd(String volumeStr) { + this.volumeStr = volumeStr; + opts = new RequestOptions(ResponseMode.GET_ALL, Main.ClusterRSPTimeout); + } + + @Override + public void executeCmd(final ClusterSocket soc) throws IOException { + byte[] vb = this.volumeStr.getBytes(); + byte[] b = new byte[1 + 4 + vb.length]; + ByteBuffer buf = ByteBuffer.wrap(b); + buf.put(NetworkCMDS.FIND_VOLUME_OWNER); + buf.putInt(vb.length); + buf.put(vb); + try { + RspList lst = soc.getDispatcher().castMessage(null, + new Message(null, null, buf.array()), opts); + for (Rsp rsp : lst) { + if (rsp.hasException()) { + SDFSLogger.getLog().error( + "FIND_VOLUME_OWNER Exception thrown for " + + rsp.getSender(), rsp.getException()); + } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { + SDFSLogger.getLog().error( + "FIND_VOLUME_OWNER Host unreachable for " + + rsp.getSender()); + } else if (rsp.getValue() != null) { + SDFSLogger.getLog().debug( + "FIND_VOLUME_OWNER completed for " + + rsp.getSender() + " returned=" + + rsp.getValue()); + vol = (Volume) rsp.getValue(); + vol.host = rsp.getSender(); + + } + } + } catch (Throwable e) { + SDFSLogger.getLog().error("error while running fdisk", e); + throw new IOException(e); + } + } + + public Volume getResults() { + return this.vol; + } + + @Override + public byte getCmdID() { + return NetworkCMDS.FIND_VOLUME_OWNER; + } + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/HashExistsCmd.java b/src/org/opendedup/sdfs/cluster/cmds/HashExistsCmd.java index cda9e5fcd..d9199674d 100755 --- a/src/org/opendedup/sdfs/cluster/cmds/HashExistsCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/HashExistsCmd.java @@ -1,175 +1,193 @@ -package org.opendedup.sdfs.cluster.cmds; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.concurrent.locks.ReentrantLock; - -import org.jgroups.Address; -import org.jgroups.Message; -import org.jgroups.blocks.RequestOptions; -import org.jgroups.blocks.ResponseMode; -import org.jgroups.blocks.RspFilter; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.cluster.DSEClientSocket; - -public class HashExistsCmd implements IOClientCmd { - byte[] hash; - boolean exists = false; - RequestOptions opts = null; - byte[] resp = new byte[8]; - boolean waitforall = false; - byte numtowaitfor = 1; - boolean meetsRudundancy = false; - int csz = 0; - - public HashExistsCmd(byte[] hash, boolean waitforall, byte numtowaitfor) { - this.hash = hash; - this.waitforall = waitforall; - resp[0] = -1; - this.numtowaitfor = numtowaitfor; - } - - @Override - public void executeCmd(final DSEClientSocket soc) throws IOException { - if (waitforall) - opts = new RequestOptions(ResponseMode.GET_ALL, - Main.ClusterRSPTimeout, true, - - new 
RspFilter() { - private final ReentrantLock lock = new ReentrantLock(); - int pos = 1; - - public boolean needMoreResponses() { - - return true; - } - - @Override - public boolean isAcceptable(Object response, - Address arg1) { - - if (response instanceof Boolean) { - boolean rsp = ((Boolean) response) - .booleanValue(); - if (rsp) { - lock.lock(); - resp[0] = 1; - resp[pos] = soc.serverState.get(arg1).id; - pos++; - csz++; - exists = true; - lock.unlock(); - } else { - lock.lock(); - if (resp[0] == -1) - resp[0] = 0; - lock.unlock(); - } - return rsp; - } else { - - return false; - } - } - - }); - else { - opts = new RequestOptions(ResponseMode.GET_ALL, - Main.ClusterRSPTimeout, false, - - new RspFilter() { - private final ReentrantLock lock = new ReentrantLock(); - - int pos = 1; - - @Override - public boolean needMoreResponses() { - return !meetsRudundancy; - } - - @Override - public boolean isAcceptable(Object response, - Address arg1) { - try { - boolean rsp = ((Boolean) response) - .booleanValue(); - if (rsp) { - lock.lock(); - resp[0] = 1; - resp[pos] = soc.serverState.get(arg1).id; - - if (pos >= numtowaitfor) { - // SDFSLogger.getLog().info("meets requirements"); - meetsRudundancy = true; - } - csz++; - pos++; - exists = rsp; - lock.unlock(); - } else { - - lock.lock(); - if (resp[0] == -1) - resp[0] = 0; - lock.unlock(); - } - return true; - - } catch (Exception e) { - SDFSLogger.getLog().warn( - "malformed hashexists msg from " - + arg1.toString(), e); - return false; - } - } - - }); - } - // opts.setFlags(Message.Flag.DONT_BUNDLE); - // opts.setFlags(Message.Flag.NO_TOTAL_ORDER); - opts.setFlags(Message.Flag.OOB); - opts.setAnycasting(true); - byte[] b = new byte[1 + 2 + 2 + hash.length]; - ByteBuffer buf = ByteBuffer.wrap(b); - buf.put(NetworkCMDS.HASH_EXISTS_CMD); - buf.putShort((short) hash.length); - buf.put(hash); - try { - List
servers = soc.getServers(); - soc.disp.castMessage(servers, new Message(null, null, buf.array()), - opts); - - } catch (Exception e) { - SDFSLogger.getLog().error("error while getting hash", e); - throw new IOException(e); - } - } - - public byte[] getHash() { - return this.hash; - } - - public byte[] getResponse() { - return this.resp; - } - - public boolean exists() { - return this.exists; - } - - public int responses() { - return this.csz; - } - - public boolean meetsRedundancyRequirements() { - return this.meetsRudundancy; - } - - @Override - public byte getCmdID() { - return NetworkCMDS.HASH_EXISTS_CMD; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.concurrent.locks.ReentrantLock; + +import org.jgroups.Address; +import org.jgroups.Message; +import org.jgroups.blocks.RequestOptions; +import org.jgroups.blocks.ResponseMode; +import org.jgroups.blocks.RspFilter; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.cluster.DSEClientSocket; + +public class HashExistsCmd implements IOClientCmd { + byte[] hash; + boolean exists = false; + RequestOptions opts = null; + byte[] resp = new byte[8]; + boolean waitforall = false; + byte numtowaitfor = 1; + boolean meetsRudundancy = false; + int csz = 0; + + public HashExistsCmd(byte[] hash, boolean waitforall, byte numtowaitfor) { + this.hash = hash; + this.waitforall = waitforall; + resp[0] = -1; + this.numtowaitfor = numtowaitfor; + } + + @Override + public void executeCmd(final DSEClientSocket soc) throws IOException { + if (waitforall) + opts = new RequestOptions(ResponseMode.GET_ALL, + Main.ClusterRSPTimeout, true, + + new RspFilter() { + private final ReentrantLock lock = new ReentrantLock(); + int pos = 1; + + public boolean needMoreResponses() { + + return true; + } + + @Override + public boolean isAcceptable(Object response, + Address arg1) { + + if (response instanceof Boolean) { + boolean rsp = ((Boolean) response) + .booleanValue(); + if (rsp) { + lock.lock(); + resp[0] = 1; + resp[pos] = soc.serverState.get(arg1).id; + pos++; + csz++; + exists = true; + lock.unlock(); + } else { + lock.lock(); + if (resp[0] == -1) + resp[0] = 0; + lock.unlock(); + } + return rsp; + } else { + + return false; + } + } + + }); + else { + opts = new RequestOptions(ResponseMode.GET_ALL, + Main.ClusterRSPTimeout, false, + + new RspFilter() { + private final ReentrantLock lock = new ReentrantLock(); + + int pos = 1; + + @Override + public boolean needMoreResponses() { + return !meetsRudundancy; + } + + @Override + 
public boolean isAcceptable(Object response, + Address arg1) { + try { + boolean rsp = ((Boolean) response) + .booleanValue(); + if (rsp) { + lock.lock(); + resp[0] = 1; + resp[pos] = soc.serverState.get(arg1).id; + + if (pos >= numtowaitfor) { + // SDFSLogger.getLog().info("meets requirements"); + meetsRudundancy = true; + } + csz++; + pos++; + exists = rsp; + lock.unlock(); + } else { + + lock.lock(); + if (resp[0] == -1) + resp[0] = 0; + lock.unlock(); + } + return true; + + } catch (Exception e) { + SDFSLogger.getLog().warn( + "malformed hashexists msg from " + + arg1.toString(), e); + return false; + } + } + + }); + } + // opts.setFlags(Message.Flag.DONT_BUNDLE); + // opts.setFlags(Message.Flag.NO_TOTAL_ORDER); + opts.setFlags(Message.Flag.OOB); + opts.setAnycasting(true); + byte[] b = new byte[1 + 2 + 2 + hash.length]; + ByteBuffer buf = ByteBuffer.wrap(b); + buf.put(NetworkCMDS.HASH_EXISTS_CMD); + buf.putShort((short) hash.length); + buf.put(hash); + try { + List
servers = soc.getServers(); + soc.disp.castMessage(servers, new Message(null, null, buf.array()), + opts); + + } catch (Exception e) { + SDFSLogger.getLog().error("error while getting hash", e); + throw new IOException(e); + } + } + + public byte[] getHash() { + return this.hash; + } + + public byte[] getResponse() { + return this.resp; + } + + public boolean exists() { + return this.exists; + } + + public int responses() { + return this.csz; + } + + public boolean meetsRedundancyRequirements() { + return this.meetsRudundancy; + } + + @Override + public byte getCmdID() { + return NetworkCMDS.HASH_EXISTS_CMD; + } + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/IOClientCmd.java b/src/org/opendedup/sdfs/cluster/cmds/IOClientCmd.java index 8ac8f934e..79095bc2c 100755 --- a/src/org/opendedup/sdfs/cluster/cmds/IOClientCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/IOClientCmd.java @@ -1,13 +1,31 @@ -package org.opendedup.sdfs.cluster.cmds; - -import java.io.IOException; - -import org.opendedup.sdfs.cluster.DSEClientSocket; - -public interface IOClientCmd { - public abstract void executeCmd(DSEClientSocket socket) throws IOException, - ClusterCmdException; - - public abstract byte getCmdID(); - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +import java.io.IOException; + +import org.opendedup.sdfs.cluster.DSEClientSocket; + +public interface IOClientCmd { + public abstract void executeCmd(DSEClientSocket socket) throws IOException, + ClusterCmdException; + + public abstract byte getCmdID(); + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/IOPeerCmd.java b/src/org/opendedup/sdfs/cluster/cmds/IOPeerCmd.java index f9334ac70..3d747c939 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/IOPeerCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/IOPeerCmd.java @@ -1,12 +1,30 @@ -package org.opendedup.sdfs.cluster.cmds; - -import java.io.IOException; - -import org.opendedup.sdfs.cluster.ClusterSocket; - -public interface IOPeerCmd { - public abstract void executeCmd(ClusterSocket socket) throws IOException; - - public abstract byte getCmdID(); - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
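HashExistsCmd, just above, hands JGroups a RspFilter so the anycast can stop early: every true reply records the responder's id and, in the non-waitforall branch, needMoreResponses() returns false once numtowaitfor positive answers have been counted. A stripped-down filter with the same shape, using only the two RspFilter callbacks that appear in the code above; the atomic counter is a simplification, not the project's implementation:

import java.util.concurrent.atomic.AtomicInteger;

import org.jgroups.Address;
import org.jgroups.blocks.RspFilter;

// Counts Boolean replies and stops requesting more once `required` positives are in.
class RedundancyRspFilter implements RspFilter {
    private final int required;
    private final AtomicInteger positives = new AtomicInteger();

    RedundancyRspFilter(int required) {
        this.required = required;
    }

    @Override
    public boolean isAcceptable(Object response, Address sender) {
        if (response instanceof Boolean && ((Boolean) response).booleanValue())
            positives.incrementAndGet();
        return true; // every well-formed reply is accepted into the response list
    }

    @Override
    public boolean needMoreResponses() {
        return positives.get() < required; // false ends the collection early
    }
}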
+ * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +import java.io.IOException; + +import org.opendedup.sdfs.cluster.ClusterSocket; + +public interface IOPeerCmd { + public abstract void executeCmd(ClusterSocket socket) throws IOException; + + public abstract byte getCmdID(); + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/IOServerCmd.java b/src/org/opendedup/sdfs/cluster/cmds/IOServerCmd.java index aff3e0f5f..d443c3786 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/IOServerCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/IOServerCmd.java @@ -1,12 +1,30 @@ -package org.opendedup.sdfs.cluster.cmds; - -import java.io.IOException; - -import org.opendedup.sdfs.cluster.DSEServerSocket; - -public interface IOServerCmd { - public abstract void executeCmd(DSEServerSocket socket) throws IOException; - - public abstract byte getCmdID(); - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +import java.io.IOException; + +import org.opendedup.sdfs.cluster.DSEServerSocket; + +public interface IOServerCmd { + public abstract void executeCmd(DSEServerSocket socket) throws IOException; + + public abstract byte getCmdID(); + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/ListVolsCmd.java b/src/org/opendedup/sdfs/cluster/cmds/ListVolsCmd.java index 4c19958d4..b11ea5ae9 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/ListVolsCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/ListVolsCmd.java @@ -1,80 +1,98 @@ -package org.opendedup.sdfs.cluster.cmds; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; - -import org.jgroups.Message; -import org.jgroups.blocks.RequestOptions; -import org.jgroups.blocks.ResponseMode; -import org.jgroups.util.Rsp; -import org.jgroups.util.RspList; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.cluster.ClusterSocket; -import org.opendedup.sdfs.io.Volume; - -public class ListVolsCmd implements IOPeerCmd { - boolean exists = false; - RequestOptions opts = null; - private HashMap results = new HashMap(); - - public ListVolsCmd() { - opts = new RequestOptions(ResponseMode.GET_ALL, Main.ClusterRSPTimeout); - } - - @Override - public void executeCmd(final ClusterSocket soc) throws IOException { - byte[] b = new byte[1]; - ByteBuffer buf = ByteBuffer.wrap(b); - buf.put(NetworkCMDS.LIST_VOLUMES); - try { - RspList lst = soc.getDispatcher().castMessage(null, - new Message(null, null, buf.array()), opts); - for (Rsp rsp : lst) { - if (rsp.hasException()) { - SDFSLogger.getLog().error( - "List Volume Exception thrown for " - + rsp.getSender()); - throw rsp.getException(); - } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { - SDFSLogger.getLog().error( - "List Volume Host unreachable Exception thrown for " - + rsp.getSender()); - } else { - if (rsp.getValue() != null) { - SDFSLogger.getLog().debug( - "List completed for " + rsp.getSender() - + " returned=" + rsp.getValue()); - @SuppressWarnings("unchecked") - ArrayList rst = (ArrayList) rsp - .getValue(); - for (String volStr : rst) { - if (!this.results.containsKey(volStr)) { - FindVolOwnerCmd cmd = new FindVolOwnerCmd( - volStr); - cmd.executeCmd(soc); - Volume vol = cmd.getResults(); - this.results.put(volStr, vol); - } - } - } - } - } - } catch (Throwable e) { - SDFSLogger.getLog().error("error while running list volumes", e); - throw new IOException(e); - } - } - - @Override - public byte getCmdID() { - return NetworkCMDS.LIST_VOLUMES; - } - - public HashMap getResults() { - return this.results; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
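ListVolsCmd, whose old version appears just above and whose replacement follows, broadcasts LIST_VOLUMES, then issues one FindVolOwnerCmd per volume name it has not seen before and keeps the results in a map keyed by name. The same fan-out shape with the cluster calls replaced by a hypothetical lookup function:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

class VolumeListing {
    // perNodeNames: the volume-name list each node returned to LIST_VOLUMES;
    // ownerLookup: stands in for the per-name FindVolOwnerCmd round trip.
    static Map<String, String> collect(List<List<String>> perNodeNames,
            Function<String, String> ownerLookup) {
        Map<String, String> results = new HashMap<String, String>();
        for (List<String> names : perNodeNames) {
            for (String name : names) {
                if (!results.containsKey(name))
                    results.put(name, ownerLookup.apply(name)); // one follow-up lookup per new name
            }
        }
        return results;
    }
}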
+ * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; + +import org.jgroups.Message; +import org.jgroups.blocks.RequestOptions; +import org.jgroups.blocks.ResponseMode; +import org.jgroups.util.Rsp; +import org.jgroups.util.RspList; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.cluster.ClusterSocket; +import org.opendedup.sdfs.io.Volume; + +public class ListVolsCmd implements IOPeerCmd { + boolean exists = false; + RequestOptions opts = null; + private HashMap results = new HashMap(); + + public ListVolsCmd() { + opts = new RequestOptions(ResponseMode.GET_ALL, Main.ClusterRSPTimeout); + } + + @Override + public void executeCmd(final ClusterSocket soc) throws IOException { + byte[] b = new byte[1]; + ByteBuffer buf = ByteBuffer.wrap(b); + buf.put(NetworkCMDS.LIST_VOLUMES); + try { + RspList lst = soc.getDispatcher().castMessage(null, + new Message(null, null, buf.array()), opts); + for (Rsp rsp : lst) { + if (rsp.hasException()) { + SDFSLogger.getLog().error( + "List Volume Exception thrown for " + + rsp.getSender()); + throw rsp.getException(); + } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { + SDFSLogger.getLog().error( + "List Volume Host unreachable Exception thrown for " + + rsp.getSender()); + } else { + if (rsp.getValue() != null) { + SDFSLogger.getLog().debug( + "List completed for " + rsp.getSender() + + " returned=" + rsp.getValue()); + @SuppressWarnings("unchecked") + ArrayList rst = (ArrayList) rsp + .getValue(); + for (String volStr : rst) { + if (!this.results.containsKey(volStr)) { + FindVolOwnerCmd cmd = new FindVolOwnerCmd( + volStr); + cmd.executeCmd(soc); + Volume vol = cmd.getResults(); + this.results.put(volStr, vol); + } + } + } + } + } + } catch (Throwable e) { + SDFSLogger.getLog().error("error while running list volumes", e); + throw new IOException(e); + } + } + + @Override + public byte getCmdID() { + return NetworkCMDS.LIST_VOLUMES; + } + + public HashMap getResults() { + return this.results; + } + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/MultiCastFetchChunkCmd.java b/src/org/opendedup/sdfs/cluster/cmds/MultiCastFetchChunkCmd.java index 94dc40a76..e966f8dce 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/MultiCastFetchChunkCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/MultiCastFetchChunkCmd.java @@ -1,75 +1,93 @@ -package org.opendedup.sdfs.cluster.cmds; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; - -import org.jgroups.Address; -import org.jgroups.Message; -import org.jgroups.blocks.RequestOptions; -import org.jgroups.blocks.ResponseMode; -import org.jgroups.util.Rsp; -import org.jgroups.util.RspList; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.cluster.DSEClientSocket; - -public class MultiCastFetchChunkCmd implements IOClientCmd { - byte[] hash; - byte[] chunk = null; - RequestOptions opts = null; - byte[] hashlocs; - - public MultiCastFetchChunkCmd(byte[] hash, byte[] hashlocs) { - this.hash = hash; - this.hashlocs = hashlocs; - opts = new RequestOptions(ResponseMode.GET_ALL, 0); - opts.setFlags(Message.Flag.DONT_BUNDLE); - opts.setFlags(Message.Flag.OOB); - opts.setAnycasting(true); - } - - @Override - public void 
executeCmd(DSEClientSocket soc) throws IOException { - byte[] b = new byte[1 + 2 + hash.length]; - // SDFSLogger.getLog().debug("Fetching " + - // StringUtils.getHexString(hash)); - ByteBuffer buf = ByteBuffer.wrap(b); - buf.put(NetworkCMDS.FETCH_CMD); - buf.putShort((short) hash.length); - buf.put(hash); - int pos = 1; - while (chunk == null) { - Address addr = null; - try { - addr = soc.getServer(hashlocs, pos); - } catch (IOException e) { - throw e; - } - ArrayList
al = new ArrayList<Address>
(); - al.add(addr); - try { - RspList lst = soc.disp.castMessage(al, new Message( - null, null, buf.array()), opts); - Rsp rsp = lst.get(addr); - if (!rsp.hasException() && !rsp.wasSuspected()) { - this.chunk = (byte[]) rsp.getValue(); - } else { - pos++; - } - } catch (Exception e) { - SDFSLogger.getLog().error("error while getting hash", e); - throw new IOException(e); - } - } - } - - public byte[] getChunk() { - return this.chunk; - } - - @Override - public byte getCmdID() { - return NetworkCMDS.FETCH_CMD; - } - +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; + +import org.jgroups.Address; +import org.jgroups.Message; +import org.jgroups.blocks.RequestOptions; +import org.jgroups.blocks.ResponseMode; +import org.jgroups.util.Rsp; +import org.jgroups.util.RspList; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.cluster.DSEClientSocket; + +public class MultiCastFetchChunkCmd implements IOClientCmd { + byte[] hash; + byte[] chunk = null; + RequestOptions opts = null; + byte[] hashlocs; + + public MultiCastFetchChunkCmd(byte[] hash, byte[] hashlocs) { + this.hash = hash; + this.hashlocs = hashlocs; + opts = new RequestOptions(ResponseMode.GET_ALL, 0); + opts.setFlags(Message.Flag.DONT_BUNDLE); + opts.setFlags(Message.Flag.OOB); + opts.setAnycasting(true); + } + + @Override + public void executeCmd(DSEClientSocket soc) throws IOException { + byte[] b = new byte[1 + 2 + hash.length]; + // SDFSLogger.getLog().debug("Fetching " + + // StringUtils.getHexString(hash)); + ByteBuffer buf = ByteBuffer.wrap(b); + buf.put(NetworkCMDS.FETCH_CMD); + buf.putShort((short) hash.length); + buf.put(hash); + int pos = 1; + while (chunk == null) { + Address addr = null; + try { + addr = soc.getServer(hashlocs, pos); + } catch (IOException e) { + throw e; + } + ArrayList
al = new ArrayList<Address>
(); + al.add(addr); + try { + RspList lst = soc.disp.castMessage(al, new Message( + null, null, buf.array()), opts); + Rsp rsp = lst.get(addr); + if (!rsp.hasException() && !rsp.wasSuspected()) { + this.chunk = (byte[]) rsp.getValue(); + } else { + pos++; + } + } catch (Exception e) { + SDFSLogger.getLog().error("error while getting hash", e); + throw new IOException(e); + } + } + } + + public byte[] getChunk() { + return this.chunk; + } + + @Override + public byte getCmdID() { + return NetworkCMDS.FETCH_CMD; + } + } \ No newline at end of file diff --git a/src/org/opendedup/sdfs/cluster/cmds/NetworkCMDS.java b/src/org/opendedup/sdfs/cluster/cmds/NetworkCMDS.java index 5c19b6cf2..e8ccf3062 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/NetworkCMDS.java +++ b/src/org/opendedup/sdfs/cluster/cmds/NetworkCMDS.java @@ -1,57 +1,75 @@ -package org.opendedup.sdfs.cluster.cmds; - -/** - * - * @author Sam Silverberg These are the commands that are sent by the client to - * the chunk store. The command is sent as the first byte in a command - * request. A typical client request is as follows : - * - * |command type (1b)|length of hash (2b)|md5 or sha hash (lenghth of - * hash)| command specific data (variable length)| - * - */ - -public class NetworkCMDS { - /** Fetch a chunk of data from the chunk store */ - public static final byte FETCH_CMD = 0; - /** See if a hash already exists in the chunk store */ - public static final byte HASH_EXISTS_CMD = 1; - /** write a chunk to the chunk store **/ - public static final byte WRITE_HASH_CMD = 2; - /** Close the client thread used for this TCP connection */ - public static final byte QUIT_CMD = 3; - /** Claim that the client is still using the hash in question */ - // public static final byte CLAIM_HASH = 4; - /** - * Fetch a chunk and request that it is compressed before transmitting to - * the client. The data will be compressed by the chunk store before it is - * sent to the client. - */ - public static final byte FETCH_COMPRESSED_CMD = 5; - /** - * Write a compressed chunk to the chunk server. The data will be compressed - * by the client before it is sent. - */ - public static final byte WRITE_COMPRESSED_CMD = 6; - /** Keep alive ping command. Not used in this implementation */ - public static final byte PING_CMD = 9; - public static final byte STORE_MAX_SIZE_CMD = 10; - public static final byte STORE_SIZE_CMD = 11; - public static final byte STORE_PAGE_SIZE = 12; - public static final byte BULK_FETCH_CMD = 13; - public static final byte UPDATE_DSE = 14; - public static final byte RUN_FDISK = 15; - public static final byte RUN_CLAIM = 16; - public static final byte LIST_VOLUMES = 18; - public static final byte RM_VOLUME = 19; - public static final byte ADD_VOLUME = 20; - public static final byte BATCH_HASH_EXISTS_CMD = 21; - public static final byte FIND_GC_MASTER_CMD = 22; - public static final byte STOP_GC_MASTER_CMD = 23; - public static final byte FIND_VOLUME_OWNER = 24; - public static final byte GET_VOLUME_INFO = 25; - public static final byte BATCH_WRITE_HASH_CMD = 26; - public static final byte SET_GC_SCHEDULE = 27; - public static final byte RUN_CLAIMBF = 28; - public static final byte SEND_BF = 29; -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
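MultiCastFetchChunkCmd, earlier in this hunk, resolves a chunk by trying one candidate holder at a time: it asks soc.getServer(hashlocs, pos) for the pos-th location, unicasts FETCH_CMD to it, and only advances pos when the reply failed or the member was suspected. The same retry loop detached from JGroups; Fetcher is a hypothetical stand-in for the dispatcher call:

import java.io.IOException;
import java.util.List;

// Hypothetical stand-in for "send FETCH_CMD to one holder and return its reply".
interface Fetcher {
    byte[] fetchFrom(String holder, byte[] hash) throws IOException;
}

class ChunkRetry {
    // Walk the candidate holders in order until one of them returns the chunk.
    static byte[] fetch(List<String> holders, byte[] hash, Fetcher fetcher) throws IOException {
        for (String holder : holders) {
            try {
                byte[] chunk = fetcher.fetchFrom(holder, hash);
                if (chunk != null)
                    return chunk;
            } catch (IOException e) {
                // a failed holder is treated like a suspected member: try the next one
            }
        }
        throw new IOException("no holder returned the requested chunk");
    }
}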
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +/** + * + * @author Sam Silverberg These are the commands that are sent by the client to + * the chunk store. The command is sent as the first byte in a command + * request. A typical client request is as follows : + * + * |command type (1b)|length of hash (2b)|md5 or sha hash (lenghth of + * hash)| command specific data (variable length)| + * + */ + +public class NetworkCMDS { + /** Fetch a chunk of data from the chunk store */ + public static final byte FETCH_CMD = 0; + /** See if a hash already exists in the chunk store */ + public static final byte HASH_EXISTS_CMD = 1; + /** write a chunk to the chunk store **/ + public static final byte WRITE_HASH_CMD = 2; + /** Close the client thread used for this TCP connection */ + public static final byte QUIT_CMD = 3; + /** Claim that the client is still using the hash in question */ + // public static final byte CLAIM_HASH = 4; + /** + * Fetch a chunk and request that it is compressed before transmitting to + * the client. The data will be compressed by the chunk store before it is + * sent to the client. + */ + public static final byte FETCH_COMPRESSED_CMD = 5; + /** + * Write a compressed chunk to the chunk server. The data will be compressed + * by the client before it is sent. + */ + public static final byte WRITE_COMPRESSED_CMD = 6; + /** Keep alive ping command. 
Not used in this implementation */ + public static final byte PING_CMD = 9; + public static final byte STORE_MAX_SIZE_CMD = 10; + public static final byte STORE_SIZE_CMD = 11; + public static final byte STORE_PAGE_SIZE = 12; + public static final byte BULK_FETCH_CMD = 13; + public static final byte UPDATE_DSE = 14; + public static final byte RUN_FDISK = 15; + public static final byte RUN_CLAIM = 16; + public static final byte LIST_VOLUMES = 18; + public static final byte RM_VOLUME = 19; + public static final byte ADD_VOLUME = 20; + public static final byte BATCH_HASH_EXISTS_CMD = 21; + public static final byte FIND_GC_MASTER_CMD = 22; + public static final byte STOP_GC_MASTER_CMD = 23; + public static final byte FIND_VOLUME_OWNER = 24; + public static final byte GET_VOLUME_INFO = 25; + public static final byte BATCH_WRITE_HASH_CMD = 26; + public static final byte SET_GC_SCHEDULE = 27; + public static final byte RUN_CLAIMBF = 28; + public static final byte SEND_BF = 29; +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/RMVolCmd.java b/src/org/opendedup/sdfs/cluster/cmds/RMVolCmd.java index e9b23fac4..993352b92 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/RMVolCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/RMVolCmd.java @@ -1,62 +1,80 @@ -package org.opendedup.sdfs.cluster.cmds; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import org.jgroups.Message; -import org.jgroups.blocks.RequestOptions; -import org.jgroups.blocks.ResponseMode; -import org.jgroups.util.Rsp; -import org.jgroups.util.RspList; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.cluster.ClusterSocket; - -public class RMVolCmd implements IOPeerCmd { - boolean exists = false; - RequestOptions opts = null; - // private ArrayList results = new ArrayList(); - private String volume; - - public RMVolCmd(String volume) { - this.volume = volume; - opts = new RequestOptions(ResponseMode.GET_ALL, 0); - - } - - @Override - public void executeCmd(final ClusterSocket soc) throws IOException { - byte[] vb = this.volume.getBytes(); - byte[] b = new byte[1 + 4 + vb.length]; - ByteBuffer buf = ByteBuffer.wrap(b); - buf.put(NetworkCMDS.RM_VOLUME); - buf.putInt(vb.length); - buf.put(vb); - try { - - RspList lst = soc.getDispatcher().castMessage(null, - new Message(null, null, buf.array()), opts); - for (Rsp rsp : lst) { - if (rsp.hasException()) { - SDFSLogger.getLog().error( - "Remove Volume from Cache Exception thrown for " - + rsp.getSender()); - throw rsp.getException(); - } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { - SDFSLogger.getLog().error( - "Remove Volume from Cache Host unreachable for " - + rsp.getSender()); - } - - } - } catch (Throwable e) { - SDFSLogger.getLog().error("error while running fdisk", e); - throw new IOException(e); - } - } - - @Override - public byte getCmdID() { - return NetworkCMDS.RM_VOLUME; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.jgroups.Message; +import org.jgroups.blocks.RequestOptions; +import org.jgroups.blocks.ResponseMode; +import org.jgroups.util.Rsp; +import org.jgroups.util.RspList; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.cluster.ClusterSocket; + +public class RMVolCmd implements IOPeerCmd { + boolean exists = false; + RequestOptions opts = null; + // private ArrayList results = new ArrayList(); + private String volume; + + public RMVolCmd(String volume) { + this.volume = volume; + opts = new RequestOptions(ResponseMode.GET_ALL, 0); + + } + + @Override + public void executeCmd(final ClusterSocket soc) throws IOException { + byte[] vb = this.volume.getBytes(); + byte[] b = new byte[1 + 4 + vb.length]; + ByteBuffer buf = ByteBuffer.wrap(b); + buf.put(NetworkCMDS.RM_VOLUME); + buf.putInt(vb.length); + buf.put(vb); + try { + + RspList lst = soc.getDispatcher().castMessage(null, + new Message(null, null, buf.array()), opts); + for (Rsp rsp : lst) { + if (rsp.hasException()) { + SDFSLogger.getLog().error( + "Remove Volume from Cache Exception thrown for " + + rsp.getSender()); + throw rsp.getException(); + } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { + SDFSLogger.getLog().error( + "Remove Volume from Cache Host unreachable for " + + rsp.getSender()); + } + + } + } catch (Throwable e) { + SDFSLogger.getLog().error("error while running fdisk", e); + throw new IOException(e); + } + } + + @Override + public byte getCmdID() { + return NetworkCMDS.RM_VOLUME; + } + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/RedundancyNotMetException.java b/src/org/opendedup/sdfs/cluster/cmds/RedundancyNotMetException.java index 087110ff1..2bace661e 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/RedundancyNotMetException.java +++ b/src/org/opendedup/sdfs/cluster/cmds/RedundancyNotMetException.java @@ -1,18 +1,36 @@ -package org.opendedup.sdfs.cluster.cmds; - -public class RedundancyNotMetException extends ClusterCmdException { - - /** - * - */ - private static final long serialVersionUID = -2523934501123942317L; - public byte[] hashloc = null; - - public RedundancyNotMetException(int written, int requirement, - byte[] hashloc) { - super("Redundancy Requirement not met [" + written - + "] copies written and [" + requirement + "] copies required."); - this.hashloc = hashloc; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
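RMVolCmd above, like SetGCScheduleCmd further down, frames its request as a one-byte command id followed by a four-byte length and a string payload, broadcasts it to every peer, and then checks each Rsp for exceptions and for suspected or unreachable members. A compact sketch of that pattern, using the JGroups types these commands already import (the class and method names here are hypothetical):

import java.io.IOException;
import java.nio.ByteBuffer;

import org.jgroups.util.Rsp;
import org.jgroups.util.RspList;

// Illustrative only: mirrors the request framing and response handling shared by
// RMVolCmd and SetGCScheduleCmd; the command ids come from NetworkCMDS.
final class PeerCmdSketch {
    static byte[] frame(byte cmdId, String payload) {
        byte[] vb = payload.getBytes();
        ByteBuffer buf = ByteBuffer.allocate(1 + 4 + vb.length);
        buf.put(cmdId);        // command id, e.g. NetworkCMDS.RM_VOLUME
        buf.putInt(vb.length); // payload length, 4 bytes
        buf.put(vb);           // payload, e.g. a volume name or a cron schedule
        return buf.array();
    }

    static void checkResponses(RspList<Object> lst) throws IOException {
        for (Rsp<Object> rsp : lst) {
            if (rsp.hasException())
                throw new IOException(rsp.getException());
            // the real commands only log this case and keep going; failing fast
            // here is a deliberate simplification for the sketch
            if (rsp.wasSuspected() || rsp.wasUnreachable())
                throw new IOException("peer unreachable: " + rsp.getSender());
        }
    }
}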
+ *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +public class RedundancyNotMetException extends ClusterCmdException { + + /** + * + */ + private static final long serialVersionUID = -2523934501123942317L; + public byte[] hashloc = null; + + public RedundancyNotMetException(int written, int requirement, + byte[] hashloc) { + super("Redundancy Requirement not met [" + written + + "] copies written and [" + requirement + "] copies required."); + this.hashloc = hashloc; + } + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/SendBloomFilterCmd.java b/src/org/opendedup/sdfs/cluster/cmds/SendBloomFilterCmd.java index dff6380b1..7dbedb7f5 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/SendBloomFilterCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/SendBloomFilterCmd.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.cluster.cmds; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/cluster/cmds/SetGCScheduleCmd.java b/src/org/opendedup/sdfs/cluster/cmds/SetGCScheduleCmd.java index da75bad25..1ffa30ab2 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/SetGCScheduleCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/SetGCScheduleCmd.java @@ -1,61 +1,79 @@ -package org.opendedup.sdfs.cluster.cmds; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import org.jgroups.Message; -import org.jgroups.blocks.RequestOptions; -import org.jgroups.blocks.ResponseMode; -import org.jgroups.util.Rsp; -import org.jgroups.util.RspList; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.cluster.ClusterSocket; - -public class SetGCScheduleCmd implements IOPeerCmd { - boolean exists = false; - RequestOptions opts = null; - // private ArrayList results = new ArrayList(); - private String schedule; - - public SetGCScheduleCmd(String schedule) { - this.schedule = schedule; - opts = new RequestOptions(ResponseMode.GET_ALL, Main.ClusterRSPTimeout); - - } - - @Override - public void executeCmd(final ClusterSocket soc) throws IOException { - byte[] vb = this.schedule.getBytes(); - byte[] b = new byte[1 + 4 + vb.length]; - ByteBuffer buf = ByteBuffer.wrap(b); - buf.put(NetworkCMDS.SET_GC_SCHEDULE); - buf.putInt(vb.length); - buf.put(vb); - try { - RspList lst = soc.getDispatcher().castMessage(null, - new Message(null, null, buf.array()), opts); - for (Rsp rsp : lst) { - if (rsp.hasException()) { - SDFSLogger.getLog().error( - "Set Schedule Exception thrown for " - + rsp.getSender()); - throw rsp.getException(); - } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { - 
SDFSLogger.getLog().error( - "Set Schedule unreachable for " + rsp.getSender()); - } - - } - } catch (Throwable e) { - SDFSLogger.getLog().error("error while running set schedule", e); - throw new IOException(e); - } - } - - @Override - public byte getCmdID() { - return NetworkCMDS.SET_GC_SCHEDULE; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.jgroups.Message; +import org.jgroups.blocks.RequestOptions; +import org.jgroups.blocks.ResponseMode; +import org.jgroups.util.Rsp; +import org.jgroups.util.RspList; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.cluster.ClusterSocket; + +public class SetGCScheduleCmd implements IOPeerCmd { + boolean exists = false; + RequestOptions opts = null; + // private ArrayList results = new ArrayList(); + private String schedule; + + public SetGCScheduleCmd(String schedule) { + this.schedule = schedule; + opts = new RequestOptions(ResponseMode.GET_ALL, Main.ClusterRSPTimeout); + + } + + @Override + public void executeCmd(final ClusterSocket soc) throws IOException { + byte[] vb = this.schedule.getBytes(); + byte[] b = new byte[1 + 4 + vb.length]; + ByteBuffer buf = ByteBuffer.wrap(b); + buf.put(NetworkCMDS.SET_GC_SCHEDULE); + buf.putInt(vb.length); + buf.put(vb); + try { + RspList lst = soc.getDispatcher().castMessage(null, + new Message(null, null, buf.array()), opts); + for (Rsp rsp : lst) { + if (rsp.hasException()) { + SDFSLogger.getLog().error( + "Set Schedule Exception thrown for " + + rsp.getSender()); + throw rsp.getException(); + } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { + SDFSLogger.getLog().error( + "Set Schedule unreachable for " + rsp.getSender()); + } + + } + } catch (Throwable e) { + SDFSLogger.getLog().error("error while running set schedule", e); + throw new IOException(e); + } + } + + @Override + public byte getCmdID() { + return NetworkCMDS.SET_GC_SCHEDULE; + } + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/StopGCMasterCmd.java b/src/org/opendedup/sdfs/cluster/cmds/StopGCMasterCmd.java index 26d941628..8d35fda7e 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/StopGCMasterCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/StopGCMasterCmd.java @@ -1,83 +1,101 @@ -package org.opendedup.sdfs.cluster.cmds; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import org.jgroups.Address; -import org.jgroups.Message; -import org.jgroups.blocks.RequestOptions; -import org.jgroups.blocks.ResponseMode; -import org.jgroups.util.Rsp; -import org.jgroups.util.RspList; -import 
org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.cluster.ClusterSocket; - -public class StopGCMasterCmd implements IOPeerCmd { - boolean exists = false; - RequestOptions opts = null; - private Address gcMaster = null; - - public StopGCMasterCmd() { - opts = new RequestOptions(ResponseMode.GET_ALL, 0); - - } - - @Override - public void executeCmd(final ClusterSocket soc) throws IOException { - byte[] b = new byte[1]; - ByteBuffer buf = ByteBuffer.wrap(b); - buf.put(NetworkCMDS.STOP_GC_MASTER_CMD); - try { - RspList lst = soc.getDispatcher().castMessage(null, - new Message(null, null, buf.array()), opts); - for (Rsp rsp : lst) { - if (rsp.hasException()) { - SDFSLogger.getLog().error( - "STOP_GC_MASTER_CMD Exception thrown for " - + rsp.getSender()); - // throw rsp.getException(); - } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { - SDFSLogger.getLog().error( - "STOP_GC_MASTER_CMD Host unreachable Exception thrown for " - + rsp.getSender()); - // throw new - // IOException("FIND_GC_MASTER_CMD Host unreachable Exception thrown for " - // + rsp.getSender()); - } else { - if (rsp.getValue() != null) { - SDFSLogger.getLog().debug( - "STOP_GC_MASTER_CMD completed for " - + rsp.getSender() + " returned=" - + rsp.getValue()); - boolean m = (Boolean) rsp.getValue(); - if (m) { - if (this.gcMaster != null) - throw new IOException( - "STOP_GC_MASTER_CMD already identified at [" - + gcMaster.toString() - + "] but has also been identified at [" - + rsp.getSender() + "]."); - else - this.gcMaster = rsp.getSender(); - } - - } - } - } - } catch (Throwable e) { - SDFSLogger.getLog().error("error while running STOP_GC_MASTER_CMD", - e); - throw new IOException(e); - } - } - - @Override - public byte getCmdID() { - return NetworkCMDS.STOP_GC_MASTER_CMD; - } - - public Address getResults() { - return this.gcMaster; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.jgroups.Address; +import org.jgroups.Message; +import org.jgroups.blocks.RequestOptions; +import org.jgroups.blocks.ResponseMode; +import org.jgroups.util.Rsp; +import org.jgroups.util.RspList; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.cluster.ClusterSocket; + +public class StopGCMasterCmd implements IOPeerCmd { + boolean exists = false; + RequestOptions opts = null; + private Address gcMaster = null; + + public StopGCMasterCmd() { + opts = new RequestOptions(ResponseMode.GET_ALL, 0); + + } + + @Override + public void executeCmd(final ClusterSocket soc) throws IOException { + byte[] b = new byte[1]; + ByteBuffer buf = ByteBuffer.wrap(b); + buf.put(NetworkCMDS.STOP_GC_MASTER_CMD); + try { + RspList lst = soc.getDispatcher().castMessage(null, + new Message(null, null, buf.array()), opts); + for (Rsp rsp : lst) { + if (rsp.hasException()) { + SDFSLogger.getLog().error( + "STOP_GC_MASTER_CMD Exception thrown for " + + rsp.getSender()); + // throw rsp.getException(); + } else if (rsp.wasSuspected() | rsp.wasUnreachable()) { + SDFSLogger.getLog().error( + "STOP_GC_MASTER_CMD Host unreachable Exception thrown for " + + rsp.getSender()); + // throw new + // IOException("FIND_GC_MASTER_CMD Host unreachable Exception thrown for " + // + rsp.getSender()); + } else { + if (rsp.getValue() != null) { + SDFSLogger.getLog().debug( + "STOP_GC_MASTER_CMD completed for " + + rsp.getSender() + " returned=" + + rsp.getValue()); + boolean m = (Boolean) rsp.getValue(); + if (m) { + if (this.gcMaster != null) + throw new IOException( + "STOP_GC_MASTER_CMD already identified at [" + + gcMaster.toString() + + "] but has also been identified at [" + + rsp.getSender() + "]."); + else + this.gcMaster = rsp.getSender(); + } + + } + } + } + } catch (Throwable e) { + SDFSLogger.getLog().error("error while running STOP_GC_MASTER_CMD", + e); + throw new IOException(e); + } + } + + @Override + public byte getCmdID() { + return NetworkCMDS.STOP_GC_MASTER_CMD; + } + + public Address getResults() { + return this.gcMaster; + } + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/StorageNodesUnavailableException.java b/src/org/opendedup/sdfs/cluster/cmds/StorageNodesUnavailableException.java index d2eff205b..24ab55eec 100644 --- a/src/org/opendedup/sdfs/cluster/cmds/StorageNodesUnavailableException.java +++ b/src/org/opendedup/sdfs/cluster/cmds/StorageNodesUnavailableException.java @@ -1,10 +1,28 @@ -package org.opendedup.sdfs.cluster.cmds; - -public class StorageNodesUnavailableException extends Exception { - - /** - * - */ - private static final long serialVersionUID = -8140104168974474641L; - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster.cmds; + +public class StorageNodesUnavailableException extends Exception { + + /** + * + */ + private static final long serialVersionUID = -8140104168974474641L; + +} diff --git a/src/org/opendedup/sdfs/cluster/cmds/WriteHashCmd.java b/src/org/opendedup/sdfs/cluster/cmds/WriteHashCmd.java index 0285fd867..b82d8639a 100755 --- a/src/org/opendedup/sdfs/cluster/cmds/WriteHashCmd.java +++ b/src/org/opendedup/sdfs/cluster/cmds/WriteHashCmd.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.cluster.cmds; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/cluster/tWeighting.java b/src/org/opendedup/sdfs/cluster/tWeighting.java index 16110b9d8..102cd2ec1 100644 --- a/src/org/opendedup/sdfs/cluster/tWeighting.java +++ b/src/org/opendedup/sdfs/cluster/tWeighting.java @@ -1,62 +1,80 @@ -package org.opendedup.sdfs.cluster; - -import java.util.ArrayList; -import java.util.List; -import java.util.Random; - -class tWeighting { - - int value; - int weighting; - - public tWeighting(int v, int w) { - this.value = v; - this.weighting = w; - } - - public static int weightedRandom(List weightingOptions) { - - // determine sum of all weightings - int total = 0; - for (tWeighting w : weightingOptions) { - total += w.weighting; - } - - // select a random value between 0 and our total - int random = new Random().nextInt(total); - - // loop thru our weightings until we arrive at the correct one - int current = 0; - for (tWeighting w : weightingOptions) { - current += w.weighting; - if (random < current) - return w.value; - } - // shouldn't happen. - return -1; - } - - public static void main(String[] args) { - - List weightings = new ArrayList(); - weightings.add(new tWeighting(0, 10)); - weightings.add(new tWeighting(1, 5)); - weightings.add(new tWeighting(2, 2)); - int zct = 0; - int oct = 0; - int tct = 0; - for (int i = 0; i < 10000; i++) { - int n = weightedRandom(weightings); - if (n == 0) - zct++; - if (n == 1) - oct++; - if (n == 2) - tct++; - } - System.out.println(zct); - System.out.println(oct); - System.out.println(tct); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
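tWeighting.weightedRandom above selects by cumulative weight: it sums all weights, draws a uniform value below that total, and returns the first option whose running total exceeds the draw. For the weights used in its main() method (10, 5 and 2, total 17), a draw below 10 selects 0, a draw below 15 selects 1, and anything else selects 2, so the 10,000-iteration test should print counts of roughly 5,900, 2,900 and 1,200.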
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.cluster; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +class tWeighting { + + int value; + int weighting; + + public tWeighting(int v, int w) { + this.value = v; + this.weighting = w; + } + + public static int weightedRandom(List weightingOptions) { + + // determine sum of all weightings + int total = 0; + for (tWeighting w : weightingOptions) { + total += w.weighting; + } + + // select a random value between 0 and our total + int random = new Random().nextInt(total); + + // loop thru our weightings until we arrive at the correct one + int current = 0; + for (tWeighting w : weightingOptions) { + current += w.weighting; + if (random < current) + return w.value; + } + // shouldn't happen. + return -1; + } + + public static void main(String[] args) { + + List weightings = new ArrayList(); + weightings.add(new tWeighting(0, 10)); + weightings.add(new tWeighting(1, 5)); + weightings.add(new tWeighting(2, 2)); + int zct = 0; + int oct = 0; + int tct = 0; + for (int i = 0; i < 10000; i++) { + int n = weightedRandom(weightings); + if (n == 0) + zct++; + if (n == 1) + oct++; + if (n == 2) + tct++; + } + System.out.println(zct); + System.out.println(oct); + System.out.println(tct); + } + +} diff --git a/src/org/opendedup/sdfs/filestore/cloud/BatchAwsS3ChunkStore.java b/src/org/opendedup/sdfs/filestore/cloud/BatchAwsS3ChunkStore.java index 81812a6db..da8223be2 100644 --- a/src/org/opendedup/sdfs/filestore/cloud/BatchAwsS3ChunkStore.java +++ b/src/org/opendedup/sdfs/filestore/cloud/BatchAwsS3ChunkStore.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
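The only functional change to BatchAwsS3ChunkStore in the hunk just below is in isCheckedOut(): the claim key is now derived from the volumeID argument rather than Main.DSEID, and the method actually probes the object (the old body returned true without ever touching the store), so a missing claim now raises an exception into the existing catch block instead of the method unconditionally reporting the volume as checked out. A minimal sketch of that existence probe, assuming the AWS SDK v1 client the surrounding code appears to use; apart from getObjectMetadata, which the hunk itself calls, the names here are illustrative:

final class ClaimProbeSketch {
    // Return true only if the claim object exists in the bucket.
    static boolean claimExists(com.amazonaws.services.s3.AmazonS3 s3, String bucket, String key) {
        try {
            s3.getObjectMetadata(bucket, key); // throws if the key is absent
            return true;
        } catch (Exception e) {
            return false;
        }
    }
}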
+ *******************************************************************************/ package org.opendedup.sdfs.filestore.cloud; import java.io.BufferedInputStream; @@ -2534,10 +2552,11 @@ public boolean isCheckedOut(String name, long volumeID) throws IOException { String pth = "claims/" + name + "/" - + EncyptUtils.encHashArchiveName(Main.DSEID, + + EncyptUtils.encHashArchiveName(volumeID, Main.chunkStoreEncryptionEnabled); this.s3clientLock.readLock().lock(); try { + s3Service.getObjectMetadata(this.name, pth); return true; } catch (Exception e) { diff --git a/src/org/opendedup/sdfs/filestore/cloud/utils/EncyptUtils.java b/src/org/opendedup/sdfs/filestore/cloud/utils/EncyptUtils.java index f146f767a..3fc5649cf 100644 --- a/src/org/opendedup/sdfs/filestore/cloud/utils/EncyptUtils.java +++ b/src/org/opendedup/sdfs/filestore/cloud/utils/EncyptUtils.java @@ -1,135 +1,153 @@ -package org.opendedup.sdfs.filestore.cloud.utils; - -import java.io.IOException; - -import org.opendedup.util.EncryptUtils; - -import com.google.common.io.BaseEncoding; - -public class EncyptUtils { - public static boolean baseEncode = false; - - public static String encString(String hashes, boolean enc) - throws IOException { - if (baseEncode) - return hashes; - if (enc) { - byte[] encH = EncryptUtils.encryptCBC(hashes.getBytes()); - if (baseEncode) - return BaseEncoding.base64().encode(encH); - else - return BaseEncoding.base64Url().encode(encH); - - } else { - return BaseEncoding.base64Url().encode(hashes.getBytes()); - - } - } - - public static String decString(String fname, boolean enc) - throws IOException { - if (baseEncode) - return fname; - if (enc) { - byte[] encH; - if (baseEncode) - encH = BaseEncoding.base64().decode(fname); - else - encH = BaseEncoding.base64Url().decode(fname); - String st = new String(EncryptUtils.decryptCBC(encH)); - return st; - } else { - - byte[] encH; - encH = BaseEncoding.base64Url().decode(fname); - return new String(encH); - } - } - - public static String encHashArchiveName(long id, boolean enc) - throws IOException { - if (baseEncode) - return Long.toString(id); - if (enc) { - byte[] encH = EncryptUtils.encryptCBC(Long.toString(id).getBytes()); - return BaseEncoding.base64Url().encode(encH); - } else { - return BaseEncoding.base64Url() - .encode(Long.toString(id).getBytes()); - } - } - - public static long decHashArchiveName(String fname, boolean enc) - throws IOException { - if (baseEncode) - return Long.parseLong(new String(fname)); - if (enc) { - byte[] encH; - encH = BaseEncoding.base64Url().decode(fname); - String st = new String(EncryptUtils.decryptCBC(encH)); - return Long.parseLong(st); - } else { - byte[] encH; - - encH = BaseEncoding.base64Url().decode(fname); - return Long.parseLong(new String(encH)); - } - } - - public static String encLong(long id, boolean enc) throws IOException { - if (enc) { - byte[] encH = EncryptUtils.encryptCBC(Long.toString(id).getBytes()); - return BaseEncoding.base64Url().encode(encH); - } else { - return Long.toString(id); - } - } - - public static long decLong(String fname, boolean enc) throws IOException { - if (enc) { - byte[] encH = BaseEncoding.base64Url().decode(fname); - String st = new String(EncryptUtils.decryptCBC(encH)); - return Long.parseLong(st); - } else { - return Long.parseLong(fname); - } - } - - public static String encInt(int id, boolean enc) throws IOException { - if (enc) { - byte[] encH = EncryptUtils.encryptCBC(Integer.toString(id) - .getBytes()); - return BaseEncoding.base64Url().encode(encH); - } else { - return 
Integer.toString(id); - } - } - - public static int decInt(String fname, boolean enc) throws IOException { - if (enc) { - byte[] encH = BaseEncoding.base64Url().decode(fname); - String st = new String(EncryptUtils.decryptCBC(encH)); - return Integer.parseInt(st); - } else { - return Integer.parseInt(fname); - } - } - - public static String encBar(byte[] b, boolean enc) throws IOException { - if (enc) { - byte[] encH = EncryptUtils.encryptCBC(b); - return BaseEncoding.base64Url().encode(encH); - } else { - return BaseEncoding.base64Url().encode(b); - } - } - - public static byte[] decBar(String fname, boolean enc) throws IOException { - if (enc) { - byte[] encH = BaseEncoding.base64Url().decode(fname); - return EncryptUtils.decryptCBC(encH); - } else { - return BaseEncoding.base64Url().decode(fname); - } - } +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.filestore.cloud.utils; + +import java.io.IOException; + +import org.opendedup.util.EncryptUtils; + +import com.google.common.io.BaseEncoding; + +public class EncyptUtils { + public static boolean baseEncode = false; + + public static String encString(String hashes, boolean enc) + throws IOException { + if (baseEncode) + return hashes; + if (enc) { + byte[] encH = EncryptUtils.encryptCBC(hashes.getBytes()); + if (baseEncode) + return BaseEncoding.base64().encode(encH); + else + return BaseEncoding.base64Url().encode(encH); + + } else { + return BaseEncoding.base64Url().encode(hashes.getBytes()); + + } + } + + public static String decString(String fname, boolean enc) + throws IOException { + if (baseEncode) + return fname; + if (enc) { + byte[] encH; + if (baseEncode) + encH = BaseEncoding.base64().decode(fname); + else + encH = BaseEncoding.base64Url().decode(fname); + String st = new String(EncryptUtils.decryptCBC(encH)); + return st; + } else { + + byte[] encH; + encH = BaseEncoding.base64Url().decode(fname); + return new String(encH); + } + } + + public static String encHashArchiveName(long id, boolean enc) + throws IOException { + if (baseEncode) + return Long.toString(id); + if (enc) { + byte[] encH = EncryptUtils.encryptCBC(Long.toString(id).getBytes()); + return BaseEncoding.base64Url().encode(encH); + } else { + return BaseEncoding.base64Url() + .encode(Long.toString(id).getBytes()); + } + } + + public static long decHashArchiveName(String fname, boolean enc) + throws IOException { + if (baseEncode) + return Long.parseLong(new String(fname)); + if (enc) { + byte[] encH; + encH = BaseEncoding.base64Url().decode(fname); + String st = new String(EncryptUtils.decryptCBC(encH)); + return Long.parseLong(st); + } else { + byte[] encH; + + encH = 
BaseEncoding.base64Url().decode(fname); + return Long.parseLong(new String(encH)); + } + } + + public static String encLong(long id, boolean enc) throws IOException { + if (enc) { + byte[] encH = EncryptUtils.encryptCBC(Long.toString(id).getBytes()); + return BaseEncoding.base64Url().encode(encH); + } else { + return Long.toString(id); + } + } + + public static long decLong(String fname, boolean enc) throws IOException { + if (enc) { + byte[] encH = BaseEncoding.base64Url().decode(fname); + String st = new String(EncryptUtils.decryptCBC(encH)); + return Long.parseLong(st); + } else { + return Long.parseLong(fname); + } + } + + public static String encInt(int id, boolean enc) throws IOException { + if (enc) { + byte[] encH = EncryptUtils.encryptCBC(Integer.toString(id) + .getBytes()); + return BaseEncoding.base64Url().encode(encH); + } else { + return Integer.toString(id); + } + } + + public static int decInt(String fname, boolean enc) throws IOException { + if (enc) { + byte[] encH = BaseEncoding.base64Url().decode(fname); + String st = new String(EncryptUtils.decryptCBC(encH)); + return Integer.parseInt(st); + } else { + return Integer.parseInt(fname); + } + } + + public static String encBar(byte[] b, boolean enc) throws IOException { + if (enc) { + byte[] encH = EncryptUtils.encryptCBC(b); + return BaseEncoding.base64Url().encode(encH); + } else { + return BaseEncoding.base64Url().encode(b); + } + } + + public static byte[] decBar(String fname, boolean enc) throws IOException { + if (enc) { + byte[] encH = BaseEncoding.base64Url().decode(fname); + return EncryptUtils.decryptCBC(encH); + } else { + return BaseEncoding.base64Url().decode(fname); + } + } } \ No newline at end of file diff --git a/src/org/opendedup/sdfs/filestore/cloud/utils/FileUtils.java b/src/org/opendedup/sdfs/filestore/cloud/utils/FileUtils.java index f5eaa117b..831088809 100644 --- a/src/org/opendedup/sdfs/filestore/cloud/utils/FileUtils.java +++ b/src/org/opendedup/sdfs/filestore/cloud/utils/FileUtils.java @@ -1,136 +1,154 @@ -package org.opendedup.sdfs.filestore.cloud.utils; - -import java.io.File; - -import java.io.FileInputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.nio.file.LinkOption; -import java.nio.file.Path; -import java.nio.file.attribute.UserDefinedFileAttributeView; -import java.security.MessageDigest; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.opendedup.util.OSValidator; - -import org.opendedup.sdfs.filestore.cloud.utils.EncyptUtils; - -public class FileUtils { - // private static final String STATTR="DSHSTR"; - private static final String BATTR = "DSHBAR"; - private static final String IATTR = "DSHINT"; - private static final String LATTR = "DSHLNG"; - - public static HashMap getFileMetaData(File file, - boolean encrypt) throws IOException { - HashMap md = new HashMap(); - if (OSValidator.isUnix()) { - boolean isSymbolicLink = Files.isSymbolicLink(file.toPath()); - if (isSymbolicLink) - return md; - else { - Path p = file.toPath(); - int uid = (Integer) Files.getAttribute(p, "unix:uid"); - int gid = (Integer) Files.getAttribute(p, "unix:gid"); - int mode = (Integer) Files.getAttribute(p, "unix:mode"); - long mtime = file.lastModified(); - md.put(IATTR + "uid", EncyptUtils.encInt(uid, encrypt)); - md.put(IATTR + "gid", EncyptUtils.encInt(gid, encrypt)); - md.put(IATTR + "mode", EncyptUtils.encInt(mode, encrypt)); - md.put(LATTR + "mtime", 
EncyptUtils.encLong(mtime, encrypt)); - UserDefinedFileAttributeView view = Files.getFileAttributeView( - p, UserDefinedFileAttributeView.class); - List l = view.list(); - for (String s : l) { - byte[] b = new byte[view.size(s)]; - ByteBuffer bf = ByteBuffer.wrap(b); - view.read(s, bf); - md.put(BATTR + s, EncyptUtils.encBar(b, encrypt)); - } - return md; - } - } else { - return md; - } - - } - - public static boolean fileValid(File f, byte[] hash) throws IOException { - try (FileInputStream inputStream = new FileInputStream(f)) { - MessageDigest digest = MessageDigest.getInstance("MD5"); - - byte[] bytesBuffer = new byte[1024]; - int bytesRead = -1; - - while ((bytesRead = inputStream.read(bytesBuffer)) != -1) { - digest.update(bytesBuffer, 0, bytesRead); - } - byte[] b = digest.digest(); - return Arrays.equals(b, hash); - - // initialize blob properties and assign md5 content - // generated. - - } catch (Exception ex) { - throw new IOException("Could not generate hash from file " - + f.getPath(), ex); - } - } - - public static void setFileMetaData(File f, Map md, - boolean encrypt) throws IOException { - if (OSValidator.isUnix()) { - boolean isSymbolicLink = Files.isSymbolicLink(f.toPath()); - if (isSymbolicLink) - return; - else { - Set keys = md.keySet(); - Path p = f.toPath(); - UserDefinedFileAttributeView view = Files.getFileAttributeView( - p, UserDefinedFileAttributeView.class); - for (String s : keys) { - if (s.startsWith(BATTR)) { - byte[] av = EncyptUtils.decBar(md.get(s), encrypt); - view.write(s.substring(BATTR.length()), - ByteBuffer.wrap(av)); - } else if (s.startsWith(IATTR)) { - String nm = s.substring(IATTR.length()); - if (nm.equalsIgnoreCase("uid")) - Files.setAttribute( - p, - "unix:uid", - Integer.valueOf(EncyptUtils.decInt( - md.get(s), encrypt)), - LinkOption.NOFOLLOW_LINKS); - if (nm.equalsIgnoreCase("gid")) - Files.setAttribute( - p, - "unix:gid", - Integer.valueOf(EncyptUtils.decInt( - md.get(s), encrypt)), - LinkOption.NOFOLLOW_LINKS); - if (nm.equalsIgnoreCase("mode")) - Files.setAttribute( - p, - "unix:mode", - Integer.valueOf(EncyptUtils.decInt( - md.get(s), encrypt)), - LinkOption.NOFOLLOW_LINKS); - - } else if (s.startsWith(LATTR)) { - String nm = s.substring(LATTR.length()); - if (nm.equalsIgnoreCase("mtime")) { - f.setLastModified(EncyptUtils.decLong(md.get(s), - encrypt)); - } - } - } - } - } - } -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
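The EncyptUtils helpers above turn ids, strings and byte arrays into names that are safe to use as cloud object keys: with encryption enabled the value is CBC-encrypted and then base64url-encoded, otherwise it is base64url-encoded or written as a plain decimal string, and each enc* method has a matching dec* that reverses it. A small usage sketch of the round trip (the class name is illustrative; the boolean mirrors the Main.chunkStoreEncryptionEnabled flag the callers in this diff pass in):

import org.opendedup.sdfs.filestore.cloud.utils.EncyptUtils;

public class EncNameRoundTrip {
    public static void main(String[] args) throws Exception {
        long id = 12345L;
        // with enc == false the id is simply base64url-encoded
        String name = EncyptUtils.encHashArchiveName(id, false);
        long back = EncyptUtils.decHashArchiveName(name, false);
        System.out.println(name + " -> " + back); // prints the encoded name and 12345
    }
}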
+ *******************************************************************************/ +package org.opendedup.sdfs.filestore.cloud.utils; + +import java.io.File; + +import java.io.FileInputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.LinkOption; +import java.nio.file.Path; +import java.nio.file.attribute.UserDefinedFileAttributeView; +import java.security.MessageDigest; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.opendedup.util.OSValidator; + +import org.opendedup.sdfs.filestore.cloud.utils.EncyptUtils; + +public class FileUtils { + // private static final String STATTR="DSHSTR"; + private static final String BATTR = "DSHBAR"; + private static final String IATTR = "DSHINT"; + private static final String LATTR = "DSHLNG"; + + public static HashMap getFileMetaData(File file, + boolean encrypt) throws IOException { + HashMap md = new HashMap(); + if (OSValidator.isUnix()) { + boolean isSymbolicLink = Files.isSymbolicLink(file.toPath()); + if (isSymbolicLink) + return md; + else { + Path p = file.toPath(); + int uid = (Integer) Files.getAttribute(p, "unix:uid"); + int gid = (Integer) Files.getAttribute(p, "unix:gid"); + int mode = (Integer) Files.getAttribute(p, "unix:mode"); + long mtime = file.lastModified(); + md.put(IATTR + "uid", EncyptUtils.encInt(uid, encrypt)); + md.put(IATTR + "gid", EncyptUtils.encInt(gid, encrypt)); + md.put(IATTR + "mode", EncyptUtils.encInt(mode, encrypt)); + md.put(LATTR + "mtime", EncyptUtils.encLong(mtime, encrypt)); + UserDefinedFileAttributeView view = Files.getFileAttributeView( + p, UserDefinedFileAttributeView.class); + List l = view.list(); + for (String s : l) { + byte[] b = new byte[view.size(s)]; + ByteBuffer bf = ByteBuffer.wrap(b); + view.read(s, bf); + md.put(BATTR + s, EncyptUtils.encBar(b, encrypt)); + } + return md; + } + } else { + return md; + } + + } + + public static boolean fileValid(File f, byte[] hash) throws IOException { + try (FileInputStream inputStream = new FileInputStream(f)) { + MessageDigest digest = MessageDigest.getInstance("MD5"); + + byte[] bytesBuffer = new byte[1024]; + int bytesRead = -1; + + while ((bytesRead = inputStream.read(bytesBuffer)) != -1) { + digest.update(bytesBuffer, 0, bytesRead); + } + byte[] b = digest.digest(); + return Arrays.equals(b, hash); + + // initialize blob properties and assign md5 content + // generated. 
+ + } catch (Exception ex) { + throw new IOException("Could not generate hash from file " + + f.getPath(), ex); + } + } + + public static void setFileMetaData(File f, Map md, + boolean encrypt) throws IOException { + if (OSValidator.isUnix()) { + boolean isSymbolicLink = Files.isSymbolicLink(f.toPath()); + if (isSymbolicLink) + return; + else { + Set keys = md.keySet(); + Path p = f.toPath(); + UserDefinedFileAttributeView view = Files.getFileAttributeView( + p, UserDefinedFileAttributeView.class); + for (String s : keys) { + if (s.startsWith(BATTR)) { + byte[] av = EncyptUtils.decBar(md.get(s), encrypt); + view.write(s.substring(BATTR.length()), + ByteBuffer.wrap(av)); + } else if (s.startsWith(IATTR)) { + String nm = s.substring(IATTR.length()); + if (nm.equalsIgnoreCase("uid")) + Files.setAttribute( + p, + "unix:uid", + Integer.valueOf(EncyptUtils.decInt( + md.get(s), encrypt)), + LinkOption.NOFOLLOW_LINKS); + if (nm.equalsIgnoreCase("gid")) + Files.setAttribute( + p, + "unix:gid", + Integer.valueOf(EncyptUtils.decInt( + md.get(s), encrypt)), + LinkOption.NOFOLLOW_LINKS); + if (nm.equalsIgnoreCase("mode")) + Files.setAttribute( + p, + "unix:mode", + Integer.valueOf(EncyptUtils.decInt( + md.get(s), encrypt)), + LinkOption.NOFOLLOW_LINKS); + + } else if (s.startsWith(LATTR)) { + String nm = s.substring(LATTR.length()); + if (nm.equalsIgnoreCase("mtime")) { + f.setLastModified(EncyptUtils.decLong(md.get(s), + encrypt)); + } + } + } + } + } + } +} diff --git a/src/org/opendedup/sdfs/filestore/gc/ClusteredPFullGC.java b/src/org/opendedup/sdfs/filestore/gc/ClusteredPFullGC.java index c5b42605b..fe5b3d9ff 100644 --- a/src/org/opendedup/sdfs/filestore/gc/ClusteredPFullGC.java +++ b/src/org/opendedup/sdfs/filestore/gc/ClusteredPFullGC.java @@ -1,87 +1,105 @@ -package org.opendedup.sdfs.filestore.gc; - -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.notification.SDFSEvent; -import org.opendedup.sdfs.servers.HCServiceProxy; - -public class ClusteredPFullGC implements GCControllerImpl { - - double prevPFull = 0; - double nextPFull = .05; - - public ClusteredPFullGC() { - this.prevPFull = calcPFull(); - this.nextPFull = Math.ceil(this.prevPFull * 10) / 10; - - SDFSLogger.getLog().info( - "Current DSE Percentage Full is [" + this.prevPFull - + "] will run GC when [" + this.nextPFull + "]"); - } - - @Override - public void runGC() { - if (this.calcPFull() >= this.nextPFull) { - SDFSEvent task = SDFSEvent - .gcInfoEvent("Percentage Full Exceeded : Running Orphaned Block Collection"); - task.longMsg = "Running Garbage Collection because percentage full is " - + this.calcPFull() + " and threshold is " + this.nextPFull; - try { - ManualGC.clearChunks(); - this.prevPFull = calcPFull(); - this.nextPFull = this.calcNxtRun(); - SDFSLogger.getLog() - .info("Current DSE Percentage Full is [" - + this.prevPFull + "] will run GC when [" - + this.nextPFull + "]"); - task.endEvent("Garbage Collection Succeeded"); - task.shortMsg = "Garbage Collection Succeeded"; - task.longMsg = "Current DSE Percentage Full is [" - + this.prevPFull + "] will run GC when [" - + this.nextPFull + "]"; - } catch (Exception e) { - SDFSLogger.getLog().error("Garbage Collection failed", e); - task.endEvent( - "Garbage Collection failed because " + e.getMessage(), - SDFSEvent.ERROR); - } - } - - } - - private double calcPFull() { - double pFull = 0; - if (HCServiceProxy.getSize() > 0) { - pFull = (double) HCServiceProxy.getSize() - / (double) HCServiceProxy.getMaxSize(); - } - return pFull; - } - - private double 
calcNxtRun() { - double next = this.calcPFull(); - if (next >= .92) - return .90; - else { - next = Math.ceil(next * 10.0) / 10; - } - if (next == 0) - next = .1; - return next; - } - - @Override - public void reCalc() { - this.prevPFull = calcPFull(); - this.nextPFull = this.calcNxtRun(); - SDFSLogger.getLog().debug( - "Current DSE Percentage Full is [" + this.prevPFull - + "] will run GC when [" + this.nextPFull + "]"); - } - - public static void main(String[] args) { - double num = 0.800338958916741818D; - - System.out.println(Math.ceil(num * 10.0) / 10); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.filestore.gc; + +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.notification.SDFSEvent; +import org.opendedup.sdfs.servers.HCServiceProxy; + +public class ClusteredPFullGC implements GCControllerImpl { + + double prevPFull = 0; + double nextPFull = .05; + + public ClusteredPFullGC() { + this.prevPFull = calcPFull(); + this.nextPFull = Math.ceil(this.prevPFull * 10) / 10; + + SDFSLogger.getLog().info( + "Current DSE Percentage Full is [" + this.prevPFull + + "] will run GC when [" + this.nextPFull + "]"); + } + + @Override + public void runGC() { + if (this.calcPFull() >= this.nextPFull) { + SDFSEvent task = SDFSEvent + .gcInfoEvent("Percentage Full Exceeded : Running Orphaned Block Collection"); + task.longMsg = "Running Garbage Collection because percentage full is " + + this.calcPFull() + " and threshold is " + this.nextPFull; + try { + ManualGC.clearChunks(); + this.prevPFull = calcPFull(); + this.nextPFull = this.calcNxtRun(); + SDFSLogger.getLog() + .info("Current DSE Percentage Full is [" + + this.prevPFull + "] will run GC when [" + + this.nextPFull + "]"); + task.endEvent("Garbage Collection Succeeded"); + task.shortMsg = "Garbage Collection Succeeded"; + task.longMsg = "Current DSE Percentage Full is [" + + this.prevPFull + "] will run GC when [" + + this.nextPFull + "]"; + } catch (Exception e) { + SDFSLogger.getLog().error("Garbage Collection failed", e); + task.endEvent( + "Garbage Collection failed because " + e.getMessage(), + SDFSEvent.ERROR); + } + } + + } + + private double calcPFull() { + double pFull = 0; + if (HCServiceProxy.getSize() > 0) { + pFull = (double) HCServiceProxy.getSize() + / (double) HCServiceProxy.getMaxSize(); + } + return pFull; + } + + private double calcNxtRun() { + double next = this.calcPFull(); + if (next >= .92) + return .90; + else { + next = Math.ceil(next * 10.0) / 10; + } + if (next == 0) + next = .1; + return next; + } + + @Override + public void reCalc() { + this.prevPFull = calcPFull(); + this.nextPFull = this.calcNxtRun(); + 
SDFSLogger.getLog().debug( + "Current DSE Percentage Full is [" + this.prevPFull + + "] will run GC when [" + this.nextPFull + "]"); + } + + public static void main(String[] args) { + double num = 0.800338958916741818D; + + System.out.println(Math.ceil(num * 10.0) / 10); + } + +} diff --git a/src/org/opendedup/sdfs/filestore/gc/GCControllerImpl.java b/src/org/opendedup/sdfs/filestore/gc/GCControllerImpl.java index f9acbb0f6..329007c34 100644 --- a/src/org/opendedup/sdfs/filestore/gc/GCControllerImpl.java +++ b/src/org/opendedup/sdfs/filestore/gc/GCControllerImpl.java @@ -1,9 +1,27 @@ -package org.opendedup.sdfs.filestore.gc; - -public interface GCControllerImpl { - - public void runGC(); - - public void reCalc(); - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.filestore.gc; + +public interface GCControllerImpl { + + public void runGC(); + + public void reCalc(); + +} diff --git a/src/org/opendedup/sdfs/filestore/gc/GCJob.java b/src/org/opendedup/sdfs/filestore/gc/GCJob.java index e72500114..e633607c8 100644 --- a/src/org/opendedup/sdfs/filestore/gc/GCJob.java +++ b/src/org/opendedup/sdfs/filestore/gc/GCJob.java @@ -1,33 +1,51 @@ -package org.opendedup.sdfs.filestore.gc; - -import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; - -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.notification.SDFSEvent; -import org.quartz.Job; -import org.quartz.JobExecutionContext; -import org.quartz.JobExecutionException; - -public class GCJob implements Job { - @Override - public void execute(JobExecutionContext arg0) throws JobExecutionException { - WriteLock l = GCMain.gclock.writeLock(); - l.lock(); - try { - SDFSEvent task = SDFSEvent - .gcInfoEvent("Running Scheduled Volume Garbage Collection"); - try { - ManualGC.clearChunks(); - task.endEvent("Garbage Collection Succeeded"); - } catch (Exception e) { - SDFSLogger.getLog().error("Garbage Collection failed", e); - task.endEvent("Garbage Collection failed", SDFSEvent.ERROR, e); - } - } catch (Exception e) { - SDFSLogger.getLog().warn("unable to finish executing fdisk", e); - } finally { - l.unlock(); - } - - } -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
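ClusteredPFullGC above triggers collection on tenth-of-capacity boundaries: calcPFull() is the DSE size divided by its maximum size, and the next threshold is that ratio rounded up to one decimal place, which is what its small main() method demonstrates (Math.ceil(0.8003... * 10) / 10 yields 0.9). After a run, calcNxtRun() recomputes the threshold the same way, except that a fill ratio at or above 92 % pins the threshold at 0.90 so collection keeps being scheduled, and an empty store gets a floor of 0.1. For example, a store that is 43 % full will next collect when it reaches 50 % full.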
+ * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.filestore.gc; + +import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; + +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.notification.SDFSEvent; +import org.quartz.Job; +import org.quartz.JobExecutionContext; +import org.quartz.JobExecutionException; + +public class GCJob implements Job { + @Override + public void execute(JobExecutionContext arg0) throws JobExecutionException { + WriteLock l = GCMain.gclock.writeLock(); + l.lock(); + try { + SDFSEvent task = SDFSEvent + .gcInfoEvent("Running Scheduled Volume Garbage Collection"); + try { + ManualGC.clearChunks(); + task.endEvent("Garbage Collection Succeeded"); + } catch (Exception e) { + SDFSLogger.getLog().error("Garbage Collection failed", e); + task.endEvent("Garbage Collection failed", SDFSEvent.ERROR, e); + } + } catch (Exception e) { + SDFSLogger.getLog().warn("unable to finish executing fdisk", e); + } finally { + l.unlock(); + } + + } +} diff --git a/src/org/opendedup/sdfs/filestore/gc/GCMain.java b/src/org/opendedup/sdfs/filestore/gc/GCMain.java index 0e32de8e0..1bba86a48 100644 --- a/src/org/opendedup/sdfs/filestore/gc/GCMain.java +++ b/src/org/opendedup/sdfs/filestore/gc/GCMain.java @@ -1,10 +1,28 @@ -package org.opendedup.sdfs.filestore.gc; - -import java.util.concurrent.locks.ReentrantLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -public class GCMain { - public static final ReentrantReadWriteLock gclock = new ReentrantReadWriteLock(); - public static final ReentrantLock gcRunningLock = new ReentrantLock(); - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.filestore.gc; + +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +public class GCMain { + public static final ReentrantReadWriteLock gclock = new ReentrantReadWriteLock(); + public static final ReentrantLock gcRunningLock = new ReentrantLock(); + +} diff --git a/src/org/opendedup/sdfs/filestore/gc/GCRun.java b/src/org/opendedup/sdfs/filestore/gc/GCRun.java index 84185c071..07fb3913a 100644 --- a/src/org/opendedup/sdfs/filestore/gc/GCRun.java +++ b/src/org/opendedup/sdfs/filestore/gc/GCRun.java @@ -1,9 +1,27 @@ -package org.opendedup.sdfs.filestore.gc; - -public class GCRun { - - public static void executeGC() { - - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.filestore.gc; + +public class GCRun { + + public static void executeGC() { + + } + +} diff --git a/src/org/opendedup/sdfs/filestore/gc/ManualGC.java b/src/org/opendedup/sdfs/filestore/gc/ManualGC.java index ade462f3e..947735758 100644 --- a/src/org/opendedup/sdfs/filestore/gc/ManualGC.java +++ b/src/org/opendedup/sdfs/filestore/gc/ManualGC.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.filestore.gc; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/filestore/gc/PFullGC.java b/src/org/opendedup/sdfs/filestore/gc/PFullGC.java index 9ca10b46f..a527cfbcc 100644 --- a/src/org/opendedup/sdfs/filestore/gc/PFullGC.java +++ b/src/org/opendedup/sdfs/filestore/gc/PFullGC.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.filestore.gc; import java.text.DecimalFormat; diff --git a/src/org/opendedup/sdfs/filestore/gc/SDFSGCScheduler.java b/src/org/opendedup/sdfs/filestore/gc/SDFSGCScheduler.java index 9d62bb88d..c9d99aa80 100644 --- a/src/org/opendedup/sdfs/filestore/gc/SDFSGCScheduler.java +++ b/src/org/opendedup/sdfs/filestore/gc/SDFSGCScheduler.java @@ -1,61 +1,79 @@ -package org.opendedup.sdfs.filestore.gc; - -import java.util.Properties; - -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Main; -import org.quartz.CronTrigger; -import org.quartz.JobDetail; -import org.quartz.Scheduler; -import org.quartz.SchedulerFactory; -import org.quartz.impl.StdSchedulerFactory; - -public class SDFSGCScheduler { - - Scheduler sched = null; - CronTrigger cctrigger = null; - - public SDFSGCScheduler() { - try { - Properties props = new Properties(); - props.setProperty("org.quartz.scheduler.skipUpdateCheck", "true"); - props.setProperty("org.quartz.threadPool.class", - "org.quartz.simpl.SimpleThreadPool"); - props.setProperty("org.quartz.threadPool.threadCount", "1"); - props.setProperty("org.quartz.threadPool.threadPriority", - Integer.toString(8)); - SDFSLogger.getLog().info("Scheduling FDISK Jobs for SDFS"); - SchedulerFactory schedFact = new StdSchedulerFactory(props); - sched = schedFact.getScheduler(); - sched.start(); - JobDetail ccjobDetail = new JobDetail("gc", null, GCJob.class); - CronTrigger cctrigger = new CronTrigger("gcTrigger", "group1", - Main.fDkiskSchedule); - sched.scheduleJob(ccjobDetail, cctrigger); - SDFSLogger.getLog().info( - "Stand Alone Garbage Collection Jobs Scheduled will run first at " - + cctrigger.getNextFireTime().toString()); - } catch (Exception e) { - SDFSLogger.getLog().fatal( - "Unable to schedule SDFS Garbage Collection", e); - } - } - - public String nextFileTime() { - return cctrigger.getNextFireTime().toString(); - } - - public String schedule() { - return cctrigger.getCronExpression(); - } - - public void stopSchedules() { - try { - sched.unscheduleJob("gc", null); - sched.deleteJob("gc", null); - } catch (Exception e) { - SDFSLogger.getLog().error("unable to stop schedule", e); - } - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.filestore.gc; + +import java.util.Properties; + +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Main; +import org.quartz.CronTrigger; +import org.quartz.JobDetail; +import org.quartz.Scheduler; +import org.quartz.SchedulerFactory; +import org.quartz.impl.StdSchedulerFactory; + +public class SDFSGCScheduler { + + Scheduler sched = null; + CronTrigger cctrigger = null; + + public SDFSGCScheduler() { + try { + Properties props = new Properties(); + props.setProperty("org.quartz.scheduler.skipUpdateCheck", "true"); + props.setProperty("org.quartz.threadPool.class", + "org.quartz.simpl.SimpleThreadPool"); + props.setProperty("org.quartz.threadPool.threadCount", "1"); + props.setProperty("org.quartz.threadPool.threadPriority", + Integer.toString(8)); + SDFSLogger.getLog().info("Scheduling FDISK Jobs for SDFS"); + SchedulerFactory schedFact = new StdSchedulerFactory(props); + sched = schedFact.getScheduler(); + sched.start(); + JobDetail ccjobDetail = new JobDetail("gc", null, GCJob.class); + CronTrigger cctrigger = new CronTrigger("gcTrigger", "group1", + Main.fDkiskSchedule); + sched.scheduleJob(ccjobDetail, cctrigger); + SDFSLogger.getLog().info( + "Stand Alone Garbage Collection Jobs Scheduled will run first at " + + cctrigger.getNextFireTime().toString()); + } catch (Exception e) { + SDFSLogger.getLog().fatal( + "Unable to schedule SDFS Garbage Collection", e); + } + } + + public String nextFileTime() { + return cctrigger.getNextFireTime().toString(); + } + + public String schedule() { + return cctrigger.getCronExpression(); + } + + public void stopSchedules() { + try { + sched.unscheduleJob("gc", null); + sched.deleteJob("gc", null); + } catch (Exception e) { + SDFSLogger.getLog().error("unable to stop schedule", e); + } + } + +} diff --git a/src/org/opendedup/sdfs/filestore/gc/StandAloneGCScheduler.java b/src/org/opendedup/sdfs/filestore/gc/StandAloneGCScheduler.java index 65feb5fac..217626674 100644 --- a/src/org/opendedup/sdfs/filestore/gc/StandAloneGCScheduler.java +++ b/src/org/opendedup/sdfs/filestore/gc/StandAloneGCScheduler.java @@ -1,52 +1,70 @@ -package org.opendedup.sdfs.filestore.gc; - -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Main; - -public class StandAloneGCScheduler implements Runnable { - private GCControllerImpl gcController = null; - private boolean closed = false; - Thread th = null; - public SDFSGCScheduler gcSched = null; - - public void recalcScheduler() { - gcController.reCalc(); - } - - public StandAloneGCScheduler() throws InstantiationException, - IllegalAccessException, ClassNotFoundException { - gcController = (GCControllerImpl) Class.forName(Main.gcClass) - .newInstance(); - SDFSLogger.getLog().info( - "Using " + Main.gcClass + " for DSE Garbage Collection"); - th = new Thread(this); - try { - th.setPriority(Thread.MAX_PRIORITY); - } catch (Throwable e) { - SDFSLogger.getLog().info( - "unable to set priority for Standalone GC Sceduler "); - } - SDFSLogger.getLog().info("GC Thread priority is " + th.getPriority()); - th.start(); - gcSched = new SDFSGCScheduler(); - } - - @Override - public void run() { - while (!closed) { - gcController.runGC(); - try { - Thread.sleep(30 * 1000); - } catch (InterruptedException e) { - closed = true; - } - } 
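A compact sketch of how SDFSGCScheduler wires the GC job into Quartz, using the same Quartz 1.x calls that appear in the hunk above (StdSchedulerFactory, JobDetail, CronTrigger). The cron expression and the job implementation below are placeholders, not SDFS defaults; SDFS takes its schedule from Main.fDkiskSchedule.

import java.util.Properties;

import org.quartz.CronTrigger;
import org.quartz.Job;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import org.quartz.Scheduler;
import org.quartz.impl.StdSchedulerFactory;

public class QuartzGcScheduleSketch implements Job {
    @Override
    public void execute(JobExecutionContext ctx) {
        System.out.println("scheduled GC pass would run here");
    }

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("org.quartz.scheduler.skipUpdateCheck", "true");
        props.setProperty("org.quartz.threadPool.class",
                "org.quartz.simpl.SimpleThreadPool");
        props.setProperty("org.quartz.threadPool.threadCount", "1");

        Scheduler sched = new StdSchedulerFactory(props).getScheduler();
        sched.start();

        JobDetail job = new JobDetail("gc", null, QuartzGcScheduleSketch.class);
        // "0 0 2 * * ?" (02:00 daily) is only a stand-in cron expression
        CronTrigger trigger = new CronTrigger("gcTrigger", "group1", "0 0 2 * * ?");
        sched.scheduleJob(job, trigger);

        System.out.println("first fire time: " + trigger.getNextFireTime());
    }
}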
- } - - public void close() { - gcSched.stopSchedules(); - this.closed = true; - th.interrupt(); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.filestore.gc; + +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Main; + +public class StandAloneGCScheduler implements Runnable { + private GCControllerImpl gcController = null; + private boolean closed = false; + Thread th = null; + public SDFSGCScheduler gcSched = null; + + public void recalcScheduler() { + gcController.reCalc(); + } + + public StandAloneGCScheduler() throws InstantiationException, + IllegalAccessException, ClassNotFoundException { + gcController = (GCControllerImpl) Class.forName(Main.gcClass) + .newInstance(); + SDFSLogger.getLog().info( + "Using " + Main.gcClass + " for DSE Garbage Collection"); + th = new Thread(this); + try { + th.setPriority(Thread.MAX_PRIORITY); + } catch (Throwable e) { + SDFSLogger.getLog().info( + "unable to set priority for Standalone GC Sceduler "); + } + SDFSLogger.getLog().info("GC Thread priority is " + th.getPriority()); + th.start(); + gcSched = new SDFSGCScheduler(); + } + + @Override + public void run() { + while (!closed) { + gcController.runGC(); + try { + Thread.sleep(30 * 1000); + } catch (InterruptedException e) { + closed = true; + } + } + } + + public void close() { + gcSched.stopSchedules(); + this.closed = true; + th.interrupt(); + } + +} diff --git a/src/org/opendedup/sdfs/io/AsyncChunkReadActionListener.java b/src/org/opendedup/sdfs/io/AsyncChunkReadActionListener.java index a8aba93bf..33f761471 100644 --- a/src/org/opendedup/sdfs/io/AsyncChunkReadActionListener.java +++ b/src/org/opendedup/sdfs/io/AsyncChunkReadActionListener.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
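A small sketch of the polling structure StandAloneGCScheduler uses above: the GC controller class is loaded by name via reflection, then polled every 30 seconds on a background thread until the thread is interrupted. The Controller interface and NoopController class below are stand-ins for GCControllerImpl and Main.gcClass, which are not reproduced here.

public class GcPollingSketch implements Runnable {
    interface Controller { void runGC(); } // stands in for GCControllerImpl

    public static class NoopController implements Controller {
        public void runGC() { System.out.println("GC pass"); }
    }

    private final Controller controller;
    private volatile boolean closed = false;
    private final Thread th;

    GcPollingSketch(String controllerClass) throws Exception {
        // loaded by class name, as the scheduler above does
        controller = (Controller) Class.forName(controllerClass).newInstance();
        th = new Thread(this);
        th.start();
    }

    @Override
    public void run() {
        while (!closed) {
            controller.runGC();
            try {
                Thread.sleep(30 * 1000);
            } catch (InterruptedException e) {
                closed = true; // interruption doubles as the shutdown signal
            }
        }
    }

    void close() {
        closed = true;
        th.interrupt();
    }

    public static void main(String[] args) throws Exception {
        GcPollingSketch s = new GcPollingSketch(NoopController.class.getName());
        Thread.sleep(100);
        s.close();
    }
}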
+ *******************************************************************************/ package org.opendedup.sdfs.io; import java.util.concurrent.atomic.AtomicInteger; diff --git a/src/org/opendedup/sdfs/io/AsyncChunkWriteActionListener.java b/src/org/opendedup/sdfs/io/AsyncChunkWriteActionListener.java index 0de688ed8..ce9e66b72 100644 --- a/src/org/opendedup/sdfs/io/AsyncChunkWriteActionListener.java +++ b/src/org/opendedup/sdfs/io/AsyncChunkWriteActionListener.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io; import java.util.concurrent.atomic.AtomicInteger; diff --git a/src/org/opendedup/sdfs/io/BlockDev.java b/src/org/opendedup/sdfs/io/BlockDev.java index e94a98c84..572befd16 100644 --- a/src/org/opendedup/sdfs/io/BlockDev.java +++ b/src/org/opendedup/sdfs/io/BlockDev.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io; import java.io.Externalizable; diff --git a/src/org/opendedup/sdfs/io/BufferClosedException.java b/src/org/opendedup/sdfs/io/BufferClosedException.java index 02c83abb7..887f48c94 100644 --- a/src/org/opendedup/sdfs/io/BufferClosedException.java +++ b/src/org/opendedup/sdfs/io/BufferClosedException.java @@ -1,16 +1,34 @@ -package org.opendedup.sdfs.io; - -/** - * - * @author Sam Silverberg This exception is thrown if a WritableCacheBuffer has - * already been closed for writing to a chunk store. - */ -public class BufferClosedException extends Exception { - - private static final long serialVersionUID = 1L; - - public BufferClosedException(String msg) { - super(msg); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.io; + +/** + * + * @author Sam Silverberg This exception is thrown if a WritableCacheBuffer has + * already been closed for writing to a chunk store. + */ +public class BufferClosedException extends Exception { + + private static final long serialVersionUID = 1L; + + public BufferClosedException(String msg) { + super(msg); + } + +} diff --git a/src/org/opendedup/sdfs/io/DedupChunk.java b/src/org/opendedup/sdfs/io/DedupChunk.java index 18f6e548a..6c5106aa3 100755 --- a/src/org/opendedup/sdfs/io/DedupChunk.java +++ b/src/org/opendedup/sdfs/io/DedupChunk.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/io/DedupChunkInterface.java b/src/org/opendedup/sdfs/io/DedupChunkInterface.java index 84c393479..4d883b8d9 100644 --- a/src/org/opendedup/sdfs/io/DedupChunkInterface.java +++ b/src/org/opendedup/sdfs/io/DedupChunkInterface.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.sdfs.io; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/io/DedupFile.java b/src/org/opendedup/sdfs/io/DedupFile.java index 4f7e58445..48a29ca66 100644 --- a/src/org/opendedup/sdfs/io/DedupFile.java +++ b/src/org/opendedup/sdfs/io/DedupFile.java @@ -1,328 +1,346 @@ -package org.opendedup.sdfs.io; - -import java.io.IOException; - -import org.opendedup.collections.DataArchivedException; -import org.opendedup.collections.HashtableFullException; - -/** - * - * @author annesam Interface that represents the file map associated with a - * specific MetaDataDedupFile. The file map stores data with a location - * index where each entry is a chunk of data. Locations in the index are - * offset by a specific chunksize - * @see org.opendedup.sdfs.Main#CHUNK_LENGTH . Data is written the DedupFile - * through the DedupFileChannel - * @see com.annesam.sdfs.io.DedupFileChannel . - * - */ -public interface DedupFile { - - public abstract void trim(long start, int len) throws IOException; - - /** - * - * @return true if the dedup file is closed - */ - public abstract boolean isClosed(); - - /** - * Writes all the cache buffers to the dedup chunk store service - */ - public abstract int writeCache() throws IOException, HashtableFullException; - - /** - * - * @param position - * the position in the dedup file where the write buffer is - * retrieved from - * @return the write buffer for the give position - * @throws IOException - */ - public abstract DedupChunkInterface getWriteBuffer(long position) - throws FileClosedException, IOException, DataArchivedException; - - public void updateMap(DedupChunkInterface writeBuffer, int doop) - throws FileClosedException, IOException; - - public void putBufferIntoFlush(DedupChunkInterface writeBuffer); - - public void removeBufferFromFlush(DedupChunkInterface writeBuffer); - - public void updateMap(DedupChunkInterface writeBuffer, int doop, - boolean propigateEvent) throws FileClosedException, IOException; - - /** - * Clones the DedupFile - * - * @param mf - * the MetaDataDedupFile to clone - * @return the cloned DedupFile - * @throws IOException - */ - public abstract DedupFile snapshot(MetaDataDedupFile mf) - throws IOException, HashtableFullException; - - /** - * Clones the DedupFile - * - * @param mf - * the MetaDataDedupFile to clone - * @param propigateEvent - * TODO - * @return the cloned DedupFile - * @throws IOException - */ - public abstract DedupFile snapshot(MetaDataDedupFile mf, - boolean propigateEvent) throws IOException, HashtableFullException; - - /** - * Clones the DedupFile - * - * @param mf - * the MetaDataDedupFile to clone - * @return the cloned DedupFile - * @throws IOException - */ - public abstract void copyTo(String path) throws IOException; - - /** - * Clones the DedupFile - * - * @param propigateEvent - * TODO - * @param mf - * the MetaDataDedupFile to clone - * - * @return the cloned DedupFile - * @throws IOException - */ - public abstract void copyTo(String path, boolean propigateEvent) - throws IOException; - - /** - * Deletes the DedupFile and all on disk references - * - * @return true if deleted - */ - public abstract boolean delete(); - - - /** - * Writes a specific cache buffer to the dedup chunk service - * - * @param writeBuffer - * the write buffer to persist - * @param removeWhenWritten - * whether or not to remove from the cached write buffers when - * written - * @throws IOException - */ - public abstract void 
writeCache(WritableCacheBuffer writeBuffer) - throws FileClosedException, IOException, HashtableFullException, - DataArchivedException; - - /** - * - * @return the number of chunks in the DedupFile - * @throws IOException - */ - public abstract long getNumberofChunks() throws FileClosedException, - IOException; - - /** - * Flushes all write buffers to disk - * - * @throws IOException - */ - public abstract void sync(boolean force) throws FileClosedException, - IOException; - - /** - * Flushes all write buffers to disk - * - * @param propigateEvent - * TODO - * - * @throws IOException - */ - public abstract void sync(boolean force, boolean propigateEvent) - throws FileClosedException, IOException; - - /** - * Creates a DedupFileChannel for writing data to this DedupFile - * - * @return a DedupFileChannel associated with this file - * @throws IOException - */ - public abstract DedupFileChannel getChannel(int flags) throws IOException; - - /** - * Removes a DedupFileChannel for writing to this DedupFile - * - * @param channel - * the channel to remove - */ - public abstract void unRegisterChannel(DedupFileChannel channel, int flags); - - public abstract void registerChannel(DedupFileChannel channel) - throws IOException; - - /** - * - * @return the path to the folder where the map for this dedup file is - * located. - */ - public abstract String getDatabaseDirPath(); - - /** - * Closes the DedupFile and all DedupFileChannels - */ - public abstract void forceClose() throws IOException; - - /** - * Gets the GUID associated with this file. Each DedupFile has an associated - * GUID. The GUID is typically used also as the file name of the associated - * Database file or on disk hashmap. - * - * @return the GUID - */ - public abstract String getGUID(); - - /** - * - * @return the MetaDataDedupFile associated with this DedupFile - */ - public abstract MetaDataDedupFile getMetaFile(); - - /** - * - * @param lock - * to remove from the file - */ - public abstract void removeLock(DedupFileLock lock); - - public abstract void setMetaDataDedupFile(MetaDataDedupFile mf); - - /** - * - * @param lock - * to remove from the file - * @param propigateEvent - * TODO - */ - public abstract void removeLock(DedupFileLock lock, boolean propigateEvent); - - /** - * Tries to lock a file at a specific position - * - * @param ch - * the channel that requested the lock - * @param position - * the position to lock the file at. - * @param size - * the size of the data to be locked - * @param shared - * if the lock is shared or not - * @return true if it is locked - * @throws IOException - */ - public abstract DedupFileLock addLock(DedupFileChannel ch, long position, - long len, boolean shared) throws IOException; - - /** - * Tries to lock a file at a specific position - * - * @param ch - * the channel that requested the lock - * @param position - * the position to lock the file at. - * @param shared - * if the lock is shared or not - * @param propigateEvent - * TODO - * @param size - * the size of the data to be locked - * @return true if it is locked - * @throws IOException - */ - public abstract DedupFileLock addLock(DedupFileChannel ch, long position, - long len, boolean shared, boolean propigateEvent) - throws IOException; - - /** - * - * @return when the file was last modified - */ - public abstract long lastModified() throws IOException; - - /** - * Returns the DedupChunk associated with a position in the DedupFile. - * - * @param location - * location to retieve. 
It will return the chunk where the - * location sits - * @param create - * Creates a new chunk if set to true and chunk does not exists. - * If the position is empty it should return an empty DedupChunk - * where the @see DedupChunk#isNewChunk() is set to true - * @return the DedupChunk of null if create is false and chunk is not found - * @throws IOException - */ - public abstract DedupChunkInterface getHash(long location, boolean create) - throws IOException, FileClosedException; - - /** - * - * @param location - * the location where to remove the hash from. This is often used - * when truncating a file - * @throws IOException - */ - public abstract void removeHash(long location) throws IOException; - - /** - * - * @param location - * the location where to remove the hash from. This is often used - * when truncating a file - * @param propigateEvent - * TODO - * @throws IOException - */ - public abstract void removeHash(long location, boolean propigateEvent) - throws IOException; - - /** - * - * @param location - * the location that is requested - * @return the base chunk location associated with a specific location - * within a file. As an example, if location "512" is requested it - * will return a chunk at location "0". If the chunk size is 4096 - * and location 8195 is requested it will return 8192 . - */ - public abstract long getChuckPosition(long location); - - /** - * - * @return - */ - public abstract boolean isAbsolute(); - - /** - * Optimizes the dedup file hash map for a specific length of file. - * - * @param length - * the lenght to optimize for - */ - public abstract void optimize() throws HashtableFullException; - - public abstract boolean hasOpenChannels(); - - public abstract void truncate(long length) throws IOException; - - public abstract void truncate(long length, boolean propigateEvent) - throws IOException; - +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.io; + +import java.io.IOException; + +import org.opendedup.collections.DataArchivedException; +import org.opendedup.collections.HashtableFullException; + +/** + * + * @author annesam Interface that represents the file map associated with a + * specific MetaDataDedupFile. The file map stores data with a location + * index where each entry is a chunk of data. Locations in the index are + * offset by a specific chunksize + * @see org.opendedup.sdfs.Main#CHUNK_LENGTH . Data is written the DedupFile + * through the DedupFileChannel + * @see com.annesam.sdfs.io.DedupFileChannel . 
+ * + */ +public interface DedupFile { + + public abstract void trim(long start, int len) throws IOException; + + /** + * + * @return true if the dedup file is closed + */ + public abstract boolean isClosed(); + + /** + * Writes all the cache buffers to the dedup chunk store service + */ + public abstract int writeCache() throws IOException, HashtableFullException; + + /** + * + * @param position + * the position in the dedup file where the write buffer is + * retrieved from + * @return the write buffer for the give position + * @throws IOException + */ + public abstract DedupChunkInterface getWriteBuffer(long position) + throws FileClosedException, IOException, DataArchivedException; + + public void updateMap(DedupChunkInterface writeBuffer, int doop) + throws FileClosedException, IOException; + + public void putBufferIntoFlush(DedupChunkInterface writeBuffer); + + public void removeBufferFromFlush(DedupChunkInterface writeBuffer); + + public void updateMap(DedupChunkInterface writeBuffer, int doop, + boolean propigateEvent) throws FileClosedException, IOException; + + /** + * Clones the DedupFile + * + * @param mf + * the MetaDataDedupFile to clone + * @return the cloned DedupFile + * @throws IOException + */ + public abstract DedupFile snapshot(MetaDataDedupFile mf) + throws IOException, HashtableFullException; + + /** + * Clones the DedupFile + * + * @param mf + * the MetaDataDedupFile to clone + * @param propigateEvent + * TODO + * @return the cloned DedupFile + * @throws IOException + */ + public abstract DedupFile snapshot(MetaDataDedupFile mf, + boolean propigateEvent) throws IOException, HashtableFullException; + + /** + * Clones the DedupFile + * + * @param mf + * the MetaDataDedupFile to clone + * @return the cloned DedupFile + * @throws IOException + */ + public abstract void copyTo(String path) throws IOException; + + /** + * Clones the DedupFile + * + * @param propigateEvent + * TODO + * @param mf + * the MetaDataDedupFile to clone + * + * @return the cloned DedupFile + * @throws IOException + */ + public abstract void copyTo(String path, boolean propigateEvent) + throws IOException; + + /** + * Deletes the DedupFile and all on disk references + * + * @return true if deleted + */ + public abstract boolean delete(); + + + /** + * Writes a specific cache buffer to the dedup chunk service + * + * @param writeBuffer + * the write buffer to persist + * @param removeWhenWritten + * whether or not to remove from the cached write buffers when + * written + * @throws IOException + */ + public abstract void writeCache(WritableCacheBuffer writeBuffer) + throws FileClosedException, IOException, HashtableFullException, + DataArchivedException; + + /** + * + * @return the number of chunks in the DedupFile + * @throws IOException + */ + public abstract long getNumberofChunks() throws FileClosedException, + IOException; + + /** + * Flushes all write buffers to disk + * + * @throws IOException + */ + public abstract void sync(boolean force) throws FileClosedException, + IOException; + + /** + * Flushes all write buffers to disk + * + * @param propigateEvent + * TODO + * + * @throws IOException + */ + public abstract void sync(boolean force, boolean propigateEvent) + throws FileClosedException, IOException; + + /** + * Creates a DedupFileChannel for writing data to this DedupFile + * + * @return a DedupFileChannel associated with this file + * @throws IOException + */ + public abstract DedupFileChannel getChannel(int flags) throws IOException; + + /** + * Removes a DedupFileChannel for writing 
to this DedupFile + * + * @param channel + * the channel to remove + */ + public abstract void unRegisterChannel(DedupFileChannel channel, int flags); + + public abstract void registerChannel(DedupFileChannel channel) + throws IOException; + + /** + * + * @return the path to the folder where the map for this dedup file is + * located. + */ + public abstract String getDatabaseDirPath(); + + /** + * Closes the DedupFile and all DedupFileChannels + */ + public abstract void forceClose() throws IOException; + + /** + * Gets the GUID associated with this file. Each DedupFile has an associated + * GUID. The GUID is typically used also as the file name of the associated + * Database file or on disk hashmap. + * + * @return the GUID + */ + public abstract String getGUID(); + + /** + * + * @return the MetaDataDedupFile associated with this DedupFile + */ + public abstract MetaDataDedupFile getMetaFile(); + + /** + * + * @param lock + * to remove from the file + */ + public abstract void removeLock(DedupFileLock lock); + + public abstract void setMetaDataDedupFile(MetaDataDedupFile mf); + + /** + * + * @param lock + * to remove from the file + * @param propigateEvent + * TODO + */ + public abstract void removeLock(DedupFileLock lock, boolean propigateEvent); + + /** + * Tries to lock a file at a specific position + * + * @param ch + * the channel that requested the lock + * @param position + * the position to lock the file at. + * @param size + * the size of the data to be locked + * @param shared + * if the lock is shared or not + * @return true if it is locked + * @throws IOException + */ + public abstract DedupFileLock addLock(DedupFileChannel ch, long position, + long len, boolean shared) throws IOException; + + /** + * Tries to lock a file at a specific position + * + * @param ch + * the channel that requested the lock + * @param position + * the position to lock the file at. + * @param shared + * if the lock is shared or not + * @param propigateEvent + * TODO + * @param size + * the size of the data to be locked + * @return true if it is locked + * @throws IOException + */ + public abstract DedupFileLock addLock(DedupFileChannel ch, long position, + long len, boolean shared, boolean propigateEvent) + throws IOException; + + /** + * + * @return when the file was last modified + */ + public abstract long lastModified() throws IOException; + + /** + * Returns the DedupChunk associated with a position in the DedupFile. + * + * @param location + * location to retieve. It will return the chunk where the + * location sits + * @param create + * Creates a new chunk if set to true and chunk does not exists. + * If the position is empty it should return an empty DedupChunk + * where the @see DedupChunk#isNewChunk() is set to true + * @return the DedupChunk of null if create is false and chunk is not found + * @throws IOException + */ + public abstract DedupChunkInterface getHash(long location, boolean create) + throws IOException, FileClosedException; + + /** + * + * @param location + * the location where to remove the hash from. This is often used + * when truncating a file + * @throws IOException + */ + public abstract void removeHash(long location) throws IOException; + + /** + * + * @param location + * the location where to remove the hash from. 
This is often used + * when truncating a file + * @param propigateEvent + * TODO + * @throws IOException + */ + public abstract void removeHash(long location, boolean propigateEvent) + throws IOException; + + /** + * + * @param location + * the location that is requested + * @return the base chunk location associated with a specific location + * within a file. As an example, if location "512" is requested it + * will return a chunk at location "0". If the chunk size is 4096 + * and location 8195 is requested it will return 8192 . + */ + public abstract long getChuckPosition(long location); + + /** + * + * @return + */ + public abstract boolean isAbsolute(); + + /** + * Optimizes the dedup file hash map for a specific length of file. + * + * @param length + * the lenght to optimize for + */ + public abstract void optimize() throws HashtableFullException; + + public abstract boolean hasOpenChannels(); + + public abstract void truncate(long length) throws IOException; + + public abstract void truncate(long length, boolean propigateEvent) + throws IOException; + } \ No newline at end of file diff --git a/src/org/opendedup/sdfs/io/DedupFileChannel.java b/src/org/opendedup/sdfs/io/DedupFileChannel.java index 91153b2f2..103f0a2a6 100755 --- a/src/org/opendedup/sdfs/io/DedupFileChannel.java +++ b/src/org/opendedup/sdfs/io/DedupFileChannel.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/io/DedupFileListener.java b/src/org/opendedup/sdfs/io/DedupFileListener.java index 5650cfe09..59b2016be 100644 --- a/src/org/opendedup/sdfs/io/DedupFileListener.java +++ b/src/org/opendedup/sdfs/io/DedupFileListener.java @@ -1,29 +1,47 @@ -package org.opendedup.sdfs.io; - -public interface DedupFileListener { - void onCreate(DedupFile file); - - void onCopyTo(String dst, DedupFile file); - - void onAddLock(long position, long len, boolean shared, DedupFileLock lock, - DedupFile file); - - void onCreateBlankFile(long size, DedupFile file); - - void onDelete(DedupFile file); - - void onForceClose(DedupFile file); - - void onRemoveHash(long position, DedupFile file); - - void onRemoveLock(DedupFileLock lock, DedupFile file); - - void onSnapShot(MetaDataDedupFile mf, DedupFile file); - - void onSync(DedupFile file); - - void onTruncate(long length, DedupFile file); - - void onUpdateMap(DedupChunkInterface writeBuffer, byte[] hash, - boolean doop, DedupFile file); -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
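A small illustration of the base-chunk arithmetic described in the getChuckPosition Javadoc above (location 512 maps to chunk 0, and location 8195 maps to 8192 when the chunk size is 4096). The 4096 constant is an assumption standing in for Main.CHUNK_LENGTH; this is not the SDFS implementation itself.

public class ChunkPositionExample {
    static final int CHUNK_LENGTH = 4096; // assumed, stands in for Main.CHUNK_LENGTH

    static long chunkPosition(long location) {
        // round down to the start of the chunk that contains the location
        return (location / CHUNK_LENGTH) * CHUNK_LENGTH;
    }

    public static void main(String[] args) {
        System.out.println(chunkPosition(512L));  // 0
        System.out.println(chunkPosition(8195L)); // 8192
    }
}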
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.io; + +public interface DedupFileListener { + void onCreate(DedupFile file); + + void onCopyTo(String dst, DedupFile file); + + void onAddLock(long position, long len, boolean shared, DedupFileLock lock, + DedupFile file); + + void onCreateBlankFile(long size, DedupFile file); + + void onDelete(DedupFile file); + + void onForceClose(DedupFile file); + + void onRemoveHash(long position, DedupFile file); + + void onRemoveLock(DedupFileLock lock, DedupFile file); + + void onSnapShot(MetaDataDedupFile mf, DedupFile file); + + void onSync(DedupFile file); + + void onTruncate(long length, DedupFile file); + + void onUpdateMap(DedupChunkInterface writeBuffer, byte[] hash, + boolean doop, DedupFile file); +} diff --git a/src/org/opendedup/sdfs/io/DedupFileLock.java b/src/org/opendedup/sdfs/io/DedupFileLock.java index 5b87c6b6e..6b0c63ea7 100755 --- a/src/org/opendedup/sdfs/io/DedupFileLock.java +++ b/src/org/opendedup/sdfs/io/DedupFileLock.java @@ -1,106 +1,124 @@ -package org.opendedup.sdfs.io; - -/** - * - * @author annesam Lock Object for DedupFiles. Locks do not actually prevent - * writing to locked space but rather reserve space so that other write - * threads know an area is locked from writing. - */ -public class DedupFileLock { - - private DedupFileChannel channel; - private long position; - private long size; - private boolean shared; - private boolean valid; - private String host; - - /** - * Instantiates a DedupFileLock - * - * @param ch - * DedupFileChannel associated with this lock. - * @param position - * the position where the lock start position is located. - * @param len - * the length of the lock - * @param shared - * if the lock is shared for reading or not. - */ - public DedupFileLock(DedupFileChannel ch, long position, long len, - boolean shared) { - this.channel = ch; - this.position = position; - this.size = len; - this.shared = shared; - this.valid = true; - - } - - public String getHost() { - return this.host; - } - - public void setHost(String host) { - this.host = host; - } - - /** - * - * @return the channel associated with this lock - */ - public DedupFileChannel channel() { - return this.channel; - } - - /** - * - * @return the size or length of the lock - */ - public long size() { - return this.size; - } - - /** - * - * @return If the lock is shared or not - */ - public boolean isShared() { - return this.shared; - } - - /** - * - * @return true if the lock is still valid - */ - public boolean isValid() { - return this.valid; - } - - /** - * sets the lock it valid = false. 
- */ - public void release() { - this.valid = false; - } - - /** - * checks to see if two locks overlap - * - * @param pos - * the proposed position of the lock - * @param sz - * the size of the proposed lock - * @return true if it overlaps - */ - public boolean overLaps(long pos, long sz) { - long endPos = this.position + this.size; - long pEndPos = pos + sz; - if (pos >= this.position && pos <= endPos) - return true; - if (pEndPos >= this.position && pEndPos <= endPos) - return true; - return false; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.io; + +/** + * + * @author annesam Lock Object for DedupFiles. Locks do not actually prevent + * writing to locked space but rather reserve space so that other write + * threads know an area is locked from writing. + */ +public class DedupFileLock { + + private DedupFileChannel channel; + private long position; + private long size; + private boolean shared; + private boolean valid; + private String host; + + /** + * Instantiates a DedupFileLock + * + * @param ch + * DedupFileChannel associated with this lock. + * @param position + * the position where the lock start position is located. + * @param len + * the length of the lock + * @param shared + * if the lock is shared for reading or not. + */ + public DedupFileLock(DedupFileChannel ch, long position, long len, + boolean shared) { + this.channel = ch; + this.position = position; + this.size = len; + this.shared = shared; + this.valid = true; + + } + + public String getHost() { + return this.host; + } + + public void setHost(String host) { + this.host = host; + } + + /** + * + * @return the channel associated with this lock + */ + public DedupFileChannel channel() { + return this.channel; + } + + /** + * + * @return the size or length of the lock + */ + public long size() { + return this.size; + } + + /** + * + * @return If the lock is shared or not + */ + public boolean isShared() { + return this.shared; + } + + /** + * + * @return true if the lock is still valid + */ + public boolean isValid() { + return this.valid; + } + + /** + * sets the lock it valid = false. 
+ */ + public void release() { + this.valid = false; + } + + /** + * checks to see if two locks overlap + * + * @param pos + * the proposed position of the lock + * @param sz + * the size of the proposed lock + * @return true if it overlaps + */ + public boolean overLaps(long pos, long sz) { + long endPos = this.position + this.size; + long pEndPos = pos + sz; + if (pos >= this.position && pos <= endPos) + return true; + if (pEndPos >= this.position && pEndPos <= endPos) + return true; + return false; + } + +} diff --git a/src/org/opendedup/sdfs/io/FileClosedException.java b/src/org/opendedup/sdfs/io/FileClosedException.java index 89807d84a..98689f97a 100644 --- a/src/org/opendedup/sdfs/io/FileClosedException.java +++ b/src/org/opendedup/sdfs/io/FileClosedException.java @@ -1,20 +1,38 @@ -package org.opendedup.sdfs.io; - -/** - * - * @author Sam Silverberg This exception is thrown if a WritableCacheBuffer has - * already been closed for writing to a chunk store. - */ -public class FileClosedException extends Exception { - - private static final long serialVersionUID = 1L; - - public FileClosedException(String msg) { - super(msg); - } - - public FileClosedException() { - super(); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.io; + +/** + * + * @author Sam Silverberg This exception is thrown if a WritableCacheBuffer has + * already been closed for writing to a chunk store. 
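A quick worked example of the overlap test DedupFileLock.overLaps implements above: a proposed [pos, pos + sz] range is reported as overlapping when either of its endpoints falls inside the locked range. The helper below copies that logic so the sample inputs can be checked directly.

public class LockOverlapExample {
    static boolean overlaps(long lockPos, long lockSize, long pos, long sz) {
        long endPos = lockPos + lockSize;
        long pEndPos = pos + sz;
        if (pos >= lockPos && pos <= endPos)
            return true;
        if (pEndPos >= lockPos && pEndPos <= endPos)
            return true;
        return false;
    }

    public static void main(String[] args) {
        // existing lock covers bytes 100..200
        System.out.println(overlaps(100, 100, 150, 10)); // true  (starts inside)
        System.out.println(overlaps(100, 100, 90, 20));  // true  (end falls inside)
        System.out.println(overlaps(100, 100, 300, 50)); // false (disjoint)
        // note: as written, the endpoint test misses a proposed range that
        // fully contains the lock, e.g. overlaps(100, 100, 50, 300) is false
    }
}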
+ */ +public class FileClosedException extends Exception { + + private static final long serialVersionUID = 1L; + + public FileClosedException(String msg) { + super(msg); + } + + public FileClosedException() { + super(); + } + +} diff --git a/src/org/opendedup/sdfs/io/HashLocPair.java b/src/org/opendedup/sdfs/io/HashLocPair.java index 2e4b8484d..4db7e14ea 100644 --- a/src/org/opendedup/sdfs/io/HashLocPair.java +++ b/src/org/opendedup/sdfs/io/HashLocPair.java @@ -1,155 +1,173 @@ -package org.opendedup.sdfs.io; - -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import java.nio.ByteBuffer; -import java.util.Arrays; - -import org.opendedup.hashing.HashFunctionPool; -import org.opendedup.rabin.utils.StringUtils; - -import com.google.common.collect.Range; - -public class HashLocPair implements Comparable, Externalizable { - public static final int BAL = HashFunctionPool.hashLength + 8 + 4 + 4 + 4 - + 4; - public byte[] hash; - public byte[] hashloc; - public byte[] data; - public int len; - public int pos; - public int offset; - public int nlen; - private boolean dup = false; - - public byte[] asArray() throws IOException { - ByteBuffer bf = ByteBuffer.wrap(new byte[BAL]); - bf.put(hash); - bf.put(hashloc); - bf.putInt(len); - bf.putInt(pos); - bf.putInt(offset); - bf.putInt(nlen); - this.checkCorrupt(); - return bf.array(); - } - - private void checkCorrupt() throws IOException { - if (len < 0 || pos < 0 || offset < 0 || nlen < 0) - throw new IOException("data is corrupt " + this); - } - - public boolean isInvalid() { - return (len <= 0 || pos < 0 || offset < 0 || nlen <= 0); - } - - public HashLocPair() { - - } - - private int currentPos = 1; - - public synchronized void addHashLoc(byte loc) { - // SDFSLogger.getLog().info("set " + this.currentPos + " to " + loc); - if (currentPos < this.hashloc.length) { - if (this.hashloc[0] == -1) - this.hashloc[0] = 0; - this.hashloc[currentPos] = loc; - this.currentPos++; - } - } - - public void resetHashLoc() { - this.hashloc = new byte[8]; - this.hashloc[0] = -1; - currentPos = 1; - } - - public int getNumberHL() { - return this.currentPos - 1; - } - - public HashLocPair(byte[] b) throws IOException { - ByteBuffer bf = ByteBuffer.wrap(b); - hash = new byte[HashFunctionPool.hashLength]; - hashloc = new byte[8]; - bf.get(hash); - bf.get(hashloc); - len = bf.getInt(); - pos = bf.getInt(); - offset = bf.getInt(); - nlen = bf.getInt(); - this.checkCorrupt(); - } - - @Override - public int compareTo(HashLocPair p) { - if (this.pos == p.pos) - return 0; - if (this.pos > p.pos) - return 1; - else - return -1; - } - - public HashLocPair clone() { - HashLocPair p = new HashLocPair(); - p.hash = Arrays.copyOf(this.hash, this.hash.length); - p.hashloc = Arrays.copyOf(this.hashloc, this.hashloc.length); - p.len = len; - p.pos = pos; - p.offset = offset; - p.nlen = nlen; - return p; - } - - public Range getRange() { - return Range.closed(pos, pos + nlen); - } - - public String toString() { - String hashlocs = "["; - for (byte b : this.hashloc) { - hashlocs = hashlocs + Byte.toString(b) + " "; - } - hashlocs = hashlocs + "]"; - return "pos=" + pos + " len=" + len + " offset=" + offset + " nlen=" - + nlen + " ep=" + (pos + nlen) + " hash=" - + StringUtils.getHexString(hash) + " hashlocs=" + hashlocs; - } - - @Override - public void readExternal(ObjectInput in) throws IOException, - ClassNotFoundException { - in.readInt(); - this.hash = new byte[in.readInt()]; - in.read(this.hash); - this.hashloc = 
new byte[8]; - in.read(this.hashloc); - - } - - @Override - public void writeExternal(ObjectOutput out) throws IOException { - ByteBuffer bf = ByteBuffer.wrap(new byte[4 + this.hash.length - + this.hashloc.length]); - bf.putInt(this.hash.length); - bf.put(hash); - bf.put(hashloc); - byte[] b = bf.array(); - out.writeInt(b.length); - out.write(b); - - } - - public boolean isDup() { - return dup; - } - - public void setDup(boolean dup) { - this.dup = dup; - } - +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.io; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.nio.ByteBuffer; +import java.util.Arrays; + +import org.opendedup.hashing.HashFunctionPool; +import org.opendedup.rabin.utils.StringUtils; + +import com.google.common.collect.Range; + +public class HashLocPair implements Comparable, Externalizable { + public static final int BAL = HashFunctionPool.hashLength + 8 + 4 + 4 + 4 + + 4; + public byte[] hash; + public byte[] hashloc; + public byte[] data; + public int len; + public int pos; + public int offset; + public int nlen; + private boolean dup = false; + + public byte[] asArray() throws IOException { + ByteBuffer bf = ByteBuffer.wrap(new byte[BAL]); + bf.put(hash); + bf.put(hashloc); + bf.putInt(len); + bf.putInt(pos); + bf.putInt(offset); + bf.putInt(nlen); + this.checkCorrupt(); + return bf.array(); + } + + private void checkCorrupt() throws IOException { + if (len < 0 || pos < 0 || offset < 0 || nlen < 0) + throw new IOException("data is corrupt " + this); + } + + public boolean isInvalid() { + return (len <= 0 || pos < 0 || offset < 0 || nlen <= 0); + } + + public HashLocPair() { + + } + + private int currentPos = 1; + + public synchronized void addHashLoc(byte loc) { + // SDFSLogger.getLog().info("set " + this.currentPos + " to " + loc); + if (currentPos < this.hashloc.length) { + if (this.hashloc[0] == -1) + this.hashloc[0] = 0; + this.hashloc[currentPos] = loc; + this.currentPos++; + } + } + + public void resetHashLoc() { + this.hashloc = new byte[8]; + this.hashloc[0] = -1; + currentPos = 1; + } + + public int getNumberHL() { + return this.currentPos - 1; + } + + public HashLocPair(byte[] b) throws IOException { + ByteBuffer bf = ByteBuffer.wrap(b); + hash = new byte[HashFunctionPool.hashLength]; + hashloc = new byte[8]; + bf.get(hash); + bf.get(hashloc); + len = bf.getInt(); + pos = bf.getInt(); + offset = bf.getInt(); + nlen = bf.getInt(); + this.checkCorrupt(); + } + + @Override + public int compareTo(HashLocPair p) { + if (this.pos == p.pos) + return 0; + if (this.pos > p.pos) + return 1; + else + return -1; + } + + 
public HashLocPair clone() { + HashLocPair p = new HashLocPair(); + p.hash = Arrays.copyOf(this.hash, this.hash.length); + p.hashloc = Arrays.copyOf(this.hashloc, this.hashloc.length); + p.len = len; + p.pos = pos; + p.offset = offset; + p.nlen = nlen; + return p; + } + + public Range getRange() { + return Range.closed(pos, pos + nlen); + } + + public String toString() { + String hashlocs = "["; + for (byte b : this.hashloc) { + hashlocs = hashlocs + Byte.toString(b) + " "; + } + hashlocs = hashlocs + "]"; + return "pos=" + pos + " len=" + len + " offset=" + offset + " nlen=" + + nlen + " ep=" + (pos + nlen) + " hash=" + + StringUtils.getHexString(hash) + " hashlocs=" + hashlocs; + } + + @Override + public void readExternal(ObjectInput in) throws IOException, + ClassNotFoundException { + in.readInt(); + this.hash = new byte[in.readInt()]; + in.read(this.hash); + this.hashloc = new byte[8]; + in.read(this.hashloc); + + } + + @Override + public void writeExternal(ObjectOutput out) throws IOException { + ByteBuffer bf = ByteBuffer.wrap(new byte[4 + this.hash.length + + this.hashloc.length]); + bf.putInt(this.hash.length); + bf.put(hash); + bf.put(hashloc); + byte[] b = bf.array(); + out.writeInt(b.length); + out.write(b); + + } + + public boolean isDup() { + return dup; + } + + public void setDup(boolean dup) { + this.dup = dup; + } + } \ No newline at end of file diff --git a/src/org/opendedup/sdfs/io/MetaDataDedupFile.java b/src/org/opendedup/sdfs/io/MetaDataDedupFile.java index 2363bff1b..45fb5ea38 100755 --- a/src/org/opendedup/sdfs/io/MetaDataDedupFile.java +++ b/src/org/opendedup/sdfs/io/MetaDataDedupFile.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io; import java.io.File; diff --git a/src/org/opendedup/sdfs/io/ReadAhead.java b/src/org/opendedup/sdfs/io/ReadAhead.java index a15a55407..7ca676c70 100644 --- a/src/org/opendedup/sdfs/io/ReadAhead.java +++ b/src/org/opendedup/sdfs/io/ReadAhead.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
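The HashLocPair hunk above packs each pair into a fixed-size record: the chunk hash, the 8-byte hashloc, then len, pos, offset and nlen as four ints, HashLocPair.BAL bytes in total, and the byte[] constructor parses the same layout back. A minimal round-trip sketch (field values are illustrative; HashFunctionPool.hashLength supplies the digest size):

import java.io.IOException;
import java.util.Arrays;

import org.opendedup.hashing.HashFunctionPool;
import org.opendedup.sdfs.io.HashLocPair;

// Sketch only: shows the asArray()/byte[] round trip that SparseDataChunk relies on.
public class HashLocPairRoundTrip {
	public static void main(String[] args) throws IOException {
		HashLocPair p = new HashLocPair();
		p.hash = new byte[HashFunctionPool.hashLength]; // digest for the referenced chunk
		p.hashloc = new byte[8];                        // first byte is a flag, rest are storage locations
		p.len = 4096;   // stored length of the chunk
		p.pos = 0;      // logical position inside the sparse chunk
		p.offset = 0;   // offset into the stored chunk
		p.nlen = 4096;  // number of bytes actually referenced

		byte[] rec = p.asArray();             // fixed record of HashLocPair.BAL bytes
		HashLocPair q = new HashLocPair(rec); // parse it back

		System.out.println(rec.length == HashLocPair.BAL);                       // true
		System.out.println(Arrays.equals(p.hash, q.hash) && q.nlen == p.nlen);   // true
	}
}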
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/io/ReadOnlyException.java b/src/org/opendedup/sdfs/io/ReadOnlyException.java index e92db99a6..206e76ecf 100644 --- a/src/org/opendedup/sdfs/io/ReadOnlyException.java +++ b/src/org/opendedup/sdfs/io/ReadOnlyException.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io; public class ReadOnlyException extends Exception { diff --git a/src/org/opendedup/sdfs/io/RestoreRequest.java b/src/org/opendedup/sdfs/io/RestoreRequest.java index 2b8b0daf8..e163fc329 100644 --- a/src/org/opendedup/sdfs/io/RestoreRequest.java +++ b/src/org/opendedup/sdfs/io/RestoreRequest.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io; import java.util.HashMap; diff --git a/src/org/opendedup/sdfs/io/SeekType.java b/src/org/opendedup/sdfs/io/SeekType.java index 5e4a109d1..45b4ec1d6 100644 --- a/src/org/opendedup/sdfs/io/SeekType.java +++ b/src/org/opendedup/sdfs/io/SeekType.java @@ -1,19 +1,37 @@ -package org.opendedup.sdfs.io; - -/** - * Seek file position types. - * - *
<p>
- * Defines constants used by the SeekFile SMB request to specify where the seek - * position is relative to. - * - * @author gkspencer - */ -public class SeekType { - - // Seek file types - - public static final int StartOfFile = 0; - public static final int CurrentPos = 1; - public static final int EndOfFile = 2; +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.io; + +/** + * Seek file position types. + * + *
<p>
+ * Defines constants used by the SeekFile SMB request to specify where the seek + * position is relative to. + * + * @author gkspencer + */ +public class SeekType { + + // Seek file types + + public static final int StartOfFile = 0; + public static final int CurrentPos = 1; + public static final int EndOfFile = 2; } \ No newline at end of file diff --git a/src/org/opendedup/sdfs/io/SparseDataChunk.java b/src/org/opendedup/sdfs/io/SparseDataChunk.java index 3b4d3096f..0f36ed86b 100644 --- a/src/org/opendedup/sdfs/io/SparseDataChunk.java +++ b/src/org/opendedup/sdfs/io/SparseDataChunk.java @@ -1,337 +1,355 @@ -package org.opendedup.sdfs.io; - -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.locks.ReentrantLock; - -import org.opendedup.collections.LongByteArrayMap; -import org.opendedup.hashing.HashFunctionPool; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Main; - -public class SparseDataChunk implements Externalizable { - private ReentrantLock l = new ReentrantLock(); - private int doop; - private int prevdoop; - // private int RAWDL; - private long fpos; - private static final long serialVersionUID = -2782607786999940224L; - public int len = 0; - public byte flags = 0; - public static final int RECONSTRUCTED = 1; // 0001 - private byte version = 0; - private List ar = new ArrayList(); - - public SparseDataChunk() { - - } - - public SparseDataChunk(byte[] rawData, byte version) throws IOException { - this.version = version; - this.marshall(rawData); - } - - public SparseDataChunk(int doop, List ar, boolean localData, - byte version) { - - this.version = version; - this.doop = doop; - this.ar = ar; - - } - - private void marshall(byte[] raw) throws IOException { - ByteBuffer buf = ByteBuffer.wrap(raw); - if (this.version == 0) { - ar = new ArrayList(1); - byte b = buf.get(); - if (b == 0) - doop = 0; - else - doop = Main.CHUNK_LENGTH; - HashLocPair p = new HashLocPair(); - p.hash = new byte[HashFunctionPool.hashLength]; - buf.get(p.hash); - buf.get(); - p.hashloc = new byte[8]; - buf.get(p.hashloc); - p.pos = 0; - p.len = Main.CHUNK_LENGTH; - p.nlen = p.len; - p.offset = 0; - ar.add(p); - } else if (version == 1) { - this.doop = buf.getInt(); - ar = new ArrayList(); - byte[] hash = new byte[HashFunctionPool.hashLength - * HashFunctionPool.max_hash_cluster]; - buf.get(hash); - byte[] hashlocs = new byte[8 * HashFunctionPool.max_hash_cluster]; - buf.get(hashlocs); - ByteBuffer hb = ByteBuffer.wrap(hash); - ByteBuffer hl = ByteBuffer.wrap(hashlocs); - for (int z = 0; z < HashFunctionPool.max_hash_cluster; z++) { - byte[] _hash = new byte[HashFunctionPool.hashLength]; - byte[] _hl = new byte[8]; - hl.get(_hl); - - hb.get(_hash); - if (_hl[1] != 0) { - HashLocPair p = new HashLocPair(); - p.hash = _hash; - p.hashloc = _hl; - p.pos = -1; - ar.add(p); - } else - break; - } - - } else { - this.flags = buf.get(); - buf.getInt(); - int zlen = buf.getInt(); - ar = new ArrayList(zlen); - for (int i = 0; i < zlen; i++) { - byte[] b = new byte[HashLocPair.BAL]; - buf.get(b); - HashLocPair p = new HashLocPair(b); - ar.add(p); - int ep = p.pos + p.len; - if (ep > len) - len = ep; - } - doop = buf.getInt(); - } - } - - public int getDoop() { - return doop; - } - - public HashLocPair getWL(int _pos) throws IOException { - l.lock(); - try { - for (HashLocPair h : ar) { - int ep 
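The SeekType constants above mirror the SMB SeekFile origins (start of file, current position, end of file). A sketch of how a caller might resolve one of them into an absolute offset; the SeekResolver class itself is hypothetical and not part of this patch:

import org.opendedup.sdfs.io.SeekType;

// Sketch: translate an SMB-style seek (origin + displacement) into an absolute file position.
public final class SeekResolver {
	public static long resolve(int seekType, long displacement, long currentPos, long fileLength) {
		switch (seekType) {
		case SeekType.StartOfFile:
			return displacement;
		case SeekType.CurrentPos:
			return currentPos + displacement;
		case SeekType.EndOfFile:
			return fileLength + displacement;
		default:
			throw new IllegalArgumentException("unknown seek type " + seekType);
		}
	}
}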
= h.pos + h.nlen; - if (_pos >= h.pos && _pos < ep) { - HashLocPair _h = h.clone(); - int os = _pos - _h.pos; - _h.offset += os; - _h.nlen -= os; - _h.pos = _pos; - return _h; - } - } - for (HashLocPair h : ar) { - SDFSLogger.getLog().warn(h); - } - throw new IOException("Position not found " + _pos); - } finally { - l.unlock(); - } - - } - - public static void insertHashLocPair(List ar, HashLocPair p) - throws IOException { - int ep = p.pos + p.nlen; - if (ep > Main.CHUNK_LENGTH) - throw new IOException("Overflow ep=" + ep); - ArrayList rm = null; - ArrayList am = null; - // SDFSLogger.getLog().info("p = " + p); - - for (HashLocPair h : ar) { - int hep = h.pos + h.nlen; - if (h.pos >= ep) - break; - else if (h.pos >= p.pos && hep <= ep) { - // SDFSLogger.getLog().info("0 removing h = " + h); - if (rm == null) - rm = new ArrayList(); - rm.add(h); - } else if (h.pos >= p.pos && h.pos < ep && hep > ep) { - int no = ep - h.pos; - // int oh = h.pos; - h.pos = ep; - h.offset += no; - h.nlen -= no; - - // SDFSLogger.getLog().info("2 changing pos from " +oh - // +" to " + h.pos + " offset = " + h.offset); - } else if (h.pos <= p.pos && hep > p.pos) { - if (hep > ep) { - int offset = ep - h.pos; - HashLocPair _h = h.clone(); - _h.offset += offset; - _h.nlen -= offset; - _h.pos = ep; - if (!Main.chunkStoreLocal) - _h.hashloc[0] = 1; - else - _h.setDup(true); - if (am == null) - am = new ArrayList(); - - am.add(_h); - } - if (h.pos < p.pos) { - h.nlen = (p.pos - h.pos); - } else { - if (rm == null) - rm = new ArrayList(); - rm.add(h); - } - } - if (h.isInvalid()) { - SDFSLogger.getLog().error("h = " + h.toString()); - } - } - if (rm != null) { - for (HashLocPair z : rm) { - ar.remove(z); - } - } - if (am != null) { - for (HashLocPair z : am) { - ar.add(z); - } - } - if (!Main.chunkStoreLocal) - p.hashloc[0] = 1; - else - p.setDup(true); - ar.add(p); - - Collections.sort(ar); - } - - public void putHash(HashLocPair p) throws IOException { - l.lock(); - try { - insertHashLocPair(ar, p); - this.flags = RECONSTRUCTED; - } finally { - l.unlock(); - } - } - - public void setRecontructed(boolean reconstructed) { - if (reconstructed) - this.flags = RECONSTRUCTED; - - } - - public byte[] getBytes() throws IOException { - l.lock(); - try { - if (this.version == 0) { - ByteBuffer buf = ByteBuffer - .wrap(new byte[LongByteArrayMap._FREE.length]); - if (doop > 0) - buf.put((byte) 1); - else - buf.put((byte) 0); - buf.put(ar.get(0).hash); - buf.put((byte) 0); - buf.put(ar.get(0).hashloc); - return buf.array(); - } else if (this.version == 1) { - ByteBuffer buf = ByteBuffer - .wrap(new byte[LongByteArrayMap._v1arrayLength]); - buf.putInt(doop); - for (HashLocPair p : ar) { - buf.put(p.hash); - } - for (HashLocPair p : ar) { - buf.put(p.hashloc); - } - return buf.array(); - - } else { - ByteBuffer buf = null; - buf = ByteBuffer.wrap(new byte[1 + 4 + 4 + 4 - + (ar.size() * HashLocPair.BAL)]); - this.prevdoop = this.doop; - this.doop = 0; - buf.put(this.flags); - buf.putInt(buf.capacity()); - buf.putInt(this.ar.size()); - Collections.sort(this.ar); - if (ar.size() > (LongByteArrayMap.MAX_ELEMENTS_PER_AR)) { - SDFSLogger.getLog().error( - "Buffer overflow ar size = " + ar.size() - + " max size = " - + (LongByteArrayMap.MAX_ELEMENTS_PER_AR)); - throw new IOException("Buffer overflow ar size = " - + ar.size() + " max size = " - + (LongByteArrayMap.MAX_ELEMENTS_PER_AR)); - } - this.len = 0; - for (HashLocPair p : ar) { - boolean dup = p.isDup(); - if (!Main.chunkStoreLocal && p.hashloc[0] == 1) - dup = true; - if (dup) - 
this.doop += p.nlen; - buf.put(p.asArray()); - this.len += p.nlen; - } - buf.putInt(this.doop); - return buf.array(); - } - } finally { - l.unlock(); - } - } - - public void setDoop(int doop) { - this.doop = doop; - } - - public long getFpos() { - return fpos; - } - - public void setFpos(long fpos) { - this.fpos = fpos; - } - - public List getFingers() { - return ar; - } - - @Override - public void readExternal(ObjectInput in) throws IOException, - ClassNotFoundException { - byte[] b = new byte[in.readInt()]; - this.marshall(b); - - } - - @Override - public void writeExternal(ObjectOutput out) throws IOException { - byte[] b = this.getBytes(); - out.writeInt(b.length); - out.write(b); - - } - - public int getPrevdoop() { - return prevdoop; - } - - public boolean isRecontructed() { - if (this.flags == 0) - return false; - else - return true; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.io; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.locks.ReentrantLock; + +import org.opendedup.collections.LongByteArrayMap; +import org.opendedup.hashing.HashFunctionPool; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Main; + +public class SparseDataChunk implements Externalizable { + private ReentrantLock l = new ReentrantLock(); + private int doop; + private int prevdoop; + // private int RAWDL; + private long fpos; + private static final long serialVersionUID = -2782607786999940224L; + public int len = 0; + public byte flags = 0; + public static final int RECONSTRUCTED = 1; // 0001 + private byte version = 0; + private List ar = new ArrayList(); + + public SparseDataChunk() { + + } + + public SparseDataChunk(byte[] rawData, byte version) throws IOException { + this.version = version; + this.marshall(rawData); + } + + public SparseDataChunk(int doop, List ar, boolean localData, + byte version) { + + this.version = version; + this.doop = doop; + this.ar = ar; + + } + + private void marshall(byte[] raw) throws IOException { + ByteBuffer buf = ByteBuffer.wrap(raw); + if (this.version == 0) { + ar = new ArrayList(1); + byte b = buf.get(); + if (b == 0) + doop = 0; + else + doop = Main.CHUNK_LENGTH; + HashLocPair p = new HashLocPair(); + p.hash = new byte[HashFunctionPool.hashLength]; + buf.get(p.hash); + buf.get(); + p.hashloc = new byte[8]; + buf.get(p.hashloc); + p.pos = 0; + p.len = Main.CHUNK_LENGTH; + p.nlen = p.len; + p.offset = 0; + ar.add(p); + } else if (version == 1) { + this.doop = 
buf.getInt(); + ar = new ArrayList(); + byte[] hash = new byte[HashFunctionPool.hashLength + * HashFunctionPool.max_hash_cluster]; + buf.get(hash); + byte[] hashlocs = new byte[8 * HashFunctionPool.max_hash_cluster]; + buf.get(hashlocs); + ByteBuffer hb = ByteBuffer.wrap(hash); + ByteBuffer hl = ByteBuffer.wrap(hashlocs); + for (int z = 0; z < HashFunctionPool.max_hash_cluster; z++) { + byte[] _hash = new byte[HashFunctionPool.hashLength]; + byte[] _hl = new byte[8]; + hl.get(_hl); + + hb.get(_hash); + if (_hl[1] != 0) { + HashLocPair p = new HashLocPair(); + p.hash = _hash; + p.hashloc = _hl; + p.pos = -1; + ar.add(p); + } else + break; + } + + } else { + this.flags = buf.get(); + buf.getInt(); + int zlen = buf.getInt(); + ar = new ArrayList(zlen); + for (int i = 0; i < zlen; i++) { + byte[] b = new byte[HashLocPair.BAL]; + buf.get(b); + HashLocPair p = new HashLocPair(b); + ar.add(p); + int ep = p.pos + p.len; + if (ep > len) + len = ep; + } + doop = buf.getInt(); + } + } + + public int getDoop() { + return doop; + } + + public HashLocPair getWL(int _pos) throws IOException { + l.lock(); + try { + for (HashLocPair h : ar) { + int ep = h.pos + h.nlen; + if (_pos >= h.pos && _pos < ep) { + HashLocPair _h = h.clone(); + int os = _pos - _h.pos; + _h.offset += os; + _h.nlen -= os; + _h.pos = _pos; + return _h; + } + } + for (HashLocPair h : ar) { + SDFSLogger.getLog().warn(h); + } + throw new IOException("Position not found " + _pos); + } finally { + l.unlock(); + } + + } + + public static void insertHashLocPair(List ar, HashLocPair p) + throws IOException { + int ep = p.pos + p.nlen; + if (ep > Main.CHUNK_LENGTH) + throw new IOException("Overflow ep=" + ep); + ArrayList rm = null; + ArrayList am = null; + // SDFSLogger.getLog().info("p = " + p); + + for (HashLocPair h : ar) { + int hep = h.pos + h.nlen; + if (h.pos >= ep) + break; + else if (h.pos >= p.pos && hep <= ep) { + // SDFSLogger.getLog().info("0 removing h = " + h); + if (rm == null) + rm = new ArrayList(); + rm.add(h); + } else if (h.pos >= p.pos && h.pos < ep && hep > ep) { + int no = ep - h.pos; + // int oh = h.pos; + h.pos = ep; + h.offset += no; + h.nlen -= no; + + // SDFSLogger.getLog().info("2 changing pos from " +oh + // +" to " + h.pos + " offset = " + h.offset); + } else if (h.pos <= p.pos && hep > p.pos) { + if (hep > ep) { + int offset = ep - h.pos; + HashLocPair _h = h.clone(); + _h.offset += offset; + _h.nlen -= offset; + _h.pos = ep; + if (!Main.chunkStoreLocal) + _h.hashloc[0] = 1; + else + _h.setDup(true); + if (am == null) + am = new ArrayList(); + + am.add(_h); + } + if (h.pos < p.pos) { + h.nlen = (p.pos - h.pos); + } else { + if (rm == null) + rm = new ArrayList(); + rm.add(h); + } + } + if (h.isInvalid()) { + SDFSLogger.getLog().error("h = " + h.toString()); + } + } + if (rm != null) { + for (HashLocPair z : rm) { + ar.remove(z); + } + } + if (am != null) { + for (HashLocPair z : am) { + ar.add(z); + } + } + if (!Main.chunkStoreLocal) + p.hashloc[0] = 1; + else + p.setDup(true); + ar.add(p); + + Collections.sort(ar); + } + + public void putHash(HashLocPair p) throws IOException { + l.lock(); + try { + insertHashLocPair(ar, p); + this.flags = RECONSTRUCTED; + } finally { + l.unlock(); + } + } + + public void setRecontructed(boolean reconstructed) { + if (reconstructed) + this.flags = RECONSTRUCTED; + + } + + public byte[] getBytes() throws IOException { + l.lock(); + try { + if (this.version == 0) { + ByteBuffer buf = ByteBuffer + .wrap(new byte[LongByteArrayMap._FREE.length]); + if (doop > 0) + buf.put((byte) 
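insertHashLocPair() above keeps the pair list ordered by pos and resolves overlaps: entries fully covered by the new pair are dropped, entries that only partially overlap are trimmed (pos, offset and nlen adjusted), and an entry that extends past the new pair is split, with its tail cloned and re-added. A hedged sketch of that behaviour, assuming the list is a List<HashLocPair>, that the makePair helper below (not part of the patch) just fills the fields, and that the default Main.CHUNK_LENGTH is at least 4 KB:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.opendedup.hashing.HashFunctionPool;
import org.opendedup.sdfs.io.HashLocPair;
import org.opendedup.sdfs.io.SparseDataChunk;

public class InsertOverlapSketch {
	// Hypothetical helper for the sketch; sizes follow HashFunctionPool.hashLength.
	static HashLocPair makePair(int pos, int nlen) {
		HashLocPair p = new HashLocPair();
		p.hash = new byte[HashFunctionPool.hashLength];
		p.hashloc = new byte[8];
		p.pos = pos;
		p.offset = 0;
		p.len = nlen;
		p.nlen = nlen;
		return p;
	}

	public static void main(String[] args) throws IOException {
		List<HashLocPair> ar = new ArrayList<HashLocPair>();
		ar.add(makePair(0, 4096)); // one pair covering logical range [0, 4096)

		// Overwrite the middle range [1024, 2048) with a new pair.
		SparseDataChunk.insertHashLocPair(ar, makePair(1024, 1024));

		// Expected result: the original pair is trimmed to [0, 1024), the new pair covers
		// [1024, 2048), and a cloned tail covers [2048, 4096).
		for (HashLocPair h : ar)
			System.out.println(h);
	}
}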
1); + else + buf.put((byte) 0); + buf.put(ar.get(0).hash); + buf.put((byte) 0); + buf.put(ar.get(0).hashloc); + return buf.array(); + } else if (this.version == 1) { + ByteBuffer buf = ByteBuffer + .wrap(new byte[LongByteArrayMap._v1arrayLength]); + buf.putInt(doop); + for (HashLocPair p : ar) { + buf.put(p.hash); + } + for (HashLocPair p : ar) { + buf.put(p.hashloc); + } + return buf.array(); + + } else { + ByteBuffer buf = null; + buf = ByteBuffer.wrap(new byte[1 + 4 + 4 + 4 + + (ar.size() * HashLocPair.BAL)]); + this.prevdoop = this.doop; + this.doop = 0; + buf.put(this.flags); + buf.putInt(buf.capacity()); + buf.putInt(this.ar.size()); + Collections.sort(this.ar); + if (ar.size() > (LongByteArrayMap.MAX_ELEMENTS_PER_AR)) { + SDFSLogger.getLog().error( + "Buffer overflow ar size = " + ar.size() + + " max size = " + + (LongByteArrayMap.MAX_ELEMENTS_PER_AR)); + throw new IOException("Buffer overflow ar size = " + + ar.size() + " max size = " + + (LongByteArrayMap.MAX_ELEMENTS_PER_AR)); + } + this.len = 0; + for (HashLocPair p : ar) { + boolean dup = p.isDup(); + if (!Main.chunkStoreLocal && p.hashloc[0] == 1) + dup = true; + if (dup) + this.doop += p.nlen; + buf.put(p.asArray()); + this.len += p.nlen; + } + buf.putInt(this.doop); + return buf.array(); + } + } finally { + l.unlock(); + } + } + + public void setDoop(int doop) { + this.doop = doop; + } + + public long getFpos() { + return fpos; + } + + public void setFpos(long fpos) { + this.fpos = fpos; + } + + public List getFingers() { + return ar; + } + + @Override + public void readExternal(ObjectInput in) throws IOException, + ClassNotFoundException { + byte[] b = new byte[in.readInt()]; + this.marshall(b); + + } + + @Override + public void writeExternal(ObjectOutput out) throws IOException { + byte[] b = this.getBytes(); + out.writeInt(b.length); + out.write(b); + + } + + public int getPrevdoop() { + return prevdoop; + } + + public boolean isRecontructed() { + if (this.flags == 0) + return false; + else + return true; + } + +} diff --git a/src/org/opendedup/sdfs/io/SparseDedupFile.java b/src/org/opendedup/sdfs/io/SparseDedupFile.java index 8f8e01791..5a8f77640 100644 --- a/src/org/opendedup/sdfs/io/SparseDedupFile.java +++ b/src/org/opendedup/sdfs/io/SparseDedupFile.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
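For version 2 chunks, getBytes() above lays the record out as flags (1 byte), the buffer capacity (4), the pair count (4), one HashLocPair.BAL record per pair, and the doop total (4), and marshall() reads the same layout back. A round-trip sketch under those assumptions (field values illustrative):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.opendedup.hashing.HashFunctionPool;
import org.opendedup.sdfs.io.HashLocPair;
import org.opendedup.sdfs.io.SparseDataChunk;

public class SparseDataChunkRoundTrip {
	public static void main(String[] args) throws IOException {
		HashLocPair p = new HashLocPair();
		p.hash = new byte[HashFunctionPool.hashLength];
		p.hashloc = new byte[8];
		p.pos = 0;
		p.offset = 0;
		p.len = 4096;
		p.nlen = 4096;

		List<HashLocPair> ar = new ArrayList<HashLocPair>();
		ar.add(p);

		// Serialize as a version-2 chunk and check the expected record size.
		byte[] raw = new SparseDataChunk(0, ar, false, (byte) 2).getBytes();
		System.out.println(raw.length == 1 + 4 + 4 + 4 + HashLocPair.BAL); // true

		// Parse it back; marshall() recovers the pair list and the chunk length.
		SparseDataChunk copy = new SparseDataChunk(raw, (byte) 2);
		System.out.println(copy.getFingers().size()); // 1
		System.out.println(copy.len);                 // 4096 (pos + len of the single pair)
	}
}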
+ *******************************************************************************/ package org.opendedup.sdfs.io; import java.io.File; diff --git a/src/org/opendedup/sdfs/io/Volume.java b/src/org/opendedup/sdfs/io/Volume.java index d3e76771e..6fe5a9ec4 100644 --- a/src/org/opendedup/sdfs/io/Volume.java +++ b/src/org/opendedup/sdfs/io/Volume.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io; import java.io.File; diff --git a/src/org/opendedup/sdfs/io/VolumeConfigWriterThread.java b/src/org/opendedup/sdfs/io/VolumeConfigWriterThread.java index bff62e00d..14e53b14d 100644 --- a/src/org/opendedup/sdfs/io/VolumeConfigWriterThread.java +++ b/src/org/opendedup/sdfs/io/VolumeConfigWriterThread.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io; import java.io.File; diff --git a/src/org/opendedup/sdfs/io/VolumeFullThread.java b/src/org/opendedup/sdfs/io/VolumeFullThread.java index c6c879e54..bb7a7947f 100644 --- a/src/org/opendedup/sdfs/io/VolumeFullThread.java +++ b/src/org/opendedup/sdfs/io/VolumeFullThread.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io; import org.opendedup.logging.SDFSLogger; diff --git a/src/org/opendedup/sdfs/io/VolumeListenerInterface.java b/src/org/opendedup/sdfs/io/VolumeListenerInterface.java index a93d3fc5f..1fd4bb946 100644 --- a/src/org/opendedup/sdfs/io/VolumeListenerInterface.java +++ b/src/org/opendedup/sdfs/io/VolumeListenerInterface.java @@ -1,31 +1,49 @@ -package org.opendedup.sdfs.io; - -public interface VolumeListenerInterface { - void actualWriteBytesChanged(long change, double current, Volume vol); - - void duplicateBytesChanged(long change, double current, Volume vol); - - void readBytesChanged(long change, double current, Volume vol); - - void rIOChanged(long change, double current, Volume vol); - - void wIOChanged(long change, double current, Volume vol); - - void virtualBytesWrittenChanged(long change, double current, Volume vol); - - void allowExternalSymLinksChanged(boolean symlink, Volume vol); - - void capacityChanged(long capacity, Volume vol); - - void currentSizeChanged(long capacity, Volume vol); - - void usePerMonChanged(boolean perf, Volume vol); - - void started(Volume vol); - - void mounted(Volume vol); - - void unmounted(Volume vol); - - void stopped(Volume vol); -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.io; + +public interface VolumeListenerInterface { + void actualWriteBytesChanged(long change, double current, Volume vol); + + void duplicateBytesChanged(long change, double current, Volume vol); + + void readBytesChanged(long change, double current, Volume vol); + + void rIOChanged(long change, double current, Volume vol); + + void wIOChanged(long change, double current, Volume vol); + + void virtualBytesWrittenChanged(long change, double current, Volume vol); + + void allowExternalSymLinksChanged(boolean symlink, Volume vol); + + void capacityChanged(long capacity, Volume vol); + + void currentSizeChanged(long capacity, Volume vol); + + void usePerMonChanged(boolean perf, Volume vol); + + void started(Volume vol); + + void mounted(Volume vol); + + void unmounted(Volume vol); + + void stopped(Volume vol); +} diff --git a/src/org/opendedup/sdfs/io/WritableCacheBuffer.java b/src/org/opendedup/sdfs/io/WritableCacheBuffer.java index 82cbfb894..e4e8639fa 100755 --- a/src/org/opendedup/sdfs/io/WritableCacheBuffer.java +++ b/src/org/opendedup/sdfs/io/WritableCacheBuffer.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/io/events/CloudSyncDLRequest.java b/src/org/opendedup/sdfs/io/events/CloudSyncDLRequest.java index bc281a3c4..65767cb8d 100644 --- a/src/org/opendedup/sdfs/io/events/CloudSyncDLRequest.java +++ b/src/org/opendedup/sdfs/io/events/CloudSyncDLRequest.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
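VolumeListenerInterface above is a pure callback contract with no default methods, so a consumer has to supply every method. A common pattern, sketched here and not part of the patch, is a no-op adapter that concrete listeners extend, overriding only the callbacks they care about:

import org.opendedup.sdfs.io.Volume;
import org.opendedup.sdfs.io.VolumeListenerInterface;

// Sketch: no-op adapter so a listener only overrides what it needs.
public class VolumeListenerAdapter implements VolumeListenerInterface {
	public void actualWriteBytesChanged(long change, double current, Volume vol) {}
	public void duplicateBytesChanged(long change, double current, Volume vol) {}
	public void readBytesChanged(long change, double current, Volume vol) {}
	public void rIOChanged(long change, double current, Volume vol) {}
	public void wIOChanged(long change, double current, Volume vol) {}
	public void virtualBytesWrittenChanged(long change, double current, Volume vol) {}
	public void allowExternalSymLinksChanged(boolean symlink, Volume vol) {}
	public void capacityChanged(long capacity, Volume vol) {}
	public void currentSizeChanged(long capacity, Volume vol) {}
	public void usePerMonChanged(boolean perf, Volume vol) {}
	public void started(Volume vol) {}
	public void mounted(Volume vol) {}
	public void unmounted(Volume vol) {}
	public void stopped(Volume vol) {}
}

A concrete listener would then extend the adapter, override for example capacityChanged to raise an alert, and be registered with the Volume; the registration call itself is outside this hunk.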
+ *******************************************************************************/ package org.opendedup.sdfs.io.events; public class CloudSyncDLRequest { diff --git a/src/org/opendedup/sdfs/io/events/GenericEvent.java b/src/org/opendedup/sdfs/io/events/GenericEvent.java index e8a21dbf2..3a2667d0e 100644 --- a/src/org/opendedup/sdfs/io/events/GenericEvent.java +++ b/src/org/opendedup/sdfs/io/events/GenericEvent.java @@ -1,35 +1,53 @@ -package org.opendedup.sdfs.io.events; - -import java.util.concurrent.atomic.AtomicLong; - -import org.opendedup.sdfs.Main; - -import com.google.gson.JsonObject; - -public class GenericEvent { - private long sequence; - private static final AtomicLong sq = new AtomicLong(0); - private static final long MAX = Long.MAX_VALUE - (100000); - - public GenericEvent() { - sequence = sq.incrementAndGet(); - if (sequence >= MAX) { - synchronized (sq) { - if (sequence >= MAX) { - sq.set(0); - } - } - } - - } - - public JsonObject toJSONObject() { - JsonObject dataset = new JsonObject(); - dataset.addProperty("sequence", sequence); - dataset.addProperty("volumeid", Long.toString(Main.DSEID)); - dataset.addProperty("timestamp", - Long.toString(System.currentTimeMillis())); - return dataset; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.io.events; + +import java.util.concurrent.atomic.AtomicLong; + +import org.opendedup.sdfs.Main; + +import com.google.gson.JsonObject; + +public class GenericEvent { + private long sequence; + private static final AtomicLong sq = new AtomicLong(0); + private static final long MAX = Long.MAX_VALUE - (100000); + + public GenericEvent() { + sequence = sq.incrementAndGet(); + if (sequence >= MAX) { + synchronized (sq) { + if (sequence >= MAX) { + sq.set(0); + } + } + } + + } + + public JsonObject toJSONObject() { + JsonObject dataset = new JsonObject(); + dataset.addProperty("sequence", sequence); + dataset.addProperty("volumeid", Long.toString(Main.DSEID)); + dataset.addProperty("timestamp", + Long.toString(System.currentTimeMillis())); + return dataset; + } + +} diff --git a/src/org/opendedup/sdfs/io/events/MFileDeleted.java b/src/org/opendedup/sdfs/io/events/MFileDeleted.java index 640ff6833..eeb64711f 100644 --- a/src/org/opendedup/sdfs/io/events/MFileDeleted.java +++ b/src/org/opendedup/sdfs/io/events/MFileDeleted.java @@ -1,44 +1,62 @@ -package org.opendedup.sdfs.io.events; - -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.io.MetaDataDedupFile; - -import com.google.gson.FieldNamingPolicy; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonObject; - -public class MFileDeleted extends GenericEvent { - - public MetaDataDedupFile mf; - public boolean dir; - private static final int pl = Main.volume.getPath().length(); - - public MFileDeleted(MetaDataDedupFile f) { - super(); - this.mf = f; - } - - public MFileDeleted(MetaDataDedupFile f, boolean dir) { - super(); - this.mf = f; - this.dir = dir; - } - - public String toJSON() { - JsonObject dataset = this.toJSONObject(); - dataset.addProperty("actionType", "mfileDelete"); - dataset.addProperty("object", mf.getPath().substring(pl)); - if (mf.isSymlink()) - dataset.addProperty("fileType", "symlink"); - else if (this.dir) - dataset.addProperty("fileType", "dir"); - else - dataset.addProperty("fileType", "file"); - Gson gson = new GsonBuilder().setPrettyPrinting().serializeNulls() - .setFieldNamingPolicy(FieldNamingPolicy.UPPER_CAMEL_CASE) - .create(); - return gson.toJson(dataset); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
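GenericEvent above stamps every event with a monotonically increasing sequence number, the volume id and a timestamp, and subclasses such as MFileDeleted add their own properties before serializing with Gson. A sketch of a minimal subclass following the same pattern; the VolumeMountedEvent class and its "volumeMounted" action type are hypothetical, only the property names come from the hunks above:

import org.opendedup.sdfs.io.events.GenericEvent;

import com.google.gson.FieldNamingPolicy;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonObject;

public class VolumeMountedEvent extends GenericEvent {
	private final String mountPoint;

	public VolumeMountedEvent(String mountPoint) {
		super(); // assigns the sequence number
		this.mountPoint = mountPoint;
	}

	public String toJSON() {
		JsonObject dataset = this.toJSONObject();           // sequence, volumeid, timestamp
		dataset.addProperty("actionType", "volumeMounted"); // hypothetical action type
		dataset.addProperty("object", this.mountPoint);
		Gson gson = new GsonBuilder().setPrettyPrinting().serializeNulls()
				.setFieldNamingPolicy(FieldNamingPolicy.UPPER_CAMEL_CASE).create();
		// Roughly: { "sequence": ..., "volumeid": "...", "timestamp": "...",
		//            "actionType": "volumeMounted", "object": "<mount point>" }
		return gson.toJson(dataset);
	}
}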
+ *******************************************************************************/ +package org.opendedup.sdfs.io.events; + +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.io.MetaDataDedupFile; + +import com.google.gson.FieldNamingPolicy; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonObject; + +public class MFileDeleted extends GenericEvent { + + public MetaDataDedupFile mf; + public boolean dir; + private static final int pl = Main.volume.getPath().length(); + + public MFileDeleted(MetaDataDedupFile f) { + super(); + this.mf = f; + } + + public MFileDeleted(MetaDataDedupFile f, boolean dir) { + super(); + this.mf = f; + this.dir = dir; + } + + public String toJSON() { + JsonObject dataset = this.toJSONObject(); + dataset.addProperty("actionType", "mfileDelete"); + dataset.addProperty("object", mf.getPath().substring(pl)); + if (mf.isSymlink()) + dataset.addProperty("fileType", "symlink"); + else if (this.dir) + dataset.addProperty("fileType", "dir"); + else + dataset.addProperty("fileType", "file"); + Gson gson = new GsonBuilder().setPrettyPrinting().serializeNulls() + .setFieldNamingPolicy(FieldNamingPolicy.UPPER_CAMEL_CASE) + .create(); + return gson.toJson(dataset); + } + +} diff --git a/src/org/opendedup/sdfs/io/events/MFileRenamed.java b/src/org/opendedup/sdfs/io/events/MFileRenamed.java index 871840e2d..093f1a160 100644 --- a/src/org/opendedup/sdfs/io/events/MFileRenamed.java +++ b/src/org/opendedup/sdfs/io/events/MFileRenamed.java @@ -1,42 +1,60 @@ -package org.opendedup.sdfs.io.events; - -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.io.MetaDataDedupFile; - -import com.google.gson.FieldNamingPolicy; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonObject; - -public class MFileRenamed extends GenericEvent { - - public MetaDataDedupFile mf; - public String from; - public String to; - private static final int pl = Main.volume.getPath().length(); - - public MFileRenamed(MetaDataDedupFile f, String from, String to) { - super(); - this.mf = f; - this.from = from; - this.to = to; - } - - public String toJSON() { - JsonObject dataset = this.toJSONObject(); - dataset.addProperty("actionType", "mfileRename"); - dataset.addProperty("object", mf.getPath().substring(pl)); - dataset.addProperty("from", this.from); - dataset.addProperty("to", this.to); - if (mf.isSymlink()) - dataset.addProperty("fileType", "symlink"); - else if (mf.isDirectory()) - dataset.addProperty("fileType", "dir"); - else - dataset.addProperty("fileType", "file"); - Gson gson = new GsonBuilder().setPrettyPrinting().serializeNulls() - .setFieldNamingPolicy(FieldNamingPolicy.UPPER_CAMEL_CASE) - .create(); - return gson.toJson(dataset); - } -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.io.events; + +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.io.MetaDataDedupFile; + +import com.google.gson.FieldNamingPolicy; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonObject; + +public class MFileRenamed extends GenericEvent { + + public MetaDataDedupFile mf; + public String from; + public String to; + private static final int pl = Main.volume.getPath().length(); + + public MFileRenamed(MetaDataDedupFile f, String from, String to) { + super(); + this.mf = f; + this.from = from; + this.to = to; + } + + public String toJSON() { + JsonObject dataset = this.toJSONObject(); + dataset.addProperty("actionType", "mfileRename"); + dataset.addProperty("object", mf.getPath().substring(pl)); + dataset.addProperty("from", this.from); + dataset.addProperty("to", this.to); + if (mf.isSymlink()) + dataset.addProperty("fileType", "symlink"); + else if (mf.isDirectory()) + dataset.addProperty("fileType", "dir"); + else + dataset.addProperty("fileType", "file"); + Gson gson = new GsonBuilder().setPrettyPrinting().serializeNulls() + .setFieldNamingPolicy(FieldNamingPolicy.UPPER_CAMEL_CASE) + .create(); + return gson.toJson(dataset); + } +} diff --git a/src/org/opendedup/sdfs/io/events/MFileSync.java b/src/org/opendedup/sdfs/io/events/MFileSync.java index 91254c169..7d3fc20ef 100644 --- a/src/org/opendedup/sdfs/io/events/MFileSync.java +++ b/src/org/opendedup/sdfs/io/events/MFileSync.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.sdfs.io.events; import org.opendedup.sdfs.io.MetaDataDedupFile; diff --git a/src/org/opendedup/sdfs/io/events/MFileWritten.java b/src/org/opendedup/sdfs/io/events/MFileWritten.java index 3c91a8751..8e2064bcd 100644 --- a/src/org/opendedup/sdfs/io/events/MFileWritten.java +++ b/src/org/opendedup/sdfs/io/events/MFileWritten.java @@ -1,38 +1,56 @@ -package org.opendedup.sdfs.io.events; - -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.io.MetaDataDedupFile; - -import com.google.gson.FieldNamingPolicy; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonObject; - -public class MFileWritten extends GenericEvent { - private static final int pl = Main.volume.getPath().length(); - public MetaDataDedupFile mf; - - public MFileWritten(MetaDataDedupFile f) { - super(); - this.mf = f; - } - - public String toJSON() { - JsonObject dataset = this.toJSONObject(); - dataset.addProperty("actionType", "mfileWritten"); - dataset.addProperty("object", mf.getPath().substring(pl)); - - if (mf.isSymlink()) - dataset.addProperty("fileType", "symlink"); - else if (mf.isDirectory()) - dataset.addProperty("fileType", "dir"); - else { - dataset.addProperty("fileType", "file"); - dataset.addProperty("size", mf.length()); - } - Gson gson = new GsonBuilder().setPrettyPrinting().serializeNulls() - .setFieldNamingPolicy(FieldNamingPolicy.UPPER_CAMEL_CASE) - .create(); - return gson.toJson(dataset); - } -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.io.events; + +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.io.MetaDataDedupFile; + +import com.google.gson.FieldNamingPolicy; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonObject; + +public class MFileWritten extends GenericEvent { + private static final int pl = Main.volume.getPath().length(); + public MetaDataDedupFile mf; + + public MFileWritten(MetaDataDedupFile f) { + super(); + this.mf = f; + } + + public String toJSON() { + JsonObject dataset = this.toJSONObject(); + dataset.addProperty("actionType", "mfileWritten"); + dataset.addProperty("object", mf.getPath().substring(pl)); + + if (mf.isSymlink()) + dataset.addProperty("fileType", "symlink"); + else if (mf.isDirectory()) + dataset.addProperty("fileType", "dir"); + else { + dataset.addProperty("fileType", "file"); + dataset.addProperty("size", mf.length()); + } + Gson gson = new GsonBuilder().setPrettyPrinting().serializeNulls() + .setFieldNamingPolicy(FieldNamingPolicy.UPPER_CAMEL_CASE) + .create(); + return gson.toJson(dataset); + } +} diff --git a/src/org/opendedup/sdfs/io/events/SFileDeleted.java b/src/org/opendedup/sdfs/io/events/SFileDeleted.java index 038c50462..e2feed19d 100644 --- a/src/org/opendedup/sdfs/io/events/SFileDeleted.java +++ b/src/org/opendedup/sdfs/io/events/SFileDeleted.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io.events; import java.io.File; diff --git a/src/org/opendedup/sdfs/io/events/SFileSync.java b/src/org/opendedup/sdfs/io/events/SFileSync.java index 970ed6164..99316152d 100644 --- a/src/org/opendedup/sdfs/io/events/SFileSync.java +++ b/src/org/opendedup/sdfs/io/events/SFileSync.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.sdfs.io.events; import java.io.File; diff --git a/src/org/opendedup/sdfs/io/events/SFileWritten.java b/src/org/opendedup/sdfs/io/events/SFileWritten.java index 9d35034b9..80efd6b0c 100644 --- a/src/org/opendedup/sdfs/io/events/SFileWritten.java +++ b/src/org/opendedup/sdfs/io/events/SFileWritten.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.io.events; import org.opendedup.sdfs.Main; diff --git a/src/org/opendedup/sdfs/io/events/VolumeWritten.java b/src/org/opendedup/sdfs/io/events/VolumeWritten.java index 467a20b90..72db10711 100644 --- a/src/org/opendedup/sdfs/io/events/VolumeWritten.java +++ b/src/org/opendedup/sdfs/io/events/VolumeWritten.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.sdfs.io.events; import org.opendedup.sdfs.io.Volume; diff --git a/src/org/opendedup/sdfs/mgmt/cli/ProcessConnectedVolumes.java b/src/org/opendedup/sdfs/mgmt/cli/ProcessConnectedVolumes.java index 9ececd80d..b4adf69f2 100644 --- a/src/org/opendedup/sdfs/mgmt/cli/ProcessConnectedVolumes.java +++ b/src/org/opendedup/sdfs/mgmt/cli/ProcessConnectedVolumes.java @@ -2,7 +2,7 @@ import java.util.Formatter; -import org.opendedup.util.XMLUtils; + import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.NodeList; @@ -19,7 +19,7 @@ public static void runCmd() { formatter.format("file=%s&cmd=connectedvolumes", "null"); Document doc = MgmtServerConnection.getResponse(sb.toString()); Element root = doc.getDocumentElement(); - System.out.println(XMLUtils.toXMLString(doc)); + //System.out.println(XMLUtils.toXMLString(doc)); formatter.close(); if (root.getAttribute("status").equals("failed")) System.out.println(root.getAttribute("msg")); diff --git a/src/org/opendedup/sdfs/mgmt/websocket/DDBUpdate.java b/src/org/opendedup/sdfs/mgmt/websocket/DDBUpdate.java index c463d52ef..6c7d41dc1 100644 --- a/src/org/opendedup/sdfs/mgmt/websocket/DDBUpdate.java +++ b/src/org/opendedup/sdfs/mgmt/websocket/DDBUpdate.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.mgmt.websocket; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpdate.java b/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpdate.java index a38fbb4c0..829ceca33 100644 --- a/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpdate.java +++ b/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpdate.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.sdfs.mgmt.websocket; import org.simpleframework.http.socket.Session; diff --git a/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpload.java b/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpload.java index eef661296..102af6835 100644 --- a/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpload.java +++ b/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpload.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.mgmt.websocket; import org.simpleframework.http.socket.Session; diff --git a/src/org/opendedup/sdfs/mgmt/websocket/PingService.java b/src/org/opendedup/sdfs/mgmt/websocket/PingService.java index eae1fab9c..d02500cc3 100644 --- a/src/org/opendedup/sdfs/mgmt/websocket/PingService.java +++ b/src/org/opendedup/sdfs/mgmt/websocket/PingService.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.mgmt.websocket; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/monitor/IOMeter.java b/src/org/opendedup/sdfs/monitor/IOMeter.java index 3d1e6e506..4306ffd5e 100755 --- a/src/org/opendedup/sdfs/monitor/IOMeter.java +++ b/src/org/opendedup/sdfs/monitor/IOMeter.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.monitor; import java.io.BufferedOutputStream; diff --git a/src/org/opendedup/sdfs/monitor/IOMonitor.java b/src/org/opendedup/sdfs/monitor/IOMonitor.java index 3b284ef0b..732cb20a6 100755 --- a/src/org/opendedup/sdfs/monitor/IOMonitor.java +++ b/src/org/opendedup/sdfs/monitor/IOMonitor.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.monitor; import java.nio.ByteBuffer; diff --git a/src/org/opendedup/sdfs/monitor/IOMonitorListener.java b/src/org/opendedup/sdfs/monitor/IOMonitorListener.java index 27c7792c8..8e8f0a6c5 100644 --- a/src/org/opendedup/sdfs/monitor/IOMonitorListener.java +++ b/src/org/opendedup/sdfs/monitor/IOMonitorListener.java @@ -1,45 +1,63 @@ -package org.opendedup.sdfs.monitor; - -public interface IOMonitorListener { - void actualBytesWrittenChanged(long total, int change, IOMonitor mon); - - void bytesReadChanged(long total, int change, IOMonitor mon); - - void duplicateBlockChanged(long total, IOMonitor mon); - - void rioChanged(long total, IOMonitor mon); - - void virtualBytesWrittenChanged(long total, int change, IOMonitor mon); - - void wioChanged(long total, IOMonitor mon); - - void clearAllCountersExecuted(long total, IOMonitor mon); - - void clearFileCountersExecuted(long total, IOMonitor mon); - - void removeDuplicateBlockChanged(long total, IOMonitor mon); - - void actualBytesWrittenChanged(long total, long change, IOMonitor mon); - - void bytesReadChanged(long total, long change, IOMonitor mon); - - void duplicateBlockChanged(long total, long change, IOMonitor mon); - - void virtualBytesWrittenChanged(long total, long change, IOMonitor mon); - - void riopsChanged(int iops, int changed, IOMonitor mon); - - void wiopsChanged(int iops, int changed, IOMonitor mon); - - void iopsChanged(int iops, int changed, IOMonitor mon); - - void rmbpsChanged(long mbps, int changed, IOMonitor mon); - - void wmbpsChanged(long mbps, int changed, IOMonitor mon); - - void mbpsChanged(long mbps, int changed, IOMonitor mon); - - void qosChanged(int old, int newQos, IOMonitor mon); - - void ioProfileChanged(String old, String newProf, IOMonitor mon); -} 
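The IOMonitorListener interface removed above is re-added unchanged below the new license header; each callback reports a cumulative total (or a new value), the change since the previous update, and the IOMonitor that fired it. As a rough sketch of how a consumer could plug into it, here is a minimal listener that logs aggregate throughput and IOPS and ignores the other callbacks. The class is not part of this patch and its name is made up; it only assumes the interface signatures exactly as shown in the diff.

```java
package org.opendedup.sdfs.monitor;

import java.util.logging.Logger;

// Illustrative only -- not part of this change set. Logs aggregate throughput
// and IOPS updates and treats every other IOMonitorListener callback as a no-op.
public class LoggingIOMonitorListener implements IOMonitorListener {
	private static final Logger log = Logger.getLogger(LoggingIOMonitorListener.class.getName());

	@Override
	public void mbpsChanged(long mbps, int changed, IOMonitor mon) {
		log.info("throughput now " + mbps + " MB/s (changed by " + changed + ")");
	}

	@Override
	public void iopsChanged(int iops, int changed, IOMonitor mon) {
		log.info("IOPS now " + iops + " (changed by " + changed + ")");
	}

	// The remaining callbacks are intentionally left as no-ops.
	@Override public void actualBytesWrittenChanged(long total, int change, IOMonitor mon) { }
	@Override public void bytesReadChanged(long total, int change, IOMonitor mon) { }
	@Override public void duplicateBlockChanged(long total, IOMonitor mon) { }
	@Override public void rioChanged(long total, IOMonitor mon) { }
	@Override public void virtualBytesWrittenChanged(long total, int change, IOMonitor mon) { }
	@Override public void wioChanged(long total, IOMonitor mon) { }
	@Override public void clearAllCountersExecuted(long total, IOMonitor mon) { }
	@Override public void clearFileCountersExecuted(long total, IOMonitor mon) { }
	@Override public void removeDuplicateBlockChanged(long total, IOMonitor mon) { }
	@Override public void actualBytesWrittenChanged(long total, long change, IOMonitor mon) { }
	@Override public void bytesReadChanged(long total, long change, IOMonitor mon) { }
	@Override public void duplicateBlockChanged(long total, long change, IOMonitor mon) { }
	@Override public void virtualBytesWrittenChanged(long total, long change, IOMonitor mon) { }
	@Override public void riopsChanged(int iops, int changed, IOMonitor mon) { }
	@Override public void wiopsChanged(int iops, int changed, IOMonitor mon) { }
	@Override public void rmbpsChanged(long mbps, int changed, IOMonitor mon) { }
	@Override public void wmbpsChanged(long mbps, int changed, IOMonitor mon) { }
	@Override public void qosChanged(int old, int newQos, IOMonitor mon) { }
	@Override public void ioProfileChanged(String old, String newProf, IOMonitor mon) { }
}
```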
+/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.monitor; + +public interface IOMonitorListener { + void actualBytesWrittenChanged(long total, int change, IOMonitor mon); + + void bytesReadChanged(long total, int change, IOMonitor mon); + + void duplicateBlockChanged(long total, IOMonitor mon); + + void rioChanged(long total, IOMonitor mon); + + void virtualBytesWrittenChanged(long total, int change, IOMonitor mon); + + void wioChanged(long total, IOMonitor mon); + + void clearAllCountersExecuted(long total, IOMonitor mon); + + void clearFileCountersExecuted(long total, IOMonitor mon); + + void removeDuplicateBlockChanged(long total, IOMonitor mon); + + void actualBytesWrittenChanged(long total, long change, IOMonitor mon); + + void bytesReadChanged(long total, long change, IOMonitor mon); + + void duplicateBlockChanged(long total, long change, IOMonitor mon); + + void virtualBytesWrittenChanged(long total, long change, IOMonitor mon); + + void riopsChanged(int iops, int changed, IOMonitor mon); + + void wiopsChanged(int iops, int changed, IOMonitor mon); + + void iopsChanged(int iops, int changed, IOMonitor mon); + + void rmbpsChanged(long mbps, int changed, IOMonitor mon); + + void wmbpsChanged(long mbps, int changed, IOMonitor mon); + + void mbpsChanged(long mbps, int changed, IOMonitor mon); + + void qosChanged(int old, int newQos, IOMonitor mon); + + void ioProfileChanged(String old, String newProf, IOMonitor mon); +} diff --git a/src/org/opendedup/sdfs/monitor/VolumeIOMeter.java b/src/org/opendedup/sdfs/monitor/VolumeIOMeter.java index b2cff5b5e..be16b1712 100644 --- a/src/org/opendedup/sdfs/monitor/VolumeIOMeter.java +++ b/src/org/opendedup/sdfs/monitor/VolumeIOMeter.java @@ -1,103 +1,121 @@ -package org.opendedup.sdfs.monitor; - -import java.io.IOException; -import java.lang.management.ManagementFactory; - -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.RollingFileAppender; -import org.opendedup.logging.JSONVolPerfLayout; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.io.Volume; -import org.opendedup.sdfs.servers.HCServiceProxy; -import org.slf4j.MDC; - -import com.sun.management.UnixOperatingSystemMXBean; - -public class VolumeIOMeter implements Runnable { - - private Volume vol; - private long bytesRead = 0, bytesWritten = 0, virtualBytesWritten = 0, - RIOPS = 0, WIOPS = 0, duplicateBytes = 0, dseSz = 0, dseCompSz = 0; - private double pbytesRead = 0, pbytesWritten = 0, pvirtualBytesWritten = 0, - pRIOPS = 0, pWIOPS = 0, pduplicateBytes = 0; - private Logger log = Logger.getLogger("volperflog"); - private boolean closed = false; - Thread th = null; - 
UnixOperatingSystemMXBean perf = (UnixOperatingSystemMXBean) ManagementFactory - .getOperatingSystemMXBean(); - - public VolumeIOMeter(Volume vol) { - RollingFileAppender app = null; - try { - app = new RollingFileAppender(new JSONVolPerfLayout(), - vol.getPerfMonFile(), true); - app.setMaxBackupIndex(2); - app.setMaxFileSize("10MB"); - } catch (IOException e) { - log.debug("unable to change appender", e); - } - this.vol = vol; - log.addAppender(app); - log.setLevel(Level.INFO); - th = new Thread(this); - th.start(); - } - - public void run() { - while (!closed) { - try { - Thread.sleep(15 * 1000); - - this.calPerf(); - } catch (Exception e) { - SDFSLogger.getLog().warn( - "Exception in " + this.getClass().getName(), e); - this.closed = true; - } - } - } - - private void calPerf() { - this.bytesRead = (long) (vol.getReadBytes() - this.pbytesRead); - this.pbytesRead = vol.getReadBytes(); - MDC.put("bytesRead", Long.toString(bytesRead)); - this.bytesWritten = (long) (vol.getActualWriteBytes() - this.pbytesWritten); - this.pbytesWritten = vol.getActualWriteBytes(); - MDC.put("bytesWritten", Long.toString(this.bytesWritten)); - this.duplicateBytes = (long) (vol.getDuplicateBytes() - this.pduplicateBytes); - this.pduplicateBytes = vol.getDuplicateBytes(); - MDC.put("duplicateBytes", Long.toString(this.duplicateBytes)); - this.virtualBytesWritten = (long) (vol.getVirtualBytesWritten() - this.pvirtualBytesWritten); - this.pvirtualBytesWritten = vol.getVirtualBytesWritten(); - MDC.put("virtualBytesWritten", Long.toString(this.virtualBytesWritten)); - this.RIOPS = (long) (vol.getReadOperations() - this.pRIOPS); - this.pRIOPS = vol.getReadOperations(); - MDC.put("RIOPS", Long.toString(this.RIOPS)); - this.WIOPS = (long) (vol.getWriteOperations() - this.pWIOPS); - this.pWIOPS = vol.getWriteOperations(); - this.dseSz = HCServiceProxy.getDSESize(); - this.dseCompSz = HCServiceProxy.getDSECompressedSize(); - MDC.put("dseSz", Long.toString(this.dseSz)); - MDC.put("dseCompSz", Long.toString(this.dseCompSz)); - MDC.put("WIOPS", Long.toString(this.WIOPS)); - MDC.put("sdfsCpuLoad", Double.toString(perf.getProcessCpuLoad())); - MDC.put("sdfsCpuTime", Double.toString(perf.getProcessCpuTime())); - MDC.put("systemCpuLoad", Double.toString(perf.getSystemCpuLoad())); - MDC.put("systemCpuAverage", - Double.toString(perf.getSystemLoadAverage())); - MDC.put("freeMemory", Long.toString(perf.getFreePhysicalMemorySize())); - MDC.put("totalMemory", Long.toString(perf.getTotalPhysicalMemorySize())); - MDC.put("freeSwap", Long.toString(perf.getFreeSwapSpaceSize())); - MDC.put("totalSwap", Long.toString(perf.getTotalSwapSpaceSize())); - log.info(vol.getName()); - MDC.clear(); - } - - public void close() { - this.closed = true; - th.interrupt(); - - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
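VolumeIOMeter, shown above in its old form and below unchanged apart from the header, wakes every 15 seconds, subtracts its previous sample of each cumulative Volume counter from the current value, and pushes the per-interval deltas into log4j's MDC before emitting one JSON record per interval. The following is a small standalone sketch of that delta-sampling pattern only; the class name and the plain LongSupplier standing in for the Volume getters are illustrative and not part of this patch.

```java
import java.util.function.LongSupplier;

// Illustrative only: samples a cumulative counter on a fixed interval and reports
// the delta, the same pattern VolumeIOMeter.calPerf() applies to vol.getReadBytes(),
// vol.getActualWriteBytes(), and the other Volume counters.
public class DeltaSampler implements Runnable {
	private final LongSupplier cumulative; // e.g. () -> vol.getReadBytes()
	private final long intervalMs;
	private long previous = 0;
	private volatile boolean closed = false;

	public DeltaSampler(LongSupplier cumulative, long intervalMs) {
		this.cumulative = cumulative;
		this.intervalMs = intervalMs;
	}

	@Override
	public void run() {
		while (!closed) {
			try {
				Thread.sleep(intervalMs);
				long current = cumulative.getAsLong();
				long delta = current - previous; // work done during this interval
				previous = current;
				System.out.println("delta=" + delta);
			} catch (InterruptedException e) {
				closed = true; // mirrors VolumeIOMeter.close() interrupting its thread
			}
		}
	}

	public void close() {
		closed = true;
	}
}
```

VolumeIOMeter keeps the previous samples in double fields and casts the difference back to long; the sketch keeps everything as long for simplicity.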
+ * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.monitor; + +import java.io.IOException; +import java.lang.management.ManagementFactory; + +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.log4j.RollingFileAppender; +import org.opendedup.logging.JSONVolPerfLayout; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.io.Volume; +import org.opendedup.sdfs.servers.HCServiceProxy; +import org.slf4j.MDC; + +import com.sun.management.UnixOperatingSystemMXBean; + +public class VolumeIOMeter implements Runnable { + + private Volume vol; + private long bytesRead = 0, bytesWritten = 0, virtualBytesWritten = 0, + RIOPS = 0, WIOPS = 0, duplicateBytes = 0, dseSz = 0, dseCompSz = 0; + private double pbytesRead = 0, pbytesWritten = 0, pvirtualBytesWritten = 0, + pRIOPS = 0, pWIOPS = 0, pduplicateBytes = 0; + private Logger log = Logger.getLogger("volperflog"); + private boolean closed = false; + Thread th = null; + UnixOperatingSystemMXBean perf = (UnixOperatingSystemMXBean) ManagementFactory + .getOperatingSystemMXBean(); + + public VolumeIOMeter(Volume vol) { + RollingFileAppender app = null; + try { + app = new RollingFileAppender(new JSONVolPerfLayout(), + vol.getPerfMonFile(), true); + app.setMaxBackupIndex(2); + app.setMaxFileSize("10MB"); + } catch (IOException e) { + log.debug("unable to change appender", e); + } + this.vol = vol; + log.addAppender(app); + log.setLevel(Level.INFO); + th = new Thread(this); + th.start(); + } + + public void run() { + while (!closed) { + try { + Thread.sleep(15 * 1000); + + this.calPerf(); + } catch (Exception e) { + SDFSLogger.getLog().warn( + "Exception in " + this.getClass().getName(), e); + this.closed = true; + } + } + } + + private void calPerf() { + this.bytesRead = (long) (vol.getReadBytes() - this.pbytesRead); + this.pbytesRead = vol.getReadBytes(); + MDC.put("bytesRead", Long.toString(bytesRead)); + this.bytesWritten = (long) (vol.getActualWriteBytes() - this.pbytesWritten); + this.pbytesWritten = vol.getActualWriteBytes(); + MDC.put("bytesWritten", Long.toString(this.bytesWritten)); + this.duplicateBytes = (long) (vol.getDuplicateBytes() - this.pduplicateBytes); + this.pduplicateBytes = vol.getDuplicateBytes(); + MDC.put("duplicateBytes", Long.toString(this.duplicateBytes)); + this.virtualBytesWritten = (long) (vol.getVirtualBytesWritten() - this.pvirtualBytesWritten); + this.pvirtualBytesWritten = vol.getVirtualBytesWritten(); + MDC.put("virtualBytesWritten", Long.toString(this.virtualBytesWritten)); + this.RIOPS = (long) (vol.getReadOperations() - this.pRIOPS); + this.pRIOPS = vol.getReadOperations(); + MDC.put("RIOPS", Long.toString(this.RIOPS)); + this.WIOPS = (long) (vol.getWriteOperations() - this.pWIOPS); + this.pWIOPS = vol.getWriteOperations(); + this.dseSz = HCServiceProxy.getDSESize(); + this.dseCompSz = HCServiceProxy.getDSECompressedSize(); + MDC.put("dseSz", Long.toString(this.dseSz)); + MDC.put("dseCompSz", Long.toString(this.dseCompSz)); + MDC.put("WIOPS", Long.toString(this.WIOPS)); + MDC.put("sdfsCpuLoad", Double.toString(perf.getProcessCpuLoad())); + MDC.put("sdfsCpuTime", Double.toString(perf.getProcessCpuTime())); + MDC.put("systemCpuLoad", Double.toString(perf.getSystemCpuLoad())); + MDC.put("systemCpuAverage", + Double.toString(perf.getSystemLoadAverage())); + MDC.put("freeMemory", 
Long.toString(perf.getFreePhysicalMemorySize())); + MDC.put("totalMemory", Long.toString(perf.getTotalPhysicalMemorySize())); + MDC.put("freeSwap", Long.toString(perf.getFreeSwapSpaceSize())); + MDC.put("totalSwap", Long.toString(perf.getTotalSwapSpaceSize())); + log.info(vol.getName()); + MDC.clear(); + } + + public void close() { + this.closed = true; + th.interrupt(); + + } + +} diff --git a/src/org/opendedup/sdfs/network/AsyncCmdListener.java b/src/org/opendedup/sdfs/network/AsyncCmdListener.java index 6afb0c813..784d283a9 100644 --- a/src/org/opendedup/sdfs/network/AsyncCmdListener.java +++ b/src/org/opendedup/sdfs/network/AsyncCmdListener.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.network; public interface AsyncCmdListener { diff --git a/src/org/opendedup/sdfs/network/BulkFetchChunkCmd.java b/src/org/opendedup/sdfs/network/BulkFetchChunkCmd.java index 57cba3504..dff8127b5 100644 --- a/src/org/opendedup/sdfs/network/BulkFetchChunkCmd.java +++ b/src/org/opendedup/sdfs/network/BulkFetchChunkCmd.java @@ -1,86 +1,104 @@ -package org.opendedup.sdfs.network; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; -import java.util.ArrayList; - -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.filestore.HashChunk; -import org.opendedup.util.CompressionUtils; - -public class BulkFetchChunkCmd implements IOCmd { - ArrayList hashes; - ArrayList chunks; - boolean written = false; - - public BulkFetchChunkCmd(ArrayList hashes) { - this.hashes = hashes; - } - - @Override - @SuppressWarnings("unchecked") - public void executeCmd(DataInputStream is, DataOutputStream os) - throws IOException { - - ByteArrayOutputStream bos = null; - bos = new ByteArrayOutputStream(); - ObjectOutputStream obj_out = new ObjectOutputStream(bos); - obj_out.writeObject(hashes); - byte[] sh = CompressionUtils.compressSnappy(bos.toByteArray()); - // byte [] sh = bos.toByteArray(); - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug("Sent bulkfetch [" + sh.length + "]"); - os.write(NetworkCMDS.BULK_FETCH_CMD); - os.writeInt(sh.length); - os.write(sh); - os.flush(); - bos.close(); - obj_out.close(); - sh = null; - obj_out = null; - bos = null; - int size = is.readInt(); - if (size == -1) { - throw new IOException("One of the Requested hashes does not exist."); - } - byte[] us = new byte[size]; - is.readFully(us); - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug("Recieved bulkfetch [" + us.length + "]"); - us = 
CompressionUtils.decompressSnappy(us); - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug( - "Recieved bulkfetch uncompressed [" + us.length + "]"); - ByteArrayInputStream bin = new ByteArrayInputStream(us); - ObjectInputStream obj_in = new ObjectInputStream(bin); - try { - chunks = (ArrayList) obj_in.readObject(); - } catch (ClassNotFoundException e) { - throw new IOException(e); - } finally { - us = null; - bin.close(); - obj_in.close(); - } - } - - public ArrayList getChunks() { - return this.chunks; - } - - @Override - public byte getCmdID() { - return NetworkCMDS.FETCH_CMD; - } - - @Override - public ArrayList getResult() { - return this.chunks; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.ArrayList; + +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.filestore.HashChunk; +import org.opendedup.util.CompressionUtils; + +public class BulkFetchChunkCmd implements IOCmd { + ArrayList hashes; + ArrayList chunks; + boolean written = false; + + public BulkFetchChunkCmd(ArrayList hashes) { + this.hashes = hashes; + } + + @Override + @SuppressWarnings("unchecked") + public void executeCmd(DataInputStream is, DataOutputStream os) + throws IOException { + + ByteArrayOutputStream bos = null; + bos = new ByteArrayOutputStream(); + ObjectOutputStream obj_out = new ObjectOutputStream(bos); + obj_out.writeObject(hashes); + byte[] sh = CompressionUtils.compressSnappy(bos.toByteArray()); + // byte [] sh = bos.toByteArray(); + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug("Sent bulkfetch [" + sh.length + "]"); + os.write(NetworkCMDS.BULK_FETCH_CMD); + os.writeInt(sh.length); + os.write(sh); + os.flush(); + bos.close(); + obj_out.close(); + sh = null; + obj_out = null; + bos = null; + int size = is.readInt(); + if (size == -1) { + throw new IOException("One of the Requested hashes does not exist."); + } + byte[] us = new byte[size]; + is.readFully(us); + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug("Recieved bulkfetch [" + us.length + "]"); + us = CompressionUtils.decompressSnappy(us); + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug( + "Recieved bulkfetch uncompressed [" + us.length + "]"); + ByteArrayInputStream bin = new ByteArrayInputStream(us); + ObjectInputStream obj_in = new ObjectInputStream(bin); + try { + chunks = (ArrayList) obj_in.readObject(); + } catch (ClassNotFoundException e) { + throw new 
IOException(e); + } finally { + us = null; + bin.close(); + obj_in.close(); + } + } + + public ArrayList getChunks() { + return this.chunks; + } + + @Override + public byte getCmdID() { + return NetworkCMDS.FETCH_CMD; + } + + @Override + public ArrayList getResult() { + return this.chunks; + } + +} diff --git a/src/org/opendedup/sdfs/network/BulkWriteChunkCmd.java b/src/org/opendedup/sdfs/network/BulkWriteChunkCmd.java index 8a717feaa..e20342964 100644 --- a/src/org/opendedup/sdfs/network/BulkWriteChunkCmd.java +++ b/src/org/opendedup/sdfs/network/BulkWriteChunkCmd.java @@ -1,81 +1,99 @@ -package org.opendedup.sdfs.network; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; -import java.util.ArrayList; -import java.util.List; - -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.filestore.HashChunk; - -public class BulkWriteChunkCmd implements IOCmd { - ArrayList chunks; - List response; - boolean written = false; - - public BulkWriteChunkCmd(ArrayList chunks) { - this.chunks = chunks; - } - - @Override - @SuppressWarnings("unchecked") - public void executeCmd(DataInputStream is, DataOutputStream os) - throws IOException { - - ByteArrayOutputStream bos = null; - ObjectOutputStream obj_out = null; - byte[] sh = null; - try { - bos = new ByteArrayOutputStream(); - obj_out = new ObjectOutputStream(bos); - obj_out.writeObject(chunks); - os.write(NetworkCMDS.BULK_FETCH_CMD); - sh = bos.toByteArray(); - os.writeInt(sh.length); - os.write(sh); - os.flush(); - } finally { - bos.close(); - obj_out.close(); - sh = null; - obj_out = null; - bos = null; - } - int size = is.readInt(); - if (size == -1) { - throw new IOException("an error happened while writing"); - } - byte[] us = new byte[size]; - is.readFully(us); - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug("Received bulkfetch [" + us.length + "]"); - // us = CompressionUtils.decompressSnappy(us); - ByteArrayInputStream bin = new ByteArrayInputStream(us); - ObjectInputStream obj_in = new ObjectInputStream(bin); - try { - response = (List) obj_in.readObject(); - } catch (ClassNotFoundException e) { - throw new IOException(e); - } finally { - us = null; - bin.close(); - obj_in.close(); - } - } - - @Override - public byte getCmdID() { - return NetworkCMDS.FETCH_CMD; - } - - @Override - public List getResult() { - return this.response; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
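Both versions of BulkFetchChunkCmd above frame their request the same way: the hash list is Java-serialized, snappy-compressed, and written as a command byte followed by a length-prefixed payload, and a -1 length in the reply signals that a requested hash was missing. The following is a rough standalone sketch of that request framing only; it uses the org.xerial snappy-java bindings as a stand-in for the project's CompressionUtils.compressSnappy, takes the command byte as a parameter because NetworkCMDS is not shown in this diff, and the class and method names are illustrative rather than part of the patch.

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.util.ArrayList;

import org.xerial.snappy.Snappy; // stand-in for CompressionUtils.compressSnappy

// Illustrative only: builds the request frame BulkFetchChunkCmd sends --
// [cmd byte][int: compressed length][snappy(serialized ArrayList of hash strings)].
public class BulkFetchFrame {

	public static byte[] build(byte bulkFetchCmd, ArrayList<String> hashes)
			throws IOException {
		// Java-serialize the hash list, as executeCmd() does with ObjectOutputStream.
		ByteArrayOutputStream serialized = new ByteArrayOutputStream();
		try (ObjectOutputStream oos = new ObjectOutputStream(serialized)) {
			oos.writeObject(hashes);
		}
		// Compress the serialized list before putting it on the wire.
		byte[] payload = Snappy.compress(serialized.toByteArray());

		ByteArrayOutputStream frame = new ByteArrayOutputStream();
		DataOutputStream out = new DataOutputStream(frame);
		out.write(bulkFetchCmd);      // pass NetworkCMDS.BULK_FETCH_CMD here
		out.writeInt(payload.length); // compressed payload length
		out.write(payload);           // payload itself
		out.flush();
		return frame.toByteArray();
	}
}
```

On the reply path the command reads an int length (-1 means a missing hash), reads that many bytes, decompresses them, and deserializes an ArrayList of HashChunk objects, as the server-side BULK_FETCH_CMD branch in ClientThread later in this patch shows from the other end.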
+ *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.ArrayList; +import java.util.List; + +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.filestore.HashChunk; + +public class BulkWriteChunkCmd implements IOCmd { + ArrayList chunks; + List response; + boolean written = false; + + public BulkWriteChunkCmd(ArrayList chunks) { + this.chunks = chunks; + } + + @Override + @SuppressWarnings("unchecked") + public void executeCmd(DataInputStream is, DataOutputStream os) + throws IOException { + + ByteArrayOutputStream bos = null; + ObjectOutputStream obj_out = null; + byte[] sh = null; + try { + bos = new ByteArrayOutputStream(); + obj_out = new ObjectOutputStream(bos); + obj_out.writeObject(chunks); + os.write(NetworkCMDS.BULK_FETCH_CMD); + sh = bos.toByteArray(); + os.writeInt(sh.length); + os.write(sh); + os.flush(); + } finally { + bos.close(); + obj_out.close(); + sh = null; + obj_out = null; + bos = null; + } + int size = is.readInt(); + if (size == -1) { + throw new IOException("an error happened while writing"); + } + byte[] us = new byte[size]; + is.readFully(us); + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug("Received bulkfetch [" + us.length + "]"); + // us = CompressionUtils.decompressSnappy(us); + ByteArrayInputStream bin = new ByteArrayInputStream(us); + ObjectInputStream obj_in = new ObjectInputStream(bin); + try { + response = (List) obj_in.readObject(); + } catch (ClassNotFoundException e) { + throw new IOException(e); + } finally { + us = null; + bin.close(); + obj_in.close(); + } + } + + @Override + public byte getCmdID() { + return NetworkCMDS.FETCH_CMD; + } + + @Override + public List getResult() { + return this.response; + } + +} diff --git a/src/org/opendedup/sdfs/network/ChunkNotFoundException.java b/src/org/opendedup/sdfs/network/ChunkNotFoundException.java index 482947a2b..95aa2ab61 100644 --- a/src/org/opendedup/sdfs/network/ChunkNotFoundException.java +++ b/src/org/opendedup/sdfs/network/ChunkNotFoundException.java @@ -1,16 +1,34 @@ -package org.opendedup.sdfs.network; - -import java.io.IOException; - -public class ChunkNotFoundException extends IOException { - - /** - * - */ - private static final long serialVersionUID = -5398045346438784590L; - - public ChunkNotFoundException(String hash) { - super("could not find chunk " + hash); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.io.IOException; + +public class ChunkNotFoundException extends IOException { + + /** + * + */ + private static final long serialVersionUID = -5398045346438784590L; + + public ChunkNotFoundException(String hash) { + super("could not find chunk " + hash); + } + +} diff --git a/src/org/opendedup/sdfs/network/ClientThread.java b/src/org/opendedup/sdfs/network/ClientThread.java index b5d1b129a..444a5eda8 100755 --- a/src/org/opendedup/sdfs/network/ClientThread.java +++ b/src/org/opendedup/sdfs/network/ClientThread.java @@ -1,365 +1,383 @@ -package org.opendedup.sdfs.network; - -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.BufferedReader; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.ObjectInput; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; -import java.net.Socket; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.locks.ReentrantLock; - -import org.opendedup.collections.InsertRecord; -import org.opendedup.collections.QuickList; -import org.opendedup.hashing.HashFunctions; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.filestore.HashChunk; -import org.opendedup.sdfs.servers.HCServiceProxy; -import org.opendedup.util.CompressionUtils; -import org.opendedup.util.StringUtils; - -/** - * @author Sam Silverberg This is the network class that is used within the - * Chunk store to service all client requests and responses. It is - * threaded and is spawned by @see - * com.annesam.sdfs.network.NetworkHCServer when a new TCP connect in - * accepted. 
- */ - -public class ClientThread extends Thread { - - // DataInputStream is = null; - - Socket clientSocket = null; - private ReentrantLock writelock = new ReentrantLock(); - - private static ArrayList clients = new ArrayList(); - private static int MAX_BATCH_SZ = (Main.MAX_REPL_BATCH_SZ * 1024 * 1024) - / Main.CHUNK_LENGTH; - - public ClientThread(Socket clientSocket) { - this.clientSocket = clientSocket; - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug("Client Threads is " + clients.size()); - addClient(this); - } - - public static void addClient(ClientThread client) { - clients.add(client); - } - - public static void removeClient(ClientThread client) { - clients.remove(client); - } - - @SuppressWarnings("unchecked") - @Override - public void run() { - DataOutputStream os = null; - DataInputStream is = null; - BufferedReader reader = null; - try { - // is = new DataInputStream(clientSocket.getInputStream()); - reader = new BufferedReader(new InputStreamReader( - clientSocket.getInputStream()), 32768 * 2); - is = new DataInputStream(new BufferedInputStream( - clientSocket.getInputStream(), 32768)); - os = new DataOutputStream(new BufferedOutputStream( - clientSocket.getOutputStream(), 32768)); - String versionMessage = "SDFS version " + Main.PROTOCOL_VERSION - + "\r\n"; - os.write(versionMessage.getBytes()); - os.flush(); - String cPasswd = reader.readLine(); - String phash = HashFunctions.getSHAHash(cPasswd.trim().getBytes(), - Main.sdfsPasswordSalt.getBytes()); - if (phash.equals(Main.sdfsPassword)) { - os.writeInt(0); - os.flush(); - throw new IOException("Authentication failed"); - } else { - os.writeInt(1); - os.flush(); - } - while (true) { - byte cmd = is.readByte(); - if (cmd == NetworkCMDS.QUIT_CMD) { - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug( - "Quiting Client Network Thread"); - break; - } - if (cmd == NetworkCMDS.HASH_EXISTS_CMD) { - byte[] hash = new byte[is.readShort()]; - is.readFully(hash); - boolean exists = HCServiceProxy.hashExists(hash); - - try { - writelock.lock(); - os.writeBoolean(exists); - os.flush(); - writelock.unlock(); - } catch (IOException e) { - if (writelock.isLocked()) - writelock.unlock(); - throw new IOException(e); - } finally { - - } - - } - if (cmd == NetworkCMDS.WRITE_HASH_CMD) { - byte[] hash = new byte[is.readShort()]; - is.readFully(hash); - int len = is.readInt(); - byte[] chunkBytes = new byte[len]; - is.readFully(chunkBytes); - InsertRecord rec = HCServiceProxy.writeChunk(hash, - chunkBytes); - try { - writelock.lock(); - os.writeBoolean(rec.getInserted()); - os.flush(); - writelock.unlock(); - } catch (IOException e) { - if (writelock.isLocked()) - writelock.unlock(); - throw new IOException(e); - } finally { - - } - } - if (cmd == NetworkCMDS.BATCH_WRITE_HASH_CMD) { - // long tm = System.currentTimeMillis(); - byte[] arb = new byte[is.readInt()]; - is.readFully(arb); - ByteArrayInputStream bis = new ByteArrayInputStream(arb); - ObjectInput in = null; - List chunks = null; - try { - in = new ObjectInputStream(bis); - chunks = (List) in.readObject(); - } finally { - bis.close(); - in.close(); - } - QuickList rsults = new QuickList( - chunks.size()); - for (int i = 0; i < chunks.size(); i++) { - try { - HashChunk ck = chunks.get(i); - if (ck != null) { - rsults.add(i, Boolean.valueOf(HCServiceProxy - .writeChunk(ck.getName(), ck.getData()) - .getInserted())); - } else - rsults.add(i, null); - } catch (Exception e) { - SDFSLogger.getLog().warn( - "unable to find if hash exists", e); - rsults.add(i, Boolean.valueOf(false)); 
- } - } - ByteArrayOutputStream bos = null; - ObjectOutputStream obj_out = null; - byte[] sh = null; - try { - bos = new ByteArrayOutputStream(); - obj_out = new ObjectOutputStream(bos); - obj_out.writeObject(rsults); - sh = bos.toByteArray(); - os.writeInt(sh.length); - os.write(sh); - os.flush(); - } finally { - obj_out.close(); - bos.close(); - } - - } - if (cmd == NetworkCMDS.FETCH_CMD - || cmd == NetworkCMDS.FETCH_COMPRESSED_CMD) { - byte[] hash = new byte[is.readShort()]; - is.readFully(hash); - HashChunk dChunk = null; - try { - dChunk = HCServiceProxy.fetchHashChunk(hash); - if (cmd == NetworkCMDS.FETCH_COMPRESSED_CMD - && !dChunk.isCompressed()) { - - throw new Exception("not implemented"); - } else if (cmd == NetworkCMDS.FETCH_CMD - && dChunk.isCompressed()) { - - throw new IOException("Not implemented"); - } else { - try { - writelock.lock(); - os.writeInt(dChunk.getData().length); - os.write(dChunk.getData()); - os.flush(); - } catch (IOException e) { - - throw new IOException(e); - } finally { - writelock.unlock(); - } - } - - } catch (NullPointerException e) { - SDFSLogger.getLog().warn( - "chunk " + StringUtils.getHexString(hash) - + " does not exist"); - try { - writelock.lock(); - os.writeInt(-1); - os.flush(); - writelock.unlock(); - } catch (IOException e1) { - if (writelock.isLocked()) - writelock.unlock(); - throw new IOException(e1.toString()); - } finally { - - } - } - } - if (cmd == NetworkCMDS.BULK_FETCH_CMD) { - int len = is.readInt(); - byte[] sh = new byte[len]; - is.readFully(sh); - sh = CompressionUtils.decompressSnappy(sh); - ObjectInputStream obj_in = new ObjectInputStream( - new ByteArrayInputStream(sh)); - ArrayList hashes = (ArrayList) obj_in - .readObject(); - String hash = null; - if (hashes.size() > MAX_BATCH_SZ) { - SDFSLogger.getLog().warn( - "requested hash list to long " + hashes.size() - + " > " + MAX_BATCH_SZ); - try { - writelock.lock(); - os.writeInt(-1); - os.flush(); - writelock.unlock(); - } catch (IOException e1) { - if (writelock.isLocked()) - writelock.unlock(); - throw new IOException(e1.toString()); - } finally { - - } - } - ArrayList chunks = new ArrayList( - hashes.size()); - try { - for (int i = 0; i < hashes.size(); i++) { - hash = hashes.get(i); - HashChunk dChunk = HCServiceProxy - .fetchHashChunk(StringUtils - .getHexBytes(hash)); - - chunks.add(i, dChunk); - } - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - ObjectOutputStream obj_out = new ObjectOutputStream(bos); - obj_out.writeObject(chunks); - byte[] b = CompressionUtils.compressSnappy(bos - .toByteArray()); - // byte [] b =bos.toByteArray(); - writelock.lock(); - try { - os.writeInt(b.length); - os.write(b); - os.flush(); - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug( - "wrote " + b.length + " entries " - + chunks.size()); - } finally { - writelock.unlock(); - bos.close(); - obj_out.close(); - obj_in.close(); - chunks.clear(); - chunks = null; - } - - } catch (NullPointerException e) { - SDFSLogger.getLog().warn( - "chunk " + hash + " does not exist"); - try { - writelock.lock(); - os.writeInt(-1); - os.flush(); - writelock.unlock(); - } catch (IOException e1) { - if (writelock.isLocked()) - writelock.unlock(); - throw new IOException(e1.toString()); - } finally { - - } - } - } - if (cmd == NetworkCMDS.PING_CMD) { - try { - writelock.lock(); - os.writeShort(NetworkCMDS.PING_CMD); - os.flush(); - writelock.unlock(); - } catch (IOException e) { - if (writelock.isLocked()) - writelock.unlock(); - throw new IOException(e); - } finally { - - } - } - } - } 
catch (Exception e) { - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug("connection failed ", e); - - } finally { - try { - reader.close(); - } catch (Exception e1) { - } - try { - os.close(); - } catch (Exception e1) { - } - try { - is.close(); - } catch (Exception e1) { - } - try { - clientSocket.close(); - } catch (Exception e1) { - } - - try { - clientSocket.close(); - } catch (IOException e1) { - } - ClientThread.removeClient(this); - } - } - - public static final int byteArrayToInt(byte[] b) { - return (b[0] << 24) + ((b[1] & 0xFF) << 16) + ((b[2] & 0xFF) << 8) - + (b[3] & 0xFF); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.ObjectInput; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.net.Socket; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.locks.ReentrantLock; + +import org.opendedup.collections.InsertRecord; +import org.opendedup.collections.QuickList; +import org.opendedup.hashing.HashFunctions; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.filestore.HashChunk; +import org.opendedup.sdfs.servers.HCServiceProxy; +import org.opendedup.util.CompressionUtils; +import org.opendedup.util.StringUtils; + +/** + * @author Sam Silverberg This is the network class that is used within the + * Chunk store to service all client requests and responses. It is + * threaded and is spawned by @see + * com.annesam.sdfs.network.NetworkHCServer when a new TCP connect in + * accepted. 
+ */ + +public class ClientThread extends Thread { + + // DataInputStream is = null; + + Socket clientSocket = null; + private ReentrantLock writelock = new ReentrantLock(); + + private static ArrayList clients = new ArrayList(); + private static int MAX_BATCH_SZ = (Main.MAX_REPL_BATCH_SZ * 1024 * 1024) + / Main.CHUNK_LENGTH; + + public ClientThread(Socket clientSocket) { + this.clientSocket = clientSocket; + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug("Client Threads is " + clients.size()); + addClient(this); + } + + public static void addClient(ClientThread client) { + clients.add(client); + } + + public static void removeClient(ClientThread client) { + clients.remove(client); + } + + @SuppressWarnings("unchecked") + @Override + public void run() { + DataOutputStream os = null; + DataInputStream is = null; + BufferedReader reader = null; + try { + // is = new DataInputStream(clientSocket.getInputStream()); + reader = new BufferedReader(new InputStreamReader( + clientSocket.getInputStream()), 32768 * 2); + is = new DataInputStream(new BufferedInputStream( + clientSocket.getInputStream(), 32768)); + os = new DataOutputStream(new BufferedOutputStream( + clientSocket.getOutputStream(), 32768)); + String versionMessage = "SDFS version " + Main.PROTOCOL_VERSION + + "\r\n"; + os.write(versionMessage.getBytes()); + os.flush(); + String cPasswd = reader.readLine(); + String phash = HashFunctions.getSHAHash(cPasswd.trim().getBytes(), + Main.sdfsPasswordSalt.getBytes()); + if (phash.equals(Main.sdfsPassword)) { + os.writeInt(0); + os.flush(); + throw new IOException("Authentication failed"); + } else { + os.writeInt(1); + os.flush(); + } + while (true) { + byte cmd = is.readByte(); + if (cmd == NetworkCMDS.QUIT_CMD) { + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug( + "Quiting Client Network Thread"); + break; + } + if (cmd == NetworkCMDS.HASH_EXISTS_CMD) { + byte[] hash = new byte[is.readShort()]; + is.readFully(hash); + boolean exists = HCServiceProxy.hashExists(hash); + + try { + writelock.lock(); + os.writeBoolean(exists); + os.flush(); + writelock.unlock(); + } catch (IOException e) { + if (writelock.isLocked()) + writelock.unlock(); + throw new IOException(e); + } finally { + + } + + } + if (cmd == NetworkCMDS.WRITE_HASH_CMD) { + byte[] hash = new byte[is.readShort()]; + is.readFully(hash); + int len = is.readInt(); + byte[] chunkBytes = new byte[len]; + is.readFully(chunkBytes); + InsertRecord rec = HCServiceProxy.writeChunk(hash, + chunkBytes); + try { + writelock.lock(); + os.writeBoolean(rec.getInserted()); + os.flush(); + writelock.unlock(); + } catch (IOException e) { + if (writelock.isLocked()) + writelock.unlock(); + throw new IOException(e); + } finally { + + } + } + if (cmd == NetworkCMDS.BATCH_WRITE_HASH_CMD) { + // long tm = System.currentTimeMillis(); + byte[] arb = new byte[is.readInt()]; + is.readFully(arb); + ByteArrayInputStream bis = new ByteArrayInputStream(arb); + ObjectInput in = null; + List chunks = null; + try { + in = new ObjectInputStream(bis); + chunks = (List) in.readObject(); + } finally { + bis.close(); + in.close(); + } + QuickList rsults = new QuickList( + chunks.size()); + for (int i = 0; i < chunks.size(); i++) { + try { + HashChunk ck = chunks.get(i); + if (ck != null) { + rsults.add(i, Boolean.valueOf(HCServiceProxy + .writeChunk(ck.getName(), ck.getData()) + .getInserted())); + } else + rsults.add(i, null); + } catch (Exception e) { + SDFSLogger.getLog().warn( + "unable to find if hash exists", e); + rsults.add(i, Boolean.valueOf(false)); 
+ } + } + ByteArrayOutputStream bos = null; + ObjectOutputStream obj_out = null; + byte[] sh = null; + try { + bos = new ByteArrayOutputStream(); + obj_out = new ObjectOutputStream(bos); + obj_out.writeObject(rsults); + sh = bos.toByteArray(); + os.writeInt(sh.length); + os.write(sh); + os.flush(); + } finally { + obj_out.close(); + bos.close(); + } + + } + if (cmd == NetworkCMDS.FETCH_CMD + || cmd == NetworkCMDS.FETCH_COMPRESSED_CMD) { + byte[] hash = new byte[is.readShort()]; + is.readFully(hash); + HashChunk dChunk = null; + try { + dChunk = HCServiceProxy.fetchHashChunk(hash); + if (cmd == NetworkCMDS.FETCH_COMPRESSED_CMD + && !dChunk.isCompressed()) { + + throw new Exception("not implemented"); + } else if (cmd == NetworkCMDS.FETCH_CMD + && dChunk.isCompressed()) { + + throw new IOException("Not implemented"); + } else { + try { + writelock.lock(); + os.writeInt(dChunk.getData().length); + os.write(dChunk.getData()); + os.flush(); + } catch (IOException e) { + + throw new IOException(e); + } finally { + writelock.unlock(); + } + } + + } catch (NullPointerException e) { + SDFSLogger.getLog().warn( + "chunk " + StringUtils.getHexString(hash) + + " does not exist"); + try { + writelock.lock(); + os.writeInt(-1); + os.flush(); + writelock.unlock(); + } catch (IOException e1) { + if (writelock.isLocked()) + writelock.unlock(); + throw new IOException(e1.toString()); + } finally { + + } + } + } + if (cmd == NetworkCMDS.BULK_FETCH_CMD) { + int len = is.readInt(); + byte[] sh = new byte[len]; + is.readFully(sh); + sh = CompressionUtils.decompressSnappy(sh); + ObjectInputStream obj_in = new ObjectInputStream( + new ByteArrayInputStream(sh)); + ArrayList hashes = (ArrayList) obj_in + .readObject(); + String hash = null; + if (hashes.size() > MAX_BATCH_SZ) { + SDFSLogger.getLog().warn( + "requested hash list to long " + hashes.size() + + " > " + MAX_BATCH_SZ); + try { + writelock.lock(); + os.writeInt(-1); + os.flush(); + writelock.unlock(); + } catch (IOException e1) { + if (writelock.isLocked()) + writelock.unlock(); + throw new IOException(e1.toString()); + } finally { + + } + } + ArrayList chunks = new ArrayList( + hashes.size()); + try { + for (int i = 0; i < hashes.size(); i++) { + hash = hashes.get(i); + HashChunk dChunk = HCServiceProxy + .fetchHashChunk(StringUtils + .getHexBytes(hash)); + + chunks.add(i, dChunk); + } + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + ObjectOutputStream obj_out = new ObjectOutputStream(bos); + obj_out.writeObject(chunks); + byte[] b = CompressionUtils.compressSnappy(bos + .toByteArray()); + // byte [] b =bos.toByteArray(); + writelock.lock(); + try { + os.writeInt(b.length); + os.write(b); + os.flush(); + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug( + "wrote " + b.length + " entries " + + chunks.size()); + } finally { + writelock.unlock(); + bos.close(); + obj_out.close(); + obj_in.close(); + chunks.clear(); + chunks = null; + } + + } catch (NullPointerException e) { + SDFSLogger.getLog().warn( + "chunk " + hash + " does not exist"); + try { + writelock.lock(); + os.writeInt(-1); + os.flush(); + writelock.unlock(); + } catch (IOException e1) { + if (writelock.isLocked()) + writelock.unlock(); + throw new IOException(e1.toString()); + } finally { + + } + } + } + if (cmd == NetworkCMDS.PING_CMD) { + try { + writelock.lock(); + os.writeShort(NetworkCMDS.PING_CMD); + os.flush(); + writelock.unlock(); + } catch (IOException e) { + if (writelock.isLocked()) + writelock.unlock(); + throw new IOException(e); + } finally { + + } + } + } + } 
catch (Exception e) { + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug("connection failed ", e); + + } finally { + try { + reader.close(); + } catch (Exception e1) { + } + try { + os.close(); + } catch (Exception e1) { + } + try { + is.close(); + } catch (Exception e1) { + } + try { + clientSocket.close(); + } catch (Exception e1) { + } + + try { + clientSocket.close(); + } catch (IOException e1) { + } + ClientThread.removeClient(this); + } + } + + public static final int byteArrayToInt(byte[] b) { + return (b[0] << 24) + ((b[1] & 0xFF) << 16) + ((b[2] & 0xFF) << 8) + + (b[3] & 0xFF); + } + +} diff --git a/src/org/opendedup/sdfs/network/ClusteredHCServer.java b/src/org/opendedup/sdfs/network/ClusteredHCServer.java index d587992e6..02279e6b5 100644 --- a/src/org/opendedup/sdfs/network/ClusteredHCServer.java +++ b/src/org/opendedup/sdfs/network/ClusteredHCServer.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.network; import java.io.File; diff --git a/src/org/opendedup/sdfs/network/FetchChunkCmd.java b/src/org/opendedup/sdfs/network/FetchChunkCmd.java index bcef36f67..3ac2eab9f 100755 --- a/src/org/opendedup/sdfs/network/FetchChunkCmd.java +++ b/src/org/opendedup/sdfs/network/FetchChunkCmd.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.sdfs.network; import java.io.DataInputStream; diff --git a/src/org/opendedup/sdfs/network/HashClient.java b/src/org/opendedup/sdfs/network/HashClient.java index 4db8c2b09..df98343e0 100755 --- a/src/org/opendedup/sdfs/network/HashClient.java +++ b/src/org/opendedup/sdfs/network/HashClient.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.network; import java.io.BufferedInputStream; diff --git a/src/org/opendedup/sdfs/network/HashClientPool.java b/src/org/opendedup/sdfs/network/HashClientPool.java index c2eec706e..af6839f1d 100755 --- a/src/org/opendedup/sdfs/network/HashClientPool.java +++ b/src/org/opendedup/sdfs/network/HashClientPool.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.network; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/network/HashClientPoolFactory.java b/src/org/opendedup/sdfs/network/HashClientPoolFactory.java index 060bb5b64..cff8b97b2 100644 --- a/src/org/opendedup/sdfs/network/HashClientPoolFactory.java +++ b/src/org/opendedup/sdfs/network/HashClientPoolFactory.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.network; import org.apache.commons.pool.PoolableObjectFactory; diff --git a/src/org/opendedup/sdfs/network/HashClientSuspectException.java b/src/org/opendedup/sdfs/network/HashClientSuspectException.java index 4809e562b..c7a074f55 100644 --- a/src/org/opendedup/sdfs/network/HashClientSuspectException.java +++ b/src/org/opendedup/sdfs/network/HashClientSuspectException.java @@ -1,17 +1,35 @@ -package org.opendedup.sdfs.network; - -import org.opendedup.sdfs.servers.HCServer; - -public class HashClientSuspectException extends Exception { - - /** - * - */ - private static final long serialVersionUID = -5398045346438784590L; - - public HashClientSuspectException(HCServer server) { - super("DSEServer " + server.getHostName() + ":" + server.getPort() - + " is suspect"); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.network; + +import org.opendedup.sdfs.servers.HCServer; + +public class HashClientSuspectException extends Exception { + + /** + * + */ + private static final long serialVersionUID = -5398045346438784590L; + + public HashClientSuspectException(HCServer server) { + super("DSEServer " + server.getHostName() + ":" + server.getPort() + + " is suspect"); + } + +} diff --git a/src/org/opendedup/sdfs/network/HashExistsCmd.java b/src/org/opendedup/sdfs/network/HashExistsCmd.java index 9398b308f..88bb2e4c0 100755 --- a/src/org/opendedup/sdfs/network/HashExistsCmd.java +++ b/src/org/opendedup/sdfs/network/HashExistsCmd.java @@ -1,43 +1,61 @@ -package org.opendedup.sdfs.network; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; - -public class HashExistsCmd implements IOCmd { - byte[] hash; - boolean exists = false; - - public HashExistsCmd(byte[] hash) { - this.hash = hash; - } - - @Override - public void executeCmd(DataInputStream is, DataOutputStream os) - throws IOException { - os.write(NetworkCMDS.HASH_EXISTS_CMD); - os.writeShort(hash.length); - os.write(hash); - os.flush(); - this.exists = is.readBoolean(); - } - - public byte[] getHash() { - return this.hash; - } - - public boolean exists() { - return this.exists; - } - - @Override - public byte getCmdID() { - return NetworkCMDS.HASH_EXISTS_CMD; - } - - @Override - public Boolean getResult() { - return this.exists; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +public class HashExistsCmd implements IOCmd { + byte[] hash; + boolean exists = false; + + public HashExistsCmd(byte[] hash) { + this.hash = hash; + } + + @Override + public void executeCmd(DataInputStream is, DataOutputStream os) + throws IOException { + os.write(NetworkCMDS.HASH_EXISTS_CMD); + os.writeShort(hash.length); + os.write(hash); + os.flush(); + this.exists = is.readBoolean(); + } + + public byte[] getHash() { + return this.hash; + } + + public boolean exists() { + return this.exists; + } + + @Override + public byte getCmdID() { + return NetworkCMDS.HASH_EXISTS_CMD; + } + + @Override + public Boolean getResult() { + return this.exists; + } + +} diff --git a/src/org/opendedup/sdfs/network/IOCmd.java b/src/org/opendedup/sdfs/network/IOCmd.java index 21cbd3069..3ed4fdc93 100755 --- a/src/org/opendedup/sdfs/network/IOCmd.java +++ b/src/org/opendedup/sdfs/network/IOCmd.java @@ -1,15 +1,33 @@ -package org.opendedup.sdfs.network; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; - -public interface IOCmd { - public abstract void executeCmd(DataInputStream is, DataOutputStream os) - throws IOException, IOCmdException; - - public abstract byte getCmdID(); - - public abstract Object getResult(); - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +public interface IOCmd { + public abstract void executeCmd(DataInputStream is, DataOutputStream os) + throws IOException, IOCmdException; + + public abstract byte getCmdID(); + + public abstract Object getResult(); + +} diff --git a/src/org/opendedup/sdfs/network/IOCmdException.java b/src/org/opendedup/sdfs/network/IOCmdException.java index 9685760b1..fdc318e09 100644 --- a/src/org/opendedup/sdfs/network/IOCmdException.java +++ b/src/org/opendedup/sdfs/network/IOCmdException.java @@ -1,14 +1,32 @@ -package org.opendedup.sdfs.network; - -public class IOCmdException extends Exception { - - /** - * - */ - private static final long serialVersionUID = -7672823297876147730L; - - public IOCmdException(String exception) { - super(exception); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.network; + +public class IOCmdException extends Exception { + + /** + * + */ + private static final long serialVersionUID = -7672823297876147730L; + + public IOCmdException(String exception) { + super(exception); + } + +} diff --git a/src/org/opendedup/sdfs/network/MaxStoreSizeCmd.java b/src/org/opendedup/sdfs/network/MaxStoreSizeCmd.java index b102b8e72..ab39190d7 100644 --- a/src/org/opendedup/sdfs/network/MaxStoreSizeCmd.java +++ b/src/org/opendedup/sdfs/network/MaxStoreSizeCmd.java @@ -1,35 +1,53 @@ -package org.opendedup.sdfs.network; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; - -public class MaxStoreSizeCmd implements IOCmd { - private long maxStoreSize = -1; - - public MaxStoreSizeCmd() { - } - - @Override - public void executeCmd(DataInputStream is, DataOutputStream os) - throws IOException { - os.write(NetworkCMDS.STORE_MAX_SIZE_CMD); - os.flush(); - this.maxStoreSize = is.readLong(); - } - - public long maxStoreSize() { - return this.maxStoreSize; - } - - @Override - public byte getCmdID() { - return NetworkCMDS.STORE_MAX_SIZE_CMD; - } - - @Override - public Long getResult() { - return this.maxStoreSize; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +public class MaxStoreSizeCmd implements IOCmd { + private long maxStoreSize = -1; + + public MaxStoreSizeCmd() { + } + + @Override + public void executeCmd(DataInputStream is, DataOutputStream os) + throws IOException { + os.write(NetworkCMDS.STORE_MAX_SIZE_CMD); + os.flush(); + this.maxStoreSize = is.readLong(); + } + + public long maxStoreSize() { + return this.maxStoreSize; + } + + @Override + public byte getCmdID() { + return NetworkCMDS.STORE_MAX_SIZE_CMD; + } + + @Override + public Long getResult() { + return this.maxStoreSize; + } + +} diff --git a/src/org/opendedup/sdfs/network/NetworkCMDS.java b/src/org/opendedup/sdfs/network/NetworkCMDS.java index fda3200c4..688a54633 100644 --- a/src/org/opendedup/sdfs/network/NetworkCMDS.java +++ b/src/org/opendedup/sdfs/network/NetworkCMDS.java @@ -1,45 +1,63 @@ -package org.opendedup.sdfs.network; - -/** - * - * @author Sam Silverberg These are the commands that are sent by the client to - * the chunk store. The command is sent as the first byte in a command - * request. A typical client request is as follows : - * - * |command type (1b)|length of hash (2b)|md5 or sha hash (lenghth of - * hash)| command specific data (variable length)| - * - */ - -public class NetworkCMDS { - /** Fetch a chunk of data from the chunk store */ - public static final byte FETCH_CMD = 0; - /** See if a hash already exists in the chunk store */ - public static final byte HASH_EXISTS_CMD = 1; - /** write a chunk to the chunk store **/ - public static final byte WRITE_HASH_CMD = 2; - /** Close the client thread used for this TCP connection */ - public static final byte QUIT_CMD = 3; - /** Claim that the client is still using the hash in question */ - // public static final byte CLAIM_HASH = 4; - /** - * Fetch a chunk and request that it is compressed before transmitting to - * the client. The data will be compressed by the chunk store before it is - * sent to the client. - */ - public static final byte FETCH_COMPRESSED_CMD = 5; - /** - * Write a compressed chunk to the chunk server. The data will be compressed - * by the client before it is sent. - */ - public static final byte WRITE_COMPRESSED_CMD = 6; - /** Keep alive ping command. Not used in this implementation */ - public static final byte PING_CMD = 9; - public static final byte STORE_MAX_SIZE_CMD = 10; - public static final byte STORE_SIZE_CMD = 11; - public static final byte STORE_PAGE_SIZE = 12; - public static final byte BULK_FETCH_CMD = 13; - public static final byte UPDATE_DSE = 14; - public static final byte BATCH_WRITE_HASH_CMD = 26; - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.network; + +/** + * + * @author Sam Silverberg These are the commands that are sent by the client to + * the chunk store. The command is sent as the first byte in a command + * request. A typical client request is as follows : + * + * |command type (1b)|length of hash (2b)|md5 or sha hash (lenghth of + * hash)| command specific data (variable length)| + * + */ + +public class NetworkCMDS { + /** Fetch a chunk of data from the chunk store */ + public static final byte FETCH_CMD = 0; + /** See if a hash already exists in the chunk store */ + public static final byte HASH_EXISTS_CMD = 1; + /** write a chunk to the chunk store **/ + public static final byte WRITE_HASH_CMD = 2; + /** Close the client thread used for this TCP connection */ + public static final byte QUIT_CMD = 3; + /** Claim that the client is still using the hash in question */ + // public static final byte CLAIM_HASH = 4; + /** + * Fetch a chunk and request that it is compressed before transmitting to + * the client. The data will be compressed by the chunk store before it is + * sent to the client. + */ + public static final byte FETCH_COMPRESSED_CMD = 5; + /** + * Write a compressed chunk to the chunk server. The data will be compressed + * by the client before it is sent. + */ + public static final byte WRITE_COMPRESSED_CMD = 6; + /** Keep alive ping command. Not used in this implementation */ + public static final byte PING_CMD = 9; + public static final byte STORE_MAX_SIZE_CMD = 10; + public static final byte STORE_SIZE_CMD = 11; + public static final byte STORE_PAGE_SIZE = 12; + public static final byte BULK_FETCH_CMD = 13; + public static final byte UPDATE_DSE = 14; + public static final byte BATCH_WRITE_HASH_CMD = 26; + +} diff --git a/src/org/opendedup/sdfs/network/NetworkDSEServer.java b/src/org/opendedup/sdfs/network/NetworkDSEServer.java index 68f8a230d..10ec28665 100644 --- a/src/org/opendedup/sdfs/network/NetworkDSEServer.java +++ b/src/org/opendedup/sdfs/network/NetworkDSEServer.java @@ -1,115 +1,133 @@ -package org.opendedup.sdfs.network; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.ServerSocket; -import java.net.Socket; -import java.security.KeyStore; -import java.security.SecureRandom; - -import javax.net.ssl.KeyManager; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLServerSocketFactory; - -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Main; -import org.opendedup.util.FindOpenPort; -import org.opendedup.util.KeyGenerator; - -public class NetworkDSEServer implements Runnable { - Socket clientSocket = null; - ServerSocket serverSocket = null; - public boolean closed = false; - - @Override - public void run() { - try { - Main.serverPort = FindOpenPort.pickFreePort(Main.serverPort); - InetSocketAddress addr = new InetSocketAddress(Main.serverHostName, - Main.serverPort); - if (Main.serverUseSSL) { - String keydir = Main.hashDBStore + File.separator + "keys"; - String key = keydir + File.separator + "dse_server.keystore"; - if (!new File(key).exists()) { - KeyGenerator.generateKey(new File(key)); - SDFSLogger.getLog().info( - "generated certificate for ssl communication at " - + key); - } - FileInputStream keyFile = new 
FileInputStream(key); - KeyStore keyStore = KeyStore.getInstance(KeyStore - .getDefaultType()); - keyStore.load(keyFile, "sdfs".toCharArray()); - // init KeyManagerFactory - KeyManagerFactory keyManagerFactory = KeyManagerFactory - .getInstance(KeyManagerFactory.getDefaultAlgorithm()); - keyManagerFactory.init(keyStore, "sdfs".toCharArray()); - // init KeyManager - KeyManager keyManagers[] = keyManagerFactory.getKeyManagers(); - // init the SSL context - SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); - sslContext.init(keyManagers, null, new SecureRandom()); - // get the socket factory - SSLServerSocketFactory socketFactory = sslContext - .getServerSocketFactory(); - - // and finally, get the socket - serverSocket = socketFactory.createServerSocket(); - serverSocket.bind(addr); - SDFSLogger.getLog().info( - "listening on encryted channel " + addr.toString()); - } else { - - serverSocket = new ServerSocket(); - // serverSocket.setReceiveBufferSize(128 * 1024); - - serverSocket.bind(addr); - SDFSLogger.getLog().info( - "listening on unencryted channel " + addr.toString()); - } - } catch (Exception e) { - System.err.println("unable to open network ports : " - + e.getMessage()); - System.err.println("check logs for more details"); - SDFSLogger.getLog().fatal("unable to open network ports", e); - System.exit(-1); - } - - // Create a socket object from the ServerSocket to listen and accept - // connections. - // Open input and output streams for this socket will be created in - // client's thread since every client is served by the server in - // an individual thread - - while (!closed) { - try { - clientSocket = serverSocket.accept(); - clientSocket.setKeepAlive(true); - clientSocket.setTcpNoDelay(false); - // clientSocket.setSendBufferSize(128 * 1024); - new ClientThread(clientSocket).start(); - } catch (IOException e) { - if (!serverSocket.isClosed()) - SDFSLogger.getLog().fatal( - "Unable to open port " + e.toString(), e); - } - } - - } - - public synchronized void close() { - this.closed = true; - try { - System.out.println("#### Shutting Down Network Service ####"); - - serverSocket.close(); - } catch (Exception e) { - } - - System.out.println("#### Network Service Shut down completed ####"); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
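The request framing documented in the NetworkCMDS javadoc above (command byte, two-byte hash length, the hash itself, then command-specific data) is exactly what HashExistsCmd writes in executeCmd. The following is a minimal, hedged sketch of driving that command over a plain TCP connection. The hostname, the port (2222 is the default noted in NetworkHCServer's comments), and the use of a raw Socket are illustrative assumptions; in SDFS the HashClient and HashClientPool classes normally own these connections.

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.net.Socket;

import org.opendedup.sdfs.network.HashExistsCmd;

public class HashExistsExample {
    public static void main(String[] args) throws Exception {
        byte[] hash = new byte[16]; // placeholder MD5-length hash to probe
        try (Socket sock = new Socket("localhost", 2222)) { // host/port are assumptions
            DataOutputStream os = new DataOutputStream(
                    new BufferedOutputStream(sock.getOutputStream()));
            DataInputStream is = new DataInputStream(
                    new BufferedInputStream(sock.getInputStream()));
            // writes |HASH_EXISTS_CMD (1b)|hash length (2b)|hash| and reads one boolean back
            HashExistsCmd cmd = new HashExistsCmd(hash);
            cmd.executeCmd(is, os);
            System.out.println("hash exists = " + cmd.exists());
        }
    }
}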
+ *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.security.KeyStore; +import java.security.SecureRandom; + +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLServerSocketFactory; + +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Main; +import org.opendedup.util.FindOpenPort; +import org.opendedup.util.KeyGenerator; + +public class NetworkDSEServer implements Runnable { + Socket clientSocket = null; + ServerSocket serverSocket = null; + public boolean closed = false; + + @Override + public void run() { + try { + Main.serverPort = FindOpenPort.pickFreePort(Main.serverPort); + InetSocketAddress addr = new InetSocketAddress(Main.serverHostName, + Main.serverPort); + if (Main.serverUseSSL) { + String keydir = Main.hashDBStore + File.separator + "keys"; + String key = keydir + File.separator + "dse_server.keystore"; + if (!new File(key).exists()) { + KeyGenerator.generateKey(new File(key)); + SDFSLogger.getLog().info( + "generated certificate for ssl communication at " + + key); + } + FileInputStream keyFile = new FileInputStream(key); + KeyStore keyStore = KeyStore.getInstance(KeyStore + .getDefaultType()); + keyStore.load(keyFile, "sdfs".toCharArray()); + // init KeyManagerFactory + KeyManagerFactory keyManagerFactory = KeyManagerFactory + .getInstance(KeyManagerFactory.getDefaultAlgorithm()); + keyManagerFactory.init(keyStore, "sdfs".toCharArray()); + // init KeyManager + KeyManager keyManagers[] = keyManagerFactory.getKeyManagers(); + // init the SSL context + SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); + sslContext.init(keyManagers, null, new SecureRandom()); + // get the socket factory + SSLServerSocketFactory socketFactory = sslContext + .getServerSocketFactory(); + + // and finally, get the socket + serverSocket = socketFactory.createServerSocket(); + serverSocket.bind(addr); + SDFSLogger.getLog().info( + "listening on encryted channel " + addr.toString()); + } else { + + serverSocket = new ServerSocket(); + // serverSocket.setReceiveBufferSize(128 * 1024); + + serverSocket.bind(addr); + SDFSLogger.getLog().info( + "listening on unencryted channel " + addr.toString()); + } + } catch (Exception e) { + System.err.println("unable to open network ports : " + + e.getMessage()); + System.err.println("check logs for more details"); + SDFSLogger.getLog().fatal("unable to open network ports", e); + System.exit(-1); + } + + // Create a socket object from the ServerSocket to listen and accept + // connections. 
+ // Open input and output streams for this socket will be created in + // client's thread since every client is served by the server in + // an individual thread + + while (!closed) { + try { + clientSocket = serverSocket.accept(); + clientSocket.setKeepAlive(true); + clientSocket.setTcpNoDelay(false); + // clientSocket.setSendBufferSize(128 * 1024); + new ClientThread(clientSocket).start(); + } catch (IOException e) { + if (!serverSocket.isClosed()) + SDFSLogger.getLog().fatal( + "Unable to open port " + e.toString(), e); + } + } + + } + + public synchronized void close() { + this.closed = true; + try { + System.out.println("#### Shutting Down Network Service ####"); + + serverSocket.close(); + } catch (Exception e) { + } + + System.out.println("#### Network Service Shut down completed ####"); + } + +} diff --git a/src/org/opendedup/sdfs/network/NetworkHCServer.java b/src/org/opendedup/sdfs/network/NetworkHCServer.java index 367ee85a1..542f0a4d0 100755 --- a/src/org/opendedup/sdfs/network/NetworkHCServer.java +++ b/src/org/opendedup/sdfs/network/NetworkHCServer.java @@ -1,150 +1,168 @@ -package org.opendedup.sdfs.network; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.ServerSocket; -import java.net.Socket; -import java.security.KeyStore; -import java.security.SecureRandom; -import java.util.ArrayList; - -import javax.net.ssl.KeyManager; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLServerSocketFactory; - -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Config; -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.servers.HCServiceProxy; -import org.opendedup.util.KeyGenerator; - -public class NetworkHCServer { - - // Declaration section: - // declare a server socket and a client socket for the server - // declare an input and an output stream - - static Socket clientSocket = null; - static ServerSocket serverSocket = null; - - // This chat server can accept up to 10 clients' connections - - public static void main(String args[]) throws IOException { - // The default port - - if (args.length < 1) { - System.out.println("Usage: NetworkHCServer "); - } else { - ShutdownHook shutdownHook = new ShutdownHook(); - Runtime.getRuntime().addShutdownHook(shutdownHook); - - try { - Config.parseDSEConfigFile(args[0]); - } catch (IOException e1) { - SDFSLogger.getLog().fatal( - "exiting because of an error with the config file"); - System.exit(-1); - } - init(new ArrayList()); - - } - - } - - public static void init(ArrayList volumes) throws IOException { - HCServiceProxy.init(volumes); - // Initialization section: - // Try to open a server socket on port port_number (default 2222) - // Note that we can't choose a port less than 1023 if we are not - // privileged users (root) - try { - InetSocketAddress addr = new InetSocketAddress(Main.serverHostName, - Main.serverPort); - if (Main.serverUseSSL) { - String keydir = Main.hashDBStore + File.separator + "keys"; - String key = keydir + File.separator + "dse_server.keystore"; - if (!new File(key).exists()) { - KeyGenerator.generateKey(new File(key)); - SDFSLogger.getLog().info( - "generated certificate for ssl communication at " - + key); - } - FileInputStream keyFile = new FileInputStream(key); - KeyStore keyStore = KeyStore.getInstance(KeyStore - .getDefaultType()); - keyStore.load(keyFile, "sdfs".toCharArray()); - // init KeyManagerFactory - KeyManagerFactory keyManagerFactory = 
KeyManagerFactory - .getInstance(KeyManagerFactory.getDefaultAlgorithm()); - keyManagerFactory.init(keyStore, "sdfs".toCharArray()); - // init KeyManager - KeyManager keyManagers[] = keyManagerFactory.getKeyManagers(); - // init the SSL context - SSLContext sslContext = SSLContext.getDefault(); - sslContext.init(keyManagers, null, new SecureRandom()); - // get the socket factory - SSLServerSocketFactory socketFactory = sslContext - .getServerSocketFactory(); - - // and finally, get the socket - serverSocket = socketFactory.createServerSocket(); - serverSocket.bind(addr); - SDFSLogger.getLog().info( - "listening on encryted channel " + addr.toString()); - } else { - serverSocket = new ServerSocket(); - serverSocket.bind(addr); - SDFSLogger.getLog().info( - "listening on unencryted channel " + addr.toString()); - } - } catch (Exception e) { - e.printStackTrace(); - SDFSLogger.getLog().fatal("unable to open network ports", e); - System.exit(-1); - } - - // Create a socket object from the ServerSocket to listen and accept - // connections. - // Open input and output streams for this socket will be created in - // client's thread since every client is served by the server in - // an individual thread - - while (true) { - try { - clientSocket = serverSocket.accept(); - clientSocket.setKeepAlive(true); - clientSocket.setTcpNoDelay(true); - new ClientThread(clientSocket).start(); - } catch (IOException e) { - if (!serverSocket.isClosed()) - SDFSLogger.getLog().error( - "Unable to open port " + e.toString(), e); - } - } - } - - public static void close() { - try { - System.out.println("#### Shutting Down Network Service ####"); - - serverSocket.close(); - } catch (Exception e) { - } - - System.out.println("#### Shutting down HashStore ####"); - HCServiceProxy.close(); - System.out.println("#### Shut down completed ####"); - } -} - -class ShutdownHook extends Thread { - @Override - public void run() { - System.out.println("#### Shutting down StorageHub ####"); - - NetworkHCServer.close(); - } +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
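NetworkDSEServer above (and NetworkHCServer below) generates a self-signed dse_server.keystore with the password "sdfs" and serves TLS from it when serverUseSSL is set. This diff does not show how a client is expected to trust that certificate, so the sketch below rests on the assumption that the same keystore file is copied to the client and loaded as a truststore; the file path, host, and port are placeholders.

import java.io.FileInputStream;
import java.security.KeyStore;
import java.security.SecureRandom;

import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.TrustManagerFactory;

public class DseTlsClientExample {
    public static void main(String[] args) throws Exception {
        // assumed: the server's keystore has been copied locally to act as a truststore
        KeyStore trustStore = KeyStore.getInstance(KeyStore.getDefaultType());
        try (FileInputStream in = new FileInputStream("dse_server.keystore")) {
            trustStore.load(in, "sdfs".toCharArray());
        }
        TrustManagerFactory tmf = TrustManagerFactory
                .getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init(trustStore);
        SSLContext ctx = SSLContext.getInstance("TLSv1.2"); // matches the protocol used in NetworkDSEServer
        ctx.init(null, tmf.getTrustManagers(), new SecureRandom());
        try (SSLSocket sock = (SSLSocket) ctx.getSocketFactory()
                .createSocket("localhost", 2222)) { // placeholder host/port
            sock.startHandshake();
            // from here the socket's streams can be wrapped in Data*Streams and used with IOCmds
        }
    }
}

Once the handshake succeeds, the connection carries the same command protocol as the unencrypted path, so the IOCmd classes can be used over it unchanged.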
+ *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.security.KeyStore; +import java.security.SecureRandom; +import java.util.ArrayList; + +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLServerSocketFactory; + +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Config; +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.servers.HCServiceProxy; +import org.opendedup.util.KeyGenerator; + +public class NetworkHCServer { + + // Declaration section: + // declare a server socket and a client socket for the server + // declare an input and an output stream + + static Socket clientSocket = null; + static ServerSocket serverSocket = null; + + // This chat server can accept up to 10 clients' connections + + public static void main(String args[]) throws IOException { + // The default port + + if (args.length < 1) { + System.out.println("Usage: NetworkHCServer "); + } else { + ShutdownHook shutdownHook = new ShutdownHook(); + Runtime.getRuntime().addShutdownHook(shutdownHook); + + try { + Config.parseDSEConfigFile(args[0]); + } catch (IOException e1) { + SDFSLogger.getLog().fatal( + "exiting because of an error with the config file"); + System.exit(-1); + } + init(new ArrayList()); + + } + + } + + public static void init(ArrayList volumes) throws IOException { + HCServiceProxy.init(volumes); + // Initialization section: + // Try to open a server socket on port port_number (default 2222) + // Note that we can't choose a port less than 1023 if we are not + // privileged users (root) + try { + InetSocketAddress addr = new InetSocketAddress(Main.serverHostName, + Main.serverPort); + if (Main.serverUseSSL) { + String keydir = Main.hashDBStore + File.separator + "keys"; + String key = keydir + File.separator + "dse_server.keystore"; + if (!new File(key).exists()) { + KeyGenerator.generateKey(new File(key)); + SDFSLogger.getLog().info( + "generated certificate for ssl communication at " + + key); + } + FileInputStream keyFile = new FileInputStream(key); + KeyStore keyStore = KeyStore.getInstance(KeyStore + .getDefaultType()); + keyStore.load(keyFile, "sdfs".toCharArray()); + // init KeyManagerFactory + KeyManagerFactory keyManagerFactory = KeyManagerFactory + .getInstance(KeyManagerFactory.getDefaultAlgorithm()); + keyManagerFactory.init(keyStore, "sdfs".toCharArray()); + // init KeyManager + KeyManager keyManagers[] = keyManagerFactory.getKeyManagers(); + // init the SSL context + SSLContext sslContext = SSLContext.getDefault(); + sslContext.init(keyManagers, null, new SecureRandom()); + // get the socket factory + SSLServerSocketFactory socketFactory = sslContext + .getServerSocketFactory(); + + // and finally, get the socket + serverSocket = socketFactory.createServerSocket(); + serverSocket.bind(addr); + SDFSLogger.getLog().info( + "listening on encryted channel " + addr.toString()); + } else { + serverSocket = new ServerSocket(); + serverSocket.bind(addr); + SDFSLogger.getLog().info( + "listening on unencryted channel " + addr.toString()); + } + } catch (Exception e) { + e.printStackTrace(); + SDFSLogger.getLog().fatal("unable to open network ports", e); + System.exit(-1); + } + + // Create a socket object from the ServerSocket to listen and 
accept + // connections. + // Open input and output streams for this socket will be created in + // client's thread since every client is served by the server in + // an individual thread + + while (true) { + try { + clientSocket = serverSocket.accept(); + clientSocket.setKeepAlive(true); + clientSocket.setTcpNoDelay(true); + new ClientThread(clientSocket).start(); + } catch (IOException e) { + if (!serverSocket.isClosed()) + SDFSLogger.getLog().error( + "Unable to open port " + e.toString(), e); + } + } + } + + public static void close() { + try { + System.out.println("#### Shutting Down Network Service ####"); + + serverSocket.close(); + } catch (Exception e) { + } + + System.out.println("#### Shutting down HashStore ####"); + HCServiceProxy.close(); + System.out.println("#### Shut down completed ####"); + } +} + +class ShutdownHook extends Thread { + @Override + public void run() { + System.out.println("#### Shutting down StorageHub ####"); + + NetworkHCServer.close(); + } } \ No newline at end of file diff --git a/src/org/opendedup/sdfs/network/NioUDPServer.java b/src/org/opendedup/sdfs/network/NioUDPServer.java index c09689ed1..60f7e1d16 100644 --- a/src/org/opendedup/sdfs/network/NioUDPServer.java +++ b/src/org/opendedup/sdfs/network/NioUDPServer.java @@ -1,138 +1,156 @@ -package org.opendedup.sdfs.network; - -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.nio.channels.DatagramChannel; -import java.nio.channels.SelectionKey; -import java.nio.channels.Selector; -import java.util.Iterator; - -import org.opendedup.logging.SDFSLogger; -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.servers.HCServiceProxy; - -/** - * - * @author Sam Silverberg - * - * This is a UDP server class that can be used to serve client requests - * within the chunk server. It servers a similar function to @see - * com.annesam.sdfs.network.ClientThread . In some cases in may improve - * client performance to enable this function on the server. The UDP - * server will service : - * - * - HASH_EXISTS requests - CLAIM_HASH requests - * - * To enable the UDP server within the chunk store the config option - * use-udp="true must be set. 
- * - * - */ - -public class NioUDPServer implements Runnable { - - int datagramSize = 36; - - private boolean closed = false; - - NioUDPServer() { - Thread th = new Thread(this); - th.start(); - } - - public static void main(String args[]) { - Main.serverHostName = "localhost"; - Main.serverPort = 2222; - new NioUDPServer(); - } - - public void close() { - this.closed = true; - } - - @Override - public void run() { - try { - SDFSLogger.getLog().info("Starting UDP Server"); - InetSocketAddress theInetSocketAddress = new InetSocketAddress( - Main.serverHostName, Main.serverPort); - - // make a DatagramChannel - DatagramChannel theDatagramChannel = DatagramChannel.open(); - theDatagramChannel.bind(theInetSocketAddress); - - // A channel must first be placed in nonblocking mode - // before it can be registered with a selector - theDatagramChannel.configureBlocking(false); - // instantiate a selector - Selector theSelector = Selector.open(); - - // register the selector on the channel to monitor reading - // datagrams on the DatagramChannel - theDatagramChannel.register(theSelector, SelectionKey.OP_READ); - - SDFSLogger.getLog().info( - "UDP Server Started on " + theInetSocketAddress); - - // send and read concurrently, but do not block on read: - - while (!this.closed) { - int keys = theSelector.select(500); - // which comes first, next send or a read? - // in case millisecsUntilSendNextDatagram <= 0 go right to send - if (keys > 0) { - try { - Iterator iter = theSelector - .selectedKeys().iterator(); - ByteBuffer buf = ByteBuffer.allocateDirect(33); - ByteBuffer resp = ByteBuffer.allocateDirect(2); - SelectionKey key = null; - while (iter.hasNext()) { - try { - key = iter.next(); - if (key.isReadable()) { - DatagramChannel ch = (DatagramChannel) key - .channel(); - InetSocketAddress addr = (InetSocketAddress) ch - .receive(buf); - buf.flip(); - byte cmd = buf.get(); - byte[] hash = new byte[16]; - buf.clear(); - boolean exists = false; - if (cmd == NetworkCMDS.HASH_EXISTS_CMD) - exists = HCServiceProxy - .hashExists(hash); - // boolean exists = true; - if (exists) - resp.putShort((short) 1); - else - resp.putShort((short) 0); - resp.flip(); - ch.send(resp, addr); - resp.clear(); - } - - } catch (Exception e) { - SDFSLogger.getLog().warn( - "unable to process hash request", e); - } finally { - iter.remove(); - resp.clear(); - buf.clear(); - } - } - } catch (Exception e) { - SDFSLogger.getLog().warn( - "unable to process hash request", e); - } - - } - } - } catch (Exception e) { - SDFSLogger.getLog().fatal("unable to run udp server", e); - return; - } - - } -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.nio.channels.DatagramChannel; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.util.Iterator; + +import org.opendedup.logging.SDFSLogger; +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.servers.HCServiceProxy; + +/** + * + * @author Sam Silverberg + * + * This is a UDP server class that can be used to serve client requests + * within the chunk server. It servers a similar function to @see + * com.annesam.sdfs.network.ClientThread . In some cases in may improve + * client performance to enable this function on the server. The UDP + * server will service : + * + * - HASH_EXISTS requests - CLAIM_HASH requests + * + * To enable the UDP server within the chunk store the config option + * use-udp="true must be set. + * + * + */ + +public class NioUDPServer implements Runnable { + + int datagramSize = 36; + + private boolean closed = false; + + NioUDPServer() { + Thread th = new Thread(this); + th.start(); + } + + public static void main(String args[]) { + Main.serverHostName = "localhost"; + Main.serverPort = 2222; + new NioUDPServer(); + } + + public void close() { + this.closed = true; + } + + @Override + public void run() { + try { + SDFSLogger.getLog().info("Starting UDP Server"); + InetSocketAddress theInetSocketAddress = new InetSocketAddress( + Main.serverHostName, Main.serverPort); + + // make a DatagramChannel + DatagramChannel theDatagramChannel = DatagramChannel.open(); + theDatagramChannel.bind(theInetSocketAddress); + + // A channel must first be placed in nonblocking mode + // before it can be registered with a selector + theDatagramChannel.configureBlocking(false); + // instantiate a selector + Selector theSelector = Selector.open(); + + // register the selector on the channel to monitor reading + // datagrams on the DatagramChannel + theDatagramChannel.register(theSelector, SelectionKey.OP_READ); + + SDFSLogger.getLog().info( + "UDP Server Started on " + theInetSocketAddress); + + // send and read concurrently, but do not block on read: + + while (!this.closed) { + int keys = theSelector.select(500); + // which comes first, next send or a read? 
+ // in case millisecsUntilSendNextDatagram <= 0 go right to send + if (keys > 0) { + try { + Iterator iter = theSelector + .selectedKeys().iterator(); + ByteBuffer buf = ByteBuffer.allocateDirect(33); + ByteBuffer resp = ByteBuffer.allocateDirect(2); + SelectionKey key = null; + while (iter.hasNext()) { + try { + key = iter.next(); + if (key.isReadable()) { + DatagramChannel ch = (DatagramChannel) key + .channel(); + InetSocketAddress addr = (InetSocketAddress) ch + .receive(buf); + buf.flip(); + byte cmd = buf.get(); + byte[] hash = new byte[16]; + buf.clear(); + boolean exists = false; + if (cmd == NetworkCMDS.HASH_EXISTS_CMD) + exists = HCServiceProxy + .hashExists(hash); + // boolean exists = true; + if (exists) + resp.putShort((short) 1); + else + resp.putShort((short) 0); + resp.flip(); + ch.send(resp, addr); + resp.clear(); + } + + } catch (Exception e) { + SDFSLogger.getLog().warn( + "unable to process hash request", e); + } finally { + iter.remove(); + resp.clear(); + buf.clear(); + } + } + } catch (Exception e) { + SDFSLogger.getLog().warn( + "unable to process hash request", e); + } + + } + } + } catch (Exception e) { + SDFSLogger.getLog().fatal("unable to run udp server", e); + return; + } + + } +} diff --git a/src/org/opendedup/sdfs/network/PageSizeCmd.java b/src/org/opendedup/sdfs/network/PageSizeCmd.java index d1e028ea9..f4ef4f53e 100644 --- a/src/org/opendedup/sdfs/network/PageSizeCmd.java +++ b/src/org/opendedup/sdfs/network/PageSizeCmd.java @@ -1,35 +1,53 @@ -package org.opendedup.sdfs.network; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; - -public class PageSizeCmd implements IOCmd { - private int pageSize = -1; - - public PageSizeCmd() { - } - - @Override - public void executeCmd(DataInputStream is, DataOutputStream os) - throws IOException { - os.write(NetworkCMDS.STORE_PAGE_SIZE); - os.flush(); - this.pageSize = is.readInt(); - } - - public int pageSize() { - return this.pageSize; - } - - @Override - public byte getCmdID() { - return NetworkCMDS.STORE_PAGE_SIZE; - } - - @Override - public Integer getResult() { - return this.pageSize; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
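The NioUDPServer javadoc above states that the UDP path exists to answer HASH_EXISTS probes when the chunk store is configured with use-udp="true". A probe consistent with the run() loop shown here, which reads each datagram into a 33-byte buffer, inspects the leading command byte, and replies with a 2-byte short (1 for exists, 0 otherwise), might look like the sketch below. The host, port, and the payload layout after the command byte are assumptions; only the leading byte and the short reply are pinned down by the code above.

import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.DatagramChannel;

import org.opendedup.sdfs.network.NetworkCMDS;

public class UdpHashExistsProbe {
    public static void main(String[] args) throws Exception {
        byte[] hash = new byte[16]; // placeholder hash value
        ByteBuffer req = ByteBuffer.allocate(33); // matches the server's receive buffer size
        req.put(NetworkCMDS.HASH_EXISTS_CMD).put(hash).flip();
        ByteBuffer resp = ByteBuffer.allocate(2);
        try (DatagramChannel ch = DatagramChannel.open()) {
            ch.send(req, new InetSocketAddress("localhost", 2222)); // placeholder host/port
            ch.receive(resp); // blocks until the 2-byte answer arrives
            resp.flip();
            System.out.println("exists = " + (resp.getShort() == 1));
        }
    }
}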
+ *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +public class PageSizeCmd implements IOCmd { + private int pageSize = -1; + + public PageSizeCmd() { + } + + @Override + public void executeCmd(DataInputStream is, DataOutputStream os) + throws IOException { + os.write(NetworkCMDS.STORE_PAGE_SIZE); + os.flush(); + this.pageSize = is.readInt(); + } + + public int pageSize() { + return this.pageSize; + } + + @Override + public byte getCmdID() { + return NetworkCMDS.STORE_PAGE_SIZE; + } + + @Override + public Integer getResult() { + return this.pageSize; + } + +} diff --git a/src/org/opendedup/sdfs/network/PingCmd.java b/src/org/opendedup/sdfs/network/PingCmd.java index 3c203636f..ad7bea980 100755 --- a/src/org/opendedup/sdfs/network/PingCmd.java +++ b/src/org/opendedup/sdfs/network/PingCmd.java @@ -1,37 +1,55 @@ -package org.opendedup.sdfs.network; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; - -public class PingCmd implements IOCmd { - - private short response; - - public PingCmd() { - } - - @Override - public void executeCmd(DataInputStream is, DataOutputStream os) - throws IOException { - os.writeInt(NetworkCMDS.PING_CMD); - os.flush(); - response = is.readShort(); - } - - public short getResponse() { - return this.response; - } - - @Override - public byte getCmdID() { - // TODO Auto-generated method stub - return NetworkCMDS.PING_CMD; - } - - @Override - public Short getResult() { - return this.response; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +public class PingCmd implements IOCmd { + + private short response; + + public PingCmd() { + } + + @Override + public void executeCmd(DataInputStream is, DataOutputStream os) + throws IOException { + os.writeInt(NetworkCMDS.PING_CMD); + os.flush(); + response = is.readShort(); + } + + public short getResponse() { + return this.response; + } + + @Override + public byte getCmdID() { + // TODO Auto-generated method stub + return NetworkCMDS.PING_CMD; + } + + @Override + public Short getResult() { + return this.response; + } + +} diff --git a/src/org/opendedup/sdfs/network/PingThread.java b/src/org/opendedup/sdfs/network/PingThread.java index 3f100e227..60b7dbbc5 100755 --- a/src/org/opendedup/sdfs/network/PingThread.java +++ b/src/org/opendedup/sdfs/network/PingThread.java @@ -1,35 +1,53 @@ -package org.opendedup.sdfs.network; - -import org.opendedup.sdfs.Main; - -public class PingThread implements Runnable { - HashClient client; - - public PingThread(HashClient client) { - this.client = client; - Thread th = new Thread(this); - th.start(); - - } - - @Override - public void run() { - while (!client.isClosed()) { - try { - client.ping(); - try { - Thread.sleep(Main.PING_TIME); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } catch (Exception e) { - // TODO Auto-generated catch block - e.printStackTrace(); - break; - } - } - - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.network; + +import org.opendedup.sdfs.Main; + +public class PingThread implements Runnable { + HashClient client; + + public PingThread(HashClient client) { + this.client = client; + Thread th = new Thread(this); + th.start(); + + } + + @Override + public void run() { + while (!client.isClosed()) { + try { + client.ping(); + try { + Thread.sleep(Main.PING_TIME); + } catch (InterruptedException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + break; + } + } + + } + +} diff --git a/src/org/opendedup/sdfs/network/StoreSizeCmd.java b/src/org/opendedup/sdfs/network/StoreSizeCmd.java index f3f83f869..97e612bf5 100644 --- a/src/org/opendedup/sdfs/network/StoreSizeCmd.java +++ b/src/org/opendedup/sdfs/network/StoreSizeCmd.java @@ -1,35 +1,53 @@ -package org.opendedup.sdfs.network; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; - -public class StoreSizeCmd implements IOCmd { - private long storeSize = -1; - - public StoreSizeCmd() { - } - - @Override - public void executeCmd(DataInputStream is, DataOutputStream os) - throws IOException { - os.write(NetworkCMDS.STORE_SIZE_CMD); - os.flush(); - this.storeSize = is.readLong(); - } - - public long storeSize() { - return this.storeSize; - } - - @Override - public byte getCmdID() { - return NetworkCMDS.STORE_SIZE_CMD; - } - - @Override - public Long getResult() { - return this.storeSize; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +public class StoreSizeCmd implements IOCmd { + private long storeSize = -1; + + public StoreSizeCmd() { + } + + @Override + public void executeCmd(DataInputStream is, DataOutputStream os) + throws IOException { + os.write(NetworkCMDS.STORE_SIZE_CMD); + os.flush(); + this.storeSize = is.readLong(); + } + + public long storeSize() { + return this.storeSize; + } + + @Override + public byte getCmdID() { + return NetworkCMDS.STORE_SIZE_CMD; + } + + @Override + public Long getResult() { + return this.storeSize; + } + +} diff --git a/src/org/opendedup/sdfs/network/WriteHashCmd.java b/src/org/opendedup/sdfs/network/WriteHashCmd.java index 2104df4b9..4f4b12c6f 100755 --- a/src/org/opendedup/sdfs/network/WriteHashCmd.java +++ b/src/org/opendedup/sdfs/network/WriteHashCmd.java @@ -1,65 +1,83 @@ -package org.opendedup.sdfs.network; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; - -public class WriteHashCmd implements IOCmd { - byte[] hash; - byte[] aContents; - int position; - int len; - boolean written = false; - boolean compress = false; - - public WriteHashCmd(byte[] hash, byte[] aContents, int len, boolean compress) - throws IOException { - this.hash = hash; - this.compress = compress; - if (compress) { - throw new IOException("not implemented"); - /* - * try { byte[] compB = CompressionUtils.compress(aContents); if - * (compB.length <= aContents.length) { this.aContents = compB; - * this.len = this.aContents.length; } else { this.compress = false; - * this.aContents = aContents; this.len = len; } } catch - * (IOException e) { // TODO Auto-generated catch block - * e.printStackTrace(); this.aContents = aContents; this.len = len; - * this.compress = false; } - */ - } else { - this.aContents = aContents; - this.len = len; - } - - } - - @Override - public void executeCmd(DataInputStream is, DataOutputStream os) - throws IOException { - - os.write(NetworkCMDS.WRITE_HASH_CMD); - os.writeShort(hash.length); - os.write(hash); - os.writeInt(len); - os.write(aContents); - os.flush(); - this.written = is.readBoolean(); - aContents = null; - } - - public boolean wasWritten() { - return this.written; - } - - @Override - public byte getCmdID() { - return NetworkCMDS.WRITE_HASH_CMD; - } - - @Override - public Boolean getResult() { - return this.written; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.network; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +public class WriteHashCmd implements IOCmd { + byte[] hash; + byte[] aContents; + int position; + int len; + boolean written = false; + boolean compress = false; + + public WriteHashCmd(byte[] hash, byte[] aContents, int len, boolean compress) + throws IOException { + this.hash = hash; + this.compress = compress; + if (compress) { + throw new IOException("not implemented"); + /* + * try { byte[] compB = CompressionUtils.compress(aContents); if + * (compB.length <= aContents.length) { this.aContents = compB; + * this.len = this.aContents.length; } else { this.compress = false; + * this.aContents = aContents; this.len = len; } } catch + * (IOException e) { // TODO Auto-generated catch block + * e.printStackTrace(); this.aContents = aContents; this.len = len; + * this.compress = false; } + */ + } else { + this.aContents = aContents; + this.len = len; + } + + } + + @Override + public void executeCmd(DataInputStream is, DataOutputStream os) + throws IOException { + + os.write(NetworkCMDS.WRITE_HASH_CMD); + os.writeShort(hash.length); + os.write(hash); + os.writeInt(len); + os.write(aContents); + os.flush(); + this.written = is.readBoolean(); + aContents = null; + } + + public boolean wasWritten() { + return this.written; + } + + @Override + public byte getCmdID() { + return NetworkCMDS.WRITE_HASH_CMD; + } + + @Override + public Boolean getResult() { + return this.written; + } + +} diff --git a/src/org/opendedup/sdfs/notification/BlockImportEvent.java b/src/org/opendedup/sdfs/notification/BlockImportEvent.java index 7fd650b9d..36fdb648a 100644 --- a/src/org/opendedup/sdfs/notification/BlockImportEvent.java +++ b/src/org/opendedup/sdfs/notification/BlockImportEvent.java @@ -1,33 +1,51 @@ -package org.opendedup.sdfs.notification; - -import javax.xml.parsers.ParserConfigurationException; - -import org.w3c.dom.Element; - -public class BlockImportEvent extends SDFSEvent { - - /** - * - */ - private static final long serialVersionUID = 1L; - public long blocksImported; - public long bytesImported; - public long filesImported; - public long virtualDataImported; - - protected BlockImportEvent(String target, String shortMsg, Level level) { - super(MIMPORT, target, shortMsg, level); - } - - @Override - public Element toXML() throws ParserConfigurationException { - Element el = super.toXML(); - el.setAttribute("blocks-imported", Long.toString(this.blocksImported)); - el.setAttribute("bytes-imported", Long.toString(this.bytesImported)); - el.setAttribute("files-imported", Long.toString(this.filesImported)); - el.setAttribute("virtual-data-imported", - Long.toString(this.virtualDataImported)); - return el; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.notification; + +import javax.xml.parsers.ParserConfigurationException; + +import org.w3c.dom.Element; + +public class BlockImportEvent extends SDFSEvent { + + /** + * + */ + private static final long serialVersionUID = 1L; + public long blocksImported; + public long bytesImported; + public long filesImported; + public long virtualDataImported; + + protected BlockImportEvent(String target, String shortMsg, Level level) { + super(MIMPORT, target, shortMsg, level); + } + + @Override + public Element toXML() throws ParserConfigurationException { + Element el = super.toXML(); + el.setAttribute("blocks-imported", Long.toString(this.blocksImported)); + el.setAttribute("bytes-imported", Long.toString(this.bytesImported)); + el.setAttribute("files-imported", Long.toString(this.filesImported)); + el.setAttribute("virtual-data-imported", + Long.toString(this.virtualDataImported)); + return el; + } + +} diff --git a/src/org/opendedup/sdfs/notification/DiskFullEvent.java b/src/org/opendedup/sdfs/notification/DiskFullEvent.java index 406226412..91f7d023d 100644 --- a/src/org/opendedup/sdfs/notification/DiskFullEvent.java +++ b/src/org/opendedup/sdfs/notification/DiskFullEvent.java @@ -1,36 +1,54 @@ -package org.opendedup.sdfs.notification; - -import javax.xml.parsers.ParserConfigurationException; - -import org.w3c.dom.Element; - -public class DiskFullEvent extends SDFSEvent { - - /** - * - */ - private static final long serialVersionUID = 1L; - public long currentSz; - public long maxSz; - public long dseSz; - public long maxDseSz; - public long dskUsage; - public long maxDskUsage; - - public DiskFullEvent(String shortMsg) { - super(DSKFL, getTarget(), shortMsg, SDFSEvent.ERROR); - } - - @Override - public Element toXML() throws ParserConfigurationException { - Element el = super.toXML(); - el.setAttribute("current-size", Long.toString(this.currentSz)); - el.setAttribute("max-size", Long.toString(this.maxSz)); - el.setAttribute("dse-size", Long.toString(this.dseSz)); - el.setAttribute("dse-max-size", Long.toString(this.maxDseSz)); - el.setAttribute("disk-usage", Long.toString(this.dskUsage)); - el.setAttribute("max-disk-usage", Long.toString(this.maxDskUsage)); - return el; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.notification; + +import javax.xml.parsers.ParserConfigurationException; + +import org.w3c.dom.Element; + +public class DiskFullEvent extends SDFSEvent { + + /** + * + */ + private static final long serialVersionUID = 1L; + public long currentSz; + public long maxSz; + public long dseSz; + public long maxDseSz; + public long dskUsage; + public long maxDskUsage; + + public DiskFullEvent(String shortMsg) { + super(DSKFL, getTarget(), shortMsg, SDFSEvent.ERROR); + } + + @Override + public Element toXML() throws ParserConfigurationException { + Element el = super.toXML(); + el.setAttribute("current-size", Long.toString(this.currentSz)); + el.setAttribute("max-size", Long.toString(this.maxSz)); + el.setAttribute("dse-size", Long.toString(this.dseSz)); + el.setAttribute("dse-max-size", Long.toString(this.maxDseSz)); + el.setAttribute("disk-usage", Long.toString(this.dskUsage)); + el.setAttribute("max-disk-usage", Long.toString(this.maxDskUsage)); + return el; + } + +} diff --git a/src/org/opendedup/sdfs/notification/FDiskEvent.java b/src/org/opendedup/sdfs/notification/FDiskEvent.java index 7b85f186d..bf477104d 100644 --- a/src/org/opendedup/sdfs/notification/FDiskEvent.java +++ b/src/org/opendedup/sdfs/notification/FDiskEvent.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
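BlockImportEvent and DiskFullEvent above follow the same toXML convention: take the base element produced by SDFSEvent.toXML() and attach their counters as string attributes. The stand-alone sketch below mirrors that DOM pattern and shows the shape of the serialized output; the element name "event" and the numeric values are placeholders, since the real base element and its standard attributes come from SDFSEvent.

    import java.io.StringWriter;

    import javax.xml.parsers.DocumentBuilderFactory;
    import javax.xml.transform.OutputKeys;
    import javax.xml.transform.Transformer;
    import javax.xml.transform.TransformerFactory;
    import javax.xml.transform.dom.DOMSource;
    import javax.xml.transform.stream.StreamResult;

    import org.w3c.dom.Document;
    import org.w3c.dom.Element;

    // Illustrative only: mirrors the attribute-per-counter style of
    // DiskFullEvent.toXML(); SDFSEvent.toXML() supplies the real base element.
    public class EventXmlSketch {
        public static void main(String[] args) throws Exception {
            Document doc = DocumentBuilderFactory.newInstance()
                    .newDocumentBuilder().newDocument();
            Element el = doc.createElement("event"); // placeholder element name
            el.setAttribute("current-size", Long.toString(1073741824L));
            el.setAttribute("max-size", Long.toString(2147483648L));
            el.setAttribute("disk-usage", Long.toString(536870912L));
            doc.appendChild(el);

            Transformer t = TransformerFactory.newInstance().newTransformer();
            t.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
            StringWriter out = new StringWriter();
            t.transform(new DOMSource(doc), new StreamResult(out));
            System.out.println(out); // <event current-size="1073741824" ... />
        }
    }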
+ *******************************************************************************/ package org.opendedup.sdfs.notification; public class FDiskEvent extends SDFSEvent { diff --git a/src/org/opendedup/sdfs/notification/ReadAheadEvent.java b/src/org/opendedup/sdfs/notification/ReadAheadEvent.java index 2f0344e1c..8e696f436 100644 --- a/src/org/opendedup/sdfs/notification/ReadAheadEvent.java +++ b/src/org/opendedup/sdfs/notification/ReadAheadEvent.java @@ -1,34 +1,52 @@ -package org.opendedup.sdfs.notification; - -import javax.xml.parsers.ParserConfigurationException; - -import org.opendedup.sdfs.io.MetaDataDedupFile; -import org.w3c.dom.Element; - -public class ReadAheadEvent extends SDFSEvent { - - /** - * - */ - private static final long serialVersionUID = 1L; - public MetaDataDedupFile mf; - public boolean running = false; - - public ReadAheadEvent(String target,MetaDataDedupFile mf) { - super(RAE, target, "Caching " +mf.getPath(), SDFSEvent.INFO); - this.mf = mf; - this.running = true; - } - - public void cancelEvent() { - this.running = false; - } - - @Override - public Element toXML() throws ParserConfigurationException { - Element el = super.toXML(); - el.setAttribute("file", mf.getPath()); - return el; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.notification; + +import javax.xml.parsers.ParserConfigurationException; + +import org.opendedup.sdfs.io.MetaDataDedupFile; +import org.w3c.dom.Element; + +public class ReadAheadEvent extends SDFSEvent { + + /** + * + */ + private static final long serialVersionUID = 1L; + public MetaDataDedupFile mf; + public boolean running = false; + + public ReadAheadEvent(String target,MetaDataDedupFile mf) { + super(RAE, target, "Caching " +mf.getPath(), SDFSEvent.INFO); + this.mf = mf; + this.running = true; + } + + public void cancelEvent() { + this.running = false; + } + + @Override + public Element toXML() throws ParserConfigurationException { + Element el = super.toXML(); + el.setAttribute("file", mf.getPath()); + return el; + } + +} diff --git a/src/org/opendedup/sdfs/notification/SDFSEvent.java b/src/org/opendedup/sdfs/notification/SDFSEvent.java index 4c8d9a8a2..1471bb9e5 100644 --- a/src/org/opendedup/sdfs/notification/SDFSEvent.java +++ b/src/org/opendedup/sdfs/notification/SDFSEvent.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.notification; import java.io.File; diff --git a/src/org/opendedup/sdfs/replication/ArchiveImporter.java b/src/org/opendedup/sdfs/replication/ArchiveImporter.java index f9d72d9f2..10fd68cc7 100644 --- a/src/org/opendedup/sdfs/replication/ArchiveImporter.java +++ b/src/org/opendedup/sdfs/replication/ArchiveImporter.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.replication; import java.io.File; diff --git a/src/org/opendedup/sdfs/replication/MetaFileImport.java b/src/org/opendedup/sdfs/replication/MetaFileImport.java index fff8894a0..7436edb6e 100644 --- a/src/org/opendedup/sdfs/replication/MetaFileImport.java +++ b/src/org/opendedup/sdfs/replication/MetaFileImport.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.sdfs.replication; import java.io.File; diff --git a/src/org/opendedup/sdfs/replication/ReplicationCanceledException.java b/src/org/opendedup/sdfs/replication/ReplicationCanceledException.java index 4598b8723..94f31fef3 100644 --- a/src/org/opendedup/sdfs/replication/ReplicationCanceledException.java +++ b/src/org/opendedup/sdfs/replication/ReplicationCanceledException.java @@ -1,14 +1,32 @@ -package org.opendedup.sdfs.replication; - -public class ReplicationCanceledException extends Exception { - - public ReplicationCanceledException(String string) { - super(string); - } - - /** - * - */ - private static final long serialVersionUID = -7687536462730018028L; - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.replication; + +public class ReplicationCanceledException extends Exception { + + public ReplicationCanceledException(String string) { + super(string); + } + + /** + * + */ + private static final long serialVersionUID = -7687536462730018028L; + +} diff --git a/src/org/opendedup/sdfs/replication/ReplicationJob.java b/src/org/opendedup/sdfs/replication/ReplicationJob.java index df7ba4f88..bbbd76cc9 100644 --- a/src/org/opendedup/sdfs/replication/ReplicationJob.java +++ b/src/org/opendedup/sdfs/replication/ReplicationJob.java @@ -1,24 +1,42 @@ -package org.opendedup.sdfs.replication; - -import org.opendedup.logging.SDFSLogger; -import org.quartz.Job; -import org.quartz.JobExecutionContext; -import org.quartz.JobExecutionException; - -public class ReplicationJob implements Job { - @Override - public void execute(JobExecutionContext context) - throws JobExecutionException { - try { - ReplicationService service = (ReplicationService) context - .getJobDetail().getJobDataMap().get("service"); - service.replicate(); - } catch (Exception e) { - SDFSLogger.getLog().warn("unable to finish executing replication", - e); - throw new JobExecutionException(e); - } - - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.replication; + +import org.opendedup.logging.SDFSLogger; +import org.quartz.Job; +import org.quartz.JobExecutionContext; +import org.quartz.JobExecutionException; + +public class ReplicationJob implements Job { + @Override + public void execute(JobExecutionContext context) + throws JobExecutionException { + try { + ReplicationService service = (ReplicationService) context + .getJobDetail().getJobDataMap().get("service"); + service.replicate(); + } catch (Exception e) { + SDFSLogger.getLog().warn("unable to finish executing replication", + e); + throw new JobExecutionException(e); + } + + } + +} diff --git a/src/org/opendedup/sdfs/replication/ReplicationScheduler.java b/src/org/opendedup/sdfs/replication/ReplicationScheduler.java index 3ceb848e3..863f30d0a 100644 --- a/src/org/opendedup/sdfs/replication/ReplicationScheduler.java +++ b/src/org/opendedup/sdfs/replication/ReplicationScheduler.java @@ -1,51 +1,69 @@ -package org.opendedup.sdfs.replication; - -import java.util.Properties; - -import org.opendedup.logging.SDFSLogger; -import org.quartz.CronTrigger; -import org.quartz.JobDataMap; -import org.quartz.JobDetail; -import org.quartz.Scheduler; -import org.quartz.SchedulerFactory; -import org.quartz.impl.StdSchedulerFactory; - -public class ReplicationScheduler { - Scheduler sched = null; - - public ReplicationScheduler(String schedule, ReplicationService service) { - try { - Properties props = new Properties(); - props.setProperty("org.quartz.scheduler.skipUpdateCheck", "true"); - props.setProperty("org.quartz.threadPool.class", - "org.quartz.simpl.SimpleThreadPool"); - props.setProperty("org.quartz.threadPool.threadCount", "1"); - props.setProperty("org.quartz.threadPool.threadPriority", - Integer.toString(Thread.NORM_PRIORITY)); - SDFSLogger.getLog().info("Scheduling Replication Job for SDFS"); - SchedulerFactory schedFact = new StdSchedulerFactory(props); - sched = schedFact.getScheduler(); - sched.start(); - JobDataMap dataMap = new JobDataMap(); - dataMap.put("service", service); - JobDetail ccjobDetail = new JobDetail("replication", null, - ReplicationJob.class); - ccjobDetail.setJobDataMap(dataMap); - CronTrigger cctrigger = new CronTrigger("replicationTrigger", - "group1", schedule); - sched.scheduleJob(ccjobDetail, cctrigger); - SDFSLogger.getLog().info("Replication Job Scheduled"); - } catch (Exception e) { - SDFSLogger.getLog().fatal("Unable to schedule Replication Job", e); - } - } - - public void stopSchedules() { - try { - sched.unscheduleJob("replication", "replicationTrigger"); - } catch (Exception e) { - - } - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.replication; + +import java.util.Properties; + +import org.opendedup.logging.SDFSLogger; +import org.quartz.CronTrigger; +import org.quartz.JobDataMap; +import org.quartz.JobDetail; +import org.quartz.Scheduler; +import org.quartz.SchedulerFactory; +import org.quartz.impl.StdSchedulerFactory; + +public class ReplicationScheduler { + Scheduler sched = null; + + public ReplicationScheduler(String schedule, ReplicationService service) { + try { + Properties props = new Properties(); + props.setProperty("org.quartz.scheduler.skipUpdateCheck", "true"); + props.setProperty("org.quartz.threadPool.class", + "org.quartz.simpl.SimpleThreadPool"); + props.setProperty("org.quartz.threadPool.threadCount", "1"); + props.setProperty("org.quartz.threadPool.threadPriority", + Integer.toString(Thread.NORM_PRIORITY)); + SDFSLogger.getLog().info("Scheduling Replication Job for SDFS"); + SchedulerFactory schedFact = new StdSchedulerFactory(props); + sched = schedFact.getScheduler(); + sched.start(); + JobDataMap dataMap = new JobDataMap(); + dataMap.put("service", service); + JobDetail ccjobDetail = new JobDetail("replication", null, + ReplicationJob.class); + ccjobDetail.setJobDataMap(dataMap); + CronTrigger cctrigger = new CronTrigger("replicationTrigger", + "group1", schedule); + sched.scheduleJob(ccjobDetail, cctrigger); + SDFSLogger.getLog().info("Replication Job Scheduled"); + } catch (Exception e) { + SDFSLogger.getLog().fatal("Unable to schedule Replication Job", e); + } + } + + public void stopSchedules() { + try { + sched.unscheduleJob("replication", "replicationTrigger"); + } catch (Exception e) { + + } + } + +} diff --git a/src/org/opendedup/sdfs/replication/ReplicationService.java b/src/org/opendedup/sdfs/replication/ReplicationService.java index 4f90a7c6d..d3df53436 100644 --- a/src/org/opendedup/sdfs/replication/ReplicationService.java +++ b/src/org/opendedup/sdfs/replication/ReplicationService.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
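ReplicationScheduler above builds a single-threaded Quartz 1.x scheduler, hands the ReplicationService to the job through a JobDataMap, and fires it on a cron schedule; ReplicationJob then pulls the service back out of the map in execute(). The sketch below reproduces that wiring with a placeholder job and cron expression, using the same Quartz 1.x API calls that appear in the patch.

    import java.util.Properties;

    import org.quartz.CronTrigger;
    import org.quartz.Job;
    import org.quartz.JobDataMap;
    import org.quartz.JobDetail;
    import org.quartz.JobExecutionContext;
    import org.quartz.JobExecutionException;
    import org.quartz.Scheduler;
    import org.quartz.impl.StdSchedulerFactory;

    // Illustrative only: same Quartz 1.x pattern as ReplicationScheduler/ReplicationJob.
    public class CronSketch {

        public static class PrintJob implements Job {
            @Override
            public void execute(JobExecutionContext context) throws JobExecutionException {
                // Retrieve shared state the same way ReplicationJob fetches "service".
                String payload = (String) context.getJobDetail().getJobDataMap().get("payload");
                System.out.println("fired with " + payload);
            }
        }

        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.setProperty("org.quartz.scheduler.skipUpdateCheck", "true");
            props.setProperty("org.quartz.threadPool.class", "org.quartz.simpl.SimpleThreadPool");
            props.setProperty("org.quartz.threadPool.threadCount", "1");

            Scheduler sched = new StdSchedulerFactory(props).getScheduler();
            sched.start();

            JobDataMap dataMap = new JobDataMap();
            dataMap.put("payload", "hello");

            JobDetail detail = new JobDetail("printJob", null, PrintJob.class);
            detail.setJobDataMap(dataMap);

            // Placeholder cron expression: fire every 30 seconds.
            CronTrigger trigger = new CronTrigger("printTrigger", "group1", "0/30 * * * * ?");
            sched.scheduleJob(detail, trigger);
        }
    }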
+ *******************************************************************************/ package org.opendedup.sdfs.replication; import java.io.File; diff --git a/src/org/opendedup/sdfs/replication/ReplicationSnapShotManager.java b/src/org/opendedup/sdfs/replication/ReplicationSnapShotManager.java index af0a16fba..bceac2826 100644 --- a/src/org/opendedup/sdfs/replication/ReplicationSnapShotManager.java +++ b/src/org/opendedup/sdfs/replication/ReplicationSnapShotManager.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.replication; import java.util.concurrent.ConcurrentHashMap; diff --git a/src/org/opendedup/sdfs/replication/ShutdownHook.java b/src/org/opendedup/sdfs/replication/ShutdownHook.java index d7b0cf825..cae417ca7 100644 --- a/src/org/opendedup/sdfs/replication/ShutdownHook.java +++ b/src/org/opendedup/sdfs/replication/ShutdownHook.java @@ -1,23 +1,41 @@ -package org.opendedup.sdfs.replication; - -import org.opendedup.logging.SDFSLogger; - -class ShutdownHook extends Thread { - ReplicationScheduler sched; - String name; - - public ShutdownHook(ReplicationScheduler sched, String name) { - this.sched = sched; - this.name = name; - } - - @Override - public void run() { - SDFSLogger.getLog().info( - "Please Wait while shutting down SDFS Relication Service for " - + name); - sched.stopSchedules(); - SDFSLogger.getLog().info( - "SDFS Relication Service Shut Down Cleanly for " + name); - } +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.sdfs.replication; + +import org.opendedup.logging.SDFSLogger; + +class ShutdownHook extends Thread { + ReplicationScheduler sched; + String name; + + public ShutdownHook(ReplicationScheduler sched, String name) { + this.sched = sched; + this.name = name; + } + + @Override + public void run() { + SDFSLogger.getLog().info( + "Please Wait while shutting down SDFS Relication Service for " + + name); + sched.stopSchedules(); + SDFSLogger.getLog().info( + "SDFS Relication Service Shut Down Cleanly for " + name); + } } \ No newline at end of file diff --git a/src/org/opendedup/sdfs/servers/GCStandaloneService.java b/src/org/opendedup/sdfs/servers/GCStandaloneService.java index 6b98f76d0..fea8cface 100644 --- a/src/org/opendedup/sdfs/servers/GCStandaloneService.java +++ b/src/org/opendedup/sdfs/servers/GCStandaloneService.java @@ -1,5 +1,23 @@ -package org.opendedup.sdfs.servers; - -public class GCStandaloneService { - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.servers; + +public class GCStandaloneService { + +} diff --git a/src/org/opendedup/sdfs/servers/HCServer.java b/src/org/opendedup/sdfs/servers/HCServer.java index b5b913ec5..f9619f68f 100755 --- a/src/org/opendedup/sdfs/servers/HCServer.java +++ b/src/org/opendedup/sdfs/servers/HCServer.java @@ -1,39 +1,57 @@ -package org.opendedup.sdfs.servers; - -public class HCServer { - String hostName; - int port; - boolean useUDP; - boolean compress; - boolean useSSL; - - public HCServer(String hostName, int port, boolean useUDP, - boolean compress, boolean useSSL) { - this.hostName = hostName; - this.port = port; - this.useUDP = useUDP; - this.compress = compress; - this.useSSL = useSSL; - } - - public boolean isCompress() { - return compress; - } - - public boolean isUseUDP() { - return useUDP; - } - - public String getHostName() { - return hostName; - } - - public int getPort() { - return port; - } - - public boolean isSSL() { - return this.useSSL; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
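ShutdownHook above extends Thread and stops the replication schedules when it runs, but its registration is outside this patch; hooks of this kind are normally attached with Runtime.getRuntime().addShutdownHook, presumably where the ReplicationScheduler is constructed. A minimal sketch of that wiring, with a placeholder hook body:

    // Illustrative only: how a ShutdownHook-style thread is typically registered.
    public class ShutdownHookSketch {

        static class Hook extends Thread {
            @Override
            public void run() {
                System.out.println("stopping schedules before JVM exit...");
            }
        }

        public static void main(String[] args) {
            Runtime.getRuntime().addShutdownHook(new Hook());
            System.out.println("hook registered; it fires on normal exit or SIGTERM");
        }
    }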
+ * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.servers; + +public class HCServer { + String hostName; + int port; + boolean useUDP; + boolean compress; + boolean useSSL; + + public HCServer(String hostName, int port, boolean useUDP, + boolean compress, boolean useSSL) { + this.hostName = hostName; + this.port = port; + this.useUDP = useUDP; + this.compress = compress; + this.useSSL = useSSL; + } + + public boolean isCompress() { + return compress; + } + + public boolean isUseUDP() { + return useUDP; + } + + public String getHostName() { + return hostName; + } + + public int getPort() { + return port; + } + + public boolean isSSL() { + return this.useSSL; + } + +} diff --git a/src/org/opendedup/sdfs/servers/HCServiceProxy.java b/src/org/opendedup/sdfs/servers/HCServiceProxy.java index 1b9a2d43f..b2304d234 100755 --- a/src/org/opendedup/sdfs/servers/HCServiceProxy.java +++ b/src/org/opendedup/sdfs/servers/HCServiceProxy.java @@ -1,673 +1,691 @@ -package org.opendedup.sdfs.servers; - -import java.io.File; - -import java.io.FileOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.ExecutionException; - -import org.opendedup.collections.AbstractHashesMap; -import org.opendedup.collections.DataArchivedException; -import org.opendedup.collections.HashtableFullException; -import org.opendedup.collections.InsertRecord; -import org.opendedup.hashing.LargeFileBloomFilter; -import org.opendedup.hashing.Murmur3HashEngine; -import org.opendedup.logging.SDFSLogger; -import org.opendedup.mtools.BloomFDisk; -import org.opendedup.mtools.FDiskException; -import org.opendedup.sdfs.Main; -import org.opendedup.sdfs.cluster.ClusterSocket; -import org.opendedup.sdfs.cluster.DSEClientSocket; -import org.opendedup.sdfs.cluster.cmds.BFClaimHashesCmd; -import org.opendedup.sdfs.cluster.cmds.BatchHashExistsCmd; -import org.opendedup.sdfs.cluster.cmds.BatchWriteHashCmd; -import org.opendedup.sdfs.cluster.cmds.ClaimHashesCmd; -import org.opendedup.sdfs.cluster.cmds.DirectFetchChunkCmd; -import org.opendedup.sdfs.cluster.cmds.DirectWriteHashCmd; -import org.opendedup.sdfs.cluster.cmds.FDiskCmd; -import org.opendedup.sdfs.cluster.cmds.FetchChunkCmd; -import org.opendedup.sdfs.cluster.cmds.HashExistsCmd; -import org.opendedup.sdfs.cluster.cmds.RedundancyNotMetException; -import org.opendedup.sdfs.cluster.cmds.WriteHashCmd; -import org.opendedup.sdfs.filestore.AbstractChunkStore; -import org.opendedup.sdfs.filestore.HashChunk; -import org.opendedup.sdfs.io.HashLocPair; -import org.opendedup.sdfs.io.events.CloudSyncDLRequest; -import org.opendedup.sdfs.notification.FDiskEvent; -import org.opendedup.sdfs.notification.SDFSEvent; - -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.eventbus.EventBus; -import com.google.common.primitives.Longs; - -public class HCServiceProxy { - - private static HashChunkServiceInterface hcService = null; - private static DSEClientSocket socket = null; - private 
static EventBus eventBus = new EventBus(); - public static ClusterSocket cs = null; - private static int cacheSize = 104857600 / Main.CHUNK_LENGTH; - private static final LoadingCache chunks = CacheBuilder - .newBuilder().maximumSize(cacheSize).concurrencyLevel(72) - .build(new CacheLoader() { - public byte[] load(ByteArrayWrapper key) throws IOException { - if (Main.DSEClusterDirectIO) { - DirectFetchChunkCmd cmd = new DirectFetchChunkCmd( - key.data, key.hashloc); - cmd.executeCmd(socket); - return cmd.getChunk(); - } else { - FetchChunkCmd cmd = new FetchChunkCmd(key.data, - key.hashloc); - cmd.executeCmd(socket); - return cmd.getChunk(); - } - - } - }); - - // private static boolean initialized = false; - - public static void registerListener(Object obj) { - eventBus.register(obj); - } - - public static synchronized void processHashClaims(SDFSEvent evt) - throws IOException { - if (Main.chunkStoreLocal) - hcService.processHashClaims(evt); - else { - new ClaimHashesCmd(evt).executeCmd(cs); - - } - } - - public static synchronized long processHashClaims(SDFSEvent evt, - LargeFileBloomFilter bf) throws IOException { - if (Main.chunkStoreLocal) - return hcService.processHashClaims(evt, bf); - else { - new BFClaimHashesCmd(evt).executeCmd(cs); - } - return 0; - } - - public static synchronized boolean hashExists(byte[] hash) - throws IOException, HashtableFullException { - long pos = hcService.hashExists(hash); - if(pos != -1) - return true; - else - return false; - } - - public static HashChunk fetchHashChunk(byte[] hash) throws IOException, - DataArchivedException { - return hcService.fetchChunk(hash,-1); - } - - public static synchronized long getCacheSize() { - if (Main.chunkStoreLocal) { - return hcService.getCacheSize(); - } else - return 0; - } - - public static synchronized long getMaxCacheSize() { - if (Main.chunkStoreLocal) { - return hcService.getMaxCacheSize(); - } else - return 0; - } - - public static synchronized int getReadSpeed() { - if (Main.chunkStoreLocal) { - return hcService.getReadSpeed(); - } else - return 0; - } - - public static synchronized int getWriteSpeed() { - if (Main.chunkStoreLocal) { - return hcService.getWriteSpeed(); - } else - return 0; - } - - public static synchronized void setReadSpeed(int speed) { - if (Main.chunkStoreLocal) { - hcService.setReadSpeed(speed); - } - } - - public static synchronized void setWriteSpeed(int speed) { - if (Main.chunkStoreLocal) { - hcService.setWriteSpeed(speed); - } - } - - public static synchronized void setCacheSize(long sz) throws IOException { - if (Main.chunkStoreLocal) { - hcService.setCacheSize(sz); - } - } - - public static long getChunksFetched() { - return -1; - } - - public static synchronized void init(ArrayList volumes) { - try { - if (Main.chunkStoreLocal) { - SDFSLogger.getLog().info("Starting local chunkstore"); - hcService = new HashChunkService(); - hcService.init(); - File file = new File(Main.hashDBStore + File.separator - + ".lock"); - if (Main.runConsistancyCheck || file.exists()) { - hcService.runConsistancyCheck(); - } - touchRunFile(); - if (Main.syncDL) { - eventBus.post(new CloudSyncDLRequest(Main.DSEID,true)); - } - } - - else { - SDFSLogger.getLog().info( - "Starting clustered Volume with id=" - + Main.DSEClusterID + " config=" - + Main.DSEClusterConfig); - socket = new DSEClientSocket(Main.DSEClusterConfig, - Main.DSEClusterID, volumes); - cs = socket; - socket.startGCIfNone(); - } - } catch (Exception e) { - SDFSLogger.getLog().error("Unable to initialize HashChunkService ", - e); - 
System.err.println("Unable to initialize HashChunkService "); - e.printStackTrace(); - System.exit(-1); - } - } - - public static void syncVolume(long volumeID,boolean syncMap) { - if(Main.chunkStoreLocal) { - eventBus.post(new CloudSyncDLRequest(volumeID,syncMap)); - } - } - - public static byte getDseCount() { - if (Main.chunkStoreLocal) - return 1; - else { - - return (byte) socket.serverState.size(); - } - } - - public static AbstractHashesMap getHashesMap() { - if (Main.chunkStoreLocal) - return hcService.getHashesMap(); - else - return null; - } - - public static long getSize() { - if (Main.chunkStoreLocal) { - return hcService.getSize(); - } else { - return socket.getCurrentSize(); - } - } - - public static long getDSESize() { - if (Main.chunkStoreLocal) { - return HCServiceProxy.getChunkStore().size(); - } else { - return socket.getCurrentDSESize(); - } - } - - public static long getDSECompressedSize() { - if (Main.chunkStoreLocal) { - return HCServiceProxy.getChunkStore().compressedSize(); - } else { - return socket.getCurrentDSECompSize(); - } - } - - public static long getDSEMaxSize() { - if (Main.chunkStoreLocal) { - return HCServiceProxy.getChunkStore().maxSize(); - } else { - return socket.getDSEMaxSize(); - } - } - - public static long getMaxSize() { - if (Main.chunkStoreLocal) { - return HCServiceProxy.hcService.getMaxSize(); - } else { - return socket.getMaxSize(); - } - } - - public static long getFreeBlocks() { - if (Main.chunkStoreLocal) { - return HCServiceProxy.getChunkStore().getFreeBlocks(); - } else { - return socket.getFreeBlocks(); - } - } - - public static AbstractChunkStore getChunkStore() { - if (Main.chunkStoreLocal) - return hcService.getChuckStore(); - else - return null; - } - - public static int getPageSize() { - if (Main.chunkStoreLocal) { - return HCServiceProxy.hcService.getPageSize(); - } else { - return Main.CHUNK_LENGTH; - } - } - - public static void sync() throws IOException { - if (Main.chunkStoreLocal) - hcService.sync(); - } - - private static InsertRecord _write(byte[] hash, byte[] aContents, - byte[] hashloc) throws IOException, RedundancyNotMetException { - if (Main.DSEClusterDirectIO) - return new InsertRecord(true, directWriteChunk(hash, aContents, - hashloc)); - else { - int ncopies = 0; - for (int i = 1; i < 8; i++) { - if (hashloc[i] > (byte) 0) { - ncopies++; - } - } - if (ncopies >= Main.volume.getClusterCopies()) { - return new InsertRecord(true, hashloc); - } else if (ncopies > 0) { - byte[] ignoredHosts = new byte[ncopies]; - for (int i = 0; i < ncopies; i++) - ignoredHosts[i] = hashloc[i + 1]; - WriteHashCmd cmd = new WriteHashCmd(hash, aContents, false, - Main.volume.getClusterCopies(), ignoredHosts); - - cmd.executeCmd(socket); - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug( - "wrote data when found some but not all"); - return new InsertRecord(true, cmd.reponse()); - } else { - WriteHashCmd cmd = new WriteHashCmd(hash, aContents, false, - Main.volume.getClusterCopies()); - cmd.executeCmd(socket); - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug("wrote data when found none"); - - return new InsertRecord(true, cmd.reponse()); - } - } - } - - public static InsertRecord writeChunk(byte[] hash, byte[] aContents, - byte[] hashloc) throws IOException { - - int tries = 0; - while (true) { - try { - return _write(hash, aContents, hashloc); - } catch (IOException e) { - tries++; - if (tries > 10) { - throw e; - } - } catch (RedundancyNotMetException e) { - tries++; - hashloc = e.hashloc; - if (tries > 10) { - 
SDFSLogger.getLog().warn( - "Redundancy Requirements have not been met"); - // throw e; - } - } - } - - } - - public static byte[] directWriteChunk(byte[] hash, byte[] aContents, - byte[] hashloc) throws IOException { - int ncopies = 0; - for (int i = 1; i < 8; i++) { - if (hashloc[i] > (byte) 0) { - ncopies++; - } - } - if (ncopies >= Main.volume.getClusterCopies()) { - return hashloc; - } else if (ncopies > 0) { - byte[] ignoredHosts = new byte[ncopies]; - for (int i = 0; i < ncopies; i++) - ignoredHosts[i] = hashloc[i + 1]; - DirectWriteHashCmd cmd = new DirectWriteHashCmd(hash, aContents, - aContents.length, false, Main.volume.getClusterCopies(), - ignoredHosts); - cmd.executeCmd(socket); // - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug( - "wrote data when found some but not all"); - return cmd.reponse(); - - } else { - DirectWriteHashCmd cmd = new DirectWriteHashCmd(hash, aContents, - aContents.length, false, Main.volume.getClusterCopies()); - cmd.executeCmd(socket); - SDFSLogger.getLog().debug("wrote data when found none"); - if (cmd.getExDn() > 0) { - SDFSLogger - .getLog() - .warn("Was unable to write to all storage nodes, trying again"); - cmd = new DirectWriteHashCmd(hash, aContents, aContents.length, - false, Main.volume.getClusterCopies(), cmd.reponse()); - } - - return cmd.reponse(); - } - - } - - public static InsertRecord writeChunk(byte[] hash, byte[] aContents) - throws IOException, HashtableFullException { - if (Main.chunkStoreLocal) { - // doop = HCServiceProxy.hcService.hashExists(hash); - return HCServiceProxy.hcService.writeChunk(hash, aContents, false); - } else { - try { - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug("looking for hash"); - HashExistsCmd hcmd = new HashExistsCmd(hash, false, - Main.volume.getClusterCopies()); - hcmd.executeCmd(socket); - if (hcmd.meetsRedundancyRequirements()) { - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug("found all"); - return new InsertRecord(false, hcmd.getResponse()); - } else if (hcmd.exists()) { - byte[] ignoredHosts = new byte[hcmd.responses()]; - for (int i = 0; i < hcmd.responses(); i++) - ignoredHosts[i] = hcmd.getResponse()[i + 1]; - WriteHashCmd cmd = new WriteHashCmd(hash, aContents, false, - Main.volume.getClusterCopies(), ignoredHosts); - int tries = 0; - while (true) { - try { - cmd.executeCmd(socket); - break; - } catch (IOException e) { - tries++; - if (tries > 10) - throw e; - } - } - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug( - "wrote data when found some but not all"); - return new InsertRecord(true, cmd.reponse()); - } else { - WriteHashCmd cmd = new WriteHashCmd(hash, aContents, false, - Main.volume.getClusterCopies()); - int tries = 0; - while (true) { - try { - cmd.executeCmd(socket); - break; - } catch (IOException e) { - tries++; - if (tries > 10) - throw e; - } - } - if (SDFSLogger.isDebug()) - SDFSLogger.getLog().debug("wrote data when found none"); - - // if(cmd.getExDn() > 0) { - // SDFSLogger.getLog().warn("Was unable to write to all storage nodes."); - /* - * cmd = new DirectWriteHashCmd(hash, aContents, - * aContents.length, false, Main.volume.getClusterCopies(), - * cmd.reponse()); - */ - // } - return new InsertRecord(false, cmd.reponse()); - } - } catch (Exception e1) { - SDFSLogger.getLog().fatal("Unable to write chunk " + hash, e1); - throw new IOException("Unable to write chunk " + hash); - } finally { - - } - } - - } - - /* - * public static InsertRecord writeChunk(byte[] hash, byte[] aContents, - * byte[] ignoredHosts) throws IOException, 
HashtableFullException { if - * (Main.chunkStoreLocal) { // doop = - * HCServiceProxy.hcService.hashExists(hash); return - * HCServiceProxy.hcService.writeChunk(hash, aContents, false); } else { - * - * try { if (ignoredHosts != null) { WriteHashCmd cmd = new - * WriteHashCmd(hash, aContents, false, Main.volume.getClusterCopies(), - * ignoredHosts); cmd.executeCmd(socket); return new - * InsertRecord(true,cmd.reponse()); } else { WriteHashCmd cmd = new - * WriteHashCmd(hash, aContents, false, Main.volume.getClusterCopies()); - * cmd.executeCmd(socket); return new InsertRecord(true,cmd.reponse()); } } - * catch (Exception e1) { // - * SDFSLogger.getLog().fatal("Unable to write chunk " + hash, // e1); throw - * new IOException("Unable to write chunk " + hash); } finally { - * - * } } } - */ - - public static void runFDisk(FDiskEvent evt) throws FDiskException, - IOException { - if (Main.chunkStoreLocal) { - BloomFDisk fd = new BloomFDisk(evt); - fd.vanish(); - } - else { - long sz = HCServiceProxy.getSize(); - FDiskCmd cmd = new FDiskCmd(sz, evt); - cmd.executeCmd(cs); - } - } - - /* - * public static void fetchChunks(ArrayList hashes, String server, - * String password, int port, boolean useSSL) throws IOException, - * HashtableFullException { if (Main.chunkStoreLocal) { - * HCServiceProxy.hcService.remoteFetchChunks(hashes, server, password, - * port, useSSL); } else { throw new IllegalStateException( - * "not implemented for remote chunkstores"); } } - */ - - public static long hashExists(byte[] hash, boolean findAll) - throws IOException, HashtableFullException { - if (Main.chunkStoreLocal) { - return HCServiceProxy.hcService.hashExists(hash); - - } else { - HashExistsCmd cmd = new HashExistsCmd(hash, findAll, - Main.volume.getClusterCopies()); - cmd.executeCmd(socket); - return Longs.fromByteArray(cmd.getResponse()); - } - } - - public static List batchHashExists(List hashes) - throws IOException { - if (Main.chunkStoreLocal) { - throw new IOException("not implemented for localstore"); - - } else { - BatchHashExistsCmd cmd = new BatchHashExistsCmd(hashes); - cmd.executeCmd(socket); - return cmd.getHashes(); - } - } - - public static List batchWriteHash(List hashes) - throws IOException { - if (Main.chunkStoreLocal) { - throw new IOException("not implemented for localstore"); - - } else { - BatchWriteHashCmd cmd = new BatchWriteHashCmd(hashes); - cmd.executeCmd(socket); - return cmd.getHashes(); - } - } - - public static long hashExists(byte[] hash, boolean findAll, - byte numtowaitfor) throws IOException, HashtableFullException { - if (Main.chunkStoreLocal) { - return HCServiceProxy.hcService.hashExists(hash); - } else { - HashExistsCmd cmd = new HashExistsCmd(hash, findAll, numtowaitfor); - cmd.executeCmd(socket); - return Longs.fromByteArray(cmd.getResponse()); - } - } - - static Murmur3HashEngine he = new Murmur3HashEngine(); - - public static byte[] fetchChunk(byte[] hash, byte[] hashloc, boolean direct) - throws IOException, DataArchivedException { - - if (Main.chunkStoreLocal) { - byte[] data = null; - long pos = -1; - if (direct) { - pos = Longs.fromByteArray(hashloc); - } - - data = HCServiceProxy.hcService.fetchChunk(hash,pos).getData(); - - - return data; - } else { - ByteArrayWrapper wrapper = new ByteArrayWrapper(hash, hashloc); - try { - byte[] _bz = chunks.get(wrapper); - byte[] bz = org.bouncycastle.util.Arrays.clone(_bz); - return bz; - } catch (ExecutionException e) { - throw new IOException(e); - } - } - } - - public static void cacheData(byte[] hash, byte[] 
hashloc,boolean direct) - throws IOException, DataArchivedException { - - if (Main.chunkStoreLocal) { - long pos = -1; - if (direct) { - pos = Longs.fromByteArray(hashloc); - } - HCServiceProxy.hcService.cacheChunk(hash,pos); - } - } - - public static long getChunksRead() { - return hcService.getChunksRead(); - } - - public static double getChunksWritten() { - return hcService.getChunksWritten(); - } - - public static double getKBytesRead() { - return hcService.getKBytesRead(); - } - - public static double getKBytesWrite() { - return hcService.getKBytesWrite(); - } - - public static long getDupsFound() { - return hcService.getDupsFound(); - } - - public static void close() { - hcService.close(); - SDFSLogger.getLog().info("Deleting lock file"); - File file = new File(Main.hashDBStore + File.separator + ".lock"); - file.delete(); - } - - private static void touchRunFile() { - File file = new File(Main.hashDBStore + File.separator + ".lock"); - try { - - if (!file.exists()) - new FileOutputStream(file).close(); - file.setLastModified(System.currentTimeMillis()); - SDFSLogger.getLog().warn("Write lock file " + file.getPath()); - } catch (IOException e) { - SDFSLogger.getLog().warn( - "unable to create lock file " + file.getPath(), e); - } - } - - private static final class ByteArrayWrapper { - private final byte[] data; - public final byte[] hashloc; - - public ByteArrayWrapper(byte[] data, byte[] hashloc) { - if (data == null) { - throw new NullPointerException(); - } - this.data = data; - this.hashloc = hashloc; - } - - @Override - public boolean equals(Object other) { - if (!(other instanceof ByteArrayWrapper)) { - return false; - } - return Arrays.equals(data, ((ByteArrayWrapper) other).data); - } - - @Override - public int hashCode() { - return Arrays.hashCode(data); - } - } - - public static String restoreBlock(byte[] hash) throws IOException { - return hcService.restoreBlock(hash); - } - - public static boolean blockRestored(String id) throws IOException { - return hcService.blockRestored(id); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
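The HCServiceProxy file removed above (and re-added with the license header below) keeps fetched chunks in a Guava LoadingCache keyed by a private ByteArrayWrapper, because a raw byte[] key would compare by identity rather than by content. A stripped-down sketch of the same pattern follows; the loader here fabricates a value, whereas the real loader issues FetchChunkCmd or DirectFetchChunkCmd against the cluster socket.

    import java.util.Arrays;
    import java.util.concurrent.ExecutionException;

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    // Illustrative only: content-equality key plus LoadingCache, as in HCServiceProxy.
    public class ChunkCacheSketch {

        static final class Key {
            final byte[] hash;

            Key(byte[] hash) {
                this.hash = hash;
            }

            @Override
            public boolean equals(Object o) {
                return o instanceof Key && Arrays.equals(hash, ((Key) o).hash);
            }

            @Override
            public int hashCode() {
                return Arrays.hashCode(hash);
            }
        }

        static final LoadingCache<Key, byte[]> chunks = CacheBuilder.newBuilder()
                .maximumSize(1024) // HCServiceProxy sizes this as 100MB / CHUNK_LENGTH
                .build(new CacheLoader<Key, byte[]>() {
                    @Override
                    public byte[] load(Key key) {
                        // The real code fetches the chunk from a storage node here.
                        return new byte[] { 1, 2, 3 };
                    }
                });

        public static void main(String[] args) throws ExecutionException {
            byte[] v1 = chunks.get(new Key(new byte[] { 9 }));
            byte[] v2 = chunks.get(new Key(new byte[] { 9 })); // cache hit: equal content
            System.out.println(v1 == v2); // true, same cached array
        }
    }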
+ *******************************************************************************/ +package org.opendedup.sdfs.servers; + +import java.io.File; + +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import org.opendedup.collections.AbstractHashesMap; +import org.opendedup.collections.DataArchivedException; +import org.opendedup.collections.HashtableFullException; +import org.opendedup.collections.InsertRecord; +import org.opendedup.hashing.LargeFileBloomFilter; +import org.opendedup.hashing.Murmur3HashEngine; +import org.opendedup.logging.SDFSLogger; +import org.opendedup.mtools.BloomFDisk; +import org.opendedup.mtools.FDiskException; +import org.opendedup.sdfs.Main; +import org.opendedup.sdfs.cluster.ClusterSocket; +import org.opendedup.sdfs.cluster.DSEClientSocket; +import org.opendedup.sdfs.cluster.cmds.BFClaimHashesCmd; +import org.opendedup.sdfs.cluster.cmds.BatchHashExistsCmd; +import org.opendedup.sdfs.cluster.cmds.BatchWriteHashCmd; +import org.opendedup.sdfs.cluster.cmds.ClaimHashesCmd; +import org.opendedup.sdfs.cluster.cmds.DirectFetchChunkCmd; +import org.opendedup.sdfs.cluster.cmds.DirectWriteHashCmd; +import org.opendedup.sdfs.cluster.cmds.FDiskCmd; +import org.opendedup.sdfs.cluster.cmds.FetchChunkCmd; +import org.opendedup.sdfs.cluster.cmds.HashExistsCmd; +import org.opendedup.sdfs.cluster.cmds.RedundancyNotMetException; +import org.opendedup.sdfs.cluster.cmds.WriteHashCmd; +import org.opendedup.sdfs.filestore.AbstractChunkStore; +import org.opendedup.sdfs.filestore.HashChunk; +import org.opendedup.sdfs.io.HashLocPair; +import org.opendedup.sdfs.io.events.CloudSyncDLRequest; +import org.opendedup.sdfs.notification.FDiskEvent; +import org.opendedup.sdfs.notification.SDFSEvent; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.eventbus.EventBus; +import com.google.common.primitives.Longs; + +public class HCServiceProxy { + + private static HashChunkServiceInterface hcService = null; + private static DSEClientSocket socket = null; + private static EventBus eventBus = new EventBus(); + public static ClusterSocket cs = null; + private static int cacheSize = 104857600 / Main.CHUNK_LENGTH; + private static final LoadingCache chunks = CacheBuilder + .newBuilder().maximumSize(cacheSize).concurrencyLevel(72) + .build(new CacheLoader() { + public byte[] load(ByteArrayWrapper key) throws IOException { + if (Main.DSEClusterDirectIO) { + DirectFetchChunkCmd cmd = new DirectFetchChunkCmd( + key.data, key.hashloc); + cmd.executeCmd(socket); + return cmd.getChunk(); + } else { + FetchChunkCmd cmd = new FetchChunkCmd(key.data, + key.hashloc); + cmd.executeCmd(socket); + return cmd.getChunk(); + } + + } + }); + + // private static boolean initialized = false; + + public static void registerListener(Object obj) { + eventBus.register(obj); + } + + public static synchronized void processHashClaims(SDFSEvent evt) + throws IOException { + if (Main.chunkStoreLocal) + hcService.processHashClaims(evt); + else { + new ClaimHashesCmd(evt).executeCmd(cs); + + } + } + + public static synchronized long processHashClaims(SDFSEvent evt, + LargeFileBloomFilter bf) throws IOException { + if (Main.chunkStoreLocal) + return hcService.processHashClaims(evt, bf); + else { + new BFClaimHashesCmd(evt).executeCmd(cs); + } + return 0; + } + + public static 
synchronized boolean hashExists(byte[] hash) + throws IOException, HashtableFullException { + long pos = hcService.hashExists(hash); + if(pos != -1) + return true; + else + return false; + } + + public static HashChunk fetchHashChunk(byte[] hash) throws IOException, + DataArchivedException { + return hcService.fetchChunk(hash,-1); + } + + public static synchronized long getCacheSize() { + if (Main.chunkStoreLocal) { + return hcService.getCacheSize(); + } else + return 0; + } + + public static synchronized long getMaxCacheSize() { + if (Main.chunkStoreLocal) { + return hcService.getMaxCacheSize(); + } else + return 0; + } + + public static synchronized int getReadSpeed() { + if (Main.chunkStoreLocal) { + return hcService.getReadSpeed(); + } else + return 0; + } + + public static synchronized int getWriteSpeed() { + if (Main.chunkStoreLocal) { + return hcService.getWriteSpeed(); + } else + return 0; + } + + public static synchronized void setReadSpeed(int speed) { + if (Main.chunkStoreLocal) { + hcService.setReadSpeed(speed); + } + } + + public static synchronized void setWriteSpeed(int speed) { + if (Main.chunkStoreLocal) { + hcService.setWriteSpeed(speed); + } + } + + public static synchronized void setCacheSize(long sz) throws IOException { + if (Main.chunkStoreLocal) { + hcService.setCacheSize(sz); + } + } + + public static long getChunksFetched() { + return -1; + } + + public static synchronized void init(ArrayList volumes) { + try { + if (Main.chunkStoreLocal) { + SDFSLogger.getLog().info("Starting local chunkstore"); + hcService = new HashChunkService(); + hcService.init(); + File file = new File(Main.hashDBStore + File.separator + + ".lock"); + if (Main.runConsistancyCheck || file.exists()) { + hcService.runConsistancyCheck(); + } + touchRunFile(); + if (Main.syncDL) { + eventBus.post(new CloudSyncDLRequest(Main.DSEID,true)); + } + } + + else { + SDFSLogger.getLog().info( + "Starting clustered Volume with id=" + + Main.DSEClusterID + " config=" + + Main.DSEClusterConfig); + socket = new DSEClientSocket(Main.DSEClusterConfig, + Main.DSEClusterID, volumes); + cs = socket; + socket.startGCIfNone(); + } + } catch (Exception e) { + SDFSLogger.getLog().error("Unable to initialize HashChunkService ", + e); + System.err.println("Unable to initialize HashChunkService "); + e.printStackTrace(); + System.exit(-1); + } + } + + public static void syncVolume(long volumeID,boolean syncMap) { + if(Main.chunkStoreLocal) { + eventBus.post(new CloudSyncDLRequest(volumeID,syncMap)); + } + } + + public static byte getDseCount() { + if (Main.chunkStoreLocal) + return 1; + else { + + return (byte) socket.serverState.size(); + } + } + + public static AbstractHashesMap getHashesMap() { + if (Main.chunkStoreLocal) + return hcService.getHashesMap(); + else + return null; + } + + public static long getSize() { + if (Main.chunkStoreLocal) { + return hcService.getSize(); + } else { + return socket.getCurrentSize(); + } + } + + public static long getDSESize() { + if (Main.chunkStoreLocal) { + return HCServiceProxy.getChunkStore().size(); + } else { + return socket.getCurrentDSESize(); + } + } + + public static long getDSECompressedSize() { + if (Main.chunkStoreLocal) { + return HCServiceProxy.getChunkStore().compressedSize(); + } else { + return socket.getCurrentDSECompSize(); + } + } + + public static long getDSEMaxSize() { + if (Main.chunkStoreLocal) { + return HCServiceProxy.getChunkStore().maxSize(); + } else { + return socket.getDSEMaxSize(); + } + } + + public static long getMaxSize() { + if 
(Main.chunkStoreLocal) { + return HCServiceProxy.hcService.getMaxSize(); + } else { + return socket.getMaxSize(); + } + } + + public static long getFreeBlocks() { + if (Main.chunkStoreLocal) { + return HCServiceProxy.getChunkStore().getFreeBlocks(); + } else { + return socket.getFreeBlocks(); + } + } + + public static AbstractChunkStore getChunkStore() { + if (Main.chunkStoreLocal) + return hcService.getChuckStore(); + else + return null; + } + + public static int getPageSize() { + if (Main.chunkStoreLocal) { + return HCServiceProxy.hcService.getPageSize(); + } else { + return Main.CHUNK_LENGTH; + } + } + + public static void sync() throws IOException { + if (Main.chunkStoreLocal) + hcService.sync(); + } + + private static InsertRecord _write(byte[] hash, byte[] aContents, + byte[] hashloc) throws IOException, RedundancyNotMetException { + if (Main.DSEClusterDirectIO) + return new InsertRecord(true, directWriteChunk(hash, aContents, + hashloc)); + else { + int ncopies = 0; + for (int i = 1; i < 8; i++) { + if (hashloc[i] > (byte) 0) { + ncopies++; + } + } + if (ncopies >= Main.volume.getClusterCopies()) { + return new InsertRecord(true, hashloc); + } else if (ncopies > 0) { + byte[] ignoredHosts = new byte[ncopies]; + for (int i = 0; i < ncopies; i++) + ignoredHosts[i] = hashloc[i + 1]; + WriteHashCmd cmd = new WriteHashCmd(hash, aContents, false, + Main.volume.getClusterCopies(), ignoredHosts); + + cmd.executeCmd(socket); + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug( + "wrote data when found some but not all"); + return new InsertRecord(true, cmd.reponse()); + } else { + WriteHashCmd cmd = new WriteHashCmd(hash, aContents, false, + Main.volume.getClusterCopies()); + cmd.executeCmd(socket); + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug("wrote data when found none"); + + return new InsertRecord(true, cmd.reponse()); + } + } + } + + public static InsertRecord writeChunk(byte[] hash, byte[] aContents, + byte[] hashloc) throws IOException { + + int tries = 0; + while (true) { + try { + return _write(hash, aContents, hashloc); + } catch (IOException e) { + tries++; + if (tries > 10) { + throw e; + } + } catch (RedundancyNotMetException e) { + tries++; + hashloc = e.hashloc; + if (tries > 10) { + SDFSLogger.getLog().warn( + "Redundancy Requirements have not been met"); + // throw e; + } + } + } + + } + + public static byte[] directWriteChunk(byte[] hash, byte[] aContents, + byte[] hashloc) throws IOException { + int ncopies = 0; + for (int i = 1; i < 8; i++) { + if (hashloc[i] > (byte) 0) { + ncopies++; + } + } + if (ncopies >= Main.volume.getClusterCopies()) { + return hashloc; + } else if (ncopies > 0) { + byte[] ignoredHosts = new byte[ncopies]; + for (int i = 0; i < ncopies; i++) + ignoredHosts[i] = hashloc[i + 1]; + DirectWriteHashCmd cmd = new DirectWriteHashCmd(hash, aContents, + aContents.length, false, Main.volume.getClusterCopies(), + ignoredHosts); + cmd.executeCmd(socket); // + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug( + "wrote data when found some but not all"); + return cmd.reponse(); + + } else { + DirectWriteHashCmd cmd = new DirectWriteHashCmd(hash, aContents, + aContents.length, false, Main.volume.getClusterCopies()); + cmd.executeCmd(socket); + SDFSLogger.getLog().debug("wrote data when found none"); + if (cmd.getExDn() > 0) { + SDFSLogger + .getLog() + .warn("Was unable to write to all storage nodes, trying again"); + cmd = new DirectWriteHashCmd(hash, aContents, aContents.length, + false, Main.volume.getClusterCopies(), cmd.reponse()); + } + 
+ return cmd.reponse(); + } + + } + + public static InsertRecord writeChunk(byte[] hash, byte[] aContents) + throws IOException, HashtableFullException { + if (Main.chunkStoreLocal) { + // doop = HCServiceProxy.hcService.hashExists(hash); + return HCServiceProxy.hcService.writeChunk(hash, aContents, false); + } else { + try { + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug("looking for hash"); + HashExistsCmd hcmd = new HashExistsCmd(hash, false, + Main.volume.getClusterCopies()); + hcmd.executeCmd(socket); + if (hcmd.meetsRedundancyRequirements()) { + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug("found all"); + return new InsertRecord(false, hcmd.getResponse()); + } else if (hcmd.exists()) { + byte[] ignoredHosts = new byte[hcmd.responses()]; + for (int i = 0; i < hcmd.responses(); i++) + ignoredHosts[i] = hcmd.getResponse()[i + 1]; + WriteHashCmd cmd = new WriteHashCmd(hash, aContents, false, + Main.volume.getClusterCopies(), ignoredHosts); + int tries = 0; + while (true) { + try { + cmd.executeCmd(socket); + break; + } catch (IOException e) { + tries++; + if (tries > 10) + throw e; + } + } + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug( + "wrote data when found some but not all"); + return new InsertRecord(true, cmd.reponse()); + } else { + WriteHashCmd cmd = new WriteHashCmd(hash, aContents, false, + Main.volume.getClusterCopies()); + int tries = 0; + while (true) { + try { + cmd.executeCmd(socket); + break; + } catch (IOException e) { + tries++; + if (tries > 10) + throw e; + } + } + if (SDFSLogger.isDebug()) + SDFSLogger.getLog().debug("wrote data when found none"); + + // if(cmd.getExDn() > 0) { + // SDFSLogger.getLog().warn("Was unable to write to all storage nodes."); + /* + * cmd = new DirectWriteHashCmd(hash, aContents, + * aContents.length, false, Main.volume.getClusterCopies(), + * cmd.reponse()); + */ + // } + return new InsertRecord(false, cmd.reponse()); + } + } catch (Exception e1) { + SDFSLogger.getLog().fatal("Unable to write chunk " + hash, e1); + throw new IOException("Unable to write chunk " + hash); + } finally { + + } + } + + } + + /* + * public static InsertRecord writeChunk(byte[] hash, byte[] aContents, + * byte[] ignoredHosts) throws IOException, HashtableFullException { if + * (Main.chunkStoreLocal) { // doop = + * HCServiceProxy.hcService.hashExists(hash); return + * HCServiceProxy.hcService.writeChunk(hash, aContents, false); } else { + * + * try { if (ignoredHosts != null) { WriteHashCmd cmd = new + * WriteHashCmd(hash, aContents, false, Main.volume.getClusterCopies(), + * ignoredHosts); cmd.executeCmd(socket); return new + * InsertRecord(true,cmd.reponse()); } else { WriteHashCmd cmd = new + * WriteHashCmd(hash, aContents, false, Main.volume.getClusterCopies()); + * cmd.executeCmd(socket); return new InsertRecord(true,cmd.reponse()); } } + * catch (Exception e1) { // + * SDFSLogger.getLog().fatal("Unable to write chunk " + hash, // e1); throw + * new IOException("Unable to write chunk " + hash); } finally { + * + * } } } + */ + + public static void runFDisk(FDiskEvent evt) throws FDiskException, + IOException { + if (Main.chunkStoreLocal) { + BloomFDisk fd = new BloomFDisk(evt); + fd.vanish(); + } + else { + long sz = HCServiceProxy.getSize(); + FDiskCmd cmd = new FDiskCmd(sz, evt); + cmd.executeCmd(cs); + } + } + + /* + * public static void fetchChunks(ArrayList hashes, String server, + * String password, int port, boolean useSSL) throws IOException, + * HashtableFullException { if (Main.chunkStoreLocal) { + * 
HCServiceProxy.hcService.remoteFetchChunks(hashes, server, password, + * port, useSSL); } else { throw new IllegalStateException( + * "not implemented for remote chunkstores"); } } + */ + + public static long hashExists(byte[] hash, boolean findAll) + throws IOException, HashtableFullException { + if (Main.chunkStoreLocal) { + return HCServiceProxy.hcService.hashExists(hash); + + } else { + HashExistsCmd cmd = new HashExistsCmd(hash, findAll, + Main.volume.getClusterCopies()); + cmd.executeCmd(socket); + return Longs.fromByteArray(cmd.getResponse()); + } + } + + public static List batchHashExists(List hashes) + throws IOException { + if (Main.chunkStoreLocal) { + throw new IOException("not implemented for localstore"); + + } else { + BatchHashExistsCmd cmd = new BatchHashExistsCmd(hashes); + cmd.executeCmd(socket); + return cmd.getHashes(); + } + } + + public static List batchWriteHash(List hashes) + throws IOException { + if (Main.chunkStoreLocal) { + throw new IOException("not implemented for localstore"); + + } else { + BatchWriteHashCmd cmd = new BatchWriteHashCmd(hashes); + cmd.executeCmd(socket); + return cmd.getHashes(); + } + } + + public static long hashExists(byte[] hash, boolean findAll, + byte numtowaitfor) throws IOException, HashtableFullException { + if (Main.chunkStoreLocal) { + return HCServiceProxy.hcService.hashExists(hash); + } else { + HashExistsCmd cmd = new HashExistsCmd(hash, findAll, numtowaitfor); + cmd.executeCmd(socket); + return Longs.fromByteArray(cmd.getResponse()); + } + } + + static Murmur3HashEngine he = new Murmur3HashEngine(); + + public static byte[] fetchChunk(byte[] hash, byte[] hashloc, boolean direct) + throws IOException, DataArchivedException { + + if (Main.chunkStoreLocal) { + byte[] data = null; + long pos = -1; + if (direct) { + pos = Longs.fromByteArray(hashloc); + } + + data = HCServiceProxy.hcService.fetchChunk(hash,pos).getData(); + + + return data; + } else { + ByteArrayWrapper wrapper = new ByteArrayWrapper(hash, hashloc); + try { + byte[] _bz = chunks.get(wrapper); + byte[] bz = org.bouncycastle.util.Arrays.clone(_bz); + return bz; + } catch (ExecutionException e) { + throw new IOException(e); + } + } + } + + public static void cacheData(byte[] hash, byte[] hashloc,boolean direct) + throws IOException, DataArchivedException { + + if (Main.chunkStoreLocal) { + long pos = -1; + if (direct) { + pos = Longs.fromByteArray(hashloc); + } + HCServiceProxy.hcService.cacheChunk(hash,pos); + } + } + + public static long getChunksRead() { + return hcService.getChunksRead(); + } + + public static double getChunksWritten() { + return hcService.getChunksWritten(); + } + + public static double getKBytesRead() { + return hcService.getKBytesRead(); + } + + public static double getKBytesWrite() { + return hcService.getKBytesWrite(); + } + + public static long getDupsFound() { + return hcService.getDupsFound(); + } + + public static void close() { + hcService.close(); + SDFSLogger.getLog().info("Deleting lock file"); + File file = new File(Main.hashDBStore + File.separator + ".lock"); + file.delete(); + } + + private static void touchRunFile() { + File file = new File(Main.hashDBStore + File.separator + ".lock"); + try { + + if (!file.exists()) + new FileOutputStream(file).close(); + file.setLastModified(System.currentTimeMillis()); + SDFSLogger.getLog().warn("Write lock file " + file.getPath()); + } catch (IOException e) { + SDFSLogger.getLog().warn( + "unable to create lock file " + file.getPath(), e); + } + } + + private static final class 
ByteArrayWrapper { + private final byte[] data; + public final byte[] hashloc; + + public ByteArrayWrapper(byte[] data, byte[] hashloc) { + if (data == null) { + throw new NullPointerException(); + } + this.data = data; + this.hashloc = hashloc; + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof ByteArrayWrapper)) { + return false; + } + return Arrays.equals(data, ((ByteArrayWrapper) other).data); + } + + @Override + public int hashCode() { + return Arrays.hashCode(data); + } + } + + public static String restoreBlock(byte[] hash) throws IOException { + return hcService.restoreBlock(hash); + } + + public static boolean blockRestored(String id) throws IOException { + return hcService.blockRestored(id); + } + +} diff --git a/src/org/opendedup/sdfs/servers/HashChunkService.java b/src/org/opendedup/sdfs/servers/HashChunkService.java index 0ef60b823..128e3ad7f 100755 --- a/src/org/opendedup/sdfs/servers/HashChunkService.java +++ b/src/org/opendedup/sdfs/servers/HashChunkService.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
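Editorial note (not part of the patch): HCServiceProxy keys its chunk cache on the hash bytes via the ByteArrayWrapper above because a raw byte[] uses identity equals()/hashCode(), so two arrays with identical content would never hit the same cache entry. A small demonstration of the problem and of the wrapper pattern (the Key and ByteKeyDemo names are illustrative):

// Editorial sketch: why the hash bytes are wrapped before being used as a map/cache key.
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public final class ByteKeyDemo {

    static final class Key {
        private final byte[] data;

        Key(byte[] data) {
            this.data = data.clone(); // defensive copy; the patch stores the array directly
        }

        @Override
        public boolean equals(Object o) {
            return o instanceof Key && Arrays.equals(data, ((Key) o).data);
        }

        @Override
        public int hashCode() {
            return Arrays.hashCode(data);
        }
    }

    public static void main(String[] args) {
        byte[] h1 = { 1, 2, 3 };
        byte[] h2 = { 1, 2, 3 }; // same content, different array instance

        Map<byte[], String> raw = new HashMap<>();
        raw.put(h1, "chunk");
        System.out.println(raw.get(h2)); // null: identity-based lookup misses

        Map<Key, String> wrapped = new HashMap<>();
        wrapped.put(new Key(h1), "chunk");
        System.out.println(wrapped.get(new Key(h2))); // "chunk": content-based lookup hits
    }
}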
+ *******************************************************************************/ package org.opendedup.sdfs.servers; import java.io.IOException; diff --git a/src/org/opendedup/sdfs/servers/HashChunkServiceInterface.java b/src/org/opendedup/sdfs/servers/HashChunkServiceInterface.java index 3de85a117..d1dd1d0ba 100644 --- a/src/org/opendedup/sdfs/servers/HashChunkServiceInterface.java +++ b/src/org/opendedup/sdfs/servers/HashChunkServiceInterface.java @@ -1,96 +1,114 @@ -package org.opendedup.sdfs.servers; - -import java.io.IOException; -import java.util.ArrayList; - -import org.opendedup.collections.AbstractHashesMap; -import org.opendedup.collections.DataArchivedException; -import org.opendedup.collections.HashtableFullException; -import org.opendedup.collections.InsertRecord; -import org.opendedup.hashing.LargeFileBloomFilter; -import org.opendedup.sdfs.filestore.AbstractChunkStore; -import org.opendedup.sdfs.filestore.HashChunk; -import org.opendedup.sdfs.filestore.cloud.RemoteVolumeInfo; -import org.opendedup.sdfs.notification.SDFSEvent; - -public interface HashChunkServiceInterface { - - /** - * @return the chunksFetched - */ - public abstract String restoreBlock(byte[] hash) throws IOException; - - public abstract boolean blockRestored(String id) throws IOException; - - public abstract long getChunksFetched(); - - public abstract AbstractChunkStore getChuckStore(); - - public abstract InsertRecord writeChunk(byte[] hash, byte[] aContents, - boolean compressed) throws IOException, HashtableFullException; - - public abstract void remoteFetchChunks(ArrayList al, String server, - String password, int port, boolean useSSL) throws IOException, - HashtableFullException; - - public abstract long hashExists(byte[] hash) throws IOException, - HashtableFullException; - - public abstract HashChunk fetchChunk(byte[] hash,long pos) throws IOException, - DataArchivedException; - - public abstract void cacheChunk(byte[] hash,long pos) throws IOException, - DataArchivedException; - - public abstract byte getHashRoute(byte[] hash); - - public abstract void processHashClaims(SDFSEvent evt) throws IOException; - - public abstract long processHashClaims(SDFSEvent evt, LargeFileBloomFilter bf) - throws IOException; - - public abstract void commitChunks(); - - public abstract AbstractHashesMap getHashesMap(); - - public abstract void runConsistancyCheck(); - - public abstract long getSize(); - - public abstract long getMaxSize(); - - public abstract int getPageSize(); - - public abstract long getChunksRead(); - - public abstract long getChunksWritten(); - - public abstract double getKBytesRead(); - - public abstract double getKBytesWrite(); - - public abstract long getDupsFound(); - - public abstract void close(); - - public abstract void sync() throws IOException; - - public abstract void init() throws IOException; - - public abstract void setReadSpeed(int speed); - - public abstract void setWriteSpeed(int speed); - - public abstract long getCacheSize(); - - public abstract long getMaxCacheSize(); - - public abstract int getReadSpeed(); - - public abstract int getWriteSpeed(); - - public abstract void setCacheSize(long sz) throws IOException; - - public abstract RemoteVolumeInfo[] getConnectedVolumes() throws IOException; - +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.servers; + +import java.io.IOException; +import java.util.ArrayList; + +import org.opendedup.collections.AbstractHashesMap; +import org.opendedup.collections.DataArchivedException; +import org.opendedup.collections.HashtableFullException; +import org.opendedup.collections.InsertRecord; +import org.opendedup.hashing.LargeFileBloomFilter; +import org.opendedup.sdfs.filestore.AbstractChunkStore; +import org.opendedup.sdfs.filestore.HashChunk; +import org.opendedup.sdfs.filestore.cloud.RemoteVolumeInfo; +import org.opendedup.sdfs.notification.SDFSEvent; + +public interface HashChunkServiceInterface { + + /** + * @return the chunksFetched + */ + public abstract String restoreBlock(byte[] hash) throws IOException; + + public abstract boolean blockRestored(String id) throws IOException; + + public abstract long getChunksFetched(); + + public abstract AbstractChunkStore getChuckStore(); + + public abstract InsertRecord writeChunk(byte[] hash, byte[] aContents, + boolean compressed) throws IOException, HashtableFullException; + + public abstract void remoteFetchChunks(ArrayList al, String server, + String password, int port, boolean useSSL) throws IOException, + HashtableFullException; + + public abstract long hashExists(byte[] hash) throws IOException, + HashtableFullException; + + public abstract HashChunk fetchChunk(byte[] hash,long pos) throws IOException, + DataArchivedException; + + public abstract void cacheChunk(byte[] hash,long pos) throws IOException, + DataArchivedException; + + public abstract byte getHashRoute(byte[] hash); + + public abstract void processHashClaims(SDFSEvent evt) throws IOException; + + public abstract long processHashClaims(SDFSEvent evt, LargeFileBloomFilter bf) + throws IOException; + + public abstract void commitChunks(); + + public abstract AbstractHashesMap getHashesMap(); + + public abstract void runConsistancyCheck(); + + public abstract long getSize(); + + public abstract long getMaxSize(); + + public abstract int getPageSize(); + + public abstract long getChunksRead(); + + public abstract long getChunksWritten(); + + public abstract double getKBytesRead(); + + public abstract double getKBytesWrite(); + + public abstract long getDupsFound(); + + public abstract void close(); + + public abstract void sync() throws IOException; + + public abstract void init() throws IOException; + + public abstract void setReadSpeed(int speed); + + public abstract void setWriteSpeed(int speed); + + public abstract long getCacheSize(); + + public abstract long getMaxCacheSize(); + + public abstract int getReadSpeed(); + + public abstract int getWriteSpeed(); + + public abstract void setCacheSize(long sz) throws IOException; + + public abstract RemoteVolumeInfo[] getConnectedVolumes() throws IOException; + } \ No newline at end of file diff --git 
a/src/org/opendedup/sdfs/servers/HashEngine.java b/src/org/opendedup/sdfs/servers/HashEngine.java index 62184eed2..309e76575 100644 --- a/src/org/opendedup/sdfs/servers/HashEngine.java +++ b/src/org/opendedup/sdfs/servers/HashEngine.java @@ -1,5 +1,23 @@ -package org.opendedup.sdfs.servers; - -public class HashEngine { - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.sdfs.servers; + +public class HashEngine { + +} diff --git a/src/org/opendedup/sdfs/servers/SDFSService.java b/src/org/opendedup/sdfs/servers/SDFSService.java index 0c42d45be..13069b3ee 100644 --- a/src/org/opendedup/sdfs/servers/SDFSService.java +++ b/src/org/opendedup/sdfs/servers/SDFSService.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.servers; import java.util.ArrayList; diff --git a/src/org/opendedup/sdfs/windows/fs/MetaDataFileInfo.java b/src/org/opendedup/sdfs/windows/fs/MetaDataFileInfo.java index 0baef3dbb..da8a4c630 100644 --- a/src/org/opendedup/sdfs/windows/fs/MetaDataFileInfo.java +++ b/src/org/opendedup/sdfs/windows/fs/MetaDataFileInfo.java @@ -1,7 +1,7 @@ /* The MIT License -Copyright (C) 2008 Yu Kobayashi http://yukoba.accelart.jp/ +Copyright (C) 2008, 2016 Yu Kobayashi http://yukoba.accelart.jp/ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -20,7 +20,7 @@ of this software and associated documentation files (the "Software"), to deal LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ + */ package org.opendedup.sdfs.windows.fs; diff --git a/src/org/opendedup/sdfs/windows/fs/MountSDFS.java b/src/org/opendedup/sdfs/windows/fs/MountSDFS.java index d7343b235..0fc3a637c 100644 --- a/src/org/opendedup/sdfs/windows/fs/MountSDFS.java +++ b/src/org/opendedup/sdfs/windows/fs/MountSDFS.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.windows.fs; import java.io.File; diff --git a/src/org/opendedup/sdfs/windows/fs/ShutdownHook.java b/src/org/opendedup/sdfs/windows/fs/ShutdownHook.java index 8e02bea7c..f970fb242 100644 --- a/src/org/opendedup/sdfs/windows/fs/ShutdownHook.java +++ b/src/org/opendedup/sdfs/windows/fs/ShutdownHook.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.windows.fs; import net.decasdev.dokan.Dokan; diff --git a/src/org/opendedup/sdfs/windows/fs/UnmountFS.java b/src/org/opendedup/sdfs/windows/fs/UnmountFS.java index b94cac809..eae375318 100644 --- a/src/org/opendedup/sdfs/windows/fs/UnmountFS.java +++ b/src/org/opendedup/sdfs/windows/fs/UnmountFS.java @@ -1,7 +1,7 @@ /* The MIT License -Copyright (C) 2008 Yu Kobayashi http://yukoba.accelart.jp/ +Copyright (C) 2008, 2011 Yu Kobayashi http://yukoba.accelart.jp/ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -20,7 +20,7 @@ of this software and associated documentation files (the "Software"), to deal LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ + */ package org.opendedup.sdfs.windows.fs; diff --git a/src/org/opendedup/sdfs/windows/fs/Utils.java b/src/org/opendedup/sdfs/windows/fs/Utils.java index acc2f5704..1e5ffb76d 100644 --- a/src/org/opendedup/sdfs/windows/fs/Utils.java +++ b/src/org/opendedup/sdfs/windows/fs/Utils.java @@ -1,7 +1,7 @@ /* The MIT License -Copyright (C) 2008 Yu Kobayashi http://yukoba.accelart.jp/ +Copyright (C) 2008, 2011 Yu Kobayashi http://yukoba.accelart.jp/ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -20,7 +20,7 @@ of this software and associated documentation files (the "Software"), to deal LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ + */ package org.opendedup.sdfs.windows.fs; diff --git a/src/org/opendedup/sdfs/windows/fs/WinSDFS.java b/src/org/opendedup/sdfs/windows/fs/WinSDFS.java index 70d0de4c9..80b4fa352 100644 --- a/src/org/opendedup/sdfs/windows/fs/WinSDFS.java +++ b/src/org/opendedup/sdfs/windows/fs/WinSDFS.java @@ -1,7 +1,7 @@ /* The MIT License -Copyright (C) 2008 Yu Kobayashi http://yukoba.accelart.jp/ +Copyright (C) 2008, 2016 Yu Kobayashi http://yukoba.accelart.jp/ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -20,7 +20,7 @@ of this software and associated documentation files (the "Software"), to deal LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ + */ package org.opendedup.sdfs.windows.fs; diff --git a/src/org/opendedup/sdfs/windows/utils/DriveIcon.java b/src/org/opendedup/sdfs/windows/utils/DriveIcon.java index f8fa266fa..4d2c168ae 100644 --- a/src/org/opendedup/sdfs/windows/utils/DriveIcon.java +++ b/src/org/opendedup/sdfs/windows/utils/DriveIcon.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.windows.utils; import java.io.File; diff --git a/src/org/opendedup/sdfs/windows/utils/WinRegistry.java b/src/org/opendedup/sdfs/windows/utils/WinRegistry.java index 912420ff2..b9c96b100 100644 --- a/src/org/opendedup/sdfs/windows/utils/WinRegistry.java +++ b/src/org/opendedup/sdfs/windows/utils/WinRegistry.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.sdfs.windows.utils; import java.lang.reflect.InvocationTargetException; diff --git a/src/org/opendedup/util/AsyncChunkWriteActionListener.java b/src/org/opendedup/util/AsyncChunkWriteActionListener.java index 49537e49d..4d061c531 100644 --- a/src/org/opendedup/util/AsyncChunkWriteActionListener.java +++ b/src/org/opendedup/util/AsyncChunkWriteActionListener.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.util; public abstract class AsyncChunkWriteActionListener { diff --git a/src/org/opendedup/util/BitSetTest.java b/src/org/opendedup/util/BitSetTest.java index cccda8fae..2e61798f8 100644 --- a/src/org/opendedup/util/BitSetTest.java +++ b/src/org/opendedup/util/BitSetTest.java @@ -1,39 +1,57 @@ -package org.opendedup.util; - -import java.io.IOException; -import java.util.Random; - -import org.apache.lucene.util.OpenBitSet; - -public class BitSetTest { - public static void main(String[] args) throws IOException, - ClassNotFoundException { - OpenBitSet set = new OpenBitSet(18719476739L); - // long bv = (Long.MAX_VALUE/2)+4; - // set.set((Long.MAX_VALUE/2)+4, true); - // System.out.println("bv=" + bv + " lv=" +set.nextSetBit(0)); - Random r = new Random(); - long smallest = Long.MAX_VALUE; - long tm = System.currentTimeMillis(); - for (int i = 0; i < 10000000; i++) { - long nv = (long) (r.nextDouble() * (18719476739L)); - if (nv < 0) - nv = nv * -1; - if (nv < 18719476736L) { - if (nv < smallest) - smallest = nv; - set.fastSet(nv); - if (!set.get(nv)) - System.out.println("failed at " + nv); - } - } - long dur = System.currentTimeMillis() - tm; - System.out.println("duration=" + dur); - System.out.println("Size=" + set.cardinality()); - long sm = set.nextSetBit(0); - System.out.println("smallest=" + smallest + " sm=" + sm); - - OpenBitSetSerialize.writeOut("/tmp/test.bin", set); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.util; + +import java.io.IOException; +import java.util.Random; + +import org.apache.lucene.util.OpenBitSet; + +public class BitSetTest { + public static void main(String[] args) throws IOException, + ClassNotFoundException { + OpenBitSet set = new OpenBitSet(18719476739L); + // long bv = (Long.MAX_VALUE/2)+4; + // set.set((Long.MAX_VALUE/2)+4, true); + // System.out.println("bv=" + bv + " lv=" +set.nextSetBit(0)); + Random r = new Random(); + long smallest = Long.MAX_VALUE; + long tm = System.currentTimeMillis(); + for (int i = 0; i < 10000000; i++) { + long nv = (long) (r.nextDouble() * (18719476739L)); + if (nv < 0) + nv = nv * -1; + if (nv < 18719476736L) { + if (nv < smallest) + smallest = nv; + set.fastSet(nv); + if (!set.get(nv)) + System.out.println("failed at " + nv); + } + } + long dur = System.currentTimeMillis() - tm; + System.out.println("duration=" + dur); + System.out.println("Size=" + set.cardinality()); + long sm = set.nextSetBit(0); + System.out.println("smallest=" + smallest + " sm=" + sm); + + OpenBitSetSerialize.writeOut("/tmp/test.bin", set); + } + +} diff --git a/src/org/opendedup/util/BoundedExecutor.java b/src/org/opendedup/util/BoundedExecutor.java index 1a1ec208d..cf45cd866 100644 --- a/src/org/opendedup/util/BoundedExecutor.java +++ b/src/org/opendedup/util/BoundedExecutor.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
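Editorial aside (not part of the patch): BitSetTest above uses Lucene's OpenBitSet because it accepts long indexes across an 18,719,476,739-bit range. As a hedged illustration of the same set-then-verify loop, the sketch below scales the range down so the JDK's int-indexed java.util.BitSet can stand in:

// Editorial sketch: the same random set-then-verify benchmark over a smaller, int-sized range.
import java.util.BitSet;
import java.util.Random;

public class BitSetSmokeTest {
    public static void main(String[] args) {
        final int range = 100_000_000;        // int-sized stand-in for 18719476739L
        BitSet set = new BitSet(range);
        Random r = new Random();
        int smallest = Integer.MAX_VALUE;

        long start = System.currentTimeMillis();
        for (int i = 0; i < 10_000_000; i++) {
            int nv = r.nextInt(range);
            if (nv < smallest)
                smallest = nv;
            set.set(nv);
            if (!set.get(nv))                 // should never happen
                System.out.println("failed at " + nv);
        }
        long dur = System.currentTimeMillis() - start;

        System.out.println("duration=" + dur);
        System.out.println("Size=" + set.cardinality());
        System.out.println("smallest=" + smallest + " sm=" + set.nextSetBit(0));
    }
}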
+ *******************************************************************************/ package org.opendedup.util; import java.util.concurrent.ExecutorService; diff --git a/src/org/opendedup/util/ByteUtils.java b/src/org/opendedup/util/ByteUtils.java index ead138867..86d08ca97 100644 --- a/src/org/opendedup/util/ByteUtils.java +++ b/src/org/opendedup/util/ByteUtils.java @@ -1,52 +1,70 @@ -package org.opendedup.util; - -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.Iterator; - -public class ByteUtils { - - public static byte[] serializeHashMap(HashMap map) { - StringBuffer keys = new StringBuffer(); - Iterator iter = map.keySet().iterator(); - while (iter.hasNext()) { - String key = iter.next(); - keys.append(key); - if (iter.hasNext()) - keys.append(","); - } - StringBuffer values = new StringBuffer(); - iter = map.values().iterator(); - while (iter.hasNext()) { - String key = iter.next(); - values.append(key); - if (iter.hasNext()) - values.append(","); - } - byte[] kb = keys.toString().getBytes(); - byte[] vb = values.toString().getBytes(); - byte[] out = new byte[kb.length + vb.length + 8]; - ByteBuffer buf = ByteBuffer.wrap(out); - buf.putInt(kb.length); - buf.put(kb); - buf.putInt(vb.length); - buf.put(vb); - return buf.array(); - } - - public static HashMap deSerializeHashMap(byte[] b) { - ByteBuffer buf = ByteBuffer.wrap(b); - byte[] kb = new byte[buf.getInt()]; - buf.get(kb); - byte[] vb = new byte[buf.getInt()]; - buf.get(vb); - String[] keys = new String(kb).split(","); - String[] values = new String(vb).split(","); - HashMap map = new HashMap(); - for (int i = 0; i < keys.length; i++) { - map.put(keys[i], values[i]); - } - return map; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.util; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Iterator; + +public class ByteUtils { + + public static byte[] serializeHashMap(HashMap map) { + StringBuffer keys = new StringBuffer(); + Iterator iter = map.keySet().iterator(); + while (iter.hasNext()) { + String key = iter.next(); + keys.append(key); + if (iter.hasNext()) + keys.append(","); + } + StringBuffer values = new StringBuffer(); + iter = map.values().iterator(); + while (iter.hasNext()) { + String key = iter.next(); + values.append(key); + if (iter.hasNext()) + values.append(","); + } + byte[] kb = keys.toString().getBytes(); + byte[] vb = values.toString().getBytes(); + byte[] out = new byte[kb.length + vb.length + 8]; + ByteBuffer buf = ByteBuffer.wrap(out); + buf.putInt(kb.length); + buf.put(kb); + buf.putInt(vb.length); + buf.put(vb); + return buf.array(); + } + + public static HashMap deSerializeHashMap(byte[] b) { + ByteBuffer buf = ByteBuffer.wrap(b); + byte[] kb = new byte[buf.getInt()]; + buf.get(kb); + byte[] vb = new byte[buf.getInt()]; + buf.get(vb); + String[] keys = new String(kb).split(","); + String[] values = new String(vb).split(","); + HashMap map = new HashMap(); + for (int i = 0; i < keys.length; i++) { + map.put(keys[i], values[i]); + } + return map; + } + +} diff --git a/src/org/opendedup/util/CloneMagic.java b/src/org/opendedup/util/CloneMagic.java index 82a85068f..454751acd 100644 --- a/src/org/opendedup/util/CloneMagic.java +++ b/src/org/opendedup/util/CloneMagic.java @@ -1,35 +1,53 @@ -package org.opendedup.util; - -import java.lang.reflect.Field; - -public class CloneMagic { - public static Object clone(Object o) { - Object clone = null; - - try { - clone = o.getClass().newInstance(); - } catch (InstantiationException e) { - e.printStackTrace(); - } catch (IllegalAccessException e) { - e.printStackTrace(); - } - - // Walk up the superclass hierarchy - for (Class obj = o.getClass(); !obj.equals(Object.class); obj = obj - .getSuperclass()) { - Field[] fields = obj.getDeclaredFields(); - for (int i = 0; i < fields.length; i++) { - fields[i].setAccessible(true); - try { - // for each class/suerclass, copy all fields - // from this object to the clone - fields[i].set(clone, fields[i].get(o)); - } catch (IllegalArgumentException e) { - } catch (IllegalAccessException e) { - } - } - } - return clone; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
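Editorial note (not part of the patch): serializeHashMap()/deSerializeHashMap() above encode the map as an int key-block length, the comma-joined keys, an int value-block length, then the comma-joined values, which only round-trips while no key or value contains a comma. A modernized sketch of the same layout (UTF-8 and String.join are substitutions; the demo keys and values are made up):

// Editorial sketch: [keyLen:int][keys][valLen:int][values], keys and values each comma-joined.
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Map;

public class MapCodecDemo {

    static byte[] encode(Map<String, String> map) {
        byte[] kb = String.join(",", map.keySet()).getBytes(StandardCharsets.UTF_8);
        byte[] vb = String.join(",", map.values()).getBytes(StandardCharsets.UTF_8);
        return ByteBuffer.allocate(kb.length + vb.length + 8)
                .putInt(kb.length).put(kb)
                .putInt(vb.length).put(vb)
                .array();
    }

    static Map<String, String> decode(byte[] b) {
        ByteBuffer buf = ByteBuffer.wrap(b);
        byte[] kb = new byte[buf.getInt()];
        buf.get(kb);
        byte[] vb = new byte[buf.getInt()];
        buf.get(vb);
        String[] keys = new String(kb, StandardCharsets.UTF_8).split(",");
        String[] values = new String(vb, StandardCharsets.UTF_8).split(",");
        Map<String, String> map = new LinkedHashMap<>();
        for (int i = 0; i < keys.length; i++)
            map.put(keys[i], values[i]);
        return map;
    }

    public static void main(String[] args) {
        Map<String, String> m = new LinkedHashMap<>();
        m.put("capacity", "100GB");
        m.put("dedup-rate", "4.2");
        System.out.println(decode(encode(m))); // {capacity=100GB, dedup-rate=4.2}
    }
}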
+ *******************************************************************************/ +package org.opendedup.util; + +import java.lang.reflect.Field; + +public class CloneMagic { + public static Object clone(Object o) { + Object clone = null; + + try { + clone = o.getClass().newInstance(); + } catch (InstantiationException e) { + e.printStackTrace(); + } catch (IllegalAccessException e) { + e.printStackTrace(); + } + + // Walk up the superclass hierarchy + for (Class obj = o.getClass(); !obj.equals(Object.class); obj = obj + .getSuperclass()) { + Field[] fields = obj.getDeclaredFields(); + for (int i = 0; i < fields.length; i++) { + fields[i].setAccessible(true); + try { + // for each class/suerclass, copy all fields + // from this object to the clone + fields[i].set(clone, fields[i].get(o)); + } catch (IllegalArgumentException e) { + } catch (IllegalAccessException e) { + } + } + } + return clone; + } + +} diff --git a/src/org/opendedup/util/CommandLineProgressBar.java b/src/org/opendedup/util/CommandLineProgressBar.java index 5c24ceb66..00defda02 100644 --- a/src/org/opendedup/util/CommandLineProgressBar.java +++ b/src/org/opendedup/util/CommandLineProgressBar.java @@ -1,50 +1,68 @@ -package org.opendedup.util; - -import java.io.PrintStream; - -public class CommandLineProgressBar { - private final String info; - private final PrintStream out; - private final float onePercent; - private long currentPercent = -1; - - public CommandLineProgressBar(String info, long totalCount, PrintStream out) { - this.info = info; - this.out = out; - this.onePercent = Float.valueOf(100F) - / Float.valueOf(String.valueOf(totalCount)); - this.update(0); - } - - public void update(long currentCount) { - int percent = (int) (onePercent * currentCount); - if (percent != currentPercent) { - this.currentPercent = percent; - printProgBar(this.currentPercent); - } - } - - public void finish() { - printProgBar(100); - } - - private void printProgBar(long percent) { - StringBuilder bar = new StringBuilder(info + " |"); - - for (int i = 0; i < 50; i++) { - if (i < (percent / 2)) { - bar.append(")"); - } else if (i == (percent / 2)) { - bar.append("]"); - } else { - bar.append(" "); - } - } - bar.append("| " + percent + "% "); - out.print("\r" + bar.toString()); - if (percent == 100) { - out.println("\r\n"); - out.flush(); - } - } +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
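Editorial aside (not part of the patch): CloneMagic.clone() above instantiates the target with newInstance() and then copies every declared field up the class hierarchy by reflection, so it requires an accessible no-arg constructor and yields a shallow copy. A short usage sketch (the Box class and its fields are illustrative):

// Editorial sketch: exercising the reflective shallow clone shown above.
import org.opendedup.util.CloneMagic;

public class CloneMagicDemo {
    public static class Box {
        public String label;
        public int[] sizes = { 1, 2, 3 };
    }

    public static void main(String[] args) {
        Box original = new Box();
        original.label = "chunks";

        Box copy = (Box) CloneMagic.clone(original);
        System.out.println(copy.label);                   // "chunks"
        System.out.println(copy.sizes == original.sizes); // true: shallow copy shares the array
    }
}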
+ *******************************************************************************/ +package org.opendedup.util; + +import java.io.PrintStream; + +public class CommandLineProgressBar { + private final String info; + private final PrintStream out; + private final float onePercent; + private long currentPercent = -1; + + public CommandLineProgressBar(String info, long totalCount, PrintStream out) { + this.info = info; + this.out = out; + this.onePercent = Float.valueOf(100F) + / Float.valueOf(String.valueOf(totalCount)); + this.update(0); + } + + public void update(long currentCount) { + int percent = (int) (onePercent * currentCount); + if (percent != currentPercent) { + this.currentPercent = percent; + printProgBar(this.currentPercent); + } + } + + public void finish() { + printProgBar(100); + } + + private void printProgBar(long percent) { + StringBuilder bar = new StringBuilder(info + " |"); + + for (int i = 0; i < 50; i++) { + if (i < (percent / 2)) { + bar.append(")"); + } else if (i == (percent / 2)) { + bar.append("]"); + } else { + bar.append(" "); + } + } + bar.append("| " + percent + "% "); + out.print("\r" + bar.toString()); + if (percent == 100) { + out.println("\r\n"); + out.flush(); + } + } } \ No newline at end of file diff --git a/src/org/opendedup/util/CompressionUtils.java b/src/org/opendedup/util/CompressionUtils.java index 6a963bf54..40d488966 100644 --- a/src/org/opendedup/util/CompressionUtils.java +++ b/src/org/opendedup/util/CompressionUtils.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.util; import java.io.ByteArrayOutputStream; diff --git a/src/org/opendedup/util/DeleteDir.java b/src/org/opendedup/util/DeleteDir.java index 0647581aa..29accdfa0 100644 --- a/src/org/opendedup/util/DeleteDir.java +++ b/src/org/opendedup/util/DeleteDir.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
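Editorial note (not part of the patch): CommandLineProgressBar above scales the running count by onePercent = 100 / totalCount and redraws only when the integer percentage changes, so calling update() once per item stays cheap even for large totals. A brief driver sketch using the public API shown above (the total and label are made up):

// Editorial sketch: driving the progress bar once per unit of work.
import org.opendedup.util.CommandLineProgressBar;

public class ProgressDemo {
    public static void main(String[] args) throws InterruptedException {
        long total = 2_000;
        CommandLineProgressBar bar =
                new CommandLineProgressBar("importing chunks", total, System.out);
        for (long i = 1; i <= total; i++) {
            Thread.sleep(1); // stand-in for real work
            bar.update(i);
        }
        bar.finish();
    }
}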
+ *******************************************************************************/ package org.opendedup.util; import java.io.File; diff --git a/src/org/opendedup/util/DirectBufPool.java b/src/org/opendedup/util/DirectBufPool.java index ccd7fce84..1f14cc24f 100644 --- a/src/org/opendedup/util/DirectBufPool.java +++ b/src/org/opendedup/util/DirectBufPool.java @@ -1,74 +1,92 @@ -package org.opendedup.util; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.concurrent.ConcurrentLinkedQueue; - -import org.opendedup.logging.SDFSLogger; - -public class DirectBufPool { - - private int poolSize = 1; - - private ConcurrentLinkedQueue passiveObjects = new ConcurrentLinkedQueue(); - private int size = 0; - private boolean closed = false; - - public DirectBufPool(int sz) { - this.size = sz; - this.populatePool(); - } - - public void populatePool() { - for (int i = 0; i < poolSize; i++) { - try { - this.passiveObjects.add(this.makeObject()); - } catch (Exception e) { - e.printStackTrace(); - SDFSLogger.getLog().fatal( - "unable to instancial Hash Function pool", e); - - } finally { - - } - } - } - - public ByteBuffer borrowObject() throws IOException { - if (this.closed) - throw new IOException("Buf Pool closed"); - ByteBuffer hc = null; - hc = this.passiveObjects.poll(); - if (hc == null) { - hc = makeObject(); - } - return hc; - } - - public void returnObject(ByteBuffer buf) { - if (!this.closed) { - buf.position(0); - this.passiveObjects.add(buf); - } else { - buf.clear(); - buf = null; - } - } - - public ByteBuffer makeObject() { - return ByteBuffer.allocateDirect(size); - } - - public void destroyObject(ByteBuffer buf) { - buf.clear(); - buf = null; - } - - public void close() { - this.closed = true; - while (this.passiveObjects.peek() != null) { - this.destroyObject(this.passiveObjects.poll()); - } - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.util; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.ConcurrentLinkedQueue; + +import org.opendedup.logging.SDFSLogger; + +public class DirectBufPool { + + private int poolSize = 1; + + private ConcurrentLinkedQueue passiveObjects = new ConcurrentLinkedQueue(); + private int size = 0; + private boolean closed = false; + + public DirectBufPool(int sz) { + this.size = sz; + this.populatePool(); + } + + public void populatePool() { + for (int i = 0; i < poolSize; i++) { + try { + this.passiveObjects.add(this.makeObject()); + } catch (Exception e) { + e.printStackTrace(); + SDFSLogger.getLog().fatal( + "unable to instancial Hash Function pool", e); + + } finally { + + } + } + } + + public ByteBuffer borrowObject() throws IOException { + if (this.closed) + throw new IOException("Buf Pool closed"); + ByteBuffer hc = null; + hc = this.passiveObjects.poll(); + if (hc == null) { + hc = makeObject(); + } + return hc; + } + + public void returnObject(ByteBuffer buf) { + if (!this.closed) { + buf.position(0); + this.passiveObjects.add(buf); + } else { + buf.clear(); + buf = null; + } + } + + public ByteBuffer makeObject() { + return ByteBuffer.allocateDirect(size); + } + + public void destroyObject(ByteBuffer buf) { + buf.clear(); + buf = null; + } + + public void close() { + this.closed = true; + while (this.passiveObjects.peek() != null) { + this.destroyObject(this.passiveObjects.poll()); + } + } + +} diff --git a/src/org/opendedup/util/DomXmlExample.java b/src/org/opendedup/util/DomXmlExample.java index 70de5211c..4506503bc 100644 --- a/src/org/opendedup/util/DomXmlExample.java +++ b/src/org/opendedup/util/DomXmlExample.java @@ -1,81 +1,99 @@ -package org.opendedup.util; - -import java.io.StringWriter; - -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.transform.OutputKeys; -import javax.xml.transform.Transformer; -import javax.xml.transform.TransformerFactory; -import javax.xml.transform.dom.DOMSource; -import javax.xml.transform.stream.StreamResult; - -import org.w3c.dom.Comment; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Text; - -public class DomXmlExample { - - /** - * Our goal is to create a DOM XML tree and then print the XML. - */ - public static void main(String args[]) { - new DomXmlExample(); - } - - public DomXmlExample() { - try { - // /////////////////////////// - // Creating an empty XML Document - - // We need a Document - DocumentBuilderFactory dbfac = DocumentBuilderFactory.newInstance(); - DocumentBuilder docBuilder = dbfac.newDocumentBuilder(); - Document doc = docBuilder.newDocument(); - - // ////////////////////// - // Creating the XML tree - - // create the root element and add it to the document - Element root = doc.createElement("root"); - doc.appendChild(root); - - // create a comment and put it in the root element - Comment comment = doc.createComment("Just a thought"); - root.appendChild(comment); - - // create child element, add an attribute, and add to root - Element child = doc.createElement("child"); - child.setAttribute("name", "value"); - root.appendChild(child); - - // add a text element to the child - Text text = doc - .createTextNode("Filler, ... 
I could have had a foo!"); - child.appendChild(text); - - // /////////////// - // Output the XML - - // set up a transformer - TransformerFactory transfac = TransformerFactory.newInstance(); - Transformer trans = transfac.newTransformer(); - trans.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes"); - trans.setOutputProperty(OutputKeys.INDENT, "yes"); - - // create string from xml tree - StringWriter sw = new StringWriter(); - StreamResult result = new StreamResult(sw); - DOMSource source = new DOMSource(doc); - trans.transform(source, result); - String xmlString = sw.toString(); - - // print xml - System.out.println("Here's the xml:\n\n" + xmlString); - - } catch (Exception e) { - System.out.println(e); - } - } -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.util; + +import java.io.StringWriter; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.transform.OutputKeys; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.stream.StreamResult; + +import org.w3c.dom.Comment; +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.w3c.dom.Text; + +public class DomXmlExample { + + /** + * Our goal is to create a DOM XML tree and then print the XML. + */ + public static void main(String args[]) { + new DomXmlExample(); + } + + public DomXmlExample() { + try { + // /////////////////////////// + // Creating an empty XML Document + + // We need a Document + DocumentBuilderFactory dbfac = DocumentBuilderFactory.newInstance(); + DocumentBuilder docBuilder = dbfac.newDocumentBuilder(); + Document doc = docBuilder.newDocument(); + + // ////////////////////// + // Creating the XML tree + + // create the root element and add it to the document + Element root = doc.createElement("root"); + doc.appendChild(root); + + // create a comment and put it in the root element + Comment comment = doc.createComment("Just a thought"); + root.appendChild(comment); + + // create child element, add an attribute, and add to root + Element child = doc.createElement("child"); + child.setAttribute("name", "value"); + root.appendChild(child); + + // add a text element to the child + Text text = doc + .createTextNode("Filler, ... 
I could have had a foo!"); + child.appendChild(text); + + // /////////////// + // Output the XML + + // set up a transformer + TransformerFactory transfac = TransformerFactory.newInstance(); + Transformer trans = transfac.newTransformer(); + trans.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes"); + trans.setOutputProperty(OutputKeys.INDENT, "yes"); + + // create string from xml tree + StringWriter sw = new StringWriter(); + StreamResult result = new StreamResult(sw); + DOMSource source = new DOMSource(doc); + trans.transform(source, result); + String xmlString = sw.toString(); + + // print xml + System.out.println("Here's the xml:\n\n" + xmlString); + + } catch (Exception e) { + System.out.println(e); + } + } +} diff --git a/src/org/opendedup/util/ElapsedTime.java b/src/org/opendedup/util/ElapsedTime.java index 6493af300..7f1a4f40a 100755 --- a/src/org/opendedup/util/ElapsedTime.java +++ b/src/org/opendedup/util/ElapsedTime.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.util; import java.text.SimpleDateFormat; diff --git a/src/org/opendedup/util/EncryptUtils.java b/src/org/opendedup/util/EncryptUtils.java index 490c7bb43..8bd771e18 100644 --- a/src/org/opendedup/util/EncryptUtils.java +++ b/src/org/opendedup/util/EncryptUtils.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.util; import java.io.File; diff --git a/src/org/opendedup/util/FactorTest.java b/src/org/opendedup/util/FactorTest.java index e727c5491..4b3d36e20 100644 --- a/src/org/opendedup/util/FactorTest.java +++ b/src/org/opendedup/util/FactorTest.java @@ -1,62 +1,80 @@ -package org.opendedup.util; - -import java.util.ArrayList; - -public class FactorTest { - public static void main(String[] args) { - int val = 128 * 1024; - - System.out.println("\nThe factors of " + val + " are:"); - int[] result = factorsOf(val); - for (int i = 0; i < result.length && result[i] != 0; i++) { - System.out.println(result[i]); - } - System.out.println("closest=" + result[closest2Pos(3122, result)]); - } - - public static int[] factorsOf(double val) { - ArrayList al = new ArrayList(); - double prev = val; - al.add((int) val); - while (val >= 512) { - if ((prev - val) >= 512) { - al.add((int) val); - prev = val; - } - val = val / 1.2; - } - int[] z = new int[al.size()]; - for (int i = 0; i < al.size(); i++) { - z[i] = al.get(i); - } - return z; - } - - public static int closest2(int find, int[] values) { - int distance = Integer.MAX_VALUE; - int closest = -1; - for (int i : values) { - int distanceI = i - find; - if (distanceI > -1 && distance >= distanceI) { - distance = distanceI; - closest = i; - } - } - return closest; - } - - public static int closest2Pos(int find, int[] values) { - int distance = Integer.MAX_VALUE; - int closest = -1; - for (int z = 0; z < values.length; z++) { - int i = values[z]; - int distanceI = i - find; - if (distanceI > -1 && distance >= distanceI) { - distance = distanceI; - closest = z; - } - } - return closest; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.util; + +import java.util.ArrayList; + +public class FactorTest { + public static void main(String[] args) { + int val = 128 * 1024; + + System.out.println("\nThe factors of " + val + " are:"); + int[] result = factorsOf(val); + for (int i = 0; i < result.length && result[i] != 0; i++) { + System.out.println(result[i]); + } + System.out.println("closest=" + result[closest2Pos(3122, result)]); + } + + public static int[] factorsOf(double val) { + ArrayList al = new ArrayList(); + double prev = val; + al.add((int) val); + while (val >= 512) { + if ((prev - val) >= 512) { + al.add((int) val); + prev = val; + } + val = val / 1.2; + } + int[] z = new int[al.size()]; + for (int i = 0; i < al.size(); i++) { + z[i] = al.get(i); + } + return z; + } + + public static int closest2(int find, int[] values) { + int distance = Integer.MAX_VALUE; + int closest = -1; + for (int i : values) { + int distanceI = i - find; + if (distanceI > -1 && distance >= distanceI) { + distance = distanceI; + closest = i; + } + } + return closest; + } + + public static int closest2Pos(int find, int[] values) { + int distance = Integer.MAX_VALUE; + int closest = -1; + for (int z = 0; z < values.length; z++) { + int i = values[z]; + int distanceI = i - find; + if (distanceI > -1 && distance >= distanceI) { + distance = distanceI; + closest = z; + } + } + return closest; + } + +} diff --git a/src/org/opendedup/util/FileCounts.java b/src/org/opendedup/util/FileCounts.java index d4b426720..ad0816a0b 100644 --- a/src/org/opendedup/util/FileCounts.java +++ b/src/org/opendedup/util/FileCounts.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.util; import java.io.File; diff --git a/src/org/opendedup/util/FileLock.java b/src/org/opendedup/util/FileLock.java index 3d5455c48..d42e0e250 100644 --- a/src/org/opendedup/util/FileLock.java +++ b/src/org/opendedup/util/FileLock.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
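Editorial aside (not part of the patch): FactorTest.factorsOf() above builds a descending list of candidate sizes by repeatedly dividing by 1.2 and keeping only entries at least 512 apart, and closest2Pos() returns the index of the smallest candidate that is still greater than or equal to the requested value. A short usage sketch (the demo values mirror the main() above):

// Editorial sketch: generating the candidate sizes and picking the closest one from above.
import org.opendedup.util.FactorTest;

public class FactorDemo {
    public static void main(String[] args) {
        int[] candidates = FactorTest.factorsOf(128 * 1024);
        System.out.println("candidates=" + candidates.length);

        int want = 3122;
        int pos = FactorTest.closest2Pos(want, candidates);
        System.out.println("closest candidate >= " + want + " is " + candidates[pos]);
    }
}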
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.util; import java.util.concurrent.ConcurrentHashMap; diff --git a/src/org/opendedup/util/FindOpenPort.java b/src/org/opendedup/util/FindOpenPort.java index 6d3a1a064..7d9b5be62 100644 --- a/src/org/opendedup/util/FindOpenPort.java +++ b/src/org/opendedup/util/FindOpenPort.java @@ -1,66 +1,84 @@ -package org.opendedup.util; - -import java.io.IOException; -import java.net.ServerSocket; - -public class FindOpenPort { - - public static int pickFreePort(int start) - - { - int port = -1; - while (port == -1) { - ServerSocket socket = null; - - try - - { - - socket = new ServerSocket(start); - - port = socket.getLocalPort(); - - } - - catch (IOException e) - - { - - } - - finally - - { - - if (socket != null) - - { - - try - - { - - socket.close(); - - } - - catch (IOException e) - - { - - } - - } - - } - start++; - } - return port; - - } - - public static void main(String[] args) { - System.out.println(pickFreePort(6442)); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
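FindOpenPort.pickFreePort() probes ports one at a time, starting from the caller-supplied number, until a ServerSocket bind succeeds. A related sketch, not part of this patch, that instead lets the kernel assign any free ephemeral port by binding to port 0; the usual caveat applies that another process can grab the port after the socket is closed:

import java.io.IOException;
import java.net.ServerSocket;

public class EphemeralPortSketch {
    // Bind to port 0 so the OS assigns a free port, then release it.
    public static int anyFreePort() throws IOException {
        try (ServerSocket socket = new ServerSocket(0)) {
            return socket.getLocalPort();
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println(anyFreePort());
    }
}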
+ *******************************************************************************/ +package org.opendedup.util; + +import java.io.IOException; +import java.net.ServerSocket; + +public class FindOpenPort { + + public static int pickFreePort(int start) + + { + int port = -1; + while (port == -1) { + ServerSocket socket = null; + + try + + { + + socket = new ServerSocket(start); + + port = socket.getLocalPort(); + + } + + catch (IOException e) + + { + + } + + finally + + { + + if (socket != null) + + { + + try + + { + + socket.close(); + + } + + catch (IOException e) + + { + + } + + } + + } + start++; + } + return port; + + } + + public static void main(String[] args) { + System.out.println(pickFreePort(6442)); + } + +} diff --git a/src/org/opendedup/util/ISCSITargetExplorer.java b/src/org/opendedup/util/ISCSITargetExplorer.java index 482e86642..c542042f7 100644 --- a/src/org/opendedup/util/ISCSITargetExplorer.java +++ b/src/org/opendedup/util/ISCSITargetExplorer.java @@ -1,134 +1,152 @@ -package org.opendedup.util; - -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.attribute.PosixFilePermission; -import java.nio.file.attribute.PosixFilePermissions; -import java.util.Set; - -public class ISCSITargetExplorer { - - public static synchronized void export(String iscsiRootPath, - String fileName, String network, long length, String iqn) - throws IOException { - try { - File f = new File(iscsiRootPath); - if (!f.exists()) { - f.mkdirs(); - Set perms = PosixFilePermissions - .fromString("rwxrwxrwx"); - try { - Files.setPosixFilePermissions(f.toPath(), perms); - } catch (Exception e) { - System.out.println("Error creating nfs folder" - + e.getMessage()); - } - } - String tcmProc = "tcm_node --fileio fileio_0/" + fileName + " " - + iscsiRootPath + "/" + fileName + " " + length; - String addLunProc = "lio_node --addlun " + iqn + " 1 0 " + fileName - + " " + "fileio_0/" + fileName; - String addNetProc = "lio_node --addnp " + iqn + " 1 " + network - + ":3260"; - String addPermProc = "lio_node --permissive " + iqn + " 1"; - String iqn2 = iqn.replaceAll(":", "\\:"); - String addRWProc = "/sys/kernel/config/target/iscsi/" + iqn2 - + "/tpgt_1/attrib/demo_mode_write_protect"; - String disAuthProc = "lio_node --disableauth=" + iqn + " 1"; - String enProc = "lio_node --enabletpg " + iqn + " 1"; - Process p = Runtime.getRuntime().exec(tcmProc); - if (p.waitFor() != 0) { - throw new IOException("unable to execute \"" + tcmProc + "\""); - } - p = Runtime.getRuntime().exec(addLunProc); - if (p.waitFor() != 0) { - throw new IOException("unable to execute \"" + addLunProc - + "\""); - } - p = Runtime.getRuntime().exec(addNetProc); - if (p.waitFor() != 0) { - throw new IOException("unable to execute \"" + addNetProc - + "\""); - } - p = Runtime.getRuntime().exec(addPermProc); - if (p.waitFor() != 0) { - throw new IOException("unable to execute \"" + addPermProc - + "\""); - } - FileOutputStream out = new FileOutputStream(addRWProc); - DataOutputStream dout = new DataOutputStream(out); - dout.writeInt(0); - out.close(); - out.flush(); - p = Runtime.getRuntime().exec(disAuthProc); - if (p.waitFor() != 0) { - throw new IOException("unable to execute \"" + disAuthProc - + "\""); - } - p = Runtime.getRuntime().exec(enProc); - if (p.waitFor() != 0) { - throw new IOException("unable to execute \"" + enProc + "\""); - } - } catch (Exception e) { - throw new IOException(e); - } - } - - public static 
synchronized void unexport(String iscsiRootPath, - String fileName, String network, long length, String iqn) - throws IOException { - try { - File f = new File(iscsiRootPath); - if (!f.exists()) { - f.mkdirs(); - Set perms = PosixFilePermissions - .fromString("rwxrwxrwx"); - try { - Files.setPosixFilePermissions(f.toPath(), perms); - } catch (Exception e) { - System.out.println("Error creating nfs folder" - + e.getMessage()); - } - } - String delIqn = "lio_node --deliqn " + iqn; - String delTcm = "tcm_node --freedev " + "fileio_0/" + fileName; - String disProc = "lio_node --deltpg " + iqn + " 1"; - Process p = Runtime.getRuntime().exec(disProc); - if (p.waitFor() != 0) { - throw new IOException("unable to execute \"" + disProc + "\""); - } - p = Runtime.getRuntime().exec(delIqn); - if (p.waitFor() != 0) { - throw new IOException("unable to execute \"" + delIqn + "\""); - } - p = Runtime.getRuntime().exec(delTcm); - if (p.waitFor() != 0) { - throw new IOException("unable to execute \"" + delTcm + "\""); - } - } catch (Exception e) { - throw new IOException(e); - } - } - - public static synchronized boolean available(String iqn) { - return new File("/sys/kernel/config/target/iscsi/" + iqn).exists(); - } - - public static void main(String[] args) throws NumberFormatException, - IOException { - if (args[0].equalsIgnoreCase("up")) - export(args[1], args[2], args[3], Long.parseLong(args[4]), args[5]); - else if (args[0].equalsIgnoreCase("down")) - unexport(args[1], args[2], args[3], Long.parseLong(args[4]), - args[5]); - else if (args[0].equalsIgnoreCase("avail")) - System.out.println(available(args[1])); - else { - System.out.println("invalid arguement " + args[0]); - } - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
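export() and unexport() drive the LIO target command line tools (tcm_node, lio_node) through Runtime.exec() and treat any non-zero exit code as an IOException. A small sketch of that run-and-check pattern using ProcessBuilder; it is illustrative only, the placeholder command is not a real target invocation, and the real lio_node/tcm_node calls require root and an installed LIO stack:

import java.io.IOException;

public class RunCommandSketch {
    // Run a command and fail loudly on a non-zero exit code, mirroring the
    // checks in export()/unexport().
    static void run(String... cmd) throws IOException {
        try {
            Process p = new ProcessBuilder(cmd).inheritIO().start();
            if (p.waitFor() != 0)
                throw new IOException("unable to execute \"" + String.join(" ", cmd) + "\"");
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException(e);
        }
    }

    public static void main(String[] args) throws IOException {
        run("true"); // placeholder command; substitute a real lio_node/tcm_node invocation
    }
}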
+ *******************************************************************************/ +package org.opendedup.util; + +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.attribute.PosixFilePermission; +import java.nio.file.attribute.PosixFilePermissions; +import java.util.Set; + +public class ISCSITargetExplorer { + + public static synchronized void export(String iscsiRootPath, + String fileName, String network, long length, String iqn) + throws IOException { + try { + File f = new File(iscsiRootPath); + if (!f.exists()) { + f.mkdirs(); + Set perms = PosixFilePermissions + .fromString("rwxrwxrwx"); + try { + Files.setPosixFilePermissions(f.toPath(), perms); + } catch (Exception e) { + System.out.println("Error creating nfs folder" + + e.getMessage()); + } + } + String tcmProc = "tcm_node --fileio fileio_0/" + fileName + " " + + iscsiRootPath + "/" + fileName + " " + length; + String addLunProc = "lio_node --addlun " + iqn + " 1 0 " + fileName + + " " + "fileio_0/" + fileName; + String addNetProc = "lio_node --addnp " + iqn + " 1 " + network + + ":3260"; + String addPermProc = "lio_node --permissive " + iqn + " 1"; + String iqn2 = iqn.replaceAll(":", "\\:"); + String addRWProc = "/sys/kernel/config/target/iscsi/" + iqn2 + + "/tpgt_1/attrib/demo_mode_write_protect"; + String disAuthProc = "lio_node --disableauth=" + iqn + " 1"; + String enProc = "lio_node --enabletpg " + iqn + " 1"; + Process p = Runtime.getRuntime().exec(tcmProc); + if (p.waitFor() != 0) { + throw new IOException("unable to execute \"" + tcmProc + "\""); + } + p = Runtime.getRuntime().exec(addLunProc); + if (p.waitFor() != 0) { + throw new IOException("unable to execute \"" + addLunProc + + "\""); + } + p = Runtime.getRuntime().exec(addNetProc); + if (p.waitFor() != 0) { + throw new IOException("unable to execute \"" + addNetProc + + "\""); + } + p = Runtime.getRuntime().exec(addPermProc); + if (p.waitFor() != 0) { + throw new IOException("unable to execute \"" + addPermProc + + "\""); + } + FileOutputStream out = new FileOutputStream(addRWProc); + DataOutputStream dout = new DataOutputStream(out); + dout.writeInt(0); + out.close(); + out.flush(); + p = Runtime.getRuntime().exec(disAuthProc); + if (p.waitFor() != 0) { + throw new IOException("unable to execute \"" + disAuthProc + + "\""); + } + p = Runtime.getRuntime().exec(enProc); + if (p.waitFor() != 0) { + throw new IOException("unable to execute \"" + enProc + "\""); + } + } catch (Exception e) { + throw new IOException(e); + } + } + + public static synchronized void unexport(String iscsiRootPath, + String fileName, String network, long length, String iqn) + throws IOException { + try { + File f = new File(iscsiRootPath); + if (!f.exists()) { + f.mkdirs(); + Set perms = PosixFilePermissions + .fromString("rwxrwxrwx"); + try { + Files.setPosixFilePermissions(f.toPath(), perms); + } catch (Exception e) { + System.out.println("Error creating nfs folder" + + e.getMessage()); + } + } + String delIqn = "lio_node --deliqn " + iqn; + String delTcm = "tcm_node --freedev " + "fileio_0/" + fileName; + String disProc = "lio_node --deltpg " + iqn + " 1"; + Process p = Runtime.getRuntime().exec(disProc); + if (p.waitFor() != 0) { + throw new IOException("unable to execute \"" + disProc + "\""); + } + p = Runtime.getRuntime().exec(delIqn); + if (p.waitFor() != 0) { + throw new IOException("unable to execute \"" + delIqn + "\""); + } + p = 
Runtime.getRuntime().exec(delTcm); + if (p.waitFor() != 0) { + throw new IOException("unable to execute \"" + delTcm + "\""); + } + } catch (Exception e) { + throw new IOException(e); + } + } + + public static synchronized boolean available(String iqn) { + return new File("/sys/kernel/config/target/iscsi/" + iqn).exists(); + } + + public static void main(String[] args) throws NumberFormatException, + IOException { + if (args[0].equalsIgnoreCase("up")) + export(args[1], args[2], args[3], Long.parseLong(args[4]), args[5]); + else if (args[0].equalsIgnoreCase("down")) + unexport(args[1], args[2], args[3], Long.parseLong(args[4]), + args[5]); + else if (args[0].equalsIgnoreCase("avail")) + System.out.println(available(args[1])); + else { + System.out.println("invalid arguement " + args[0]); + } + } + +} diff --git a/src/org/opendedup/util/KeyGenerator.java b/src/org/opendedup/util/KeyGenerator.java index 870eed528..01094df79 100644 --- a/src/org/opendedup/util/KeyGenerator.java +++ b/src/org/opendedup/util/KeyGenerator.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.util; import java.io.File; diff --git a/src/org/opendedup/util/LRUCache.java b/src/org/opendedup/util/LRUCache.java index 876f6948b..b5d858f4e 100644 --- a/src/org/opendedup/util/LRUCache.java +++ b/src/org/opendedup/util/LRUCache.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ package org.opendedup.util; import java.util.LinkedHashMap; diff --git a/src/org/opendedup/util/LargFile.java b/src/org/opendedup/util/LargFile.java index 0d5494e1a..bc61c9231 100644 --- a/src/org/opendedup/util/LargFile.java +++ b/src/org/opendedup/util/LargFile.java @@ -1,59 +1,77 @@ -package org.opendedup.util; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.FileWriter; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Random; - -public class LargFile { - public static void writeFile(String path, int size, int bs, boolean unique) - throws IOException { - long len = 1024L * 1024L * 1024L * size; - long sz = 0; - File log = new File(path + "log"); - File f = new File(path); - java.io.FileWriter writer = new FileWriter(log); - System.out.println("unique data=" + unique); - FileOutputStream str = new FileOutputStream(f, true); - Random rnd = new Random(); - byte[] b = new byte[bs]; - if (!unique) - rnd.nextBytes(b); - System.out.println("1:" + len); - long time = System.currentTimeMillis(); - int writes = 0; - int interval = (32768 * 10000) / bs; - while (sz < len) { - if (unique) { - rnd.nextBytes(b); - } - ByteBuffer buf = ByteBuffer.wrap(b); - str.getChannel().write(buf); - sz = sz + b.length; - if (writes > interval) { - - float mb = (float) (writes * bs) / (1024 * 1024); - float duration = (float) (System.currentTimeMillis() - time) / 1000; - float mbps = mb / duration; - System.out.println(mbps + " (mb/s)"); - writer.write(Float.toString(mbps) + "\n"); - time = System.currentTimeMillis(); - writes = 0; - - } else { - writes++; - } - } - writer.flush(); - writer.close(); - str.close(); - } - - public static void main(String[] args) throws IOException { - writeFile(args[0], Integer.parseInt(args[1]), 1048576, - Boolean.parseBoolean(args[2])); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
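LargFile.writeFile() streams the requested number of gigabytes in fixed-size buffers and, after each batch of writes, prints the throughput since the last report. A worked example of that MB/s arithmetic with illustrative numbers rather than measured values:

public class ThroughputMathSketch {
    public static void main(String[] args) {
        int bs = 1048576;      // 1 MiB buffer, the value LargFile.main() passes
        int writes = 320;      // illustrative number of buffers since the last report
        long elapsedMs = 2000; // illustrative elapsed time in milliseconds

        float mb = (float) (writes * bs) / (1024 * 1024); // 320 MB written
        float duration = elapsedMs / 1000f;               // 2.0 seconds
        float mbps = mb / duration;                       // 160.0
        System.out.println(mbps + " (mb/s)");
    }
}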
+ *******************************************************************************/ +package org.opendedup.util; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Random; + +public class LargFile { + public static void writeFile(String path, int size, int bs, boolean unique) + throws IOException { + long len = 1024L * 1024L * 1024L * size; + long sz = 0; + File log = new File(path + "log"); + File f = new File(path); + java.io.FileWriter writer = new FileWriter(log); + System.out.println("unique data=" + unique); + FileOutputStream str = new FileOutputStream(f, true); + Random rnd = new Random(); + byte[] b = new byte[bs]; + if (!unique) + rnd.nextBytes(b); + System.out.println("1:" + len); + long time = System.currentTimeMillis(); + int writes = 0; + int interval = (32768 * 10000) / bs; + while (sz < len) { + if (unique) { + rnd.nextBytes(b); + } + ByteBuffer buf = ByteBuffer.wrap(b); + str.getChannel().write(buf); + sz = sz + b.length; + if (writes > interval) { + + float mb = (float) (writes * bs) / (1024 * 1024); + float duration = (float) (System.currentTimeMillis() - time) / 1000; + float mbps = mb / duration; + System.out.println(mbps + " (mb/s)"); + writer.write(Float.toString(mbps) + "\n"); + time = System.currentTimeMillis(); + writes = 0; + + } else { + writes++; + } + } + writer.flush(); + writer.close(); + str.close(); + } + + public static void main(String[] args) throws IOException { + writeFile(args[0], Integer.parseInt(args[1]), 1048576, + Boolean.parseBoolean(args[2])); + } + +} diff --git a/src/org/opendedup/util/LongBitSet.java b/src/org/opendedup/util/LongBitSet.java index bc5d60145..c90271c79 100644 --- a/src/org/opendedup/util/LongBitSet.java +++ b/src/org/opendedup/util/LongBitSet.java @@ -1,176 +1,194 @@ -package org.opendedup.util; - -import java.io.Serializable; -import java.util.BitSet; -import java.util.HashMap; -import java.util.Map; - -public class LongBitSet implements Serializable { - /** - * - */ - private static final long serialVersionUID = 1L; - /** Number of bits allocated to a value in an index */ - private static final int VALUE_BITS = 32; // 1M values per bit set - private static final int INDEX_BITS = 64 - VALUE_BITS; // 1M values per bit - // set - /** Mask for extracting values */ - private static final long VALUE_MASK = Long.MAX_VALUE << VALUE_BITS; - private static final long INDEX_MASK = Long.MAX_VALUE >> INDEX_BITS; - /** - * Map from a value stored in high bits of a long index to a bit set mapped - * to the lower bits of an index. Bit sets size should be balanced - not to - * long (otherwise setting a single bit may waste megabytes of memory) but - * not too short (otherwise this map will get too big). Update value of - * {@code VALUE_BITS} for your needs. In most cases it is ok to keep 1M - - * 64M values in a bit set, so each bit set will occupy 128Kb - 8Mb. 
- */ - private Map m_sets = new HashMap(); - - /** - * Get set index by long index (extract bits 20-63) - * - * @param index - * Long index - * @return Index of a bit set in the inner map - */ - private long getSetIndex(final long index) { - return index & INDEX_MASK; - } - - /** - * Get index of a value in a bit set (bits 0-19) - * - * @param index - * Long index - * @return Index of a value in a bit set - */ - private int getPos(final long index) { - return (int) (index & VALUE_MASK); - } - - /** - * Helper method to get (or create, if necessary) a bit set for a given long - * index - * - * @param index - * Long index - * @return A bit set for a given index (always not null) - */ - private BitSet bitSet(final long index) { - final Long iIndex = getSetIndex(index); - BitSet bitSet = m_sets.get(iIndex); - if (bitSet == null) { - bitSet = new BitSet(Integer.MAX_VALUE); - m_sets.put(iIndex, bitSet); - System.out.println("set=" + m_sets.size()); - } - return bitSet; - } - - /** - * Set a given value for a given index - * - * @param index - * Long index - * @param value - * Value to set - */ - public void set(final long index, final boolean value) { - if (value) { - bitSet(index).set(getPos(index), value); - } else { // if value shall be cleared, check first if given partition - // exists - final BitSet bitSet = m_sets.get(getSetIndex(index)); - if (bitSet != null) - bitSet.clear(getPos(index)); - } - } - - /** - * Get a value for a given index - * - * @param index - * Long index - * @return Value associated with a given index - */ - public boolean get(final long index) { - final BitSet bitSet = m_sets.get(getSetIndex(index)); - return bitSet != null && bitSet.get(getPos(index)); - } - - /** - * Clear all bits between {@code fromIndex} (inclusive) and {@code toIndex} - * (exclusive) - * - * @param fromIndex - * Start index (inclusive) - * @param toIndex - * End index (exclusive) - */ - public void clear(final long fromIndex, final long toIndex) { - if (fromIndex >= toIndex) - return; - final long fromPos = getSetIndex(fromIndex); - final long toPos = getSetIndex(toIndex); - // remove all maps in the middle - for (long i = fromPos + 1; i < toPos; ++i) - m_sets.remove(i); - // clean two corner sets manually - final BitSet fromSet = m_sets.get(fromPos); - final BitSet toSet = m_sets.get(toPos); - // /are both ends in the same subset? - if (fromSet != null && fromSet == toSet) { - fromSet.clear(getPos(fromIndex), getPos(toIndex)); - return; - } - // clean left subset from left index to the end - if (fromSet != null) - fromSet.clear(getPos(fromIndex), fromSet.length()); - // clean right subset from 0 to given index. Note that both checks are - // independent - if (toSet != null) - toSet.clear(0, getPos(toIndex)); - } - - /** - * Iteration over all set values in a LongBitSet. Order of iteration is not - * specified. - * - * @param proc - * Procedure to call. If it returns {@code false}, then iteration - * will stop at once - */ - public long nextSetBit(long pos) { - for (final Map.Entry entry : m_sets.entrySet()) { - final BitSet bs = entry.getValue(); - final long baseIndex = entry.getKey(); - if (baseIndex >= pos) { - int i = bs.nextSetBit(0); - if (i >= 0) { - return (baseIndex + i); - } - } - - } - return -1; - } - - /** - * Iteration over all set values in a LongBitSet. Order of iteration is not - * specified. - * - * @param proc - * Procedure to call. 
If it returns {@code false}, then iteration - * will stop at once - */ - public long cardinality() { - long cd = 0; - for (final Map.Entry entry : m_sets.entrySet()) { - final BitSet bs = entry.getValue(); - cd = cd + (long) bs.cardinality(); - - } - return cd; - } -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.util; + +import java.io.Serializable; +import java.util.BitSet; +import java.util.HashMap; +import java.util.Map; + +public class LongBitSet implements Serializable { + /** + * + */ + private static final long serialVersionUID = 1L; + /** Number of bits allocated to a value in an index */ + private static final int VALUE_BITS = 32; // 1M values per bit set + private static final int INDEX_BITS = 64 - VALUE_BITS; // 1M values per bit + // set + /** Mask for extracting values */ + private static final long VALUE_MASK = Long.MAX_VALUE << VALUE_BITS; + private static final long INDEX_MASK = Long.MAX_VALUE >> INDEX_BITS; + /** + * Map from a value stored in high bits of a long index to a bit set mapped + * to the lower bits of an index. Bit sets size should be balanced - not to + * long (otherwise setting a single bit may waste megabytes of memory) but + * not too short (otherwise this map will get too big). Update value of + * {@code VALUE_BITS} for your needs. In most cases it is ok to keep 1M - + * 64M values in a bit set, so each bit set will occupy 128Kb - 8Mb. 
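The comment above describes the partitioning idea: the high bits of a long index select a BitSet held in a map and the low bits select a bit inside that set, so only the partitions that are actually touched get allocated. A self-contained sketch of that idea with an illustrative 20-bit partition size; the class in this patch uses its own constants and masks:

import java.util.BitSet;
import java.util.HashMap;
import java.util.Map;

public class PartitionedBitSetSketch {
    private static final int VALUE_BITS = 20; // ~1M bits (~128 KB) per partition, for illustration
    private static final long VALUE_MASK = (1L << VALUE_BITS) - 1;
    private final Map<Long, BitSet> sets = new HashMap<>();

    public void set(long index) {
        long setIndex = index >>> VALUE_BITS; // which partition
        int pos = (int) (index & VALUE_MASK); // bit inside the partition
        sets.computeIfAbsent(setIndex, k -> new BitSet()).set(pos);
    }

    public boolean get(long index) {
        BitSet bs = sets.get(index >>> VALUE_BITS);
        return bs != null && bs.get((int) (index & VALUE_MASK));
    }

    public static void main(String[] args) {
        PartitionedBitSetSketch s = new PartitionedBitSetSketch();
        s.set(3_000_000_000L);                     // well past Integer.MAX_VALUE
        System.out.println(s.get(3_000_000_000L)); // true
        System.out.println(s.get(42L));            // false
    }
}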
+ */ + private Map m_sets = new HashMap(); + + /** + * Get set index by long index (extract bits 20-63) + * + * @param index + * Long index + * @return Index of a bit set in the inner map + */ + private long getSetIndex(final long index) { + return index & INDEX_MASK; + } + + /** + * Get index of a value in a bit set (bits 0-19) + * + * @param index + * Long index + * @return Index of a value in a bit set + */ + private int getPos(final long index) { + return (int) (index & VALUE_MASK); + } + + /** + * Helper method to get (or create, if necessary) a bit set for a given long + * index + * + * @param index + * Long index + * @return A bit set for a given index (always not null) + */ + private BitSet bitSet(final long index) { + final Long iIndex = getSetIndex(index); + BitSet bitSet = m_sets.get(iIndex); + if (bitSet == null) { + bitSet = new BitSet(Integer.MAX_VALUE); + m_sets.put(iIndex, bitSet); + System.out.println("set=" + m_sets.size()); + } + return bitSet; + } + + /** + * Set a given value for a given index + * + * @param index + * Long index + * @param value + * Value to set + */ + public void set(final long index, final boolean value) { + if (value) { + bitSet(index).set(getPos(index), value); + } else { // if value shall be cleared, check first if given partition + // exists + final BitSet bitSet = m_sets.get(getSetIndex(index)); + if (bitSet != null) + bitSet.clear(getPos(index)); + } + } + + /** + * Get a value for a given index + * + * @param index + * Long index + * @return Value associated with a given index + */ + public boolean get(final long index) { + final BitSet bitSet = m_sets.get(getSetIndex(index)); + return bitSet != null && bitSet.get(getPos(index)); + } + + /** + * Clear all bits between {@code fromIndex} (inclusive) and {@code toIndex} + * (exclusive) + * + * @param fromIndex + * Start index (inclusive) + * @param toIndex + * End index (exclusive) + */ + public void clear(final long fromIndex, final long toIndex) { + if (fromIndex >= toIndex) + return; + final long fromPos = getSetIndex(fromIndex); + final long toPos = getSetIndex(toIndex); + // remove all maps in the middle + for (long i = fromPos + 1; i < toPos; ++i) + m_sets.remove(i); + // clean two corner sets manually + final BitSet fromSet = m_sets.get(fromPos); + final BitSet toSet = m_sets.get(toPos); + // /are both ends in the same subset? + if (fromSet != null && fromSet == toSet) { + fromSet.clear(getPos(fromIndex), getPos(toIndex)); + return; + } + // clean left subset from left index to the end + if (fromSet != null) + fromSet.clear(getPos(fromIndex), fromSet.length()); + // clean right subset from 0 to given index. Note that both checks are + // independent + if (toSet != null) + toSet.clear(0, getPos(toIndex)); + } + + /** + * Iteration over all set values in a LongBitSet. Order of iteration is not + * specified. + * + * @param proc + * Procedure to call. If it returns {@code false}, then iteration + * will stop at once + */ + public long nextSetBit(long pos) { + for (final Map.Entry entry : m_sets.entrySet()) { + final BitSet bs = entry.getValue(); + final long baseIndex = entry.getKey(); + if (baseIndex >= pos) { + int i = bs.nextSetBit(0); + if (i >= 0) { + return (baseIndex + i); + } + } + + } + return -1; + } + + /** + * Iteration over all set values in a LongBitSet. Order of iteration is not + * specified. + * + * @param proc + * Procedure to call. 
If it returns {@code false}, then iteration + * will stop at once + */ + public long cardinality() { + long cd = 0; + for (final Map.Entry entry : m_sets.entrySet()) { + final BitSet bs = entry.getValue(); + cd = cd + (long) bs.cardinality(); + + } + return cd; + } +} diff --git a/src/org/opendedup/util/MappedByteBufferBitSet.java b/src/org/opendedup/util/MappedByteBufferBitSet.java index 57d89091c..21667a452 100644 --- a/src/org/opendedup/util/MappedByteBufferBitSet.java +++ b/src/org/opendedup/util/MappedByteBufferBitSet.java @@ -1,732 +1,750 @@ -package org.opendedup.util; - -import java.io.File; -import java.io.IOException; -import java.io.ObjectStreamField; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import java.nio.MappedByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.channels.FileChannel.MapMode; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.StandardOpenOption; - -/** - * This class implements a vector of bits that grows as needed. Each component - * of the bit set has a {@code boolean} value. The bits of a {@code BitSet} are - * indexed by nonnegative integers. Individual indexed bits can be examined, - * set, or cleared. One {@code BitSet} may be used to modify the contents of - * another {@code BitSet} through logical AND, logical inclusive OR, and logical - * exclusive OR operations. - * - *

- * <p>By default, all bits in the set initially have the value {@code false}. - * - *

- * <p>Every bit set has a current size, which is the number of bits of space - * currently in use by the bit set. Note that the size is related to the - * implementation of a bit set, so it may change with implementation. The length - * of a bit set relates to logical length of a bit set and is defined - * independently of implementation. - * - *

- * <p>Unless otherwise noted, passing a null parameter to any of the methods in a - * {@code BitSet} will result in a {@code NullPointerException}. - * - *

- * A {@code BitSet} is not safe for multithreaded use without external - * synchronization. - * - * @author Arthur van Hoff - * @author Michael McCloskey - * @author Martin Buchholz - * @since JDK1.0 - */ -public class MappedByteBufferBitSet implements Cloneable, java.io.Serializable { - /* - * BitSets are packed into arrays of "words." Currently a word is a long, - * which consists of 64 bits, requiring 6 address bits. The choice of word - * size is determined purely by performance concerns. - */ - private final static int ADDRESS_BITS_PER_WORD = 6; - private final static int BITS_PER_WORD = 1 << ADDRESS_BITS_PER_WORD; - - /* Used to shift left or right for a partial word mask */ - private static final long WORD_MASK = 0xffffffffffffffffL; - - /** - * @serialField - * bits long[] - * - * The bits in this BitSet. The ith bit is stored in - * bits[i/64] at bit position i % 64 (where bit position 0 - * refers to the least significant bit and 63 refers to the - * most significant bit). - */ - private static final ObjectStreamField[] serialPersistentFields = { new ObjectStreamField( - "bits", long[].class), }; - - /** - * The internal field corresponding to the serialField "bits". - */ - MappedByteBuffer buf = null; - FileChannel fc = null; - // private long[] words; - - /** - * The number of words in the logical size of this BitSet. - */ - private transient int wordsInUse = 0; - - private String fileName; - - /* use serialVersionUID from JDK 1.0.2 for interoperability */ - private static final long serialVersionUID = 7997698588986878753L; - - /** - * Given a bit index, return word index containing it. - */ - private static int wordIndex(long bitIndex) { - return (int) (bitIndex >> ADDRESS_BITS_PER_WORD); - } - - private long getWord(int arrayPos) { - int pos = 0; - try { - pos = (arrayPos * 8) + 8; - return this.buf.getLong(pos); - } catch (Exception e) { - System.out.println("unable to get " + pos + " buf cap = " - + this.buf.capacity()); - return 0; - } - } - - private void setWord(int arrayPos, long value) { - int pos = (arrayPos * 8) + 8; - this.buf.putLong(pos, value); - } - - private int getWordLength() { - return (buf.capacity() - 8) / 8; - } - - /** - * Every public method must preserve these invariants. - */ - private void checkInvariants() { - assert (wordsInUse == 0 || getWord(wordsInUse - 1) != 0); - assert (wordsInUse >= 0 && wordsInUse <= this.getWordLength()); - assert (wordsInUse == this.getWordLength() || this.getWord(wordsInUse) == 0); - } - - /** - * Sets the field wordsInUse to the logical size in words of the bit set. - * WARNING:This method assumes that the number of words actually in use is - * less than or equal to the current value of wordsInUse! - */ - private void recalculateWordsInUse() { - // Traverse the bitset until a used word is found - int i; - for (i = wordsInUse - 1; i >= 0; i--) - if (this.getWord(i) != 0) - break; - - wordsInUse = i + 1; // The new logical size - } - - /** - * Creates a new bit set. All bits are initially {@code false}. - * - * @throws IOException - */ - public MappedByteBufferBitSet(String fileName) throws IOException { - this.fileName = fileName; - File f = new File(fileName); - if (f.exists()) { - this.initWords((int) f.length()); - } else { - int len = ((wordIndex(BITS_PER_WORD - 1) + 1) * 8) + 8; - this.initWords(len); - } - } - - /** - * Creates a bit set whose initial size is large enough to explicitly - * represent bits with indices in the range {@code 0} through - * {@code nbits-1}. All bits are initially {@code false}. 
- * - * @param nbits - * the initial size of the bit set - * @throws IOException - * @throws NegativeArraySizeException - * if the specified initial size is negative - */ - public MappedByteBufferBitSet(String fileName, int nbits) - throws IOException { - // nbits can't be negative; size 0 is OK - this.fileName = fileName; - if (nbits < 0) - throw new NegativeArraySizeException("nbits < 0: " + nbits); - - initWords(nbits); - } - - private void initWords(int length) throws IOException { - Path p = Paths.get(fileName); - fc = (FileChannel) Files.newByteChannel(p, StandardOpenOption.CREATE, - StandardOpenOption.WRITE, StandardOpenOption.READ, - StandardOpenOption.SPARSE); - buf = fc.map(MapMode.READ_WRITE, 0, length); - this.wordsInUse = buf.getInt(0); - } - - /** - * Returns a new byte array containing all the bits in this bit set. - * - *

- * <p>More precisely, if
- * <br>{@code byte[] bytes = s.toByteArray();}
- * <br>then {@code bytes.length == (s.length()+7)/8} and
- * <br>{@code s.get(n) == ((bytes[n/8] & (1<<(n%8))) != 0)}
- * for all {@code n < 8 * bytes.length}. - * - * @return a byte array containing a little-endian representation of all the - * bits in this bit set - * @since 1.7 - */ - public byte[] toByteArray() { - int n = wordsInUse; - if (n == 0) - return new byte[0]; - int len = 8 * (n - 1); - for (long x = this.getWord(n - 1); x != 0; x >>>= 8) - len++; - byte[] bytes = new byte[len]; - ByteBuffer bb = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN); - for (int i = 0; i < n - 1; i++) - bb.putLong(this.getWord(i)); - for (long x = this.getWord(n - 1); x != 0; x >>>= 8) - bb.put((byte) (x & 0xff)); - return bytes; - } - - /** - * Returns a new long array containing all the bits in this bit set. - * - *

- * <p>More precisely, if
- * <br>{@code long[] longs = s.toLongArray();}
- * <br>then {@code longs.length == (s.length()+63)/64} and
- * <br>{@code s.get(n) == ((longs[n/64] & (1L<<(n%64))) != 0)}
- * for all {@code n < 64 * longs.length}. - * - * @return a long array containing a little-endian representation of all the - * bits in this bit set - * @since 1.7 - */ - public long[] toLongArray() { - long[] words = new long[this.getWordLength()]; - int i = 0; - while (i <= buf.capacity()) { - words[i] = buf.getLong(i); - i = i + 8; - } - return words; - } - - /** - * Ensures that the BitSet can hold enough words. - * - * @param wordsRequired - * the minimum acceptable number of words. - * @throws IOException - */ - private void ensureCapacity(int wordsRequired) throws IOException { - if (this.getWordLength() < wordsRequired) { - // Allocate larger of doubled size or required size - int request = Math.max(2 * this.getWordLength(), wordsRequired); - Path p = Paths.get(fileName); - fc.close(); - fc = null; - fc = (FileChannel) Files.newByteChannel(p, - StandardOpenOption.CREATE, StandardOpenOption.WRITE, - StandardOpenOption.READ, StandardOpenOption.SPARSE); - buf = fc.map(MapMode.READ_WRITE, 0, (request * 8) + 8); - buf.putInt(0, this.wordsInUse); - } - } - - /** - * Ensures that the BitSet can accommodate a given wordIndex, temporarily - * violating the invariants. The caller must restore the invariants before - * returning to the user, possibly using recalculateWordsInUse(). - * - * @param wordIndex - * the index to be accommodated. - * @throws IOException - */ - private void expandTo(int wordIndex) throws IOException { - int wordsRequired = wordIndex + 1; - if (wordsInUse < wordsRequired) { - ensureCapacity(wordsRequired); - wordsInUse = wordsRequired; - } - } - - /** - * Checks that fromIndex ... toIndex is a valid range of bit indices. - */ - private static void checkRange(int fromIndex, int toIndex) { - if (fromIndex < 0) - throw new IndexOutOfBoundsException("fromIndex < 0: " + fromIndex); - if (toIndex < 0) - throw new IndexOutOfBoundsException("toIndex < 0: " + toIndex); - if (fromIndex > toIndex) - throw new IndexOutOfBoundsException("fromIndex: " + fromIndex - + " > toIndex: " + toIndex); - } - - /** - * Sets the bit at the specified index to the complement of its current - * value. - * - * @param bitIndex - * the index of the bit to flip - * @throws IOException - * @throws IndexOutOfBoundsException - * if the specified index is negative - * @since 1.4 - */ - public void flip(int bitIndex) throws IOException { - if (bitIndex < 0) - throw new IndexOutOfBoundsException("bitIndex < 0: " + bitIndex); - - int wordIndex = wordIndex(bitIndex); - expandTo(wordIndex); - long cv = this.getWord(wordIndex); - cv ^= (1L << bitIndex); - this.setWord(wordIndex, cv); - - recalculateWordsInUse(); - checkInvariants(); - } - - /** - * Sets each bit from the specified {@code fromIndex} (inclusive) to the - * specified {@code toIndex} (exclusive) to the complement of its current - * value. 
- * - * @param fromIndex - * index of the first bit to flip - * @param toIndex - * index after the last bit to flip - * @throws IOException - * @throws IndexOutOfBoundsException - * if {@code fromIndex} is negative, or {@code toIndex} is - * negative, or {@code fromIndex} is larger than {@code toIndex} - * @since 1.4 - */ - public void flip(int fromIndex, int toIndex) throws IOException { - checkRange(fromIndex, toIndex); - - if (fromIndex == toIndex) - return; - - int startWordIndex = wordIndex(fromIndex); - int endWordIndex = wordIndex(toIndex - 1); - expandTo(endWordIndex); - - long firstWordMask = WORD_MASK << fromIndex; - long lastWordMask = WORD_MASK >>> -toIndex; - long cv = this.getWord(startWordIndex); - if (startWordIndex == endWordIndex) { - // Case 1: One word - - cv ^= (firstWordMask & lastWordMask); - this.setWord(startWordIndex, cv); - } else { - // Case 2: Multiple words - // Handle first word - cv ^= firstWordMask; - this.setWord(startWordIndex, cv); - // Handle intermediate words, if any - for (int i = startWordIndex + 1; i < endWordIndex; i++) { - long word = this.getWord(i); - word ^= WORD_MASK; - this.setWord(i, word); - } - long word = this.getWord(endWordIndex); - // Handle last word - word ^= lastWordMask; - this.setWord(endWordIndex, word); - } - - recalculateWordsInUse(); - checkInvariants(); - } - - /** - * Sets the bit at the specified index to {@code true}. - * - * @param bitIndex - * a bit index - * @throws IOException - * @throws IndexOutOfBoundsException - * if the specified index is negative - * @since JDK1.0 - */ - public void set(long bitIndex) throws IOException { - if (bitIndex < 0) - throw new IndexOutOfBoundsException("bitIndex < 0: " + bitIndex); - - int wordIndex = wordIndex(bitIndex); - expandTo(wordIndex); - long word = this.getWord(wordIndex); - // Handle last word - word |= (1L << bitIndex); - this.setWord(wordIndex, word); - - checkInvariants(); - } - - /** - * Sets the bit at the specified index to the specified value. - * - * @param bitIndex - * a bit index - * @param value - * a boolean value to set - * @throws IOException - * @throws IndexOutOfBoundsException - * if the specified index is negative - * @since 1.4 - */ - public void set(long bitIndex, boolean value) throws IOException { - if (value) - set(bitIndex); - else - clear(bitIndex); - } - - /** - * Sets the bits from the specified {@code fromIndex} (inclusive) to the - * specified {@code toIndex} (exclusive) to {@code true}. 
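flip(int, int) above builds its partial-word masks as WORD_MASK << fromIndex and WORD_MASK >>> -toIndex; since Java takes long shift counts modulo 64, shifting right by -toIndex is the same as shifting by 64 - toIndex, which keeps exactly the bits below toIndex. A short worked example with illustrative indices inside a single word:

public class PartialWordMaskSketch {
    public static void main(String[] args) {
        final long WORD_MASK = 0xffffffffffffffffL;
        int fromIndex = 3, toIndex = 10;             // illustrative bit range [3, 10)
        long firstWordMask = WORD_MASK << fromIndex; // clears bits 0..2
        long lastWordMask = WORD_MASK >>> -toIndex;  // keeps bits 0..9 (shift by 64 - 10)
        long range = firstWordMask & lastWordMask;   // bits 3..9
        System.out.println(Long.toBinaryString(range)); // 1111111000
    }
}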
- * - * @param fromIndex - * index of the first bit to be set - * @param toIndex - * index after the last bit to be set - * @throws IOException - * @throws IndexOutOfBoundsException - * if {@code fromIndex} is negative, or {@code toIndex} is - * negative, or {@code fromIndex} is larger than {@code toIndex} - * @since 1.4 - */ - public void set(int fromIndex, int toIndex) throws IOException { - checkRange(fromIndex, toIndex); - - if (fromIndex == toIndex) - return; - - // Increase capacity if necessary - int startWordIndex = wordIndex(fromIndex); - int endWordIndex = wordIndex(toIndex - 1); - expandTo(endWordIndex); - - long firstWordMask = WORD_MASK << fromIndex; - long lastWordMask = WORD_MASK >>> -toIndex; - long cv = this.getWord(startWordIndex); - if (startWordIndex == endWordIndex) { - // Case 1: One word - cv |= (firstWordMask & lastWordMask); - this.setWord(startWordIndex, cv); - } else { - // Case 2: Multiple words - // Handle first word - cv |= firstWordMask; - this.setWord(startWordIndex, cv); - // Handle intermediate words, if any - for (int i = startWordIndex + 1; i < endWordIndex; i++) { - long word = this.getWord(i); - word = WORD_MASK; - this.setWord(i, word); - } - // Handle last word (restores invariants) - long word = this.getWord(endWordIndex); - word |= lastWordMask; - this.setWord(endWordIndex, word); - } - - checkInvariants(); - } - - /** - * Sets the bits from the specified {@code fromIndex} (inclusive) to the - * specified {@code toIndex} (exclusive) to the specified value. - * - * @param fromIndex - * index of the first bit to be set - * @param toIndex - * index after the last bit to be set - * @param value - * value to set the selected bits to - * @throws IndexOutOfBoundsException - * if {@code fromIndex} is negative, or {@code toIndex} is - * negative, or {@code fromIndex} is larger than {@code toIndex} - * @since 1.4 - */ - /* - * public void set(int fromIndex, int toIndex, boolean value) { if (value) - * set(fromIndex, toIndex); else clear(fromIndex, toIndex); } - */ - - /** - * Sets the bit specified by the index to {@code false}. - * - * @param bitIndex - * the index of the bit to be cleared - * @throws IndexOutOfBoundsException - * if the specified index is negative - * @since JDK1.0 - */ - public void clear(long bitIndex) { - if (bitIndex < 0) - throw new IndexOutOfBoundsException("bitIndex < 0: " + bitIndex); - - int wordIndex = wordIndex(bitIndex); - if (wordIndex >= wordsInUse) - return; - long cv = this.getWord(wordIndex); - cv &= ~(1L << bitIndex); - this.setWord(wordIndex, cv); - recalculateWordsInUse(); - checkInvariants(); - } - - /** - * Sets the bits from the specified {@code fromIndex} (inclusive) to the - * specified {@code toIndex} (exclusive) to {@code false}. 
- * - * @param fromIndex - * index of the first bit to be cleared - * @param toIndex - * index after the last bit to be cleared - * @throws IndexOutOfBoundsException - * if {@code fromIndex} is negative, or {@code toIndex} is - * negative, or {@code fromIndex} is larger than {@code toIndex} - * @since 1.4 - */ - /* - * public void clear(int fromIndex, int toIndex) { checkRange(fromIndex, - * toIndex); - * - * if (fromIndex == toIndex) return; - * - * int startWordIndex = wordIndex(fromIndex); if (startWordIndex >= - * wordsInUse) return; - * - * int endWordIndex = wordIndex(toIndex - 1); if (endWordIndex >= - * wordsInUse) { toIndex = length(); endWordIndex = wordsInUse - 1; } - * - * long firstWordMask = WORD_MASK << fromIndex; long lastWordMask = - * WORD_MASK >>> -toIndex; if (startWordIndex == endWordIndex) { // Case 1: - * One word words[startWordIndex] &= ~(firstWordMask & lastWordMask); } else - * { // Case 2: Multiple words // Handle first word words[startWordIndex] &= - * ~firstWordMask; - * - * // Handle intermediate words, if any for (int i = startWordIndex+1; i < - * endWordIndex; i++) words[i] = 0; - * - * // Handle last word words[endWordIndex] &= ~lastWordMask; } - * - * recalculateWordsInUse(); checkInvariants(); } - */ - - /** - * Sets all of the bits in this BitSet to {@code false}. - * - * @throws IOException - * - * @since 1.4 - */ - public void clear() throws IOException { - fc.close(); - fc = null; - Path p = Paths.get(fileName); - Files.deleteIfExists(p); - this.wordsInUse = 0; - - int cap = buf.capacity(); - buf = null; - fc = (FileChannel) Files.newByteChannel(p, StandardOpenOption.CREATE, - StandardOpenOption.WRITE, StandardOpenOption.READ, - StandardOpenOption.SPARSE); - buf = fc.map(MapMode.READ_WRITE, 0, cap); - } - - /** - * Deletes the MappedByteBufferBitSet {@code false}. - * - * @throws IOException - * - * @since 1.4 - */ - public void delete() throws IOException { - fc.close(); - fc = null; - Path p = Paths.get(fileName); - Files.deleteIfExists(p); - buf = null; - } - - /** - * Deletes the MappedByteBufferBitSet {@code false}. - * - * @throws IOException - * - * @since 1.4 - */ - public void close() throws IOException { - buf.putInt(0, this.wordsInUse); - fc.force(false); - fc.close(); - fc = null; - buf = null; - } - - /** - * Returns the value of the bit with the specified index. The value is - * {@code true} if the bit with the index {@code bitIndex} is currently set - * in this {@code BitSet}; otherwise, the result is {@code false}. - * - * @param bitIndex - * the bit index - * @return the value of the bit with the specified index - * @throws IndexOutOfBoundsException - * if the specified index is negative - */ - public boolean get(long bitIndex) { - if (bitIndex < 0) - throw new IndexOutOfBoundsException("bitIndex < 0: " + bitIndex); - - checkInvariants(); - - int wordIndex = wordIndex(bitIndex); - return (wordIndex < wordsInUse) - && ((this.getWord(wordIndex) & (1L << bitIndex)) != 0); - } - - /** - * Returns a new {@code BitSet} composed of bits from this {@code BitSet} - * from {@code fromIndex} (inclusive) to {@code toIndex} (exclusive). 
- * - * @param fromIndex - * index of the first bit to include - * @param toIndex - * index after the last bit to include - * @return a new {@code BitSet} from a range of this {@code BitSet} - * @throws IndexOutOfBoundsException - * if {@code fromIndex} is negative, or {@code toIndex} is - * negative, or {@code fromIndex} is larger than {@code toIndex} - * @since 1.4 - */ - - /** - * Returns the "logical size" of this {@code BitSet}: the index of the - * highest set bit in the {@code BitSet} plus one. Returns zero if the - * {@code BitSet} contains no set bits. - * - * @return the logical size of this {@code BitSet} - * @since 1.2 - */ - public int length() { - if (wordsInUse == 0) - return 0; - - return BITS_PER_WORD - * (wordsInUse - 1) - + (BITS_PER_WORD - Long.numberOfLeadingZeros(this - .getWord(wordsInUse - 1))); - } - - /** - * Returns true if this {@code BitSet} contains no bits that are set to - * {@code true}. - * - * @return boolean indicating whether this {@code BitSet} is empty - * @since 1.4 - */ - public boolean isEmpty() { - return wordsInUse == 0; - } - - /** - * Returns the number of bits set to {@code true} in this {@code BitSet}. - * - * @return the number of bits set to {@code true} in this {@code BitSet} - * @since 1.4 - */ - public int cardinality() { - int sum = 0; - for (int i = 0; i < wordsInUse; i++) - sum += Long.bitCount(this.getWord(i)); - return sum; - } - - /** - * Returns the hash code value for this bit set. The hash code depends only - * on which bits are set within this {@code BitSet}. - * - *

- * <p>The hash code is defined to be the result of the following calculation: - * - *

-	 * {@code
-	 * public int hashCode() {
-	 *     long h = 1234;
-	 *     long[] words = toLongArray();
-	 *     for (int i = words.length; --i >= 0; )
-	 *         h ^= words[i] * (i + 1);
-	 *     return (int)((h >> 32) ^ h);
-	 * }}
-	 * 
- * - * Note that the hash code changes if the set of bits is altered. - * - * @return the hash code value for this bit set - */ - @Override - public int hashCode() { - long h = 1234; - for (int i = wordsInUse; --i >= 0;) - h ^= this.getWord(i) * (i + 1); - - return (int) ((h >> 32) ^ h); - } - - /** - * Returns the number of bits of space actually in use by this - * {@code BitSet} to represent bit values. The maximum element in the set is - * the size - 1st element. - * - * @return the number of bits currently in this bit set - */ - public int size() { - return (this.getWordLength()) * BITS_PER_WORD; - } - - public static void main(String[] args) throws IOException { - MappedByteBufferBitSet bs = new MappedByteBufferBitSet( - "/home/annesam/test.map"); - System.out.println(bs.cardinality()); - for (int i = 0; i < 10000000; i++) { - bs.set(i, true); - } - System.out.println(bs.cardinality()); - bs.close(); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.util; + +import java.io.File; +import java.io.IOException; +import java.io.ObjectStreamField; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.channels.FileChannel.MapMode; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; + +/** + * This class implements a vector of bits that grows as needed. Each component + * of the bit set has a {@code boolean} value. The bits of a {@code BitSet} are + * indexed by nonnegative integers. Individual indexed bits can be examined, + * set, or cleared. One {@code BitSet} may be used to modify the contents of + * another {@code BitSet} through logical AND, logical inclusive OR, and logical + * exclusive OR operations. + * + *

+ * <p>By default, all bits in the set initially have the value {@code false}. + * + *

+ * <p>Every bit set has a current size, which is the number of bits of space + * currently in use by the bit set. Note that the size is related to the + * implementation of a bit set, so it may change with implementation. The length + * of a bit set relates to logical length of a bit set and is defined + * independently of implementation. + * + *

+ * <p>Unless otherwise noted, passing a null parameter to any of the methods in a + * {@code BitSet} will result in a {@code NullPointerException}. + * + *

+ * A {@code BitSet} is not safe for multithreaded use without external + * synchronization. + * + * @author Arthur van Hoff + * @author Michael McCloskey + * @author Martin Buchholz + * @since JDK1.0 + */ +public class MappedByteBufferBitSet implements Cloneable, java.io.Serializable { + /* + * BitSets are packed into arrays of "words." Currently a word is a long, + * which consists of 64 bits, requiring 6 address bits. The choice of word + * size is determined purely by performance concerns. + */ + private final static int ADDRESS_BITS_PER_WORD = 6; + private final static int BITS_PER_WORD = 1 << ADDRESS_BITS_PER_WORD; + + /* Used to shift left or right for a partial word mask */ + private static final long WORD_MASK = 0xffffffffffffffffL; + + /** + * @serialField + * bits long[] + * + * The bits in this BitSet. The ith bit is stored in + * bits[i/64] at bit position i % 64 (where bit position 0 + * refers to the least significant bit and 63 refers to the + * most significant bit). + */ + private static final ObjectStreamField[] serialPersistentFields = { new ObjectStreamField( + "bits", long[].class), }; + + /** + * The internal field corresponding to the serialField "bits". + */ + MappedByteBuffer buf = null; + FileChannel fc = null; + // private long[] words; + + /** + * The number of words in the logical size of this BitSet. + */ + private transient int wordsInUse = 0; + + private String fileName; + + /* use serialVersionUID from JDK 1.0.2 for interoperability */ + private static final long serialVersionUID = 7997698588986878753L; + + /** + * Given a bit index, return word index containing it. + */ + private static int wordIndex(long bitIndex) { + return (int) (bitIndex >> ADDRESS_BITS_PER_WORD); + } + + private long getWord(int arrayPos) { + int pos = 0; + try { + pos = (arrayPos * 8) + 8; + return this.buf.getLong(pos); + } catch (Exception e) { + System.out.println("unable to get " + pos + " buf cap = " + + this.buf.capacity()); + return 0; + } + } + + private void setWord(int arrayPos, long value) { + int pos = (arrayPos * 8) + 8; + this.buf.putLong(pos, value); + } + + private int getWordLength() { + return (buf.capacity() - 8) / 8; + } + + /** + * Every public method must preserve these invariants. + */ + private void checkInvariants() { + assert (wordsInUse == 0 || getWord(wordsInUse - 1) != 0); + assert (wordsInUse >= 0 && wordsInUse <= this.getWordLength()); + assert (wordsInUse == this.getWordLength() || this.getWord(wordsInUse) == 0); + } + + /** + * Sets the field wordsInUse to the logical size in words of the bit set. + * WARNING:This method assumes that the number of words actually in use is + * less than or equal to the current value of wordsInUse! + */ + private void recalculateWordsInUse() { + // Traverse the bitset until a used word is found + int i; + for (i = wordsInUse - 1; i >= 0; i--) + if (this.getWord(i) != 0) + break; + + wordsInUse = i + 1; // The new logical size + } + + /** + * Creates a new bit set. All bits are initially {@code false}. + * + * @throws IOException + */ + public MappedByteBufferBitSet(String fileName) throws IOException { + this.fileName = fileName; + File f = new File(fileName); + if (f.exists()) { + this.initWords((int) f.length()); + } else { + int len = ((wordIndex(BITS_PER_WORD - 1) + 1) * 8) + 8; + this.initWords(len); + } + } + + /** + * Creates a bit set whose initial size is large enough to explicitly + * represent bits with indices in the range {@code 0} through + * {@code nbits-1}. All bits are initially {@code false}. 
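getWord() and setWord() in this class address the mapped file as an 8-byte header (wordsInUse is written at offset 0) followed by one 64-bit word per 64 bits of the set, so word i lives at byte offset i * 8 + 8. A worked trace of that arithmetic for a single bit index, with illustrative numbers:

public class WordOffsetSketch {
    public static void main(String[] args) {
        long bitIndex = 1_000_000L;
        int wordIdx = (int) (bitIndex >> 6);   // 64 bits per word -> word 15625
        int bitInWord = (int) (bitIndex & 63); // bit 0 inside that word
        int byteOffset = wordIdx * 8 + 8;      // 125008, where getLong()/putLong() operate
        System.out.println("word=" + wordIdx + " bit=" + bitInWord + " offset=" + byteOffset);
    }
}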
+ * + * @param nbits + * the initial size of the bit set + * @throws IOException + * @throws NegativeArraySizeException + * if the specified initial size is negative + */ + public MappedByteBufferBitSet(String fileName, int nbits) + throws IOException { + // nbits can't be negative; size 0 is OK + this.fileName = fileName; + if (nbits < 0) + throw new NegativeArraySizeException("nbits < 0: " + nbits); + + initWords(nbits); + } + + private void initWords(int length) throws IOException { + Path p = Paths.get(fileName); + fc = (FileChannel) Files.newByteChannel(p, StandardOpenOption.CREATE, + StandardOpenOption.WRITE, StandardOpenOption.READ, + StandardOpenOption.SPARSE); + buf = fc.map(MapMode.READ_WRITE, 0, length); + this.wordsInUse = buf.getInt(0); + } + + /** + * Returns a new byte array containing all the bits in this bit set. + * + *

+ * More precisely, if
+ * {@code byte[] bytes = s.toByteArray();}
+ * then {@code bytes.length == (s.length()+7)/8} and
+ * {@code s.get(n) == ((bytes[n/8] & (1<<(n%8))) != 0)}
+ * for all {@code n < 8 * bytes.length}. + * + * @return a byte array containing a little-endian representation of all the + * bits in this bit set + * @since 1.7 + */ + public byte[] toByteArray() { + int n = wordsInUse; + if (n == 0) + return new byte[0]; + int len = 8 * (n - 1); + for (long x = this.getWord(n - 1); x != 0; x >>>= 8) + len++; + byte[] bytes = new byte[len]; + ByteBuffer bb = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN); + for (int i = 0; i < n - 1; i++) + bb.putLong(this.getWord(i)); + for (long x = this.getWord(n - 1); x != 0; x >>>= 8) + bb.put((byte) (x & 0xff)); + return bytes; + } + + /** + * Returns a new long array containing all the bits in this bit set. + * + *

+ * More precisely, if
+ * {@code long[] longs = s.toLongArray();}
+ * then {@code longs.length == (s.length()+63)/64} and
+ * {@code s.get(n) == ((longs[n/64] & (1L<<(n%64))) != 0)}
+ * for all {@code n < 64 * longs.length}. + * + * @return a long array containing a little-endian representation of all the + * bits in this bit set + * @since 1.7 + */ + public long[] toLongArray() { + long[] words = new long[this.getWordLength()]; + int i = 0; + while (i <= buf.capacity()) { + words[i] = buf.getLong(i); + i = i + 8; + } + return words; + } + + /** + * Ensures that the BitSet can hold enough words. + * + * @param wordsRequired + * the minimum acceptable number of words. + * @throws IOException + */ + private void ensureCapacity(int wordsRequired) throws IOException { + if (this.getWordLength() < wordsRequired) { + // Allocate larger of doubled size or required size + int request = Math.max(2 * this.getWordLength(), wordsRequired); + Path p = Paths.get(fileName); + fc.close(); + fc = null; + fc = (FileChannel) Files.newByteChannel(p, + StandardOpenOption.CREATE, StandardOpenOption.WRITE, + StandardOpenOption.READ, StandardOpenOption.SPARSE); + buf = fc.map(MapMode.READ_WRITE, 0, (request * 8) + 8); + buf.putInt(0, this.wordsInUse); + } + } + + /** + * Ensures that the BitSet can accommodate a given wordIndex, temporarily + * violating the invariants. The caller must restore the invariants before + * returning to the user, possibly using recalculateWordsInUse(). + * + * @param wordIndex + * the index to be accommodated. + * @throws IOException + */ + private void expandTo(int wordIndex) throws IOException { + int wordsRequired = wordIndex + 1; + if (wordsInUse < wordsRequired) { + ensureCapacity(wordsRequired); + wordsInUse = wordsRequired; + } + } + + /** + * Checks that fromIndex ... toIndex is a valid range of bit indices. + */ + private static void checkRange(int fromIndex, int toIndex) { + if (fromIndex < 0) + throw new IndexOutOfBoundsException("fromIndex < 0: " + fromIndex); + if (toIndex < 0) + throw new IndexOutOfBoundsException("toIndex < 0: " + toIndex); + if (fromIndex > toIndex) + throw new IndexOutOfBoundsException("fromIndex: " + fromIndex + + " > toIndex: " + toIndex); + } + + /** + * Sets the bit at the specified index to the complement of its current + * value. + * + * @param bitIndex + * the index of the bit to flip + * @throws IOException + * @throws IndexOutOfBoundsException + * if the specified index is negative + * @since 1.4 + */ + public void flip(int bitIndex) throws IOException { + if (bitIndex < 0) + throw new IndexOutOfBoundsException("bitIndex < 0: " + bitIndex); + + int wordIndex = wordIndex(bitIndex); + expandTo(wordIndex); + long cv = this.getWord(wordIndex); + cv ^= (1L << bitIndex); + this.setWord(wordIndex, cv); + + recalculateWordsInUse(); + checkInvariants(); + } + + /** + * Sets each bit from the specified {@code fromIndex} (inclusive) to the + * specified {@code toIndex} (exclusive) to the complement of its current + * value. 
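+ *
+ * The range methods below build partial-word masks and rely on Java's
+ * masked shift counts; a small hand-checked example (an illustration, not
+ * part of the implementation):
+ * {@code
+ * // fromIndex = 4, toIndex = 12 (both fall in word 0)
+ * long firstWordMask = WORD_MASK << 4;    // 0xFFFFFFFFFFFFFFF0
+ * long lastWordMask  = WORD_MASK >>> -12; // shift count 52 -> 0x0000000000000FFF
+ * long affected = firstWordMask & lastWordMask; // bits 4..11 -> 0x0FF0
+ * }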
+ * + * @param fromIndex + * index of the first bit to flip + * @param toIndex + * index after the last bit to flip + * @throws IOException + * @throws IndexOutOfBoundsException + * if {@code fromIndex} is negative, or {@code toIndex} is + * negative, or {@code fromIndex} is larger than {@code toIndex} + * @since 1.4 + */ + public void flip(int fromIndex, int toIndex) throws IOException { + checkRange(fromIndex, toIndex); + + if (fromIndex == toIndex) + return; + + int startWordIndex = wordIndex(fromIndex); + int endWordIndex = wordIndex(toIndex - 1); + expandTo(endWordIndex); + + long firstWordMask = WORD_MASK << fromIndex; + long lastWordMask = WORD_MASK >>> -toIndex; + long cv = this.getWord(startWordIndex); + if (startWordIndex == endWordIndex) { + // Case 1: One word + + cv ^= (firstWordMask & lastWordMask); + this.setWord(startWordIndex, cv); + } else { + // Case 2: Multiple words + // Handle first word + cv ^= firstWordMask; + this.setWord(startWordIndex, cv); + // Handle intermediate words, if any + for (int i = startWordIndex + 1; i < endWordIndex; i++) { + long word = this.getWord(i); + word ^= WORD_MASK; + this.setWord(i, word); + } + long word = this.getWord(endWordIndex); + // Handle last word + word ^= lastWordMask; + this.setWord(endWordIndex, word); + } + + recalculateWordsInUse(); + checkInvariants(); + } + + /** + * Sets the bit at the specified index to {@code true}. + * + * @param bitIndex + * a bit index + * @throws IOException + * @throws IndexOutOfBoundsException + * if the specified index is negative + * @since JDK1.0 + */ + public void set(long bitIndex) throws IOException { + if (bitIndex < 0) + throw new IndexOutOfBoundsException("bitIndex < 0: " + bitIndex); + + int wordIndex = wordIndex(bitIndex); + expandTo(wordIndex); + long word = this.getWord(wordIndex); + // Handle last word + word |= (1L << bitIndex); + this.setWord(wordIndex, word); + + checkInvariants(); + } + + /** + * Sets the bit at the specified index to the specified value. + * + * @param bitIndex + * a bit index + * @param value + * a boolean value to set + * @throws IOException + * @throws IndexOutOfBoundsException + * if the specified index is negative + * @since 1.4 + */ + public void set(long bitIndex, boolean value) throws IOException { + if (value) + set(bitIndex); + else + clear(bitIndex); + } + + /** + * Sets the bits from the specified {@code fromIndex} (inclusive) to the + * specified {@code toIndex} (exclusive) to {@code true}. 
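+ *
+ * For example, on a freshly created (empty) bit set {@code bs} (a
+ * hand-checked illustration):
+ * {@code
+ * bs.set(0, 128);               // sets bits 0..127, i.e. all of words 0 and 1
+ * int count = bs.cardinality(); // 128
+ * }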
+ * + * @param fromIndex + * index of the first bit to be set + * @param toIndex + * index after the last bit to be set + * @throws IOException + * @throws IndexOutOfBoundsException + * if {@code fromIndex} is negative, or {@code toIndex} is + * negative, or {@code fromIndex} is larger than {@code toIndex} + * @since 1.4 + */ + public void set(int fromIndex, int toIndex) throws IOException { + checkRange(fromIndex, toIndex); + + if (fromIndex == toIndex) + return; + + // Increase capacity if necessary + int startWordIndex = wordIndex(fromIndex); + int endWordIndex = wordIndex(toIndex - 1); + expandTo(endWordIndex); + + long firstWordMask = WORD_MASK << fromIndex; + long lastWordMask = WORD_MASK >>> -toIndex; + long cv = this.getWord(startWordIndex); + if (startWordIndex == endWordIndex) { + // Case 1: One word + cv |= (firstWordMask & lastWordMask); + this.setWord(startWordIndex, cv); + } else { + // Case 2: Multiple words + // Handle first word + cv |= firstWordMask; + this.setWord(startWordIndex, cv); + // Handle intermediate words, if any + for (int i = startWordIndex + 1; i < endWordIndex; i++) { + long word = this.getWord(i); + word = WORD_MASK; + this.setWord(i, word); + } + // Handle last word (restores invariants) + long word = this.getWord(endWordIndex); + word |= lastWordMask; + this.setWord(endWordIndex, word); + } + + checkInvariants(); + } + + /** + * Sets the bits from the specified {@code fromIndex} (inclusive) to the + * specified {@code toIndex} (exclusive) to the specified value. + * + * @param fromIndex + * index of the first bit to be set + * @param toIndex + * index after the last bit to be set + * @param value + * value to set the selected bits to + * @throws IndexOutOfBoundsException + * if {@code fromIndex} is negative, or {@code toIndex} is + * negative, or {@code fromIndex} is larger than {@code toIndex} + * @since 1.4 + */ + /* + * public void set(int fromIndex, int toIndex, boolean value) { if (value) + * set(fromIndex, toIndex); else clear(fromIndex, toIndex); } + */ + + /** + * Sets the bit specified by the index to {@code false}. + * + * @param bitIndex + * the index of the bit to be cleared + * @throws IndexOutOfBoundsException + * if the specified index is negative + * @since JDK1.0 + */ + public void clear(long bitIndex) { + if (bitIndex < 0) + throw new IndexOutOfBoundsException("bitIndex < 0: " + bitIndex); + + int wordIndex = wordIndex(bitIndex); + if (wordIndex >= wordsInUse) + return; + long cv = this.getWord(wordIndex); + cv &= ~(1L << bitIndex); + this.setWord(wordIndex, cv); + recalculateWordsInUse(); + checkInvariants(); + } + + /** + * Sets the bits from the specified {@code fromIndex} (inclusive) to the + * specified {@code toIndex} (exclusive) to {@code false}. 
+ * + * @param fromIndex + * index of the first bit to be cleared + * @param toIndex + * index after the last bit to be cleared + * @throws IndexOutOfBoundsException + * if {@code fromIndex} is negative, or {@code toIndex} is + * negative, or {@code fromIndex} is larger than {@code toIndex} + * @since 1.4 + */ + /* + * public void clear(int fromIndex, int toIndex) { checkRange(fromIndex, + * toIndex); + * + * if (fromIndex == toIndex) return; + * + * int startWordIndex = wordIndex(fromIndex); if (startWordIndex >= + * wordsInUse) return; + * + * int endWordIndex = wordIndex(toIndex - 1); if (endWordIndex >= + * wordsInUse) { toIndex = length(); endWordIndex = wordsInUse - 1; } + * + * long firstWordMask = WORD_MASK << fromIndex; long lastWordMask = + * WORD_MASK >>> -toIndex; if (startWordIndex == endWordIndex) { // Case 1: + * One word words[startWordIndex] &= ~(firstWordMask & lastWordMask); } else + * { // Case 2: Multiple words // Handle first word words[startWordIndex] &= + * ~firstWordMask; + * + * // Handle intermediate words, if any for (int i = startWordIndex+1; i < + * endWordIndex; i++) words[i] = 0; + * + * // Handle last word words[endWordIndex] &= ~lastWordMask; } + * + * recalculateWordsInUse(); checkInvariants(); } + */ + + /** + * Sets all of the bits in this BitSet to {@code false}. + * + * @throws IOException + * + * @since 1.4 + */ + public void clear() throws IOException { + fc.close(); + fc = null; + Path p = Paths.get(fileName); + Files.deleteIfExists(p); + this.wordsInUse = 0; + + int cap = buf.capacity(); + buf = null; + fc = (FileChannel) Files.newByteChannel(p, StandardOpenOption.CREATE, + StandardOpenOption.WRITE, StandardOpenOption.READ, + StandardOpenOption.SPARSE); + buf = fc.map(MapMode.READ_WRITE, 0, cap); + } + + /** + * Deletes the MappedByteBufferBitSet {@code false}. + * + * @throws IOException + * + * @since 1.4 + */ + public void delete() throws IOException { + fc.close(); + fc = null; + Path p = Paths.get(fileName); + Files.deleteIfExists(p); + buf = null; + } + + /** + * Deletes the MappedByteBufferBitSet {@code false}. + * + * @throws IOException + * + * @since 1.4 + */ + public void close() throws IOException { + buf.putInt(0, this.wordsInUse); + fc.force(false); + fc.close(); + fc = null; + buf = null; + } + + /** + * Returns the value of the bit with the specified index. The value is + * {@code true} if the bit with the index {@code bitIndex} is currently set + * in this {@code BitSet}; otherwise, the result is {@code false}. + * + * @param bitIndex + * the bit index + * @return the value of the bit with the specified index + * @throws IndexOutOfBoundsException + * if the specified index is negative + */ + public boolean get(long bitIndex) { + if (bitIndex < 0) + throw new IndexOutOfBoundsException("bitIndex < 0: " + bitIndex); + + checkInvariants(); + + int wordIndex = wordIndex(bitIndex); + return (wordIndex < wordsInUse) + && ((this.getWord(wordIndex) & (1L << bitIndex)) != 0); + } + + /** + * Returns a new {@code BitSet} composed of bits from this {@code BitSet} + * from {@code fromIndex} (inclusive) to {@code toIndex} (exclusive). 
+ * + * @param fromIndex + * index of the first bit to include + * @param toIndex + * index after the last bit to include + * @return a new {@code BitSet} from a range of this {@code BitSet} + * @throws IndexOutOfBoundsException + * if {@code fromIndex} is negative, or {@code toIndex} is + * negative, or {@code fromIndex} is larger than {@code toIndex} + * @since 1.4 + */ + + /** + * Returns the "logical size" of this {@code BitSet}: the index of the + * highest set bit in the {@code BitSet} plus one. Returns zero if the + * {@code BitSet} contains no set bits. + * + * @return the logical size of this {@code BitSet} + * @since 1.2 + */ + public int length() { + if (wordsInUse == 0) + return 0; + + return BITS_PER_WORD + * (wordsInUse - 1) + + (BITS_PER_WORD - Long.numberOfLeadingZeros(this + .getWord(wordsInUse - 1))); + } + + /** + * Returns true if this {@code BitSet} contains no bits that are set to + * {@code true}. + * + * @return boolean indicating whether this {@code BitSet} is empty + * @since 1.4 + */ + public boolean isEmpty() { + return wordsInUse == 0; + } + + /** + * Returns the number of bits set to {@code true} in this {@code BitSet}. + * + * @return the number of bits set to {@code true} in this {@code BitSet} + * @since 1.4 + */ + public int cardinality() { + int sum = 0; + for (int i = 0; i < wordsInUse; i++) + sum += Long.bitCount(this.getWord(i)); + return sum; + } + + /** + * Returns the hash code value for this bit set. The hash code depends only + * on which bits are set within this {@code BitSet}. + * + *

+ * The hash code is defined to be the result of the following calculation: + * + *

+	 * {@code
+	 * public int hashCode() {
+	 *     long h = 1234;
+	 *     long[] words = toLongArray();
+	 *     for (int i = words.length; --i >= 0; )
+	 *         h ^= words[i] * (i + 1);
+	 *     return (int)((h >> 32) ^ h);
+	 * }}
+	 * 
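+	 * A small hand-worked instance of the calculation above: if the only set
+	 * bit is bit 0, {@code toLongArray()} is {@code {1L}}, so
+	 * {@code h = 1234 ^ (1L * 1) = 1235} and the hash code is {@code 1235}.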
+ * + * Note that the hash code changes if the set of bits is altered. + * + * @return the hash code value for this bit set + */ + @Override + public int hashCode() { + long h = 1234; + for (int i = wordsInUse; --i >= 0;) + h ^= this.getWord(i) * (i + 1); + + return (int) ((h >> 32) ^ h); + } + + /** + * Returns the number of bits of space actually in use by this + * {@code BitSet} to represent bit values. The maximum element in the set is + * the size - 1st element. + * + * @return the number of bits currently in this bit set + */ + public int size() { + return (this.getWordLength()) * BITS_PER_WORD; + } + + public static void main(String[] args) throws IOException { + MappedByteBufferBitSet bs = new MappedByteBufferBitSet( + "/home/annesam/test.map"); + System.out.println(bs.cardinality()); + for (int i = 0; i < 10000000; i++) { + bs.set(i, true); + } + System.out.println(bs.cardinality()); + bs.close(); + } + +} diff --git a/src/org/opendedup/util/NVPair.java b/src/org/opendedup/util/NVPair.java index 21f6673c4..85dbfd33c 100755 --- a/src/org/opendedup/util/NVPair.java +++ b/src/org/opendedup/util/NVPair.java @@ -1,20 +1,38 @@ -package org.opendedup.util; - -public class NVPair { - String name; - String value; - - public NVPair(String name, String value) { - this.name = name; - this.value = value; - } - - public String getName() { - return name; - } - - public String getValue() { - return value; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.util; + +public class NVPair { + String name; + String value; + + public NVPair(String name, String value) { + this.name = name; + this.value = value; + } + + public String getName() { + return name; + } + + public String getValue() { + return value; + } + +} diff --git a/src/org/opendedup/util/NextPrime.java b/src/org/opendedup/util/NextPrime.java index 8f4fb304e..17d367588 100644 --- a/src/org/opendedup/util/NextPrime.java +++ b/src/org/opendedup/util/NextPrime.java @@ -1,72 +1,90 @@ -package org.opendedup.util; - -import java.io.IOException; - -public class NextPrime { - - public static long getNextPrimeL(long input) { - long root; - boolean isPrime = false; - if (input <= 2) { - return 2; - } - for (int k = 3; k < 9; k += 2) { - if (input <= k) { - return k; - } - } - if (input == ((input >> 1) << 1)) { - input += 1; - } - for (long i = input;; i += 2) { - root = (long) Math.sqrt(i); - for (long j = 3; j <= root; j++) { - if (i == (i / j) * j) { - isPrime = false; - break; - } - if (j == root) { - isPrime = true; - } - } - if (isPrime == true) { - return i; - } - } - } - - public static int getNextPrimeI(long input) throws IOException { - long root; - boolean isPrime = false; - if (input <= 2) { - return 2; - } - for (int k = 3; k < 9; k += 2) { - if (input <= k) { - return k; - } - } - if (input == ((input >> 1) << 1)) { - input += 1; - } - for (long i = input;; i += 2) { - root = (long) Math.sqrt(i); - for (long j = 3; j <= root; j++) { - if (i == (i / j) * j) { - isPrime = false; - break; - } - if (j == root) { - isPrime = true; - } - } - if (isPrime == true) { - if (i > Integer.MAX_VALUE) - throw new IOException( - "Next Prime is Greater than max Int value"); - return (int) i; - } - } - } - +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.util; + +import java.io.IOException; + +public class NextPrime { + + public static long getNextPrimeL(long input) { + long root; + boolean isPrime = false; + if (input <= 2) { + return 2; + } + for (int k = 3; k < 9; k += 2) { + if (input <= k) { + return k; + } + } + if (input == ((input >> 1) << 1)) { + input += 1; + } + for (long i = input;; i += 2) { + root = (long) Math.sqrt(i); + for (long j = 3; j <= root; j++) { + if (i == (i / j) * j) { + isPrime = false; + break; + } + if (j == root) { + isPrime = true; + } + } + if (isPrime == true) { + return i; + } + } + } + + public static int getNextPrimeI(long input) throws IOException { + long root; + boolean isPrime = false; + if (input <= 2) { + return 2; + } + for (int k = 3; k < 9; k += 2) { + if (input <= k) { + return k; + } + } + if (input == ((input >> 1) << 1)) { + input += 1; + } + for (long i = input;; i += 2) { + root = (long) Math.sqrt(i); + for (long j = 3; j <= root; j++) { + if (i == (i / j) * j) { + isPrime = false; + break; + } + if (j == root) { + isPrime = true; + } + } + if (isPrime == true) { + if (i > Integer.MAX_VALUE) + throw new IOException( + "Next Prime is Greater than max Int value"); + return (int) i; + } + } + } + } \ No newline at end of file diff --git a/src/org/opendedup/util/NumberUtils.java b/src/org/opendedup/util/NumberUtils.java index 8e545830c..60c85b6d1 100644 --- a/src/org/opendedup/util/NumberUtils.java +++ b/src/org/opendedup/util/NumberUtils.java @@ -1,26 +1,44 @@ -package org.opendedup.util; - -import java.nio.ByteBuffer; - -public class NumberUtils { - public static byte[] getBytesfromLong(long num) { - ByteBuffer buf = ByteBuffer.wrap(new byte[8]); - buf.putLong(num); - return buf.array(); - } - - public static void main(String[] args) { - int cl = 131072; - long pos = 4096; - int len = 4096; - double spos = Math.ceil(((double) pos / (double) cl)); - long ep = pos + len; - double epos = Math.floor(((double) ep / (double) cl)); - long ls = (long) spos; - long es = (long) epos; - if (es <= ls) - System.out.println("eeks"); - System.out.println("spos=" + ls + " epos=" + es); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.util; + +import java.nio.ByteBuffer; + +public class NumberUtils { + public static byte[] getBytesfromLong(long num) { + ByteBuffer buf = ByteBuffer.wrap(new byte[8]); + buf.putLong(num); + return buf.array(); + } + + public static void main(String[] args) { + int cl = 131072; + long pos = 4096; + int len = 4096; + double spos = Math.ceil(((double) pos / (double) cl)); + long ep = pos + len; + double epos = Math.floor(((double) ep / (double) cl)); + long ls = (long) spos; + long es = (long) epos; + if (es <= ls) + System.out.println("eeks"); + System.out.println("spos=" + ls + " epos=" + es); + } + +} diff --git a/src/org/opendedup/util/OSValidator.java b/src/org/opendedup/util/OSValidator.java index c40424e97..673723865 100644 --- a/src/org/opendedup/util/OSValidator.java +++ b/src/org/opendedup/util/OSValidator.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.util; import java.io.File; diff --git a/src/org/opendedup/util/OpenBitSetSerialize.java b/src/org/opendedup/util/OpenBitSetSerialize.java index bfca948a2..3a679c7a7 100644 --- a/src/org/opendedup/util/OpenBitSetSerialize.java +++ b/src/org/opendedup/util/OpenBitSetSerialize.java @@ -1,91 +1,109 @@ -package org.opendedup.util; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.StandardOpenOption; - -import org.apache.lucene.util.OpenBitSet; - -public class OpenBitSetSerialize { - public static void writeOut(String fileName, OpenBitSet set) - throws IOException { - FileChannel fc = null; - try { - File f = new File(fileName); - if (f.exists()) - f.delete(); - Path p = Paths.get(f.getPath()); - fc = FileChannel.open(p, StandardOpenOption.CREATE, - StandardOpenOption.SPARSE, StandardOpenOption.WRITE, - StandardOpenOption.READ); - synchronized (set) { - ByteBuffer buff = ByteBuffer.allocate(4); - buff.putInt(set.getNumWords()); - buff.flip(); - fc.write(buff); - long[] nums = set.getBits(); - buff.clear(); - buff.putInt(nums.length); - buff.flip(); - fc.write(buff); - - buff = ByteBuffer.allocate(8); - - for (long num : nums) { - buff.position(0); - buff.putLong(num); - buff.flip(); - fc.write(buff); - } - } - } finally { - if (fc != null) - fc.close(); - } - } - - public static OpenBitSet readIn(String fileName) throws IOException { - FileChannel fc = null; - try { - File f = new File(fileName); - Path p = Paths.get(f.getPath()); - fc = FileChannel.open(p, StandardOpenOption.READ); - ByteBuffer buff = 
ByteBuffer.allocate(4); - fc.read(buff); - int numWords = buff.getInt(0); - buff.clear(); - buff.position(0); - fc.read(buff); - int numL = buff.getInt(0); - buff = ByteBuffer.allocate(8); - long[] l = new long[numL]; - for (int i = 0; i < numL; i++) { - buff.position(0); - fc.read(buff); - buff.position(0); - l[i] = buff.getLong(); - } - fc.close(); - return new OpenBitSet(l, numWords); - } finally { - if (fc != null) - fc.close(); - } - - } - - public static void main(String[] args) throws IOException { - OpenBitSet set = new OpenBitSet(1000000000L); - long pos = 1010; - set.set(pos); - System.out.println(set.get(pos)); - writeOut("/tmp/bitset.bt", set); - System.out.println(readIn("/tmp/bitset.bt").get(pos)); - - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.util; + +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; + +import org.apache.lucene.util.OpenBitSet; + +public class OpenBitSetSerialize { + public static void writeOut(String fileName, OpenBitSet set) + throws IOException { + FileChannel fc = null; + try { + File f = new File(fileName); + if (f.exists()) + f.delete(); + Path p = Paths.get(f.getPath()); + fc = FileChannel.open(p, StandardOpenOption.CREATE, + StandardOpenOption.SPARSE, StandardOpenOption.WRITE, + StandardOpenOption.READ); + synchronized (set) { + ByteBuffer buff = ByteBuffer.allocate(4); + buff.putInt(set.getNumWords()); + buff.flip(); + fc.write(buff); + long[] nums = set.getBits(); + buff.clear(); + buff.putInt(nums.length); + buff.flip(); + fc.write(buff); + + buff = ByteBuffer.allocate(8); + + for (long num : nums) { + buff.position(0); + buff.putLong(num); + buff.flip(); + fc.write(buff); + } + } + } finally { + if (fc != null) + fc.close(); + } + } + + public static OpenBitSet readIn(String fileName) throws IOException { + FileChannel fc = null; + try { + File f = new File(fileName); + Path p = Paths.get(f.getPath()); + fc = FileChannel.open(p, StandardOpenOption.READ); + ByteBuffer buff = ByteBuffer.allocate(4); + fc.read(buff); + int numWords = buff.getInt(0); + buff.clear(); + buff.position(0); + fc.read(buff); + int numL = buff.getInt(0); + buff = ByteBuffer.allocate(8); + long[] l = new long[numL]; + for (int i = 0; i < numL; i++) { + buff.position(0); + fc.read(buff); + buff.position(0); + l[i] = buff.getLong(); + } + fc.close(); + return new OpenBitSet(l, numWords); + } finally { + if (fc != null) + fc.close(); + } + + } + + public static void main(String[] args) throws IOException { + OpenBitSet set = new 
OpenBitSet(1000000000L); + long pos = 1010; + set.set(pos); + System.out.println(set.get(pos)); + writeOut("/tmp/bitset.bt", set); + System.out.println(readIn("/tmp/bitset.bt").get(pos)); + + } + +} diff --git a/src/org/opendedup/util/Orderbytest.java b/src/org/opendedup/util/Orderbytest.java index 9e49f65bd..f9fb9eac7 100644 --- a/src/org/opendedup/util/Orderbytest.java +++ b/src/org/opendedup/util/Orderbytest.java @@ -1,29 +1,47 @@ -package org.opendedup.util; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.Random; - -public class Orderbytest { - public static void main(String[] args) { - ArrayList al = new ArrayList(); - Random r = new Random(); - for (int i = 0; i < 15; i++) { - al.add(new Long(r.nextLong())); - } - Collections.sort(al, new CustomComparator()); - for (int i = 0; i < al.size(); i++) { - System.out.println(al.get(i)); - } - } - - private static class CustomComparator implements Comparator { - @Override - public int compare(Long o1, Long o2) { - - return o1.compareTo(o2); - } - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.util; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Random; + +public class Orderbytest { + public static void main(String[] args) { + ArrayList al = new ArrayList(); + Random r = new Random(); + for (int i = 0; i < 15; i++) { + al.add(new Long(r.nextLong())); + } + Collections.sort(al, new CustomComparator()); + for (int i = 0; i < al.size(); i++) { + System.out.println(al.get(i)); + } + } + + private static class CustomComparator implements Comparator { + @Override + public int compare(Long o1, Long o2) { + + return o1.compareTo(o2); + } + } + +} diff --git a/src/org/opendedup/util/PassPhrase.java b/src/org/opendedup/util/PassPhrase.java index 0929728e7..3efa290c1 100644 --- a/src/org/opendedup/util/PassPhrase.java +++ b/src/org/opendedup/util/PassPhrase.java @@ -3,7 +3,7 @@ import java.security.SecureRandom; /* - * Copyright (c) Ian F. Darwin, http://www.darwinsys.com/, 1996-2002. + * Copyright (c) Ian F. Darwin, http://www.darwinsys.com/, 1996-2016. * All rights reserved. Software written by Ian F. Darwin and others. * $Id: LICENSE,v 1.8 2004/02/09 03:33:38 ian Exp $ @@ -36,7 +36,7 @@ * * The pioneering role of Dennis Ritchie and Bjarne Stroustrup, of AT&T, for * inventing predecessor languages C and C++ is also gratefully acknowledged. - */ + */ /** * Cheap, lightweight, low-security password generator. 
See also: diff --git a/src/org/opendedup/util/ProcessWorker.java b/src/org/opendedup/util/ProcessWorker.java index 317355aae..4c2d16e55 100644 --- a/src/org/opendedup/util/ProcessWorker.java +++ b/src/org/opendedup/util/ProcessWorker.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.util; import java.io.BufferedReader; diff --git a/src/org/opendedup/util/RandomFile.java b/src/org/opendedup/util/RandomFile.java index 9ad7b4cc0..80520d75b 100755 --- a/src/org/opendedup/util/RandomFile.java +++ b/src/org/opendedup/util/RandomFile.java @@ -1,33 +1,51 @@ -package org.opendedup.util; - -import java.io.BufferedOutputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.util.Random; - -import org.opendedup.sdfs.Main; - -public class RandomFile { - public static void writeRandomFile(String fileName, double size) - throws IOException { - BufferedOutputStream out = new BufferedOutputStream( - new FileOutputStream(fileName)); - long currentpos = 0; - Random r = new Random(); - while (currentpos < size) { - byte[] rndB = new byte[Main.CHUNK_LENGTH]; - r.nextBytes(rndB); - out.write(rndB); - currentpos = currentpos + rndB.length; - out.flush(); - } - out.flush(); - out.close(); - } - - public static void main(String[] args) throws IOException { - long size = 100 * 1024L * 1024L * 1024L; - writeRandomFile("/media/dedup/rnd.bin", size); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.util; + +import java.io.BufferedOutputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.Random; + +import org.opendedup.sdfs.Main; + +public class RandomFile { + public static void writeRandomFile(String fileName, double size) + throws IOException { + BufferedOutputStream out = new BufferedOutputStream( + new FileOutputStream(fileName)); + long currentpos = 0; + Random r = new Random(); + while (currentpos < size) { + byte[] rndB = new byte[Main.CHUNK_LENGTH]; + r.nextBytes(rndB); + out.write(rndB); + currentpos = currentpos + rndB.length; + out.flush(); + } + out.flush(); + out.close(); + } + + public static void main(String[] args) throws IOException { + long size = 100 * 1024L * 1024L * 1024L; + writeRandomFile("/media/dedup/rnd.bin", size); + } + +} diff --git a/src/org/opendedup/util/RandomGUID.java b/src/org/opendedup/util/RandomGUID.java index 16a95a417..2ef369d73 100755 --- a/src/org/opendedup/util/RandomGUID.java +++ b/src/org/opendedup/util/RandomGUID.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.util; import java.io.IOException; diff --git a/src/org/opendedup/util/SDFSVolumeTLogger.java b/src/org/opendedup/util/SDFSVolumeTLogger.java index d908632e2..c71105e5d 100644 --- a/src/org/opendedup/util/SDFSVolumeTLogger.java +++ b/src/org/opendedup/util/SDFSVolumeTLogger.java @@ -1,14 +1,32 @@ -package org.opendedup.util; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -public class SDFSVolumeTLogger { - - private static Log log = LogFactory.getLog("sdfs"); - - public static Log getLog() { - return log; - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . 
+ *******************************************************************************/ +package org.opendedup.util; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +public class SDFSVolumeTLogger { + + private static Log log = LogFactory.getLog("sdfs"); + + public static Log getLog() { + return log; + } + +} diff --git a/src/org/opendedup/util/StorageUnit.java b/src/org/opendedup/util/StorageUnit.java index bac972b98..71fed3cd6 100644 --- a/src/org/opendedup/util/StorageUnit.java +++ b/src/org/opendedup/util/StorageUnit.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.util; public enum StorageUnit { diff --git a/src/org/opendedup/util/StringUtils.java b/src/org/opendedup/util/StringUtils.java index 140d3b939..b4e138fd3 100755 --- a/src/org/opendedup/util/StringUtils.java +++ b/src/org/opendedup/util/StringUtils.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.util; import java.io.IOException; diff --git a/src/org/opendedup/util/TestFile.java b/src/org/opendedup/util/TestFile.java index f9c2ff19d..b017cc95e 100755 --- a/src/org/opendedup/util/TestFile.java +++ b/src/org/opendedup/util/TestFile.java @@ -1,12 +1,30 @@ -package org.opendedup.util; - -import java.io.IOException; - -public class TestFile { - public static void main(String[] args) throws IOException { - String tst = "/sys/kernel/config/target/iscsi/iqn.2008-06.org.opendedup.iscsi:n7YOQDe9TQKD/tpgt_1/attrib/demo_mode_write_protect"; - tst = tst.replaceAll("\\:", "\\\\:"); - System.out.println(tst); - } - -} +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. 
+ * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ +package org.opendedup.util; + +import java.io.IOException; + +public class TestFile { + public static void main(String[] args) throws IOException { + String tst = "/sys/kernel/config/target/iscsi/iqn.2008-06.org.opendedup.iscsi:n7YOQDe9TQKD/tpgt_1/attrib/demo_mode_write_protect"; + tst = tst.replaceAll("\\:", "\\\\:"); + System.out.println(tst); + } + +} diff --git a/src/org/opendedup/util/Tiger_Hash.java b/src/org/opendedup/util/Tiger_Hash.java index 8016bccee..c42aed21d 100644 --- a/src/org/opendedup/util/Tiger_Hash.java +++ b/src/org/opendedup/util/Tiger_Hash.java @@ -1,794 +1,794 @@ -package org.opendedup.util; - -/* - * The rest of this file contains Java code - * derived from code provided by Cryptix and is covered - * under the Cryptix General License. - * - * The following code orginally stems from - * cryptix code... which in turn stems from Eli Biham and - * Ross Anderson's original C sample code. +package org.opendedup.util; + +/* + * The rest of this file contains Java code + * derived from code provided by Cryptix and is covered + * under the Cryptix General License. + * + * The following code orginally stems from + * cryptix code... which in turn stems from Eli Biham and + * Ross Anderson's original C sample code. + */ + +/* $Id: Tiger_Hash.java,v 1.5 2001, 2012/06/25 20:30:26 gelderen Exp $ + * + * Copyright (C) 2000 The Cryptix Foundation Limited. + * All rights reserved. + * + * Use, modification, copying and distribution of this software is subject to + * the terms and conditions of the Cryptix General Licence. You should have + * received a copy of the Cryptix General Licence along with this library; + * if not, you can download a copy from http://www.cryptix.org/ . */ - -/* $Id: Tiger_Hash.java,v 1.5 2001/06/25 20:30:26 gelderen Exp $ - * - * Copyright (C) 2000 The Cryptix Foundation Limited. - * All rights reserved. - * - * Use, modification, copying and distribution of this software is subject to - * the terms and conditions of the Cryptix General Licence. You should have - * received a copy of the Cryptix General Licence along with this library; - * if not, you can download a copy from http://www.cryptix.org/ . - */ - -/** - * @version $Revision: 1.5 $ - * @author Jeroen C. van Gelderen (gelderen@cryptix.org) - */ - -class Tiger_Hash implements Cloneable { - - // Constants - // ........................................................................... - - /** Length of this hash */ - - // Instance variables - // ........................................................................... - - /** Hash context */ - private long a, b, c; - - // Constructors - // ........................................................................... 
- - public Tiger_Hash() { - coreReset(); - } - - private Tiger_Hash(Tiger_Hash src) { - this.a = src.a; - this.b = src.b; - this.c = src.c; - } - - @Override - public Object clone() { - return new Tiger_Hash(this); - } - - // Concreteness - // ........................................................................... - - protected void coreDigest(byte[] buf, int off) { - buf[off++] = (byte) (a); - buf[off++] = (byte) (a >> 8); - buf[off++] = (byte) (a >> 16); - buf[off++] = (byte) (a >> 24); - buf[off++] = (byte) (a >> 32); - buf[off++] = (byte) (a >> 40); - buf[off++] = (byte) (a >> 48); - buf[off++] = (byte) (a >> 56); - - buf[off++] = (byte) (b); - buf[off++] = (byte) (b >> 8); - buf[off++] = (byte) (b >> 16); - buf[off++] = (byte) (b >> 24); - buf[off++] = (byte) (b >> 32); - buf[off++] = (byte) (b >> 40); - buf[off++] = (byte) (b >> 48); - buf[off++] = (byte) (b >> 56); - - buf[off++] = (byte) (c); - buf[off++] = (byte) (c >> 8); - buf[off++] = (byte) (c >> 16); - buf[off++] = (byte) (c >> 24); - buf[off++] = (byte) (c >> 32); - buf[off++] = (byte) (c >> 40); - buf[off++] = (byte) (c >> 48); - buf[off] = (byte) (c >> 56); - } - - protected void coreReset() { - a = 0x0123456789ABCDEFL; - b = 0xFEDCBA9876543210L; - c = 0xF096A5B4C3B2E187L; - } - - protected void coreUpdate(byte[] block, int offset, int run_until) { - int off = offset; - long[] tmp = new long[8]; - for (int i = 0; i < 8; i++) - tmp[i] = ((block[offset++] & (long) 0xFF)) - | ((block[offset++] & (long) 0xFF) << 8) - | ((block[offset++] & (long) 0xFF) << 16) - | ((block[offset++] & (long) 0xFF) << 24) - | ((block[offset++] & (long) 0xFF) << 32) - | ((block[offset++] & (long) 0xFF) << 40) - | ((block[offset++] & (long) 0xFF) << 48) - | ((block[offset++] & (long) 0xFF) << 56); - - compress(tmp, run_until); - - // just for us... - for (int i = 0; i < 8; i++) { - long ct = tmp[i]; - block[off++] = (byte) (ct); - block[off++] = (byte) (ct >> 8); - block[off++] = (byte) (ct >> 16); - block[off++] = (byte) (ct >> 24); - block[off++] = (byte) (ct >> 32); - block[off++] = (byte) (ct >> 40); - block[off++] = (byte) (ct >> 48); - block[off] = (byte) (ct >> 56); - } - } - - // Private methods - // ........................................................................... 
- - private void compress(long[] x, int breakpoint) { - System.out.println("Starting compress."); - long aa = a, bb = b, cc = c; - - roundABC(x[0], 5); - if (breakpoint == 1) - return; - roundBCA(x[1], 5); - if (breakpoint == 2) - return; - roundCAB(x[2], 5); - if (breakpoint == 3) - return; - roundABC(x[3], 5); - if (breakpoint == 4) - return; - roundBCA(x[4], 5); - if (breakpoint == 5) - return; - roundCAB(x[5], 5); - if (breakpoint == 6) - return; - roundABC(x[6], 5); - if (breakpoint == 7) - return; - roundBCA(x[7], 5); - if (breakpoint == 8) - return; - - schedule(x); - if (breakpoint == 9) - return; - - roundCAB(x[0], 7); - if (breakpoint == 10) - return; - roundABC(x[1], 7); - if (breakpoint == 11) - return; - roundBCA(x[2], 7); - if (breakpoint == 12) - return; - roundCAB(x[3], 7); - if (breakpoint == 13) - return; - roundABC(x[4], 7); - if (breakpoint == 14) - return; - roundBCA(x[5], 7); - if (breakpoint == 15) - return; - roundCAB(x[6], 7); - if (breakpoint == 16) - return; - roundABC(x[7], 7); - if (breakpoint == 17) - return; - - schedule(x); - if (breakpoint == 18) - return; - - roundBCA(x[0], 9); - if (breakpoint == 19) - return; - roundCAB(x[1], 9); - if (breakpoint == 20) - return; - roundABC(x[2], 9); - if (breakpoint == 21) - return; - roundBCA(x[3], 9); - if (breakpoint == 22) - return; - roundCAB(x[4], 9); - if (breakpoint == 23) - return; - roundABC(x[5], 9); - if (breakpoint == 24) - return; - roundBCA(x[6], 9); - if (breakpoint == 25) - return; - roundCAB(x[7], 9); - if (breakpoint == 26) - return; - - // feed forward - a ^= aa; - b -= bb; - c += cc; - - System.out.println("Completed entire compress."); - } - - private void schedule(long[] x) { - x[0] -= x[7] ^ 0xA5A5A5A5A5A5A5A5L; - x[1] ^= x[0]; - x[2] += x[1]; - x[3] -= x[2] ^ ((~x[1]) << 19); - x[4] ^= x[3]; - x[5] += x[4]; - x[6] -= x[5] ^ ((~x[4]) >>> 23); - x[7] ^= x[6]; - x[0] += x[7]; - x[1] -= x[0] ^ ((~x[7]) << 19); - x[2] ^= x[1]; - x[3] += x[2]; - x[4] -= x[3] ^ ((~x[2]) >>> 23); - x[5] ^= x[4]; - x[6] += x[5]; - x[7] -= x[6] ^ 0x0123456789ABCDEFL; - } - - private void roundABC(long x, int mul) { - c ^= x; - - int c0 = (int) (c) & 0xFF, c1 = (int) (c >>> 8) & 0xFF, c2 = (int) (c >>> 16) & 0xFF, c3 = (int) (c >>> 24) & 0xFF, c4 = (int) (c >>> 32) & 0xFF, c5 = (int) (c >>> 40) & 0xFF, c6 = (int) (c >>> 48) & 0xFF, c7 = (int) (c >>> 56); - - a -= T1[c0] ^ T2[c2] ^ T3[c4] ^ T4[c6]; - b += T4[c1] ^ T3[c3] ^ T2[c5] ^ T1[c7]; - b *= mul; - } - - private void roundBCA(long x, int mul) { - a ^= x; - - int a0 = (int) (a) & 0xFF, a1 = (int) (a >>> 8) & 0xFF, a2 = (int) (a >>> 16) & 0xFF, a3 = (int) (a >>> 24) & 0xFF, a4 = (int) (a >>> 32) & 0xFF, a5 = (int) (a >>> 40) & 0xFF, a6 = (int) (a >>> 48) & 0xFF, a7 = (int) (a >>> 56); - - b -= T1[a0] ^ T2[a2] ^ T3[a4] ^ T4[a6]; - c += T4[a1] ^ T3[a3] ^ T2[a5] ^ T1[a7]; - c *= mul; - } - - private void roundCAB(long x, int mul) { - b ^= x; - - int b0 = (int) (b) & 0xFF, b1 = (int) (b >>> 8) & 0xFF, b2 = (int) (b >>> 16) & 0xFF, b3 = (int) (b >>> 24) & 0xFF, b4 = (int) (b >>> 32) & 0xFF, b5 = (int) (b >>> 40) & 0xFF, b6 = (int) (b >>> 48) & 0xFF, b7 = (int) (b >>> 56); - - c -= T1[b0] ^ T2[b2] ^ T3[b4] ^ T4[b6]; - a += T4[b1] ^ T3[b3] ^ T2[b5] ^ T1[b7]; - a *= mul; - } - - // Tables - // ........................................................................... 
- - /** - * Tiger_Hash S-Boxes - */ - private static final long[] T1 = { 0x02AAB17CF7E90C5EL /* 0 */, - 0xAC424B03E243A8ECL /* 1 */, 0x72CD5BE30DD5FCD3L /* 2 */, - 0x6D019B93F6F97F3AL /* 3 */, 0xCD9978FFD21F9193L /* 4 */, - 0x7573A1C9708029E2L /* 5 */, 0xB164326B922A83C3L /* 6 */, - 0x46883EEE04915870L /* 7 */, 0xEAACE3057103ECE6L /* 8 */, - 0xC54169B808A3535CL /* 9 */, 0x4CE754918DDEC47CL /* 10 */, - 0x0AA2F4DFDC0DF40CL /* 11 */, 0x10B76F18A74DBEFAL /* 12 */, - 0xC6CCB6235AD1AB6AL /* 13 */, 0x13726121572FE2FFL /* 14 */, - 0x1A488C6F199D921EL /* 15 */, 0x4BC9F9F4DA0007CAL /* 16 */, - 0x26F5E6F6E85241C7L /* 17 */, 0x859079DBEA5947B6L /* 18 */, - 0x4F1885C5C99E8C92L /* 19 */, 0xD78E761EA96F864BL /* 20 */, - 0x8E36428C52B5C17DL /* 21 */, 0x69CF6827373063C1L /* 22 */, - 0xB607C93D9BB4C56EL /* 23 */, 0x7D820E760E76B5EAL /* 24 */, - 0x645C9CC6F07FDC42L /* 25 */, 0xBF38A078243342E0L /* 26 */, - 0x5F6B343C9D2E7D04L /* 27 */, 0xF2C28AEB600B0EC6L /* 28 */, - 0x6C0ED85F7254BCACL /* 29 */, 0x71592281A4DB4FE5L /* 30 */, - 0x1967FA69CE0FED9FL /* 31 */, 0xFD5293F8B96545DBL /* 32 */, - 0xC879E9D7F2A7600BL /* 33 */, 0x860248920193194EL /* 34 */, - 0xA4F9533B2D9CC0B3L /* 35 */, 0x9053836C15957613L /* 36 */, - 0xDB6DCF8AFC357BF1L /* 37 */, 0x18BEEA7A7A370F57L /* 38 */, - 0x037117CA50B99066L /* 39 */, 0x6AB30A9774424A35L /* 40 */, - 0xF4E92F02E325249BL /* 41 */, 0x7739DB07061CCAE1L /* 42 */, - 0xD8F3B49CECA42A05L /* 43 */, 0xBD56BE3F51382F73L /* 44 */, - 0x45FAED5843B0BB28L /* 45 */, 0x1C813D5C11BF1F83L /* 46 */, - 0x8AF0E4B6D75FA169L /* 47 */, 0x33EE18A487AD9999L /* 48 */, - 0x3C26E8EAB1C94410L /* 49 */, 0xB510102BC0A822F9L /* 50 */, - 0x141EEF310CE6123BL /* 51 */, 0xFC65B90059DDB154L /* 52 */, - 0xE0158640C5E0E607L /* 53 */, 0x884E079826C3A3CFL /* 54 */, - 0x930D0D9523C535FDL /* 55 */, 0x35638D754E9A2B00L /* 56 */, - 0x4085FCCF40469DD5L /* 57 */, 0xC4B17AD28BE23A4CL /* 58 */, - 0xCAB2F0FC6A3E6A2EL /* 59 */, 0x2860971A6B943FCDL /* 60 */, - 0x3DDE6EE212E30446L /* 61 */, 0x6222F32AE01765AEL /* 62 */, - 0x5D550BB5478308FEL /* 63 */, 0xA9EFA98DA0EDA22AL /* 64 */, - 0xC351A71686C40DA7L /* 65 */, 0x1105586D9C867C84L /* 66 */, - 0xDCFFEE85FDA22853L /* 67 */, 0xCCFBD0262C5EEF76L /* 68 */, - 0xBAF294CB8990D201L /* 69 */, 0xE69464F52AFAD975L /* 70 */, - 0x94B013AFDF133E14L /* 71 */, 0x06A7D1A32823C958L /* 72 */, - 0x6F95FE5130F61119L /* 73 */, 0xD92AB34E462C06C0L /* 74 */, - 0xED7BDE33887C71D2L /* 75 */, 0x79746D6E6518393EL /* 76 */, - 0x5BA419385D713329L /* 77 */, 0x7C1BA6B948A97564L /* 78 */, - 0x31987C197BFDAC67L /* 79 */, 0xDE6C23C44B053D02L /* 80 */, - 0x581C49FED002D64DL /* 81 */, 0xDD474D6338261571L /* 82 */, - 0xAA4546C3E473D062L /* 83 */, 0x928FCE349455F860L /* 84 */, - 0x48161BBACAAB94D9L /* 85 */, 0x63912430770E6F68L /* 86 */, - 0x6EC8A5E602C6641CL /* 87 */, 0x87282515337DDD2BL /* 88 */, - 0x2CDA6B42034B701BL /* 89 */, 0xB03D37C181CB096DL /* 90 */, - 0xE108438266C71C6FL /* 91 */, 0x2B3180C7EB51B255L /* 92 */, - 0xDF92B82F96C08BBCL /* 93 */, 0x5C68C8C0A632F3BAL /* 94 */, - 0x5504CC861C3D0556L /* 95 */, 0xABBFA4E55FB26B8FL /* 96 */, - 0x41848B0AB3BACEB4L /* 97 */, 0xB334A273AA445D32L /* 98 */, - 0xBCA696F0A85AD881L /* 99 */, 0x24F6EC65B528D56CL /* 100 */, - 0x0CE1512E90F4524AL /* 101 */, 0x4E9DD79D5506D35AL /* 102 */, - 0x258905FAC6CE9779L /* 103 */, 0x2019295B3E109B33L /* 104 */, - 0xF8A9478B73A054CCL /* 105 */, 0x2924F2F934417EB0L /* 106 */, - 0x3993357D536D1BC4L /* 107 */, 0x38A81AC21DB6FF8BL /* 108 */, - 0x47C4FBF17D6016BFL /* 109 */, 0x1E0FAADD7667E3F5L /* 110 */, - 0x7ABCFF62938BEB96L /* 111 */, 
0xA78DAD948FC179C9L /* 112 */, - 0x8F1F98B72911E50DL /* 113 */, 0x61E48EAE27121A91L /* 114 */, - 0x4D62F7AD31859808L /* 115 */, 0xECEBA345EF5CEAEBL /* 116 */, - 0xF5CEB25EBC9684CEL /* 117 */, 0xF633E20CB7F76221L /* 118 */, - 0xA32CDF06AB8293E4L /* 119 */, 0x985A202CA5EE2CA4L /* 120 */, - 0xCF0B8447CC8A8FB1L /* 121 */, 0x9F765244979859A3L /* 122 */, - 0xA8D516B1A1240017L /* 123 */, 0x0BD7BA3EBB5DC726L /* 124 */, - 0xE54BCA55B86ADB39L /* 125 */, 0x1D7A3AFD6C478063L /* 126 */, - 0x519EC608E7669EDDL /* 127 */, 0x0E5715A2D149AA23L /* 128 */, - 0x177D4571848FF194L /* 129 */, 0xEEB55F3241014C22L /* 130 */, - 0x0F5E5CA13A6E2EC2L /* 131 */, 0x8029927B75F5C361L /* 132 */, - 0xAD139FABC3D6E436L /* 133 */, 0x0D5DF1A94CCF402FL /* 134 */, - 0x3E8BD948BEA5DFC8L /* 135 */, 0xA5A0D357BD3FF77EL /* 136 */, - 0xA2D12E251F74F645L /* 137 */, 0x66FD9E525E81A082L /* 138 */, - 0x2E0C90CE7F687A49L /* 139 */, 0xC2E8BCBEBA973BC5L /* 140 */, - 0x000001BCE509745FL /* 141 */, 0x423777BBE6DAB3D6L /* 142 */, - 0xD1661C7EAEF06EB5L /* 143 */, 0xA1781F354DAACFD8L /* 144 */, - 0x2D11284A2B16AFFCL /* 145 */, 0xF1FC4F67FA891D1FL /* 146 */, - 0x73ECC25DCB920ADAL /* 147 */, 0xAE610C22C2A12651L /* 148 */, - 0x96E0A810D356B78AL /* 149 */, 0x5A9A381F2FE7870FL /* 150 */, - 0xD5AD62EDE94E5530L /* 151 */, 0xD225E5E8368D1427L /* 152 */, - 0x65977B70C7AF4631L /* 153 */, 0x99F889B2DE39D74FL /* 154 */, - 0x233F30BF54E1D143L /* 155 */, 0x9A9675D3D9A63C97L /* 156 */, - 0x5470554FF334F9A8L /* 157 */, 0x166ACB744A4F5688L /* 158 */, - 0x70C74CAAB2E4AEADL /* 159 */, 0xF0D091646F294D12L /* 160 */, - 0x57B82A89684031D1L /* 161 */, 0xEFD95A5A61BE0B6BL /* 162 */, - 0x2FBD12E969F2F29AL /* 163 */, 0x9BD37013FEFF9FE8L /* 164 */, - 0x3F9B0404D6085A06L /* 165 */, 0x4940C1F3166CFE15L /* 166 */, - 0x09542C4DCDF3DEFBL /* 167 */, 0xB4C5218385CD5CE3L /* 168 */, - 0xC935B7DC4462A641L /* 169 */, 0x3417F8A68ED3B63FL /* 170 */, - 0xB80959295B215B40L /* 171 */, 0xF99CDAEF3B8C8572L /* 172 */, - 0x018C0614F8FCB95DL /* 173 */, 0x1B14ACCD1A3ACDF3L /* 174 */, - 0x84D471F200BB732DL /* 175 */, 0xC1A3110E95E8DA16L /* 176 */, - 0x430A7220BF1A82B8L /* 177 */, 0xB77E090D39DF210EL /* 178 */, - 0x5EF4BD9F3CD05E9DL /* 179 */, 0x9D4FF6DA7E57A444L /* 180 */, - 0xDA1D60E183D4A5F8L /* 181 */, 0xB287C38417998E47L /* 182 */, - 0xFE3EDC121BB31886L /* 183 */, 0xC7FE3CCC980CCBEFL /* 184 */, - 0xE46FB590189BFD03L /* 185 */, 0x3732FD469A4C57DCL /* 186 */, - 0x7EF700A07CF1AD65L /* 187 */, 0x59C64468A31D8859L /* 188 */, - 0x762FB0B4D45B61F6L /* 189 */, 0x155BAED099047718L /* 190 */, - 0x68755E4C3D50BAA6L /* 191 */, 0xE9214E7F22D8B4DFL /* 192 */, - 0x2ADDBF532EAC95F4L /* 193 */, 0x32AE3909B4BD0109L /* 194 */, - 0x834DF537B08E3450L /* 195 */, 0xFA209DA84220728DL /* 196 */, - 0x9E691D9B9EFE23F7L /* 197 */, 0x0446D288C4AE8D7FL /* 198 */, - 0x7B4CC524E169785BL /* 199 */, 0x21D87F0135CA1385L /* 200 */, - 0xCEBB400F137B8AA5L /* 201 */, 0x272E2B66580796BEL /* 202 */, - 0x3612264125C2B0DEL /* 203 */, 0x057702BDAD1EFBB2L /* 204 */, - 0xD4BABB8EACF84BE9L /* 205 */, 0x91583139641BC67BL /* 206 */, - 0x8BDC2DE08036E024L /* 207 */, 0x603C8156F49F68EDL /* 208 */, - 0xF7D236F7DBEF5111L /* 209 */, 0x9727C4598AD21E80L /* 210 */, - 0xA08A0896670A5FD7L /* 211 */, 0xCB4A8F4309EBA9CBL /* 212 */, - 0x81AF564B0F7036A1L /* 213 */, 0xC0B99AA778199ABDL /* 214 */, - 0x959F1EC83FC8E952L /* 215 */, 0x8C505077794A81B9L /* 216 */, - 0x3ACAAF8F056338F0L /* 217 */, 0x07B43F50627A6778L /* 218 */, - 0x4A44AB49F5ECCC77L /* 219 */, 0x3BC3D6E4B679EE98L /* 220 */, - 0x9CC0D4D1CF14108CL /* 221 */, 0x4406C00B206BC8A0L /* 222 */, - 
0x82A18854C8D72D89L /* 223 */, 0x67E366B35C3C432CL /* 224 */, - 0xB923DD61102B37F2L /* 225 */, 0x56AB2779D884271DL /* 226 */, - 0xBE83E1B0FF1525AFL /* 227 */, 0xFB7C65D4217E49A9L /* 228 */, - 0x6BDBE0E76D48E7D4L /* 229 */, 0x08DF828745D9179EL /* 230 */, - 0x22EA6A9ADD53BD34L /* 231 */, 0xE36E141C5622200AL /* 232 */, - 0x7F805D1B8CB750EEL /* 233 */, 0xAFE5C7A59F58E837L /* 234 */, - 0xE27F996A4FB1C23CL /* 235 */, 0xD3867DFB0775F0D0L /* 236 */, - 0xD0E673DE6E88891AL /* 237 */, 0x123AEB9EAFB86C25L /* 238 */, - 0x30F1D5D5C145B895L /* 239 */, 0xBB434A2DEE7269E7L /* 240 */, - 0x78CB67ECF931FA38L /* 241 */, 0xF33B0372323BBF9CL /* 242 */, - 0x52D66336FB279C74L /* 243 */, 0x505F33AC0AFB4EAAL /* 244 */, - 0xE8A5CD99A2CCE187L /* 245 */, 0x534974801E2D30BBL /* 246 */, - 0x8D2D5711D5876D90L /* 247 */, 0x1F1A412891BC038EL /* 248 */, - 0xD6E2E71D82E56648L /* 249 */, 0x74036C3A497732B7L /* 250 */, - 0x89B67ED96361F5ABL /* 251 */, 0xFFED95D8F1EA02A2L /* 252 */, - 0xE72B3BD61464D43DL /* 253 */, 0xA6300F170BDC4820L /* 254 */, - 0xEBC18760ED78A77AL /* 255 */ - }, T2 = { 0xE6A6BE5A05A12138L /* 256 */, 0xB5A122A5B4F87C98L /* 257 */, - 0x563C6089140B6990L /* 258 */, 0x4C46CB2E391F5DD5L /* 259 */, - 0xD932ADDBC9B79434L /* 260 */, 0x08EA70E42015AFF5L /* 261 */, - 0xD765A6673E478CF1L /* 262 */, 0xC4FB757EAB278D99L /* 263 */, - 0xDF11C6862D6E0692L /* 264 */, 0xDDEB84F10D7F3B16L /* 265 */, - 0x6F2EF604A665EA04L /* 266 */, 0x4A8E0F0FF0E0DFB3L /* 267 */, - 0xA5EDEEF83DBCBA51L /* 268 */, 0xFC4F0A2A0EA4371EL /* 269 */, - 0xE83E1DA85CB38429L /* 270 */, 0xDC8FF882BA1B1CE2L /* 271 */, - 0xCD45505E8353E80DL /* 272 */, 0x18D19A00D4DB0717L /* 273 */, - 0x34A0CFEDA5F38101L /* 274 */, 0x0BE77E518887CAF2L /* 275 */, - 0x1E341438B3C45136L /* 276 */, 0xE05797F49089CCF9L /* 277 */, - 0xFFD23F9DF2591D14L /* 278 */, 0x543DDA228595C5CDL /* 279 */, - 0x661F81FD99052A33L /* 280 */, 0x8736E641DB0F7B76L /* 281 */, - 0x15227725418E5307L /* 282 */, 0xE25F7F46162EB2FAL /* 283 */, - 0x48A8B2126C13D9FEL /* 284 */, 0xAFDC541792E76EEAL /* 285 */, - 0x03D912BFC6D1898FL /* 286 */, 0x31B1AAFA1B83F51BL /* 287 */, - 0xF1AC2796E42AB7D9L /* 288 */, 0x40A3A7D7FCD2EBACL /* 289 */, - 0x1056136D0AFBBCC5L /* 290 */, 0x7889E1DD9A6D0C85L /* 291 */, - 0xD33525782A7974AAL /* 292 */, 0xA7E25D09078AC09BL /* 293 */, - 0xBD4138B3EAC6EDD0L /* 294 */, 0x920ABFBE71EB9E70L /* 295 */, - 0xA2A5D0F54FC2625CL /* 296 */, 0xC054E36B0B1290A3L /* 297 */, - 0xF6DD59FF62FE932BL /* 298 */, 0x3537354511A8AC7DL /* 299 */, - 0xCA845E9172FADCD4L /* 300 */, 0x84F82B60329D20DCL /* 301 */, - 0x79C62CE1CD672F18L /* 302 */, 0x8B09A2ADD124642CL /* 303 */, - 0xD0C1E96A19D9E726L /* 304 */, 0x5A786A9B4BA9500CL /* 305 */, - 0x0E020336634C43F3L /* 306 */, 0xC17B474AEB66D822L /* 307 */, - 0x6A731AE3EC9BAAC2L /* 308 */, 0x8226667AE0840258L /* 309 */, - 0x67D4567691CAECA5L /* 310 */, 0x1D94155C4875ADB5L /* 311 */, - 0x6D00FD985B813FDFL /* 312 */, 0x51286EFCB774CD06L /* 313 */, - 0x5E8834471FA744AFL /* 314 */, 0xF72CA0AEE761AE2EL /* 315 */, - 0xBE40E4CDAEE8E09AL /* 316 */, 0xE9970BBB5118F665L /* 317 */, - 0x726E4BEB33DF1964L /* 318 */, 0x703B000729199762L /* 319 */, - 0x4631D816F5EF30A7L /* 320 */, 0xB880B5B51504A6BEL /* 321 */, - 0x641793C37ED84B6CL /* 322 */, 0x7B21ED77F6E97D96L /* 323 */, - 0x776306312EF96B73L /* 324 */, 0xAE528948E86FF3F4L /* 325 */, - 0x53DBD7F286A3F8F8L /* 326 */, 0x16CADCE74CFC1063L /* 327 */, - 0x005C19BDFA52C6DDL /* 328 */, 0x68868F5D64D46AD3L /* 329 */, - 0x3A9D512CCF1E186AL /* 330 */, 0x367E62C2385660AEL /* 331 */, - 0xE359E7EA77DCB1D7L /* 332 */, 0x526C0773749ABE6EL /* 
333 */, - 0x735AE5F9D09F734BL /* 334 */, 0x493FC7CC8A558BA8L /* 335 */, - 0xB0B9C1533041AB45L /* 336 */, 0x321958BA470A59BDL /* 337 */, - 0x852DB00B5F46C393L /* 338 */, 0x91209B2BD336B0E5L /* 339 */, - 0x6E604F7D659EF19FL /* 340 */, 0xB99A8AE2782CCB24L /* 341 */, - 0xCCF52AB6C814C4C7L /* 342 */, 0x4727D9AFBE11727BL /* 343 */, - 0x7E950D0C0121B34DL /* 344 */, 0x756F435670AD471FL /* 345 */, - 0xF5ADD442615A6849L /* 346 */, 0x4E87E09980B9957AL /* 347 */, - 0x2ACFA1DF50AEE355L /* 348 */, 0xD898263AFD2FD556L /* 349 */, - 0xC8F4924DD80C8FD6L /* 350 */, 0xCF99CA3D754A173AL /* 351 */, - 0xFE477BACAF91BF3CL /* 352 */, 0xED5371F6D690C12DL /* 353 */, - 0x831A5C285E687094L /* 354 */, 0xC5D3C90A3708A0A4L /* 355 */, - 0x0F7F903717D06580L /* 356 */, 0x19F9BB13B8FDF27FL /* 357 */, - 0xB1BD6F1B4D502843L /* 358 */, 0x1C761BA38FFF4012L /* 359 */, - 0x0D1530C4E2E21F3BL /* 360 */, 0x8943CE69A7372C8AL /* 361 */, - 0xE5184E11FEB5CE66L /* 362 */, 0x618BDB80BD736621L /* 363 */, - 0x7D29BAD68B574D0BL /* 364 */, 0x81BB613E25E6FE5BL /* 365 */, - 0x071C9C10BC07913FL /* 366 */, 0xC7BEEB7909AC2D97L /* 367 */, - 0xC3E58D353BC5D757L /* 368 */, 0xEB017892F38F61E8L /* 369 */, - 0xD4EFFB9C9B1CC21AL /* 370 */, 0x99727D26F494F7ABL /* 371 */, - 0xA3E063A2956B3E03L /* 372 */, 0x9D4A8B9A4AA09C30L /* 373 */, - 0x3F6AB7D500090FB4L /* 374 */, 0x9CC0F2A057268AC0L /* 375 */, - 0x3DEE9D2DEDBF42D1L /* 376 */, 0x330F49C87960A972L /* 377 */, - 0xC6B2720287421B41L /* 378 */, 0x0AC59EC07C00369CL /* 379 */, - 0xEF4EAC49CB353425L /* 380 */, 0xF450244EEF0129D8L /* 381 */, - 0x8ACC46E5CAF4DEB6L /* 382 */, 0x2FFEAB63989263F7L /* 383 */, - 0x8F7CB9FE5D7A4578L /* 384 */, 0x5BD8F7644E634635L /* 385 */, - 0x427A7315BF2DC900L /* 386 */, 0x17D0C4AA2125261CL /* 387 */, - 0x3992486C93518E50L /* 388 */, 0xB4CBFEE0A2D7D4C3L /* 389 */, - 0x7C75D6202C5DDD8DL /* 390 */, 0xDBC295D8E35B6C61L /* 391 */, - 0x60B369D302032B19L /* 392 */, 0xCE42685FDCE44132L /* 393 */, - 0x06F3DDB9DDF65610L /* 394 */, 0x8EA4D21DB5E148F0L /* 395 */, - 0x20B0FCE62FCD496FL /* 396 */, 0x2C1B912358B0EE31L /* 397 */, - 0xB28317B818F5A308L /* 398 */, 0xA89C1E189CA6D2CFL /* 399 */, - 0x0C6B18576AAADBC8L /* 400 */, 0xB65DEAA91299FAE3L /* 401 */, - 0xFB2B794B7F1027E7L /* 402 */, 0x04E4317F443B5BEBL /* 403 */, - 0x4B852D325939D0A6L /* 404 */, 0xD5AE6BEEFB207FFCL /* 405 */, - 0x309682B281C7D374L /* 406 */, 0xBAE309A194C3B475L /* 407 */, - 0x8CC3F97B13B49F05L /* 408 */, 0x98A9422FF8293967L /* 409 */, - 0x244B16B01076FF7CL /* 410 */, 0xF8BF571C663D67EEL /* 411 */, - 0x1F0D6758EEE30DA1L /* 412 */, 0xC9B611D97ADEB9B7L /* 413 */, - 0xB7AFD5887B6C57A2L /* 414 */, 0x6290AE846B984FE1L /* 415 */, - 0x94DF4CDEACC1A5FDL /* 416 */, 0x058A5BD1C5483AFFL /* 417 */, - 0x63166CC142BA3C37L /* 418 */, 0x8DB8526EB2F76F40L /* 419 */, - 0xE10880036F0D6D4EL /* 420 */, 0x9E0523C9971D311DL /* 421 */, - 0x45EC2824CC7CD691L /* 422 */, 0x575B8359E62382C9L /* 423 */, - 0xFA9E400DC4889995L /* 424 */, 0xD1823ECB45721568L /* 425 */, - 0xDAFD983B8206082FL /* 426 */, 0xAA7D29082386A8CBL /* 427 */, - 0x269FCD4403B87588L /* 428 */, 0x1B91F5F728BDD1E0L /* 429 */, - 0xE4669F39040201F6L /* 430 */, 0x7A1D7C218CF04ADEL /* 431 */, - 0x65623C29D79CE5CEL /* 432 */, 0x2368449096C00BB1L /* 433 */, - 0xAB9BF1879DA503BAL /* 434 */, 0xBC23ECB1A458058EL /* 435 */, - 0x9A58DF01BB401ECCL /* 436 */, 0xA070E868A85F143DL /* 437 */, - 0x4FF188307DF2239EL /* 438 */, 0x14D565B41A641183L /* 439 */, - 0xEE13337452701602L /* 440 */, 0x950E3DCF3F285E09L /* 441 */, - 0x59930254B9C80953L /* 442 */, 0x3BF299408930DA6DL /* 443 */, - 0xA955943F53691387L /* 
444 */, 0xA15EDECAA9CB8784L /* 445 */, - 0x29142127352BE9A0L /* 446 */, 0x76F0371FFF4E7AFBL /* 447 */, - 0x0239F450274F2228L /* 448 */, 0xBB073AF01D5E868BL /* 449 */, - 0xBFC80571C10E96C1L /* 450 */, 0xD267088568222E23L /* 451 */, - 0x9671A3D48E80B5B0L /* 452 */, 0x55B5D38AE193BB81L /* 453 */, - 0x693AE2D0A18B04B8L /* 454 */, 0x5C48B4ECADD5335FL /* 455 */, - 0xFD743B194916A1CAL /* 456 */, 0x2577018134BE98C4L /* 457 */, - 0xE77987E83C54A4ADL /* 458 */, 0x28E11014DA33E1B9L /* 459 */, - 0x270CC59E226AA213L /* 460 */, 0x71495F756D1A5F60L /* 461 */, - 0x9BE853FB60AFEF77L /* 462 */, 0xADC786A7F7443DBFL /* 463 */, - 0x0904456173B29A82L /* 464 */, 0x58BC7A66C232BD5EL /* 465 */, - 0xF306558C673AC8B2L /* 466 */, 0x41F639C6B6C9772AL /* 467 */, - 0x216DEFE99FDA35DAL /* 468 */, 0x11640CC71C7BE615L /* 469 */, - 0x93C43694565C5527L /* 470 */, 0xEA038E6246777839L /* 471 */, - 0xF9ABF3CE5A3E2469L /* 472 */, 0x741E768D0FD312D2L /* 473 */, - 0x0144B883CED652C6L /* 474 */, 0xC20B5A5BA33F8552L /* 475 */, - 0x1AE69633C3435A9DL /* 476 */, 0x97A28CA4088CFDECL /* 477 */, - 0x8824A43C1E96F420L /* 478 */, 0x37612FA66EEEA746L /* 479 */, - 0x6B4CB165F9CF0E5AL /* 480 */, 0x43AA1C06A0ABFB4AL /* 481 */, - 0x7F4DC26FF162796BL /* 482 */, 0x6CBACC8E54ED9B0FL /* 483 */, - 0xA6B7FFEFD2BB253EL /* 484 */, 0x2E25BC95B0A29D4FL /* 485 */, - 0x86D6A58BDEF1388CL /* 486 */, 0xDED74AC576B6F054L /* 487 */, - 0x8030BDBC2B45805DL /* 488 */, 0x3C81AF70E94D9289L /* 489 */, - 0x3EFF6DDA9E3100DBL /* 490 */, 0xB38DC39FDFCC8847L /* 491 */, - 0x123885528D17B87EL /* 492 */, 0xF2DA0ED240B1B642L /* 493 */, - 0x44CEFADCD54BF9A9L /* 494 */, 0x1312200E433C7EE6L /* 495 */, - 0x9FFCC84F3A78C748L /* 496 */, 0xF0CD1F72248576BBL /* 497 */, - 0xEC6974053638CFE4L /* 498 */, 0x2BA7B67C0CEC4E4CL /* 499 */, - 0xAC2F4DF3E5CE32EDL /* 500 */, 0xCB33D14326EA4C11L /* 501 */, - 0xA4E9044CC77E58BCL /* 502 */, 0x5F513293D934FCEFL /* 503 */, - 0x5DC9645506E55444L /* 504 */, 0x50DE418F317DE40AL /* 505 */, - 0x388CB31A69DDE259L /* 506 */, 0x2DB4A83455820A86L /* 507 */, - 0x9010A91E84711AE9L /* 508 */, 0x4DF7F0B7B1498371L /* 509 */, - 0xD62A2EABC0977179L /* 510 */, 0x22FAC097AA8D5C0EL /* 511 */ - }, T3 = { 0xF49FCC2FF1DAF39BL /* 512 */, 0x487FD5C66FF29281L /* 513 */, - 0xE8A30667FCDCA83FL /* 514 */, 0x2C9B4BE3D2FCCE63L /* 515 */, - 0xDA3FF74B93FBBBC2L /* 516 */, 0x2FA165D2FE70BA66L /* 517 */, - 0xA103E279970E93D4L /* 518 */, 0xBECDEC77B0E45E71L /* 519 */, - 0xCFB41E723985E497L /* 520 */, 0xB70AAA025EF75017L /* 521 */, - 0xD42309F03840B8E0L /* 522 */, 0x8EFC1AD035898579L /* 523 */, - 0x96C6920BE2B2ABC5L /* 524 */, 0x66AF4163375A9172L /* 525 */, - 0x2174ABDCCA7127FBL /* 526 */, 0xB33CCEA64A72FF41L /* 527 */, - 0xF04A4933083066A5L /* 528 */, 0x8D970ACDD7289AF5L /* 529 */, - 0x8F96E8E031C8C25EL /* 530 */, 0xF3FEC02276875D47L /* 531 */, - 0xEC7BF310056190DDL /* 532 */, 0xF5ADB0AEBB0F1491L /* 533 */, - 0x9B50F8850FD58892L /* 534 */, 0x4975488358B74DE8L /* 535 */, - 0xA3354FF691531C61L /* 536 */, 0x0702BBE481D2C6EEL /* 537 */, - 0x89FB24057DEDED98L /* 538 */, 0xAC3075138596E902L /* 539 */, - 0x1D2D3580172772EDL /* 540 */, 0xEB738FC28E6BC30DL /* 541 */, - 0x5854EF8F63044326L /* 542 */, 0x9E5C52325ADD3BBEL /* 543 */, - 0x90AA53CF325C4623L /* 544 */, 0xC1D24D51349DD067L /* 545 */, - 0x2051CFEEA69EA624L /* 546 */, 0x13220F0A862E7E4FL /* 547 */, - 0xCE39399404E04864L /* 548 */, 0xD9C42CA47086FCB7L /* 549 */, - 0x685AD2238A03E7CCL /* 550 */, 0x066484B2AB2FF1DBL /* 551 */, - 0xFE9D5D70EFBF79ECL /* 552 */, 0x5B13B9DD9C481854L /* 553 */, - 0x15F0D475ED1509ADL /* 554 */, 
0x0BEBCD060EC79851L /* 555 */, - 0xD58C6791183AB7F8L /* 556 */, 0xD1187C5052F3EEE4L /* 557 */, - 0xC95D1192E54E82FFL /* 558 */, 0x86EEA14CB9AC6CA2L /* 559 */, - 0x3485BEB153677D5DL /* 560 */, 0xDD191D781F8C492AL /* 561 */, - 0xF60866BAA784EBF9L /* 562 */, 0x518F643BA2D08C74L /* 563 */, - 0x8852E956E1087C22L /* 564 */, 0xA768CB8DC410AE8DL /* 565 */, - 0x38047726BFEC8E1AL /* 566 */, 0xA67738B4CD3B45AAL /* 567 */, - 0xAD16691CEC0DDE19L /* 568 */, 0xC6D4319380462E07L /* 569 */, - 0xC5A5876D0BA61938L /* 570 */, 0x16B9FA1FA58FD840L /* 571 */, - 0x188AB1173CA74F18L /* 572 */, 0xABDA2F98C99C021FL /* 573 */, - 0x3E0580AB134AE816L /* 574 */, 0x5F3B05B773645ABBL /* 575 */, - 0x2501A2BE5575F2F6L /* 576 */, 0x1B2F74004E7E8BA9L /* 577 */, - 0x1CD7580371E8D953L /* 578 */, 0x7F6ED89562764E30L /* 579 */, - 0xB15926FF596F003DL /* 580 */, 0x9F65293DA8C5D6B9L /* 581 */, - 0x6ECEF04DD690F84CL /* 582 */, 0x4782275FFF33AF88L /* 583 */, - 0xE41433083F820801L /* 584 */, 0xFD0DFE409A1AF9B5L /* 585 */, - 0x4325A3342CDB396BL /* 586 */, 0x8AE77E62B301B252L /* 587 */, - 0xC36F9E9F6655615AL /* 588 */, 0x85455A2D92D32C09L /* 589 */, - 0xF2C7DEA949477485L /* 590 */, 0x63CFB4C133A39EBAL /* 591 */, - 0x83B040CC6EBC5462L /* 592 */, 0x3B9454C8FDB326B0L /* 593 */, - 0x56F56A9E87FFD78CL /* 594 */, 0x2DC2940D99F42BC6L /* 595 */, - 0x98F7DF096B096E2DL /* 596 */, 0x19A6E01E3AD852BFL /* 597 */, - 0x42A99CCBDBD4B40BL /* 598 */, 0xA59998AF45E9C559L /* 599 */, - 0x366295E807D93186L /* 600 */, 0x6B48181BFAA1F773L /* 601 */, - 0x1FEC57E2157A0A1DL /* 602 */, 0x4667446AF6201AD5L /* 603 */, - 0xE615EBCACFB0F075L /* 604 */, 0xB8F31F4F68290778L /* 605 */, - 0x22713ED6CE22D11EL /* 606 */, 0x3057C1A72EC3C93BL /* 607 */, - 0xCB46ACC37C3F1F2FL /* 608 */, 0xDBB893FD02AAF50EL /* 609 */, - 0x331FD92E600B9FCFL /* 610 */, 0xA498F96148EA3AD6L /* 611 */, - 0xA8D8426E8B6A83EAL /* 612 */, 0xA089B274B7735CDCL /* 613 */, - 0x87F6B3731E524A11L /* 614 */, 0x118808E5CBC96749L /* 615 */, - 0x9906E4C7B19BD394L /* 616 */, 0xAFED7F7E9B24A20CL /* 617 */, - 0x6509EADEEB3644A7L /* 618 */, 0x6C1EF1D3E8EF0EDEL /* 619 */, - 0xB9C97D43E9798FB4L /* 620 */, 0xA2F2D784740C28A3L /* 621 */, - 0x7B8496476197566FL /* 622 */, 0x7A5BE3E6B65F069DL /* 623 */, - 0xF96330ED78BE6F10L /* 624 */, 0xEEE60DE77A076A15L /* 625 */, - 0x2B4BEE4AA08B9BD0L /* 626 */, 0x6A56A63EC7B8894EL /* 627 */, - 0x02121359BA34FEF4L /* 628 */, 0x4CBF99F8283703FCL /* 629 */, - 0x398071350CAF30C8L /* 630 */, 0xD0A77A89F017687AL /* 631 */, - 0xF1C1A9EB9E423569L /* 632 */, 0x8C7976282DEE8199L /* 633 */, - 0x5D1737A5DD1F7ABDL /* 634 */, 0x4F53433C09A9FA80L /* 635 */, - 0xFA8B0C53DF7CA1D9L /* 636 */, 0x3FD9DCBC886CCB77L /* 637 */, - 0xC040917CA91B4720L /* 638 */, 0x7DD00142F9D1DCDFL /* 639 */, - 0x8476FC1D4F387B58L /* 640 */, 0x23F8E7C5F3316503L /* 641 */, - 0x032A2244E7E37339L /* 642 */, 0x5C87A5D750F5A74BL /* 643 */, - 0x082B4CC43698992EL /* 644 */, 0xDF917BECB858F63CL /* 645 */, - 0x3270B8FC5BF86DDAL /* 646 */, 0x10AE72BB29B5DD76L /* 647 */, - 0x576AC94E7700362BL /* 648 */, 0x1AD112DAC61EFB8FL /* 649 */, - 0x691BC30EC5FAA427L /* 650 */, 0xFF246311CC327143L /* 651 */, - 0x3142368E30E53206L /* 652 */, 0x71380E31E02CA396L /* 653 */, - 0x958D5C960AAD76F1L /* 654 */, 0xF8D6F430C16DA536L /* 655 */, - 0xC8FFD13F1BE7E1D2L /* 656 */, 0x7578AE66004DDBE1L /* 657 */, - 0x05833F01067BE646L /* 658 */, 0xBB34B5AD3BFE586DL /* 659 */, - 0x095F34C9A12B97F0L /* 660 */, 0x247AB64525D60CA8L /* 661 */, - 0xDCDBC6F3017477D1L /* 662 */, 0x4A2E14D4DECAD24DL /* 663 */, - 0xBDB5E6D9BE0A1EEBL /* 664 */, 0x2A7E70F7794301ABL /* 665 */, - 
0xDEF42D8A270540FDL /* 666 */, 0x01078EC0A34C22C1L /* 667 */, - 0xE5DE511AF4C16387L /* 668 */, 0x7EBB3A52BD9A330AL /* 669 */, - 0x77697857AA7D6435L /* 670 */, 0x004E831603AE4C32L /* 671 */, - 0xE7A21020AD78E312L /* 672 */, 0x9D41A70C6AB420F2L /* 673 */, - 0x28E06C18EA1141E6L /* 674 */, 0xD2B28CBD984F6B28L /* 675 */, - 0x26B75F6C446E9D83L /* 676 */, 0xBA47568C4D418D7FL /* 677 */, - 0xD80BADBFE6183D8EL /* 678 */, 0x0E206D7F5F166044L /* 679 */, - 0xE258A43911CBCA3EL /* 680 */, 0x723A1746B21DC0BCL /* 681 */, - 0xC7CAA854F5D7CDD3L /* 682 */, 0x7CAC32883D261D9CL /* 683 */, - 0x7690C26423BA942CL /* 684 */, 0x17E55524478042B8L /* 685 */, - 0xE0BE477656A2389FL /* 686 */, 0x4D289B5E67AB2DA0L /* 687 */, - 0x44862B9C8FBBFD31L /* 688 */, 0xB47CC8049D141365L /* 689 */, - 0x822C1B362B91C793L /* 690 */, 0x4EB14655FB13DFD8L /* 691 */, - 0x1ECBBA0714E2A97BL /* 692 */, 0x6143459D5CDE5F14L /* 693 */, - 0x53A8FBF1D5F0AC89L /* 694 */, 0x97EA04D81C5E5B00L /* 695 */, - 0x622181A8D4FDB3F3L /* 696 */, 0xE9BCD341572A1208L /* 697 */, - 0x1411258643CCE58AL /* 698 */, 0x9144C5FEA4C6E0A4L /* 699 */, - 0x0D33D06565CF620FL /* 700 */, 0x54A48D489F219CA1L /* 701 */, - 0xC43E5EAC6D63C821L /* 702 */, 0xA9728B3A72770DAFL /* 703 */, - 0xD7934E7B20DF87EFL /* 704 */, 0xE35503B61A3E86E5L /* 705 */, - 0xCAE321FBC819D504L /* 706 */, 0x129A50B3AC60BFA6L /* 707 */, - 0xCD5E68EA7E9FB6C3L /* 708 */, 0xB01C90199483B1C7L /* 709 */, - 0x3DE93CD5C295376CL /* 710 */, 0xAED52EDF2AB9AD13L /* 711 */, - 0x2E60F512C0A07884L /* 712 */, 0xBC3D86A3E36210C9L /* 713 */, - 0x35269D9B163951CEL /* 714 */, 0x0C7D6E2AD0CDB5FAL /* 715 */, - 0x59E86297D87F5733L /* 716 */, 0x298EF221898DB0E7L /* 717 */, - 0x55000029D1A5AA7EL /* 718 */, 0x8BC08AE1B5061B45L /* 719 */, - 0xC2C31C2B6C92703AL /* 720 */, 0x94CC596BAF25EF42L /* 721 */, - 0x0A1D73DB22540456L /* 722 */, 0x04B6A0F9D9C4179AL /* 723 */, - 0xEFFDAFA2AE3D3C60L /* 724 */, 0xF7C8075BB49496C4L /* 725 */, - 0x9CC5C7141D1CD4E3L /* 726 */, 0x78BD1638218E5534L /* 727 */, - 0xB2F11568F850246AL /* 728 */, 0xEDFABCFA9502BC29L /* 729 */, - 0x796CE5F2DA23051BL /* 730 */, 0xAAE128B0DC93537CL /* 731 */, - 0x3A493DA0EE4B29AEL /* 732 */, 0xB5DF6B2C416895D7L /* 733 */, - 0xFCABBD25122D7F37L /* 734 */, 0x70810B58105DC4B1L /* 735 */, - 0xE10FDD37F7882A90L /* 736 */, 0x524DCAB5518A3F5CL /* 737 */, - 0x3C9E85878451255BL /* 738 */, 0x4029828119BD34E2L /* 739 */, - 0x74A05B6F5D3CECCBL /* 740 */, 0xB610021542E13ECAL /* 741 */, - 0x0FF979D12F59E2ACL /* 742 */, 0x6037DA27E4F9CC50L /* 743 */, - 0x5E92975A0DF1847DL /* 744 */, 0xD66DE190D3E623FEL /* 745 */, - 0x5032D6B87B568048L /* 746 */, 0x9A36B7CE8235216EL /* 747 */, - 0x80272A7A24F64B4AL /* 748 */, 0x93EFED8B8C6916F7L /* 749 */, - 0x37DDBFF44CCE1555L /* 750 */, 0x4B95DB5D4B99BD25L /* 751 */, - 0x92D3FDA169812FC0L /* 752 */, 0xFB1A4A9A90660BB6L /* 753 */, - 0x730C196946A4B9B2L /* 754 */, 0x81E289AA7F49DA68L /* 755 */, - 0x64669A0F83B1A05FL /* 756 */, 0x27B3FF7D9644F48BL /* 757 */, - 0xCC6B615C8DB675B3L /* 758 */, 0x674F20B9BCEBBE95L /* 759 */, - 0x6F31238275655982L /* 760 */, 0x5AE488713E45CF05L /* 761 */, - 0xBF619F9954C21157L /* 762 */, 0xEABAC46040A8EAE9L /* 763 */, - 0x454C6FE9F2C0C1CDL /* 764 */, 0x419CF6496412691CL /* 765 */, - 0xD3DC3BEF265B0F70L /* 766 */, 0x6D0E60F5C3578A9EL /* 767 */ - }, T4 = { 0x5B0E608526323C55L /* 768 */, 0x1A46C1A9FA1B59F5L /* 769 */, - 0xA9E245A17C4C8FFAL /* 770 */, 0x65CA5159DB2955D7L /* 771 */, - 0x05DB0A76CE35AFC2L /* 772 */, 0x81EAC77EA9113D45L /* 773 */, - 0x528EF88AB6AC0A0DL /* 774 */, 0xA09EA253597BE3FFL /* 775 */, - 0x430DDFB3AC48CD56L /* 
776 */, 0xC4B3A67AF45CE46FL /* 777 */, - 0x4ECECFD8FBE2D05EL /* 778 */, 0x3EF56F10B39935F0L /* 779 */, - 0x0B22D6829CD619C6L /* 780 */, 0x17FD460A74DF2069L /* 781 */, - 0x6CF8CC8E8510ED40L /* 782 */, 0xD6C824BF3A6ECAA7L /* 783 */, - 0x61243D581A817049L /* 784 */, 0x048BACB6BBC163A2L /* 785 */, - 0xD9A38AC27D44CC32L /* 786 */, 0x7FDDFF5BAAF410ABL /* 787 */, - 0xAD6D495AA804824BL /* 788 */, 0xE1A6A74F2D8C9F94L /* 789 */, - 0xD4F7851235DEE8E3L /* 790 */, 0xFD4B7F886540D893L /* 791 */, - 0x247C20042AA4BFDAL /* 792 */, 0x096EA1C517D1327CL /* 793 */, - 0xD56966B4361A6685L /* 794 */, 0x277DA5C31221057DL /* 795 */, - 0x94D59893A43ACFF7L /* 796 */, 0x64F0C51CCDC02281L /* 797 */, - 0x3D33BCC4FF6189DBL /* 798 */, 0xE005CB184CE66AF1L /* 799 */, - 0xFF5CCD1D1DB99BEAL /* 800 */, 0xB0B854A7FE42980FL /* 801 */, - 0x7BD46A6A718D4B9FL /* 802 */, 0xD10FA8CC22A5FD8CL /* 803 */, - 0xD31484952BE4BD31L /* 804 */, 0xC7FA975FCB243847L /* 805 */, - 0x4886ED1E5846C407L /* 806 */, 0x28CDDB791EB70B04L /* 807 */, - 0xC2B00BE2F573417FL /* 808 */, 0x5C9590452180F877L /* 809 */, - 0x7A6BDDFFF370EB00L /* 810 */, 0xCE509E38D6D9D6A4L /* 811 */, - 0xEBEB0F00647FA702L /* 812 */, 0x1DCC06CF76606F06L /* 813 */, - 0xE4D9F28BA286FF0AL /* 814 */, 0xD85A305DC918C262L /* 815 */, - 0x475B1D8732225F54L /* 816 */, 0x2D4FB51668CCB5FEL /* 817 */, - 0xA679B9D9D72BBA20L /* 818 */, 0x53841C0D912D43A5L /* 819 */, - 0x3B7EAA48BF12A4E8L /* 820 */, 0x781E0E47F22F1DDFL /* 821 */, - 0xEFF20CE60AB50973L /* 822 */, 0x20D261D19DFFB742L /* 823 */, - 0x16A12B03062A2E39L /* 824 */, 0x1960EB2239650495L /* 825 */, - 0x251C16FED50EB8B8L /* 826 */, 0x9AC0C330F826016EL /* 827 */, - 0xED152665953E7671L /* 828 */, 0x02D63194A6369570L /* 829 */, - 0x5074F08394B1C987L /* 830 */, 0x70BA598C90B25CE1L /* 831 */, - 0x794A15810B9742F6L /* 832 */, 0x0D5925E9FCAF8C6CL /* 833 */, - 0x3067716CD868744EL /* 834 */, 0x910AB077E8D7731BL /* 835 */, - 0x6A61BBDB5AC42F61L /* 836 */, 0x93513EFBF0851567L /* 837 */, - 0xF494724B9E83E9D5L /* 838 */, 0xE887E1985C09648DL /* 839 */, - 0x34B1D3C675370CFDL /* 840 */, 0xDC35E433BC0D255DL /* 841 */, - 0xD0AAB84234131BE0L /* 842 */, 0x08042A50B48B7EAFL /* 843 */, - 0x9997C4EE44A3AB35L /* 844 */, 0x829A7B49201799D0L /* 845 */, - 0x263B8307B7C54441L /* 846 */, 0x752F95F4FD6A6CA6L /* 847 */, - 0x927217402C08C6E5L /* 848 */, 0x2A8AB754A795D9EEL /* 849 */, - 0xA442F7552F72943DL /* 850 */, 0x2C31334E19781208L /* 851 */, - 0x4FA98D7CEAEE6291L /* 852 */, 0x55C3862F665DB309L /* 853 */, - 0xBD0610175D53B1F3L /* 854 */, 0x46FE6CB840413F27L /* 855 */, - 0x3FE03792DF0CFA59L /* 856 */, 0xCFE700372EB85E8FL /* 857 */, - 0xA7BE29E7ADBCE118L /* 858 */, 0xE544EE5CDE8431DDL /* 859 */, - 0x8A781B1B41F1873EL /* 860 */, 0xA5C94C78A0D2F0E7L /* 861 */, - 0x39412E2877B60728L /* 862 */, 0xA1265EF3AFC9A62CL /* 863 */, - 0xBCC2770C6A2506C5L /* 864 */, 0x3AB66DD5DCE1CE12L /* 865 */, - 0xE65499D04A675B37L /* 866 */, 0x7D8F523481BFD216L /* 867 */, - 0x0F6F64FCEC15F389L /* 868 */, 0x74EFBE618B5B13C8L /* 869 */, - 0xACDC82B714273E1DL /* 870 */, 0xDD40BFE003199D17L /* 871 */, - 0x37E99257E7E061F8L /* 872 */, 0xFA52626904775AAAL /* 873 */, - 0x8BBBF63A463D56F9L /* 874 */, 0xF0013F1543A26E64L /* 875 */, - 0xA8307E9F879EC898L /* 876 */, 0xCC4C27A4150177CCL /* 877 */, - 0x1B432F2CCA1D3348L /* 878 */, 0xDE1D1F8F9F6FA013L /* 879 */, - 0x606602A047A7DDD6L /* 880 */, 0xD237AB64CC1CB2C7L /* 881 */, - 0x9B938E7225FCD1D3L /* 882 */, 0xEC4E03708E0FF476L /* 883 */, - 0xFEB2FBDA3D03C12DL /* 884 */, 0xAE0BCED2EE43889AL /* 885 */, - 0x22CB8923EBFB4F43L /* 886 */, 0x69360D013CF7396DL /* 887 
*/, - 0x855E3602D2D4E022L /* 888 */, 0x073805BAD01F784CL /* 889 */, - 0x33E17A133852F546L /* 890 */, 0xDF4874058AC7B638L /* 891 */, - 0xBA92B29C678AA14AL /* 892 */, 0x0CE89FC76CFAADCDL /* 893 */, - 0x5F9D4E0908339E34L /* 894 */, 0xF1AFE9291F5923B9L /* 895 */, - 0x6E3480F60F4A265FL /* 896 */, 0xEEBF3A2AB29B841CL /* 897 */, - 0xE21938A88F91B4ADL /* 898 */, 0x57DFEFF845C6D3C3L /* 899 */, - 0x2F006B0BF62CAAF2L /* 900 */, 0x62F479EF6F75EE78L /* 901 */, - 0x11A55AD41C8916A9L /* 902 */, 0xF229D29084FED453L /* 903 */, - 0x42F1C27B16B000E6L /* 904 */, 0x2B1F76749823C074L /* 905 */, - 0x4B76ECA3C2745360L /* 906 */, 0x8C98F463B91691BDL /* 907 */, - 0x14BCC93CF1ADE66AL /* 908 */, 0x8885213E6D458397L /* 909 */, - 0x8E177DF0274D4711L /* 910 */, 0xB49B73B5503F2951L /* 911 */, - 0x10168168C3F96B6BL /* 912 */, 0x0E3D963B63CAB0AEL /* 913 */, - 0x8DFC4B5655A1DB14L /* 914 */, 0xF789F1356E14DE5CL /* 915 */, - 0x683E68AF4E51DAC1L /* 916 */, 0xC9A84F9D8D4B0FD9L /* 917 */, - 0x3691E03F52A0F9D1L /* 918 */, 0x5ED86E46E1878E80L /* 919 */, - 0x3C711A0E99D07150L /* 920 */, 0x5A0865B20C4E9310L /* 921 */, - 0x56FBFC1FE4F0682EL /* 922 */, 0xEA8D5DE3105EDF9BL /* 923 */, - 0x71ABFDB12379187AL /* 924 */, 0x2EB99DE1BEE77B9CL /* 925 */, - 0x21ECC0EA33CF4523L /* 926 */, 0x59A4D7521805C7A1L /* 927 */, - 0x3896F5EB56AE7C72L /* 928 */, 0xAA638F3DB18F75DCL /* 929 */, - 0x9F39358DABE9808EL /* 930 */, 0xB7DEFA91C00B72ACL /* 931 */, - 0x6B5541FD62492D92L /* 932 */, 0x6DC6DEE8F92E4D5BL /* 933 */, - 0x353F57ABC4BEEA7EL /* 934 */, 0x735769D6DA5690CEL /* 935 */, - 0x0A234AA642391484L /* 936 */, 0xF6F9508028F80D9DL /* 937 */, - 0xB8E319A27AB3F215L /* 938 */, 0x31AD9C1151341A4DL /* 939 */, - 0x773C22A57BEF5805L /* 940 */, 0x45C7561A07968633L /* 941 */, - 0xF913DA9E249DBE36L /* 942 */, 0xDA652D9B78A64C68L /* 943 */, - 0x4C27A97F3BC334EFL /* 944 */, 0x76621220E66B17F4L /* 945 */, - 0x967743899ACD7D0BL /* 946 */, 0xF3EE5BCAE0ED6782L /* 947 */, - 0x409F753600C879FCL /* 948 */, 0x06D09A39B5926DB6L /* 949 */, - 0x6F83AEB0317AC588L /* 950 */, 0x01E6CA4A86381F21L /* 951 */, - 0x66FF3462D19F3025L /* 952 */, 0x72207C24DDFD3BFBL /* 953 */, - 0x4AF6B6D3E2ECE2EBL /* 954 */, 0x9C994DBEC7EA08DEL /* 955 */, - 0x49ACE597B09A8BC4L /* 956 */, 0xB38C4766CF0797BAL /* 957 */, - 0x131B9373C57C2A75L /* 958 */, 0xB1822CCE61931E58L /* 959 */, - 0x9D7555B909BA1C0CL /* 960 */, 0x127FAFDD937D11D2L /* 961 */, - 0x29DA3BADC66D92E4L /* 962 */, 0xA2C1D57154C2ECBCL /* 963 */, - 0x58C5134D82F6FE24L /* 964 */, 0x1C3AE3515B62274FL /* 965 */, - 0xE907C82E01CB8126L /* 966 */, 0xF8ED091913E37FCBL /* 967 */, - 0x3249D8F9C80046C9L /* 968 */, 0x80CF9BEDE388FB63L /* 969 */, - 0x1881539A116CF19EL /* 970 */, 0x5103F3F76BD52457L /* 971 */, - 0x15B7E6F5AE47F7A8L /* 972 */, 0xDBD7C6DED47E9CCFL /* 973 */, - 0x44E55C410228BB1AL /* 974 */, 0xB647D4255EDB4E99L /* 975 */, - 0x5D11882BB8AAFC30L /* 976 */, 0xF5098BBB29D3212AL /* 977 */, - 0x8FB5EA14E90296B3L /* 978 */, 0x677B942157DD025AL /* 979 */, - 0xFB58E7C0A390ACB5L /* 980 */, 0x89D3674C83BD4A01L /* 981 */, - 0x9E2DA4DF4BF3B93BL /* 982 */, 0xFCC41E328CAB4829L /* 983 */, - 0x03F38C96BA582C52L /* 984 */, 0xCAD1BDBD7FD85DB2L /* 985 */, - 0xBBB442C16082AE83L /* 986 */, 0xB95FE86BA5DA9AB0L /* 987 */, - 0xB22E04673771A93FL /* 988 */, 0x845358C9493152D8L /* 989 */, - 0xBE2A488697B4541EL /* 990 */, 0x95A2DC2DD38E6966L /* 991 */, - 0xC02C11AC923C852BL /* 992 */, 0x2388B1990DF2A87BL /* 993 */, - 0x7C8008FA1B4F37BEL /* 994 */, 0x1F70D0C84D54E503L /* 995 */, - 0x5490ADEC7ECE57D4L /* 996 */, 0x002B3C27D9063A3AL /* 997 */, - 0x7EAEA3848030A2BFL /* 998 
*/, 0xC602326DED2003C0L /* 999 */, - 0x83A7287D69A94086L /* 1000 */, 0xC57A5FCB30F57A8AL /* 1001 */, - 0xB56844E479EBE779L /* 1002 */, 0xA373B40F05DCBCE9L /* 1003 */, - 0xD71A786E88570EE2L /* 1004 */, 0x879CBACDBDE8F6A0L /* 1005 */, - 0x976AD1BCC164A32FL /* 1006 */, 0xAB21E25E9666D78BL /* 1007 */, - 0x901063AAE5E5C33CL /* 1008 */, 0x9818B34448698D90L /* 1009 */, - 0xE36487AE3E1E8ABBL /* 1010 */, 0xAFBDF931893BDCB4L /* 1011 */, - 0x6345A0DC5FBBD519L /* 1012 */, 0x8628FE269B9465CAL /* 1013 */, - 0x1E5D01603F9C51ECL /* 1014 */, 0x4DE44006A15049B7L /* 1015 */, - 0xBF6C70E5F776CBB1L /* 1016 */, 0x411218F2EF552BEDL /* 1017 */, - 0xCB0C0708705A36A3L /* 1018 */, 0xE74D14754F986044L /* 1019 */, - 0xCD56D9430EA8280EL /* 1020 */, 0xC12591D7535F5065L /* 1021 */, - 0xC83223F1720AEF96L /* 1022 */, 0xC3A0396F7363A51FL /* 1023 */ - }; -} + +/** + * @version $Revision: 1.5 $ + * @author Jeroen C. van Gelderen (gelderen@cryptix.org) + */ + +class Tiger_Hash implements Cloneable { + + // Constants + // ........................................................................... + + /** Length of this hash */ + + // Instance variables + // ........................................................................... + + /** Hash context */ + private long a, b, c; + + // Constructors + // ........................................................................... + + public Tiger_Hash() { + coreReset(); + } + + private Tiger_Hash(Tiger_Hash src) { + this.a = src.a; + this.b = src.b; + this.c = src.c; + } + + @Override + public Object clone() { + return new Tiger_Hash(this); + } + + // Concreteness + // ........................................................................... + + protected void coreDigest(byte[] buf, int off) { + buf[off++] = (byte) (a); + buf[off++] = (byte) (a >> 8); + buf[off++] = (byte) (a >> 16); + buf[off++] = (byte) (a >> 24); + buf[off++] = (byte) (a >> 32); + buf[off++] = (byte) (a >> 40); + buf[off++] = (byte) (a >> 48); + buf[off++] = (byte) (a >> 56); + + buf[off++] = (byte) (b); + buf[off++] = (byte) (b >> 8); + buf[off++] = (byte) (b >> 16); + buf[off++] = (byte) (b >> 24); + buf[off++] = (byte) (b >> 32); + buf[off++] = (byte) (b >> 40); + buf[off++] = (byte) (b >> 48); + buf[off++] = (byte) (b >> 56); + + buf[off++] = (byte) (c); + buf[off++] = (byte) (c >> 8); + buf[off++] = (byte) (c >> 16); + buf[off++] = (byte) (c >> 24); + buf[off++] = (byte) (c >> 32); + buf[off++] = (byte) (c >> 40); + buf[off++] = (byte) (c >> 48); + buf[off] = (byte) (c >> 56); + } + + protected void coreReset() { + a = 0x0123456789ABCDEFL; + b = 0xFEDCBA9876543210L; + c = 0xF096A5B4C3B2E187L; + } + + protected void coreUpdate(byte[] block, int offset, int run_until) { + int off = offset; + long[] tmp = new long[8]; + for (int i = 0; i < 8; i++) + tmp[i] = ((block[offset++] & (long) 0xFF)) + | ((block[offset++] & (long) 0xFF) << 8) + | ((block[offset++] & (long) 0xFF) << 16) + | ((block[offset++] & (long) 0xFF) << 24) + | ((block[offset++] & (long) 0xFF) << 32) + | ((block[offset++] & (long) 0xFF) << 40) + | ((block[offset++] & (long) 0xFF) << 48) + | ((block[offset++] & (long) 0xFF) << 56); + + compress(tmp, run_until); + + // just for us... 
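+ // Copy the message words back into 'block' after compress(), least-significant byte first,
+ // starting at the original offset; compress() may have altered them via its key schedule.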
+ for (int i = 0; i < 8; i++) { + long ct = tmp[i]; + block[off++] = (byte) (ct); + block[off++] = (byte) (ct >> 8); + block[off++] = (byte) (ct >> 16); + block[off++] = (byte) (ct >> 24); + block[off++] = (byte) (ct >> 32); + block[off++] = (byte) (ct >> 40); + block[off++] = (byte) (ct >> 48); + block[off] = (byte) (ct >> 56); + } + } + + // Private methods + // ........................................................................... + + private void compress(long[] x, int breakpoint) { + System.out.println("Starting compress."); + long aa = a, bb = b, cc = c; + + roundABC(x[0], 5); + if (breakpoint == 1) + return; + roundBCA(x[1], 5); + if (breakpoint == 2) + return; + roundCAB(x[2], 5); + if (breakpoint == 3) + return; + roundABC(x[3], 5); + if (breakpoint == 4) + return; + roundBCA(x[4], 5); + if (breakpoint == 5) + return; + roundCAB(x[5], 5); + if (breakpoint == 6) + return; + roundABC(x[6], 5); + if (breakpoint == 7) + return; + roundBCA(x[7], 5); + if (breakpoint == 8) + return; + + schedule(x); + if (breakpoint == 9) + return; + + roundCAB(x[0], 7); + if (breakpoint == 10) + return; + roundABC(x[1], 7); + if (breakpoint == 11) + return; + roundBCA(x[2], 7); + if (breakpoint == 12) + return; + roundCAB(x[3], 7); + if (breakpoint == 13) + return; + roundABC(x[4], 7); + if (breakpoint == 14) + return; + roundBCA(x[5], 7); + if (breakpoint == 15) + return; + roundCAB(x[6], 7); + if (breakpoint == 16) + return; + roundABC(x[7], 7); + if (breakpoint == 17) + return; + + schedule(x); + if (breakpoint == 18) + return; + + roundBCA(x[0], 9); + if (breakpoint == 19) + return; + roundCAB(x[1], 9); + if (breakpoint == 20) + return; + roundABC(x[2], 9); + if (breakpoint == 21) + return; + roundBCA(x[3], 9); + if (breakpoint == 22) + return; + roundCAB(x[4], 9); + if (breakpoint == 23) + return; + roundABC(x[5], 9); + if (breakpoint == 24) + return; + roundBCA(x[6], 9); + if (breakpoint == 25) + return; + roundCAB(x[7], 9); + if (breakpoint == 26) + return; + + // feed forward + a ^= aa; + b -= bb; + c += cc; + + System.out.println("Completed entire compress."); + } + + private void schedule(long[] x) { + x[0] -= x[7] ^ 0xA5A5A5A5A5A5A5A5L; + x[1] ^= x[0]; + x[2] += x[1]; + x[3] -= x[2] ^ ((~x[1]) << 19); + x[4] ^= x[3]; + x[5] += x[4]; + x[6] -= x[5] ^ ((~x[4]) >>> 23); + x[7] ^= x[6]; + x[0] += x[7]; + x[1] -= x[0] ^ ((~x[7]) << 19); + x[2] ^= x[1]; + x[3] += x[2]; + x[4] -= x[3] ^ ((~x[2]) >>> 23); + x[5] ^= x[4]; + x[6] += x[5]; + x[7] -= x[6] ^ 0x0123456789ABCDEFL; + } + + private void roundABC(long x, int mul) { + c ^= x; + + int c0 = (int) (c) & 0xFF, c1 = (int) (c >>> 8) & 0xFF, c2 = (int) (c >>> 16) & 0xFF, c3 = (int) (c >>> 24) & 0xFF, c4 = (int) (c >>> 32) & 0xFF, c5 = (int) (c >>> 40) & 0xFF, c6 = (int) (c >>> 48) & 0xFF, c7 = (int) (c >>> 56); + + a -= T1[c0] ^ T2[c2] ^ T3[c4] ^ T4[c6]; + b += T4[c1] ^ T3[c3] ^ T2[c5] ^ T1[c7]; + b *= mul; + } + + private void roundBCA(long x, int mul) { + a ^= x; + + int a0 = (int) (a) & 0xFF, a1 = (int) (a >>> 8) & 0xFF, a2 = (int) (a >>> 16) & 0xFF, a3 = (int) (a >>> 24) & 0xFF, a4 = (int) (a >>> 32) & 0xFF, a5 = (int) (a >>> 40) & 0xFF, a6 = (int) (a >>> 48) & 0xFF, a7 = (int) (a >>> 56); + + b -= T1[a0] ^ T2[a2] ^ T3[a4] ^ T4[a6]; + c += T4[a1] ^ T3[a3] ^ T2[a5] ^ T1[a7]; + c *= mul; + } + + private void roundCAB(long x, int mul) { + b ^= x; + + int b0 = (int) (b) & 0xFF, b1 = (int) (b >>> 8) & 0xFF, b2 = (int) (b >>> 16) & 0xFF, b3 = (int) (b >>> 24) & 0xFF, b4 = (int) (b >>> 32) & 0xFF, b5 = (int) (b >>> 40) & 0xFF, b6 = (int) (b >>> 48) 
& 0xFF, b7 = (int) (b >>> 56); + + c -= T1[b0] ^ T2[b2] ^ T3[b4] ^ T4[b6]; + a += T4[b1] ^ T3[b3] ^ T2[b5] ^ T1[b7]; + a *= mul; + } + + // Tables + // ........................................................................... + + /** + * Tiger_Hash S-Boxes + */ + private static final long[] T1 = { 0x02AAB17CF7E90C5EL /* 0 */, + 0xAC424B03E243A8ECL /* 1 */, 0x72CD5BE30DD5FCD3L /* 2 */, + 0x6D019B93F6F97F3AL /* 3 */, 0xCD9978FFD21F9193L /* 4 */, + 0x7573A1C9708029E2L /* 5 */, 0xB164326B922A83C3L /* 6 */, + 0x46883EEE04915870L /* 7 */, 0xEAACE3057103ECE6L /* 8 */, + 0xC54169B808A3535CL /* 9 */, 0x4CE754918DDEC47CL /* 10 */, + 0x0AA2F4DFDC0DF40CL /* 11 */, 0x10B76F18A74DBEFAL /* 12 */, + 0xC6CCB6235AD1AB6AL /* 13 */, 0x13726121572FE2FFL /* 14 */, + 0x1A488C6F199D921EL /* 15 */, 0x4BC9F9F4DA0007CAL /* 16 */, + 0x26F5E6F6E85241C7L /* 17 */, 0x859079DBEA5947B6L /* 18 */, + 0x4F1885C5C99E8C92L /* 19 */, 0xD78E761EA96F864BL /* 20 */, + 0x8E36428C52B5C17DL /* 21 */, 0x69CF6827373063C1L /* 22 */, + 0xB607C93D9BB4C56EL /* 23 */, 0x7D820E760E76B5EAL /* 24 */, + 0x645C9CC6F07FDC42L /* 25 */, 0xBF38A078243342E0L /* 26 */, + 0x5F6B343C9D2E7D04L /* 27 */, 0xF2C28AEB600B0EC6L /* 28 */, + 0x6C0ED85F7254BCACL /* 29 */, 0x71592281A4DB4FE5L /* 30 */, + 0x1967FA69CE0FED9FL /* 31 */, 0xFD5293F8B96545DBL /* 32 */, + 0xC879E9D7F2A7600BL /* 33 */, 0x860248920193194EL /* 34 */, + 0xA4F9533B2D9CC0B3L /* 35 */, 0x9053836C15957613L /* 36 */, + 0xDB6DCF8AFC357BF1L /* 37 */, 0x18BEEA7A7A370F57L /* 38 */, + 0x037117CA50B99066L /* 39 */, 0x6AB30A9774424A35L /* 40 */, + 0xF4E92F02E325249BL /* 41 */, 0x7739DB07061CCAE1L /* 42 */, + 0xD8F3B49CECA42A05L /* 43 */, 0xBD56BE3F51382F73L /* 44 */, + 0x45FAED5843B0BB28L /* 45 */, 0x1C813D5C11BF1F83L /* 46 */, + 0x8AF0E4B6D75FA169L /* 47 */, 0x33EE18A487AD9999L /* 48 */, + 0x3C26E8EAB1C94410L /* 49 */, 0xB510102BC0A822F9L /* 50 */, + 0x141EEF310CE6123BL /* 51 */, 0xFC65B90059DDB154L /* 52 */, + 0xE0158640C5E0E607L /* 53 */, 0x884E079826C3A3CFL /* 54 */, + 0x930D0D9523C535FDL /* 55 */, 0x35638D754E9A2B00L /* 56 */, + 0x4085FCCF40469DD5L /* 57 */, 0xC4B17AD28BE23A4CL /* 58 */, + 0xCAB2F0FC6A3E6A2EL /* 59 */, 0x2860971A6B943FCDL /* 60 */, + 0x3DDE6EE212E30446L /* 61 */, 0x6222F32AE01765AEL /* 62 */, + 0x5D550BB5478308FEL /* 63 */, 0xA9EFA98DA0EDA22AL /* 64 */, + 0xC351A71686C40DA7L /* 65 */, 0x1105586D9C867C84L /* 66 */, + 0xDCFFEE85FDA22853L /* 67 */, 0xCCFBD0262C5EEF76L /* 68 */, + 0xBAF294CB8990D201L /* 69 */, 0xE69464F52AFAD975L /* 70 */, + 0x94B013AFDF133E14L /* 71 */, 0x06A7D1A32823C958L /* 72 */, + 0x6F95FE5130F61119L /* 73 */, 0xD92AB34E462C06C0L /* 74 */, + 0xED7BDE33887C71D2L /* 75 */, 0x79746D6E6518393EL /* 76 */, + 0x5BA419385D713329L /* 77 */, 0x7C1BA6B948A97564L /* 78 */, + 0x31987C197BFDAC67L /* 79 */, 0xDE6C23C44B053D02L /* 80 */, + 0x581C49FED002D64DL /* 81 */, 0xDD474D6338261571L /* 82 */, + 0xAA4546C3E473D062L /* 83 */, 0x928FCE349455F860L /* 84 */, + 0x48161BBACAAB94D9L /* 85 */, 0x63912430770E6F68L /* 86 */, + 0x6EC8A5E602C6641CL /* 87 */, 0x87282515337DDD2BL /* 88 */, + 0x2CDA6B42034B701BL /* 89 */, 0xB03D37C181CB096DL /* 90 */, + 0xE108438266C71C6FL /* 91 */, 0x2B3180C7EB51B255L /* 92 */, + 0xDF92B82F96C08BBCL /* 93 */, 0x5C68C8C0A632F3BAL /* 94 */, + 0x5504CC861C3D0556L /* 95 */, 0xABBFA4E55FB26B8FL /* 96 */, + 0x41848B0AB3BACEB4L /* 97 */, 0xB334A273AA445D32L /* 98 */, + 0xBCA696F0A85AD881L /* 99 */, 0x24F6EC65B528D56CL /* 100 */, + 0x0CE1512E90F4524AL /* 101 */, 0x4E9DD79D5506D35AL /* 102 */, + 0x258905FAC6CE9779L /* 103 */, 0x2019295B3E109B33L /* 104 */, + 
0xF8A9478B73A054CCL /* 105 */, 0x2924F2F934417EB0L /* 106 */, + 0x3993357D536D1BC4L /* 107 */, 0x38A81AC21DB6FF8BL /* 108 */, + 0x47C4FBF17D6016BFL /* 109 */, 0x1E0FAADD7667E3F5L /* 110 */, + 0x7ABCFF62938BEB96L /* 111 */, 0xA78DAD948FC179C9L /* 112 */, + 0x8F1F98B72911E50DL /* 113 */, 0x61E48EAE27121A91L /* 114 */, + 0x4D62F7AD31859808L /* 115 */, 0xECEBA345EF5CEAEBL /* 116 */, + 0xF5CEB25EBC9684CEL /* 117 */, 0xF633E20CB7F76221L /* 118 */, + 0xA32CDF06AB8293E4L /* 119 */, 0x985A202CA5EE2CA4L /* 120 */, + 0xCF0B8447CC8A8FB1L /* 121 */, 0x9F765244979859A3L /* 122 */, + 0xA8D516B1A1240017L /* 123 */, 0x0BD7BA3EBB5DC726L /* 124 */, + 0xE54BCA55B86ADB39L /* 125 */, 0x1D7A3AFD6C478063L /* 126 */, + 0x519EC608E7669EDDL /* 127 */, 0x0E5715A2D149AA23L /* 128 */, + 0x177D4571848FF194L /* 129 */, 0xEEB55F3241014C22L /* 130 */, + 0x0F5E5CA13A6E2EC2L /* 131 */, 0x8029927B75F5C361L /* 132 */, + 0xAD139FABC3D6E436L /* 133 */, 0x0D5DF1A94CCF402FL /* 134 */, + 0x3E8BD948BEA5DFC8L /* 135 */, 0xA5A0D357BD3FF77EL /* 136 */, + 0xA2D12E251F74F645L /* 137 */, 0x66FD9E525E81A082L /* 138 */, + 0x2E0C90CE7F687A49L /* 139 */, 0xC2E8BCBEBA973BC5L /* 140 */, + 0x000001BCE509745FL /* 141 */, 0x423777BBE6DAB3D6L /* 142 */, + 0xD1661C7EAEF06EB5L /* 143 */, 0xA1781F354DAACFD8L /* 144 */, + 0x2D11284A2B16AFFCL /* 145 */, 0xF1FC4F67FA891D1FL /* 146 */, + 0x73ECC25DCB920ADAL /* 147 */, 0xAE610C22C2A12651L /* 148 */, + 0x96E0A810D356B78AL /* 149 */, 0x5A9A381F2FE7870FL /* 150 */, + 0xD5AD62EDE94E5530L /* 151 */, 0xD225E5E8368D1427L /* 152 */, + 0x65977B70C7AF4631L /* 153 */, 0x99F889B2DE39D74FL /* 154 */, + 0x233F30BF54E1D143L /* 155 */, 0x9A9675D3D9A63C97L /* 156 */, + 0x5470554FF334F9A8L /* 157 */, 0x166ACB744A4F5688L /* 158 */, + 0x70C74CAAB2E4AEADL /* 159 */, 0xF0D091646F294D12L /* 160 */, + 0x57B82A89684031D1L /* 161 */, 0xEFD95A5A61BE0B6BL /* 162 */, + 0x2FBD12E969F2F29AL /* 163 */, 0x9BD37013FEFF9FE8L /* 164 */, + 0x3F9B0404D6085A06L /* 165 */, 0x4940C1F3166CFE15L /* 166 */, + 0x09542C4DCDF3DEFBL /* 167 */, 0xB4C5218385CD5CE3L /* 168 */, + 0xC935B7DC4462A641L /* 169 */, 0x3417F8A68ED3B63FL /* 170 */, + 0xB80959295B215B40L /* 171 */, 0xF99CDAEF3B8C8572L /* 172 */, + 0x018C0614F8FCB95DL /* 173 */, 0x1B14ACCD1A3ACDF3L /* 174 */, + 0x84D471F200BB732DL /* 175 */, 0xC1A3110E95E8DA16L /* 176 */, + 0x430A7220BF1A82B8L /* 177 */, 0xB77E090D39DF210EL /* 178 */, + 0x5EF4BD9F3CD05E9DL /* 179 */, 0x9D4FF6DA7E57A444L /* 180 */, + 0xDA1D60E183D4A5F8L /* 181 */, 0xB287C38417998E47L /* 182 */, + 0xFE3EDC121BB31886L /* 183 */, 0xC7FE3CCC980CCBEFL /* 184 */, + 0xE46FB590189BFD03L /* 185 */, 0x3732FD469A4C57DCL /* 186 */, + 0x7EF700A07CF1AD65L /* 187 */, 0x59C64468A31D8859L /* 188 */, + 0x762FB0B4D45B61F6L /* 189 */, 0x155BAED099047718L /* 190 */, + 0x68755E4C3D50BAA6L /* 191 */, 0xE9214E7F22D8B4DFL /* 192 */, + 0x2ADDBF532EAC95F4L /* 193 */, 0x32AE3909B4BD0109L /* 194 */, + 0x834DF537B08E3450L /* 195 */, 0xFA209DA84220728DL /* 196 */, + 0x9E691D9B9EFE23F7L /* 197 */, 0x0446D288C4AE8D7FL /* 198 */, + 0x7B4CC524E169785BL /* 199 */, 0x21D87F0135CA1385L /* 200 */, + 0xCEBB400F137B8AA5L /* 201 */, 0x272E2B66580796BEL /* 202 */, + 0x3612264125C2B0DEL /* 203 */, 0x057702BDAD1EFBB2L /* 204 */, + 0xD4BABB8EACF84BE9L /* 205 */, 0x91583139641BC67BL /* 206 */, + 0x8BDC2DE08036E024L /* 207 */, 0x603C8156F49F68EDL /* 208 */, + 0xF7D236F7DBEF5111L /* 209 */, 0x9727C4598AD21E80L /* 210 */, + 0xA08A0896670A5FD7L /* 211 */, 0xCB4A8F4309EBA9CBL /* 212 */, + 0x81AF564B0F7036A1L /* 213 */, 0xC0B99AA778199ABDL /* 214 */, + 0x959F1EC83FC8E952L /* 215 */, 
0x8C505077794A81B9L /* 216 */, + 0x3ACAAF8F056338F0L /* 217 */, 0x07B43F50627A6778L /* 218 */, + 0x4A44AB49F5ECCC77L /* 219 */, 0x3BC3D6E4B679EE98L /* 220 */, + 0x9CC0D4D1CF14108CL /* 221 */, 0x4406C00B206BC8A0L /* 222 */, + 0x82A18854C8D72D89L /* 223 */, 0x67E366B35C3C432CL /* 224 */, + 0xB923DD61102B37F2L /* 225 */, 0x56AB2779D884271DL /* 226 */, + 0xBE83E1B0FF1525AFL /* 227 */, 0xFB7C65D4217E49A9L /* 228 */, + 0x6BDBE0E76D48E7D4L /* 229 */, 0x08DF828745D9179EL /* 230 */, + 0x22EA6A9ADD53BD34L /* 231 */, 0xE36E141C5622200AL /* 232 */, + 0x7F805D1B8CB750EEL /* 233 */, 0xAFE5C7A59F58E837L /* 234 */, + 0xE27F996A4FB1C23CL /* 235 */, 0xD3867DFB0775F0D0L /* 236 */, + 0xD0E673DE6E88891AL /* 237 */, 0x123AEB9EAFB86C25L /* 238 */, + 0x30F1D5D5C145B895L /* 239 */, 0xBB434A2DEE7269E7L /* 240 */, + 0x78CB67ECF931FA38L /* 241 */, 0xF33B0372323BBF9CL /* 242 */, + 0x52D66336FB279C74L /* 243 */, 0x505F33AC0AFB4EAAL /* 244 */, + 0xE8A5CD99A2CCE187L /* 245 */, 0x534974801E2D30BBL /* 246 */, + 0x8D2D5711D5876D90L /* 247 */, 0x1F1A412891BC038EL /* 248 */, + 0xD6E2E71D82E56648L /* 249 */, 0x74036C3A497732B7L /* 250 */, + 0x89B67ED96361F5ABL /* 251 */, 0xFFED95D8F1EA02A2L /* 252 */, + 0xE72B3BD61464D43DL /* 253 */, 0xA6300F170BDC4820L /* 254 */, + 0xEBC18760ED78A77AL /* 255 */ + }, T2 = { 0xE6A6BE5A05A12138L /* 256 */, 0xB5A122A5B4F87C98L /* 257 */, + 0x563C6089140B6990L /* 258 */, 0x4C46CB2E391F5DD5L /* 259 */, + 0xD932ADDBC9B79434L /* 260 */, 0x08EA70E42015AFF5L /* 261 */, + 0xD765A6673E478CF1L /* 262 */, 0xC4FB757EAB278D99L /* 263 */, + 0xDF11C6862D6E0692L /* 264 */, 0xDDEB84F10D7F3B16L /* 265 */, + 0x6F2EF604A665EA04L /* 266 */, 0x4A8E0F0FF0E0DFB3L /* 267 */, + 0xA5EDEEF83DBCBA51L /* 268 */, 0xFC4F0A2A0EA4371EL /* 269 */, + 0xE83E1DA85CB38429L /* 270 */, 0xDC8FF882BA1B1CE2L /* 271 */, + 0xCD45505E8353E80DL /* 272 */, 0x18D19A00D4DB0717L /* 273 */, + 0x34A0CFEDA5F38101L /* 274 */, 0x0BE77E518887CAF2L /* 275 */, + 0x1E341438B3C45136L /* 276 */, 0xE05797F49089CCF9L /* 277 */, + 0xFFD23F9DF2591D14L /* 278 */, 0x543DDA228595C5CDL /* 279 */, + 0x661F81FD99052A33L /* 280 */, 0x8736E641DB0F7B76L /* 281 */, + 0x15227725418E5307L /* 282 */, 0xE25F7F46162EB2FAL /* 283 */, + 0x48A8B2126C13D9FEL /* 284 */, 0xAFDC541792E76EEAL /* 285 */, + 0x03D912BFC6D1898FL /* 286 */, 0x31B1AAFA1B83F51BL /* 287 */, + 0xF1AC2796E42AB7D9L /* 288 */, 0x40A3A7D7FCD2EBACL /* 289 */, + 0x1056136D0AFBBCC5L /* 290 */, 0x7889E1DD9A6D0C85L /* 291 */, + 0xD33525782A7974AAL /* 292 */, 0xA7E25D09078AC09BL /* 293 */, + 0xBD4138B3EAC6EDD0L /* 294 */, 0x920ABFBE71EB9E70L /* 295 */, + 0xA2A5D0F54FC2625CL /* 296 */, 0xC054E36B0B1290A3L /* 297 */, + 0xF6DD59FF62FE932BL /* 298 */, 0x3537354511A8AC7DL /* 299 */, + 0xCA845E9172FADCD4L /* 300 */, 0x84F82B60329D20DCL /* 301 */, + 0x79C62CE1CD672F18L /* 302 */, 0x8B09A2ADD124642CL /* 303 */, + 0xD0C1E96A19D9E726L /* 304 */, 0x5A786A9B4BA9500CL /* 305 */, + 0x0E020336634C43F3L /* 306 */, 0xC17B474AEB66D822L /* 307 */, + 0x6A731AE3EC9BAAC2L /* 308 */, 0x8226667AE0840258L /* 309 */, + 0x67D4567691CAECA5L /* 310 */, 0x1D94155C4875ADB5L /* 311 */, + 0x6D00FD985B813FDFL /* 312 */, 0x51286EFCB774CD06L /* 313 */, + 0x5E8834471FA744AFL /* 314 */, 0xF72CA0AEE761AE2EL /* 315 */, + 0xBE40E4CDAEE8E09AL /* 316 */, 0xE9970BBB5118F665L /* 317 */, + 0x726E4BEB33DF1964L /* 318 */, 0x703B000729199762L /* 319 */, + 0x4631D816F5EF30A7L /* 320 */, 0xB880B5B51504A6BEL /* 321 */, + 0x641793C37ED84B6CL /* 322 */, 0x7B21ED77F6E97D96L /* 323 */, + 0x776306312EF96B73L /* 324 */, 0xAE528948E86FF3F4L /* 325 */, + 0x53DBD7F286A3F8F8L /* 
326 */, 0x16CADCE74CFC1063L /* 327 */, + 0x005C19BDFA52C6DDL /* 328 */, 0x68868F5D64D46AD3L /* 329 */, + 0x3A9D512CCF1E186AL /* 330 */, 0x367E62C2385660AEL /* 331 */, + 0xE359E7EA77DCB1D7L /* 332 */, 0x526C0773749ABE6EL /* 333 */, + 0x735AE5F9D09F734BL /* 334 */, 0x493FC7CC8A558BA8L /* 335 */, + 0xB0B9C1533041AB45L /* 336 */, 0x321958BA470A59BDL /* 337 */, + 0x852DB00B5F46C393L /* 338 */, 0x91209B2BD336B0E5L /* 339 */, + 0x6E604F7D659EF19FL /* 340 */, 0xB99A8AE2782CCB24L /* 341 */, + 0xCCF52AB6C814C4C7L /* 342 */, 0x4727D9AFBE11727BL /* 343 */, + 0x7E950D0C0121B34DL /* 344 */, 0x756F435670AD471FL /* 345 */, + 0xF5ADD442615A6849L /* 346 */, 0x4E87E09980B9957AL /* 347 */, + 0x2ACFA1DF50AEE355L /* 348 */, 0xD898263AFD2FD556L /* 349 */, + 0xC8F4924DD80C8FD6L /* 350 */, 0xCF99CA3D754A173AL /* 351 */, + 0xFE477BACAF91BF3CL /* 352 */, 0xED5371F6D690C12DL /* 353 */, + 0x831A5C285E687094L /* 354 */, 0xC5D3C90A3708A0A4L /* 355 */, + 0x0F7F903717D06580L /* 356 */, 0x19F9BB13B8FDF27FL /* 357 */, + 0xB1BD6F1B4D502843L /* 358 */, 0x1C761BA38FFF4012L /* 359 */, + 0x0D1530C4E2E21F3BL /* 360 */, 0x8943CE69A7372C8AL /* 361 */, + 0xE5184E11FEB5CE66L /* 362 */, 0x618BDB80BD736621L /* 363 */, + 0x7D29BAD68B574D0BL /* 364 */, 0x81BB613E25E6FE5BL /* 365 */, + 0x071C9C10BC07913FL /* 366 */, 0xC7BEEB7909AC2D97L /* 367 */, + 0xC3E58D353BC5D757L /* 368 */, 0xEB017892F38F61E8L /* 369 */, + 0xD4EFFB9C9B1CC21AL /* 370 */, 0x99727D26F494F7ABL /* 371 */, + 0xA3E063A2956B3E03L /* 372 */, 0x9D4A8B9A4AA09C30L /* 373 */, + 0x3F6AB7D500090FB4L /* 374 */, 0x9CC0F2A057268AC0L /* 375 */, + 0x3DEE9D2DEDBF42D1L /* 376 */, 0x330F49C87960A972L /* 377 */, + 0xC6B2720287421B41L /* 378 */, 0x0AC59EC07C00369CL /* 379 */, + 0xEF4EAC49CB353425L /* 380 */, 0xF450244EEF0129D8L /* 381 */, + 0x8ACC46E5CAF4DEB6L /* 382 */, 0x2FFEAB63989263F7L /* 383 */, + 0x8F7CB9FE5D7A4578L /* 384 */, 0x5BD8F7644E634635L /* 385 */, + 0x427A7315BF2DC900L /* 386 */, 0x17D0C4AA2125261CL /* 387 */, + 0x3992486C93518E50L /* 388 */, 0xB4CBFEE0A2D7D4C3L /* 389 */, + 0x7C75D6202C5DDD8DL /* 390 */, 0xDBC295D8E35B6C61L /* 391 */, + 0x60B369D302032B19L /* 392 */, 0xCE42685FDCE44132L /* 393 */, + 0x06F3DDB9DDF65610L /* 394 */, 0x8EA4D21DB5E148F0L /* 395 */, + 0x20B0FCE62FCD496FL /* 396 */, 0x2C1B912358B0EE31L /* 397 */, + 0xB28317B818F5A308L /* 398 */, 0xA89C1E189CA6D2CFL /* 399 */, + 0x0C6B18576AAADBC8L /* 400 */, 0xB65DEAA91299FAE3L /* 401 */, + 0xFB2B794B7F1027E7L /* 402 */, 0x04E4317F443B5BEBL /* 403 */, + 0x4B852D325939D0A6L /* 404 */, 0xD5AE6BEEFB207FFCL /* 405 */, + 0x309682B281C7D374L /* 406 */, 0xBAE309A194C3B475L /* 407 */, + 0x8CC3F97B13B49F05L /* 408 */, 0x98A9422FF8293967L /* 409 */, + 0x244B16B01076FF7CL /* 410 */, 0xF8BF571C663D67EEL /* 411 */, + 0x1F0D6758EEE30DA1L /* 412 */, 0xC9B611D97ADEB9B7L /* 413 */, + 0xB7AFD5887B6C57A2L /* 414 */, 0x6290AE846B984FE1L /* 415 */, + 0x94DF4CDEACC1A5FDL /* 416 */, 0x058A5BD1C5483AFFL /* 417 */, + 0x63166CC142BA3C37L /* 418 */, 0x8DB8526EB2F76F40L /* 419 */, + 0xE10880036F0D6D4EL /* 420 */, 0x9E0523C9971D311DL /* 421 */, + 0x45EC2824CC7CD691L /* 422 */, 0x575B8359E62382C9L /* 423 */, + 0xFA9E400DC4889995L /* 424 */, 0xD1823ECB45721568L /* 425 */, + 0xDAFD983B8206082FL /* 426 */, 0xAA7D29082386A8CBL /* 427 */, + 0x269FCD4403B87588L /* 428 */, 0x1B91F5F728BDD1E0L /* 429 */, + 0xE4669F39040201F6L /* 430 */, 0x7A1D7C218CF04ADEL /* 431 */, + 0x65623C29D79CE5CEL /* 432 */, 0x2368449096C00BB1L /* 433 */, + 0xAB9BF1879DA503BAL /* 434 */, 0xBC23ECB1A458058EL /* 435 */, + 0x9A58DF01BB401ECCL /* 436 */, 0xA070E868A85F143DL /* 437 
*/, + 0x4FF188307DF2239EL /* 438 */, 0x14D565B41A641183L /* 439 */, + 0xEE13337452701602L /* 440 */, 0x950E3DCF3F285E09L /* 441 */, + 0x59930254B9C80953L /* 442 */, 0x3BF299408930DA6DL /* 443 */, + 0xA955943F53691387L /* 444 */, 0xA15EDECAA9CB8784L /* 445 */, + 0x29142127352BE9A0L /* 446 */, 0x76F0371FFF4E7AFBL /* 447 */, + 0x0239F450274F2228L /* 448 */, 0xBB073AF01D5E868BL /* 449 */, + 0xBFC80571C10E96C1L /* 450 */, 0xD267088568222E23L /* 451 */, + 0x9671A3D48E80B5B0L /* 452 */, 0x55B5D38AE193BB81L /* 453 */, + 0x693AE2D0A18B04B8L /* 454 */, 0x5C48B4ECADD5335FL /* 455 */, + 0xFD743B194916A1CAL /* 456 */, 0x2577018134BE98C4L /* 457 */, + 0xE77987E83C54A4ADL /* 458 */, 0x28E11014DA33E1B9L /* 459 */, + 0x270CC59E226AA213L /* 460 */, 0x71495F756D1A5F60L /* 461 */, + 0x9BE853FB60AFEF77L /* 462 */, 0xADC786A7F7443DBFL /* 463 */, + 0x0904456173B29A82L /* 464 */, 0x58BC7A66C232BD5EL /* 465 */, + 0xF306558C673AC8B2L /* 466 */, 0x41F639C6B6C9772AL /* 467 */, + 0x216DEFE99FDA35DAL /* 468 */, 0x11640CC71C7BE615L /* 469 */, + 0x93C43694565C5527L /* 470 */, 0xEA038E6246777839L /* 471 */, + 0xF9ABF3CE5A3E2469L /* 472 */, 0x741E768D0FD312D2L /* 473 */, + 0x0144B883CED652C6L /* 474 */, 0xC20B5A5BA33F8552L /* 475 */, + 0x1AE69633C3435A9DL /* 476 */, 0x97A28CA4088CFDECL /* 477 */, + 0x8824A43C1E96F420L /* 478 */, 0x37612FA66EEEA746L /* 479 */, + 0x6B4CB165F9CF0E5AL /* 480 */, 0x43AA1C06A0ABFB4AL /* 481 */, + 0x7F4DC26FF162796BL /* 482 */, 0x6CBACC8E54ED9B0FL /* 483 */, + 0xA6B7FFEFD2BB253EL /* 484 */, 0x2E25BC95B0A29D4FL /* 485 */, + 0x86D6A58BDEF1388CL /* 486 */, 0xDED74AC576B6F054L /* 487 */, + 0x8030BDBC2B45805DL /* 488 */, 0x3C81AF70E94D9289L /* 489 */, + 0x3EFF6DDA9E3100DBL /* 490 */, 0xB38DC39FDFCC8847L /* 491 */, + 0x123885528D17B87EL /* 492 */, 0xF2DA0ED240B1B642L /* 493 */, + 0x44CEFADCD54BF9A9L /* 494 */, 0x1312200E433C7EE6L /* 495 */, + 0x9FFCC84F3A78C748L /* 496 */, 0xF0CD1F72248576BBL /* 497 */, + 0xEC6974053638CFE4L /* 498 */, 0x2BA7B67C0CEC4E4CL /* 499 */, + 0xAC2F4DF3E5CE32EDL /* 500 */, 0xCB33D14326EA4C11L /* 501 */, + 0xA4E9044CC77E58BCL /* 502 */, 0x5F513293D934FCEFL /* 503 */, + 0x5DC9645506E55444L /* 504 */, 0x50DE418F317DE40AL /* 505 */, + 0x388CB31A69DDE259L /* 506 */, 0x2DB4A83455820A86L /* 507 */, + 0x9010A91E84711AE9L /* 508 */, 0x4DF7F0B7B1498371L /* 509 */, + 0xD62A2EABC0977179L /* 510 */, 0x22FAC097AA8D5C0EL /* 511 */ + }, T3 = { 0xF49FCC2FF1DAF39BL /* 512 */, 0x487FD5C66FF29281L /* 513 */, + 0xE8A30667FCDCA83FL /* 514 */, 0x2C9B4BE3D2FCCE63L /* 515 */, + 0xDA3FF74B93FBBBC2L /* 516 */, 0x2FA165D2FE70BA66L /* 517 */, + 0xA103E279970E93D4L /* 518 */, 0xBECDEC77B0E45E71L /* 519 */, + 0xCFB41E723985E497L /* 520 */, 0xB70AAA025EF75017L /* 521 */, + 0xD42309F03840B8E0L /* 522 */, 0x8EFC1AD035898579L /* 523 */, + 0x96C6920BE2B2ABC5L /* 524 */, 0x66AF4163375A9172L /* 525 */, + 0x2174ABDCCA7127FBL /* 526 */, 0xB33CCEA64A72FF41L /* 527 */, + 0xF04A4933083066A5L /* 528 */, 0x8D970ACDD7289AF5L /* 529 */, + 0x8F96E8E031C8C25EL /* 530 */, 0xF3FEC02276875D47L /* 531 */, + 0xEC7BF310056190DDL /* 532 */, 0xF5ADB0AEBB0F1491L /* 533 */, + 0x9B50F8850FD58892L /* 534 */, 0x4975488358B74DE8L /* 535 */, + 0xA3354FF691531C61L /* 536 */, 0x0702BBE481D2C6EEL /* 537 */, + 0x89FB24057DEDED98L /* 538 */, 0xAC3075138596E902L /* 539 */, + 0x1D2D3580172772EDL /* 540 */, 0xEB738FC28E6BC30DL /* 541 */, + 0x5854EF8F63044326L /* 542 */, 0x9E5C52325ADD3BBEL /* 543 */, + 0x90AA53CF325C4623L /* 544 */, 0xC1D24D51349DD067L /* 545 */, + 0x2051CFEEA69EA624L /* 546 */, 0x13220F0A862E7E4FL /* 547 */, + 0xCE39399404E04864L 
/* 548 */, 0xD9C42CA47086FCB7L /* 549 */, + 0x685AD2238A03E7CCL /* 550 */, 0x066484B2AB2FF1DBL /* 551 */, + 0xFE9D5D70EFBF79ECL /* 552 */, 0x5B13B9DD9C481854L /* 553 */, + 0x15F0D475ED1509ADL /* 554 */, 0x0BEBCD060EC79851L /* 555 */, + 0xD58C6791183AB7F8L /* 556 */, 0xD1187C5052F3EEE4L /* 557 */, + 0xC95D1192E54E82FFL /* 558 */, 0x86EEA14CB9AC6CA2L /* 559 */, + 0x3485BEB153677D5DL /* 560 */, 0xDD191D781F8C492AL /* 561 */, + 0xF60866BAA784EBF9L /* 562 */, 0x518F643BA2D08C74L /* 563 */, + 0x8852E956E1087C22L /* 564 */, 0xA768CB8DC410AE8DL /* 565 */, + 0x38047726BFEC8E1AL /* 566 */, 0xA67738B4CD3B45AAL /* 567 */, + 0xAD16691CEC0DDE19L /* 568 */, 0xC6D4319380462E07L /* 569 */, + 0xC5A5876D0BA61938L /* 570 */, 0x16B9FA1FA58FD840L /* 571 */, + 0x188AB1173CA74F18L /* 572 */, 0xABDA2F98C99C021FL /* 573 */, + 0x3E0580AB134AE816L /* 574 */, 0x5F3B05B773645ABBL /* 575 */, + 0x2501A2BE5575F2F6L /* 576 */, 0x1B2F74004E7E8BA9L /* 577 */, + 0x1CD7580371E8D953L /* 578 */, 0x7F6ED89562764E30L /* 579 */, + 0xB15926FF596F003DL /* 580 */, 0x9F65293DA8C5D6B9L /* 581 */, + 0x6ECEF04DD690F84CL /* 582 */, 0x4782275FFF33AF88L /* 583 */, + 0xE41433083F820801L /* 584 */, 0xFD0DFE409A1AF9B5L /* 585 */, + 0x4325A3342CDB396BL /* 586 */, 0x8AE77E62B301B252L /* 587 */, + 0xC36F9E9F6655615AL /* 588 */, 0x85455A2D92D32C09L /* 589 */, + 0xF2C7DEA949477485L /* 590 */, 0x63CFB4C133A39EBAL /* 591 */, + 0x83B040CC6EBC5462L /* 592 */, 0x3B9454C8FDB326B0L /* 593 */, + 0x56F56A9E87FFD78CL /* 594 */, 0x2DC2940D99F42BC6L /* 595 */, + 0x98F7DF096B096E2DL /* 596 */, 0x19A6E01E3AD852BFL /* 597 */, + 0x42A99CCBDBD4B40BL /* 598 */, 0xA59998AF45E9C559L /* 599 */, + 0x366295E807D93186L /* 600 */, 0x6B48181BFAA1F773L /* 601 */, + 0x1FEC57E2157A0A1DL /* 602 */, 0x4667446AF6201AD5L /* 603 */, + 0xE615EBCACFB0F075L /* 604 */, 0xB8F31F4F68290778L /* 605 */, + 0x22713ED6CE22D11EL /* 606 */, 0x3057C1A72EC3C93BL /* 607 */, + 0xCB46ACC37C3F1F2FL /* 608 */, 0xDBB893FD02AAF50EL /* 609 */, + 0x331FD92E600B9FCFL /* 610 */, 0xA498F96148EA3AD6L /* 611 */, + 0xA8D8426E8B6A83EAL /* 612 */, 0xA089B274B7735CDCL /* 613 */, + 0x87F6B3731E524A11L /* 614 */, 0x118808E5CBC96749L /* 615 */, + 0x9906E4C7B19BD394L /* 616 */, 0xAFED7F7E9B24A20CL /* 617 */, + 0x6509EADEEB3644A7L /* 618 */, 0x6C1EF1D3E8EF0EDEL /* 619 */, + 0xB9C97D43E9798FB4L /* 620 */, 0xA2F2D784740C28A3L /* 621 */, + 0x7B8496476197566FL /* 622 */, 0x7A5BE3E6B65F069DL /* 623 */, + 0xF96330ED78BE6F10L /* 624 */, 0xEEE60DE77A076A15L /* 625 */, + 0x2B4BEE4AA08B9BD0L /* 626 */, 0x6A56A63EC7B8894EL /* 627 */, + 0x02121359BA34FEF4L /* 628 */, 0x4CBF99F8283703FCL /* 629 */, + 0x398071350CAF30C8L /* 630 */, 0xD0A77A89F017687AL /* 631 */, + 0xF1C1A9EB9E423569L /* 632 */, 0x8C7976282DEE8199L /* 633 */, + 0x5D1737A5DD1F7ABDL /* 634 */, 0x4F53433C09A9FA80L /* 635 */, + 0xFA8B0C53DF7CA1D9L /* 636 */, 0x3FD9DCBC886CCB77L /* 637 */, + 0xC040917CA91B4720L /* 638 */, 0x7DD00142F9D1DCDFL /* 639 */, + 0x8476FC1D4F387B58L /* 640 */, 0x23F8E7C5F3316503L /* 641 */, + 0x032A2244E7E37339L /* 642 */, 0x5C87A5D750F5A74BL /* 643 */, + 0x082B4CC43698992EL /* 644 */, 0xDF917BECB858F63CL /* 645 */, + 0x3270B8FC5BF86DDAL /* 646 */, 0x10AE72BB29B5DD76L /* 647 */, + 0x576AC94E7700362BL /* 648 */, 0x1AD112DAC61EFB8FL /* 649 */, + 0x691BC30EC5FAA427L /* 650 */, 0xFF246311CC327143L /* 651 */, + 0x3142368E30E53206L /* 652 */, 0x71380E31E02CA396L /* 653 */, + 0x958D5C960AAD76F1L /* 654 */, 0xF8D6F430C16DA536L /* 655 */, + 0xC8FFD13F1BE7E1D2L /* 656 */, 0x7578AE66004DDBE1L /* 657 */, + 0x05833F01067BE646L /* 658 */, 0xBB34B5AD3BFE586DL /* 
659 */, + 0x095F34C9A12B97F0L /* 660 */, 0x247AB64525D60CA8L /* 661 */, + 0xDCDBC6F3017477D1L /* 662 */, 0x4A2E14D4DECAD24DL /* 663 */, + 0xBDB5E6D9BE0A1EEBL /* 664 */, 0x2A7E70F7794301ABL /* 665 */, + 0xDEF42D8A270540FDL /* 666 */, 0x01078EC0A34C22C1L /* 667 */, + 0xE5DE511AF4C16387L /* 668 */, 0x7EBB3A52BD9A330AL /* 669 */, + 0x77697857AA7D6435L /* 670 */, 0x004E831603AE4C32L /* 671 */, + 0xE7A21020AD78E312L /* 672 */, 0x9D41A70C6AB420F2L /* 673 */, + 0x28E06C18EA1141E6L /* 674 */, 0xD2B28CBD984F6B28L /* 675 */, + 0x26B75F6C446E9D83L /* 676 */, 0xBA47568C4D418D7FL /* 677 */, + 0xD80BADBFE6183D8EL /* 678 */, 0x0E206D7F5F166044L /* 679 */, + 0xE258A43911CBCA3EL /* 680 */, 0x723A1746B21DC0BCL /* 681 */, + 0xC7CAA854F5D7CDD3L /* 682 */, 0x7CAC32883D261D9CL /* 683 */, + 0x7690C26423BA942CL /* 684 */, 0x17E55524478042B8L /* 685 */, + 0xE0BE477656A2389FL /* 686 */, 0x4D289B5E67AB2DA0L /* 687 */, + 0x44862B9C8FBBFD31L /* 688 */, 0xB47CC8049D141365L /* 689 */, + 0x822C1B362B91C793L /* 690 */, 0x4EB14655FB13DFD8L /* 691 */, + 0x1ECBBA0714E2A97BL /* 692 */, 0x6143459D5CDE5F14L /* 693 */, + 0x53A8FBF1D5F0AC89L /* 694 */, 0x97EA04D81C5E5B00L /* 695 */, + 0x622181A8D4FDB3F3L /* 696 */, 0xE9BCD341572A1208L /* 697 */, + 0x1411258643CCE58AL /* 698 */, 0x9144C5FEA4C6E0A4L /* 699 */, + 0x0D33D06565CF620FL /* 700 */, 0x54A48D489F219CA1L /* 701 */, + 0xC43E5EAC6D63C821L /* 702 */, 0xA9728B3A72770DAFL /* 703 */, + 0xD7934E7B20DF87EFL /* 704 */, 0xE35503B61A3E86E5L /* 705 */, + 0xCAE321FBC819D504L /* 706 */, 0x129A50B3AC60BFA6L /* 707 */, + 0xCD5E68EA7E9FB6C3L /* 708 */, 0xB01C90199483B1C7L /* 709 */, + 0x3DE93CD5C295376CL /* 710 */, 0xAED52EDF2AB9AD13L /* 711 */, + 0x2E60F512C0A07884L /* 712 */, 0xBC3D86A3E36210C9L /* 713 */, + 0x35269D9B163951CEL /* 714 */, 0x0C7D6E2AD0CDB5FAL /* 715 */, + 0x59E86297D87F5733L /* 716 */, 0x298EF221898DB0E7L /* 717 */, + 0x55000029D1A5AA7EL /* 718 */, 0x8BC08AE1B5061B45L /* 719 */, + 0xC2C31C2B6C92703AL /* 720 */, 0x94CC596BAF25EF42L /* 721 */, + 0x0A1D73DB22540456L /* 722 */, 0x04B6A0F9D9C4179AL /* 723 */, + 0xEFFDAFA2AE3D3C60L /* 724 */, 0xF7C8075BB49496C4L /* 725 */, + 0x9CC5C7141D1CD4E3L /* 726 */, 0x78BD1638218E5534L /* 727 */, + 0xB2F11568F850246AL /* 728 */, 0xEDFABCFA9502BC29L /* 729 */, + 0x796CE5F2DA23051BL /* 730 */, 0xAAE128B0DC93537CL /* 731 */, + 0x3A493DA0EE4B29AEL /* 732 */, 0xB5DF6B2C416895D7L /* 733 */, + 0xFCABBD25122D7F37L /* 734 */, 0x70810B58105DC4B1L /* 735 */, + 0xE10FDD37F7882A90L /* 736 */, 0x524DCAB5518A3F5CL /* 737 */, + 0x3C9E85878451255BL /* 738 */, 0x4029828119BD34E2L /* 739 */, + 0x74A05B6F5D3CECCBL /* 740 */, 0xB610021542E13ECAL /* 741 */, + 0x0FF979D12F59E2ACL /* 742 */, 0x6037DA27E4F9CC50L /* 743 */, + 0x5E92975A0DF1847DL /* 744 */, 0xD66DE190D3E623FEL /* 745 */, + 0x5032D6B87B568048L /* 746 */, 0x9A36B7CE8235216EL /* 747 */, + 0x80272A7A24F64B4AL /* 748 */, 0x93EFED8B8C6916F7L /* 749 */, + 0x37DDBFF44CCE1555L /* 750 */, 0x4B95DB5D4B99BD25L /* 751 */, + 0x92D3FDA169812FC0L /* 752 */, 0xFB1A4A9A90660BB6L /* 753 */, + 0x730C196946A4B9B2L /* 754 */, 0x81E289AA7F49DA68L /* 755 */, + 0x64669A0F83B1A05FL /* 756 */, 0x27B3FF7D9644F48BL /* 757 */, + 0xCC6B615C8DB675B3L /* 758 */, 0x674F20B9BCEBBE95L /* 759 */, + 0x6F31238275655982L /* 760 */, 0x5AE488713E45CF05L /* 761 */, + 0xBF619F9954C21157L /* 762 */, 0xEABAC46040A8EAE9L /* 763 */, + 0x454C6FE9F2C0C1CDL /* 764 */, 0x419CF6496412691CL /* 765 */, + 0xD3DC3BEF265B0F70L /* 766 */, 0x6D0E60F5C3578A9EL /* 767 */ + }, T4 = { 0x5B0E608526323C55L /* 768 */, 0x1A46C1A9FA1B59F5L /* 769 */, + 
0xA9E245A17C4C8FFAL /* 770 */, 0x65CA5159DB2955D7L /* 771 */, + 0x05DB0A76CE35AFC2L /* 772 */, 0x81EAC77EA9113D45L /* 773 */, + 0x528EF88AB6AC0A0DL /* 774 */, 0xA09EA253597BE3FFL /* 775 */, + 0x430DDFB3AC48CD56L /* 776 */, 0xC4B3A67AF45CE46FL /* 777 */, + 0x4ECECFD8FBE2D05EL /* 778 */, 0x3EF56F10B39935F0L /* 779 */, + 0x0B22D6829CD619C6L /* 780 */, 0x17FD460A74DF2069L /* 781 */, + 0x6CF8CC8E8510ED40L /* 782 */, 0xD6C824BF3A6ECAA7L /* 783 */, + 0x61243D581A817049L /* 784 */, 0x048BACB6BBC163A2L /* 785 */, + 0xD9A38AC27D44CC32L /* 786 */, 0x7FDDFF5BAAF410ABL /* 787 */, + 0xAD6D495AA804824BL /* 788 */, 0xE1A6A74F2D8C9F94L /* 789 */, + 0xD4F7851235DEE8E3L /* 790 */, 0xFD4B7F886540D893L /* 791 */, + 0x247C20042AA4BFDAL /* 792 */, 0x096EA1C517D1327CL /* 793 */, + 0xD56966B4361A6685L /* 794 */, 0x277DA5C31221057DL /* 795 */, + 0x94D59893A43ACFF7L /* 796 */, 0x64F0C51CCDC02281L /* 797 */, + 0x3D33BCC4FF6189DBL /* 798 */, 0xE005CB184CE66AF1L /* 799 */, + 0xFF5CCD1D1DB99BEAL /* 800 */, 0xB0B854A7FE42980FL /* 801 */, + 0x7BD46A6A718D4B9FL /* 802 */, 0xD10FA8CC22A5FD8CL /* 803 */, + 0xD31484952BE4BD31L /* 804 */, 0xC7FA975FCB243847L /* 805 */, + 0x4886ED1E5846C407L /* 806 */, 0x28CDDB791EB70B04L /* 807 */, + 0xC2B00BE2F573417FL /* 808 */, 0x5C9590452180F877L /* 809 */, + 0x7A6BDDFFF370EB00L /* 810 */, 0xCE509E38D6D9D6A4L /* 811 */, + 0xEBEB0F00647FA702L /* 812 */, 0x1DCC06CF76606F06L /* 813 */, + 0xE4D9F28BA286FF0AL /* 814 */, 0xD85A305DC918C262L /* 815 */, + 0x475B1D8732225F54L /* 816 */, 0x2D4FB51668CCB5FEL /* 817 */, + 0xA679B9D9D72BBA20L /* 818 */, 0x53841C0D912D43A5L /* 819 */, + 0x3B7EAA48BF12A4E8L /* 820 */, 0x781E0E47F22F1DDFL /* 821 */, + 0xEFF20CE60AB50973L /* 822 */, 0x20D261D19DFFB742L /* 823 */, + 0x16A12B03062A2E39L /* 824 */, 0x1960EB2239650495L /* 825 */, + 0x251C16FED50EB8B8L /* 826 */, 0x9AC0C330F826016EL /* 827 */, + 0xED152665953E7671L /* 828 */, 0x02D63194A6369570L /* 829 */, + 0x5074F08394B1C987L /* 830 */, 0x70BA598C90B25CE1L /* 831 */, + 0x794A15810B9742F6L /* 832 */, 0x0D5925E9FCAF8C6CL /* 833 */, + 0x3067716CD868744EL /* 834 */, 0x910AB077E8D7731BL /* 835 */, + 0x6A61BBDB5AC42F61L /* 836 */, 0x93513EFBF0851567L /* 837 */, + 0xF494724B9E83E9D5L /* 838 */, 0xE887E1985C09648DL /* 839 */, + 0x34B1D3C675370CFDL /* 840 */, 0xDC35E433BC0D255DL /* 841 */, + 0xD0AAB84234131BE0L /* 842 */, 0x08042A50B48B7EAFL /* 843 */, + 0x9997C4EE44A3AB35L /* 844 */, 0x829A7B49201799D0L /* 845 */, + 0x263B8307B7C54441L /* 846 */, 0x752F95F4FD6A6CA6L /* 847 */, + 0x927217402C08C6E5L /* 848 */, 0x2A8AB754A795D9EEL /* 849 */, + 0xA442F7552F72943DL /* 850 */, 0x2C31334E19781208L /* 851 */, + 0x4FA98D7CEAEE6291L /* 852 */, 0x55C3862F665DB309L /* 853 */, + 0xBD0610175D53B1F3L /* 854 */, 0x46FE6CB840413F27L /* 855 */, + 0x3FE03792DF0CFA59L /* 856 */, 0xCFE700372EB85E8FL /* 857 */, + 0xA7BE29E7ADBCE118L /* 858 */, 0xE544EE5CDE8431DDL /* 859 */, + 0x8A781B1B41F1873EL /* 860 */, 0xA5C94C78A0D2F0E7L /* 861 */, + 0x39412E2877B60728L /* 862 */, 0xA1265EF3AFC9A62CL /* 863 */, + 0xBCC2770C6A2506C5L /* 864 */, 0x3AB66DD5DCE1CE12L /* 865 */, + 0xE65499D04A675B37L /* 866 */, 0x7D8F523481BFD216L /* 867 */, + 0x0F6F64FCEC15F389L /* 868 */, 0x74EFBE618B5B13C8L /* 869 */, + 0xACDC82B714273E1DL /* 870 */, 0xDD40BFE003199D17L /* 871 */, + 0x37E99257E7E061F8L /* 872 */, 0xFA52626904775AAAL /* 873 */, + 0x8BBBF63A463D56F9L /* 874 */, 0xF0013F1543A26E64L /* 875 */, + 0xA8307E9F879EC898L /* 876 */, 0xCC4C27A4150177CCL /* 877 */, + 0x1B432F2CCA1D3348L /* 878 */, 0xDE1D1F8F9F6FA013L /* 879 */, + 0x606602A047A7DDD6L /* 880 */, 
0xD237AB64CC1CB2C7L /* 881 */, + 0x9B938E7225FCD1D3L /* 882 */, 0xEC4E03708E0FF476L /* 883 */, + 0xFEB2FBDA3D03C12DL /* 884 */, 0xAE0BCED2EE43889AL /* 885 */, + 0x22CB8923EBFB4F43L /* 886 */, 0x69360D013CF7396DL /* 887 */, + 0x855E3602D2D4E022L /* 888 */, 0x073805BAD01F784CL /* 889 */, + 0x33E17A133852F546L /* 890 */, 0xDF4874058AC7B638L /* 891 */, + 0xBA92B29C678AA14AL /* 892 */, 0x0CE89FC76CFAADCDL /* 893 */, + 0x5F9D4E0908339E34L /* 894 */, 0xF1AFE9291F5923B9L /* 895 */, + 0x6E3480F60F4A265FL /* 896 */, 0xEEBF3A2AB29B841CL /* 897 */, + 0xE21938A88F91B4ADL /* 898 */, 0x57DFEFF845C6D3C3L /* 899 */, + 0x2F006B0BF62CAAF2L /* 900 */, 0x62F479EF6F75EE78L /* 901 */, + 0x11A55AD41C8916A9L /* 902 */, 0xF229D29084FED453L /* 903 */, + 0x42F1C27B16B000E6L /* 904 */, 0x2B1F76749823C074L /* 905 */, + 0x4B76ECA3C2745360L /* 906 */, 0x8C98F463B91691BDL /* 907 */, + 0x14BCC93CF1ADE66AL /* 908 */, 0x8885213E6D458397L /* 909 */, + 0x8E177DF0274D4711L /* 910 */, 0xB49B73B5503F2951L /* 911 */, + 0x10168168C3F96B6BL /* 912 */, 0x0E3D963B63CAB0AEL /* 913 */, + 0x8DFC4B5655A1DB14L /* 914 */, 0xF789F1356E14DE5CL /* 915 */, + 0x683E68AF4E51DAC1L /* 916 */, 0xC9A84F9D8D4B0FD9L /* 917 */, + 0x3691E03F52A0F9D1L /* 918 */, 0x5ED86E46E1878E80L /* 919 */, + 0x3C711A0E99D07150L /* 920 */, 0x5A0865B20C4E9310L /* 921 */, + 0x56FBFC1FE4F0682EL /* 922 */, 0xEA8D5DE3105EDF9BL /* 923 */, + 0x71ABFDB12379187AL /* 924 */, 0x2EB99DE1BEE77B9CL /* 925 */, + 0x21ECC0EA33CF4523L /* 926 */, 0x59A4D7521805C7A1L /* 927 */, + 0x3896F5EB56AE7C72L /* 928 */, 0xAA638F3DB18F75DCL /* 929 */, + 0x9F39358DABE9808EL /* 930 */, 0xB7DEFA91C00B72ACL /* 931 */, + 0x6B5541FD62492D92L /* 932 */, 0x6DC6DEE8F92E4D5BL /* 933 */, + 0x353F57ABC4BEEA7EL /* 934 */, 0x735769D6DA5690CEL /* 935 */, + 0x0A234AA642391484L /* 936 */, 0xF6F9508028F80D9DL /* 937 */, + 0xB8E319A27AB3F215L /* 938 */, 0x31AD9C1151341A4DL /* 939 */, + 0x773C22A57BEF5805L /* 940 */, 0x45C7561A07968633L /* 941 */, + 0xF913DA9E249DBE36L /* 942 */, 0xDA652D9B78A64C68L /* 943 */, + 0x4C27A97F3BC334EFL /* 944 */, 0x76621220E66B17F4L /* 945 */, + 0x967743899ACD7D0BL /* 946 */, 0xF3EE5BCAE0ED6782L /* 947 */, + 0x409F753600C879FCL /* 948 */, 0x06D09A39B5926DB6L /* 949 */, + 0x6F83AEB0317AC588L /* 950 */, 0x01E6CA4A86381F21L /* 951 */, + 0x66FF3462D19F3025L /* 952 */, 0x72207C24DDFD3BFBL /* 953 */, + 0x4AF6B6D3E2ECE2EBL /* 954 */, 0x9C994DBEC7EA08DEL /* 955 */, + 0x49ACE597B09A8BC4L /* 956 */, 0xB38C4766CF0797BAL /* 957 */, + 0x131B9373C57C2A75L /* 958 */, 0xB1822CCE61931E58L /* 959 */, + 0x9D7555B909BA1C0CL /* 960 */, 0x127FAFDD937D11D2L /* 961 */, + 0x29DA3BADC66D92E4L /* 962 */, 0xA2C1D57154C2ECBCL /* 963 */, + 0x58C5134D82F6FE24L /* 964 */, 0x1C3AE3515B62274FL /* 965 */, + 0xE907C82E01CB8126L /* 966 */, 0xF8ED091913E37FCBL /* 967 */, + 0x3249D8F9C80046C9L /* 968 */, 0x80CF9BEDE388FB63L /* 969 */, + 0x1881539A116CF19EL /* 970 */, 0x5103F3F76BD52457L /* 971 */, + 0x15B7E6F5AE47F7A8L /* 972 */, 0xDBD7C6DED47E9CCFL /* 973 */, + 0x44E55C410228BB1AL /* 974 */, 0xB647D4255EDB4E99L /* 975 */, + 0x5D11882BB8AAFC30L /* 976 */, 0xF5098BBB29D3212AL /* 977 */, + 0x8FB5EA14E90296B3L /* 978 */, 0x677B942157DD025AL /* 979 */, + 0xFB58E7C0A390ACB5L /* 980 */, 0x89D3674C83BD4A01L /* 981 */, + 0x9E2DA4DF4BF3B93BL /* 982 */, 0xFCC41E328CAB4829L /* 983 */, + 0x03F38C96BA582C52L /* 984 */, 0xCAD1BDBD7FD85DB2L /* 985 */, + 0xBBB442C16082AE83L /* 986 */, 0xB95FE86BA5DA9AB0L /* 987 */, + 0xB22E04673771A93FL /* 988 */, 0x845358C9493152D8L /* 989 */, + 0xBE2A488697B4541EL /* 990 */, 0x95A2DC2DD38E6966L /* 991 */, + 
0xC02C11AC923C852BL /* 992 */, 0x2388B1990DF2A87BL /* 993 */, + 0x7C8008FA1B4F37BEL /* 994 */, 0x1F70D0C84D54E503L /* 995 */, + 0x5490ADEC7ECE57D4L /* 996 */, 0x002B3C27D9063A3AL /* 997 */, + 0x7EAEA3848030A2BFL /* 998 */, 0xC602326DED2003C0L /* 999 */, + 0x83A7287D69A94086L /* 1000 */, 0xC57A5FCB30F57A8AL /* 1001 */, + 0xB56844E479EBE779L /* 1002 */, 0xA373B40F05DCBCE9L /* 1003 */, + 0xD71A786E88570EE2L /* 1004 */, 0x879CBACDBDE8F6A0L /* 1005 */, + 0x976AD1BCC164A32FL /* 1006 */, 0xAB21E25E9666D78BL /* 1007 */, + 0x901063AAE5E5C33CL /* 1008 */, 0x9818B34448698D90L /* 1009 */, + 0xE36487AE3E1E8ABBL /* 1010 */, 0xAFBDF931893BDCB4L /* 1011 */, + 0x6345A0DC5FBBD519L /* 1012 */, 0x8628FE269B9465CAL /* 1013 */, + 0x1E5D01603F9C51ECL /* 1014 */, 0x4DE44006A15049B7L /* 1015 */, + 0xBF6C70E5F776CBB1L /* 1016 */, 0x411218F2EF552BEDL /* 1017 */, + 0xCB0C0708705A36A3L /* 1018 */, 0xE74D14754F986044L /* 1019 */, + 0xCD56D9430EA8280EL /* 1020 */, 0xC12591D7535F5065L /* 1021 */, + 0xC83223F1720AEF96L /* 1022 */, 0xC3A0396F7363A51FL /* 1023 */ + }; +} diff --git a/src/org/opendedup/util/XMLUtils.java b/src/org/opendedup/util/XMLUtils.java index 3a4dc8e7d..8efbebc0a 100644 --- a/src/org/opendedup/util/XMLUtils.java +++ b/src/org/opendedup/util/XMLUtils.java @@ -1,3 +1,21 @@ +/******************************************************************************* + * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com + * + * This file is part of OpenDedupe SDFS. + * + * OpenDedupe SDFS is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * OpenDedupe SDFS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Foobar. If not, see . + *******************************************************************************/ package org.opendedup.util; import java.io.File;
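The constants ending above complete the fourth 256-entry S-box (T4, indices 768-1023) of what appears to be a Tiger-hash lookup table set. As an illustrative sketch only (the class and parameter names below are assumptions for clarity, not the project's actual API), this is how Tiger-style code typically consumes four such long[256] tables in a single round: the message word is XORed into one state word, and individual bytes of the result index T1-T4 to update the other two state words.

// Illustrative sketch only -- assumes four 256-entry long[] S-boxes (T1..T4)
// like the ones added in this diff; names here are hypothetical.
final class TigerRoundSketch {
    // One Tiger round on 64-bit state words (a, b, c) with message word x and
    // round multiplier mul (5, 7, or 9 in the Tiger specification).
    static long[] round(long a, long b, long c, long x, long mul,
                        long[] T1, long[] T2, long[] T3, long[] T4) {
        c ^= x;
        // Even-numbered bytes of c (0, 2, 4, 6) select entries subtracted from a.
        a -= T1[(int) ( c          & 0xFF)]
           ^ T2[(int) ((c >>> 16)  & 0xFF)]
           ^ T3[(int) ((c >>> 32)  & 0xFF)]
           ^ T4[(int) ((c >>> 48)  & 0xFF)];
        // Odd-numbered bytes of c (1, 3, 5, 7) select entries added to b.
        b += T4[(int) ((c >>>  8)  & 0xFF)]
           ^ T3[(int) ((c >>> 24)  & 0xFF)]
           ^ T2[(int) ((c >>> 40)  & 0xFF)]
           ^ T1[(int) ((c >>> 56)  & 0xFF)];
        b *= mul;
        return new long[] { a, b, c };
    }
}

In the standard Tiger compression function this round is applied once per 64-bit message word, eight times per pass, with three passes using multipliers 5, 7, and 9 and a key-schedule step between passes; whether the diffed class follows exactly that structure is not visible in this hunk.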