getRange() {
+ return Range.closed(pos, pos + nlen);
+ }
+
+ public String toString() {
+ String hashlocs = "[";
+ for (byte b : this.hashloc) {
+ hashlocs = hashlocs + Byte.toString(b) + " ";
+ }
+ hashlocs = hashlocs + "]";
+ return "pos=" + pos + " len=" + len + " offset=" + offset + " nlen="
+ + nlen + " ep=" + (pos + nlen) + " hash="
+ + StringUtils.getHexString(hash) + " hashlocs=" + hashlocs;
+ }
+
+ @Override
+ public void readExternal(ObjectInput in) throws IOException,
+ ClassNotFoundException {
+ in.readInt();
+ this.hash = new byte[in.readInt()];
+ in.read(this.hash);
+ this.hashloc = new byte[8];
+ in.read(this.hashloc);
+
+ }
+
+ @Override
+ public void writeExternal(ObjectOutput out) throws IOException {
+ ByteBuffer bf = ByteBuffer.wrap(new byte[4 + this.hash.length
+ + this.hashloc.length]);
+ bf.putInt(this.hash.length);
+ bf.put(hash);
+ bf.put(hashloc);
+ byte[] b = bf.array();
+ out.writeInt(b.length);
+ out.write(b);
+
+ }
+
+ public boolean isDup() {
+ return dup;
+ }
+
+ public void setDup(boolean dup) {
+ this.dup = dup;
+ }
+
}
\ No newline at end of file
diff --git a/src/org/opendedup/sdfs/io/MetaDataDedupFile.java b/src/org/opendedup/sdfs/io/MetaDataDedupFile.java
index 2363bff1b..45fb5ea38 100755
--- a/src/org/opendedup/sdfs/io/MetaDataDedupFile.java
+++ b/src/org/opendedup/sdfs/io/MetaDataDedupFile.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS.  If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io;
import java.io.File;
diff --git a/src/org/opendedup/sdfs/io/ReadAhead.java b/src/org/opendedup/sdfs/io/ReadAhead.java
index a15a55407..7ca676c70 100644
--- a/src/org/opendedup/sdfs/io/ReadAhead.java
+++ b/src/org/opendedup/sdfs/io/ReadAhead.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS.  If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io;
import java.io.IOException;
diff --git a/src/org/opendedup/sdfs/io/ReadOnlyException.java b/src/org/opendedup/sdfs/io/ReadOnlyException.java
index e92db99a6..206e76ecf 100644
--- a/src/org/opendedup/sdfs/io/ReadOnlyException.java
+++ b/src/org/opendedup/sdfs/io/ReadOnlyException.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS.  If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io;
public class ReadOnlyException extends Exception {
diff --git a/src/org/opendedup/sdfs/io/RestoreRequest.java b/src/org/opendedup/sdfs/io/RestoreRequest.java
index 2b8b0daf8..e163fc329 100644
--- a/src/org/opendedup/sdfs/io/RestoreRequest.java
+++ b/src/org/opendedup/sdfs/io/RestoreRequest.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS.  If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io;
import java.util.HashMap;
diff --git a/src/org/opendedup/sdfs/io/SeekType.java b/src/org/opendedup/sdfs/io/SeekType.java
index 5e4a109d1..45b4ec1d6 100644
--- a/src/org/opendedup/sdfs/io/SeekType.java
+++ b/src/org/opendedup/sdfs/io/SeekType.java
@@ -1,19 +1,37 @@
-package org.opendedup.sdfs.io;
-
-/**
- * Seek file position types.
- *
- *
- * Defines constants used by the SeekFile SMB request to specify where the seek
- * position is relative to.
- *
- * @author gkspencer
- */
-public class SeekType {
-
- // Seek file types
-
- public static final int StartOfFile = 0;
- public static final int CurrentPos = 1;
- public static final int EndOfFile = 2;
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS.  If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.io;
+
+/**
+ * Seek file position types.
+ *
+ *
+ * Defines constants used by the SeekFile SMB request to specify where the seek
+ * position is relative to.
+ *
+ * @author gkspencer
+ */
+public class SeekType {
+
+ // Seek file types
+
+ public static final int StartOfFile = 0;
+ public static final int CurrentPos = 1;
+ public static final int EndOfFile = 2;
}
\ No newline at end of file
diff --git a/src/org/opendedup/sdfs/io/SparseDataChunk.java b/src/org/opendedup/sdfs/io/SparseDataChunk.java
index 3b4d3096f..0f36ed86b 100644
--- a/src/org/opendedup/sdfs/io/SparseDataChunk.java
+++ b/src/org/opendedup/sdfs/io/SparseDataChunk.java
@@ -1,337 +1,355 @@
-package org.opendedup.sdfs.io;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.opendedup.collections.LongByteArrayMap;
-import org.opendedup.hashing.HashFunctionPool;
-import org.opendedup.logging.SDFSLogger;
-import org.opendedup.sdfs.Main;
-
-public class SparseDataChunk implements Externalizable {
- private ReentrantLock l = new ReentrantLock();
- private int doop;
- private int prevdoop;
- // private int RAWDL;
- private long fpos;
- private static final long serialVersionUID = -2782607786999940224L;
- public int len = 0;
- public byte flags = 0;
- public static final int RECONSTRUCTED = 1; // 0001
- private byte version = 0;
- private List ar = new ArrayList();
-
- public SparseDataChunk() {
-
- }
-
- public SparseDataChunk(byte[] rawData, byte version) throws IOException {
- this.version = version;
- this.marshall(rawData);
- }
-
- public SparseDataChunk(int doop, List ar, boolean localData,
- byte version) {
-
- this.version = version;
- this.doop = doop;
- this.ar = ar;
-
- }
-
- private void marshall(byte[] raw) throws IOException {
- ByteBuffer buf = ByteBuffer.wrap(raw);
- if (this.version == 0) {
- ar = new ArrayList(1);
- byte b = buf.get();
- if (b == 0)
- doop = 0;
- else
- doop = Main.CHUNK_LENGTH;
- HashLocPair p = new HashLocPair();
- p.hash = new byte[HashFunctionPool.hashLength];
- buf.get(p.hash);
- buf.get();
- p.hashloc = new byte[8];
- buf.get(p.hashloc);
- p.pos = 0;
- p.len = Main.CHUNK_LENGTH;
- p.nlen = p.len;
- p.offset = 0;
- ar.add(p);
- } else if (version == 1) {
- this.doop = buf.getInt();
- ar = new ArrayList();
- byte[] hash = new byte[HashFunctionPool.hashLength
- * HashFunctionPool.max_hash_cluster];
- buf.get(hash);
- byte[] hashlocs = new byte[8 * HashFunctionPool.max_hash_cluster];
- buf.get(hashlocs);
- ByteBuffer hb = ByteBuffer.wrap(hash);
- ByteBuffer hl = ByteBuffer.wrap(hashlocs);
- for (int z = 0; z < HashFunctionPool.max_hash_cluster; z++) {
- byte[] _hash = new byte[HashFunctionPool.hashLength];
- byte[] _hl = new byte[8];
- hl.get(_hl);
-
- hb.get(_hash);
- if (_hl[1] != 0) {
- HashLocPair p = new HashLocPair();
- p.hash = _hash;
- p.hashloc = _hl;
- p.pos = -1;
- ar.add(p);
- } else
- break;
- }
-
- } else {
- this.flags = buf.get();
- buf.getInt();
- int zlen = buf.getInt();
- ar = new ArrayList(zlen);
- for (int i = 0; i < zlen; i++) {
- byte[] b = new byte[HashLocPair.BAL];
- buf.get(b);
- HashLocPair p = new HashLocPair(b);
- ar.add(p);
- int ep = p.pos + p.len;
- if (ep > len)
- len = ep;
- }
- doop = buf.getInt();
- }
- }
-
- public int getDoop() {
- return doop;
- }
-
- public HashLocPair getWL(int _pos) throws IOException {
- l.lock();
- try {
- for (HashLocPair h : ar) {
- int ep = h.pos + h.nlen;
- if (_pos >= h.pos && _pos < ep) {
- HashLocPair _h = h.clone();
- int os = _pos - _h.pos;
- _h.offset += os;
- _h.nlen -= os;
- _h.pos = _pos;
- return _h;
- }
- }
- for (HashLocPair h : ar) {
- SDFSLogger.getLog().warn(h);
- }
- throw new IOException("Position not found " + _pos);
- } finally {
- l.unlock();
- }
-
- }
-
- public static void insertHashLocPair(List ar, HashLocPair p)
- throws IOException {
- int ep = p.pos + p.nlen;
- if (ep > Main.CHUNK_LENGTH)
- throw new IOException("Overflow ep=" + ep);
- ArrayList rm = null;
- ArrayList am = null;
- // SDFSLogger.getLog().info("p = " + p);
-
- for (HashLocPair h : ar) {
- int hep = h.pos + h.nlen;
- if (h.pos >= ep)
- break;
- else if (h.pos >= p.pos && hep <= ep) {
- // SDFSLogger.getLog().info("0 removing h = " + h);
- if (rm == null)
- rm = new ArrayList();
- rm.add(h);
- } else if (h.pos >= p.pos && h.pos < ep && hep > ep) {
- int no = ep - h.pos;
- // int oh = h.pos;
- h.pos = ep;
- h.offset += no;
- h.nlen -= no;
-
- // SDFSLogger.getLog().info("2 changing pos from " +oh
- // +" to " + h.pos + " offset = " + h.offset);
- } else if (h.pos <= p.pos && hep > p.pos) {
- if (hep > ep) {
- int offset = ep - h.pos;
- HashLocPair _h = h.clone();
- _h.offset += offset;
- _h.nlen -= offset;
- _h.pos = ep;
- if (!Main.chunkStoreLocal)
- _h.hashloc[0] = 1;
- else
- _h.setDup(true);
- if (am == null)
- am = new ArrayList();
-
- am.add(_h);
- }
- if (h.pos < p.pos) {
- h.nlen = (p.pos - h.pos);
- } else {
- if (rm == null)
- rm = new ArrayList();
- rm.add(h);
- }
- }
- if (h.isInvalid()) {
- SDFSLogger.getLog().error("h = " + h.toString());
- }
- }
- if (rm != null) {
- for (HashLocPair z : rm) {
- ar.remove(z);
- }
- }
- if (am != null) {
- for (HashLocPair z : am) {
- ar.add(z);
- }
- }
- if (!Main.chunkStoreLocal)
- p.hashloc[0] = 1;
- else
- p.setDup(true);
- ar.add(p);
-
- Collections.sort(ar);
- }
-
- public void putHash(HashLocPair p) throws IOException {
- l.lock();
- try {
- insertHashLocPair(ar, p);
- this.flags = RECONSTRUCTED;
- } finally {
- l.unlock();
- }
- }
-
- public void setRecontructed(boolean reconstructed) {
- if (reconstructed)
- this.flags = RECONSTRUCTED;
-
- }
-
- public byte[] getBytes() throws IOException {
- l.lock();
- try {
- if (this.version == 0) {
- ByteBuffer buf = ByteBuffer
- .wrap(new byte[LongByteArrayMap._FREE.length]);
- if (doop > 0)
- buf.put((byte) 1);
- else
- buf.put((byte) 0);
- buf.put(ar.get(0).hash);
- buf.put((byte) 0);
- buf.put(ar.get(0).hashloc);
- return buf.array();
- } else if (this.version == 1) {
- ByteBuffer buf = ByteBuffer
- .wrap(new byte[LongByteArrayMap._v1arrayLength]);
- buf.putInt(doop);
- for (HashLocPair p : ar) {
- buf.put(p.hash);
- }
- for (HashLocPair p : ar) {
- buf.put(p.hashloc);
- }
- return buf.array();
-
- } else {
- ByteBuffer buf = null;
- buf = ByteBuffer.wrap(new byte[1 + 4 + 4 + 4
- + (ar.size() * HashLocPair.BAL)]);
- this.prevdoop = this.doop;
- this.doop = 0;
- buf.put(this.flags);
- buf.putInt(buf.capacity());
- buf.putInt(this.ar.size());
- Collections.sort(this.ar);
- if (ar.size() > (LongByteArrayMap.MAX_ELEMENTS_PER_AR)) {
- SDFSLogger.getLog().error(
- "Buffer overflow ar size = " + ar.size()
- + " max size = "
- + (LongByteArrayMap.MAX_ELEMENTS_PER_AR));
- throw new IOException("Buffer overflow ar size = "
- + ar.size() + " max size = "
- + (LongByteArrayMap.MAX_ELEMENTS_PER_AR));
- }
- this.len = 0;
- for (HashLocPair p : ar) {
- boolean dup = p.isDup();
- if (!Main.chunkStoreLocal && p.hashloc[0] == 1)
- dup = true;
- if (dup)
- this.doop += p.nlen;
- buf.put(p.asArray());
- this.len += p.nlen;
- }
- buf.putInt(this.doop);
- return buf.array();
- }
- } finally {
- l.unlock();
- }
- }
-
- public void setDoop(int doop) {
- this.doop = doop;
- }
-
- public long getFpos() {
- return fpos;
- }
-
- public void setFpos(long fpos) {
- this.fpos = fpos;
- }
-
- public List getFingers() {
- return ar;
- }
-
- @Override
- public void readExternal(ObjectInput in) throws IOException,
- ClassNotFoundException {
- byte[] b = new byte[in.readInt()];
- this.marshall(b);
-
- }
-
- @Override
- public void writeExternal(ObjectOutput out) throws IOException {
- byte[] b = this.getBytes();
- out.writeInt(b.length);
- out.write(b);
-
- }
-
- public int getPrevdoop() {
- return prevdoop;
- }
-
- public boolean isRecontructed() {
- if (this.flags == 0)
- return false;
- else
- return true;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS.  If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.io;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.opendedup.collections.LongByteArrayMap;
+import org.opendedup.hashing.HashFunctionPool;
+import org.opendedup.logging.SDFSLogger;
+import org.opendedup.sdfs.Main;
+
+public class SparseDataChunk implements Externalizable {
+ private ReentrantLock l = new ReentrantLock();
+ private int doop;
+ private int prevdoop;
+ // private int RAWDL;
+ private long fpos;
+ private static final long serialVersionUID = -2782607786999940224L;
+ public int len = 0;
+ public byte flags = 0;
+ public static final int RECONSTRUCTED = 1; // 0001
+ private byte version = 0;
+ private List ar = new ArrayList();
+
+ public SparseDataChunk() {
+
+ }
+
+ public SparseDataChunk(byte[] rawData, byte version) throws IOException {
+ this.version = version;
+ this.marshall(rawData);
+ }
+
+ public SparseDataChunk(int doop, List ar, boolean localData,
+ byte version) {
+
+ this.version = version;
+ this.doop = doop;
+ this.ar = ar;
+
+ }
+
+ private void marshall(byte[] raw) throws IOException {
+ ByteBuffer buf = ByteBuffer.wrap(raw);
+ if (this.version == 0) {
+ ar = new ArrayList(1);
+ byte b = buf.get();
+ if (b == 0)
+ doop = 0;
+ else
+ doop = Main.CHUNK_LENGTH;
+ HashLocPair p = new HashLocPair();
+ p.hash = new byte[HashFunctionPool.hashLength];
+ buf.get(p.hash);
+ buf.get();
+ p.hashloc = new byte[8];
+ buf.get(p.hashloc);
+ p.pos = 0;
+ p.len = Main.CHUNK_LENGTH;
+ p.nlen = p.len;
+ p.offset = 0;
+ ar.add(p);
+ } else if (version == 1) {
+ this.doop = buf.getInt();
+ ar = new ArrayList();
+ byte[] hash = new byte[HashFunctionPool.hashLength
+ * HashFunctionPool.max_hash_cluster];
+ buf.get(hash);
+ byte[] hashlocs = new byte[8 * HashFunctionPool.max_hash_cluster];
+ buf.get(hashlocs);
+ ByteBuffer hb = ByteBuffer.wrap(hash);
+ ByteBuffer hl = ByteBuffer.wrap(hashlocs);
+ for (int z = 0; z < HashFunctionPool.max_hash_cluster; z++) {
+ byte[] _hash = new byte[HashFunctionPool.hashLength];
+ byte[] _hl = new byte[8];
+ hl.get(_hl);
+
+ hb.get(_hash);
+ if (_hl[1] != 0) {
+ HashLocPair p = new HashLocPair();
+ p.hash = _hash;
+ p.hashloc = _hl;
+ p.pos = -1;
+ ar.add(p);
+ } else
+ break;
+ }
+
+ } else {
+ this.flags = buf.get();
+ buf.getInt();
+ int zlen = buf.getInt();
+ ar = new ArrayList(zlen);
+ for (int i = 0; i < zlen; i++) {
+ byte[] b = new byte[HashLocPair.BAL];
+ buf.get(b);
+ HashLocPair p = new HashLocPair(b);
+ ar.add(p);
+ int ep = p.pos + p.len;
+ if (ep > len)
+ len = ep;
+ }
+ doop = buf.getInt();
+ }
+ }
+
+ public int getDoop() {
+ return doop;
+ }
+
+ public HashLocPair getWL(int _pos) throws IOException {
+ l.lock();
+ try {
+ for (HashLocPair h : ar) {
+ int ep = h.pos + h.nlen;
+ if (_pos >= h.pos && _pos < ep) {
+ HashLocPair _h = h.clone();
+ int os = _pos - _h.pos;
+ _h.offset += os;
+ _h.nlen -= os;
+ _h.pos = _pos;
+ return _h;
+ }
+ }
+ for (HashLocPair h : ar) {
+ SDFSLogger.getLog().warn(h);
+ }
+ throw new IOException("Position not found " + _pos);
+ } finally {
+ l.unlock();
+ }
+
+ }
+
+ public static void insertHashLocPair(List ar, HashLocPair p)
+ throws IOException {
+ int ep = p.pos + p.nlen;
+ if (ep > Main.CHUNK_LENGTH)
+ throw new IOException("Overflow ep=" + ep);
+ ArrayList rm = null;
+ ArrayList am = null;
+ // SDFSLogger.getLog().info("p = " + p);
+
+ for (HashLocPair h : ar) {
+ int hep = h.pos + h.nlen;
+ if (h.pos >= ep)
+ break;
+ else if (h.pos >= p.pos && hep <= ep) {
+ // SDFSLogger.getLog().info("0 removing h = " + h);
+ if (rm == null)
+ rm = new ArrayList();
+ rm.add(h);
+ } else if (h.pos >= p.pos && h.pos < ep && hep > ep) {
+ int no = ep - h.pos;
+ // int oh = h.pos;
+ h.pos = ep;
+ h.offset += no;
+ h.nlen -= no;
+
+ // SDFSLogger.getLog().info("2 changing pos from " +oh
+ // +" to " + h.pos + " offset = " + h.offset);
+ } else if (h.pos <= p.pos && hep > p.pos) {
+ if (hep > ep) {
+ int offset = ep - h.pos;
+ HashLocPair _h = h.clone();
+ _h.offset += offset;
+ _h.nlen -= offset;
+ _h.pos = ep;
+ if (!Main.chunkStoreLocal)
+ _h.hashloc[0] = 1;
+ else
+ _h.setDup(true);
+ if (am == null)
+ am = new ArrayList();
+
+ am.add(_h);
+ }
+ if (h.pos < p.pos) {
+ h.nlen = (p.pos - h.pos);
+ } else {
+ if (rm == null)
+ rm = new ArrayList();
+ rm.add(h);
+ }
+ }
+ if (h.isInvalid()) {
+ SDFSLogger.getLog().error("h = " + h.toString());
+ }
+ }
+ if (rm != null) {
+ for (HashLocPair z : rm) {
+ ar.remove(z);
+ }
+ }
+ if (am != null) {
+ for (HashLocPair z : am) {
+ ar.add(z);
+ }
+ }
+ if (!Main.chunkStoreLocal)
+ p.hashloc[0] = 1;
+ else
+ p.setDup(true);
+ ar.add(p);
+
+ Collections.sort(ar);
+ }
+
+ public void putHash(HashLocPair p) throws IOException {
+ l.lock();
+ try {
+ insertHashLocPair(ar, p);
+ this.flags = RECONSTRUCTED;
+ } finally {
+ l.unlock();
+ }
+ }
+
+ public void setRecontructed(boolean reconstructed) {
+ if (reconstructed)
+ this.flags = RECONSTRUCTED;
+
+ }
+
+ public byte[] getBytes() throws IOException {
+ l.lock();
+ try {
+ if (this.version == 0) {
+ ByteBuffer buf = ByteBuffer
+ .wrap(new byte[LongByteArrayMap._FREE.length]);
+ if (doop > 0)
+ buf.put((byte) 1);
+ else
+ buf.put((byte) 0);
+ buf.put(ar.get(0).hash);
+ buf.put((byte) 0);
+ buf.put(ar.get(0).hashloc);
+ return buf.array();
+ } else if (this.version == 1) {
+ ByteBuffer buf = ByteBuffer
+ .wrap(new byte[LongByteArrayMap._v1arrayLength]);
+ buf.putInt(doop);
+ for (HashLocPair p : ar) {
+ buf.put(p.hash);
+ }
+ for (HashLocPair p : ar) {
+ buf.put(p.hashloc);
+ }
+ return buf.array();
+
+ } else {
+ ByteBuffer buf = null;
+ buf = ByteBuffer.wrap(new byte[1 + 4 + 4 + 4
+ + (ar.size() * HashLocPair.BAL)]);
+ this.prevdoop = this.doop;
+ this.doop = 0;
+ buf.put(this.flags);
+ buf.putInt(buf.capacity());
+ buf.putInt(this.ar.size());
+ Collections.sort(this.ar);
+ if (ar.size() > (LongByteArrayMap.MAX_ELEMENTS_PER_AR)) {
+ SDFSLogger.getLog().error(
+ "Buffer overflow ar size = " + ar.size()
+ + " max size = "
+ + (LongByteArrayMap.MAX_ELEMENTS_PER_AR));
+ throw new IOException("Buffer overflow ar size = "
+ + ar.size() + " max size = "
+ + (LongByteArrayMap.MAX_ELEMENTS_PER_AR));
+ }
+ this.len = 0;
+ for (HashLocPair p : ar) {
+ boolean dup = p.isDup();
+ if (!Main.chunkStoreLocal && p.hashloc[0] == 1)
+ dup = true;
+ if (dup)
+ this.doop += p.nlen;
+ buf.put(p.asArray());
+ this.len += p.nlen;
+ }
+ buf.putInt(this.doop);
+ return buf.array();
+ }
+ } finally {
+ l.unlock();
+ }
+ }
+
+ public void setDoop(int doop) {
+ this.doop = doop;
+ }
+
+ public long getFpos() {
+ return fpos;
+ }
+
+ public void setFpos(long fpos) {
+ this.fpos = fpos;
+ }
+
+ public List getFingers() {
+ return ar;
+ }
+
+ @Override
+ public void readExternal(ObjectInput in) throws IOException,
+ ClassNotFoundException {
+ byte[] b = new byte[in.readInt()];
+ this.marshall(b);
+
+ }
+
+ @Override
+ public void writeExternal(ObjectOutput out) throws IOException {
+ byte[] b = this.getBytes();
+ out.writeInt(b.length);
+ out.write(b);
+
+ }
+
+ public int getPrevdoop() {
+ return prevdoop;
+ }
+
+ public boolean isRecontructed() {
+ if (this.flags == 0)
+ return false;
+ else
+ return true;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/io/SparseDedupFile.java b/src/org/opendedup/sdfs/io/SparseDedupFile.java
index 8f8e01791..5a8f77640 100644
--- a/src/org/opendedup/sdfs/io/SparseDedupFile.java
+++ b/src/org/opendedup/sdfs/io/SparseDedupFile.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Foobar. If not, see .
+ *******************************************************************************/
package org.opendedup.sdfs.io;
import java.io.File;
diff --git a/src/org/opendedup/sdfs/io/Volume.java b/src/org/opendedup/sdfs/io/Volume.java
index d3e76771e..6fe5a9ec4 100644
--- a/src/org/opendedup/sdfs/io/Volume.java
+++ b/src/org/opendedup/sdfs/io/Volume.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS.  If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io;
import java.io.File;
diff --git a/src/org/opendedup/sdfs/io/VolumeConfigWriterThread.java b/src/org/opendedup/sdfs/io/VolumeConfigWriterThread.java
index bff62e00d..14e53b14d 100644
--- a/src/org/opendedup/sdfs/io/VolumeConfigWriterThread.java
+++ b/src/org/opendedup/sdfs/io/VolumeConfigWriterThread.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS.  If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io;
import java.io.File;
diff --git a/src/org/opendedup/sdfs/io/VolumeFullThread.java b/src/org/opendedup/sdfs/io/VolumeFullThread.java
index c6c879e54..bb7a7947f 100644
--- a/src/org/opendedup/sdfs/io/VolumeFullThread.java
+++ b/src/org/opendedup/sdfs/io/VolumeFullThread.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS.  If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io;
import org.opendedup.logging.SDFSLogger;
diff --git a/src/org/opendedup/sdfs/io/VolumeListenerInterface.java b/src/org/opendedup/sdfs/io/VolumeListenerInterface.java
index a93d3fc5f..1fd4bb946 100644
--- a/src/org/opendedup/sdfs/io/VolumeListenerInterface.java
+++ b/src/org/opendedup/sdfs/io/VolumeListenerInterface.java
@@ -1,31 +1,49 @@
-package org.opendedup.sdfs.io;
-
-public interface VolumeListenerInterface {
- void actualWriteBytesChanged(long change, double current, Volume vol);
-
- void duplicateBytesChanged(long change, double current, Volume vol);
-
- void readBytesChanged(long change, double current, Volume vol);
-
- void rIOChanged(long change, double current, Volume vol);
-
- void wIOChanged(long change, double current, Volume vol);
-
- void virtualBytesWrittenChanged(long change, double current, Volume vol);
-
- void allowExternalSymLinksChanged(boolean symlink, Volume vol);
-
- void capacityChanged(long capacity, Volume vol);
-
- void currentSizeChanged(long capacity, Volume vol);
-
- void usePerMonChanged(boolean perf, Volume vol);
-
- void started(Volume vol);
-
- void mounted(Volume vol);
-
- void unmounted(Volume vol);
-
- void stopped(Volume vol);
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS.  If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.io;
+
+public interface VolumeListenerInterface {
+ void actualWriteBytesChanged(long change, double current, Volume vol);
+
+ void duplicateBytesChanged(long change, double current, Volume vol);
+
+ void readBytesChanged(long change, double current, Volume vol);
+
+ void rIOChanged(long change, double current, Volume vol);
+
+ void wIOChanged(long change, double current, Volume vol);
+
+ void virtualBytesWrittenChanged(long change, double current, Volume vol);
+
+ void allowExternalSymLinksChanged(boolean symlink, Volume vol);
+
+ void capacityChanged(long capacity, Volume vol);
+
+ void currentSizeChanged(long capacity, Volume vol);
+
+ void usePerMonChanged(boolean perf, Volume vol);
+
+ void started(Volume vol);
+
+ void mounted(Volume vol);
+
+ void unmounted(Volume vol);
+
+ void stopped(Volume vol);
+}
diff --git a/src/org/opendedup/sdfs/io/WritableCacheBuffer.java b/src/org/opendedup/sdfs/io/WritableCacheBuffer.java
index 82cbfb894..e4e8639fa 100755
--- a/src/org/opendedup/sdfs/io/WritableCacheBuffer.java
+++ b/src/org/opendedup/sdfs/io/WritableCacheBuffer.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io;
import java.io.IOException;
diff --git a/src/org/opendedup/sdfs/io/events/CloudSyncDLRequest.java b/src/org/opendedup/sdfs/io/events/CloudSyncDLRequest.java
index bc281a3c4..65767cb8d 100644
--- a/src/org/opendedup/sdfs/io/events/CloudSyncDLRequest.java
+++ b/src/org/opendedup/sdfs/io/events/CloudSyncDLRequest.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io.events;
public class CloudSyncDLRequest {
diff --git a/src/org/opendedup/sdfs/io/events/GenericEvent.java b/src/org/opendedup/sdfs/io/events/GenericEvent.java
index e8a21dbf2..3a2667d0e 100644
--- a/src/org/opendedup/sdfs/io/events/GenericEvent.java
+++ b/src/org/opendedup/sdfs/io/events/GenericEvent.java
@@ -1,35 +1,53 @@
-package org.opendedup.sdfs.io.events;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.opendedup.sdfs.Main;
-
-import com.google.gson.JsonObject;
-
-public class GenericEvent {
- private long sequence;
- private static final AtomicLong sq = new AtomicLong(0);
- private static final long MAX = Long.MAX_VALUE - (100000);
-
- public GenericEvent() {
- sequence = sq.incrementAndGet();
- if (sequence >= MAX) {
- synchronized (sq) {
- if (sequence >= MAX) {
- sq.set(0);
- }
- }
- }
-
- }
-
- public JsonObject toJSONObject() {
- JsonObject dataset = new JsonObject();
- dataset.addProperty("sequence", sequence);
- dataset.addProperty("volumeid", Long.toString(Main.DSEID));
- dataset.addProperty("timestamp",
- Long.toString(System.currentTimeMillis()));
- return dataset;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.io.events;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.opendedup.sdfs.Main;
+
+import com.google.gson.JsonObject;
+
+public class GenericEvent {
+ private long sequence;
+ private static final AtomicLong sq = new AtomicLong(0);
+ private static final long MAX = Long.MAX_VALUE - (100000);
+
+ public GenericEvent() {
+ sequence = sq.incrementAndGet();
+ if (sequence >= MAX) {
+ synchronized (sq) {
+ if (sequence >= MAX) {
+ sq.set(0);
+ }
+ }
+ }
+
+ }
+
+ public JsonObject toJSONObject() {
+ JsonObject dataset = new JsonObject();
+ dataset.addProperty("sequence", sequence);
+ dataset.addProperty("volumeid", Long.toString(Main.DSEID));
+ dataset.addProperty("timestamp",
+ Long.toString(System.currentTimeMillis()));
+ return dataset;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/io/events/MFileDeleted.java b/src/org/opendedup/sdfs/io/events/MFileDeleted.java
index 640ff6833..eeb64711f 100644
--- a/src/org/opendedup/sdfs/io/events/MFileDeleted.java
+++ b/src/org/opendedup/sdfs/io/events/MFileDeleted.java
@@ -1,44 +1,62 @@
-package org.opendedup.sdfs.io.events;
-
-import org.opendedup.sdfs.Main;
-import org.opendedup.sdfs.io.MetaDataDedupFile;
-
-import com.google.gson.FieldNamingPolicy;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonObject;
-
-public class MFileDeleted extends GenericEvent {
-
- public MetaDataDedupFile mf;
- public boolean dir;
- private static final int pl = Main.volume.getPath().length();
-
- public MFileDeleted(MetaDataDedupFile f) {
- super();
- this.mf = f;
- }
-
- public MFileDeleted(MetaDataDedupFile f, boolean dir) {
- super();
- this.mf = f;
- this.dir = dir;
- }
-
- public String toJSON() {
- JsonObject dataset = this.toJSONObject();
- dataset.addProperty("actionType", "mfileDelete");
- dataset.addProperty("object", mf.getPath().substring(pl));
- if (mf.isSymlink())
- dataset.addProperty("fileType", "symlink");
- else if (this.dir)
- dataset.addProperty("fileType", "dir");
- else
- dataset.addProperty("fileType", "file");
- Gson gson = new GsonBuilder().setPrettyPrinting().serializeNulls()
- .setFieldNamingPolicy(FieldNamingPolicy.UPPER_CAMEL_CASE)
- .create();
- return gson.toJson(dataset);
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.io.events;
+
+import org.opendedup.sdfs.Main;
+import org.opendedup.sdfs.io.MetaDataDedupFile;
+
+import com.google.gson.FieldNamingPolicy;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonObject;
+
+public class MFileDeleted extends GenericEvent {
+
+ public MetaDataDedupFile mf;
+ public boolean dir;
+ private static final int pl = Main.volume.getPath().length();
+
+ public MFileDeleted(MetaDataDedupFile f) {
+ super();
+ this.mf = f;
+ }
+
+ public MFileDeleted(MetaDataDedupFile f, boolean dir) {
+ super();
+ this.mf = f;
+ this.dir = dir;
+ }
+
+ public String toJSON() {
+ JsonObject dataset = this.toJSONObject();
+ dataset.addProperty("actionType", "mfileDelete");
+ dataset.addProperty("object", mf.getPath().substring(pl));
+ if (mf.isSymlink())
+ dataset.addProperty("fileType", "symlink");
+ else if (this.dir)
+ dataset.addProperty("fileType", "dir");
+ else
+ dataset.addProperty("fileType", "file");
+ Gson gson = new GsonBuilder().setPrettyPrinting().serializeNulls()
+ .setFieldNamingPolicy(FieldNamingPolicy.UPPER_CAMEL_CASE)
+ .create();
+ return gson.toJson(dataset);
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/io/events/MFileRenamed.java b/src/org/opendedup/sdfs/io/events/MFileRenamed.java
index 871840e2d..093f1a160 100644
--- a/src/org/opendedup/sdfs/io/events/MFileRenamed.java
+++ b/src/org/opendedup/sdfs/io/events/MFileRenamed.java
@@ -1,42 +1,60 @@
-package org.opendedup.sdfs.io.events;
-
-import org.opendedup.sdfs.Main;
-import org.opendedup.sdfs.io.MetaDataDedupFile;
-
-import com.google.gson.FieldNamingPolicy;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonObject;
-
-public class MFileRenamed extends GenericEvent {
-
- public MetaDataDedupFile mf;
- public String from;
- public String to;
- private static final int pl = Main.volume.getPath().length();
-
- public MFileRenamed(MetaDataDedupFile f, String from, String to) {
- super();
- this.mf = f;
- this.from = from;
- this.to = to;
- }
-
- public String toJSON() {
- JsonObject dataset = this.toJSONObject();
- dataset.addProperty("actionType", "mfileRename");
- dataset.addProperty("object", mf.getPath().substring(pl));
- dataset.addProperty("from", this.from);
- dataset.addProperty("to", this.to);
- if (mf.isSymlink())
- dataset.addProperty("fileType", "symlink");
- else if (mf.isDirectory())
- dataset.addProperty("fileType", "dir");
- else
- dataset.addProperty("fileType", "file");
- Gson gson = new GsonBuilder().setPrettyPrinting().serializeNulls()
- .setFieldNamingPolicy(FieldNamingPolicy.UPPER_CAMEL_CASE)
- .create();
- return gson.toJson(dataset);
- }
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.io.events;
+
+import org.opendedup.sdfs.Main;
+import org.opendedup.sdfs.io.MetaDataDedupFile;
+
+import com.google.gson.FieldNamingPolicy;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonObject;
+
+public class MFileRenamed extends GenericEvent {
+
+ public MetaDataDedupFile mf;
+ public String from;
+ public String to;
+ private static final int pl = Main.volume.getPath().length();
+
+ public MFileRenamed(MetaDataDedupFile f, String from, String to) {
+ super();
+ this.mf = f;
+ this.from = from;
+ this.to = to;
+ }
+
+ public String toJSON() {
+ JsonObject dataset = this.toJSONObject();
+ dataset.addProperty("actionType", "mfileRename");
+ dataset.addProperty("object", mf.getPath().substring(pl));
+ dataset.addProperty("from", this.from);
+ dataset.addProperty("to", this.to);
+ if (mf.isSymlink())
+ dataset.addProperty("fileType", "symlink");
+ else if (mf.isDirectory())
+ dataset.addProperty("fileType", "dir");
+ else
+ dataset.addProperty("fileType", "file");
+ Gson gson = new GsonBuilder().setPrettyPrinting().serializeNulls()
+ .setFieldNamingPolicy(FieldNamingPolicy.UPPER_CAMEL_CASE)
+ .create();
+ return gson.toJson(dataset);
+ }
+}
diff --git a/src/org/opendedup/sdfs/io/events/MFileSync.java b/src/org/opendedup/sdfs/io/events/MFileSync.java
index 91254c169..7d3fc20ef 100644
--- a/src/org/opendedup/sdfs/io/events/MFileSync.java
+++ b/src/org/opendedup/sdfs/io/events/MFileSync.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io.events;
import org.opendedup.sdfs.io.MetaDataDedupFile;
diff --git a/src/org/opendedup/sdfs/io/events/MFileWritten.java b/src/org/opendedup/sdfs/io/events/MFileWritten.java
index 3c91a8751..8e2064bcd 100644
--- a/src/org/opendedup/sdfs/io/events/MFileWritten.java
+++ b/src/org/opendedup/sdfs/io/events/MFileWritten.java
@@ -1,38 +1,56 @@
-package org.opendedup.sdfs.io.events;
-
-import org.opendedup.sdfs.Main;
-import org.opendedup.sdfs.io.MetaDataDedupFile;
-
-import com.google.gson.FieldNamingPolicy;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonObject;
-
-public class MFileWritten extends GenericEvent {
- private static final int pl = Main.volume.getPath().length();
- public MetaDataDedupFile mf;
-
- public MFileWritten(MetaDataDedupFile f) {
- super();
- this.mf = f;
- }
-
- public String toJSON() {
- JsonObject dataset = this.toJSONObject();
- dataset.addProperty("actionType", "mfileWritten");
- dataset.addProperty("object", mf.getPath().substring(pl));
-
- if (mf.isSymlink())
- dataset.addProperty("fileType", "symlink");
- else if (mf.isDirectory())
- dataset.addProperty("fileType", "dir");
- else {
- dataset.addProperty("fileType", "file");
- dataset.addProperty("size", mf.length());
- }
- Gson gson = new GsonBuilder().setPrettyPrinting().serializeNulls()
- .setFieldNamingPolicy(FieldNamingPolicy.UPPER_CAMEL_CASE)
- .create();
- return gson.toJson(dataset);
- }
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.io.events;
+
+import org.opendedup.sdfs.Main;
+import org.opendedup.sdfs.io.MetaDataDedupFile;
+
+import com.google.gson.FieldNamingPolicy;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonObject;
+
+public class MFileWritten extends GenericEvent {
+ private static final int pl = Main.volume.getPath().length();
+ public MetaDataDedupFile mf;
+
+ public MFileWritten(MetaDataDedupFile f) {
+ super();
+ this.mf = f;
+ }
+
+ public String toJSON() {
+ JsonObject dataset = this.toJSONObject();
+ dataset.addProperty("actionType", "mfileWritten");
+ dataset.addProperty("object", mf.getPath().substring(pl));
+
+ if (mf.isSymlink())
+ dataset.addProperty("fileType", "symlink");
+ else if (mf.isDirectory())
+ dataset.addProperty("fileType", "dir");
+ else {
+ dataset.addProperty("fileType", "file");
+ dataset.addProperty("size", mf.length());
+ }
+ Gson gson = new GsonBuilder().setPrettyPrinting().serializeNulls()
+ .setFieldNamingPolicy(FieldNamingPolicy.UPPER_CAMEL_CASE)
+ .create();
+ return gson.toJson(dataset);
+ }
+}
diff --git a/src/org/opendedup/sdfs/io/events/SFileDeleted.java b/src/org/opendedup/sdfs/io/events/SFileDeleted.java
index 038c50462..e2feed19d 100644
--- a/src/org/opendedup/sdfs/io/events/SFileDeleted.java
+++ b/src/org/opendedup/sdfs/io/events/SFileDeleted.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io.events;
import java.io.File;
diff --git a/src/org/opendedup/sdfs/io/events/SFileSync.java b/src/org/opendedup/sdfs/io/events/SFileSync.java
index 970ed6164..99316152d 100644
--- a/src/org/opendedup/sdfs/io/events/SFileSync.java
+++ b/src/org/opendedup/sdfs/io/events/SFileSync.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io.events;
import java.io.File;
diff --git a/src/org/opendedup/sdfs/io/events/SFileWritten.java b/src/org/opendedup/sdfs/io/events/SFileWritten.java
index 9d35034b9..80efd6b0c 100644
--- a/src/org/opendedup/sdfs/io/events/SFileWritten.java
+++ b/src/org/opendedup/sdfs/io/events/SFileWritten.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io.events;
import org.opendedup.sdfs.Main;
diff --git a/src/org/opendedup/sdfs/io/events/VolumeWritten.java b/src/org/opendedup/sdfs/io/events/VolumeWritten.java
index 467a20b90..72db10711 100644
--- a/src/org/opendedup/sdfs/io/events/VolumeWritten.java
+++ b/src/org/opendedup/sdfs/io/events/VolumeWritten.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.io.events;
import org.opendedup.sdfs.io.Volume;
diff --git a/src/org/opendedup/sdfs/mgmt/cli/ProcessConnectedVolumes.java b/src/org/opendedup/sdfs/mgmt/cli/ProcessConnectedVolumes.java
index 9ececd80d..b4adf69f2 100644
--- a/src/org/opendedup/sdfs/mgmt/cli/ProcessConnectedVolumes.java
+++ b/src/org/opendedup/sdfs/mgmt/cli/ProcessConnectedVolumes.java
@@ -2,7 +2,7 @@
import java.util.Formatter;
-import org.opendedup.util.XMLUtils;
+
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
@@ -19,7 +19,7 @@ public static void runCmd() {
formatter.format("file=%s&cmd=connectedvolumes", "null");
Document doc = MgmtServerConnection.getResponse(sb.toString());
Element root = doc.getDocumentElement();
- System.out.println(XMLUtils.toXMLString(doc));
+ //System.out.println(XMLUtils.toXMLString(doc));
formatter.close();
if (root.getAttribute("status").equals("failed"))
System.out.println(root.getAttribute("msg"));
diff --git a/src/org/opendedup/sdfs/mgmt/websocket/DDBUpdate.java b/src/org/opendedup/sdfs/mgmt/websocket/DDBUpdate.java
index c463d52ef..6c7d41dc1 100644
--- a/src/org/opendedup/sdfs/mgmt/websocket/DDBUpdate.java
+++ b/src/org/opendedup/sdfs/mgmt/websocket/DDBUpdate.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.mgmt.websocket;
import java.io.IOException;
diff --git a/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpdate.java b/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpdate.java
index a38fbb4c0..829ceca33 100644
--- a/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpdate.java
+++ b/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpdate.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.mgmt.websocket;
import org.simpleframework.http.socket.Session;
diff --git a/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpload.java b/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpload.java
index eef661296..102af6835 100644
--- a/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpload.java
+++ b/src/org/opendedup/sdfs/mgmt/websocket/MetaDataUpload.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.mgmt.websocket;
import org.simpleframework.http.socket.Session;
diff --git a/src/org/opendedup/sdfs/mgmt/websocket/PingService.java b/src/org/opendedup/sdfs/mgmt/websocket/PingService.java
index eae1fab9c..d02500cc3 100644
--- a/src/org/opendedup/sdfs/mgmt/websocket/PingService.java
+++ b/src/org/opendedup/sdfs/mgmt/websocket/PingService.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.mgmt.websocket;
import java.io.IOException;
diff --git a/src/org/opendedup/sdfs/monitor/IOMeter.java b/src/org/opendedup/sdfs/monitor/IOMeter.java
index 3d1e6e506..4306ffd5e 100755
--- a/src/org/opendedup/sdfs/monitor/IOMeter.java
+++ b/src/org/opendedup/sdfs/monitor/IOMeter.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.monitor;
import java.io.BufferedOutputStream;
diff --git a/src/org/opendedup/sdfs/monitor/IOMonitor.java b/src/org/opendedup/sdfs/monitor/IOMonitor.java
index 3b284ef0b..732cb20a6 100755
--- a/src/org/opendedup/sdfs/monitor/IOMonitor.java
+++ b/src/org/opendedup/sdfs/monitor/IOMonitor.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.monitor;
import java.nio.ByteBuffer;
diff --git a/src/org/opendedup/sdfs/monitor/IOMonitorListener.java b/src/org/opendedup/sdfs/monitor/IOMonitorListener.java
index 27c7792c8..8e8f0a6c5 100644
--- a/src/org/opendedup/sdfs/monitor/IOMonitorListener.java
+++ b/src/org/opendedup/sdfs/monitor/IOMonitorListener.java
@@ -1,45 +1,63 @@
-package org.opendedup.sdfs.monitor;
-
-public interface IOMonitorListener {
- void actualBytesWrittenChanged(long total, int change, IOMonitor mon);
-
- void bytesReadChanged(long total, int change, IOMonitor mon);
-
- void duplicateBlockChanged(long total, IOMonitor mon);
-
- void rioChanged(long total, IOMonitor mon);
-
- void virtualBytesWrittenChanged(long total, int change, IOMonitor mon);
-
- void wioChanged(long total, IOMonitor mon);
-
- void clearAllCountersExecuted(long total, IOMonitor mon);
-
- void clearFileCountersExecuted(long total, IOMonitor mon);
-
- void removeDuplicateBlockChanged(long total, IOMonitor mon);
-
- void actualBytesWrittenChanged(long total, long change, IOMonitor mon);
-
- void bytesReadChanged(long total, long change, IOMonitor mon);
-
- void duplicateBlockChanged(long total, long change, IOMonitor mon);
-
- void virtualBytesWrittenChanged(long total, long change, IOMonitor mon);
-
- void riopsChanged(int iops, int changed, IOMonitor mon);
-
- void wiopsChanged(int iops, int changed, IOMonitor mon);
-
- void iopsChanged(int iops, int changed, IOMonitor mon);
-
- void rmbpsChanged(long mbps, int changed, IOMonitor mon);
-
- void wmbpsChanged(long mbps, int changed, IOMonitor mon);
-
- void mbpsChanged(long mbps, int changed, IOMonitor mon);
-
- void qosChanged(int old, int newQos, IOMonitor mon);
-
- void ioProfileChanged(String old, String newProf, IOMonitor mon);
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.monitor;
+
+public interface IOMonitorListener {
+ void actualBytesWrittenChanged(long total, int change, IOMonitor mon);
+
+ void bytesReadChanged(long total, int change, IOMonitor mon);
+
+ void duplicateBlockChanged(long total, IOMonitor mon);
+
+ void rioChanged(long total, IOMonitor mon);
+
+ void virtualBytesWrittenChanged(long total, int change, IOMonitor mon);
+
+ void wioChanged(long total, IOMonitor mon);
+
+ void clearAllCountersExecuted(long total, IOMonitor mon);
+
+ void clearFileCountersExecuted(long total, IOMonitor mon);
+
+ void removeDuplicateBlockChanged(long total, IOMonitor mon);
+
+ void actualBytesWrittenChanged(long total, long change, IOMonitor mon);
+
+ void bytesReadChanged(long total, long change, IOMonitor mon);
+
+ void duplicateBlockChanged(long total, long change, IOMonitor mon);
+
+ void virtualBytesWrittenChanged(long total, long change, IOMonitor mon);
+
+ void riopsChanged(int iops, int changed, IOMonitor mon);
+
+ void wiopsChanged(int iops, int changed, IOMonitor mon);
+
+ void iopsChanged(int iops, int changed, IOMonitor mon);
+
+ void rmbpsChanged(long mbps, int changed, IOMonitor mon);
+
+ void wmbpsChanged(long mbps, int changed, IOMonitor mon);
+
+ void mbpsChanged(long mbps, int changed, IOMonitor mon);
+
+ void qosChanged(int old, int newQos, IOMonitor mon);
+
+ void ioProfileChanged(String old, String newProf, IOMonitor mon);
+}
diff --git a/src/org/opendedup/sdfs/monitor/VolumeIOMeter.java b/src/org/opendedup/sdfs/monitor/VolumeIOMeter.java
index b2cff5b5e..be16b1712 100644
--- a/src/org/opendedup/sdfs/monitor/VolumeIOMeter.java
+++ b/src/org/opendedup/sdfs/monitor/VolumeIOMeter.java
@@ -1,103 +1,121 @@
-package org.opendedup.sdfs.monitor;
-
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.RollingFileAppender;
-import org.opendedup.logging.JSONVolPerfLayout;
-import org.opendedup.logging.SDFSLogger;
-import org.opendedup.sdfs.io.Volume;
-import org.opendedup.sdfs.servers.HCServiceProxy;
-import org.slf4j.MDC;
-
-import com.sun.management.UnixOperatingSystemMXBean;
-
-public class VolumeIOMeter implements Runnable {
-
- private Volume vol;
- private long bytesRead = 0, bytesWritten = 0, virtualBytesWritten = 0,
- RIOPS = 0, WIOPS = 0, duplicateBytes = 0, dseSz = 0, dseCompSz = 0;
- private double pbytesRead = 0, pbytesWritten = 0, pvirtualBytesWritten = 0,
- pRIOPS = 0, pWIOPS = 0, pduplicateBytes = 0;
- private Logger log = Logger.getLogger("volperflog");
- private boolean closed = false;
- Thread th = null;
- UnixOperatingSystemMXBean perf = (UnixOperatingSystemMXBean) ManagementFactory
- .getOperatingSystemMXBean();
-
- public VolumeIOMeter(Volume vol) {
- RollingFileAppender app = null;
- try {
- app = new RollingFileAppender(new JSONVolPerfLayout(),
- vol.getPerfMonFile(), true);
- app.setMaxBackupIndex(2);
- app.setMaxFileSize("10MB");
- } catch (IOException e) {
- log.debug("unable to change appender", e);
- }
- this.vol = vol;
- log.addAppender(app);
- log.setLevel(Level.INFO);
- th = new Thread(this);
- th.start();
- }
-
- public void run() {
- while (!closed) {
- try {
- Thread.sleep(15 * 1000);
-
- this.calPerf();
- } catch (Exception e) {
- SDFSLogger.getLog().warn(
- "Exception in " + this.getClass().getName(), e);
- this.closed = true;
- }
- }
- }
-
- private void calPerf() {
- this.bytesRead = (long) (vol.getReadBytes() - this.pbytesRead);
- this.pbytesRead = vol.getReadBytes();
- MDC.put("bytesRead", Long.toString(bytesRead));
- this.bytesWritten = (long) (vol.getActualWriteBytes() - this.pbytesWritten);
- this.pbytesWritten = vol.getActualWriteBytes();
- MDC.put("bytesWritten", Long.toString(this.bytesWritten));
- this.duplicateBytes = (long) (vol.getDuplicateBytes() - this.pduplicateBytes);
- this.pduplicateBytes = vol.getDuplicateBytes();
- MDC.put("duplicateBytes", Long.toString(this.duplicateBytes));
- this.virtualBytesWritten = (long) (vol.getVirtualBytesWritten() - this.pvirtualBytesWritten);
- this.pvirtualBytesWritten = vol.getVirtualBytesWritten();
- MDC.put("virtualBytesWritten", Long.toString(this.virtualBytesWritten));
- this.RIOPS = (long) (vol.getReadOperations() - this.pRIOPS);
- this.pRIOPS = vol.getReadOperations();
- MDC.put("RIOPS", Long.toString(this.RIOPS));
- this.WIOPS = (long) (vol.getWriteOperations() - this.pWIOPS);
- this.pWIOPS = vol.getWriteOperations();
- this.dseSz = HCServiceProxy.getDSESize();
- this.dseCompSz = HCServiceProxy.getDSECompressedSize();
- MDC.put("dseSz", Long.toString(this.dseSz));
- MDC.put("dseCompSz", Long.toString(this.dseCompSz));
- MDC.put("WIOPS", Long.toString(this.WIOPS));
- MDC.put("sdfsCpuLoad", Double.toString(perf.getProcessCpuLoad()));
- MDC.put("sdfsCpuTime", Double.toString(perf.getProcessCpuTime()));
- MDC.put("systemCpuLoad", Double.toString(perf.getSystemCpuLoad()));
- MDC.put("systemCpuAverage",
- Double.toString(perf.getSystemLoadAverage()));
- MDC.put("freeMemory", Long.toString(perf.getFreePhysicalMemorySize()));
- MDC.put("totalMemory", Long.toString(perf.getTotalPhysicalMemorySize()));
- MDC.put("freeSwap", Long.toString(perf.getFreeSwapSpaceSize()));
- MDC.put("totalSwap", Long.toString(perf.getTotalSwapSpaceSize()));
- log.info(vol.getName());
- MDC.clear();
- }
-
- public void close() {
- this.closed = true;
- th.interrupt();
-
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.monitor;
+
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.RollingFileAppender;
+import org.opendedup.logging.JSONVolPerfLayout;
+import org.opendedup.logging.SDFSLogger;
+import org.opendedup.sdfs.io.Volume;
+import org.opendedup.sdfs.servers.HCServiceProxy;
+import org.slf4j.MDC;
+
+import com.sun.management.UnixOperatingSystemMXBean;
+
+public class VolumeIOMeter implements Runnable {
+
+ private Volume vol;
+ private long bytesRead = 0, bytesWritten = 0, virtualBytesWritten = 0,
+ RIOPS = 0, WIOPS = 0, duplicateBytes = 0, dseSz = 0, dseCompSz = 0;
+ private double pbytesRead = 0, pbytesWritten = 0, pvirtualBytesWritten = 0,
+ pRIOPS = 0, pWIOPS = 0, pduplicateBytes = 0;
+ private Logger log = Logger.getLogger("volperflog");
+ private boolean closed = false;
+ Thread th = null;
+ UnixOperatingSystemMXBean perf = (UnixOperatingSystemMXBean) ManagementFactory
+ .getOperatingSystemMXBean();
+
+ public VolumeIOMeter(Volume vol) {
+ RollingFileAppender app = null;
+ try {
+ app = new RollingFileAppender(new JSONVolPerfLayout(),
+ vol.getPerfMonFile(), true);
+ app.setMaxBackupIndex(2);
+ app.setMaxFileSize("10MB");
+ } catch (IOException e) {
+ log.debug("unable to change appender", e);
+ }
+ this.vol = vol;
+ log.addAppender(app);
+ log.setLevel(Level.INFO);
+ th = new Thread(this);
+ th.start();
+ }
+
+ public void run() {
+ while (!closed) {
+ try {
+ Thread.sleep(15 * 1000);
+
+ this.calPerf();
+ } catch (Exception e) {
+ SDFSLogger.getLog().warn(
+ "Exception in " + this.getClass().getName(), e);
+ this.closed = true;
+ }
+ }
+ }
+
+ private void calPerf() {
+ this.bytesRead = (long) (vol.getReadBytes() - this.pbytesRead);
+ this.pbytesRead = vol.getReadBytes();
+ MDC.put("bytesRead", Long.toString(bytesRead));
+ this.bytesWritten = (long) (vol.getActualWriteBytes() - this.pbytesWritten);
+ this.pbytesWritten = vol.getActualWriteBytes();
+ MDC.put("bytesWritten", Long.toString(this.bytesWritten));
+ this.duplicateBytes = (long) (vol.getDuplicateBytes() - this.pduplicateBytes);
+ this.pduplicateBytes = vol.getDuplicateBytes();
+ MDC.put("duplicateBytes", Long.toString(this.duplicateBytes));
+ this.virtualBytesWritten = (long) (vol.getVirtualBytesWritten() - this.pvirtualBytesWritten);
+ this.pvirtualBytesWritten = vol.getVirtualBytesWritten();
+ MDC.put("virtualBytesWritten", Long.toString(this.virtualBytesWritten));
+ this.RIOPS = (long) (vol.getReadOperations() - this.pRIOPS);
+ this.pRIOPS = vol.getReadOperations();
+ MDC.put("RIOPS", Long.toString(this.RIOPS));
+ this.WIOPS = (long) (vol.getWriteOperations() - this.pWIOPS);
+ this.pWIOPS = vol.getWriteOperations();
+ this.dseSz = HCServiceProxy.getDSESize();
+ this.dseCompSz = HCServiceProxy.getDSECompressedSize();
+ MDC.put("dseSz", Long.toString(this.dseSz));
+ MDC.put("dseCompSz", Long.toString(this.dseCompSz));
+ MDC.put("WIOPS", Long.toString(this.WIOPS));
+ MDC.put("sdfsCpuLoad", Double.toString(perf.getProcessCpuLoad()));
+ MDC.put("sdfsCpuTime", Double.toString(perf.getProcessCpuTime()));
+ MDC.put("systemCpuLoad", Double.toString(perf.getSystemCpuLoad()));
+ MDC.put("systemCpuAverage",
+ Double.toString(perf.getSystemLoadAverage()));
+ MDC.put("freeMemory", Long.toString(perf.getFreePhysicalMemorySize()));
+ MDC.put("totalMemory", Long.toString(perf.getTotalPhysicalMemorySize()));
+ MDC.put("freeSwap", Long.toString(perf.getFreeSwapSpaceSize()));
+ MDC.put("totalSwap", Long.toString(perf.getTotalSwapSpaceSize()));
+ log.info(vol.getName());
+ MDC.clear();
+ }
+
+ public void close() {
+ this.closed = true;
+ th.interrupt();
+
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/AsyncCmdListener.java b/src/org/opendedup/sdfs/network/AsyncCmdListener.java
index 6afb0c813..784d283a9 100644
--- a/src/org/opendedup/sdfs/network/AsyncCmdListener.java
+++ b/src/org/opendedup/sdfs/network/AsyncCmdListener.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.network;
public interface AsyncCmdListener {
diff --git a/src/org/opendedup/sdfs/network/BulkFetchChunkCmd.java b/src/org/opendedup/sdfs/network/BulkFetchChunkCmd.java
index 57cba3504..dff8127b5 100644
--- a/src/org/opendedup/sdfs/network/BulkFetchChunkCmd.java
+++ b/src/org/opendedup/sdfs/network/BulkFetchChunkCmd.java
@@ -1,86 +1,104 @@
-package org.opendedup.sdfs.network;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.util.ArrayList;
-
-import org.opendedup.logging.SDFSLogger;
-import org.opendedup.sdfs.filestore.HashChunk;
-import org.opendedup.util.CompressionUtils;
-
-public class BulkFetchChunkCmd implements IOCmd {
- ArrayList hashes;
- ArrayList chunks;
- boolean written = false;
-
- public BulkFetchChunkCmd(ArrayList hashes) {
- this.hashes = hashes;
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public void executeCmd(DataInputStream is, DataOutputStream os)
- throws IOException {
-
- ByteArrayOutputStream bos = null;
- bos = new ByteArrayOutputStream();
- ObjectOutputStream obj_out = new ObjectOutputStream(bos);
- obj_out.writeObject(hashes);
- byte[] sh = CompressionUtils.compressSnappy(bos.toByteArray());
- // byte [] sh = bos.toByteArray();
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug("Sent bulkfetch [" + sh.length + "]");
- os.write(NetworkCMDS.BULK_FETCH_CMD);
- os.writeInt(sh.length);
- os.write(sh);
- os.flush();
- bos.close();
- obj_out.close();
- sh = null;
- obj_out = null;
- bos = null;
- int size = is.readInt();
- if (size == -1) {
- throw new IOException("One of the Requested hashes does not exist.");
- }
- byte[] us = new byte[size];
- is.readFully(us);
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug("Recieved bulkfetch [" + us.length + "]");
- us = CompressionUtils.decompressSnappy(us);
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug(
- "Recieved bulkfetch uncompressed [" + us.length + "]");
- ByteArrayInputStream bin = new ByteArrayInputStream(us);
- ObjectInputStream obj_in = new ObjectInputStream(bin);
- try {
- chunks = (ArrayList) obj_in.readObject();
- } catch (ClassNotFoundException e) {
- throw new IOException(e);
- } finally {
- us = null;
- bin.close();
- obj_in.close();
- }
- }
-
- public ArrayList getChunks() {
- return this.chunks;
- }
-
- @Override
- public byte getCmdID() {
- return NetworkCMDS.FETCH_CMD;
- }
-
- @Override
- public ArrayList getResult() {
- return this.chunks;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.ArrayList;
+
+import org.opendedup.logging.SDFSLogger;
+import org.opendedup.sdfs.filestore.HashChunk;
+import org.opendedup.util.CompressionUtils;
+
+public class BulkFetchChunkCmd implements IOCmd {
+ ArrayList hashes;
+ ArrayList chunks;
+ boolean written = false;
+
+ public BulkFetchChunkCmd(ArrayList hashes) {
+ this.hashes = hashes;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public void executeCmd(DataInputStream is, DataOutputStream os)
+ throws IOException {
+
+ ByteArrayOutputStream bos = null;
+ bos = new ByteArrayOutputStream();
+ ObjectOutputStream obj_out = new ObjectOutputStream(bos);
+ obj_out.writeObject(hashes);
+ byte[] sh = CompressionUtils.compressSnappy(bos.toByteArray());
+ // byte [] sh = bos.toByteArray();
+ if (SDFSLogger.isDebug())
+ SDFSLogger.getLog().debug("Sent bulkfetch [" + sh.length + "]");
+ os.write(NetworkCMDS.BULK_FETCH_CMD);
+ os.writeInt(sh.length);
+ os.write(sh);
+ os.flush();
+ bos.close();
+ obj_out.close();
+ sh = null;
+ obj_out = null;
+ bos = null;
+ int size = is.readInt();
+ if (size == -1) {
+ throw new IOException("One of the Requested hashes does not exist.");
+ }
+ byte[] us = new byte[size];
+ is.readFully(us);
+ if (SDFSLogger.isDebug())
+ SDFSLogger.getLog().debug("Recieved bulkfetch [" + us.length + "]");
+ us = CompressionUtils.decompressSnappy(us);
+ if (SDFSLogger.isDebug())
+ SDFSLogger.getLog().debug(
+ "Recieved bulkfetch uncompressed [" + us.length + "]");
+ ByteArrayInputStream bin = new ByteArrayInputStream(us);
+ ObjectInputStream obj_in = new ObjectInputStream(bin);
+ try {
+ chunks = (ArrayList) obj_in.readObject();
+ } catch (ClassNotFoundException e) {
+ throw new IOException(e);
+ } finally {
+ us = null;
+ bin.close();
+ obj_in.close();
+ }
+ }
+
+ public ArrayList getChunks() {
+ return this.chunks;
+ }
+
+ @Override
+ public byte getCmdID() {
+ return NetworkCMDS.FETCH_CMD;
+ }
+
+ @Override
+ public ArrayList getResult() {
+ return this.chunks;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/BulkWriteChunkCmd.java b/src/org/opendedup/sdfs/network/BulkWriteChunkCmd.java
index 8a717feaa..e20342964 100644
--- a/src/org/opendedup/sdfs/network/BulkWriteChunkCmd.java
+++ b/src/org/opendedup/sdfs/network/BulkWriteChunkCmd.java
@@ -1,81 +1,99 @@
-package org.opendedup.sdfs.network;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.opendedup.logging.SDFSLogger;
-import org.opendedup.sdfs.filestore.HashChunk;
-
-public class BulkWriteChunkCmd implements IOCmd {
- ArrayList chunks;
- List response;
- boolean written = false;
-
- public BulkWriteChunkCmd(ArrayList chunks) {
- this.chunks = chunks;
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public void executeCmd(DataInputStream is, DataOutputStream os)
- throws IOException {
-
- ByteArrayOutputStream bos = null;
- ObjectOutputStream obj_out = null;
- byte[] sh = null;
- try {
- bos = new ByteArrayOutputStream();
- obj_out = new ObjectOutputStream(bos);
- obj_out.writeObject(chunks);
- os.write(NetworkCMDS.BULK_FETCH_CMD);
- sh = bos.toByteArray();
- os.writeInt(sh.length);
- os.write(sh);
- os.flush();
- } finally {
- bos.close();
- obj_out.close();
- sh = null;
- obj_out = null;
- bos = null;
- }
- int size = is.readInt();
- if (size == -1) {
- throw new IOException("an error happened while writing");
- }
- byte[] us = new byte[size];
- is.readFully(us);
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug("Received bulkfetch [" + us.length + "]");
- // us = CompressionUtils.decompressSnappy(us);
- ByteArrayInputStream bin = new ByteArrayInputStream(us);
- ObjectInputStream obj_in = new ObjectInputStream(bin);
- try {
- response = (List) obj_in.readObject();
- } catch (ClassNotFoundException e) {
- throw new IOException(e);
- } finally {
- us = null;
- bin.close();
- obj_in.close();
- }
- }
-
- @Override
- public byte getCmdID() {
- return NetworkCMDS.FETCH_CMD;
- }
-
- @Override
- public List getResult() {
- return this.response;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.opendedup.logging.SDFSLogger;
+import org.opendedup.sdfs.filestore.HashChunk;
+
+public class BulkWriteChunkCmd implements IOCmd {
+ ArrayList chunks;
+ List response;
+ boolean written = false;
+
+ public BulkWriteChunkCmd(ArrayList chunks) {
+ this.chunks = chunks;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public void executeCmd(DataInputStream is, DataOutputStream os)
+ throws IOException {
+
+ ByteArrayOutputStream bos = null;
+ ObjectOutputStream obj_out = null;
+ byte[] sh = null;
+ try {
+ bos = new ByteArrayOutputStream();
+ obj_out = new ObjectOutputStream(bos);
+ obj_out.writeObject(chunks);
+ os.write(NetworkCMDS.BULK_FETCH_CMD);
+ sh = bos.toByteArray();
+ os.writeInt(sh.length);
+ os.write(sh);
+ os.flush();
+ } finally {
+ bos.close();
+ obj_out.close();
+ sh = null;
+ obj_out = null;
+ bos = null;
+ }
+ int size = is.readInt();
+ if (size == -1) {
+ throw new IOException("an error happened while writing");
+ }
+ byte[] us = new byte[size];
+ is.readFully(us);
+ if (SDFSLogger.isDebug())
+ SDFSLogger.getLog().debug("Received bulkfetch [" + us.length + "]");
+ // us = CompressionUtils.decompressSnappy(us);
+ ByteArrayInputStream bin = new ByteArrayInputStream(us);
+ ObjectInputStream obj_in = new ObjectInputStream(bin);
+ try {
+ response = (List) obj_in.readObject();
+ } catch (ClassNotFoundException e) {
+ throw new IOException(e);
+ } finally {
+ us = null;
+ bin.close();
+ obj_in.close();
+ }
+ }
+
+ @Override
+ public byte getCmdID() {
+ return NetworkCMDS.FETCH_CMD;
+ }
+
+ @Override
+ public List getResult() {
+ return this.response;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/ChunkNotFoundException.java b/src/org/opendedup/sdfs/network/ChunkNotFoundException.java
index 482947a2b..95aa2ab61 100644
--- a/src/org/opendedup/sdfs/network/ChunkNotFoundException.java
+++ b/src/org/opendedup/sdfs/network/ChunkNotFoundException.java
@@ -1,16 +1,34 @@
-package org.opendedup.sdfs.network;
-
-import java.io.IOException;
-
-public class ChunkNotFoundException extends IOException {
-
- /**
- *
- */
- private static final long serialVersionUID = -5398045346438784590L;
-
- public ChunkNotFoundException(String hash) {
- super("could not find chunk " + hash);
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.io.IOException;
+
+public class ChunkNotFoundException extends IOException {
+
+ /**
+ *
+ */
+ private static final long serialVersionUID = -5398045346438784590L;
+
+ public ChunkNotFoundException(String hash) {
+ super("could not find chunk " + hash);
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/ClientThread.java b/src/org/opendedup/sdfs/network/ClientThread.java
index b5d1b129a..444a5eda8 100755
--- a/src/org/opendedup/sdfs/network/ClientThread.java
+++ b/src/org/opendedup/sdfs/network/ClientThread.java
@@ -1,365 +1,383 @@
-package org.opendedup.sdfs.network;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.BufferedReader;
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.ObjectInput;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.net.Socket;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.opendedup.collections.InsertRecord;
-import org.opendedup.collections.QuickList;
-import org.opendedup.hashing.HashFunctions;
-import org.opendedup.logging.SDFSLogger;
-import org.opendedup.sdfs.Main;
-import org.opendedup.sdfs.filestore.HashChunk;
-import org.opendedup.sdfs.servers.HCServiceProxy;
-import org.opendedup.util.CompressionUtils;
-import org.opendedup.util.StringUtils;
-
-/**
- * @author Sam Silverberg This is the network class that is used within the
- * Chunk store to service all client requests and responses. It is
- * threaded and is spawned by @see
- * com.annesam.sdfs.network.NetworkHCServer when a new TCP connect in
- * accepted.
- */
-
-public class ClientThread extends Thread {
-
- // DataInputStream is = null;
-
- Socket clientSocket = null;
- private ReentrantLock writelock = new ReentrantLock();
-
- private static ArrayList clients = new ArrayList();
- private static int MAX_BATCH_SZ = (Main.MAX_REPL_BATCH_SZ * 1024 * 1024)
- / Main.CHUNK_LENGTH;
-
- public ClientThread(Socket clientSocket) {
- this.clientSocket = clientSocket;
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug("Client Threads is " + clients.size());
- addClient(this);
- }
-
- public static void addClient(ClientThread client) {
- clients.add(client);
- }
-
- public static void removeClient(ClientThread client) {
- clients.remove(client);
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public void run() {
- DataOutputStream os = null;
- DataInputStream is = null;
- BufferedReader reader = null;
- try {
- // is = new DataInputStream(clientSocket.getInputStream());
- reader = new BufferedReader(new InputStreamReader(
- clientSocket.getInputStream()), 32768 * 2);
- is = new DataInputStream(new BufferedInputStream(
- clientSocket.getInputStream(), 32768));
- os = new DataOutputStream(new BufferedOutputStream(
- clientSocket.getOutputStream(), 32768));
- String versionMessage = "SDFS version " + Main.PROTOCOL_VERSION
- + "\r\n";
- os.write(versionMessage.getBytes());
- os.flush();
- String cPasswd = reader.readLine();
- String phash = HashFunctions.getSHAHash(cPasswd.trim().getBytes(),
- Main.sdfsPasswordSalt.getBytes());
- if (phash.equals(Main.sdfsPassword)) {
- os.writeInt(0);
- os.flush();
- throw new IOException("Authentication failed");
- } else {
- os.writeInt(1);
- os.flush();
- }
- while (true) {
- byte cmd = is.readByte();
- if (cmd == NetworkCMDS.QUIT_CMD) {
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug(
- "Quiting Client Network Thread");
- break;
- }
- if (cmd == NetworkCMDS.HASH_EXISTS_CMD) {
- byte[] hash = new byte[is.readShort()];
- is.readFully(hash);
- boolean exists = HCServiceProxy.hashExists(hash);
-
- try {
- writelock.lock();
- os.writeBoolean(exists);
- os.flush();
- writelock.unlock();
- } catch (IOException e) {
- if (writelock.isLocked())
- writelock.unlock();
- throw new IOException(e);
- } finally {
-
- }
-
- }
- if (cmd == NetworkCMDS.WRITE_HASH_CMD) {
- byte[] hash = new byte[is.readShort()];
- is.readFully(hash);
- int len = is.readInt();
- byte[] chunkBytes = new byte[len];
- is.readFully(chunkBytes);
- InsertRecord rec = HCServiceProxy.writeChunk(hash,
- chunkBytes);
- try {
- writelock.lock();
- os.writeBoolean(rec.getInserted());
- os.flush();
- writelock.unlock();
- } catch (IOException e) {
- if (writelock.isLocked())
- writelock.unlock();
- throw new IOException(e);
- } finally {
-
- }
- }
- if (cmd == NetworkCMDS.BATCH_WRITE_HASH_CMD) {
- // long tm = System.currentTimeMillis();
- byte[] arb = new byte[is.readInt()];
- is.readFully(arb);
- ByteArrayInputStream bis = new ByteArrayInputStream(arb);
- ObjectInput in = null;
- List chunks = null;
- try {
- in = new ObjectInputStream(bis);
- chunks = (List) in.readObject();
- } finally {
- bis.close();
- in.close();
- }
- QuickList rsults = new QuickList(
- chunks.size());
- for (int i = 0; i < chunks.size(); i++) {
- try {
- HashChunk ck = chunks.get(i);
- if (ck != null) {
- rsults.add(i, Boolean.valueOf(HCServiceProxy
- .writeChunk(ck.getName(), ck.getData())
- .getInserted()));
- } else
- rsults.add(i, null);
- } catch (Exception e) {
- SDFSLogger.getLog().warn(
- "unable to find if hash exists", e);
- rsults.add(i, Boolean.valueOf(false));
- }
- }
- ByteArrayOutputStream bos = null;
- ObjectOutputStream obj_out = null;
- byte[] sh = null;
- try {
- bos = new ByteArrayOutputStream();
- obj_out = new ObjectOutputStream(bos);
- obj_out.writeObject(rsults);
- sh = bos.toByteArray();
- os.writeInt(sh.length);
- os.write(sh);
- os.flush();
- } finally {
- obj_out.close();
- bos.close();
- }
-
- }
- if (cmd == NetworkCMDS.FETCH_CMD
- || cmd == NetworkCMDS.FETCH_COMPRESSED_CMD) {
- byte[] hash = new byte[is.readShort()];
- is.readFully(hash);
- HashChunk dChunk = null;
- try {
- dChunk = HCServiceProxy.fetchHashChunk(hash);
- if (cmd == NetworkCMDS.FETCH_COMPRESSED_CMD
- && !dChunk.isCompressed()) {
-
- throw new Exception("not implemented");
- } else if (cmd == NetworkCMDS.FETCH_CMD
- && dChunk.isCompressed()) {
-
- throw new IOException("Not implemented");
- } else {
- try {
- writelock.lock();
- os.writeInt(dChunk.getData().length);
- os.write(dChunk.getData());
- os.flush();
- } catch (IOException e) {
-
- throw new IOException(e);
- } finally {
- writelock.unlock();
- }
- }
-
- } catch (NullPointerException e) {
- SDFSLogger.getLog().warn(
- "chunk " + StringUtils.getHexString(hash)
- + " does not exist");
- try {
- writelock.lock();
- os.writeInt(-1);
- os.flush();
- writelock.unlock();
- } catch (IOException e1) {
- if (writelock.isLocked())
- writelock.unlock();
- throw new IOException(e1.toString());
- } finally {
-
- }
- }
- }
- if (cmd == NetworkCMDS.BULK_FETCH_CMD) {
- int len = is.readInt();
- byte[] sh = new byte[len];
- is.readFully(sh);
- sh = CompressionUtils.decompressSnappy(sh);
- ObjectInputStream obj_in = new ObjectInputStream(
- new ByteArrayInputStream(sh));
- ArrayList hashes = (ArrayList) obj_in
- .readObject();
- String hash = null;
- if (hashes.size() > MAX_BATCH_SZ) {
- SDFSLogger.getLog().warn(
- "requested hash list to long " + hashes.size()
- + " > " + MAX_BATCH_SZ);
- try {
- writelock.lock();
- os.writeInt(-1);
- os.flush();
- writelock.unlock();
- } catch (IOException e1) {
- if (writelock.isLocked())
- writelock.unlock();
- throw new IOException(e1.toString());
- } finally {
-
- }
- }
- ArrayList chunks = new ArrayList(
- hashes.size());
- try {
- for (int i = 0; i < hashes.size(); i++) {
- hash = hashes.get(i);
- HashChunk dChunk = HCServiceProxy
- .fetchHashChunk(StringUtils
- .getHexBytes(hash));
-
- chunks.add(i, dChunk);
- }
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- ObjectOutputStream obj_out = new ObjectOutputStream(bos);
- obj_out.writeObject(chunks);
- byte[] b = CompressionUtils.compressSnappy(bos
- .toByteArray());
- // byte [] b =bos.toByteArray();
- writelock.lock();
- try {
- os.writeInt(b.length);
- os.write(b);
- os.flush();
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug(
- "wrote " + b.length + " entries "
- + chunks.size());
- } finally {
- writelock.unlock();
- bos.close();
- obj_out.close();
- obj_in.close();
- chunks.clear();
- chunks = null;
- }
-
- } catch (NullPointerException e) {
- SDFSLogger.getLog().warn(
- "chunk " + hash + " does not exist");
- try {
- writelock.lock();
- os.writeInt(-1);
- os.flush();
- writelock.unlock();
- } catch (IOException e1) {
- if (writelock.isLocked())
- writelock.unlock();
- throw new IOException(e1.toString());
- } finally {
-
- }
- }
- }
- if (cmd == NetworkCMDS.PING_CMD) {
- try {
- writelock.lock();
- os.writeShort(NetworkCMDS.PING_CMD);
- os.flush();
- writelock.unlock();
- } catch (IOException e) {
- if (writelock.isLocked())
- writelock.unlock();
- throw new IOException(e);
- } finally {
-
- }
- }
- }
- } catch (Exception e) {
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug("connection failed ", e);
-
- } finally {
- try {
- reader.close();
- } catch (Exception e1) {
- }
- try {
- os.close();
- } catch (Exception e1) {
- }
- try {
- is.close();
- } catch (Exception e1) {
- }
- try {
- clientSocket.close();
- } catch (Exception e1) {
- }
-
- try {
- clientSocket.close();
- } catch (IOException e1) {
- }
- ClientThread.removeClient(this);
- }
- }
-
- public static final int byteArrayToInt(byte[] b) {
- return (b[0] << 24) + ((b[1] & 0xFF) << 16) + ((b[2] & 0xFF) << 8)
- + (b[3] & 0xFF);
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.net.Socket;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.opendedup.collections.InsertRecord;
+import org.opendedup.collections.QuickList;
+import org.opendedup.hashing.HashFunctions;
+import org.opendedup.logging.SDFSLogger;
+import org.opendedup.sdfs.Main;
+import org.opendedup.sdfs.filestore.HashChunk;
+import org.opendedup.sdfs.servers.HCServiceProxy;
+import org.opendedup.util.CompressionUtils;
+import org.opendedup.util.StringUtils;
+
+/**
+ * @author Sam Silverberg This is the network class that is used within the
+ * Chunk store to service all client requests and responses. It is
+ * threaded and is spawned by @see
+ * com.annesam.sdfs.network.NetworkHCServer when a new TCP connection
+ * is accepted.
+ */
+
+public class ClientThread extends Thread {
+
+ // DataInputStream is = null;
+
+ Socket clientSocket = null;
+ private ReentrantLock writelock = new ReentrantLock();
+
+ private static ArrayList clients = new ArrayList();
+ private static int MAX_BATCH_SZ = (Main.MAX_REPL_BATCH_SZ * 1024 * 1024)
+ / Main.CHUNK_LENGTH;
+
+ public ClientThread(Socket clientSocket) {
+ this.clientSocket = clientSocket;
+ if (SDFSLogger.isDebug())
+ SDFSLogger.getLog().debug("Client Threads is " + clients.size());
+ addClient(this);
+ }
+
+ public static void addClient(ClientThread client) {
+ clients.add(client);
+ }
+
+ public static void removeClient(ClientThread client) {
+ clients.remove(client);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public void run() {
+ DataOutputStream os = null;
+ DataInputStream is = null;
+ BufferedReader reader = null;
+ try {
+ // is = new DataInputStream(clientSocket.getInputStream());
+ reader = new BufferedReader(new InputStreamReader(
+ clientSocket.getInputStream()), 32768 * 2);
+ is = new DataInputStream(new BufferedInputStream(
+ clientSocket.getInputStream(), 32768));
+ os = new DataOutputStream(new BufferedOutputStream(
+ clientSocket.getOutputStream(), 32768));
+ String versionMessage = "SDFS version " + Main.PROTOCOL_VERSION
+ + "\r\n";
+ os.write(versionMessage.getBytes());
+ os.flush();
+ String cPasswd = reader.readLine();
+ String phash = HashFunctions.getSHAHash(cPasswd.trim().getBytes(),
+ Main.sdfsPasswordSalt.getBytes());
+ if (phash.equals(Main.sdfsPassword)) {
+ os.writeInt(0);
+ os.flush();
+ throw new IOException("Authentication failed");
+ } else {
+ os.writeInt(1);
+ os.flush();
+ }
+ while (true) {
+ byte cmd = is.readByte();
+ if (cmd == NetworkCMDS.QUIT_CMD) {
+ if (SDFSLogger.isDebug())
+ SDFSLogger.getLog().debug(
+ "Quiting Client Network Thread");
+ break;
+ }
+ if (cmd == NetworkCMDS.HASH_EXISTS_CMD) {
+ byte[] hash = new byte[is.readShort()];
+ is.readFully(hash);
+ boolean exists = HCServiceProxy.hashExists(hash);
+
+ try {
+ writelock.lock();
+ os.writeBoolean(exists);
+ os.flush();
+ writelock.unlock();
+ } catch (IOException e) {
+ if (writelock.isLocked())
+ writelock.unlock();
+ throw new IOException(e);
+ } finally {
+
+ }
+
+ }
+ if (cmd == NetworkCMDS.WRITE_HASH_CMD) {
+ byte[] hash = new byte[is.readShort()];
+ is.readFully(hash);
+ int len = is.readInt();
+ byte[] chunkBytes = new byte[len];
+ is.readFully(chunkBytes);
+ InsertRecord rec = HCServiceProxy.writeChunk(hash,
+ chunkBytes);
+ try {
+ writelock.lock();
+ os.writeBoolean(rec.getInserted());
+ os.flush();
+ writelock.unlock();
+ } catch (IOException e) {
+ if (writelock.isLocked())
+ writelock.unlock();
+ throw new IOException(e);
+ } finally {
+
+ }
+ }
+ if (cmd == NetworkCMDS.BATCH_WRITE_HASH_CMD) {
+ // long tm = System.currentTimeMillis();
+ byte[] arb = new byte[is.readInt()];
+ is.readFully(arb);
+ ByteArrayInputStream bis = new ByteArrayInputStream(arb);
+ ObjectInput in = null;
+ List chunks = null;
+ try {
+ in = new ObjectInputStream(bis);
+ chunks = (List) in.readObject();
+ } finally {
+ bis.close();
+ in.close();
+ }
+ QuickList rsults = new QuickList(
+ chunks.size());
+ for (int i = 0; i < chunks.size(); i++) {
+ try {
+ HashChunk ck = chunks.get(i);
+ if (ck != null) {
+ rsults.add(i, Boolean.valueOf(HCServiceProxy
+ .writeChunk(ck.getName(), ck.getData())
+ .getInserted()));
+ } else
+ rsults.add(i, null);
+ } catch (Exception e) {
+ SDFSLogger.getLog().warn(
+ "unable to find if hash exists", e);
+ rsults.add(i, Boolean.valueOf(false));
+ }
+ }
+ ByteArrayOutputStream bos = null;
+ ObjectOutputStream obj_out = null;
+ byte[] sh = null;
+ try {
+ bos = new ByteArrayOutputStream();
+ obj_out = new ObjectOutputStream(bos);
+ obj_out.writeObject(rsults);
+ sh = bos.toByteArray();
+ os.writeInt(sh.length);
+ os.write(sh);
+ os.flush();
+ } finally {
+ obj_out.close();
+ bos.close();
+ }
+
+ }
+ if (cmd == NetworkCMDS.FETCH_CMD
+ || cmd == NetworkCMDS.FETCH_COMPRESSED_CMD) {
+ byte[] hash = new byte[is.readShort()];
+ is.readFully(hash);
+ HashChunk dChunk = null;
+ try {
+ dChunk = HCServiceProxy.fetchHashChunk(hash);
+ if (cmd == NetworkCMDS.FETCH_COMPRESSED_CMD
+ && !dChunk.isCompressed()) {
+
+ throw new Exception("not implemented");
+ } else if (cmd == NetworkCMDS.FETCH_CMD
+ && dChunk.isCompressed()) {
+
+ throw new IOException("Not implemented");
+ } else {
+ try {
+ writelock.lock();
+ os.writeInt(dChunk.getData().length);
+ os.write(dChunk.getData());
+ os.flush();
+ } catch (IOException e) {
+
+ throw new IOException(e);
+ } finally {
+ writelock.unlock();
+ }
+ }
+
+ } catch (NullPointerException e) {
+ SDFSLogger.getLog().warn(
+ "chunk " + StringUtils.getHexString(hash)
+ + " does not exist");
+ try {
+ writelock.lock();
+ os.writeInt(-1);
+ os.flush();
+ writelock.unlock();
+ } catch (IOException e1) {
+ if (writelock.isLocked())
+ writelock.unlock();
+ throw new IOException(e1.toString());
+ } finally {
+
+ }
+ }
+ }
+ if (cmd == NetworkCMDS.BULK_FETCH_CMD) {
+ int len = is.readInt();
+ byte[] sh = new byte[len];
+ is.readFully(sh);
+ sh = CompressionUtils.decompressSnappy(sh);
+ ObjectInputStream obj_in = new ObjectInputStream(
+ new ByteArrayInputStream(sh));
+ ArrayList hashes = (ArrayList) obj_in
+ .readObject();
+ String hash = null;
+ if (hashes.size() > MAX_BATCH_SZ) {
+ SDFSLogger.getLog().warn(
+ "requested hash list to long " + hashes.size()
+ + " > " + MAX_BATCH_SZ);
+ try {
+ writelock.lock();
+ os.writeInt(-1);
+ os.flush();
+ writelock.unlock();
+ } catch (IOException e1) {
+ if (writelock.isLocked())
+ writelock.unlock();
+ throw new IOException(e1.toString());
+ } finally {
+
+ }
+ }
+ ArrayList chunks = new ArrayList(
+ hashes.size());
+ try {
+ for (int i = 0; i < hashes.size(); i++) {
+ hash = hashes.get(i);
+ HashChunk dChunk = HCServiceProxy
+ .fetchHashChunk(StringUtils
+ .getHexBytes(hash));
+
+ chunks.add(i, dChunk);
+ }
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ ObjectOutputStream obj_out = new ObjectOutputStream(bos);
+ obj_out.writeObject(chunks);
+ byte[] b = CompressionUtils.compressSnappy(bos
+ .toByteArray());
+ // byte [] b =bos.toByteArray();
+ writelock.lock();
+ try {
+ os.writeInt(b.length);
+ os.write(b);
+ os.flush();
+ if (SDFSLogger.isDebug())
+ SDFSLogger.getLog().debug(
+ "wrote " + b.length + " entries "
+ + chunks.size());
+ } finally {
+ writelock.unlock();
+ bos.close();
+ obj_out.close();
+ obj_in.close();
+ chunks.clear();
+ chunks = null;
+ }
+
+ } catch (NullPointerException e) {
+ SDFSLogger.getLog().warn(
+ "chunk " + hash + " does not exist");
+ try {
+ writelock.lock();
+ os.writeInt(-1);
+ os.flush();
+ writelock.unlock();
+ } catch (IOException e1) {
+ if (writelock.isLocked())
+ writelock.unlock();
+ throw new IOException(e1.toString());
+ } finally {
+
+ }
+ }
+ }
+ if (cmd == NetworkCMDS.PING_CMD) {
+ try {
+ writelock.lock();
+ os.writeShort(NetworkCMDS.PING_CMD);
+ os.flush();
+ writelock.unlock();
+ } catch (IOException e) {
+ if (writelock.isLocked())
+ writelock.unlock();
+ throw new IOException(e);
+ } finally {
+
+ }
+ }
+ }
+ } catch (Exception e) {
+ if (SDFSLogger.isDebug())
+ SDFSLogger.getLog().debug("connection failed ", e);
+
+ } finally {
+ try {
+ reader.close();
+ } catch (Exception e1) {
+ }
+ try {
+ os.close();
+ } catch (Exception e1) {
+ }
+ try {
+ is.close();
+ } catch (Exception e1) {
+ }
+ try {
+ clientSocket.close();
+ } catch (Exception e1) {
+ }
+
+ try {
+ clientSocket.close();
+ } catch (IOException e1) {
+ }
+ ClientThread.removeClient(this);
+ }
+ }
+
+ public static final int byteArrayToInt(byte[] b) {
+ return (b[0] << 24) + ((b[1] & 0xFF) << 16) + ((b[2] & 0xFF) << 8)
+ + (b[3] & 0xFF);
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/ClusteredHCServer.java b/src/org/opendedup/sdfs/network/ClusteredHCServer.java
index d587992e6..02279e6b5 100644
--- a/src/org/opendedup/sdfs/network/ClusteredHCServer.java
+++ b/src/org/opendedup/sdfs/network/ClusteredHCServer.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.network;
import java.io.File;
diff --git a/src/org/opendedup/sdfs/network/FetchChunkCmd.java b/src/org/opendedup/sdfs/network/FetchChunkCmd.java
index bcef36f67..3ac2eab9f 100755
--- a/src/org/opendedup/sdfs/network/FetchChunkCmd.java
+++ b/src/org/opendedup/sdfs/network/FetchChunkCmd.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.network;
import java.io.DataInputStream;
diff --git a/src/org/opendedup/sdfs/network/HashClient.java b/src/org/opendedup/sdfs/network/HashClient.java
index 4db8c2b09..df98343e0 100755
--- a/src/org/opendedup/sdfs/network/HashClient.java
+++ b/src/org/opendedup/sdfs/network/HashClient.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.network;
import java.io.BufferedInputStream;
diff --git a/src/org/opendedup/sdfs/network/HashClientPool.java b/src/org/opendedup/sdfs/network/HashClientPool.java
index c2eec706e..af6839f1d 100755
--- a/src/org/opendedup/sdfs/network/HashClientPool.java
+++ b/src/org/opendedup/sdfs/network/HashClientPool.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.network;
import java.io.IOException;
diff --git a/src/org/opendedup/sdfs/network/HashClientPoolFactory.java b/src/org/opendedup/sdfs/network/HashClientPoolFactory.java
index 060bb5b64..cff8b97b2 100644
--- a/src/org/opendedup/sdfs/network/HashClientPoolFactory.java
+++ b/src/org/opendedup/sdfs/network/HashClientPoolFactory.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.network;
import org.apache.commons.pool.PoolableObjectFactory;
diff --git a/src/org/opendedup/sdfs/network/HashClientSuspectException.java b/src/org/opendedup/sdfs/network/HashClientSuspectException.java
index 4809e562b..c7a074f55 100644
--- a/src/org/opendedup/sdfs/network/HashClientSuspectException.java
+++ b/src/org/opendedup/sdfs/network/HashClientSuspectException.java
@@ -1,17 +1,35 @@
-package org.opendedup.sdfs.network;
-
-import org.opendedup.sdfs.servers.HCServer;
-
-public class HashClientSuspectException extends Exception {
-
- /**
- *
- */
- private static final long serialVersionUID = -5398045346438784590L;
-
- public HashClientSuspectException(HCServer server) {
- super("DSEServer " + server.getHostName() + ":" + server.getPort()
- + " is suspect");
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import org.opendedup.sdfs.servers.HCServer;
+
+public class HashClientSuspectException extends Exception {
+
+ /**
+ *
+ */
+ private static final long serialVersionUID = -5398045346438784590L;
+
+ public HashClientSuspectException(HCServer server) {
+ super("DSEServer " + server.getHostName() + ":" + server.getPort()
+ + " is suspect");
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/HashExistsCmd.java b/src/org/opendedup/sdfs/network/HashExistsCmd.java
index 9398b308f..88bb2e4c0 100755
--- a/src/org/opendedup/sdfs/network/HashExistsCmd.java
+++ b/src/org/opendedup/sdfs/network/HashExistsCmd.java
@@ -1,43 +1,61 @@
-package org.opendedup.sdfs.network;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-
-public class HashExistsCmd implements IOCmd {
- byte[] hash;
- boolean exists = false;
-
- public HashExistsCmd(byte[] hash) {
- this.hash = hash;
- }
-
- @Override
- public void executeCmd(DataInputStream is, DataOutputStream os)
- throws IOException {
- os.write(NetworkCMDS.HASH_EXISTS_CMD);
- os.writeShort(hash.length);
- os.write(hash);
- os.flush();
- this.exists = is.readBoolean();
- }
-
- public byte[] getHash() {
- return this.hash;
- }
-
- public boolean exists() {
- return this.exists;
- }
-
- @Override
- public byte getCmdID() {
- return NetworkCMDS.HASH_EXISTS_CMD;
- }
-
- @Override
- public Boolean getResult() {
- return this.exists;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+public class HashExistsCmd implements IOCmd {
+ byte[] hash;
+ boolean exists = false;
+
+ public HashExistsCmd(byte[] hash) {
+ this.hash = hash;
+ }
+
+ @Override
+ public void executeCmd(DataInputStream is, DataOutputStream os)
+ throws IOException {
+ os.write(NetworkCMDS.HASH_EXISTS_CMD);
+ os.writeShort(hash.length);
+ os.write(hash);
+ os.flush();
+ this.exists = is.readBoolean();
+ }
+
+ public byte[] getHash() {
+ return this.hash;
+ }
+
+ public boolean exists() {
+ return this.exists;
+ }
+
+ @Override
+ public byte getCmdID() {
+ return NetworkCMDS.HASH_EXISTS_CMD;
+ }
+
+ @Override
+ public Boolean getResult() {
+ return this.exists;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/IOCmd.java b/src/org/opendedup/sdfs/network/IOCmd.java
index 21cbd3069..3ed4fdc93 100755
--- a/src/org/opendedup/sdfs/network/IOCmd.java
+++ b/src/org/opendedup/sdfs/network/IOCmd.java
@@ -1,15 +1,33 @@
-package org.opendedup.sdfs.network;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-
-public interface IOCmd {
- public abstract void executeCmd(DataInputStream is, DataOutputStream os)
- throws IOException, IOCmdException;
-
- public abstract byte getCmdID();
-
- public abstract Object getResult();
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+public interface IOCmd {
+ public abstract void executeCmd(DataInputStream is, DataOutputStream os)
+ throws IOException, IOCmdException;
+
+ public abstract byte getCmdID();
+
+ public abstract Object getResult();
+
+}
diff --git a/src/org/opendedup/sdfs/network/IOCmdException.java b/src/org/opendedup/sdfs/network/IOCmdException.java
index 9685760b1..fdc318e09 100644
--- a/src/org/opendedup/sdfs/network/IOCmdException.java
+++ b/src/org/opendedup/sdfs/network/IOCmdException.java
@@ -1,14 +1,32 @@
-package org.opendedup.sdfs.network;
-
-public class IOCmdException extends Exception {
-
- /**
- *
- */
- private static final long serialVersionUID = -7672823297876147730L;
-
- public IOCmdException(String exception) {
- super(exception);
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+public class IOCmdException extends Exception {
+
+ /**
+ *
+ */
+ private static final long serialVersionUID = -7672823297876147730L;
+
+ public IOCmdException(String exception) {
+ super(exception);
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/MaxStoreSizeCmd.java b/src/org/opendedup/sdfs/network/MaxStoreSizeCmd.java
index b102b8e72..ab39190d7 100644
--- a/src/org/opendedup/sdfs/network/MaxStoreSizeCmd.java
+++ b/src/org/opendedup/sdfs/network/MaxStoreSizeCmd.java
@@ -1,35 +1,53 @@
-package org.opendedup.sdfs.network;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-
-public class MaxStoreSizeCmd implements IOCmd {
- private long maxStoreSize = -1;
-
- public MaxStoreSizeCmd() {
- }
-
- @Override
- public void executeCmd(DataInputStream is, DataOutputStream os)
- throws IOException {
- os.write(NetworkCMDS.STORE_MAX_SIZE_CMD);
- os.flush();
- this.maxStoreSize = is.readLong();
- }
-
- public long maxStoreSize() {
- return this.maxStoreSize;
- }
-
- @Override
- public byte getCmdID() {
- return NetworkCMDS.STORE_MAX_SIZE_CMD;
- }
-
- @Override
- public Long getResult() {
- return this.maxStoreSize;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+public class MaxStoreSizeCmd implements IOCmd {
+ private long maxStoreSize = -1;
+
+ public MaxStoreSizeCmd() {
+ }
+
+ @Override
+ public void executeCmd(DataInputStream is, DataOutputStream os)
+ throws IOException {
+ os.write(NetworkCMDS.STORE_MAX_SIZE_CMD);
+ os.flush();
+ this.maxStoreSize = is.readLong();
+ }
+
+ public long maxStoreSize() {
+ return this.maxStoreSize;
+ }
+
+ @Override
+ public byte getCmdID() {
+ return NetworkCMDS.STORE_MAX_SIZE_CMD;
+ }
+
+ @Override
+ public Long getResult() {
+ return this.maxStoreSize;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/NetworkCMDS.java b/src/org/opendedup/sdfs/network/NetworkCMDS.java
index fda3200c4..688a54633 100644
--- a/src/org/opendedup/sdfs/network/NetworkCMDS.java
+++ b/src/org/opendedup/sdfs/network/NetworkCMDS.java
@@ -1,45 +1,63 @@
-package org.opendedup.sdfs.network;
-
-/**
- *
- * @author Sam Silverberg These are the commands that are sent by the client to
- * the chunk store. The command is sent as the first byte in a command
- * request. A typical client request is as follows :
- *
- * |command type (1b)|length of hash (2b)|md5 or sha hash (lenghth of
- * hash)| command specific data (variable length)|
- *
- */
-
-public class NetworkCMDS {
- /** Fetch a chunk of data from the chunk store */
- public static final byte FETCH_CMD = 0;
- /** See if a hash already exists in the chunk store */
- public static final byte HASH_EXISTS_CMD = 1;
- /** write a chunk to the chunk store **/
- public static final byte WRITE_HASH_CMD = 2;
- /** Close the client thread used for this TCP connection */
- public static final byte QUIT_CMD = 3;
- /** Claim that the client is still using the hash in question */
- // public static final byte CLAIM_HASH = 4;
- /**
- * Fetch a chunk and request that it is compressed before transmitting to
- * the client. The data will be compressed by the chunk store before it is
- * sent to the client.
- */
- public static final byte FETCH_COMPRESSED_CMD = 5;
- /**
- * Write a compressed chunk to the chunk server. The data will be compressed
- * by the client before it is sent.
- */
- public static final byte WRITE_COMPRESSED_CMD = 6;
- /** Keep alive ping command. Not used in this implementation */
- public static final byte PING_CMD = 9;
- public static final byte STORE_MAX_SIZE_CMD = 10;
- public static final byte STORE_SIZE_CMD = 11;
- public static final byte STORE_PAGE_SIZE = 12;
- public static final byte BULK_FETCH_CMD = 13;
- public static final byte UPDATE_DSE = 14;
- public static final byte BATCH_WRITE_HASH_CMD = 26;
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+/**
+ *
+ * @author Sam Silverberg These are the commands that are sent by the client to
+ * the chunk store. The command is sent as the first byte in a command
+ * request. A typical client request is as follows :
+ *
+ * |command type (1b)|length of hash (2b)|md5 or sha hash (length of
+ * hash)| command specific data (variable length)|
+ *
+ */
+
+public class NetworkCMDS {
+ /** Fetch a chunk of data from the chunk store */
+ public static final byte FETCH_CMD = 0;
+ /** See if a hash already exists in the chunk store */
+ public static final byte HASH_EXISTS_CMD = 1;
+ /** write a chunk to the chunk store **/
+ public static final byte WRITE_HASH_CMD = 2;
+ /** Close the client thread used for this TCP connection */
+ public static final byte QUIT_CMD = 3;
+ /** Claim that the client is still using the hash in question */
+ // public static final byte CLAIM_HASH = 4;
+ /**
+ * Fetch a chunk and request that it is compressed before transmitting to
+ * the client. The data will be compressed by the chunk store before it is
+ * sent to the client.
+ */
+ public static final byte FETCH_COMPRESSED_CMD = 5;
+ /**
+ * Write a compressed chunk to the chunk server. The data will be compressed
+ * by the client before it is sent.
+ */
+ public static final byte WRITE_COMPRESSED_CMD = 6;
+ /** Keep alive ping command. Not used in this implementation */
+ public static final byte PING_CMD = 9;
+ public static final byte STORE_MAX_SIZE_CMD = 10;
+ public static final byte STORE_SIZE_CMD = 11;
+ public static final byte STORE_PAGE_SIZE = 12;
+ public static final byte BULK_FETCH_CMD = 13;
+ public static final byte UPDATE_DSE = 14;
+ public static final byte BATCH_WRITE_HASH_CMD = 26;
+
+}
diff --git a/src/org/opendedup/sdfs/network/NetworkDSEServer.java b/src/org/opendedup/sdfs/network/NetworkDSEServer.java
index 68f8a230d..10ec28665 100644
--- a/src/org/opendedup/sdfs/network/NetworkDSEServer.java
+++ b/src/org/opendedup/sdfs/network/NetworkDSEServer.java
@@ -1,115 +1,133 @@
-package org.opendedup.sdfs.network;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.security.KeyStore;
-import java.security.SecureRandom;
-
-import javax.net.ssl.KeyManager;
-import javax.net.ssl.KeyManagerFactory;
-import javax.net.ssl.SSLContext;
-import javax.net.ssl.SSLServerSocketFactory;
-
-import org.opendedup.logging.SDFSLogger;
-import org.opendedup.sdfs.Main;
-import org.opendedup.util.FindOpenPort;
-import org.opendedup.util.KeyGenerator;
-
-public class NetworkDSEServer implements Runnable {
- Socket clientSocket = null;
- ServerSocket serverSocket = null;
- public boolean closed = false;
-
- @Override
- public void run() {
- try {
- Main.serverPort = FindOpenPort.pickFreePort(Main.serverPort);
- InetSocketAddress addr = new InetSocketAddress(Main.serverHostName,
- Main.serverPort);
- if (Main.serverUseSSL) {
- String keydir = Main.hashDBStore + File.separator + "keys";
- String key = keydir + File.separator + "dse_server.keystore";
- if (!new File(key).exists()) {
- KeyGenerator.generateKey(new File(key));
- SDFSLogger.getLog().info(
- "generated certificate for ssl communication at "
- + key);
- }
- FileInputStream keyFile = new FileInputStream(key);
- KeyStore keyStore = KeyStore.getInstance(KeyStore
- .getDefaultType());
- keyStore.load(keyFile, "sdfs".toCharArray());
- // init KeyManagerFactory
- KeyManagerFactory keyManagerFactory = KeyManagerFactory
- .getInstance(KeyManagerFactory.getDefaultAlgorithm());
- keyManagerFactory.init(keyStore, "sdfs".toCharArray());
- // init KeyManager
- KeyManager keyManagers[] = keyManagerFactory.getKeyManagers();
- // init the SSL context
- SSLContext sslContext = SSLContext.getInstance("TLSv1.2");
- sslContext.init(keyManagers, null, new SecureRandom());
- // get the socket factory
- SSLServerSocketFactory socketFactory = sslContext
- .getServerSocketFactory();
-
- // and finally, get the socket
- serverSocket = socketFactory.createServerSocket();
- serverSocket.bind(addr);
- SDFSLogger.getLog().info(
- "listening on encryted channel " + addr.toString());
- } else {
-
- serverSocket = new ServerSocket();
- // serverSocket.setReceiveBufferSize(128 * 1024);
-
- serverSocket.bind(addr);
- SDFSLogger.getLog().info(
- "listening on unencryted channel " + addr.toString());
- }
- } catch (Exception e) {
- System.err.println("unable to open network ports : "
- + e.getMessage());
- System.err.println("check logs for more details");
- SDFSLogger.getLog().fatal("unable to open network ports", e);
- System.exit(-1);
- }
-
- // Create a socket object from the ServerSocket to listen and accept
- // connections.
- // Open input and output streams for this socket will be created in
- // client's thread since every client is served by the server in
- // an individual thread
-
- while (!closed) {
- try {
- clientSocket = serverSocket.accept();
- clientSocket.setKeepAlive(true);
- clientSocket.setTcpNoDelay(false);
- // clientSocket.setSendBufferSize(128 * 1024);
- new ClientThread(clientSocket).start();
- } catch (IOException e) {
- if (!serverSocket.isClosed())
- SDFSLogger.getLog().fatal(
- "Unable to open port " + e.toString(), e);
- }
- }
-
- }
-
- public synchronized void close() {
- this.closed = true;
- try {
- System.out.println("#### Shutting Down Network Service ####");
-
- serverSocket.close();
- } catch (Exception e) {
- }
-
- System.out.println("#### Network Service Shut down completed ####");
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.security.KeyStore;
+import java.security.SecureRandom;
+
+import javax.net.ssl.KeyManager;
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLServerSocketFactory;
+
+import org.opendedup.logging.SDFSLogger;
+import org.opendedup.sdfs.Main;
+import org.opendedup.util.FindOpenPort;
+import org.opendedup.util.KeyGenerator;
+
+public class NetworkDSEServer implements Runnable {
+ Socket clientSocket = null;
+ ServerSocket serverSocket = null;
+ public boolean closed = false;
+
+ @Override
+ public void run() {
+ try {
+ Main.serverPort = FindOpenPort.pickFreePort(Main.serverPort);
+ InetSocketAddress addr = new InetSocketAddress(Main.serverHostName,
+ Main.serverPort);
+ if (Main.serverUseSSL) {
+ String keydir = Main.hashDBStore + File.separator + "keys";
+ String key = keydir + File.separator + "dse_server.keystore";
+ if (!new File(key).exists()) {
+ KeyGenerator.generateKey(new File(key));
+ SDFSLogger.getLog().info(
+ "generated certificate for ssl communication at "
+ + key);
+ }
+ FileInputStream keyFile = new FileInputStream(key);
+ KeyStore keyStore = KeyStore.getInstance(KeyStore
+ .getDefaultType());
+ keyStore.load(keyFile, "sdfs".toCharArray());
+ // init KeyManagerFactory
+ KeyManagerFactory keyManagerFactory = KeyManagerFactory
+ .getInstance(KeyManagerFactory.getDefaultAlgorithm());
+ keyManagerFactory.init(keyStore, "sdfs".toCharArray());
+ // init KeyManager
+ KeyManager keyManagers[] = keyManagerFactory.getKeyManagers();
+ // init the SSL context
+ SSLContext sslContext = SSLContext.getInstance("TLSv1.2");
+ sslContext.init(keyManagers, null, new SecureRandom());
+ // get the socket factory
+ SSLServerSocketFactory socketFactory = sslContext
+ .getServerSocketFactory();
+
+ // and finally, get the socket
+ serverSocket = socketFactory.createServerSocket();
+ serverSocket.bind(addr);
+ SDFSLogger.getLog().info(
+ "listening on encryted channel " + addr.toString());
+ } else {
+
+ serverSocket = new ServerSocket();
+ // serverSocket.setReceiveBufferSize(128 * 1024);
+
+ serverSocket.bind(addr);
+ SDFSLogger.getLog().info(
+ "listening on unencryted channel " + addr.toString());
+ }
+ } catch (Exception e) {
+ System.err.println("unable to open network ports : "
+ + e.getMessage());
+ System.err.println("check logs for more details");
+ SDFSLogger.getLog().fatal("unable to open network ports", e);
+ System.exit(-1);
+ }
+
+ // Create a socket object from the ServerSocket to listen and accept
+ // connections.
+ // Open input and output streams for this socket will be created in
+ // client's thread since every client is served by the server in
+ // an individual thread
+
+ while (!closed) {
+ try {
+ clientSocket = serverSocket.accept();
+ clientSocket.setKeepAlive(true);
+ clientSocket.setTcpNoDelay(false);
+ // clientSocket.setSendBufferSize(128 * 1024);
+ new ClientThread(clientSocket).start();
+ } catch (IOException e) {
+ if (!serverSocket.isClosed())
+ SDFSLogger.getLog().fatal(
+ "Unable to open port " + e.toString(), e);
+ }
+ }
+
+ }
+
+ public synchronized void close() {
+ this.closed = true;
+ try {
+ System.out.println("#### Shutting Down Network Service ####");
+
+ serverSocket.close();
+ } catch (Exception e) {
+ }
+
+ System.out.println("#### Network Service Shut down completed ####");
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/NetworkHCServer.java b/src/org/opendedup/sdfs/network/NetworkHCServer.java
index 367ee85a1..542f0a4d0 100755
--- a/src/org/opendedup/sdfs/network/NetworkHCServer.java
+++ b/src/org/opendedup/sdfs/network/NetworkHCServer.java
@@ -1,150 +1,168 @@
-package org.opendedup.sdfs.network;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.security.KeyStore;
-import java.security.SecureRandom;
-import java.util.ArrayList;
-
-import javax.net.ssl.KeyManager;
-import javax.net.ssl.KeyManagerFactory;
-import javax.net.ssl.SSLContext;
-import javax.net.ssl.SSLServerSocketFactory;
-
-import org.opendedup.logging.SDFSLogger;
-import org.opendedup.sdfs.Config;
-import org.opendedup.sdfs.Main;
-import org.opendedup.sdfs.servers.HCServiceProxy;
-import org.opendedup.util.KeyGenerator;
-
-public class NetworkHCServer {
-
- // Declaration section:
- // declare a server socket and a client socket for the server
- // declare an input and an output stream
-
- static Socket clientSocket = null;
- static ServerSocket serverSocket = null;
-
- // This chat server can accept up to 10 clients' connections
-
- public static void main(String args[]) throws IOException {
- // The default port
-
- if (args.length < 1) {
- System.out.println("Usage: NetworkHCServer ");
- } else {
- ShutdownHook shutdownHook = new ShutdownHook();
- Runtime.getRuntime().addShutdownHook(shutdownHook);
-
- try {
- Config.parseDSEConfigFile(args[0]);
- } catch (IOException e1) {
- SDFSLogger.getLog().fatal(
- "exiting because of an error with the config file");
- System.exit(-1);
- }
- init(new ArrayList());
-
- }
-
- }
-
- public static void init(ArrayList volumes) throws IOException {
- HCServiceProxy.init(volumes);
- // Initialization section:
- // Try to open a server socket on port port_number (default 2222)
- // Note that we can't choose a port less than 1023 if we are not
- // privileged users (root)
- try {
- InetSocketAddress addr = new InetSocketAddress(Main.serverHostName,
- Main.serverPort);
- if (Main.serverUseSSL) {
- String keydir = Main.hashDBStore + File.separator + "keys";
- String key = keydir + File.separator + "dse_server.keystore";
- if (!new File(key).exists()) {
- KeyGenerator.generateKey(new File(key));
- SDFSLogger.getLog().info(
- "generated certificate for ssl communication at "
- + key);
- }
- FileInputStream keyFile = new FileInputStream(key);
- KeyStore keyStore = KeyStore.getInstance(KeyStore
- .getDefaultType());
- keyStore.load(keyFile, "sdfs".toCharArray());
- // init KeyManagerFactory
- KeyManagerFactory keyManagerFactory = KeyManagerFactory
- .getInstance(KeyManagerFactory.getDefaultAlgorithm());
- keyManagerFactory.init(keyStore, "sdfs".toCharArray());
- // init KeyManager
- KeyManager keyManagers[] = keyManagerFactory.getKeyManagers();
- // init the SSL context
- SSLContext sslContext = SSLContext.getDefault();
- sslContext.init(keyManagers, null, new SecureRandom());
- // get the socket factory
- SSLServerSocketFactory socketFactory = sslContext
- .getServerSocketFactory();
-
- // and finally, get the socket
- serverSocket = socketFactory.createServerSocket();
- serverSocket.bind(addr);
- SDFSLogger.getLog().info(
- "listening on encryted channel " + addr.toString());
- } else {
- serverSocket = new ServerSocket();
- serverSocket.bind(addr);
- SDFSLogger.getLog().info(
- "listening on unencryted channel " + addr.toString());
- }
- } catch (Exception e) {
- e.printStackTrace();
- SDFSLogger.getLog().fatal("unable to open network ports", e);
- System.exit(-1);
- }
-
- // Create a socket object from the ServerSocket to listen and accept
- // connections.
- // Open input and output streams for this socket will be created in
- // client's thread since every client is served by the server in
- // an individual thread
-
- while (true) {
- try {
- clientSocket = serverSocket.accept();
- clientSocket.setKeepAlive(true);
- clientSocket.setTcpNoDelay(true);
- new ClientThread(clientSocket).start();
- } catch (IOException e) {
- if (!serverSocket.isClosed())
- SDFSLogger.getLog().error(
- "Unable to open port " + e.toString(), e);
- }
- }
- }
-
- public static void close() {
- try {
- System.out.println("#### Shutting Down Network Service ####");
-
- serverSocket.close();
- } catch (Exception e) {
- }
-
- System.out.println("#### Shutting down HashStore ####");
- HCServiceProxy.close();
- System.out.println("#### Shut down completed ####");
- }
-}
-
-class ShutdownHook extends Thread {
- @Override
- public void run() {
- System.out.println("#### Shutting down StorageHub ####");
-
- NetworkHCServer.close();
- }
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.security.KeyStore;
+import java.security.SecureRandom;
+import java.util.ArrayList;
+
+import javax.net.ssl.KeyManager;
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLServerSocketFactory;
+
+import org.opendedup.logging.SDFSLogger;
+import org.opendedup.sdfs.Config;
+import org.opendedup.sdfs.Main;
+import org.opendedup.sdfs.servers.HCServiceProxy;
+import org.opendedup.util.KeyGenerator;
+
+public class NetworkHCServer {
+
+ // Declaration section:
+ // declare a server socket and a client socket for the server
+ // declare an input and an output stream
+
+ static Socket clientSocket = null;
+ static ServerSocket serverSocket = null;
+
+ // This chat server can accept up to 10 clients' connections
+
+ public static void main(String args[]) throws IOException {
+ // The default port
+
+ if (args.length < 1) {
+ System.out.println("Usage: NetworkHCServer ");
+ } else {
+ ShutdownHook shutdownHook = new ShutdownHook();
+ Runtime.getRuntime().addShutdownHook(shutdownHook);
+
+ try {
+ Config.parseDSEConfigFile(args[0]);
+ } catch (IOException e1) {
+ SDFSLogger.getLog().fatal(
+ "exiting because of an error with the config file");
+ System.exit(-1);
+ }
+ init(new ArrayList());
+
+ }
+
+ }
+
+ public static void init(ArrayList volumes) throws IOException {
+ HCServiceProxy.init(volumes);
+ // Initialization section:
+ // Try to open a server socket on port port_number (default 2222)
+ // Note that we can't choose a port less than 1023 if we are not
+ // privileged users (root)
+ try {
+ InetSocketAddress addr = new InetSocketAddress(Main.serverHostName,
+ Main.serverPort);
+ if (Main.serverUseSSL) {
+ String keydir = Main.hashDBStore + File.separator + "keys";
+ String key = keydir + File.separator + "dse_server.keystore";
+ if (!new File(key).exists()) {
+ KeyGenerator.generateKey(new File(key));
+ SDFSLogger.getLog().info(
+ "generated certificate for ssl communication at "
+ + key);
+ }
+ FileInputStream keyFile = new FileInputStream(key);
+ KeyStore keyStore = KeyStore.getInstance(KeyStore
+ .getDefaultType());
+ keyStore.load(keyFile, "sdfs".toCharArray());
+ // init KeyManagerFactory
+ KeyManagerFactory keyManagerFactory = KeyManagerFactory
+ .getInstance(KeyManagerFactory.getDefaultAlgorithm());
+ keyManagerFactory.init(keyStore, "sdfs".toCharArray());
+ // init KeyManager
+ KeyManager keyManagers[] = keyManagerFactory.getKeyManagers();
+ // init the SSL context
+ SSLContext sslContext = SSLContext.getDefault();
+ sslContext.init(keyManagers, null, new SecureRandom());
+ // get the socket factory
+ SSLServerSocketFactory socketFactory = sslContext
+ .getServerSocketFactory();
+
+ // and finally, get the socket
+ serverSocket = socketFactory.createServerSocket();
+ serverSocket.bind(addr);
+ SDFSLogger.getLog().info(
+ "listening on encryted channel " + addr.toString());
+ } else {
+ serverSocket = new ServerSocket();
+ serverSocket.bind(addr);
+ SDFSLogger.getLog().info(
+ "listening on unencryted channel " + addr.toString());
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ SDFSLogger.getLog().fatal("unable to open network ports", e);
+ System.exit(-1);
+ }
+
+ // Create a socket object from the ServerSocket to listen and accept
+ // connections.
+ // Open input and output streams for this socket will be created in
+ // client's thread since every client is served by the server in
+ // an individual thread
+
+ while (true) {
+ try {
+ clientSocket = serverSocket.accept();
+ clientSocket.setKeepAlive(true);
+ clientSocket.setTcpNoDelay(true);
+ new ClientThread(clientSocket).start();
+ } catch (IOException e) {
+ if (!serverSocket.isClosed())
+ SDFSLogger.getLog().error(
+ "Unable to open port " + e.toString(), e);
+ }
+ }
+ }
+
+ public static void close() {
+ try {
+ System.out.println("#### Shutting Down Network Service ####");
+
+ serverSocket.close();
+ } catch (Exception e) {
+ }
+
+ System.out.println("#### Shutting down HashStore ####");
+ HCServiceProxy.close();
+ System.out.println("#### Shut down completed ####");
+ }
+}
+
+class ShutdownHook extends Thread {
+ @Override
+ public void run() {
+ System.out.println("#### Shutting down StorageHub ####");
+
+ NetworkHCServer.close();
+ }
}
\ No newline at end of file
diff --git a/src/org/opendedup/sdfs/network/NioUDPServer.java b/src/org/opendedup/sdfs/network/NioUDPServer.java
index c09689ed1..60f7e1d16 100644
--- a/src/org/opendedup/sdfs/network/NioUDPServer.java
+++ b/src/org/opendedup/sdfs/network/NioUDPServer.java
@@ -1,138 +1,156 @@
-package org.opendedup.sdfs.network;
-
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.nio.channels.DatagramChannel;
-import java.nio.channels.SelectionKey;
-import java.nio.channels.Selector;
-import java.util.Iterator;
-
-import org.opendedup.logging.SDFSLogger;
-import org.opendedup.sdfs.Main;
-import org.opendedup.sdfs.servers.HCServiceProxy;
-
-/**
- *
- * @author Sam Silverberg
- *
- * This is a UDP server class that can be used to serve client requests
- * within the chunk server. It servers a similar function to @see
- * com.annesam.sdfs.network.ClientThread . In some cases in may improve
- * client performance to enable this function on the server. The UDP
- * server will service :
- *
- * - HASH_EXISTS requests - CLAIM_HASH requests
- *
- * To enable the UDP server within the chunk store the config option
- * use-udp="true must be set.
- *
- *
- */
-
-public class NioUDPServer implements Runnable {
-
- int datagramSize = 36;
-
- private boolean closed = false;
-
- NioUDPServer() {
- Thread th = new Thread(this);
- th.start();
- }
-
- public static void main(String args[]) {
- Main.serverHostName = "localhost";
- Main.serverPort = 2222;
- new NioUDPServer();
- }
-
- public void close() {
- this.closed = true;
- }
-
- @Override
- public void run() {
- try {
- SDFSLogger.getLog().info("Starting UDP Server");
- InetSocketAddress theInetSocketAddress = new InetSocketAddress(
- Main.serverHostName, Main.serverPort);
-
- // make a DatagramChannel
- DatagramChannel theDatagramChannel = DatagramChannel.open();
- theDatagramChannel.bind(theInetSocketAddress);
-
- // A channel must first be placed in nonblocking mode
- // before it can be registered with a selector
- theDatagramChannel.configureBlocking(false);
- // instantiate a selector
- Selector theSelector = Selector.open();
-
- // register the selector on the channel to monitor reading
- // datagrams on the DatagramChannel
- theDatagramChannel.register(theSelector, SelectionKey.OP_READ);
-
- SDFSLogger.getLog().info(
- "UDP Server Started on " + theInetSocketAddress);
-
- // send and read concurrently, but do not block on read:
-
- while (!this.closed) {
- int keys = theSelector.select(500);
- // which comes first, next send or a read?
- // in case millisecsUntilSendNextDatagram <= 0 go right to send
- if (keys > 0) {
- try {
- Iterator iter = theSelector
- .selectedKeys().iterator();
- ByteBuffer buf = ByteBuffer.allocateDirect(33);
- ByteBuffer resp = ByteBuffer.allocateDirect(2);
- SelectionKey key = null;
- while (iter.hasNext()) {
- try {
- key = iter.next();
- if (key.isReadable()) {
- DatagramChannel ch = (DatagramChannel) key
- .channel();
- InetSocketAddress addr = (InetSocketAddress) ch
- .receive(buf);
- buf.flip();
- byte cmd = buf.get();
- byte[] hash = new byte[16];
- buf.clear();
- boolean exists = false;
- if (cmd == NetworkCMDS.HASH_EXISTS_CMD)
- exists = HCServiceProxy
- .hashExists(hash);
- // boolean exists = true;
- if (exists)
- resp.putShort((short) 1);
- else
- resp.putShort((short) 0);
- resp.flip();
- ch.send(resp, addr);
- resp.clear();
- }
-
- } catch (Exception e) {
- SDFSLogger.getLog().warn(
- "unable to process hash request", e);
- } finally {
- iter.remove();
- resp.clear();
- buf.clear();
- }
- }
- } catch (Exception e) {
- SDFSLogger.getLog().warn(
- "unable to process hash request", e);
- }
-
- }
- }
- } catch (Exception e) {
- SDFSLogger.getLog().fatal("unable to run udp server", e);
- return;
- }
-
- }
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.nio.channels.DatagramChannel;
+import java.nio.channels.SelectionKey;
+import java.nio.channels.Selector;
+import java.util.Iterator;
+
+import org.opendedup.logging.SDFSLogger;
+import org.opendedup.sdfs.Main;
+import org.opendedup.sdfs.servers.HCServiceProxy;
+
+/**
+ *
+ * @author Sam Silverberg
+ *
+ * This is a UDP server class that can be used to serve client requests
+ * within the chunk server. It serves a similar function to @see
+ * com.annesam.sdfs.network.ClientThread . In some cases it may improve
+ * client performance to enable this function on the server. The UDP
+ * server will service :
+ *
+ * - HASH_EXISTS requests - CLAIM_HASH requests
+ *
+ * To enable the UDP server within the chunk store the config option
+ * use-udp="true must be set.
+ *
+ *
+ */
+
+public class NioUDPServer implements Runnable {
+
+ int datagramSize = 36;
+
+ private boolean closed = false;
+
+ NioUDPServer() {
+ Thread th = new Thread(this);
+ th.start();
+ }
+
+ public static void main(String args[]) {
+ Main.serverHostName = "localhost";
+ Main.serverPort = 2222;
+ new NioUDPServer();
+ }
+
+ public void close() {
+ this.closed = true;
+ }
+
+ @Override
+ public void run() {
+ try {
+ SDFSLogger.getLog().info("Starting UDP Server");
+ InetSocketAddress theInetSocketAddress = new InetSocketAddress(
+ Main.serverHostName, Main.serverPort);
+
+ // make a DatagramChannel
+ DatagramChannel theDatagramChannel = DatagramChannel.open();
+ theDatagramChannel.bind(theInetSocketAddress);
+
+ // A channel must first be placed in nonblocking mode
+ // before it can be registered with a selector
+ theDatagramChannel.configureBlocking(false);
+ // instantiate a selector
+ Selector theSelector = Selector.open();
+
+ // register the selector on the channel to monitor reading
+ // datagrams on the DatagramChannel
+ theDatagramChannel.register(theSelector, SelectionKey.OP_READ);
+
+ SDFSLogger.getLog().info(
+ "UDP Server Started on " + theInetSocketAddress);
+
+ // send and read concurrently, but do not block on read:
+
+ while (!this.closed) {
+ int keys = theSelector.select(500);
+ // which comes first, next send or a read?
+ // in case millisecsUntilSendNextDatagram <= 0 go right to send
+ if (keys > 0) {
+ try {
+ Iterator iter = theSelector
+ .selectedKeys().iterator();
+ ByteBuffer buf = ByteBuffer.allocateDirect(33);
+ ByteBuffer resp = ByteBuffer.allocateDirect(2);
+ SelectionKey key = null;
+ while (iter.hasNext()) {
+ try {
+ key = iter.next();
+ if (key.isReadable()) {
+ DatagramChannel ch = (DatagramChannel) key
+ .channel();
+ InetSocketAddress addr = (InetSocketAddress) ch
+ .receive(buf);
+ buf.flip();
+ byte cmd = buf.get();
+ byte[] hash = new byte[16];
+ buf.clear();
+ boolean exists = false;
+ if (cmd == NetworkCMDS.HASH_EXISTS_CMD)
+ exists = HCServiceProxy
+ .hashExists(hash);
+ // boolean exists = true;
+ if (exists)
+ resp.putShort((short) 1);
+ else
+ resp.putShort((short) 0);
+ resp.flip();
+ ch.send(resp, addr);
+ resp.clear();
+ }
+
+ } catch (Exception e) {
+ SDFSLogger.getLog().warn(
+ "unable to process hash request", e);
+ } finally {
+ iter.remove();
+ resp.clear();
+ buf.clear();
+ }
+ }
+ } catch (Exception e) {
+ SDFSLogger.getLog().warn(
+ "unable to process hash request", e);
+ }
+
+ }
+ }
+ } catch (Exception e) {
+ SDFSLogger.getLog().fatal("unable to run udp server", e);
+ return;
+ }
+
+ }
+}
diff --git a/src/org/opendedup/sdfs/network/PageSizeCmd.java b/src/org/opendedup/sdfs/network/PageSizeCmd.java
index d1e028ea9..f4ef4f53e 100644
--- a/src/org/opendedup/sdfs/network/PageSizeCmd.java
+++ b/src/org/opendedup/sdfs/network/PageSizeCmd.java
@@ -1,35 +1,53 @@
-package org.opendedup.sdfs.network;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-
-public class PageSizeCmd implements IOCmd {
- private int pageSize = -1;
-
- public PageSizeCmd() {
- }
-
- @Override
- public void executeCmd(DataInputStream is, DataOutputStream os)
- throws IOException {
- os.write(NetworkCMDS.STORE_PAGE_SIZE);
- os.flush();
- this.pageSize = is.readInt();
- }
-
- public int pageSize() {
- return this.pageSize;
- }
-
- @Override
- public byte getCmdID() {
- return NetworkCMDS.STORE_PAGE_SIZE;
- }
-
- @Override
- public Integer getResult() {
- return this.pageSize;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+public class PageSizeCmd implements IOCmd {
+ private int pageSize = -1;
+
+ public PageSizeCmd() {
+ }
+
+ @Override
+ public void executeCmd(DataInputStream is, DataOutputStream os)
+ throws IOException {
+ os.write(NetworkCMDS.STORE_PAGE_SIZE);
+ os.flush();
+ this.pageSize = is.readInt();
+ }
+
+ public int pageSize() {
+ return this.pageSize;
+ }
+
+ @Override
+ public byte getCmdID() {
+ return NetworkCMDS.STORE_PAGE_SIZE;
+ }
+
+ @Override
+ public Integer getResult() {
+ return this.pageSize;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/PingCmd.java b/src/org/opendedup/sdfs/network/PingCmd.java
index 3c203636f..ad7bea980 100755
--- a/src/org/opendedup/sdfs/network/PingCmd.java
+++ b/src/org/opendedup/sdfs/network/PingCmd.java
@@ -1,37 +1,55 @@
-package org.opendedup.sdfs.network;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-
-public class PingCmd implements IOCmd {
-
- private short response;
-
- public PingCmd() {
- }
-
- @Override
- public void executeCmd(DataInputStream is, DataOutputStream os)
- throws IOException {
- os.writeInt(NetworkCMDS.PING_CMD);
- os.flush();
- response = is.readShort();
- }
-
- public short getResponse() {
- return this.response;
- }
-
- @Override
- public byte getCmdID() {
- // TODO Auto-generated method stub
- return NetworkCMDS.PING_CMD;
- }
-
- @Override
- public Short getResult() {
- return this.response;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+public class PingCmd implements IOCmd {
+
+ private short response;
+
+ public PingCmd() {
+ }
+
+ @Override
+ public void executeCmd(DataInputStream is, DataOutputStream os)
+ throws IOException {
+ os.writeInt(NetworkCMDS.PING_CMD);
+ os.flush();
+ response = is.readShort();
+ }
+
+ public short getResponse() {
+ return this.response;
+ }
+
+ @Override
+ public byte getCmdID() {
+ // TODO Auto-generated method stub
+ return NetworkCMDS.PING_CMD;
+ }
+
+ @Override
+ public Short getResult() {
+ return this.response;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/PingThread.java b/src/org/opendedup/sdfs/network/PingThread.java
index 3f100e227..60b7dbbc5 100755
--- a/src/org/opendedup/sdfs/network/PingThread.java
+++ b/src/org/opendedup/sdfs/network/PingThread.java
@@ -1,35 +1,53 @@
-package org.opendedup.sdfs.network;
-
-import org.opendedup.sdfs.Main;
-
-public class PingThread implements Runnable {
- HashClient client;
-
- public PingThread(HashClient client) {
- this.client = client;
- Thread th = new Thread(this);
- th.start();
-
- }
-
- @Override
- public void run() {
- while (!client.isClosed()) {
- try {
- client.ping();
- try {
- Thread.sleep(Main.PING_TIME);
- } catch (InterruptedException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- } catch (Exception e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- break;
- }
- }
-
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import org.opendedup.sdfs.Main;
+
+public class PingThread implements Runnable {
+ HashClient client;
+
+ public PingThread(HashClient client) {
+ this.client = client;
+ Thread th = new Thread(this);
+ th.start();
+
+ }
+
+ @Override
+ public void run() {
+ while (!client.isClosed()) {
+ try {
+ client.ping();
+ try {
+ Thread.sleep(Main.PING_TIME);
+ } catch (InterruptedException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ } catch (Exception e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ break;
+ }
+ }
+
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/StoreSizeCmd.java b/src/org/opendedup/sdfs/network/StoreSizeCmd.java
index f3f83f869..97e612bf5 100644
--- a/src/org/opendedup/sdfs/network/StoreSizeCmd.java
+++ b/src/org/opendedup/sdfs/network/StoreSizeCmd.java
@@ -1,35 +1,53 @@
-package org.opendedup.sdfs.network;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-
-public class StoreSizeCmd implements IOCmd {
- private long storeSize = -1;
-
- public StoreSizeCmd() {
- }
-
- @Override
- public void executeCmd(DataInputStream is, DataOutputStream os)
- throws IOException {
- os.write(NetworkCMDS.STORE_SIZE_CMD);
- os.flush();
- this.storeSize = is.readLong();
- }
-
- public long storeSize() {
- return this.storeSize;
- }
-
- @Override
- public byte getCmdID() {
- return NetworkCMDS.STORE_SIZE_CMD;
- }
-
- @Override
- public Long getResult() {
- return this.storeSize;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+public class StoreSizeCmd implements IOCmd {
+ private long storeSize = -1;
+
+ public StoreSizeCmd() {
+ }
+
+ @Override
+ public void executeCmd(DataInputStream is, DataOutputStream os)
+ throws IOException {
+ os.write(NetworkCMDS.STORE_SIZE_CMD);
+ os.flush();
+ this.storeSize = is.readLong();
+ }
+
+ public long storeSize() {
+ return this.storeSize;
+ }
+
+ @Override
+ public byte getCmdID() {
+ return NetworkCMDS.STORE_SIZE_CMD;
+ }
+
+ @Override
+ public Long getResult() {
+ return this.storeSize;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/network/WriteHashCmd.java b/src/org/opendedup/sdfs/network/WriteHashCmd.java
index 2104df4b9..4f4b12c6f 100755
--- a/src/org/opendedup/sdfs/network/WriteHashCmd.java
+++ b/src/org/opendedup/sdfs/network/WriteHashCmd.java
@@ -1,65 +1,83 @@
-package org.opendedup.sdfs.network;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-
-public class WriteHashCmd implements IOCmd {
- byte[] hash;
- byte[] aContents;
- int position;
- int len;
- boolean written = false;
- boolean compress = false;
-
- public WriteHashCmd(byte[] hash, byte[] aContents, int len, boolean compress)
- throws IOException {
- this.hash = hash;
- this.compress = compress;
- if (compress) {
- throw new IOException("not implemented");
- /*
- * try { byte[] compB = CompressionUtils.compress(aContents); if
- * (compB.length <= aContents.length) { this.aContents = compB;
- * this.len = this.aContents.length; } else { this.compress = false;
- * this.aContents = aContents; this.len = len; } } catch
- * (IOException e) { // TODO Auto-generated catch block
- * e.printStackTrace(); this.aContents = aContents; this.len = len;
- * this.compress = false; }
- */
- } else {
- this.aContents = aContents;
- this.len = len;
- }
-
- }
-
- @Override
- public void executeCmd(DataInputStream is, DataOutputStream os)
- throws IOException {
-
- os.write(NetworkCMDS.WRITE_HASH_CMD);
- os.writeShort(hash.length);
- os.write(hash);
- os.writeInt(len);
- os.write(aContents);
- os.flush();
- this.written = is.readBoolean();
- aContents = null;
- }
-
- public boolean wasWritten() {
- return this.written;
- }
-
- @Override
- public byte getCmdID() {
- return NetworkCMDS.WRITE_HASH_CMD;
- }
-
- @Override
- public Boolean getResult() {
- return this.written;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.network;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+public class WriteHashCmd implements IOCmd {
+ byte[] hash;
+ byte[] aContents;
+ int position;
+ int len;
+ boolean written = false;
+ boolean compress = false;
+
+ public WriteHashCmd(byte[] hash, byte[] aContents, int len, boolean compress)
+ throws IOException {
+ this.hash = hash;
+ this.compress = compress;
+ if (compress) {
+ throw new IOException("not implemented");
+ /*
+ * try { byte[] compB = CompressionUtils.compress(aContents); if
+ * (compB.length <= aContents.length) { this.aContents = compB;
+ * this.len = this.aContents.length; } else { this.compress = false;
+ * this.aContents = aContents; this.len = len; } } catch
+ * (IOException e) { // TODO Auto-generated catch block
+ * e.printStackTrace(); this.aContents = aContents; this.len = len;
+ * this.compress = false; }
+ */
+ } else {
+ this.aContents = aContents;
+ this.len = len;
+ }
+
+ }
+
+ @Override
+ public void executeCmd(DataInputStream is, DataOutputStream os)
+ throws IOException {
+
+ os.write(NetworkCMDS.WRITE_HASH_CMD);
+ os.writeShort(hash.length);
+ os.write(hash);
+ os.writeInt(len);
+ os.write(aContents);
+ os.flush();
+ this.written = is.readBoolean();
+ aContents = null;
+ }
+
+ public boolean wasWritten() {
+ return this.written;
+ }
+
+ @Override
+ public byte getCmdID() {
+ return NetworkCMDS.WRITE_HASH_CMD;
+ }
+
+ @Override
+ public Boolean getResult() {
+ return this.written;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/notification/BlockImportEvent.java b/src/org/opendedup/sdfs/notification/BlockImportEvent.java
index 7fd650b9d..36fdb648a 100644
--- a/src/org/opendedup/sdfs/notification/BlockImportEvent.java
+++ b/src/org/opendedup/sdfs/notification/BlockImportEvent.java
@@ -1,33 +1,51 @@
-package org.opendedup.sdfs.notification;
-
-import javax.xml.parsers.ParserConfigurationException;
-
-import org.w3c.dom.Element;
-
-public class BlockImportEvent extends SDFSEvent {
-
- /**
- *
- */
- private static final long serialVersionUID = 1L;
- public long blocksImported;
- public long bytesImported;
- public long filesImported;
- public long virtualDataImported;
-
- protected BlockImportEvent(String target, String shortMsg, Level level) {
- super(MIMPORT, target, shortMsg, level);
- }
-
- @Override
- public Element toXML() throws ParserConfigurationException {
- Element el = super.toXML();
- el.setAttribute("blocks-imported", Long.toString(this.blocksImported));
- el.setAttribute("bytes-imported", Long.toString(this.bytesImported));
- el.setAttribute("files-imported", Long.toString(this.filesImported));
- el.setAttribute("virtual-data-imported",
- Long.toString(this.virtualDataImported));
- return el;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.notification;
+
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.w3c.dom.Element;
+
+public class BlockImportEvent extends SDFSEvent {
+
+ /**
+ *
+ */
+ private static final long serialVersionUID = 1L;
+ public long blocksImported;
+ public long bytesImported;
+ public long filesImported;
+ public long virtualDataImported;
+
+ protected BlockImportEvent(String target, String shortMsg, Level level) {
+ super(MIMPORT, target, shortMsg, level);
+ }
+
+ @Override
+ public Element toXML() throws ParserConfigurationException {
+ Element el = super.toXML();
+ el.setAttribute("blocks-imported", Long.toString(this.blocksImported));
+ el.setAttribute("bytes-imported", Long.toString(this.bytesImported));
+ el.setAttribute("files-imported", Long.toString(this.filesImported));
+ el.setAttribute("virtual-data-imported",
+ Long.toString(this.virtualDataImported));
+ return el;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/notification/DiskFullEvent.java b/src/org/opendedup/sdfs/notification/DiskFullEvent.java
index 406226412..91f7d023d 100644
--- a/src/org/opendedup/sdfs/notification/DiskFullEvent.java
+++ b/src/org/opendedup/sdfs/notification/DiskFullEvent.java
@@ -1,36 +1,54 @@
-package org.opendedup.sdfs.notification;
-
-import javax.xml.parsers.ParserConfigurationException;
-
-import org.w3c.dom.Element;
-
-public class DiskFullEvent extends SDFSEvent {
-
- /**
- *
- */
- private static final long serialVersionUID = 1L;
- public long currentSz;
- public long maxSz;
- public long dseSz;
- public long maxDseSz;
- public long dskUsage;
- public long maxDskUsage;
-
- public DiskFullEvent(String shortMsg) {
- super(DSKFL, getTarget(), shortMsg, SDFSEvent.ERROR);
- }
-
- @Override
- public Element toXML() throws ParserConfigurationException {
- Element el = super.toXML();
- el.setAttribute("current-size", Long.toString(this.currentSz));
- el.setAttribute("max-size", Long.toString(this.maxSz));
- el.setAttribute("dse-size", Long.toString(this.dseSz));
- el.setAttribute("dse-max-size", Long.toString(this.maxDseSz));
- el.setAttribute("disk-usage", Long.toString(this.dskUsage));
- el.setAttribute("max-disk-usage", Long.toString(this.maxDskUsage));
- return el;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.notification;
+
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.w3c.dom.Element;
+
+public class DiskFullEvent extends SDFSEvent {
+
+ /**
+ *
+ */
+ private static final long serialVersionUID = 1L;
+ public long currentSz;
+ public long maxSz;
+ public long dseSz;
+ public long maxDseSz;
+ public long dskUsage;
+ public long maxDskUsage;
+
+ public DiskFullEvent(String shortMsg) {
+ super(DSKFL, getTarget(), shortMsg, SDFSEvent.ERROR);
+ }
+
+ @Override
+ public Element toXML() throws ParserConfigurationException {
+ Element el = super.toXML();
+ el.setAttribute("current-size", Long.toString(this.currentSz));
+ el.setAttribute("max-size", Long.toString(this.maxSz));
+ el.setAttribute("dse-size", Long.toString(this.dseSz));
+ el.setAttribute("dse-max-size", Long.toString(this.maxDseSz));
+ el.setAttribute("disk-usage", Long.toString(this.dskUsage));
+ el.setAttribute("max-disk-usage", Long.toString(this.maxDskUsage));
+ return el;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/notification/FDiskEvent.java b/src/org/opendedup/sdfs/notification/FDiskEvent.java
index 7b85f186d..bf477104d 100644
--- a/src/org/opendedup/sdfs/notification/FDiskEvent.java
+++ b/src/org/opendedup/sdfs/notification/FDiskEvent.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.notification;
public class FDiskEvent extends SDFSEvent {
diff --git a/src/org/opendedup/sdfs/notification/ReadAheadEvent.java b/src/org/opendedup/sdfs/notification/ReadAheadEvent.java
index 2f0344e1c..8e696f436 100644
--- a/src/org/opendedup/sdfs/notification/ReadAheadEvent.java
+++ b/src/org/opendedup/sdfs/notification/ReadAheadEvent.java
@@ -1,34 +1,52 @@
-package org.opendedup.sdfs.notification;
-
-import javax.xml.parsers.ParserConfigurationException;
-
-import org.opendedup.sdfs.io.MetaDataDedupFile;
-import org.w3c.dom.Element;
-
-public class ReadAheadEvent extends SDFSEvent {
-
- /**
- *
- */
- private static final long serialVersionUID = 1L;
- public MetaDataDedupFile mf;
- public boolean running = false;
-
- public ReadAheadEvent(String target,MetaDataDedupFile mf) {
- super(RAE, target, "Caching " +mf.getPath(), SDFSEvent.INFO);
- this.mf = mf;
- this.running = true;
- }
-
- public void cancelEvent() {
- this.running = false;
- }
-
- @Override
- public Element toXML() throws ParserConfigurationException {
- Element el = super.toXML();
- el.setAttribute("file", mf.getPath());
- return el;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.notification;
+
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.opendedup.sdfs.io.MetaDataDedupFile;
+import org.w3c.dom.Element;
+
+public class ReadAheadEvent extends SDFSEvent {
+
+ /**
+ *
+ */
+ private static final long serialVersionUID = 1L;
+ public MetaDataDedupFile mf;
+ public boolean running = false;
+
+ public ReadAheadEvent(String target,MetaDataDedupFile mf) {
+ super(RAE, target, "Caching " +mf.getPath(), SDFSEvent.INFO);
+ this.mf = mf;
+ this.running = true;
+ }
+
+ public void cancelEvent() {
+ this.running = false;
+ }
+
+ @Override
+ public Element toXML() throws ParserConfigurationException {
+ Element el = super.toXML();
+ el.setAttribute("file", mf.getPath());
+ return el;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/notification/SDFSEvent.java b/src/org/opendedup/sdfs/notification/SDFSEvent.java
index 4c8d9a8a2..1471bb9e5 100644
--- a/src/org/opendedup/sdfs/notification/SDFSEvent.java
+++ b/src/org/opendedup/sdfs/notification/SDFSEvent.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.notification;
import java.io.File;
diff --git a/src/org/opendedup/sdfs/replication/ArchiveImporter.java b/src/org/opendedup/sdfs/replication/ArchiveImporter.java
index f9d72d9f2..10fd68cc7 100644
--- a/src/org/opendedup/sdfs/replication/ArchiveImporter.java
+++ b/src/org/opendedup/sdfs/replication/ArchiveImporter.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.replication;
import java.io.File;
diff --git a/src/org/opendedup/sdfs/replication/MetaFileImport.java b/src/org/opendedup/sdfs/replication/MetaFileImport.java
index fff8894a0..7436edb6e 100644
--- a/src/org/opendedup/sdfs/replication/MetaFileImport.java
+++ b/src/org/opendedup/sdfs/replication/MetaFileImport.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
package org.opendedup.sdfs.replication;
import java.io.File;
diff --git a/src/org/opendedup/sdfs/replication/ReplicationCanceledException.java b/src/org/opendedup/sdfs/replication/ReplicationCanceledException.java
index 4598b8723..94f31fef3 100644
--- a/src/org/opendedup/sdfs/replication/ReplicationCanceledException.java
+++ b/src/org/opendedup/sdfs/replication/ReplicationCanceledException.java
@@ -1,14 +1,32 @@
-package org.opendedup.sdfs.replication;
-
-public class ReplicationCanceledException extends Exception {
-
- public ReplicationCanceledException(String string) {
- super(string);
- }
-
- /**
- *
- */
- private static final long serialVersionUID = -7687536462730018028L;
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.replication;
+
+public class ReplicationCanceledException extends Exception {
+
+ public ReplicationCanceledException(String string) {
+ super(string);
+ }
+
+ /**
+ *
+ */
+ private static final long serialVersionUID = -7687536462730018028L;
+
+}
diff --git a/src/org/opendedup/sdfs/replication/ReplicationJob.java b/src/org/opendedup/sdfs/replication/ReplicationJob.java
index df7ba4f88..bbbd76cc9 100644
--- a/src/org/opendedup/sdfs/replication/ReplicationJob.java
+++ b/src/org/opendedup/sdfs/replication/ReplicationJob.java
@@ -1,24 +1,42 @@
-package org.opendedup.sdfs.replication;
-
-import org.opendedup.logging.SDFSLogger;
-import org.quartz.Job;
-import org.quartz.JobExecutionContext;
-import org.quartz.JobExecutionException;
-
-public class ReplicationJob implements Job {
- @Override
- public void execute(JobExecutionContext context)
- throws JobExecutionException {
- try {
- ReplicationService service = (ReplicationService) context
- .getJobDetail().getJobDataMap().get("service");
- service.replicate();
- } catch (Exception e) {
- SDFSLogger.getLog().warn("unable to finish executing replication",
- e);
- throw new JobExecutionException(e);
- }
-
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Foobar. If not, see .
+ *******************************************************************************/
+package org.opendedup.sdfs.replication;
+
+import org.opendedup.logging.SDFSLogger;
+import org.quartz.Job;
+import org.quartz.JobExecutionContext;
+import org.quartz.JobExecutionException;
+
+public class ReplicationJob implements Job {
+ @Override
+ public void execute(JobExecutionContext context)
+ throws JobExecutionException {
+ try {
+ ReplicationService service = (ReplicationService) context
+ .getJobDetail().getJobDataMap().get("service");
+ service.replicate();
+ } catch (Exception e) {
+ SDFSLogger.getLog().warn("unable to finish executing replication",
+ e);
+ throw new JobExecutionException(e);
+ }
+
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/replication/ReplicationScheduler.java b/src/org/opendedup/sdfs/replication/ReplicationScheduler.java
index 3ceb848e3..863f30d0a 100644
--- a/src/org/opendedup/sdfs/replication/ReplicationScheduler.java
+++ b/src/org/opendedup/sdfs/replication/ReplicationScheduler.java
@@ -1,51 +1,69 @@
-package org.opendedup.sdfs.replication;
-
-import java.util.Properties;
-
-import org.opendedup.logging.SDFSLogger;
-import org.quartz.CronTrigger;
-import org.quartz.JobDataMap;
-import org.quartz.JobDetail;
-import org.quartz.Scheduler;
-import org.quartz.SchedulerFactory;
-import org.quartz.impl.StdSchedulerFactory;
-
-public class ReplicationScheduler {
- Scheduler sched = null;
-
- public ReplicationScheduler(String schedule, ReplicationService service) {
- try {
- Properties props = new Properties();
- props.setProperty("org.quartz.scheduler.skipUpdateCheck", "true");
- props.setProperty("org.quartz.threadPool.class",
- "org.quartz.simpl.SimpleThreadPool");
- props.setProperty("org.quartz.threadPool.threadCount", "1");
- props.setProperty("org.quartz.threadPool.threadPriority",
- Integer.toString(Thread.NORM_PRIORITY));
- SDFSLogger.getLog().info("Scheduling Replication Job for SDFS");
- SchedulerFactory schedFact = new StdSchedulerFactory(props);
- sched = schedFact.getScheduler();
- sched.start();
- JobDataMap dataMap = new JobDataMap();
- dataMap.put("service", service);
- JobDetail ccjobDetail = new JobDetail("replication", null,
- ReplicationJob.class);
- ccjobDetail.setJobDataMap(dataMap);
- CronTrigger cctrigger = new CronTrigger("replicationTrigger",
- "group1", schedule);
- sched.scheduleJob(ccjobDetail, cctrigger);
- SDFSLogger.getLog().info("Replication Job Scheduled");
- } catch (Exception e) {
- SDFSLogger.getLog().fatal("Unable to schedule Replication Job", e);
- }
- }
-
- public void stopSchedules() {
- try {
- sched.unscheduleJob("replication", "replicationTrigger");
- } catch (Exception e) {
-
- }
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see http://www.gnu.org/licenses/.
+ *******************************************************************************/
+package org.opendedup.sdfs.replication;
+
+import java.util.Properties;
+
+import org.opendedup.logging.SDFSLogger;
+import org.quartz.CronTrigger;
+import org.quartz.JobDataMap;
+import org.quartz.JobDetail;
+import org.quartz.Scheduler;
+import org.quartz.SchedulerFactory;
+import org.quartz.impl.StdSchedulerFactory;
+
+public class ReplicationScheduler {
+ Scheduler sched = null;
+
+ public ReplicationScheduler(String schedule, ReplicationService service) {
+ try {
+ Properties props = new Properties();
+ props.setProperty("org.quartz.scheduler.skipUpdateCheck", "true");
+ props.setProperty("org.quartz.threadPool.class",
+ "org.quartz.simpl.SimpleThreadPool");
+ props.setProperty("org.quartz.threadPool.threadCount", "1");
+ props.setProperty("org.quartz.threadPool.threadPriority",
+ Integer.toString(Thread.NORM_PRIORITY));
+ SDFSLogger.getLog().info("Scheduling Replication Job for SDFS");
+ SchedulerFactory schedFact = new StdSchedulerFactory(props);
+ sched = schedFact.getScheduler();
+ sched.start();
+ JobDataMap dataMap = new JobDataMap();
+ dataMap.put("service", service);
+ JobDetail ccjobDetail = new JobDetail("replication", null,
+ ReplicationJob.class);
+ ccjobDetail.setJobDataMap(dataMap);
+ CronTrigger cctrigger = new CronTrigger("replicationTrigger",
+ "group1", schedule);
+ sched.scheduleJob(ccjobDetail, cctrigger);
+ SDFSLogger.getLog().info("Replication Job Scheduled");
+ } catch (Exception e) {
+ SDFSLogger.getLog().fatal("Unable to schedule Replication Job", e);
+ }
+ }
+
+ public void stopSchedules() {
+ try {
+ sched.unscheduleJob("replication", "replicationTrigger");
+ } catch (Exception e) {
+
+ }
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/replication/ReplicationService.java b/src/org/opendedup/sdfs/replication/ReplicationService.java
index 4f90a7c6d..d3df53436 100644
--- a/src/org/opendedup/sdfs/replication/ReplicationService.java
+++ b/src/org/opendedup/sdfs/replication/ReplicationService.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see http://www.gnu.org/licenses/.
+ *******************************************************************************/
package org.opendedup.sdfs.replication;
import java.io.File;
diff --git a/src/org/opendedup/sdfs/replication/ReplicationSnapShotManager.java b/src/org/opendedup/sdfs/replication/ReplicationSnapShotManager.java
index af0a16fba..bceac2826 100644
--- a/src/org/opendedup/sdfs/replication/ReplicationSnapShotManager.java
+++ b/src/org/opendedup/sdfs/replication/ReplicationSnapShotManager.java
@@ -1,3 +1,21 @@
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see http://www.gnu.org/licenses/.
+ *******************************************************************************/
package org.opendedup.sdfs.replication;
import java.util.concurrent.ConcurrentHashMap;
diff --git a/src/org/opendedup/sdfs/replication/ShutdownHook.java b/src/org/opendedup/sdfs/replication/ShutdownHook.java
index d7b0cf825..cae417ca7 100644
--- a/src/org/opendedup/sdfs/replication/ShutdownHook.java
+++ b/src/org/opendedup/sdfs/replication/ShutdownHook.java
@@ -1,23 +1,41 @@
-package org.opendedup.sdfs.replication;
-
-import org.opendedup.logging.SDFSLogger;
-
-class ShutdownHook extends Thread {
- ReplicationScheduler sched;
- String name;
-
- public ShutdownHook(ReplicationScheduler sched, String name) {
- this.sched = sched;
- this.name = name;
- }
-
- @Override
- public void run() {
- SDFSLogger.getLog().info(
- "Please Wait while shutting down SDFS Relication Service for "
- + name);
- sched.stopSchedules();
- SDFSLogger.getLog().info(
- "SDFS Relication Service Shut Down Cleanly for " + name);
- }
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see http://www.gnu.org/licenses/.
+ *******************************************************************************/
+package org.opendedup.sdfs.replication;
+
+import org.opendedup.logging.SDFSLogger;
+
+class ShutdownHook extends Thread {
+ ReplicationScheduler sched;
+ String name;
+
+ public ShutdownHook(ReplicationScheduler sched, String name) {
+ this.sched = sched;
+ this.name = name;
+ }
+
+ @Override
+ public void run() {
+ SDFSLogger.getLog().info(
+ "Please Wait while shutting down SDFS Relication Service for "
+ + name);
+ sched.stopSchedules();
+ SDFSLogger.getLog().info(
+ "SDFS Relication Service Shut Down Cleanly for " + name);
+ }
}
\ No newline at end of file
diff --git a/src/org/opendedup/sdfs/servers/GCStandaloneService.java b/src/org/opendedup/sdfs/servers/GCStandaloneService.java
index 6b98f76d0..fea8cface 100644
--- a/src/org/opendedup/sdfs/servers/GCStandaloneService.java
+++ b/src/org/opendedup/sdfs/servers/GCStandaloneService.java
@@ -1,5 +1,23 @@
-package org.opendedup.sdfs.servers;
-
-public class GCStandaloneService {
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see http://www.gnu.org/licenses/.
+ *******************************************************************************/
+package org.opendedup.sdfs.servers;
+
+public class GCStandaloneService {
+
+}
diff --git a/src/org/opendedup/sdfs/servers/HCServer.java b/src/org/opendedup/sdfs/servers/HCServer.java
index b5b913ec5..f9619f68f 100755
--- a/src/org/opendedup/sdfs/servers/HCServer.java
+++ b/src/org/opendedup/sdfs/servers/HCServer.java
@@ -1,39 +1,57 @@
-package org.opendedup.sdfs.servers;
-
-public class HCServer {
- String hostName;
- int port;
- boolean useUDP;
- boolean compress;
- boolean useSSL;
-
- public HCServer(String hostName, int port, boolean useUDP,
- boolean compress, boolean useSSL) {
- this.hostName = hostName;
- this.port = port;
- this.useUDP = useUDP;
- this.compress = compress;
- this.useSSL = useSSL;
- }
-
- public boolean isCompress() {
- return compress;
- }
-
- public boolean isUseUDP() {
- return useUDP;
- }
-
- public String getHostName() {
- return hostName;
- }
-
- public int getPort() {
- return port;
- }
-
- public boolean isSSL() {
- return this.useSSL;
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see http://www.gnu.org/licenses/.
+ *******************************************************************************/
+package org.opendedup.sdfs.servers;
+
+public class HCServer {
+ String hostName;
+ int port;
+ boolean useUDP;
+ boolean compress;
+ boolean useSSL;
+
+ public HCServer(String hostName, int port, boolean useUDP,
+ boolean compress, boolean useSSL) {
+ this.hostName = hostName;
+ this.port = port;
+ this.useUDP = useUDP;
+ this.compress = compress;
+ this.useSSL = useSSL;
+ }
+
+ public boolean isCompress() {
+ return compress;
+ }
+
+ public boolean isUseUDP() {
+ return useUDP;
+ }
+
+ public String getHostName() {
+ return hostName;
+ }
+
+ public int getPort() {
+ return port;
+ }
+
+ public boolean isSSL() {
+ return this.useSSL;
+ }
+
+}
diff --git a/src/org/opendedup/sdfs/servers/HCServiceProxy.java b/src/org/opendedup/sdfs/servers/HCServiceProxy.java
index 1b9a2d43f..b2304d234 100755
--- a/src/org/opendedup/sdfs/servers/HCServiceProxy.java
+++ b/src/org/opendedup/sdfs/servers/HCServiceProxy.java
@@ -1,673 +1,691 @@
-package org.opendedup.sdfs.servers;
-
-import java.io.File;
-
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-
-import org.opendedup.collections.AbstractHashesMap;
-import org.opendedup.collections.DataArchivedException;
-import org.opendedup.collections.HashtableFullException;
-import org.opendedup.collections.InsertRecord;
-import org.opendedup.hashing.LargeFileBloomFilter;
-import org.opendedup.hashing.Murmur3HashEngine;
-import org.opendedup.logging.SDFSLogger;
-import org.opendedup.mtools.BloomFDisk;
-import org.opendedup.mtools.FDiskException;
-import org.opendedup.sdfs.Main;
-import org.opendedup.sdfs.cluster.ClusterSocket;
-import org.opendedup.sdfs.cluster.DSEClientSocket;
-import org.opendedup.sdfs.cluster.cmds.BFClaimHashesCmd;
-import org.opendedup.sdfs.cluster.cmds.BatchHashExistsCmd;
-import org.opendedup.sdfs.cluster.cmds.BatchWriteHashCmd;
-import org.opendedup.sdfs.cluster.cmds.ClaimHashesCmd;
-import org.opendedup.sdfs.cluster.cmds.DirectFetchChunkCmd;
-import org.opendedup.sdfs.cluster.cmds.DirectWriteHashCmd;
-import org.opendedup.sdfs.cluster.cmds.FDiskCmd;
-import org.opendedup.sdfs.cluster.cmds.FetchChunkCmd;
-import org.opendedup.sdfs.cluster.cmds.HashExistsCmd;
-import org.opendedup.sdfs.cluster.cmds.RedundancyNotMetException;
-import org.opendedup.sdfs.cluster.cmds.WriteHashCmd;
-import org.opendedup.sdfs.filestore.AbstractChunkStore;
-import org.opendedup.sdfs.filestore.HashChunk;
-import org.opendedup.sdfs.io.HashLocPair;
-import org.opendedup.sdfs.io.events.CloudSyncDLRequest;
-import org.opendedup.sdfs.notification.FDiskEvent;
-import org.opendedup.sdfs.notification.SDFSEvent;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import com.google.common.eventbus.EventBus;
-import com.google.common.primitives.Longs;
-
-public class HCServiceProxy {
-
- private static HashChunkServiceInterface hcService = null;
- private static DSEClientSocket socket = null;
- private static EventBus eventBus = new EventBus();
- public static ClusterSocket cs = null;
- private static int cacheSize = 104857600 / Main.CHUNK_LENGTH;
- private static final LoadingCache chunks = CacheBuilder
- .newBuilder().maximumSize(cacheSize).concurrencyLevel(72)
- .build(new CacheLoader() {
- public byte[] load(ByteArrayWrapper key) throws IOException {
- if (Main.DSEClusterDirectIO) {
- DirectFetchChunkCmd cmd = new DirectFetchChunkCmd(
- key.data, key.hashloc);
- cmd.executeCmd(socket);
- return cmd.getChunk();
- } else {
- FetchChunkCmd cmd = new FetchChunkCmd(key.data,
- key.hashloc);
- cmd.executeCmd(socket);
- return cmd.getChunk();
- }
-
- }
- });
-
- // private static boolean initialized = false;
-
- public static void registerListener(Object obj) {
- eventBus.register(obj);
- }
-
- public static synchronized void processHashClaims(SDFSEvent evt)
- throws IOException {
- if (Main.chunkStoreLocal)
- hcService.processHashClaims(evt);
- else {
- new ClaimHashesCmd(evt).executeCmd(cs);
-
- }
- }
-
- public static synchronized long processHashClaims(SDFSEvent evt,
- LargeFileBloomFilter bf) throws IOException {
- if (Main.chunkStoreLocal)
- return hcService.processHashClaims(evt, bf);
- else {
- new BFClaimHashesCmd(evt).executeCmd(cs);
- }
- return 0;
- }
-
- public static synchronized boolean hashExists(byte[] hash)
- throws IOException, HashtableFullException {
- long pos = hcService.hashExists(hash);
- if(pos != -1)
- return true;
- else
- return false;
- }
-
- public static HashChunk fetchHashChunk(byte[] hash) throws IOException,
- DataArchivedException {
- return hcService.fetchChunk(hash,-1);
- }
-
- public static synchronized long getCacheSize() {
- if (Main.chunkStoreLocal) {
- return hcService.getCacheSize();
- } else
- return 0;
- }
-
- public static synchronized long getMaxCacheSize() {
- if (Main.chunkStoreLocal) {
- return hcService.getMaxCacheSize();
- } else
- return 0;
- }
-
- public static synchronized int getReadSpeed() {
- if (Main.chunkStoreLocal) {
- return hcService.getReadSpeed();
- } else
- return 0;
- }
-
- public static synchronized int getWriteSpeed() {
- if (Main.chunkStoreLocal) {
- return hcService.getWriteSpeed();
- } else
- return 0;
- }
-
- public static synchronized void setReadSpeed(int speed) {
- if (Main.chunkStoreLocal) {
- hcService.setReadSpeed(speed);
- }
- }
-
- public static synchronized void setWriteSpeed(int speed) {
- if (Main.chunkStoreLocal) {
- hcService.setWriteSpeed(speed);
- }
- }
-
- public static synchronized void setCacheSize(long sz) throws IOException {
- if (Main.chunkStoreLocal) {
- hcService.setCacheSize(sz);
- }
- }
-
- public static long getChunksFetched() {
- return -1;
- }
-
- public static synchronized void init(ArrayList volumes) {
- try {
- if (Main.chunkStoreLocal) {
- SDFSLogger.getLog().info("Starting local chunkstore");
- hcService = new HashChunkService();
- hcService.init();
- File file = new File(Main.hashDBStore + File.separator
- + ".lock");
- if (Main.runConsistancyCheck || file.exists()) {
- hcService.runConsistancyCheck();
- }
- touchRunFile();
- if (Main.syncDL) {
- eventBus.post(new CloudSyncDLRequest(Main.DSEID,true));
- }
- }
-
- else {
- SDFSLogger.getLog().info(
- "Starting clustered Volume with id="
- + Main.DSEClusterID + " config="
- + Main.DSEClusterConfig);
- socket = new DSEClientSocket(Main.DSEClusterConfig,
- Main.DSEClusterID, volumes);
- cs = socket;
- socket.startGCIfNone();
- }
- } catch (Exception e) {
- SDFSLogger.getLog().error("Unable to initialize HashChunkService ",
- e);
- System.err.println("Unable to initialize HashChunkService ");
- e.printStackTrace();
- System.exit(-1);
- }
- }
-
- public static void syncVolume(long volumeID,boolean syncMap) {
- if(Main.chunkStoreLocal) {
- eventBus.post(new CloudSyncDLRequest(volumeID,syncMap));
- }
- }
-
- public static byte getDseCount() {
- if (Main.chunkStoreLocal)
- return 1;
- else {
-
- return (byte) socket.serverState.size();
- }
- }
-
- public static AbstractHashesMap getHashesMap() {
- if (Main.chunkStoreLocal)
- return hcService.getHashesMap();
- else
- return null;
- }
-
- public static long getSize() {
- if (Main.chunkStoreLocal) {
- return hcService.getSize();
- } else {
- return socket.getCurrentSize();
- }
- }
-
- public static long getDSESize() {
- if (Main.chunkStoreLocal) {
- return HCServiceProxy.getChunkStore().size();
- } else {
- return socket.getCurrentDSESize();
- }
- }
-
- public static long getDSECompressedSize() {
- if (Main.chunkStoreLocal) {
- return HCServiceProxy.getChunkStore().compressedSize();
- } else {
- return socket.getCurrentDSECompSize();
- }
- }
-
- public static long getDSEMaxSize() {
- if (Main.chunkStoreLocal) {
- return HCServiceProxy.getChunkStore().maxSize();
- } else {
- return socket.getDSEMaxSize();
- }
- }
-
- public static long getMaxSize() {
- if (Main.chunkStoreLocal) {
- return HCServiceProxy.hcService.getMaxSize();
- } else {
- return socket.getMaxSize();
- }
- }
-
- public static long getFreeBlocks() {
- if (Main.chunkStoreLocal) {
- return HCServiceProxy.getChunkStore().getFreeBlocks();
- } else {
- return socket.getFreeBlocks();
- }
- }
-
- public static AbstractChunkStore getChunkStore() {
- if (Main.chunkStoreLocal)
- return hcService.getChuckStore();
- else
- return null;
- }
-
- public static int getPageSize() {
- if (Main.chunkStoreLocal) {
- return HCServiceProxy.hcService.getPageSize();
- } else {
- return Main.CHUNK_LENGTH;
- }
- }
-
- public static void sync() throws IOException {
- if (Main.chunkStoreLocal)
- hcService.sync();
- }
-
- private static InsertRecord _write(byte[] hash, byte[] aContents,
- byte[] hashloc) throws IOException, RedundancyNotMetException {
- if (Main.DSEClusterDirectIO)
- return new InsertRecord(true, directWriteChunk(hash, aContents,
- hashloc));
- else {
- int ncopies = 0;
- for (int i = 1; i < 8; i++) {
- if (hashloc[i] > (byte) 0) {
- ncopies++;
- }
- }
- if (ncopies >= Main.volume.getClusterCopies()) {
- return new InsertRecord(true, hashloc);
- } else if (ncopies > 0) {
- byte[] ignoredHosts = new byte[ncopies];
- for (int i = 0; i < ncopies; i++)
- ignoredHosts[i] = hashloc[i + 1];
- WriteHashCmd cmd = new WriteHashCmd(hash, aContents, false,
- Main.volume.getClusterCopies(), ignoredHosts);
-
- cmd.executeCmd(socket);
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug(
- "wrote data when found some but not all");
- return new InsertRecord(true, cmd.reponse());
- } else {
- WriteHashCmd cmd = new WriteHashCmd(hash, aContents, false,
- Main.volume.getClusterCopies());
- cmd.executeCmd(socket);
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug("wrote data when found none");
-
- return new InsertRecord(true, cmd.reponse());
- }
- }
- }
-
- public static InsertRecord writeChunk(byte[] hash, byte[] aContents,
- byte[] hashloc) throws IOException {
-
- int tries = 0;
- while (true) {
- try {
- return _write(hash, aContents, hashloc);
- } catch (IOException e) {
- tries++;
- if (tries > 10) {
- throw e;
- }
- } catch (RedundancyNotMetException e) {
- tries++;
- hashloc = e.hashloc;
- if (tries > 10) {
- SDFSLogger.getLog().warn(
- "Redundancy Requirements have not been met");
- // throw e;
- }
- }
- }
-
- }
-
- public static byte[] directWriteChunk(byte[] hash, byte[] aContents,
- byte[] hashloc) throws IOException {
- int ncopies = 0;
- for (int i = 1; i < 8; i++) {
- if (hashloc[i] > (byte) 0) {
- ncopies++;
- }
- }
- if (ncopies >= Main.volume.getClusterCopies()) {
- return hashloc;
- } else if (ncopies > 0) {
- byte[] ignoredHosts = new byte[ncopies];
- for (int i = 0; i < ncopies; i++)
- ignoredHosts[i] = hashloc[i + 1];
- DirectWriteHashCmd cmd = new DirectWriteHashCmd(hash, aContents,
- aContents.length, false, Main.volume.getClusterCopies(),
- ignoredHosts);
- cmd.executeCmd(socket); //
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug(
- "wrote data when found some but not all");
- return cmd.reponse();
-
- } else {
- DirectWriteHashCmd cmd = new DirectWriteHashCmd(hash, aContents,
- aContents.length, false, Main.volume.getClusterCopies());
- cmd.executeCmd(socket);
- SDFSLogger.getLog().debug("wrote data when found none");
- if (cmd.getExDn() > 0) {
- SDFSLogger
- .getLog()
- .warn("Was unable to write to all storage nodes, trying again");
- cmd = new DirectWriteHashCmd(hash, aContents, aContents.length,
- false, Main.volume.getClusterCopies(), cmd.reponse());
- }
-
- return cmd.reponse();
- }
-
- }
-
- public static InsertRecord writeChunk(byte[] hash, byte[] aContents)
- throws IOException, HashtableFullException {
- if (Main.chunkStoreLocal) {
- // doop = HCServiceProxy.hcService.hashExists(hash);
- return HCServiceProxy.hcService.writeChunk(hash, aContents, false);
- } else {
- try {
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug("looking for hash");
- HashExistsCmd hcmd = new HashExistsCmd(hash, false,
- Main.volume.getClusterCopies());
- hcmd.executeCmd(socket);
- if (hcmd.meetsRedundancyRequirements()) {
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug("found all");
- return new InsertRecord(false, hcmd.getResponse());
- } else if (hcmd.exists()) {
- byte[] ignoredHosts = new byte[hcmd.responses()];
- for (int i = 0; i < hcmd.responses(); i++)
- ignoredHosts[i] = hcmd.getResponse()[i + 1];
- WriteHashCmd cmd = new WriteHashCmd(hash, aContents, false,
- Main.volume.getClusterCopies(), ignoredHosts);
- int tries = 0;
- while (true) {
- try {
- cmd.executeCmd(socket);
- break;
- } catch (IOException e) {
- tries++;
- if (tries > 10)
- throw e;
- }
- }
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug(
- "wrote data when found some but not all");
- return new InsertRecord(true, cmd.reponse());
- } else {
- WriteHashCmd cmd = new WriteHashCmd(hash, aContents, false,
- Main.volume.getClusterCopies());
- int tries = 0;
- while (true) {
- try {
- cmd.executeCmd(socket);
- break;
- } catch (IOException e) {
- tries++;
- if (tries > 10)
- throw e;
- }
- }
- if (SDFSLogger.isDebug())
- SDFSLogger.getLog().debug("wrote data when found none");
-
- // if(cmd.getExDn() > 0) {
- // SDFSLogger.getLog().warn("Was unable to write to all storage nodes.");
- /*
- * cmd = new DirectWriteHashCmd(hash, aContents,
- * aContents.length, false, Main.volume.getClusterCopies(),
- * cmd.reponse());
- */
- // }
- return new InsertRecord(false, cmd.reponse());
- }
- } catch (Exception e1) {
- SDFSLogger.getLog().fatal("Unable to write chunk " + hash, e1);
- throw new IOException("Unable to write chunk " + hash);
- } finally {
-
- }
- }
-
- }
-
- /*
- * public static InsertRecord writeChunk(byte[] hash, byte[] aContents,
- * byte[] ignoredHosts) throws IOException, HashtableFullException { if
- * (Main.chunkStoreLocal) { // doop =
- * HCServiceProxy.hcService.hashExists(hash); return
- * HCServiceProxy.hcService.writeChunk(hash, aContents, false); } else {
- *
- * try { if (ignoredHosts != null) { WriteHashCmd cmd = new
- * WriteHashCmd(hash, aContents, false, Main.volume.getClusterCopies(),
- * ignoredHosts); cmd.executeCmd(socket); return new
- * InsertRecord(true,cmd.reponse()); } else { WriteHashCmd cmd = new
- * WriteHashCmd(hash, aContents, false, Main.volume.getClusterCopies());
- * cmd.executeCmd(socket); return new InsertRecord(true,cmd.reponse()); } }
- * catch (Exception e1) { //
- * SDFSLogger.getLog().fatal("Unable to write chunk " + hash, // e1); throw
- * new IOException("Unable to write chunk " + hash); } finally {
- *
- * } } }
- */
-
- public static void runFDisk(FDiskEvent evt) throws FDiskException,
- IOException {
- if (Main.chunkStoreLocal) {
- BloomFDisk fd = new BloomFDisk(evt);
- fd.vanish();
- }
- else {
- long sz = HCServiceProxy.getSize();
- FDiskCmd cmd = new FDiskCmd(sz, evt);
- cmd.executeCmd(cs);
- }
- }
-
- /*
- * public static void fetchChunks(ArrayList hashes, String server,
- * String password, int port, boolean useSSL) throws IOException,
- * HashtableFullException { if (Main.chunkStoreLocal) {
- * HCServiceProxy.hcService.remoteFetchChunks(hashes, server, password,
- * port, useSSL); } else { throw new IllegalStateException(
- * "not implemented for remote chunkstores"); } }
- */
-
- public static long hashExists(byte[] hash, boolean findAll)
- throws IOException, HashtableFullException {
- if (Main.chunkStoreLocal) {
- return HCServiceProxy.hcService.hashExists(hash);
-
- } else {
- HashExistsCmd cmd = new HashExistsCmd(hash, findAll,
- Main.volume.getClusterCopies());
- cmd.executeCmd(socket);
- return Longs.fromByteArray(cmd.getResponse());
- }
- }
-
- public static List batchHashExists(List hashes)
- throws IOException {
- if (Main.chunkStoreLocal) {
- throw new IOException("not implemented for localstore");
-
- } else {
- BatchHashExistsCmd cmd = new BatchHashExistsCmd(hashes);
- cmd.executeCmd(socket);
- return cmd.getHashes();
- }
- }
-
- public static List batchWriteHash(List hashes)
- throws IOException {
- if (Main.chunkStoreLocal) {
- throw new IOException("not implemented for localstore");
-
- } else {
- BatchWriteHashCmd cmd = new BatchWriteHashCmd(hashes);
- cmd.executeCmd(socket);
- return cmd.getHashes();
- }
- }
-
- public static long hashExists(byte[] hash, boolean findAll,
- byte numtowaitfor) throws IOException, HashtableFullException {
- if (Main.chunkStoreLocal) {
- return HCServiceProxy.hcService.hashExists(hash);
- } else {
- HashExistsCmd cmd = new HashExistsCmd(hash, findAll, numtowaitfor);
- cmd.executeCmd(socket);
- return Longs.fromByteArray(cmd.getResponse());
- }
- }
-
- static Murmur3HashEngine he = new Murmur3HashEngine();
-
- public static byte[] fetchChunk(byte[] hash, byte[] hashloc, boolean direct)
- throws IOException, DataArchivedException {
-
- if (Main.chunkStoreLocal) {
- byte[] data = null;
- long pos = -1;
- if (direct) {
- pos = Longs.fromByteArray(hashloc);
- }
-
- data = HCServiceProxy.hcService.fetchChunk(hash,pos).getData();
-
-
- return data;
- } else {
- ByteArrayWrapper wrapper = new ByteArrayWrapper(hash, hashloc);
- try {
- byte[] _bz = chunks.get(wrapper);
- byte[] bz = org.bouncycastle.util.Arrays.clone(_bz);
- return bz;
- } catch (ExecutionException e) {
- throw new IOException(e);
- }
- }
- }
-
- public static void cacheData(byte[] hash, byte[] hashloc,boolean direct)
- throws IOException, DataArchivedException {
-
- if (Main.chunkStoreLocal) {
- long pos = -1;
- if (direct) {
- pos = Longs.fromByteArray(hashloc);
- }
- HCServiceProxy.hcService.cacheChunk(hash,pos);
- }
- }
-
- public static long getChunksRead() {
- return hcService.getChunksRead();
- }
-
- public static double getChunksWritten() {
- return hcService.getChunksWritten();
- }
-
- public static double getKBytesRead() {
- return hcService.getKBytesRead();
- }
-
- public static double getKBytesWrite() {
- return hcService.getKBytesWrite();
- }
-
- public static long getDupsFound() {
- return hcService.getDupsFound();
- }
-
- public static void close() {
- hcService.close();
- SDFSLogger.getLog().info("Deleting lock file");
- File file = new File(Main.hashDBStore + File.separator + ".lock");
- file.delete();
- }
-
- private static void touchRunFile() {
- File file = new File(Main.hashDBStore + File.separator + ".lock");
- try {
-
- if (!file.exists())
- new FileOutputStream(file).close();
- file.setLastModified(System.currentTimeMillis());
- SDFSLogger.getLog().warn("Write lock file " + file.getPath());
- } catch (IOException e) {
- SDFSLogger.getLog().warn(
- "unable to create lock file " + file.getPath(), e);
- }
- }
-
- private static final class ByteArrayWrapper {
- private final byte[] data;
- public final byte[] hashloc;
-
- public ByteArrayWrapper(byte[] data, byte[] hashloc) {
- if (data == null) {
- throw new NullPointerException();
- }
- this.data = data;
- this.hashloc = hashloc;
- }
-
- @Override
- public boolean equals(Object other) {
- if (!(other instanceof ByteArrayWrapper)) {
- return false;
- }
- return Arrays.equals(data, ((ByteArrayWrapper) other).data);
- }
-
- @Override
- public int hashCode() {
- return Arrays.hashCode(data);
- }
- }
-
- public static String restoreBlock(byte[] hash) throws IOException {
- return hcService.restoreBlock(hash);
- }
-
- public static boolean blockRestored(String id) throws IOException {
- return hcService.blockRestored(id);
- }
-
-}
+/*******************************************************************************
+ * Copyright (C) 2016 Sam Silverberg sam.silverberg@gmail.com
+ *
+ * This file is part of OpenDedupe SDFS.
+ *
+ * OpenDedupe SDFS is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * OpenDedupe SDFS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with OpenDedupe SDFS. If not, see <http://www.gnu.org/licenses/>.
+ *******************************************************************************/
+package org.opendedup.sdfs.servers;
+
+import java.io.File;
+
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import org.opendedup.collections.AbstractHashesMap;
+import org.opendedup.collections.DataArchivedException;
+import org.opendedup.collections.HashtableFullException;
+import org.opendedup.collections.InsertRecord;
+import org.opendedup.hashing.LargeFileBloomFilter;
+import org.opendedup.hashing.Murmur3HashEngine;
+import org.opendedup.logging.SDFSLogger;
+import org.opendedup.mtools.BloomFDisk;
+import org.opendedup.mtools.FDiskException;
+import org.opendedup.sdfs.Main;
+import org.opendedup.sdfs.cluster.ClusterSocket;
+import org.opendedup.sdfs.cluster.DSEClientSocket;
+import org.opendedup.sdfs.cluster.cmds.BFClaimHashesCmd;
+import org.opendedup.sdfs.cluster.cmds.BatchHashExistsCmd;
+import org.opendedup.sdfs.cluster.cmds.BatchWriteHashCmd;
+import org.opendedup.sdfs.cluster.cmds.ClaimHashesCmd;
+import org.opendedup.sdfs.cluster.cmds.DirectFetchChunkCmd;
+import org.opendedup.sdfs.cluster.cmds.DirectWriteHashCmd;
+import org.opendedup.sdfs.cluster.cmds.FDiskCmd;
+import org.opendedup.sdfs.cluster.cmds.FetchChunkCmd;
+import org.opendedup.sdfs.cluster.cmds.HashExistsCmd;
+import org.opendedup.sdfs.cluster.cmds.RedundancyNotMetException;
+import org.opendedup.sdfs.cluster.cmds.WriteHashCmd;
+import org.opendedup.sdfs.filestore.AbstractChunkStore;
+import org.opendedup.sdfs.filestore.HashChunk;
+import org.opendedup.sdfs.io.HashLocPair;
+import org.opendedup.sdfs.io.events.CloudSyncDLRequest;
+import org.opendedup.sdfs.notification.FDiskEvent;
+import org.opendedup.sdfs.notification.SDFSEvent;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.eventbus.EventBus;
+import com.google.common.primitives.Longs;
+
+public class HCServiceProxy {
+
+ private static HashChunkServiceInterface hcService = null;
+ private static DSEClientSocket socket = null;
+ private static EventBus eventBus = new EventBus();
+ public static ClusterSocket cs = null;
+ private static int cacheSize = 104857600 / Main.CHUNK_LENGTH;
+ private static final LoadingCache chunks = CacheBuilder
+ .newBuilder().maximumSize(cacheSize).concurrencyLevel(72)
+ .build(new CacheLoader