File cloud upload and recovery fixes for SDFS
opendedup committed Mar 5, 2016
1 parent 646d017 commit eaa5045
Showing 14 changed files with 200 additions and 183 deletions.
2 changes: 1 addition & 1 deletion install-packages/build.sh
@@ -1,4 +1,4 @@
-VERSION=3.1.0
+VERSION=3.1.1
DEBFILE="sdfs_${VERSION}_amd64.deb"
echo $DEBFILE
sudo rm *.rpm
Binary file modified install-packages/deb/usr/share/sdfs/lib/sdfs.jar
6 changes: 6 additions & 0 deletions src/changes.txt
@@ -1,3 +1,7 @@
+Version 3.1.1
+Fixes
+* Fixed cloud filesync
+* Fixed dr restore from cloud to new local server
Version 3.1.0
Fixes
* Fixed distributed DSE code for the 3.0 tree
@@ -6,13 +10,15 @@ Version 3.1.0
* Updated script files to point to appropriate sources
* Remove orphaned symlinks
* Fixed Cache Expiring for cloud storage data retrieval
+* Fixed crash recovery of cloud storage
Enhancements
* Write Performance Improvements. These performance improvements should increase write speeds by 50%-100% if your system can handle it
* Read Performance Speed Improvements. Refactored read pipeline to simplify read access. Increases speed by 20%.
* Default to TLSv1.2 for all communication
* Improved space savings on compaction. Removes individual orphaned blocks from chunks stored on disk. Not yet implemented for cloud storage
because of round trip cost.
* Ported Windows code to Dokany v 0.8
+* added last accessed to delay Glacier archival for frequently accessed data
Version 3.0.4
Fixes :
* Fixed xml commandline parsing in mount.sdfs script
6 changes: 3 additions & 3 deletions src/org/opendedup/sdfs/Main.java
@@ -24,8 +24,8 @@ public class Main {
public static boolean checkArchiveOnOpen= false;
public static boolean checkArchiveOnRead = false;

-public static int writeTimeoutSeconds = -1; //1 hour timeout
-public static int readTimeoutSeconds = -1; //1 hour timeout
+public static int writeTimeoutSeconds = -1; //Disable timeout
+public static int readTimeoutSeconds = -1; //Disable timeout
// public static VolumeConfigWriterThread wth = null;
public static boolean runConsistancyCheck = false;

@@ -95,7 +95,7 @@ public class Main {
/**
* The Version of SDFS this is
*/
public static String version = "3.1.0";
public static String version = "3.1.1";

public static boolean readAhead = false;

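The comment fix in Main.java above matters because -1 is a sentinel: it disables the read/write timeout rather than standing for one hour, as the old comment claimed. A minimal sketch of how such a sentinel might be consumed downstream (the helper below is illustrative, not part of the SDFS source):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    class TimeoutSketch {
        // Hypothetical consumer of Main.writeTimeoutSeconds: a negative
        // value means wait forever, anything else bounds the wait.
        static void awaitWrite(CountDownLatch done, int timeoutSeconds)
                throws InterruptedException {
            if (timeoutSeconds < 0) {
                done.await(); // timeout disabled: block until the write finishes
            } else if (!done.await(timeoutSeconds, TimeUnit.SECONDS)) {
                throw new IllegalStateException("write timed out");
            }
        }
    }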
7 changes: 1 addition & 6 deletions src/org/opendedup/sdfs/filestore/ConsistancyCheck.java
@@ -19,11 +19,6 @@ public class ConsistancyCheck {
public static synchronized void runCheck(AbstractHashesMap map, AbstractChunkStore store) {
try {
store.iterationInit(false);
-ChunkData data = store.getNextChunck();
-if (data == null)
-return;
-
-data.recoverd = true;
System.out.println("Running Consistancy Check on DSE, this may take a while");
SDFSLogger.getLog().warn("Running Consistancy Check on DSE, this may take a while");
SDFSEvent evt = SDFSEvent.consistancyCheckEvent("Running Consistancy Check on DSE, this may take a while",
@@ -95,7 +90,7 @@ public void run() {
if (pos == -1) {
if (map.put(data))
recordsRecovered.incrementAndGet();
-}
+}

try {
synchronized (store) {
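The deleted lines above had primed the iterator with one getNextChunck() call before the worker threads started, so the store's first chunk was consumed (and flagged recoverd) without ever being checked against the hash map. With the priming gone, every chunk flows through the same loop. A sketch of the post-fix loop shape, assuming the lookup API suggested by the surviving diff (the pos return convention and getHash() are assumptions):

    // Sketch, not verbatim SDFS source: walk every chunk in the store and
    // re-insert any record the hash map lost, counting recoveries.
    store.iterationInit(false);
    ChunkData data;
    while ((data = store.getNextChunck()) != null) {
        data.recoverd = true;
        long pos = map.get(data.getHash()); // assumed lookup, -1 on miss
        if (pos == -1 && map.put(data))
            recordsRecovered.incrementAndGet();
    }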
38 changes: 17 additions & 21 deletions src/org/opendedup/sdfs/filestore/cloud/BatchAwsS3ChunkStore.java
@@ -566,6 +566,7 @@ public boolean isTrusted(X509Certificate[] certificate,
int k = 0;
@Override
public ChunkData getNextChunck() throws IOException {
+synchronized(this) {
if (ht == null || !ht.hasMoreElements()) {
StringResult rs;
try {
@@ -582,9 +583,13 @@ public ChunkData getNextChunck() throws IOException {
ht = rs.st;
hid = rs.id;
}
+String tk = ht.nextToken();
+SDFSLogger.getLog().debug("hid=" + hid + " val=" + StringUtils.getHexString(BaseEncoding.base64().decode(
+tk.split(":")[0])));
ChunkData chk = new ChunkData(BaseEncoding.base64().decode(
-ht.nextToken().split(":")[0]), hid);
+tk.split(":")[0]), hid);
return chk;
+}

}
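Two things change in getNextChunck() above: the whole body is wrapped in synchronized(this) so concurrent recovery threads cannot interleave hasMoreElements()/nextToken() on the shared tokenizer, and the token is read once into tk so the new debug line and the ChunkData constructor see the same element (logging with a second nextToken() call would silently consume an extra key). The general shape of the fix, as a sketch with a hypothetical refill helper standing in for the S3 key fetch:

    // Sketch: atomically take the next token from a shared tokenizer.
    public ChunkData getNextChunck() throws IOException {
        synchronized (this) {
            if (ht == null || !ht.hasMoreElements())
                refillTokenizer(); // hypothetical helper; loads the next key batch
            String tk = ht.nextToken();
            return new ChunkData(BaseEncoding.base64().decode(tk.split(":")[0]), hid);
        }
    }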

@@ -755,6 +760,7 @@ public void writeHashBlobArchive(HashBlobArchive arc,long id) throws IOException
}
md = new ObjectMetadata();
md.addUserMetadata("size", Integer.toString(sz));
md.addUserMetadata("lastaccessed", "0");
md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
md.addUserMetadata("encrypt",
Boolean.toString(Main.chunkStoreEncryptionEnabled));
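Each uploaded archive now carries a lastaccessed user-metadata key, initialized to "0"; the read path (next hunk) refreshes it, and per changes.txt the Glacier archival policy can use it to defer archiving recently read data. A sketch of stamping user metadata at upload with the AWS SDK v1 (bucket, key, and the helper itself are placeholders):

    import java.io.ByteArrayInputStream;
    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.model.ObjectMetadata;
    import com.amazonaws.services.s3.model.PutObjectRequest;

    static void uploadBlock(AmazonS3 s3, String bucket, String key, byte[] payload) {
        ObjectMetadata md = new ObjectMetadata();
        md.setContentLength(payload.length);
        md.addUserMetadata("lastaccessed", "0"); // never read yet
        s3.putObject(new PutObjectRequest(bucket, key,
                new ByteArrayInputStream(payload), md));
    }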
Expand Down Expand Up @@ -876,6 +882,12 @@ else if (remaining < downloadBlockSize)
tm = System.currentTimeMillis();
if (compress)
data = CompressionUtils.decompressZLIB(data);
mp.put("lastaccessed",Long.toString(System.currentTimeMillis()));
omd.setUserMetadata(mp);
CopyObjectRequest req = new CopyObjectRequest(this.name,
"blocks/" + haName, this.name, "blocks/" + haName)
.withNewObjectMetadata(omd);
s3Service.copyObject(req);
if (mp.containsKey("deleted")) {
boolean del = Boolean.parseBoolean((String) mp.get("deleted"));
if (del) {
@@ -895,7 +907,7 @@ else if (remaining < downloadBlockSize)
mp.put("deleted-objects", Integer.toString(delobj));
mp.put("suspect", "true");
omd.setUserMetadata(mp);
-CopyObjectRequest req = new CopyObjectRequest(this.name,
+req = new CopyObjectRequest(this.name,
"keys/" + haName, this.name, "keys/" + haName)
.withNewObjectMetadata(omd);
s3Service.copyObject(req);
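S3 object metadata is immutable once written, so the read path refreshes lastaccessed with the standard "metadata touch": copying the object onto itself with replacement metadata. Note the trade-off the changelog alludes to: every block fetch now also issues a CopyObject request, paying one extra round trip to keep frequently read data out of Glacier. The pattern in isolation (a sketch; bucket and key are placeholders):

    import java.util.Map;
    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.model.CopyObjectRequest;
    import com.amazonaws.services.s3.model.ObjectMetadata;

    static void touchLastAccessed(AmazonS3 s3, String bucket, String key) {
        ObjectMetadata omd = s3.getObjectMetadata(bucket, key);
        Map<String, String> mp = omd.getUserMetadata();
        mp.put("lastaccessed", Long.toString(System.currentTimeMillis()));
        omd.setUserMetadata(mp);
        // A self-copy is the only way to replace metadata on an existing object.
        s3.copyObject(new CopyObjectRequest(bucket, key, bucket, key)
                .withNewObjectMetadata(omd));
    }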
@@ -1202,32 +1214,14 @@ public void sync() throws IOException {
HashBlobArchive.sync();
}

-private long getLastModified(String st) {
-
-try {
-ObjectMetadata obj = s3Service.getObjectMetadata(this.name, st);
-Map<String, String> metaData = obj.getUserMetadata();
-if (metaData.containsKey("lastmodified")) {
-return Long.parseLong((String) metaData.get("lastmodified"));
-} else {
-return 0;
-}
-} catch (Exception e) {
-return -1;
-} finally {
-
-}
-}

@Override
public void uploadFile(File f, String to, String pp) throws IOException {

BufferedInputStream in = null;
while (to.startsWith(File.separator))
to = to.substring(1);
String pth = pp + "/"
+ EncyptUtils.encString(to, Main.chunkStoreEncryptionEnabled);
-if (f.lastModified() == this.getLastModified(pth))
-return;
boolean isDir = false;
boolean isSymlink = false;
if (!OSValidator.isWindows()) {
@@ -1290,6 +1284,7 @@ public void uploadFile(File f, String to, String pp) throws IOException {
e = new File(this.staged_sync_location, rnd + ".e");
}
try {
+
BufferedInputStream is = new BufferedInputStream(
new FileInputStream(f));
BufferedOutputStream os = new BufferedOutputStream(
Expand Down Expand Up @@ -1342,6 +1337,7 @@ public void uploadFile(File f, String to, String pp) throws IOException {
PutObjectRequest req = new PutObjectRequest(this.name,
objName, in, md);
s3Service.putObject(req);
SDFSLogger.getLog().debug("uploaded=" + f.getPath() + " lm=" + md.getUserMetadata().get("lastmodified"));
} catch (Exception e1) {
// SDFSLogger.getLog().error("error uploading", e1);
throw new IOException(e1);
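The removed getLastModified() helper and its caller had let uploadFile() return early whenever the local file's mtime matched the "lastmodified" user metadata already in S3. During a filesync or a DR restore to a fresh server, that shortcut could skip files whose remote stamp matched but whose content differed, so uploads are now unconditional and a debug line records what was stamped. The dropped check, reconstructed from the deleted lines for contrast (assumed AWS SDK v1 call, names as in the diff):

    // Removed optimization: skip the upload when the remote stamp matches.
    // Unsafe, because a stale object with a matching stamp is never repaired.
    ObjectMetadata obj = s3Service.getObjectMetadata(this.name, pth);
    String lm = obj.getUserMetadata().get("lastmodified");
    if (lm != null && f.lastModified() == Long.parseLong(lm))
        return;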
@@ -384,7 +384,7 @@ public long getFreeBlocks() {
private long hid = 0;

@Override
-public ChunkData getNextChunck() throws IOException {
+public synchronized ChunkData getNextChunck() throws IOException {
if (ht == null || !ht.hasMoreElements()) {
StringResult rs;
try {