Fixed issues with MapClosedException and usage accounting after compaction
opendedup committed Dec 7, 2015
1 parent ea978e1 commit 615bea6
Showing 6 changed files with 18 additions and 18 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -12,3 +12,4 @@ windows-exe/Debug/
 windows-exe/Release/
 windows-exe/ipch/
 /test.sh
+install-packages/SDFS-3.0.3-Setup.exe
Binary file modified install-packages/deb/usr/share/sdfs/lib/sdfs.jar
Binary file not shown.
2 changes: 1 addition & 1 deletion install-packages/windows/sdfs_win.nsi
@@ -3,7 +3,7 @@
 ; Sets the theme path
 
 
-!define VERSION '3.0.1'
+!define VERSION '3.0.3'
 !define MUI_PRODUCT "SDFS Cloud File System"
 
 
12 changes: 9 additions & 3 deletions src/org/opendedup/collections/SimpleByteArrayLongMap.java
@@ -132,7 +132,9 @@ public boolean containsKey(byte[] key) throws MapClosedException {
 return true;
 }
 return false;
-} catch (Exception e) {
+} catch(MapClosedException e) {
+throw e;
+} catch (Exception e) {
 SDFSLogger.getLog().fatal("error getting record", e);
 return false;
 } finally {
@@ -327,7 +329,9 @@ public boolean put(byte[] key, int value) throws MapClosedException{
 vb.position(0);
 this.currentSz++;
 return pos > -1 ? true : false;
-} catch (Exception e) {
+} catch(MapClosedException e){
+throw e;
+}catch (Exception e) {
 SDFSLogger.getLog().fatal("error inserting record", e);
 e.printStackTrace();
 return false;
@@ -355,7 +359,9 @@ public int get(byte[] key) throws MapClosedException {
 return val;
 
 }
-} catch (Exception e) {
+} catch(MapClosedException e){
+throw e;
+}catch (Exception e) {
 SDFSLogger.getLog().fatal("error getting record", e);
 return -1;
 } finally {
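All three hunks in SimpleByteArrayLongMap.java apply the same fix: the methods declare throws MapClosedException, but the blanket catch (Exception e) handler was swallowing that exception and returning a not-found value, so callers never learned the map had been closed. Adding a narrower catch clause ahead of the generic one rethrows it. A minimal sketch of the pattern, assuming a simplified map; the class name and lookup body are illustrative, and MapClosedException (really org.opendedup.collections.MapClosedException) is redefined locally so the sketch is self-contained:

class MapClosedException extends Exception {
    private static final long serialVersionUID = 1L;
}

class MapSketch {
    private volatile boolean closed = false;

    public int get(byte[] key) throws MapClosedException {
        try {
            if (closed)
                throw new MapClosedException();
            return lookup(key); // hypothetical lookup, stands in for the real probe loop
        } catch (MapClosedException e) {
            // Must come first: before the fix, the generic handler below
            // caught this too and returned -1, hiding the closed state.
            throw e;
        } catch (Exception e) {
            // Unexpected failure: report not-found (the real code logs it).
            return -1;
        }
    }

    private int lookup(byte[] key) {
        return -1; // placeholder body
    }
}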
3 changes: 2 additions & 1 deletion src/org/opendedup/sdfs/filestore/BatchFileChunkStore.java
@@ -449,9 +449,10 @@ public void run() {
 
 }
 } else {
+long fs = blob.length();
 HashBlobArchive.deleteArchive(k);
 HashBlobArchive.currentLength.addAndGet(-1 * Integer.parseInt(metaData.get("bsize")));
-HashBlobArchive.compressedLength.addAndGet(-1 * blob.length());
+HashBlobArchive.compressedLength.addAndGet(-1 *fs);
 File _f = new File(HashBlobArchive.getPath(k).getPath() + ".md");
 _f.delete();
 }
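The change in BatchFileChunkStore.java fixes an ordering bug: the blob's size was read after HashBlobArchive.deleteArchive(k) had removed the file, and java.io.File.length() returns 0 for a file that does not exist, so compressedLength was decremented by zero and drifted upward over time. Capturing the size in fs before the delete keeps the counter honest. A small sketch under those assumptions; the class, counter, and method names are illustrative stand-ins:

import java.io.File;
import java.util.concurrent.atomic.AtomicLong;

class DeleteAccountingSketch {
    static final AtomicLong compressedLength = new AtomicLong();

    static void deleteAndAccount(File blob) {
        long fs = blob.length(); // capture the size while the file still exists
        blob.delete();           // stands in for HashBlobArchive.deleteArchive(k)
        // Calling blob.length() here instead would return 0, and the
        // counter would never be decremented for this archive.
        compressedLength.addAndGet(-1 * fs);
    }
}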
18 changes: 5 additions & 13 deletions src/org/opendedup/sdfs/filestore/HashBlobArchive.java
@@ -1,6 +1,7 @@
 package org.opendedup.sdfs.filestore;
 
 import java.io.File;
+
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.io.Serializable;
@@ -655,6 +656,7 @@ private void putChunk(byte[] hash, byte[] chunk)
 synchronized (this) {
 this.notifyAll();
 }
+SDFSLogger.getLog().info("zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz");
 throw new ArchiveFullException();
 }
 
@@ -864,19 +866,6 @@ private byte[] getChunk(byte[] hash) throws IOException, DataArchivedException {
 } else {
 throw new IOException("requested block not found in " + f.getPath());
 }
-/*
- * byte[] b = new byte[hash.length]; ByteBuffer buf = ByteBuffer
- * .allocateDirect(4 + 4 + HashFunctionPool.hashLength); ch =
- * rf.getChannel(); while (ch.position() < ch.size()) {
- * buf.position(0); ch.read(buf); buf.position(0); buf.getInt();
- * buf.get(b); pos = (int) ch.position() - 4; blockMap.put(hash,
- * pos); if (Arrays.equals(b, hash)) { nlen = buf.getInt(); byte[]
- * chk = new byte[nlen]; ByteBuffer.wrap(chk);
- * ch.read(ByteBuffer.wrap(chk)); return chk; } else { int _nlen =
- * buf.getInt(); if ((ch.position() + _nlen) < ch.size())
- * ch.position(ch.position() + _nlen); } } buf = null; throw new
- * IOException("hash not found");
- */
 } catch (ClosedChannelException e) {
 return getChunk(hash);
 } catch (MapClosedException e) {
@@ -1051,6 +1040,7 @@ public long compact() throws IOException {
 Thread.sleep(100);
 }
 } else {
+
 _har.delete();
 return 0;
 }
@@ -1062,6 +1052,8 @@
 }
 } catch (Exception e) {
 SDFSLogger.getLog().error("unable to compact " + id, e);
+HashBlobArchive.compressedLength.addAndGet(-1 * _har.f.length());
+HashBlobArchive.currentLength.addAndGet(-1 * _har.uncompressedLength.get());
 _har.delete();
 throw new IOException(e);
 }
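The last hunk covers the "usage after compaction" half of the commit: when compact() fails, the archive is deleted and the exception rethrown, but before this change the global compressedLength and currentLength counters still included the doomed archive's bytes, so reported usage stayed inflated. The fix subtracts the archive's on-disk and uncompressed sizes before deleting it. A sketch of that failure path, with hypothetical stand-ins for the surrounding class and compaction body:

import java.io.File;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

class CompactSketch {
    static final AtomicLong compressedLength = new AtomicLong();
    static final AtomicLong currentLength = new AtomicLong();

    final File f = new File("archive.bin");         // illustrative archive file
    final AtomicLong uncompressedLength = new AtomicLong();

    long compact() throws IOException {
        try {
            return rewriteLiveChunks();             // hypothetical compaction body
        } catch (Exception e) {
            // The archive is about to be deleted, so remove its contribution
            // from both global counters before rethrowing; otherwise the
            // totals keep counting bytes that no longer exist on disk.
            compressedLength.addAndGet(-1 * f.length());
            currentLength.addAndGet(-1 * uncompressedLength.get());
            delete();
            throw new IOException(e);
        }
    }

    long rewriteLiveChunks() throws IOException { return 0; } // placeholder
    void delete() { f.delete(); }
}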
