From 8d7ab1d781d28632ddc75c309efb5070b04f9798 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Wed, 15 Feb 2017 17:38:15 +0200 Subject: [PATCH 01/66] fix typo --- .../oss/storage/resources/fs/BinaryStorageResource.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/BinaryStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/BinaryStorageResource.java index 40b1ba1..0203278 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/BinaryStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/BinaryStorageResource.java @@ -108,15 +108,15 @@ public Response delete(@PathParam("serviceId") @DefaultValue("") String serviceI protected Response internalGet(Record record) throws IOException { InputStream is = filesystem.get(record); - - long size = fs.size(record); - + + long size = filesystem.size(record); + return Response.ok(is).header(HttpHeaders.CONTENT_LENGTH, size).build(); } protected Response internalHead(Record record) throws IOException { long size = filesystem.size(record); - + return Response.ok().header(HttpHeaders.CONTENT_LENGTH, size).build(); } From fd8edfed95a692df8dcd32e463b1f32dddaa0a55 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Thu, 16 Feb 2017 12:27:29 +0200 Subject: [PATCH 02/66] fix problem with abstraction of file system --- .../takipi/oss/storage/TakipiStorageMain.java | 31 ++++++----- .../com/takipi/oss/storage/fs/BaseRecord.java | 11 ++++ .../com/takipi/oss/storage/fs/Record.java | 6 ++- .../oss/storage/fs/SimplePathRecord.java | 53 +++++++++++++++++++ .../takipi/oss/storage/fs/api/Filesystem.java | 15 ++++-- .../storage/fs/folder/FolderFilesystem.java | 3 +- .../record/HashSubfolderFilesystem.java | 6 +-- .../fs/folder/record/RecordFilesystem.java | 12 +++-- .../fs/folder/simple/SimpleFilesystem.java | 18 ++++--- .../oss/storage/fs/s3/S3Filesystem.java | 26 +++++---- .../oss/storage/helper/FilesystemUtil.java | 3 +- .../fs/JsonSimpleFetchStorageResource.java | 26 ++++----- 12 files changed, 154 insertions(+), 56 deletions(-) create mode 100644 src/main/java/com/takipi/oss/storage/fs/BaseRecord.java create mode 100644 src/main/java/com/takipi/oss/storage/fs/SimplePathRecord.java diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index ace2506..b5cb05b 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -1,38 +1,37 @@ package com.takipi.oss.storage; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.BasicAWSCredentials; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; -import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.fs.folder.simple.SimpleFilesystem; -import com.takipi.oss.storage.fs.s3.S3Filesystem; -import com.takipi.oss.storage.resources.diag.*; -import io.dropwizard.Application; -import io.dropwizard.setup.Bootstrap; -import io.dropwizard.setup.Environment; - import java.util.EnumSet; import javax.servlet.DispatcherType; import javax.servlet.FilterRegistration; import org.eclipse.jetty.servlets.CrossOriginFilter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3Client; +import 
com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.folder.simple.SimpleFilesystem; +import com.takipi.oss.storage.fs.s3.S3Filesystem; import com.takipi.oss.storage.health.FilesystemHealthCheck; - +import com.takipi.oss.storage.resources.diag.MachineInfoOnlyStatusStorageResource; +import com.takipi.oss.storage.resources.diag.NoOpTreeStorageResource; import com.takipi.oss.storage.resources.diag.PingStorageResource; import com.takipi.oss.storage.resources.diag.StatusStorageResource; import com.takipi.oss.storage.resources.diag.TreeStorageResource; import com.takipi.oss.storage.resources.diag.VersionStorageResource; - import com.takipi.oss.storage.resources.fs.BinaryStorageResource; import com.takipi.oss.storage.resources.fs.JsonMultiDeleteStorageResource; import com.takipi.oss.storage.resources.fs.JsonMultiFetchStorageResource; import com.takipi.oss.storage.resources.fs.JsonSimpleFetchStorageResource; import com.takipi.oss.storage.resources.fs.JsonSimpleSearchStorageResource; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import io.dropwizard.Application; +import io.dropwizard.setup.Bootstrap; +import io.dropwizard.setup.Environment; public class TakipiStorageMain extends Application { diff --git a/src/main/java/com/takipi/oss/storage/fs/BaseRecord.java b/src/main/java/com/takipi/oss/storage/fs/BaseRecord.java new file mode 100644 index 0000000..1999772 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/fs/BaseRecord.java @@ -0,0 +1,11 @@ +package com.takipi.oss.storage.fs; + +public interface BaseRecord { + public String getServiceId(); + + public String getType(); + + public String getKey(); + + public String getPath(); +} diff --git a/src/main/java/com/takipi/oss/storage/fs/Record.java b/src/main/java/com/takipi/oss/storage/fs/Record.java index f396dab..6f14ecb 100644 --- a/src/main/java/com/takipi/oss/storage/fs/Record.java +++ b/src/main/java/com/takipi/oss/storage/fs/Record.java @@ -2,7 +2,7 @@ import org.apache.commons.lang.StringUtils; -public class Record { +public class Record implements BaseRecord { private String serviceId; private String type; private String key; @@ -33,6 +33,10 @@ public String getKey() { return key; } + public String getPath() { + return this.getServiceId() + "/" + this.getType() + "/" + this.getKey(); + } + @Override public String toString() { return "service: " + serviceId + ". type: " + type + ". 
key: " + key + "."; diff --git a/src/main/java/com/takipi/oss/storage/fs/SimplePathRecord.java b/src/main/java/com/takipi/oss/storage/fs/SimplePathRecord.java new file mode 100644 index 0000000..8997db3 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/fs/SimplePathRecord.java @@ -0,0 +1,53 @@ +package com.takipi.oss.storage.fs; + +public class SimplePathRecord implements BaseRecord { + + private final String path; + + private final String[] pathParts; + + public static SimplePathRecord newRecord(String path) { + return new SimplePathRecord(path); + } + + private SimplePathRecord(String path) { + this.path = path; + + this.pathParts = path.split("/", 3); + } + + public String getPath() { + return path; + } + + @Override + public String getServiceId() { + if (pathParts.length > 0) + { + return pathParts[0]; + } + + return ""; + } + + @Override + public String getType() { + if (pathParts.length > 1) + { + return pathParts[1]; + } + + return ""; + } + + @Override + public String getKey() { + if (pathParts.length > 2) + { + return pathParts[2]; + } + + return ""; + } +} + diff --git a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java index f9bd808..568dd39 100644 --- a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java @@ -1,11 +1,12 @@ package com.takipi.oss.storage.fs.api; -import com.takipi.oss.storage.data.simple.SimpleSearchRequest; - import java.io.IOException; import java.io.InputStream; -public interface Filesystem extends FilesystemHealth { +import com.takipi.oss.storage.fs.BaseRecord; +import com.takipi.oss.storage.fs.Record; + +public interface Filesystem extends FilesystemHealth { /** * Put record * @@ -80,4 +81,12 @@ public interface Filesystem extends FilesystemHealth { */ SearchResult search(SearchRequest searchRequest) throws IOException; + /** + * Convert string path to record object + * + * @param path + * @return record + */ + BaseRecord pathToRecord(String path); + } diff --git a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java index 4ee4138..bb68daa 100644 --- a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java @@ -1,11 +1,12 @@ package com.takipi.oss.storage.fs.folder; +import com.takipi.oss.storage.fs.BaseRecord; import com.takipi.oss.storage.fs.api.Filesystem; import org.apache.commons.io.IOUtils; import java.io.*; -public abstract class FolderFilesystem extends FolderFilesystemHealth implements Filesystem { +public abstract class FolderFilesystem extends FolderFilesystemHealth implements Filesystem { public FolderFilesystem(String rootFolder, double maxUsedStoragePercentage) { super(rootFolder, maxUsedStoragePercentage); } diff --git a/src/main/java/com/takipi/oss/storage/fs/folder/record/HashSubfolderFilesystem.java b/src/main/java/com/takipi/oss/storage/fs/folder/record/HashSubfolderFilesystem.java index 8c7e153..61cb4df 100644 --- a/src/main/java/com/takipi/oss/storage/fs/folder/record/HashSubfolderFilesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/folder/record/HashSubfolderFilesystem.java @@ -7,9 +7,9 @@ import com.google.common.base.Charsets; import com.google.common.hash.HashFunction; import com.google.common.hash.Hashing; -import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.BaseRecord; -public 
abstract class HashSubfolderFilesystem extends RecordFilesystem { +public abstract class HashSubfolderFilesystem extends RecordFilesystem { private HashFunction func; public HashSubfolderFilesystem(String rootFolder, double maxUsedStoragePercentage) { @@ -19,7 +19,7 @@ public HashSubfolderFilesystem(String rootFolder, double maxUsedStoragePercentag } @Override - protected String buildPath(Record record) { + protected String buildPath(T record) { String key = record.getKey(); String hashKey = hashKey(key); diff --git a/src/main/java/com/takipi/oss/storage/fs/folder/record/RecordFilesystem.java b/src/main/java/com/takipi/oss/storage/fs/folder/record/RecordFilesystem.java index 72dd791..cca7f1c 100644 --- a/src/main/java/com/takipi/oss/storage/fs/folder/record/RecordFilesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/folder/record/RecordFilesystem.java @@ -8,19 +8,20 @@ import com.google.common.base.Predicate; import com.takipi.oss.storage.data.simple.SimpleSearchResponse; -import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.BaseRecord; +import com.takipi.oss.storage.fs.SimplePathRecord; import com.takipi.oss.storage.fs.api.SearchRequest; import com.takipi.oss.storage.fs.api.SearchResult; import com.takipi.oss.storage.fs.folder.FolderFilesystem; import com.takipi.oss.storage.helper.FilesystemUtil; -public class RecordFilesystem extends FolderFilesystem { +public class RecordFilesystem extends FolderFilesystem { public RecordFilesystem(String rootFolder, double maxUsedStoragePercentage) { super(rootFolder, maxUsedStoragePercentage); } @Override - protected String buildPath(Record record) { + protected String buildPath(T record) { Path recordPath = Paths.get(root.getPath(), escape(record.getServiceId()), escape(record.getType()), escape(record.getKey())); @@ -92,4 +93,9 @@ public File getFoundFile() return foundFile; } } + + @Override + public BaseRecord pathToRecord(String path) { + return SimplePathRecord.newRecord(path); + } } diff --git a/src/main/java/com/takipi/oss/storage/fs/folder/simple/SimpleFilesystem.java b/src/main/java/com/takipi/oss/storage/fs/folder/simple/SimpleFilesystem.java index 7f36a17..d009046 100644 --- a/src/main/java/com/takipi/oss/storage/fs/folder/simple/SimpleFilesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/folder/simple/SimpleFilesystem.java @@ -7,22 +7,21 @@ import com.google.common.base.Predicate; import com.takipi.oss.storage.data.simple.SimpleSearchResponse; +import com.takipi.oss.storage.fs.BaseRecord; +import com.takipi.oss.storage.fs.SimplePathRecord; import com.takipi.oss.storage.fs.api.SearchRequest; import com.takipi.oss.storage.fs.api.SearchResult; import com.takipi.oss.storage.fs.folder.FolderFilesystem; import com.takipi.oss.storage.helper.FilesystemUtil; -import com.takipi.oss.storage.resources.fs.JsonSimpleSearchStorageResource; -import javax.ws.rs.core.Response; - -public class SimpleFilesystem extends FolderFilesystem { +public class SimpleFilesystem extends FolderFilesystem { public SimpleFilesystem(String rootFolder, double maxUsedStoragePercentage) { super(rootFolder, maxUsedStoragePercentage); } @Override - protected String buildPath(String record) { - Path recordPath = Paths.get(root.getPath(), escape(record)); + protected String buildPath(SimplePathRecord record) { + Path recordPath = Paths.get(root.getPath(), escape(record.getPath())); return recordPath.toString(); } @@ -44,7 +43,7 @@ public SearchResult search(SearchRequest searchRequest) throws IOException { } String relFSPath = 
result.getAbsolutePath().replace(getRoot().getAbsolutePath(), ""); - String data = FilesystemUtil.read(this, relFSPath, searchRequest.getEncodingType()); + String data = FilesystemUtil.read(this, SimplePathRecord.newRecord(relFSPath), searchRequest.getEncodingType()); if (data == null) { return null; @@ -92,4 +91,9 @@ public File getFoundFile() return foundFile; } } + + @Override + public BaseRecord pathToRecord(String path) { + return SimplePathRecord.newRecord(path); + } } diff --git a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java index c111030..ab1a9cc 100644 --- a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java @@ -1,20 +1,23 @@ package com.takipi.oss.storage.fs.s3; +import java.io.IOException; +import java.io.InputStream; + import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.*; +import com.amazonaws.services.s3.model.ListObjectsRequest; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.S3Object; +import com.amazonaws.services.s3.model.S3ObjectSummary; import com.takipi.oss.storage.data.simple.SimpleSearchResponse; -import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.BaseRecord; +import com.takipi.oss.storage.fs.SimplePathRecord; import com.takipi.oss.storage.fs.api.Filesystem; import com.takipi.oss.storage.fs.api.SearchRequest; import com.takipi.oss.storage.fs.api.SearchResult; -import com.takipi.oss.storage.fs.folder.simple.SimpleFilesystem; import com.takipi.oss.storage.helper.FilesystemUtil; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; - -public class S3Filesystem implements Filesystem { +public class S3Filesystem implements Filesystem { private final AmazonS3 amazonS3; private final String bucket; @@ -81,8 +84,13 @@ public boolean healthy() { return true; } + @Override + public BaseRecord pathToRecord(String path) { + return SimplePathRecord.newRecord(path); + } + private String keyOf(T record) { - return record.getServiceId() + "/" + record.getType() + "/" + record.getKey(); + return record.getPath(); } } diff --git a/src/main/java/com/takipi/oss/storage/helper/FilesystemUtil.java b/src/main/java/com/takipi/oss/storage/helper/FilesystemUtil.java index b418ae6..cb48cad 100644 --- a/src/main/java/com/takipi/oss/storage/helper/FilesystemUtil.java +++ b/src/main/java/com/takipi/oss/storage/helper/FilesystemUtil.java @@ -14,6 +14,7 @@ import com.google.common.base.Predicate; import com.takipi.oss.storage.data.EncodingType; +import com.takipi.oss.storage.fs.BaseRecord; import com.takipi.oss.storage.fs.api.Filesystem; public class FilesystemUtil { @@ -23,7 +24,7 @@ public static String fixPath(String path) { return path.replace("/", File.separator).replace("\\", File.separator); } - public static String read(Filesystem fs, T record, EncodingType encodingType) { + public static String read(Filesystem fs, T record, EncodingType encodingType) { InputStream is = null; try { diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleFetchStorageResource.java index d9de99d..8b2947f 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleFetchStorageResource.java +++ 
b/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleFetchStorageResource.java @@ -1,13 +1,5 @@ package com.takipi.oss.storage.resources.fs; -import com.codahale.metrics.annotation.Timed; -import com.takipi.oss.storage.data.simple.SimpleFetchRequest; -import com.takipi.oss.storage.data.simple.SimpleFetchResponse; -import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.helper.FilesystemUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import javax.ws.rs.Consumes; import javax.ws.rs.POST; import javax.ws.rs.Path; @@ -15,6 +7,16 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.codahale.metrics.annotation.Timed; +import com.takipi.oss.storage.data.simple.SimpleFetchRequest; +import com.takipi.oss.storage.data.simple.SimpleFetchResponse; +import com.takipi.oss.storage.fs.BaseRecord; +import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.helper.FilesystemUtil; + @Path("/storage/v1/json/simplefetch") @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) @@ -22,9 +24,9 @@ public class JsonSimpleFetchStorageResource { private static final Logger logger = LoggerFactory.getLogger(JsonSimpleFetchStorageResource.class); - private final Filesystem filesystem; + private final Filesystem filesystem; - public JsonSimpleFetchStorageResource(Filesystem filesystem) { + public JsonSimpleFetchStorageResource(Filesystem filesystem) { this.filesystem = filesystem; } @@ -40,8 +42,8 @@ public Response post(SimpleFetchRequest request) { private Response handleResponse(SimpleFetchRequest request) { try { - String data = FilesystemUtil.read(filesystem, request.path, request.encodingType); - + String data = FilesystemUtil.read(filesystem, filesystem.pathToRecord(request.path), request.encodingType); + if (data != null) { return Response.ok(new SimpleFetchResponse(data)).build(); } else { From 0f41f66d1f2fe2b2028ef22fb498a90af9a1e0ab Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Tue, 21 Feb 2017 16:24:15 +0200 Subject: [PATCH 03/66] fix double / --- .../java/com/takipi/oss/storage/fs/SimplePathRecord.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/fs/SimplePathRecord.java b/src/main/java/com/takipi/oss/storage/fs/SimplePathRecord.java index 8997db3..d4bc0e6 100644 --- a/src/main/java/com/takipi/oss/storage/fs/SimplePathRecord.java +++ b/src/main/java/com/takipi/oss/storage/fs/SimplePathRecord.java @@ -11,13 +11,14 @@ public static SimplePathRecord newRecord(String path) { } private SimplePathRecord(String path) { - this.path = path; + this.path = path.replaceAll("//", "/"); this.pathParts = path.split("/", 3); } + @Override public String getPath() { - return path; + return path; } @Override From b0c9210393d856381fec9962ed3a76f96f361522 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Tue, 21 Feb 2017 17:25:12 +0200 Subject: [PATCH 04/66] final touches - credits and settings.yml --- README.md | 3 +++ settings.yml | 13 +++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 131308c..b08c912 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,13 @@ Takipi Storage ============== +With thanks to moovel for developing this version supporting s3: https://github.com/moovel/takipi-storage/tree/s3-storage + Build and run: - clone the repo - `cd takipi-storage` - `mvn compile package` +- edit settings.yml to contain <bucket>, <access key>, <secret key> to 
access your s3 bucket - `java -jar target/takipi-storage-1.7.0.jar server settings.yml` Deploy: diff --git a/settings.yml b/settings.yml index 09361ef..07078f6 100644 --- a/settings.yml +++ b/settings.yml @@ -1,8 +1,17 @@ -folderPath: /opt/takipi-storage/storage -maxUsedStoragePercentage: 0.95 enableCors: true corsOrigins: "*" +s3Fs: + bucket: + credentials: + accessKey: + secretKey: + +#folderFs: +# folderPath: /opt/takipi-storage/storage +# maxUsedStoragePercentage: 0.95 + + server: # softNofileLimit: 1000 # hardNofileLimit: 1000 From 1940c18f382db3fc64532f7e91a571061480ca4b Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Tue, 21 Feb 2017 18:19:20 +0200 Subject: [PATCH 05/66] merge with moovel s3 --- pom.xml | 5 + .../storage/TakipiStorageConfiguration.java | 133 +++++++++++++++--- .../takipi/oss/storage/TakipiStorageMain.java | 66 +++++++-- .../data/simple/SimpleSearchRequest.java | 23 ++- .../data/simple/SimpleSearchResponse.java | 4 +- .../takipi/oss/storage/fs/api/Filesystem.java | 28 +++- .../oss/storage/fs/api/SearchRequest.java | 15 ++ .../oss/storage/fs/api/SearchResult.java | 9 ++ .../storage/fs/folder/FolderFilesystem.java | 11 +- .../record/HashSubfolderFilesystem.java | 4 +- .../fs/folder/record/RecordFilesystem.java | 70 +++++++++ .../fs/folder/simple/SimpleFilesystem.java | 71 ++++++++++ .../oss/storage/fs/s3/S3Filesystem.java | 88 ++++++++++++ .../storage/health/FilesystemHealthCheck.java | 13 +- .../oss/storage/helper/FilesystemUtil.java | 2 +- .../MachineInfoOnlyStatusStorageResource.java | 49 +++++++ .../diag/NoOpTreeStorageResource.java | 25 ++++ .../resources/diag/StatusStorageResource.java | 2 +- .../resources/diag/TreeStorageResource.java | 2 +- .../resources/fs/BinaryStorageResource.java | 46 +++--- .../fs/JsonMultiDeleteStorageResource.java | 32 ++--- .../fs/JsonMultiFetchStorageResource.java | 35 +++-- .../fs/JsonSimpleFetchStorageResource.java | 29 ++-- .../fs/JsonSimpleSearchStorageResource.java | 91 +++--------- .../base/FolderFileSystemStorageResource.java | 14 -- .../base/HashFileSystemStorageResource.java | 17 --- .../base/SimpleFileSystemStorageResource.java | 16 --- 27 files changed, 654 insertions(+), 246 deletions(-) create mode 100644 src/main/java/com/takipi/oss/storage/fs/api/SearchRequest.java create mode 100644 src/main/java/com/takipi/oss/storage/fs/api/SearchResult.java create mode 100644 src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java create mode 100644 src/main/java/com/takipi/oss/storage/resources/diag/MachineInfoOnlyStatusStorageResource.java create mode 100644 src/main/java/com/takipi/oss/storage/resources/diag/NoOpTreeStorageResource.java delete mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/base/FolderFileSystemStorageResource.java delete mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/base/HashFileSystemStorageResource.java delete mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/base/SimpleFileSystemStorageResource.java diff --git a/pom.xml b/pom.xml index 160ab5f..5c3b4df 100644 --- a/pom.xml +++ b/pom.xml @@ -30,6 +30,11 @@ junit 4.12 + + com.amazonaws + aws-java-sdk-s3 + 1.11.13 + diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java index 8693630..05508e8 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java @@ -1,7 +1,9 @@ package com.takipi.oss.storage; +import 
javax.validation.Valid; import javax.validation.constraints.Max; import javax.validation.constraints.Min; +import javax.validation.constraints.NotNull; import org.hibernate.validator.constraints.NotEmpty; @@ -10,12 +12,100 @@ import io.dropwizard.Configuration; public class TakipiStorageConfiguration extends Configuration { - @NotEmpty - private String folderPath; - @Min(0) - @Max(1) - private double maxUsedStoragePercentage = 0.9; + @Valid + @JsonProperty + private FolderFs folderFs; + + public static class FolderFs { + @NotEmpty + private String folderPath; + + @Min(0) + @Max(1) + private double maxUsedStoragePercentage = 0.9; + + @JsonProperty + public String getFolderPath() { + return folderPath; + } + + @JsonProperty + public void setFolderPath(String folderPath) { + this.folderPath = folderPath; + } + + @JsonProperty + public double getMaxUsedStoragePercentage() { + return maxUsedStoragePercentage; + } + + @JsonProperty + public void setMaxUsedStoragePercentage(double maxUsedStoragePercentage) { + this.maxUsedStoragePercentage = maxUsedStoragePercentage; + } + } + + @Valid + @JsonProperty + private S3Fs s3Fs; + + public static class S3Fs { + @NotEmpty + private String bucket; + + @NotNull + @Valid + private Credentials credentials; + + public static class Credentials { + @NotEmpty + private String accessKey; + + @NotEmpty + private String secretKey; + + @JsonProperty + public String getAccessKey() { + return accessKey; + } + + @JsonProperty + public void setAccessKey(String accessKey) { + this.accessKey = accessKey; + } + + @JsonProperty + public String getSecretKey() { + return secretKey; + } + + @JsonProperty + public void setSecretKey(String secretKey) { + this.secretKey = secretKey; + } + } + + @JsonProperty + public String getBucket() { + return bucket; + } + + @JsonProperty + public void setBucket(String bucket) { + this.bucket = bucket; + } + + @JsonProperty + public Credentials getCredentials() { + return credentials; + } + + @JsonProperty + public void setCredentials(Credentials credentials) { + this.credentials = credentials; + } + } private boolean enableCors; @@ -32,33 +122,42 @@ public void setEnableCors(boolean enableCors) { this.enableCors = enableCors; } + @JsonProperty - public double getMaxUsedStoragePercentage() { - return maxUsedStoragePercentage; + public String getCorsOrigins() { + return corsOrigins; } @JsonProperty - public void setMaxUsedStoragePercentage(double maxUsedStoragePercentage) { - this.maxUsedStoragePercentage = maxUsedStoragePercentage; + public void setCorsOrigins(String corsOrigins) { + this.corsOrigins = corsOrigins; } @JsonProperty - public String getCorsOrigins() { - return corsOrigins; + public FolderFs getFolderFs() { + return folderFs; + } + + public boolean hasFolderFs() { + return folderFs != null; } @JsonProperty - public void setCorsOrigins(String corsOrigins) { - this.corsOrigins = corsOrigins; + public void setFolderFs(FolderFs folderFs) { + this.folderFs = folderFs; } @JsonProperty - public String getFolderPath() { - return folderPath; + public S3Fs getS3Fs() { + return s3Fs; } @JsonProperty - public void setFolderPath(String folderPath) { - this.folderPath = folderPath; + public void setS3Fs(S3Fs s3Fs) { + this.s3Fs = s3Fs; + } + + public boolean hasS3Fs() { + return s3Fs != null; } } diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index c998dae..ace2506 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ 
b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -1,5 +1,13 @@ package com.takipi.oss.storage; +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3Client; +import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.folder.simple.SimpleFilesystem; +import com.takipi.oss.storage.fs.s3.S3Filesystem; +import com.takipi.oss.storage.resources.diag.*; import io.dropwizard.Application; import io.dropwizard.setup.Bootstrap; import io.dropwizard.setup.Environment; @@ -12,17 +20,24 @@ import org.eclipse.jetty.servlets.CrossOriginFilter; import com.takipi.oss.storage.health.FilesystemHealthCheck; + import com.takipi.oss.storage.resources.diag.PingStorageResource; import com.takipi.oss.storage.resources.diag.StatusStorageResource; import com.takipi.oss.storage.resources.diag.TreeStorageResource; import com.takipi.oss.storage.resources.diag.VersionStorageResource; + import com.takipi.oss.storage.resources.fs.BinaryStorageResource; import com.takipi.oss.storage.resources.fs.JsonMultiDeleteStorageResource; import com.takipi.oss.storage.resources.fs.JsonMultiFetchStorageResource; import com.takipi.oss.storage.resources.fs.JsonSimpleFetchStorageResource; import com.takipi.oss.storage.resources.fs.JsonSimpleSearchStorageResource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TakipiStorageMain extends Application { + + private final static Logger log = LoggerFactory.getLogger(TakipiStorageMain.class); + public static void main(String[] args) throws Exception { new TakipiStorageMain().run(args); } @@ -43,19 +58,52 @@ public void run(TakipiStorageConfiguration configuration, Environment environmen enableCors(configuration, environment); } - environment.jersey().register(new BinaryStorageResource(configuration)); - environment.jersey().register(new JsonMultiFetchStorageResource(configuration)); - environment.jersey().register(new JsonMultiDeleteStorageResource(configuration)); - - environment.jersey().register(new JsonSimpleFetchStorageResource(configuration)); - environment.jersey().register(new JsonSimpleSearchStorageResource(configuration)); - + Filesystem filesystem = configureFilesystem(configuration, environment); + + environment.healthChecks().register("filesystem", new FilesystemHealthCheck(filesystem)); + environment.jersey().register(new BinaryStorageResource(filesystem)); + environment.jersey().register(new JsonMultiFetchStorageResource(filesystem)); + environment.jersey().register(new JsonMultiDeleteStorageResource(filesystem)); + environment.jersey().register(new JsonSimpleFetchStorageResource(filesystem)); + environment.jersey().register(new JsonSimpleSearchStorageResource(filesystem)); environment.jersey().register(new PingStorageResource()); environment.jersey().register(new VersionStorageResource()); + } + + private Filesystem configureFilesystem(TakipiStorageConfiguration configuration, Environment environment) { + if(configuration.hasFolderFs()) { + return configureFolderFilesystem(configuration, environment); + } else if(configuration.hasS3Fs()) { + return configureS3Filesystem(configuration, environment); + } + else { + throw new IllegalArgumentException("Configuration problem. 
Please configure either folderFs or s3Fs config property."); + } + } + + private Filesystem configureFolderFilesystem(TakipiStorageConfiguration configuration, Environment environment) { + log.debug("Using local filesystem at: {}", configuration.getFolderFs().getFolderPath()); + environment.jersey().register(new TreeStorageResource(configuration)); environment.jersey().register(new StatusStorageResource(configuration)); - - environment.healthChecks().register("filesystem", new FilesystemHealthCheck(configuration)); + return new SimpleFilesystem(configuration.getFolderFs().getFolderPath(), configuration.getFolderFs().getMaxUsedStoragePercentage()); + } + + private Filesystem configureS3Filesystem(TakipiStorageConfiguration configuration, Environment environment) { + // Setup basically mocked versions of info resources. + environment.jersey().register(new NoOpTreeStorageResource()); + environment.jersey().register(new MachineInfoOnlyStatusStorageResource()); + + // Setup Amazon S3 client + TakipiStorageConfiguration.S3Fs.Credentials credentials = configuration.getS3Fs().getCredentials(); + AWSCredentials awsCredentials = new BasicAWSCredentials(credentials.getAccessKey(), credentials.getSecretKey()); + AmazonS3 amazonS3 = new AmazonS3Client(awsCredentials); + + // S3 bucket + String bucket = configuration.getS3Fs().getBucket(); + log.debug("Using AWS S3 based filesystem with bucket: {}", bucket); + + return new S3Filesystem(amazonS3, bucket); } private void enableCors(TakipiStorageConfiguration configuration, Environment environment) { diff --git a/src/main/java/com/takipi/oss/storage/data/simple/SimpleSearchRequest.java b/src/main/java/com/takipi/oss/storage/data/simple/SimpleSearchRequest.java index 199df63..3bbdb23 100644 --- a/src/main/java/com/takipi/oss/storage/data/simple/SimpleSearchRequest.java +++ b/src/main/java/com/takipi/oss/storage/data/simple/SimpleSearchRequest.java @@ -1,10 +1,31 @@ package com.takipi.oss.storage.data.simple; import com.takipi.oss.storage.data.EncodingType; +import com.takipi.oss.storage.fs.api.SearchRequest; -public class SimpleSearchRequest { +public class SimpleSearchRequest implements SearchRequest { public EncodingType encodingType; public String name; public String baseSearchPath; public boolean preventDuplicates; + + @Override + public EncodingType getEncodingType() { + return encodingType; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getBaseSearchPath() { + return baseSearchPath; + } + + @Override + public boolean shouldPreventDuplicates() { + return preventDuplicates; + } } diff --git a/src/main/java/com/takipi/oss/storage/data/simple/SimpleSearchResponse.java b/src/main/java/com/takipi/oss/storage/data/simple/SimpleSearchResponse.java index cad24b5..8dc82e4 100644 --- a/src/main/java/com/takipi/oss/storage/data/simple/SimpleSearchResponse.java +++ b/src/main/java/com/takipi/oss/storage/data/simple/SimpleSearchResponse.java @@ -1,6 +1,8 @@ package com.takipi.oss.storage.data.simple; -public class SimpleSearchResponse { +import com.takipi.oss.storage.fs.api.SearchResult; + +public class SimpleSearchResponse implements SearchResult { String data; String path; diff --git a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java index 2320903..f9bd808 100644 --- a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java @@ -1,5 +1,7 @@ package 
com.takipi.oss.storage.fs.api; +import com.takipi.oss.storage.data.simple.SimpleSearchRequest; + import java.io.IOException; import java.io.InputStream; @@ -9,8 +11,8 @@ public interface Filesystem extends FilesystemHealth { * * @param record * - the record to save the input stream to - * @param bytes - * - the byte array to save + * @param is + * - the input stream to save * @throws IOException * - if there's an error */ @@ -56,4 +58,26 @@ public interface Filesystem extends FilesystemHealth { * - if there's an error */ long size(T record) throws IOException; + + + /** + * Returns the {@link SearchResult} that matches the search query. + * + * @return + * @throws IOException + * @param request + */ + + /** + * Returns the {@link SearchResult} that matches the search query. + * + * @param searchRequest + * - the search request + * @return + * - the result of the search or null if nothing was found. + * @throws IOException + * - if there's an error + */ + SearchResult search(SearchRequest searchRequest) throws IOException; + } diff --git a/src/main/java/com/takipi/oss/storage/fs/api/SearchRequest.java b/src/main/java/com/takipi/oss/storage/fs/api/SearchRequest.java new file mode 100644 index 0000000..d55df9d --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/fs/api/SearchRequest.java @@ -0,0 +1,15 @@ +package com.takipi.oss.storage.fs.api; + +import com.takipi.oss.storage.data.EncodingType; + +public interface SearchRequest { + + EncodingType getEncodingType(); + + String getName(); + + String getBaseSearchPath(); + + boolean shouldPreventDuplicates(); + +} diff --git a/src/main/java/com/takipi/oss/storage/fs/api/SearchResult.java b/src/main/java/com/takipi/oss/storage/fs/api/SearchResult.java new file mode 100644 index 0000000..9719457 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/fs/api/SearchResult.java @@ -0,0 +1,9 @@ +package com.takipi.oss.storage.fs.api; + +public interface SearchResult { + + String getData(); + + String getPath(); + +} diff --git a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java index 3d97f38..4ee4138 100644 --- a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java @@ -1,16 +1,9 @@ package com.takipi.oss.storage.fs.folder; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - +import com.takipi.oss.storage.fs.api.Filesystem; import org.apache.commons.io.IOUtils; -import com.takipi.oss.storage.fs.api.Filesystem; +import java.io.*; public abstract class FolderFilesystem extends FolderFilesystemHealth implements Filesystem { public FolderFilesystem(String rootFolder, double maxUsedStoragePercentage) { diff --git a/src/main/java/com/takipi/oss/storage/fs/folder/record/HashSubfolderFilesystem.java b/src/main/java/com/takipi/oss/storage/fs/folder/record/HashSubfolderFilesystem.java index 7a2c7c5..8c7e153 100644 --- a/src/main/java/com/takipi/oss/storage/fs/folder/record/HashSubfolderFilesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/folder/record/HashSubfolderFilesystem.java @@ -9,7 +9,7 @@ import com.google.common.hash.Hashing; import com.takipi.oss.storage.fs.Record; -public class HashSubfolderFilesystem extends RecordFilesystem { +public abstract class HashSubfolderFilesystem extends RecordFilesystem 
{ private HashFunction func; public HashSubfolderFilesystem(String rootFolder, double maxUsedStoragePercentage) { @@ -40,4 +40,6 @@ private String hashKey(String key) { return sb.toString(); } + + } diff --git a/src/main/java/com/takipi/oss/storage/fs/folder/record/RecordFilesystem.java b/src/main/java/com/takipi/oss/storage/fs/folder/record/RecordFilesystem.java index d364eae..72dd791 100644 --- a/src/main/java/com/takipi/oss/storage/fs/folder/record/RecordFilesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/folder/record/RecordFilesystem.java @@ -1,10 +1,18 @@ package com.takipi.oss.storage.fs.folder.record; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; +import com.google.common.base.Predicate; +import com.takipi.oss.storage.data.simple.SimpleSearchResponse; import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.api.SearchRequest; +import com.takipi.oss.storage.fs.api.SearchResult; import com.takipi.oss.storage.fs.folder.FolderFilesystem; +import com.takipi.oss.storage.helper.FilesystemUtil; public class RecordFilesystem extends FolderFilesystem { public RecordFilesystem(String rootFolder, double maxUsedStoragePercentage) { @@ -22,4 +30,66 @@ protected String buildPath(Record record) { protected String escape(String value) { return value.replace("..", "__").replace("/", "-").replace("\\", "-"); } + + @Override + public SearchResult search(SearchRequest searchRequest) throws IOException { + File searchRoot = new File(getRoot(), FilesystemUtil.fixPath(searchRequest.getBaseSearchPath())); + + ResourceFileCallback fileCallback = new ResourceFileCallback(searchRequest.getName(), searchRequest.shouldPreventDuplicates()); + FilesystemUtil.listFilesRecursively(searchRoot, fileCallback); + File result = fileCallback.getFoundFile(); + + if (result == null) { + return null; + } + + String relFSPath = result.getAbsolutePath().replace(getRoot().getAbsolutePath(), ""); + String data = FilesystemUtil.encode(new FileInputStream(relFSPath), searchRequest.getEncodingType()); + + if (data == null) { + return null; + } + + return new SimpleSearchResponse(data, relFSPath.replace(searchRequest.getName(), "")); + } + + private static class ResourceFileCallback implements Predicate { + private final String resourceName; + private final boolean preventDuplicates; + + private File foundFile; + + protected ResourceFileCallback(String resourceName, boolean preventDuplicates) + { + this.resourceName = resourceName; + this.preventDuplicates = preventDuplicates; + + this.foundFile = null; + } + + @Override + public boolean apply(File file) + { + if (!resourceName.equals(file.getName())) + { + return false; + } + + if ((preventDuplicates) && + (foundFile != null)) + { + foundFile = null; // never find more than one result if preventing duplicates + return true; + } + + foundFile = file; + + return !preventDuplicates; // if we don't prevent duplicates, we stop right now + } + + public File getFoundFile() + { + return foundFile; + } + } } diff --git a/src/main/java/com/takipi/oss/storage/fs/folder/simple/SimpleFilesystem.java b/src/main/java/com/takipi/oss/storage/fs/folder/simple/SimpleFilesystem.java index 058a2cb..7f36a17 100644 --- a/src/main/java/com/takipi/oss/storage/fs/folder/simple/SimpleFilesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/folder/simple/SimpleFilesystem.java @@ -1,10 +1,19 @@ package com.takipi.oss.storage.fs.folder.simple; +import java.io.File; +import 
java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; +import com.google.common.base.Predicate; +import com.takipi.oss.storage.data.simple.SimpleSearchResponse; +import com.takipi.oss.storage.fs.api.SearchRequest; +import com.takipi.oss.storage.fs.api.SearchResult; import com.takipi.oss.storage.fs.folder.FolderFilesystem; import com.takipi.oss.storage.helper.FilesystemUtil; +import com.takipi.oss.storage.resources.fs.JsonSimpleSearchStorageResource; + +import javax.ws.rs.core.Response; public class SimpleFilesystem extends FolderFilesystem { public SimpleFilesystem(String rootFolder, double maxUsedStoragePercentage) { @@ -21,4 +30,66 @@ protected String buildPath(String record) { protected String escape(String value) { return FilesystemUtil.fixPath(value); } + + @Override + public SearchResult search(SearchRequest searchRequest) throws IOException { + File searchRoot = new File(getRoot(), FilesystemUtil.fixPath(searchRequest.getBaseSearchPath())); + + ResourceFileCallback fileCallback = new ResourceFileCallback(searchRequest.getName(), searchRequest.shouldPreventDuplicates()); + FilesystemUtil.listFilesRecursively(searchRoot, fileCallback); + File result = fileCallback.getFoundFile(); + + if (result == null) { + return null; + } + + String relFSPath = result.getAbsolutePath().replace(getRoot().getAbsolutePath(), ""); + String data = FilesystemUtil.read(this, relFSPath, searchRequest.getEncodingType()); + + if (data == null) { + return null; + } + + return new SimpleSearchResponse(data, relFSPath.replace(searchRequest.getName(), "")); + } + + private static class ResourceFileCallback implements Predicate { + private final String resourceName; + private final boolean preventDuplicates; + + private File foundFile; + + protected ResourceFileCallback(String resourceName, boolean preventDuplicates) + { + this.resourceName = resourceName; + this.preventDuplicates = preventDuplicates; + + this.foundFile = null; + } + + @Override + public boolean apply(File file) + { + if (!resourceName.equals(file.getName())) + { + return false; + } + + if ((preventDuplicates) && + (foundFile != null)) + { + foundFile = null; // never find more than one result if preventing duplicates + return true; + } + + foundFile = file; + + return !preventDuplicates; // if we don't prevent duplicates, we stop right now + } + + public File getFoundFile() + { + return foundFile; + } + } } diff --git a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java new file mode 100644 index 0000000..c111030 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java @@ -0,0 +1,88 @@ +package com.takipi.oss.storage.fs.s3; + +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.*; +import com.takipi.oss.storage.data.simple.SimpleSearchResponse; +import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.api.SearchRequest; +import com.takipi.oss.storage.fs.api.SearchResult; +import com.takipi.oss.storage.fs.folder.simple.SimpleFilesystem; +import com.takipi.oss.storage.helper.FilesystemUtil; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; + +public class S3Filesystem implements Filesystem { + + private final AmazonS3 amazonS3; + private final String bucket; + + public S3Filesystem(AmazonS3 amazonS3, String bucket) { + this.amazonS3 = amazonS3; + this.bucket = bucket; + } + + @Override + public void 
put(T record, InputStream is) throws IOException { + ObjectMetadata objectMetadata = new ObjectMetadata(); + amazonS3.putObject(bucket, keyOf(record), is, objectMetadata); + } + + @Override + public InputStream get(T record) throws IOException { + return amazonS3.getObject(bucket, keyOf(record)).getObjectContent(); + } + + @Override + public void delete(T record) throws IOException { + amazonS3.deleteObject(bucket, keyOf(record)); + } + + @Override + public boolean exists(T record) throws IOException { + return amazonS3.doesObjectExist(bucket, keyOf(record)); + } + + @Override + public long size(T record) throws IOException { + return amazonS3.getObjectMetadata(bucket, keyOf(record)).getContentLength(); + } + + @Override + public SearchResult search(SearchRequest searchRequest) throws IOException { + + // Start a prefix search + ListObjectsRequest listObjectsRequest = new ListObjectsRequest(); + listObjectsRequest.setBucketName(bucket); + listObjectsRequest.setPrefix(searchRequest.getBaseSearchPath()); + ObjectListing objectListing = amazonS3.listObjects(listObjectsRequest); + + // Just select the first object + S3Object s3Object = null; + for(S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { + if(objectSummary.getKey().contains(searchRequest.getName())) { + s3Object = amazonS3.getObject(bucket, objectSummary.getKey()); + break; + } + } + + if (s3Object == null) { + return null; + } else { + String data = FilesystemUtil.encode(s3Object.getObjectContent(), searchRequest.getEncodingType()); + return new SimpleSearchResponse(data, searchRequest.getBaseSearchPath()); + } + } + + @Override + public boolean healthy() { + return true; + } + + private String keyOf(T record) { + return record.getServiceId() + "/" + record.getType() + "/" + record.getKey(); + } + +} diff --git a/src/main/java/com/takipi/oss/storage/health/FilesystemHealthCheck.java b/src/main/java/com/takipi/oss/storage/health/FilesystemHealthCheck.java index 53d4735..b14c71c 100644 --- a/src/main/java/com/takipi/oss/storage/health/FilesystemHealthCheck.java +++ b/src/main/java/com/takipi/oss/storage/health/FilesystemHealthCheck.java @@ -1,21 +1,18 @@ package com.takipi.oss.storage.health; import com.codahale.metrics.health.HealthCheck; -import com.takipi.oss.storage.TakipiStorageConfiguration; -import com.takipi.oss.storage.fs.api.FilesystemHealth; -import com.takipi.oss.storage.fs.folder.FolderFilesystemHealth; +import com.takipi.oss.storage.fs.api.Filesystem; public class FilesystemHealthCheck extends HealthCheck { - private final FilesystemHealth fsh; + private final Filesystem filesystem; - public FilesystemHealthCheck(TakipiStorageConfiguration configuration) { - this.fsh = new FolderFilesystemHealth(configuration.getFolderPath(), - configuration.getMaxUsedStoragePercentage()); + public FilesystemHealthCheck(Filesystem filesystem) { + this.filesystem = filesystem; } @Override protected Result check() throws Exception { - if (fsh.healthy()) { + if (filesystem.healthy()) { return Result.healthy(); } else { return Result.unhealthy("Problem with filesystem"); diff --git a/src/main/java/com/takipi/oss/storage/helper/FilesystemUtil.java b/src/main/java/com/takipi/oss/storage/helper/FilesystemUtil.java index 5db3982..b418ae6 100644 --- a/src/main/java/com/takipi/oss/storage/helper/FilesystemUtil.java +++ b/src/main/java/com/takipi/oss/storage/helper/FilesystemUtil.java @@ -41,7 +41,7 @@ public static String read(Filesystem fs, T record, EncodingType encodingT } } - private static String encode(InputStream is, 
EncodingType type) throws IOException { + public static String encode(InputStream is, EncodingType type) throws IOException { switch (type) { case PLAIN: case JSON: { diff --git a/src/main/java/com/takipi/oss/storage/resources/diag/MachineInfoOnlyStatusStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/diag/MachineInfoOnlyStatusStorageResource.java new file mode 100644 index 0000000..961938e --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/diag/MachineInfoOnlyStatusStorageResource.java @@ -0,0 +1,49 @@ +package com.takipi.oss.storage.resources.diag; + +import com.codahale.metrics.annotation.Timed; +import com.takipi.oss.storage.data.status.MachineStatus; +import com.takipi.oss.storage.helper.StatusUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.Consumes; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +@Path("/storage/v1/diag/status") +@Consumes(MediaType.TEXT_PLAIN) +@Produces(MediaType.APPLICATION_JSON) +public class MachineInfoOnlyStatusStorageResource { + private static final Logger logger = LoggerFactory.getLogger(MachineInfoOnlyStatusStorageResource.class); + + @POST + @Timed + public Response post() { + try { + MachineStatus machineStatus = new MachineStatus(); + + collectMachineInfo(machineStatus); + + return Response.ok(machineStatus).build(); + } catch (Exception e) { + logger.error("Failed retrieving System Status", e); + return Response.serverError().entity("Failed retrieving System Status").build(); + } + } + + private void collectMachineInfo(MachineStatus machineStatus) { + machineStatus.setMachineName(StatusUtil.getMachineName()); + machineStatus.setPid(StatusUtil.getProcessId()); + machineStatus.setJvmUpTimeMillis(StatusUtil.getJvmUpTimeInMilli()); + machineStatus.setAvailableProcessors(StatusUtil.getAvailableProcessors()); + machineStatus.setLoadAverage(StatusUtil.getLoadAvg()); + machineStatus.setProcessCpuLoad(StatusUtil.getProcessCpuLoad()); + machineStatus.setTotalRamBytes(StatusUtil.getTotalRamInBytes()); + machineStatus.setUsedRamBytes(StatusUtil.getUsedRamInBytes()); + machineStatus.setHeapSizeBytes(StatusUtil.getHeapSizeInBytes()); + machineStatus.setPermGenSizeBytes(StatusUtil.getPermGenSizeInBytes()); + } +} diff --git a/src/main/java/com/takipi/oss/storage/resources/diag/NoOpTreeStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/diag/NoOpTreeStorageResource.java new file mode 100644 index 0000000..e531c8e --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/diag/NoOpTreeStorageResource.java @@ -0,0 +1,25 @@ +package com.takipi.oss.storage.resources.diag; + +import com.codahale.metrics.annotation.Timed; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.Consumes; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +@Path("/storage/v1/diag/tree") +@Consumes(MediaType.TEXT_PLAIN) +@Produces(MediaType.TEXT_PLAIN) +public class NoOpTreeStorageResource { + + @GET + @Timed + public Response get() { + return Response.ok("").build(); + } + +} diff --git a/src/main/java/com/takipi/oss/storage/resources/diag/StatusStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/diag/StatusStorageResource.java index 4fb1b92..5a9f417 100644 --- a/src/main/java/com/takipi/oss/storage/resources/diag/StatusStorageResource.java +++ 
b/src/main/java/com/takipi/oss/storage/resources/diag/StatusStorageResource.java @@ -41,7 +41,7 @@ public class StatusStorageResource { protected final String folderPath; public StatusStorageResource(TakipiStorageConfiguration configuration) { - this.folderPath = configuration.getFolderPath(); + this.folderPath = configuration.getFolderFs().getFolderPath(); } @POST diff --git a/src/main/java/com/takipi/oss/storage/resources/diag/TreeStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/diag/TreeStorageResource.java index 78c25f6..ee2e80a 100644 --- a/src/main/java/com/takipi/oss/storage/resources/diag/TreeStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/diag/TreeStorageResource.java @@ -29,7 +29,7 @@ public class TreeStorageResource { protected final String folderPath; public TreeStorageResource(TakipiStorageConfiguration configuration) { - this.folderPath = configuration.getFolderPath(); + this.folderPath = configuration.getFolderFs().getFolderPath(); } @GET diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/BinaryStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/BinaryStorageResource.java index 8d13665..40b1ba1 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/BinaryStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/BinaryStorageResource.java @@ -1,39 +1,31 @@ package com.takipi.oss.storage.resources.fs; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; +import com.codahale.metrics.annotation.Timed; +import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.api.Filesystem; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import javax.ws.rs.Consumes; -import javax.ws.rs.DELETE; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.HEAD; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; +import javax.ws.rs.*; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.codahale.metrics.annotation.Timed; -import com.takipi.oss.storage.TakipiStorageConfiguration; -import com.takipi.oss.storage.fs.Record; -import com.takipi.oss.storage.resources.fs.base.HashFileSystemStorageResource; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; @Path("/storage/v1/binary/{serviceId}/{type}/{key:.+}") @Consumes(MediaType.APPLICATION_OCTET_STREAM) @Produces(MediaType.APPLICATION_OCTET_STREAM) -public class BinaryStorageResource extends HashFileSystemStorageResource { +public class BinaryStorageResource { + private static final Logger logger = LoggerFactory.getLogger(BinaryStorageResource.class); - public BinaryStorageResource(TakipiStorageConfiguration configuration) { - super(configuration); + private final Filesystem filesystem; + + public BinaryStorageResource(Filesystem filesystem) { + this.filesystem = filesystem; } @GET @@ -103,7 +95,7 @@ public Response delete(@PathParam("serviceId") @DefaultValue("") String serviceI } try { - fs.delete(Record.newRecord(serviceId, type, key)); + filesystem.delete(Record.newRecord(serviceId, type, key)); return Response.ok().build(); } catch (FileNotFoundException e) { logger.warn("Key not found: {}", key); @@ -115,7 +107,7 @@ public Response delete(@PathParam("serviceId") @DefaultValue("") 
String serviceI } protected Response internalGet(Record record) throws IOException { - InputStream is = fs.get(record); + InputStream is = filesystem.get(record); long size = fs.size(record); @@ -123,13 +115,13 @@ protected Response internalGet(Record record) throws IOException { } protected Response internalHead(Record record) throws IOException { - long size = fs.size(record); + long size = filesystem.size(record); return Response.ok().header(HttpHeaders.CONTENT_LENGTH, size).build(); } protected Response internalPut(Record record, InputStream is) throws IOException { - fs.put(record, is); + filesystem.put(record, is); return Response.ok().build(); } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiDeleteStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiDeleteStorageResource.java index 40a9953..6273d0b 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiDeleteStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiDeleteStorageResource.java @@ -1,6 +1,13 @@ package com.takipi.oss.storage.resources.fs; -import java.util.List; +import com.codahale.metrics.annotation.Timed; +import com.google.common.collect.Lists; +import com.takipi.oss.storage.data.delete.MultiDeleteRequest; +import com.takipi.oss.storage.data.delete.MultiDeleteResponse; +import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.api.Filesystem; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import javax.ws.rs.Consumes; import javax.ws.rs.POST; @@ -8,26 +15,19 @@ import javax.ws.rs.Produces; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.codahale.metrics.annotation.Timed; -import com.google.common.collect.Lists; -import com.takipi.oss.storage.TakipiStorageConfiguration; -import com.takipi.oss.storage.data.delete.MultiDeleteRequest; -import com.takipi.oss.storage.data.delete.MultiDeleteResponse; -import com.takipi.oss.storage.fs.Record; -import com.takipi.oss.storage.resources.fs.base.HashFileSystemStorageResource; +import java.util.List; @Path("/storage/v1/json/multidelete") @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) -public class JsonMultiDeleteStorageResource extends HashFileSystemStorageResource { +public class JsonMultiDeleteStorageResource { + private static final Logger logger = LoggerFactory.getLogger(JsonMultiDeleteStorageResource.class); - public JsonMultiDeleteStorageResource(TakipiStorageConfiguration configuration) { - super(configuration); + private final Filesystem filesystem; + + public JsonMultiDeleteStorageResource(Filesystem filesystem) { + this.filesystem = filesystem; } @POST @@ -47,7 +47,7 @@ private MultiDeleteResponse handleResponse(MultiDeleteRequest request) { for (Record record : request.records) { try { - fs.delete(record); + filesystem.delete(record); deletedRecords.add(record); } catch (Exception e) { logger.error("Problem deleting record " + record, e); diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index 5708656..6f2124e 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -1,35 +1,34 @@ package com.takipi.oss.storage.resources.fs; -import java.util.List; - 
-import javax.ws.rs.Consumes; -import javax.ws.rs.POST; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import com.codahale.metrics.annotation.Timed; import com.google.common.collect.Lists; -import com.takipi.oss.storage.TakipiStorageConfiguration; import com.takipi.oss.storage.data.RecordWithData; import com.takipi.oss.storage.data.fetch.MultiFetchRequest; import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.api.Filesystem; import com.takipi.oss.storage.helper.FilesystemUtil; -import com.takipi.oss.storage.resources.fs.base.HashFileSystemStorageResource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.Consumes; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import java.util.List; @Path("/storage/v1/json/multifetch") @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) -public class JsonMultiFetchStorageResource extends HashFileSystemStorageResource { +public class JsonMultiFetchStorageResource { private static final Logger logger = LoggerFactory.getLogger(JsonMultiFetchStorageResource.class); - public JsonMultiFetchStorageResource(TakipiStorageConfiguration configuration) { - super(configuration); + private final Filesystem filesystem; + + public JsonMultiFetchStorageResource(Filesystem filesystem) { + this.filesystem = filesystem; } @POST @@ -49,7 +48,7 @@ private MultiFetchResponse handleResponse(MultiFetchRequest request) { for (Record record : request.records) { try { - String value = FilesystemUtil.read(fs, record, request.encodingType); + String value = FilesystemUtil.read(filesystem, record, request.encodingType); if (value != null) { records.add(RecordWithData.of(record, value)); diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleFetchStorageResource.java index d02f6b1..d9de99d 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleFetchStorageResource.java @@ -1,5 +1,13 @@ package com.takipi.oss.storage.resources.fs; +import com.codahale.metrics.annotation.Timed; +import com.takipi.oss.storage.data.simple.SimpleFetchRequest; +import com.takipi.oss.storage.data.simple.SimpleFetchResponse; +import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.helper.FilesystemUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import javax.ws.rs.Consumes; import javax.ws.rs.POST; import javax.ws.rs.Path; @@ -7,24 +15,17 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.codahale.metrics.annotation.Timed; -import com.takipi.oss.storage.TakipiStorageConfiguration; -import com.takipi.oss.storage.data.simple.SimpleFetchRequest; -import com.takipi.oss.storage.data.simple.SimpleFetchResponse; -import com.takipi.oss.storage.helper.FilesystemUtil; -import com.takipi.oss.storage.resources.fs.base.SimpleFileSystemStorageResource; - @Path("/storage/v1/json/simplefetch") @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) -public class 
JsonSimpleFetchStorageResource extends SimpleFileSystemStorageResource { +public class JsonSimpleFetchStorageResource { + private static final Logger logger = LoggerFactory.getLogger(JsonSimpleFetchStorageResource.class); - public JsonSimpleFetchStorageResource(TakipiStorageConfiguration configuration) { - super(configuration); + private final Filesystem filesystem; + + public JsonSimpleFetchStorageResource(Filesystem filesystem) { + this.filesystem = filesystem; } @POST @@ -39,7 +40,7 @@ public Response post(SimpleFetchRequest request) { private Response handleResponse(SimpleFetchRequest request) { try { - String data = FilesystemUtil.read(fs, request.path, request.encodingType); + String data = FilesystemUtil.read(filesystem, request.path, request.encodingType); if (data != null) { return Response.ok(new SimpleFetchResponse(data)).build(); diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleSearchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleSearchStorageResource.java index 3268658..b3c4839 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleSearchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleSearchStorageResource.java @@ -1,6 +1,12 @@ package com.takipi.oss.storage.resources.fs; -import java.io.File; +import com.codahale.metrics.annotation.Timed; +import com.takipi.oss.storage.data.simple.SimpleSearchRequest; +import com.takipi.oss.storage.data.simple.SimpleSearchResponse; +import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.api.SearchResult; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import javax.ws.rs.Consumes; import javax.ws.rs.POST; @@ -9,25 +15,17 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.codahale.metrics.annotation.Timed; -import com.google.common.base.Predicate; -import com.takipi.oss.storage.TakipiStorageConfiguration; -import com.takipi.oss.storage.data.simple.SimpleSearchRequest; -import com.takipi.oss.storage.data.simple.SimpleSearchResponse; -import com.takipi.oss.storage.helper.FilesystemUtil; -import com.takipi.oss.storage.resources.fs.base.SimpleFileSystemStorageResource; - @Path("/storage/v1/json/simplesearch") @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) -public class JsonSimpleSearchStorageResource extends SimpleFileSystemStorageResource { +public class JsonSimpleSearchStorageResource { + private static final Logger logger = LoggerFactory.getLogger(JsonSimpleSearchStorageResource.class); - public JsonSimpleSearchStorageResource(TakipiStorageConfiguration configuration) { - super(configuration); + private final Filesystem filesystem; + + public JsonSimpleSearchStorageResource(Filesystem filesystem) { + this.filesystem = filesystem; } @POST @@ -42,25 +40,12 @@ public Response post(SimpleSearchRequest request) { private Response handleResponse(SimpleSearchRequest request) { try { - File searchRoot = new File(fs.getRoot(), FilesystemUtil.fixPath(request.baseSearchPath)); - - ResourceFileCallback fileCallback = new ResourceFileCallback(request.name, request.preventDuplicates); - FilesystemUtil.listFilesRecursively(searchRoot, fileCallback); - File result = fileCallback.getFoundFile(); - - if (result == null) { - return searchFailed(request.name); - } - - String relFSPath = result.getAbsolutePath().replace(fs.getRoot().getAbsolutePath(), ""); - String data = 
FilesystemUtil.read(fs, relFSPath, request.encodingType); - - if (data == null) { + SearchResult searchResult = filesystem.search(request); + if(searchResult != null) { + return Response.ok(new SimpleSearchResponse(searchResult.getData(), searchResult.getPath())).build(); + } else { return searchFailed(request.name); } - - return Response.ok(new SimpleSearchResponse(data, relFSPath.replace(request.name, ""))).build(); - } catch (Exception e) { logger.error("Problem getting: " + request.name, e); return Response.serverError().entity("Problem getting " + request.name).build(); @@ -71,45 +56,5 @@ private Response searchFailed(String name) { logger.warn("File not found: {}", name); return Response.status(404).entity("File not found" + name).build(); } - - private static class ResourceFileCallback implements Predicate - { - private final String resourceName; - private final boolean preventDuplicates; - - private File foundFile; - - protected ResourceFileCallback(String resourceName, boolean preventDuplicates) - { - this.resourceName = resourceName; - this.preventDuplicates = preventDuplicates; - - this.foundFile = null; - } - - @Override - public boolean apply(File file) - { - if (!resourceName.equals(file.getName())) - { - return false; - } - - if ((preventDuplicates) && - (foundFile != null)) - { - foundFile = null; // never find more than one result if preventing duplicates - return true; - } - - foundFile = file; - - return !preventDuplicates; // if we don't prevent duplicates, we stop right now - } - - public File getFoundFile() - { - return foundFile; - } - } + } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/base/FolderFileSystemStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/base/FolderFileSystemStorageResource.java deleted file mode 100644 index e2871a0..0000000 --- a/src/main/java/com/takipi/oss/storage/resources/fs/base/FolderFileSystemStorageResource.java +++ /dev/null @@ -1,14 +0,0 @@ -package com.takipi.oss.storage.resources.fs.base; - -import com.takipi.oss.storage.TakipiStorageConfiguration; -import com.takipi.oss.storage.fs.folder.FolderFilesystem; - -public abstract class FolderFileSystemStorageResource { - protected final FolderFilesystem fs; - - public FolderFileSystemStorageResource(TakipiStorageConfiguration configuration) { - this.fs = getNewFileSystem(configuration); - } - - protected abstract FolderFilesystem getNewFileSystem(TakipiStorageConfiguration configuration); -} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/base/HashFileSystemStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/base/HashFileSystemStorageResource.java deleted file mode 100644 index 1b3e1fd..0000000 --- a/src/main/java/com/takipi/oss/storage/resources/fs/base/HashFileSystemStorageResource.java +++ /dev/null @@ -1,17 +0,0 @@ -package com.takipi.oss.storage.resources.fs.base; - -import com.takipi.oss.storage.TakipiStorageConfiguration; -import com.takipi.oss.storage.fs.Record; -import com.takipi.oss.storage.fs.folder.FolderFilesystem; -import com.takipi.oss.storage.fs.folder.record.HashSubfolderFilesystem; - -public class HashFileSystemStorageResource extends FolderFileSystemStorageResource { - public HashFileSystemStorageResource(TakipiStorageConfiguration configuration) { - super(configuration); - } - - @Override - protected FolderFilesystem getNewFileSystem(TakipiStorageConfiguration configuration) { - return new HashSubfolderFilesystem(configuration.getFolderPath(), configuration.getMaxUsedStoragePercentage()); - } -} 
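The storage-resource base classes removed by this patch (FolderFileSystemStorageResource and HashFileSystemStorageResource above, SimpleFileSystemStorageResource just below) each constructed their own folder-backed FolderFilesystem, which tied every resource to local-disk storage. After the refactor the resources accept a Filesystem through their constructors, so the application builds one backend and hands it to all of them. The following is only a sketch of that wiring, assuming a hypothetical helper class and method name; generics are omitted as in the patch text, and the real TakipiStorageMain may wire things differently:

    import io.dropwizard.setup.Environment;

    import com.takipi.oss.storage.fs.api.Filesystem;
    import com.takipi.oss.storage.resources.fs.BinaryStorageResource;
    import com.takipi.oss.storage.resources.fs.JsonMultiDeleteStorageResource;
    import com.takipi.oss.storage.resources.fs.JsonMultiFetchStorageResource;
    import com.takipi.oss.storage.resources.fs.JsonSimpleFetchStorageResource;
    import com.takipi.oss.storage.resources.fs.JsonSimpleSearchStorageResource;

    // Hypothetical wiring helper, not the repo's actual code.
    class ResourceWiring {
        // Register every storage resource against one shared Filesystem instance,
        // whichever concrete implementation the configuration selected.
        static void registerResources(Filesystem filesystem, Environment environment) {
            environment.jersey().register(new BinaryStorageResource(filesystem));
            environment.jersey().register(new JsonMultiDeleteStorageResource(filesystem));
            environment.jersey().register(new JsonMultiFetchStorageResource(filesystem));
            environment.jersey().register(new JsonSimpleFetchStorageResource(filesystem));
            environment.jersey().register(new JsonSimpleSearchStorageResource(filesystem));
        }
    }

Because the resources no longer know which Filesystem implementation they receive, the same classes can later be registered against the S3-backed filesystem that the subsequent patches in this series configure.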
diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/base/SimpleFileSystemStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/base/SimpleFileSystemStorageResource.java deleted file mode 100644 index e529502..0000000 --- a/src/main/java/com/takipi/oss/storage/resources/fs/base/SimpleFileSystemStorageResource.java +++ /dev/null @@ -1,16 +0,0 @@ -package com.takipi.oss.storage.resources.fs.base; - -import com.takipi.oss.storage.TakipiStorageConfiguration; -import com.takipi.oss.storage.fs.folder.FolderFilesystem; -import com.takipi.oss.storage.fs.folder.simple.SimpleFilesystem; - -public class SimpleFileSystemStorageResource extends FolderFileSystemStorageResource { - public SimpleFileSystemStorageResource(TakipiStorageConfiguration configuration) { - super(configuration); - } - - @Override - protected FolderFilesystem getNewFileSystem(TakipiStorageConfiguration configuration) { - return new SimpleFilesystem(configuration.getFolderPath(), configuration.getMaxUsedStoragePercentage()); - } -} From 939fd198fd6c2985b478d3958cac7a7b16874697 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Tue, 21 Feb 2017 18:23:59 +0200 Subject: [PATCH 06/66] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b08c912..3a8baaf 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ Build and run: - clone the repo - `cd takipi-storage` - `mvn compile package` -- edit settings.yml to contain , , to access your s3 bucket +- edit settings.yml to contain bucket, key, password to access your s3 bucket - `java -jar target/takipi-storage-1.7.0.jar server settings.yml` Deploy: From 46dcf5ccd36e2ca13508bd161723c1d17ab5326d Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Thu, 23 Feb 2017 17:03:27 +0200 Subject: [PATCH 07/66] add support for docker for s3 --- docker/Dockerfile | 6 +++--- docker/README.md | 6 ++++-- docker/settings.yml | 33 +++++++++++++++++++++------------ 3 files changed, 28 insertions(+), 17 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 9199487..e024a90 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,5 +1,5 @@ # Run me with docker run -v :/opt/takipi-storage/storage -p :$STORAGE_PORT -# Logs are written to /log folder and both should be persistent outside of the running container +# Logs are written to /log folder and they should be persistent outside of the running container FROM java:8 MAINTAINER Chen harel "https://github.com/chook" @@ -9,7 +9,7 @@ ENV TAR_FILENAME takipi-storage-$VERSION.tar.gz ENV JAR_FILENAME takipi-storage-$VERSION.jar ENV STORAGE_PORT 8080 -RUN wget https://s3.amazonaws.com/app-takipi-com/deploy/takipi-storage/$TAR_FILENAME +RUN wget https://s3.amazonaws.com/app-takipi-com/deploy/takipi-storage/takipi-s3/$TAR_FILENAME RUN tar zxvf $TAR_FILENAME -C /tmp && \ mkdir -p /opt/takipi-storage/lib && \ cp /tmp/takipi-storage/lib/$JAR_FILENAME /opt/takipi-storage/lib @@ -18,4 +18,4 @@ ADD settings.yml /opt/takipi-storage EXPOSE $STORAGE_PORT WORKDIR /opt/takipi-storage -CMD java -jar /opt/takipi-storage/lib/$JAR_FILENAME server settings.yml +CMD java -jar /opt/takipi-storage/lib/$JAR_FILENAME server settings.yml \ No newline at end of file diff --git a/docker/README.md b/docker/README.md index e21d6ad..3571af9 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,6 +1,8 @@ takipi-storage on docker ======================== -When running takipi-storage inside docker, make sure to persist the storage folder outside (using volumes). 
+To run takipi-storage for s3: +- configure bucket, access key and password in settings.yml +- run it with docker run -v :/opt/takipi-storage/storage -p :8080 + (If you wish to have the logs persisted to and they can be found in /logs folder. -p to override default 8080 port) -Logs are placed inside the storage folder diff --git a/docker/settings.yml b/docker/settings.yml index 4dd3045..fc53524 100644 --- a/docker/settings.yml +++ b/docker/settings.yml @@ -1,27 +1,36 @@ -folderPath: /opt/takipi-storage/storage -maxUsedStoragePercentage: 0.95 enableCors: true corsOrigins: "*" +s3Fs: + bucket: + credentials: + accessKey: + secretKey: + server: applicationConnectors: - type: http port: 8080 +# this requires the alpn-boot library on the JVM's boot classpath +# - type: spdy3 +# port: 8445 +# keyStorePath: example.keystore +# keyStorePassword: example +# validateCerts: false adminConnectors: - type: http port: 8081 - requestLog: - appenders: - - type: file - currentLogFilename: ./storage/log/access.log - archivedLogFilenamePattern: ./storage/log/access.%d.log.gz - archivedFileCount: 14 + +# Logging settings. logging: + + # The default level of all loggers. Can be OFF, ERROR, WARN, INFO, DEBUG, TRACE, or ALL. level: INFO + + # Logger-specific levels. loggers: + com.takipi: DEBUG + appenders: - - type: file - currentLogFilename: ./storage/log/takipi-storage.log - archivedLogFilenamePattern: ./storage/log/takipi-storage.%d.log.gz - archivedFileCount: 14 + - type: console From 2cb1f9f539a453794c5223479f311b5eb5b7d4fb Mon Sep 17 00:00:00 2001 From: David Levanon Date: Tue, 28 Feb 2017 19:01:02 +0200 Subject: [PATCH 08/66] config env support --- .../storage/TakipiStorageConfiguration.java | 6 +- ...TakipiStorageConfigurationEnvResolver.java | 79 +++++++++++++++++++ 2 files changed, 82 insertions(+), 3 deletions(-) create mode 100644 src/main/java/com/takipi/oss/storage/TakipiStorageConfigurationEnvResolver.java diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java index 05508e8..93b596e 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java @@ -67,7 +67,7 @@ public static class Credentials { @JsonProperty public String getAccessKey() { - return accessKey; + return TakipiStorageConfigurationEnvResolver.resolveEnv(accessKey); } @JsonProperty @@ -77,7 +77,7 @@ public void setAccessKey(String accessKey) { @JsonProperty public String getSecretKey() { - return secretKey; + return TakipiStorageConfigurationEnvResolver.resolveEnv(secretKey); } @JsonProperty @@ -88,7 +88,7 @@ public void setSecretKey(String secretKey) { @JsonProperty public String getBucket() { - return bucket; + return TakipiStorageConfigurationEnvResolver.resolveEnv(bucket); } @JsonProperty diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageConfigurationEnvResolver.java b/src/main/java/com/takipi/oss/storage/TakipiStorageConfigurationEnvResolver.java new file mode 100644 index 0000000..2c05306 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageConfigurationEnvResolver.java @@ -0,0 +1,79 @@ +package com.takipi.oss.storage; + +public class TakipiStorageConfigurationEnvResolver { + private static final int ENV_STATE_NONE = -1; + private static final int ENV_STATE_ESCAPED = 0; + private static final int ENV_STATE_IN_NAME = 1; + + public static String resolveEnv(Object value) { + if (value == null) { + return null; + } 
+ + String property = value.toString(); + + StringBuilder resultBuilder = new StringBuilder(property.length() * 2); + StringBuilder envNameBuilder = new StringBuilder(); + + int envState = ENV_STATE_NONE; + + for (char c : property.toCharArray()) { + switch (envState) { + case ENV_STATE_NONE: { + if (c == '$') + { + envState = ENV_STATE_ESCAPED; + } else { + resultBuilder.append(c); + } + } + break; + + case ENV_STATE_ESCAPED: { + if (c == '{') { + envState = ENV_STATE_IN_NAME; + } else { + resultBuilder.append('$'); + + if (c != '$') { + resultBuilder.append(c); + + envState = ENV_STATE_NONE; + } + } + } + break; + + case ENV_STATE_IN_NAME: { + if (c != '}') { + envNameBuilder.append(c); + } else { + String envVarValue = System.getenv(envNameBuilder.toString()); + + if (envVarValue == null) { + envVarValue = ""; + } + + resultBuilder.append(envVarValue); + + envNameBuilder = new StringBuilder(); + + envState = ENV_STATE_NONE; + } + } + break; + } + } + + if (envState != ENV_STATE_NONE) { + resultBuilder.append('$'); + + if (envState == ENV_STATE_IN_NAME) { + resultBuilder.append('{'); + resultBuilder.append(envNameBuilder); + } + } + + return resultBuilder.toString(); + } +} From 1023874d70d951732950a70908e3423e85ca1429 Mon Sep 17 00:00:00 2001 From: Luis E Limon Date: Wed, 19 Jul 2017 10:10:44 -0700 Subject: [PATCH 09/66] Added support for using IAM Roles. --- settings.yml | 2 ++ .../oss/storage/TakipiStorageConfiguration.java | 2 -- .../takipi/oss/storage/TakipiStorageMain.java | 17 +++++++++++++++-- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/settings.yml b/settings.yml index 07078f6..5d3b59e 100644 --- a/settings.yml +++ b/settings.yml @@ -1,6 +1,8 @@ enableCors: true corsOrigins: "*" +# +# If using attaching IAM Role to instance leave accessKey and secretKey empty. 
s3Fs: bucket: credentials: diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java index 93b596e..374e5ca 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java @@ -59,10 +59,8 @@ public static class S3Fs { private Credentials credentials; public static class Credentials { - @NotEmpty private String accessKey; - @NotEmpty private String secretKey; @JsonProperty diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index b5cb05b..8d9ec29 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -95,8 +95,21 @@ private Filesystem configureS3Filesystem(TakipiStorageConfiguration configuratio // Setup Amazon S3 client TakipiStorageConfiguration.S3Fs.Credentials credentials = configuration.getS3Fs().getCredentials(); - AWSCredentials awsCredentials = new BasicAWSCredentials(credentials.getAccessKey(), credentials.getSecretKey()); - AmazonS3 amazonS3 = new AmazonS3Client(awsCredentials); + AmazonS3 amazonS3; + + if ( credentials.getAccessKey() != null && !credentials.getAccessKey().isEmpty() && + credentials.getSecretKey() != null && !credentials.getSecretKey().isEmpty() ){ + log.debug("Using S3 Filesystem with keys" ); + + AWSCredentials awsCredentials = new BasicAWSCredentials(credentials.getAccessKey(), credentials.getSecretKey()); + amazonS3 = new AmazonS3Client(awsCredentials); + } + else { + // create a client connection based on IAM role assigned + log.debug("Using S3 Filesystem with IAM Role"); + amazonS3 = new AmazonS3Client(); + + } // S3 bucket String bucket = configuration.getS3Fs().getBucket(); From de9bd45cb127c48d9bda297b1d5c59868774c729 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Wed, 2 Aug 2017 01:53:15 +0300 Subject: [PATCH 10/66] adjust README and Dockerfile comments --- README.md | 4 ++-- docker/Dockerfile | 2 +- docker/README.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 3a8baaf..d6749d6 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,13 @@ Takipi Storage ============== -With thanks to moovel for developting this version supporting s3: https://github.com/moovel/takipi-storage/tree/s3-storage +Thanks to moovel for developing this version supporting s3: https://github.com/moovel/takipi-storage/tree/s3-storage Build and run: - clone the repo - `cd takipi-storage` - `mvn compile package` -- edit settings.yml to contain bucket, key, password to access your s3 bucket +- edit settings.yml to contain bucket, key, secret to access your s3 bucket - `java -jar target/takipi-storage-1.7.0.jar server settings.yml` Deploy: diff --git a/docker/Dockerfile b/docker/Dockerfile index e024a90..f45b4af 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,5 +1,5 @@ # Run me with docker run -v :/opt/takipi-storage/storage -p :$STORAGE_PORT -# Logs are written to /log folder and they should be persistent outside of the running container +# Logs are written to /log folder and so should be persistent outside of the running container FROM java:8 MAINTAINER Chen harel "https://github.com/chook" diff --git a/docker/README.md b/docker/README.md index 3571af9..2e76112 100644 --- a/docker/README.md +++ b/docker/README.md @@ -2,7 +2,7 @@ takipi-storage on docker 
======================== To run takipi-storage for s3: -- configure bucket, access key and password in settings.yml +- configure bucket, access key and secret in settings.yml - run it with docker run -v :/opt/takipi-storage/storage -p :8080 (If you wish to have the logs persisted to and they can be found in /logs folder. -p to override default 8080 port) From f414faba0d70074583203f93abff8c064c8eaaa6 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Wed, 2 Aug 2017 02:09:39 +0300 Subject: [PATCH 11/66] https support in settings.yml --- docker/settings.yml | 12 ++++++------ settings.yml | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docker/settings.yml b/docker/settings.yml index fc53524..c254edf 100644 --- a/docker/settings.yml +++ b/docker/settings.yml @@ -11,12 +11,12 @@ server: applicationConnectors: - type: http port: 8080 -# this requires the alpn-boot library on the JVM's boot classpath -# - type: spdy3 -# port: 8445 -# keyStorePath: example.keystore -# keyStorePassword: example -# validateCerts: false +# https support +# - type: https +# port: 8443 +# keyStorePath: example.keystore +# keyStorePassword: example +# validateCerts: false adminConnectors: - type: http port: 8081 diff --git a/settings.yml b/settings.yml index 07078f6..89c27e2 100644 --- a/settings.yml +++ b/settings.yml @@ -18,9 +18,9 @@ server: applicationConnectors: - type: http port: 8080 -# this requires the alpn-boot library on the JVM's boot classpath -# - type: spdy3 -# port: 8445 +# https support +# - type: https +# port: 8443 # keyStorePath: example.keystore # keyStorePassword: example # validateCerts: false From 33655ba0c52e7da5af764ffbcec2d8e3cfe7520a Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Wed, 2 Aug 2017 02:10:05 +0300 Subject: [PATCH 12/66] style and formatting --- src/main/java/com/takipi/oss/storage/fs/Record.java | 6 +++++- .../com/takipi/oss/storage/fs/SimplePathRecord.java | 11 ++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/fs/Record.java b/src/main/java/com/takipi/oss/storage/fs/Record.java index 6f14ecb..a7b2c7c 100644 --- a/src/main/java/com/takipi/oss/storage/fs/Record.java +++ b/src/main/java/com/takipi/oss/storage/fs/Record.java @@ -1,5 +1,7 @@ package com.takipi.oss.storage.fs; +import java.io.File; + import org.apache.commons.lang.StringUtils; public class Record implements BaseRecord { @@ -34,7 +36,9 @@ public String getKey() { } public String getPath() { - return this.getServiceId() + "/" + this.getType() + "/" + this.getKey(); + return (this.getServiceId() + File.separator + + this.getType() + File.separator + + this.getKey()); } @Override diff --git a/src/main/java/com/takipi/oss/storage/fs/SimplePathRecord.java b/src/main/java/com/takipi/oss/storage/fs/SimplePathRecord.java index d4bc0e6..f754478 100644 --- a/src/main/java/com/takipi/oss/storage/fs/SimplePathRecord.java +++ b/src/main/java/com/takipi/oss/storage/fs/SimplePathRecord.java @@ -1,6 +1,6 @@ package com.takipi.oss.storage.fs; -public class SimplePathRecord implements BaseRecord { +public class SimplePathRecord implements BaseRecord { private final String path; @@ -23,8 +23,7 @@ public String getPath() { @Override public String getServiceId() { - if (pathParts.length > 0) - { + if (pathParts.length > 0) { return pathParts[0]; } @@ -33,8 +32,7 @@ public String getServiceId() { @Override public String getType() { - if (pathParts.length > 1) - { + if (pathParts.length > 1) { return pathParts[1]; } @@ -43,8 +41,7 @@ public String getType() 
{ @Override public String getKey() { - if (pathParts.length > 2) - { + if (pathParts.length > 2) { return pathParts[2]; } From 988773e58e9be398baf7b1b5480f026736f04c4c Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Wed, 2 Aug 2017 08:06:57 +0300 Subject: [PATCH 13/66] add support for path prefix for s3 --- docker/settings.yml | 1 + settings.yml | 1 + .../storage/TakipiStorageConfiguration.java | 13 +++++++++++++ .../takipi/oss/storage/TakipiStorageMain.java | 5 +++-- .../oss/storage/fs/s3/S3Filesystem.java | 19 ++++++++++++++++--- 5 files changed, 34 insertions(+), 5 deletions(-) diff --git a/docker/settings.yml b/docker/settings.yml index c254edf..40c2f5f 100644 --- a/docker/settings.yml +++ b/docker/settings.yml @@ -3,6 +3,7 @@ corsOrigins: "*" s3Fs: bucket: + pathPrefix: credentials: accessKey: secretKey: diff --git a/settings.yml b/settings.yml index 89c27e2..da3abd3 100644 --- a/settings.yml +++ b/settings.yml @@ -3,6 +3,7 @@ corsOrigins: "*" s3Fs: bucket: + pathPrefix: credentials: accessKey: secretKey: diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java index 93b596e..1b7bcf8 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java @@ -54,6 +54,9 @@ public static class S3Fs { @NotEmpty private String bucket; + @NotEmpty + private String pathPrefix; + @NotNull @Valid private Credentials credentials; @@ -95,7 +98,17 @@ public String getBucket() { public void setBucket(String bucket) { this.bucket = bucket; } + + @JsonProperty + public String getPathPrefix() { + return pathPrefix; + } + @JsonProperty + public void setPathPrefix(String pathPrefix) { + this.pathPrefix = pathPrefix; + } + @JsonProperty public Credentials getCredentials() { return credentials; diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index b5cb05b..3e8448c 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -100,9 +100,10 @@ private Filesystem configureS3Filesystem(TakipiStorageConfiguration configuratio // S3 bucket String bucket = configuration.getS3Fs().getBucket(); - log.debug("Using AWS S3 based filesystem with bucket: {}", bucket); + String pathPrefix = configuration.getS3Fs().getPathPrefix(); + log.debug("Using AWS S3 based filesystem with bucket: {}, prefix: {}", bucket, pathPrefix); - return new S3Filesystem(amazonS3, bucket); + return new S3Filesystem(amazonS3, bucket, pathPrefix); } private void enableCors(TakipiStorageConfiguration configuration, Environment environment) { diff --git a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java index ab1a9cc..ca15aba 100644 --- a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java @@ -1,5 +1,6 @@ package com.takipi.oss.storage.fs.s3; +import java.io.File; import java.io.IOException; import java.io.InputStream; @@ -21,10 +22,12 @@ public class S3Filesystem implements Filesystem { private final AmazonS3 amazonS3; private final String bucket; - - public S3Filesystem(AmazonS3 amazonS3, String bucket) { + private final String pathPrefix; + + public S3Filesystem(AmazonS3 amazonS3, String bucket, String pathPrefix) { this.amazonS3 = amazonS3; 
this.bucket = bucket; + this.pathPrefix = pathPrefix; } @Override @@ -59,7 +62,13 @@ public SearchResult search(SearchRequest searchRequest) throws IOException { // Start a prefix search ListObjectsRequest listObjectsRequest = new ListObjectsRequest(); listObjectsRequest.setBucketName(bucket); - listObjectsRequest.setPrefix(searchRequest.getBaseSearchPath()); + + if (this.pathPrefix != null) { + listObjectsRequest.setPrefix(this.pathPrefix + "/" + searchRequest.getBaseSearchPath()); + } else { + listObjectsRequest.setPrefix(searchRequest.getBaseSearchPath()); + } + ObjectListing objectListing = amazonS3.listObjects(listObjectsRequest); // Just select the first object @@ -90,6 +99,10 @@ public BaseRecord pathToRecord(String path) { } private String keyOf(T record) { + if (this.pathPrefix != null) { + return this.pathPrefix + File.separator + record.getPath(); + } + return record.getPath(); } From dd219468decc5be01fa039beb96b9ccb340121d9 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Wed, 2 Aug 2017 08:10:04 +0300 Subject: [PATCH 14/66] add support for not passing credentials for s3 --- .../com/takipi/oss/storage/TakipiStorageMain.java | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index 3e8448c..933a690 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -93,10 +93,15 @@ private Filesystem configureS3Filesystem(TakipiStorageConfiguration configuratio environment.jersey().register(new NoOpTreeStorageResource()); environment.jersey().register(new MachineInfoOnlyStatusStorageResource()); + AmazonS3 amazonS3; // Setup Amazon S3 client - TakipiStorageConfiguration.S3Fs.Credentials credentials = configuration.getS3Fs().getCredentials(); - AWSCredentials awsCredentials = new BasicAWSCredentials(credentials.getAccessKey(), credentials.getSecretKey()); - AmazonS3 amazonS3 = new AmazonS3Client(awsCredentials); + if (configuration.getS3Fs().getCredentials() != null) { + TakipiStorageConfiguration.S3Fs.Credentials credentials = configuration.getS3Fs().getCredentials(); + AWSCredentials awsCredentials = new BasicAWSCredentials(credentials.getAccessKey(), credentials.getSecretKey()); + amazonS3 = new AmazonS3Client(awsCredentials); + } else { + amazonS3 = new AmazonS3Client(); + } // S3 bucket String bucket = configuration.getS3Fs().getBucket(); From 6471e311268ddc1c2368a8690aa5f0baac062b71 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Wed, 2 Aug 2017 08:20:31 +0300 Subject: [PATCH 15/66] add support for not passing credentials for s3 --- .settings/org.eclipse.jdt.core.prefs | 550 +++++++++--------- docker/settings.yml | 1 + settings.yml | 1 + .../storage/TakipiStorageConfiguration.java | 3 +- 4 files changed, 278 insertions(+), 277 deletions(-) diff --git a/.settings/org.eclipse.jdt.core.prefs b/.settings/org.eclipse.jdt.core.prefs index 7ef4a2f..3f86f84 100644 --- a/.settings/org.eclipse.jdt.core.prefs +++ b/.settings/org.eclipse.jdt.core.prefs @@ -1,306 +1,306 @@ -#Tue Jan 13 16:30:02 IST 2015 -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits=do not insert -org.eclipse.jdt.core.formatter.brace_position_for_block=end_of_line -org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration=do not insert +eclipse.preferences.version=1 encoding/src/main/java=UTF-8 
-org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_try_resources=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws=do not insert -org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration=end_of_line -org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional=insert -org.eclipse.jdt.core.formatter.alignment_for_assignment=0 -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block=insert -org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_try_resources=insert -org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.7 encoding/src/main/resources=UTF-8 -org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters=do not insert -org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment=false -org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column=false -org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer=do not insert -org.eclipse.jdt.core.formatter.indent_statements_compare_to_body=true -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces=insert -org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw=insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters=do not insert -org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_unary_operator=do not insert -org.eclipse.jdt.core.formatter.blank_lines_after_imports=1 -org.eclipse.jdt.core.formatter.alignment_for_arguments_in_annotation=0 -org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference=do not insert -org.eclipse.jdt.core.formatter.comment.format_source_code=true -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters=insert -org.eclipse.jdt.core.compiler.debug.localVariable=generate -org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header=true -org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_package=insert -org.eclipse.jdt.core.formatter.blank_lines_before_member_type=1 -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_try=insert -org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable=insert -org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line=false -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments=insert 
-org.eclipse.jdt.core.formatter.format_line_comment_starting_on_first_column=true -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations=insert -org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk=1 -org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call=16 -org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve=1 +encoding/src/test/java=UTF-8 encoding/src/test/resources=UTF-8 -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration=insert +org.eclipse.jdt.core.codeComplete.argumentPrefixes= +org.eclipse.jdt.core.codeComplete.argumentSuffixes= +org.eclipse.jdt.core.codeComplete.fieldPrefixes= +org.eclipse.jdt.core.codeComplete.fieldSuffixes= +org.eclipse.jdt.core.codeComplete.localPrefixes= +org.eclipse.jdt.core.codeComplete.localSuffixes= +org.eclipse.jdt.core.codeComplete.staticFieldPrefixes= +org.eclipse.jdt.core.codeComplete.staticFieldSuffixes= +org.eclipse.jdt.core.codeComplete.staticFinalFieldPrefixes= +org.eclipse.jdt.core.codeComplete.staticFinalFieldSuffixes= +org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled +org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.7 org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve -org.eclipse.jdt.core.formatter.brace_position_for_switch=end_of_line -org.eclipse.jdt.core.formatter.comment.format_header=false -org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws=insert -org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard=do not insert +org.eclipse.jdt.core.compiler.compliance=1.7 +org.eclipse.jdt.core.compiler.debug.lineNumber=generate +org.eclipse.jdt.core.compiler.debug.localVariable=generate +org.eclipse.jdt.core.compiler.debug.sourceFile=generate +org.eclipse.jdt.core.compiler.problem.assertIdentifier=error +org.eclipse.jdt.core.compiler.problem.enumIdentifier=error +org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning +org.eclipse.jdt.core.compiler.source=1.7 +org.eclipse.jdt.core.formatter.align_type_members_on_columns=false +org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression=16 +org.eclipse.jdt.core.formatter.alignment_for_arguments_in_annotation=0 +org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant=16 +org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call=16 +org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation=16 +org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression=16 +org.eclipse.jdt.core.formatter.alignment_for_assignment=0 +org.eclipse.jdt.core.formatter.alignment_for_binary_expression=16 +org.eclipse.jdt.core.formatter.alignment_for_compact_if=16 +org.eclipse.jdt.core.formatter.alignment_for_conditional_expression=80 +org.eclipse.jdt.core.formatter.alignment_for_enum_constants=0 +org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer=16 +org.eclipse.jdt.core.formatter.alignment_for_method_declaration=0 +org.eclipse.jdt.core.formatter.alignment_for_multiple_fields=16 +org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration=16 +org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration=16 +org.eclipse.jdt.core.formatter.alignment_for_resources_in_try=80 +org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation=16 
org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration=16 -org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case=insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_semicolon=do not insert org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration=16 -org.eclipse.jdt.core.formatter.comment.line_length=80 -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch=do not insert -org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases=true -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments=insert -org.eclipse.jdt.core.formatter.insert_space_after_binary_operator=insert -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block=insert -org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator=do not insert -org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration=end_of_line -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments=insert -org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer=insert -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration=insert -org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters=insert -org.eclipse.jdt.core.formatter.comment.format_javadoc_comments=true -org.eclipse.jdt.core.formatter.continuation_indentation=2 -org.eclipse.jdt.core.codeComplete.fieldSuffixes= -org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_field=insert -org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags=insert -org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation=16 -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference=insert -org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator=insert +org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration=16 +org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration=16 +org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration=16 +org.eclipse.jdt.core.formatter.alignment_for_union_type_in_multicatch=16 +org.eclipse.jdt.core.formatter.blank_lines_after_imports=1 +org.eclipse.jdt.core.formatter.blank_lines_after_package=1 +org.eclipse.jdt.core.formatter.blank_lines_before_field=0 +org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration=0 +org.eclipse.jdt.core.formatter.blank_lines_before_imports=1 +org.eclipse.jdt.core.formatter.blank_lines_before_member_type=1 org.eclipse.jdt.core.formatter.blank_lines_before_method=1 -org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line=true -org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration=16 +org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk=1 +org.eclipse.jdt.core.formatter.blank_lines_before_package=0 
+org.eclipse.jdt.core.formatter.blank_lines_between_import_groups=1 org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations=1 -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations=insert -org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast=insert +org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_array_initializer=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_block=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_block_in_case=end_of_line org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration=end_of_line -org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration=insert -org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression=16 -org.eclipse.jdt.core.formatter.join_lines_in_comments=true -org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header=true -org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference=do not insert -org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration=0 +org.eclipse.jdt.core.formatter.brace_position_for_enum_constant=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_method_declaration=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_switch=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_type_declaration=end_of_line +org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment=false +org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment=false org.eclipse.jdt.core.formatter.comment.format_block_comments=true -org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional=insert -org.eclipse.jdt.core.formatter.alignment_for_multiple_fields=16 -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case=do not insert -org.eclipse.jdt.core.formatter.alignment_for_union_type_in_multicatch=16 +org.eclipse.jdt.core.formatter.comment.format_header=false org.eclipse.jdt.core.formatter.comment.format_html=true -org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while=do not insert -org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration=do not insert -org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration=16 -org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized=do not insert -org.eclipse.jdt.core.compiler.problem.enumIdentifier=error -org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer=insert 
-org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for=insert -org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator=insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces=do not insert -org.eclipse.jdt.core.formatter.brace_position_for_method_declaration=end_of_line -org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments=do not insert -org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression=16 -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if=insert +org.eclipse.jdt.core.formatter.comment.format_javadoc_comments=true +org.eclipse.jdt.core.formatter.comment.format_line_comments=true +org.eclipse.jdt.core.formatter.comment.format_source_code=true +org.eclipse.jdt.core.formatter.comment.indent_parameter_description=true +org.eclipse.jdt.core.formatter.comment.indent_root_tags=true +org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags=insert +org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter=insert +org.eclipse.jdt.core.formatter.comment.line_length=80 +org.eclipse.jdt.core.formatter.comment.new_lines_at_block_boundaries=true org.eclipse.jdt.core.formatter.comment.new_lines_at_javadoc_boundaries=true -org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference=do not insert -org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration=do not insert +org.eclipse.jdt.core.formatter.comment.preserve_white_space_between_code_and_line_comments=false +org.eclipse.jdt.core.formatter.compact_else_if=true +org.eclipse.jdt.core.formatter.continuation_indentation=2 +org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer=2 +org.eclipse.jdt.core.formatter.disabling_tag=@formatter\:off +org.eclipse.jdt.core.formatter.enabling_tag=@formatter\:on +org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line=false +org.eclipse.jdt.core.formatter.format_line_comment_starting_on_first_column=true +org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header=true org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header=true -org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter=insert -org.eclipse.jdt.core.codeComplete.staticFinalFieldSuffixes= -org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration=16 -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation=insert -org.eclipse.jdt.core.formatter.insert_space_after_ellipsis=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration=do not insert +org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header=true 
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header=true +org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases=true +org.eclipse.jdt.core.formatter.indent_empty_lines=false +org.eclipse.jdt.core.formatter.indent_statements_compare_to_block=true +org.eclipse.jdt.core.formatter.indent_statements_compare_to_body=true +org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases=true +org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch=false +org.eclipse.jdt.core.formatter.indentation.size=4 +org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_field=insert +org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable=insert +org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_method=insert +org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_package=insert +org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_type=insert +org.eclipse.jdt.core.formatter.insert_new_line_after_label=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement=do not insert org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration=insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration=insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block=insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant=insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration=insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body=insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration=insert +org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter=insert +org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator=insert org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_try=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch=do not insert -org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant=16 -org.eclipse.jdt.core.codeComplete.fieldPrefixes= -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch=insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration=insert -org.eclipse.jdt.core.formatter.brace_position_for_enum_constant=end_of_line -org.eclipse.jdt.core.formatter.wrap_before_or_operator_multicatch=true 
+org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_binary_operator=insert +org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments=insert +org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters=insert +org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block=insert +org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast=insert +org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert=insert +org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case=insert +org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional=insert +org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for=insert org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters=insert +org.eclipse.jdt.core.formatter.insert_space_after_ellipsis=insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer=insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation=do not insert org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast=do not insert -org.eclipse.jdt.core.codeComplete.staticFieldSuffixes= 
-org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer=insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters=do not insert -org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration=do not insert -org.eclipse.jdt.core.formatter.alignment_for_enum_constants=0 -org.eclipse.jdt.core.formatter.alignment_for_method_declaration=0 -org.eclipse.jdt.core.formatter.tabulation.char=space -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression=insert -org.eclipse.jdt.core.formatter.wrap_before_binary_operator=true +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_try=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional=insert org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation=do not insert -org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line=false -org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line=false -org.eclipse.jdt.core.formatter.compact_else_if=true -org.eclipse.jdt.core.codeComplete.localPrefixes= -org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return=insert -org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference=do not insert -org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration=end_of_line -org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment=false -org.eclipse.jdt.core.formatter.disabling_tag=@formatter\:off -org.eclipse.jdt.core.formatter.align_type_members_on_columns=false -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations=do not insert -org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_type=insert -org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for=insert +org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_try_resources=insert +org.eclipse.jdt.core.formatter.insert_space_after_unary_operator=do not insert 
+org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter=insert +org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator=insert +org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration=insert +org.eclipse.jdt.core.formatter.insert_space_before_binary_operator=insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer=insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference=do not insert org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation=do not insert -org.eclipse.jdt.core.formatter.join_wrapped_lines=true -org.eclipse.jdt.core.codeComplete.localSuffixes= -org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_try=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert=insert +org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case=do not insert org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional=insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression=do not insert -org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header=true -org.eclipse.jdt.core.formatter.use_on_off_tags=false -org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer=16 -org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column=false -org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_method=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch=insert 
-org.eclipse.jdt.core.formatter.comment.format_line_comments=true -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch=insert -org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line=false -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations=insert -org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments=do not insert -org.eclipse.jdt.core.formatter.comment.indent_parameter_description=true -org.eclipse.jdt.core.formatter.indentation.size=4 -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default=do not insert org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for=insert -org.eclipse.jdt.core.compiler.debug.sourceFile=generate +org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters=do not insert org.eclipse.jdt.core.formatter.insert_space_before_ellipsis=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for=insert -org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration=16 -org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments=do not insert 
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration=insert org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer=insert -org.eclipse.jdt.core.formatter.comment.indent_root_tags=true +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration=insert org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant=insert -org.eclipse.jdt.core.formatter.alignment_for_binary_expression=16 -org.eclipse.jdt.core.formatter.brace_position_for_type_declaration=end_of_line -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws=insert -encoding/src/test/java=UTF-8 -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_binary_operator=insert -org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations=false -org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases=true -org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator=do not insert -org.eclipse.jdt.core.formatter.blank_lines_before_package=0 org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration=insert -org.eclipse.jdt.core.formatter.indent_empty_lines=false -org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments=do not insert -org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation=16 -org.eclipse.jdt.core.formatter.wrap_outer_expressions_when_nested=true +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference=do not insert org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation=do not insert -org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws=do not insert -org.eclipse.jdt.core.codeComplete.staticFinalFieldPrefixes= -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments=do not insert -org.eclipse.jdt.core.formatter.brace_position_for_block_in_case=end_of_line -org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body=0 -org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert=insert -org.eclipse.jdt.core.formatter.alignment_for_conditional_expression=80 -org.eclipse.jdt.core.codeComplete.argumentPrefixes= -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration=do not insert 
-org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line=false -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration=insert -org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch=false -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized=insert -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant=insert -org.eclipse.jdt.core.codeComplete.argumentSuffixes= -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant=do not insert org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for=insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments=do not insert -org.eclipse.jdt.core.formatter.alignment_for_resources_in_try=80 -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while=insert -org.eclipse.jdt.core.codeComplete.staticFieldPrefixes= +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration=do not insert org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation=do not insert -org.eclipse.jdt.core.compiler.debug.lineNumber=generate -org.eclipse.jdt.core.formatter.blank_lines_before_imports=1 -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression=do not insert -org.eclipse.jdt.core.formatter.alignment_for_compact_if=16 -org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled -org.eclipse.jdt.core.formatter.insert_new_line_after_label=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_try=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while=insert +org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return=insert +org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw=insert +org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator=do not insert 
+org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional=insert +org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_semicolon=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_try_resources=do not insert org.eclipse.jdt.core.formatter.insert_space_before_unary_operator=do not insert -org.eclipse.jdt.core.formatter.comment.preserve_white_space_between_code_and_line_comments=false -org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_try=do not insert -org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer=2 +org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation=do not insert +org.eclipse.jdt.core.formatter.join_lines_in_comments=true +org.eclipse.jdt.core.formatter.join_wrapped_lines=true +org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line=false +org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line=false +org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line=false +org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line=false org.eclipse.jdt.core.formatter.lineSplit=120 -org.eclipse.jdt.core.compiler.source=1.7 -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration=insert -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body=insert +org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column=false +org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column=false +org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body=0 +org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve=1 +org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line=true +org.eclipse.jdt.core.formatter.tabulation.char=space org.eclipse.jdt.core.formatter.tabulation.size=4 -org.eclipse.jdt.core.compiler.problem.assertIdentifier=error -org.eclipse.jdt.core.formatter.brace_position_for_array_initializer=end_of_line -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments=do not insert -org.eclipse.jdt.core.formatter.blank_lines_between_import_groups=1 -org.eclipse.jdt.core.formatter.comment.new_lines_at_block_boundaries=true -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters=do not insert -org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation=do not insert 
-org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional=insert -org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration=insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference=do not insert -org.eclipse.jdt.core.formatter.indent_statements_compare_to_block=true -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation=do not insert -eclipse.preferences.version=1 -org.eclipse.jdt.core.formatter.enabling_tag=@formatter\:on -org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference=do not insert -org.eclipse.jdt.core.compiler.compliance=1.7 -org.eclipse.jdt.core.formatter.blank_lines_after_package=1 -org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration=16 -org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing=do not insert -org.eclipse.jdt.core.formatter.blank_lines_before_field=0 +org.eclipse.jdt.core.formatter.use_on_off_tags=false +org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations=false +org.eclipse.jdt.core.formatter.wrap_before_binary_operator=true +org.eclipse.jdt.core.formatter.wrap_before_or_operator_multicatch=true +org.eclipse.jdt.core.formatter.wrap_outer_expressions_when_nested=true diff --git a/docker/settings.yml b/docker/settings.yml index 40c2f5f..f625ec7 100644 --- a/docker/settings.yml +++ b/docker/settings.yml @@ -1,6 +1,7 @@ enableCors: true corsOrigins: "*" +# If using attaching IAM Role to instance leave accessKey and secretKey empty. s3Fs: bucket: pathPrefix: diff --git a/settings.yml b/settings.yml index da3abd3..5519303 100644 --- a/settings.yml +++ b/settings.yml @@ -1,6 +1,7 @@ enableCors: true corsOrigins: "*" +# If using attaching IAM Role to instance leave accessKey and secretKey empty. 
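
The comment above describes a deployment choice: when the instance already carries an IAM role, both accessKey and secretKey can stay blank and the storage server falls back to role-based credentials. The snippet below is an illustrative sketch only, not part of any patch in this series; the class and method names (S3ClientFactoryExample, buildS3Client) are invented for the example, and it mirrors the intended behaviour using the same AWS SDK v1 client types that appear elsewhere in this series.

import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

public class S3ClientFactoryExample {

    // accessKey/secretKey may be null or empty when an IAM role is attached to the instance.
    public static AmazonS3 buildS3Client(String accessKey, String secretKey) {
        boolean hasKeys = (accessKey != null) && (!accessKey.isEmpty())
                       && (secretKey != null) && (!secretKey.isEmpty());

        if (hasKeys) {
            // Explicit keys from settings.yml take precedence.
            return new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));
        }

        // No keys configured: the SDK's default credentials provider chain is used,
        // which resolves instance-profile (IAM role) credentials among other sources.
        return new AmazonS3Client();
    }
}
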
s3Fs: bucket: pathPrefix: diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java index 1b7bcf8..1461c45 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java @@ -62,10 +62,9 @@ public static class S3Fs { private Credentials credentials; public static class Credentials { - @NotEmpty + private String accessKey; - @NotEmpty private String secretKey; @JsonProperty From 32b0ac76a1a5fbb9a9c92c02347b4ff7456e6898 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Wed, 2 Aug 2017 08:22:25 +0300 Subject: [PATCH 16/66] credits --- .../takipi/oss/storage/TakipiStorageMain.java | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index 933a690..ee1d603 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -93,16 +93,23 @@ private Filesystem configureS3Filesystem(TakipiStorageConfiguration configuratio environment.jersey().register(new NoOpTreeStorageResource()); environment.jersey().register(new MachineInfoOnlyStatusStorageResource()); - AmazonS3 amazonS3; // Setup Amazon S3 client - if (configuration.getS3Fs().getCredentials() != null) { - TakipiStorageConfiguration.S3Fs.Credentials credentials = configuration.getS3Fs().getCredentials(); - AWSCredentials awsCredentials = new BasicAWSCredentials(credentials.getAccessKey(), credentials.getSecretKey()); - amazonS3 = new AmazonS3Client(awsCredentials); - } else { + TakipiStorageConfiguration.S3Fs.Credentials credentials = configuration.getS3Fs().getCredentials(); + AmazonS3 amazonS3; + + if ( credentials.getAccessKey() != null && !credentials.getAccessKey().isEmpty() && + credentials.getSecretKey() != null && !credentials.getSecretKey().isEmpty() ){ + log.debug("Using S3 Filesystem with keys" ); + + AWSCredentials awsCredentials = new BasicAWSCredentials(credentials.getAccessKey(), credentials.getSecretKey()); + amazonS3 = new AmazonS3Client(awsCredentials); + } + else { + // create a client connection based on IAM role assigned + log.debug("Using S3 Filesystem with IAM Role"); amazonS3 = new AmazonS3Client(); } - + // S3 bucket String bucket = configuration.getS3Fs().getBucket(); String pathPrefix = configuration.getS3Fs().getPathPrefix(); From a965c6082a78bc85b39b066c38f436905dfab211 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Thu, 3 Aug 2017 18:46:18 +0300 Subject: [PATCH 17/66] readme file --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index d6749d6..ab92da3 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,8 @@ Takipi Storage ============== -Thanks to moovel for developing this version supporting s3: https://github.com/moovel/takipi-storage/tree/s3-storage +Thanks to Moovel for developing this version supporting s3: https://github.com/moovel/takipi-storage/tree/s3-storage +And to Atlassian for adding some extra features Build and run: - clone the repo From 955b182e919904afa9e22457f3d8fde44cecd6e4 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Sun, 6 Aug 2017 14:28:51 +0300 Subject: [PATCH 18/66] standards --- settings.yml | 2 +- src/main/java/com/takipi/oss/storage/TakipiStorageMain.java | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff 
--git a/settings.yml b/settings.yml index 5519303..70a9dd4 100644 --- a/settings.yml +++ b/settings.yml @@ -1,7 +1,7 @@ enableCors: true corsOrigins: "*" -# If using attaching IAM Role to instance leave accessKey and secretKey empty. +# If using attaching IAM Role to instance leave accessKey and secretKey empty s3Fs: bucket: pathPrefix: diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index a48a4da..c32befe 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -98,8 +98,9 @@ private Filesystem configureS3Filesystem(TakipiStorageConfiguration configuratio AmazonS3 amazonS3; - if ( credentials.getAccessKey() != null && !credentials.getAccessKey().isEmpty() && - credentials.getSecretKey() != null && !credentials.getSecretKey().isEmpty() ) { + if ((credentials.getAccessKey() != null) && + (!credentials.getAccessKey().isEmpty()) && + (credentials.getSecretKey() != null && !credentials.getSecretKey().isEmpty())) { log.debug("Using S3 Filesystem with keys" ); AWSCredentials awsCredentials = new BasicAWSCredentials(credentials.getAccessKey(), credentials.getSecretKey()); From 750f8a6379c447478279fc3d0b155a8b73abdacd Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Sun, 6 Aug 2017 15:01:46 +0300 Subject: [PATCH 19/66] remove all warnings --- .../takipi/oss/storage/TakipiStorageMain.java | 10 +++--- .../takipi/oss/storage/fs/api/Filesystem.java | 1 - .../storage/health/FilesystemHealthCheck.java | 4 +-- .../diag/NoOpTreeStorageResource.java | 6 ++-- .../resources/fs/BinaryStorageResource.java | 32 ++++++++++++------- .../fs/JsonMultiDeleteStorageResource.java | 4 +-- .../fs/JsonMultiFetchStorageResource.java | 4 +-- .../fs/JsonSimpleSearchStorageResource.java | 4 +-- .../fs/folder/FolderFilesystemTest.java | 12 +++---- 9 files changed, 43 insertions(+), 34 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index c32befe..a1156c7 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -13,6 +13,7 @@ import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3Client; +import com.takipi.oss.storage.fs.BaseRecord; import com.takipi.oss.storage.fs.api.Filesystem; import com.takipi.oss.storage.fs.folder.simple.SimpleFilesystem; import com.takipi.oss.storage.fs.s3.S3Filesystem; @@ -52,6 +53,7 @@ public void initialize(Bootstrap bootstrap) { } @Override + @SuppressWarnings({ "rawtypes", "unchecked" }) public void run(TakipiStorageConfiguration configuration, Environment environment) { if (configuration.isEnableCors()) { enableCors(configuration, environment); @@ -69,7 +71,7 @@ public void run(TakipiStorageConfiguration configuration, Environment environmen environment.jersey().register(new VersionStorageResource()); } - private Filesystem configureFilesystem(TakipiStorageConfiguration configuration, Environment environment) { + private Filesystem configureFilesystem(TakipiStorageConfiguration configuration, Environment environment) { if(configuration.hasFolderFs()) { return configureFolderFilesystem(configuration, environment); } else if(configuration.hasS3Fs()) { @@ -80,7 +82,7 @@ private Filesystem configureFilesystem(TakipiStorageConfiguration configuration, } } - private Filesystem 
configureFolderFilesystem(TakipiStorageConfiguration configuration, Environment environment) { + private SimpleFilesystem configureFolderFilesystem(TakipiStorageConfiguration configuration, Environment environment) { log.debug("Using local filesystem at: {}", configuration.getFolderFs().getFolderPath()); environment.jersey().register(new TreeStorageResource(configuration)); @@ -88,7 +90,7 @@ private Filesystem configureFolderFilesystem(TakipiStorageConfiguration configur return new SimpleFilesystem(configuration.getFolderFs().getFolderPath(), configuration.getFolderFs().getMaxUsedStoragePercentage()); } - private Filesystem configureS3Filesystem(TakipiStorageConfiguration configuration, Environment environment) { + private Filesystem configureS3Filesystem(TakipiStorageConfiguration configuration, Environment environment) { // Setup basically mocked versions of info resources. environment.jersey().register(new NoOpTreeStorageResource()); environment.jersey().register(new MachineInfoOnlyStatusStorageResource()); @@ -117,7 +119,7 @@ private Filesystem configureS3Filesystem(TakipiStorageConfiguration configuratio String pathPrefix = configuration.getS3Fs().getPathPrefix(); log.debug("Using AWS S3 based filesystem with bucket: {}, prefix: {}", bucket, pathPrefix); - return new S3Filesystem(amazonS3, bucket, pathPrefix); + return new S3Filesystem(amazonS3, bucket, pathPrefix); } private void enableCors(TakipiStorageConfiguration configuration, Environment environment) { diff --git a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java index 568dd39..f31db59 100644 --- a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java @@ -4,7 +4,6 @@ import java.io.InputStream; import com.takipi.oss.storage.fs.BaseRecord; -import com.takipi.oss.storage.fs.Record; public interface Filesystem extends FilesystemHealth { /** diff --git a/src/main/java/com/takipi/oss/storage/health/FilesystemHealthCheck.java b/src/main/java/com/takipi/oss/storage/health/FilesystemHealthCheck.java index b14c71c..fd6c6d9 100644 --- a/src/main/java/com/takipi/oss/storage/health/FilesystemHealthCheck.java +++ b/src/main/java/com/takipi/oss/storage/health/FilesystemHealthCheck.java @@ -4,9 +4,9 @@ import com.takipi.oss.storage.fs.api.Filesystem; public class FilesystemHealthCheck extends HealthCheck { - private final Filesystem filesystem; + private final Filesystem filesystem; - public FilesystemHealthCheck(Filesystem filesystem) { + public FilesystemHealthCheck(Filesystem filesystem) { this.filesystem = filesystem; } diff --git a/src/main/java/com/takipi/oss/storage/resources/diag/NoOpTreeStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/diag/NoOpTreeStorageResource.java index e531c8e..9c9f3a6 100644 --- a/src/main/java/com/takipi/oss/storage/resources/diag/NoOpTreeStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/diag/NoOpTreeStorageResource.java @@ -1,9 +1,5 @@ package com.takipi.oss.storage.resources.diag; -import com.codahale.metrics.annotation.Timed; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import javax.ws.rs.Consumes; import javax.ws.rs.GET; import javax.ws.rs.Path; @@ -11,6 +7,8 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; +import com.codahale.metrics.annotation.Timed; + @Path("/storage/v1/diag/tree") @Consumes(MediaType.TEXT_PLAIN) @Produces(MediaType.TEXT_PLAIN) diff --git 
a/src/main/java/com/takipi/oss/storage/resources/fs/BinaryStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/BinaryStorageResource.java index 0203278..8db1cc8 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/BinaryStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/BinaryStorageResource.java @@ -1,19 +1,29 @@ package com.takipi.oss.storage.resources.fs; -import com.codahale.metrics.annotation.Timed; -import com.takipi.oss.storage.fs.Record; -import com.takipi.oss.storage.fs.api.Filesystem; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; -import javax.ws.rs.*; +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.DefaultValue; +import javax.ws.rs.GET; +import javax.ws.rs.HEAD; +import javax.ws.rs.PUT; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.codahale.metrics.annotation.Timed; +import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.api.Filesystem; @Path("/storage/v1/binary/{serviceId}/{type}/{key:.+}") @Consumes(MediaType.APPLICATION_OCTET_STREAM) @@ -22,9 +32,9 @@ public class BinaryStorageResource { private static final Logger logger = LoggerFactory.getLogger(BinaryStorageResource.class); - private final Filesystem filesystem; + private final Filesystem filesystem; - public BinaryStorageResource(Filesystem filesystem) { + public BinaryStorageResource(Filesystem filesystem) { this.filesystem = filesystem; } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiDeleteStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiDeleteStorageResource.java index 6273d0b..e2de986 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiDeleteStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiDeleteStorageResource.java @@ -24,9 +24,9 @@ public class JsonMultiDeleteStorageResource { private static final Logger logger = LoggerFactory.getLogger(JsonMultiDeleteStorageResource.class); - private final Filesystem filesystem; + private final Filesystem filesystem; - public JsonMultiDeleteStorageResource(Filesystem filesystem) { + public JsonMultiDeleteStorageResource(Filesystem filesystem) { this.filesystem = filesystem; } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index 6f2124e..038d4cc 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -25,9 +25,9 @@ public class JsonMultiFetchStorageResource { private static final Logger logger = LoggerFactory.getLogger(JsonMultiFetchStorageResource.class); - private final Filesystem filesystem; + private final Filesystem filesystem; - public JsonMultiFetchStorageResource(Filesystem filesystem) { + public JsonMultiFetchStorageResource(Filesystem filesystem) { this.filesystem = filesystem; } diff --git 
a/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleSearchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleSearchStorageResource.java index b3c4839..4cf7929 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleSearchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonSimpleSearchStorageResource.java @@ -22,9 +22,9 @@ public class JsonSimpleSearchStorageResource { private static final Logger logger = LoggerFactory.getLogger(JsonSimpleSearchStorageResource.class); - private final Filesystem filesystem; + private final Filesystem filesystem; - public JsonSimpleSearchStorageResource(Filesystem filesystem) { + public JsonSimpleSearchStorageResource(Filesystem filesystem) { this.filesystem = filesystem; } diff --git a/src/test/java/com/takipi/oss/storage/fs/folder/FolderFilesystemTest.java b/src/test/java/com/takipi/oss/storage/fs/folder/FolderFilesystemTest.java index ea623c1..6757cd8 100644 --- a/src/test/java/com/takipi/oss/storage/fs/folder/FolderFilesystemTest.java +++ b/src/test/java/com/takipi/oss/storage/fs/folder/FolderFilesystemTest.java @@ -25,7 +25,7 @@ public void testRootFolderIsValid() { try { File tempRoot = newTempFolderFile(); - new RecordFilesystem(tempRoot.getPath(), 0.0); + new RecordFilesystem(tempRoot.getPath(), 0.0); } catch (Exception e) { e.printStackTrace(); fail(); @@ -36,7 +36,7 @@ public void testRootFolderIsValid() { @Test public void testRootFolderIsInvalid() { try { - new RecordFilesystem("//:/", 0.0); + new RecordFilesystem("//:/", 0.0); fail(); } catch (Exception e) { } @@ -48,7 +48,7 @@ public void testRootFolderMaxUsedStorageValid() { try { File tempRoot = newTempFolderFile(); - new RecordFilesystem(tempRoot.getPath(), 0.95); + new RecordFilesystem(tempRoot.getPath(), 0.95); } catch (Exception e) { e.printStackTrace(); fail(); @@ -61,7 +61,7 @@ public void testRootFolderMaxUsedStorageBelowZero() { try { File tempRoot = newTempFolderFile(); - new RecordFilesystem(tempRoot.getPath(), -1); + new RecordFilesystem(tempRoot.getPath(), -1); fail(); } catch (Exception e) { } @@ -72,7 +72,7 @@ public void testRootFolderMaxUsedStorageBelowZero() { public void testRootFolderMaxUsedStorageAboveOne() { try { File tempRoot = newTempFolderFile(); - new RecordFilesystem(tempRoot.getPath(), 1.1); + new RecordFilesystem(tempRoot.getPath(), 1.1); fail(); } catch (Exception e) { } @@ -188,7 +188,7 @@ private File newTempFolderFile() { private Filesystem newValidFolderFilesystem() { File temp = newTempFolderFile(); - return new RecordFilesystem(temp.getPath(), 0.99); + return new RecordFilesystem(temp.getPath(), 0.99); } private Record newStubRecord() { From 56408b2e01036f1c35c38914191b196958cbde7f Mon Sep 17 00:00:00 2001 From: Chen Harel Date: Mon, 7 Aug 2017 15:32:53 +0300 Subject: [PATCH 20/66] overrides --- .../takipi/oss/storage/data/simple/SimpleSearchResponse.java | 2 ++ src/main/java/com/takipi/oss/storage/fs/Record.java | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/src/main/java/com/takipi/oss/storage/data/simple/SimpleSearchResponse.java b/src/main/java/com/takipi/oss/storage/data/simple/SimpleSearchResponse.java index 8dc82e4..1a250f6 100644 --- a/src/main/java/com/takipi/oss/storage/data/simple/SimpleSearchResponse.java +++ b/src/main/java/com/takipi/oss/storage/data/simple/SimpleSearchResponse.java @@ -11,10 +11,12 @@ public SimpleSearchResponse(String data, String path) { this.path = path; } + @Override public String getData() { return data; } + @Override public 
String getPath() { return path; } diff --git a/src/main/java/com/takipi/oss/storage/fs/Record.java b/src/main/java/com/takipi/oss/storage/fs/Record.java index a7b2c7c..a94bf83 100644 --- a/src/main/java/com/takipi/oss/storage/fs/Record.java +++ b/src/main/java/com/takipi/oss/storage/fs/Record.java @@ -23,18 +23,22 @@ public static Record newRecord(String serviceId, String type, String key) { return new Record(serviceId, type, key); } + @Override public String getServiceId() { return serviceId; } + @Override public String getType() { return type; } + @Override public String getKey() { return key; } + @Override public String getPath() { return (this.getServiceId() + File.separator + this.getType() + File.separator + From fbc315d9acaf87ace8c8cf13c4df1ac1a6cc8662 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Sun, 1 Oct 2017 19:39:26 +0300 Subject: [PATCH 21/66] added concurrency and caching to multi fetch --- .../fs/JsonMultiFetchStorageResource.java | 158 +++++++++++++++--- .../oss/storage/resources/fs/StopWatch.java | 18 ++ 2 files changed, 152 insertions(+), 24 deletions(-) create mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/StopWatch.java diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index 038d4cc..cbbfcf8 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -1,7 +1,7 @@ package com.takipi.oss.storage.resources.fs; import com.codahale.metrics.annotation.Timed; -import com.google.common.collect.Lists; +import com.takipi.oss.storage.data.EncodingType; import com.takipi.oss.storage.data.RecordWithData; import com.takipi.oss.storage.data.fetch.MultiFetchRequest; import com.takipi.oss.storage.data.fetch.MultiFetchResponse; @@ -17,51 +17,161 @@ import javax.ws.rs.Produces; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import java.util.List; +import java.util.*; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicInteger; @Path("/storage/v1/json/multifetch") @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) public class JsonMultiFetchStorageResource { + + private static final int MAX_CACHE_SIZE = 4194304; // 4 MB + private final static int MAX_THREADS = 50; + private static final Logger logger = LoggerFactory.getLogger(JsonMultiFetchStorageResource.class); - + private static final AtomicInteger threadCount = new AtomicInteger(); + private static final ExecutorService executorService; + private static final Map cache; + private final Filesystem filesystem; - + + static { + + cache = Collections.synchronizedMap( + new LinkedHashMap(1024,0.75f, true) { + + private int cacheSize = 0; + + @Override protected boolean removeEldestEntry(Map.Entry eldest) { + boolean remove = cacheSize > MAX_CACHE_SIZE; + if (remove) { + cacheSize -= eldest.getValue().length(); + } + return remove; + } + + @Override public String put(String key, String value) { + cacheSize += value.length(); + return super.put(key, value); + } + }); + + executorService = Executors.newFixedThreadPool(MAX_THREADS, new ThreadFactory() { + @Override + public Thread newThread(Runnable r) + { + Thread t = new Thread(r); + t.setDaemon(true); + t.setName("s3_get_thread_" + threadCount.incrementAndGet()); + return t; + } + }); + } + public JsonMultiFetchStorageResource(Filesystem 
filesystem) { this.filesystem = filesystem; } - + @POST @Timed public Response post(MultiFetchRequest request) { try { MultiFetchResponse response = handleResponse(request); - + return Response.ok(response).build(); - } catch (Exception e) { + } + catch (Exception e) { return Response.serverError().entity("Problem getting keys").build(); } } - - private MultiFetchResponse handleResponse(MultiFetchRequest request) { - List records = Lists.newArrayList(); - - for (Record record : request.records) { - try { - String value = FilesystemUtil.read(filesystem, record, request.encodingType); + + private MultiFetchResponse handleResponse(MultiFetchRequest request) throws InterruptedException { + final EncodingType encodingType = request.encodingType; + final List recordsToRetrieve = request.records; + final int count = recordsToRetrieve.size(); + final List> futures = new ArrayList<>(count); + final List recordsWithData = new ArrayList<>(count); + + logger.info("---------- Multi Fetch Request for " + count + " records"); + + StopWatch stopWatch = new StopWatch(); + + for (Record record : recordsToRetrieve) { + String value = cache.get(record.getKey()); + recordsWithData.add(RecordWithData.of(record, value)); + if (value != null) { + logger.info("S3 key " + record.getKey() + " found in cache. " + value.length() + " bytes"); + } + else { + Callable callable = new S3Callable(filesystem, record, encodingType); + futures.add(executorService.submit(callable)); + } + } + + for (int i = 0, futureIndex = 0; i < count; ++i) { + RecordWithData recordWithData = recordsWithData.get(i); + if (recordWithData.getData() == null) { + try { + String value = futures.get(futureIndex++).get(20, TimeUnit.SECONDS); + recordWithData.setData(value); + } + catch (Exception e) { + logger.error(e.getMessage()); + } + } + } + + logger.info("---------- Multi Fetch Request for " + count + " records completed in " + stopWatch.elapsed() + " ms"); + + return new MultiFetchResponse(recordsWithData); + } + + private static class S3Callable implements Callable { + final Filesystem filesystem; + final Record record; + final EncodingType encodingType; + + S3Callable(Filesystem filesystem, Record record, EncodingType encodingType) { + this.filesystem = filesystem; + this.record = record; + this.encodingType = encodingType; + } + + @Override + public String call() throws Exception { + StopWatch stopWatch = new StopWatch(); + String value = null; + final int MAX_TRIES = 2; + int count = 0; + + while ((value == null) && (count < MAX_TRIES)) { + + if (count++ > 0) { + logger.warn("Retrying loading S3 key " + record.getKey() + ". Elapsed time = " + stopWatch.elapsed() + " ms"); + stopWatch.reset(); + } - if (value != null) { - records.add(RecordWithData.of(record, value)); - } else { - logger.warn("Key not found: {}", record.getKey()); + try { + value = FilesystemUtil.read(filesystem, record, encodingType); + } + catch (Exception e) { + // Need this catch because some exceptions inside FilesystemUtil.read are caught and result in a + // null return value. The code would be simpler if all exceptions were just passed on. } - } catch (Exception e) { - logger.error("Problem with record " + record, e); } + + if (value != null) { + cache.put(record.getKey(), value); + logger.info("--------------------- " + Thread.currentThread().getName() + " loaded S3 key " + + record.getKey() + " in " + stopWatch.elapsed() + " ms. " + value.length() + " bytes"); + } + else { + logger.error("Failed to load S3 key: " + record.getKey() + ". 
Elapsed time = " + stopWatch.elapsed() + " ms"); + throw new RuntimeException("Failed to load S3 key: " + record.getKey()); + } + + return value; } - - MultiFetchResponse response = new MultiFetchResponse(records); - - return response; } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/StopWatch.java b/src/main/java/com/takipi/oss/storage/resources/fs/StopWatch.java new file mode 100644 index 0000000..cfef6d6 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/fs/StopWatch.java @@ -0,0 +1,18 @@ +package com.takipi.oss.storage.resources.fs; + +class StopWatch +{ + private long start; + + StopWatch() { + reset(); + } + + long elapsed() { + return System.currentTimeMillis() - start; + } + + void reset() { + start = System.currentTimeMillis(); + } +} From bfbbfd5b019acee3fb63797c27085d4b4ed71339 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 2 Oct 2017 11:36:13 +0300 Subject: [PATCH 22/66] Extracted the S3 cache to an external class and added a dummy cache as the current cache implementation. --- .../fs/JsonMultiFetchStorageResource.java | 27 +------- .../oss/storage/resources/fs/S3Cache.java | 65 +++++++++++++++++++ 2 files changed, 68 insertions(+), 24 deletions(-) create mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index cbbfcf8..0544644 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -26,37 +26,16 @@ @Produces(MediaType.APPLICATION_JSON) public class JsonMultiFetchStorageResource { - private static final int MAX_CACHE_SIZE = 4194304; // 4 MB - private final static int MAX_THREADS = 50; + private static final int MAX_THREADS = 50; private static final Logger logger = LoggerFactory.getLogger(JsonMultiFetchStorageResource.class); - private static final AtomicInteger threadCount = new AtomicInteger(); private static final ExecutorService executorService; - private static final Map cache; - + private static final AtomicInteger threadCount = new AtomicInteger(); + private static final S3Cache cache = new DummyS3Cache(); private final Filesystem filesystem; static { - cache = Collections.synchronizedMap( - new LinkedHashMap(1024,0.75f, true) { - - private int cacheSize = 0; - - @Override protected boolean removeEldestEntry(Map.Entry eldest) { - boolean remove = cacheSize > MAX_CACHE_SIZE; - if (remove) { - cacheSize -= eldest.getValue().length(); - } - return remove; - } - - @Override public String put(String key, String value) { - cacheSize += value.length(); - return super.put(key, value); - } - }); - executorService = Executors.newFixedThreadPool(MAX_THREADS, new ThreadFactory() { @Override public Thread newThread(Runnable r) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java b/src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java new file mode 100644 index 0000000..bffdc4e --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java @@ -0,0 +1,65 @@ +package com.takipi.oss.storage.resources.fs; + +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; + +interface S3Cache { + String get(String key); + String put(String key, String value); +} + +class DummyS3Cache implements S3Cache { + + @Override + public String 
get(String key) + { + return null; + } + + @Override + public String put(String key, String value) + { + return null; + } +} + +class S3CacheInMemory implements S3Cache { + + private static final int MAX_CACHE_SIZE = 4194304; // 4 MB + + private final Map cache; + + S3CacheInMemory() { + + cache = Collections.synchronizedMap( + new LinkedHashMap(1024,0.75f, true) { + + private int cacheSize = 0; + + @Override protected boolean removeEldestEntry(Map.Entry eldest) { + boolean remove = cacheSize > MAX_CACHE_SIZE; + if (remove) { + cacheSize -= eldest.getValue().length(); + } + return remove; + } + + @Override public String put(String key, String value) { + cacheSize += value.length(); + return super.put(key, value); + } + }); + + } + + @Override + public String get(String key) { + return cache.get(key); + } + + @Override + public String put(String key, String value) { + return cache.put(key, value); + } +} From 277a7b3ef5583ca29cbf804276664cb17e184938 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 2 Oct 2017 11:57:26 +0300 Subject: [PATCH 23/66] Extracted the S3 cache to an external class and added a dummy cache as the current cache implementation. --- .../fs/JsonMultiFetchStorageResource.java | 2 ++ .../oss/storage/resources/fs/S3Cache.java | 27 ++++++++++--------- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index 0544644..74c0a69 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -27,10 +27,12 @@ public class JsonMultiFetchStorageResource { private static final int MAX_THREADS = 50; + private static final int MAX_CACHE_SIZE = 8388608; // 8 MB private static final Logger logger = LoggerFactory.getLogger(JsonMultiFetchStorageResource.class); private static final ExecutorService executorService; private static final AtomicInteger threadCount = new AtomicInteger(); + //private static final S3Cache cache = new S3CacheInMemory(MAX_CACHE_SIZE); private static final S3Cache cache = new DummyS3Cache(); private final Filesystem filesystem; diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java b/src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java index bffdc4e..e67609a 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java @@ -12,41 +12,44 @@ interface S3Cache { class DummyS3Cache implements S3Cache { @Override - public String get(String key) - { + public String get(String key) { return null; } @Override - public String put(String key, String value) - { + public String put(String key, String value) { return null; } } class S3CacheInMemory implements S3Cache { - private static final int MAX_CACHE_SIZE = 4194304; // 4 MB - private final Map cache; - S3CacheInMemory() { - + S3CacheInMemory(final int maxCacheSize) { + + int initialCapacity = 2; + int estimatedAverageSizePerElement = 600; + while ((initialCapacity * estimatedAverageSizePerElement) < maxCacheSize) { + initialCapacity *= 2; + } + cache = Collections.synchronizedMap( - new LinkedHashMap(1024,0.75f, true) { + + new LinkedHashMap(initialCapacity,0.75f, true) { private int cacheSize = 0; @Override protected boolean removeEldestEntry(Map.Entry eldest) { - boolean remove = cacheSize > 
MAX_CACHE_SIZE; + boolean remove = cacheSize > maxCacheSize; if (remove) { - cacheSize -= eldest.getValue().length(); + cacheSize -= (eldest.getKey().length() + eldest.getValue().length()); } return remove; } @Override public String put(String key, String value) { - cacheSize += value.length(); + cacheSize += (key.length() + value.length()); return super.put(key, value); } }); From bb49a50707cad1b70200f44ee1d2003b31ffacee Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 2 Oct 2017 14:22:17 +0300 Subject: [PATCH 24/66] Extracted the S3 cache to an external class and added a dummy cache as the current cache implementation. --- .../java/com/takipi/oss/storage/resources/fs/S3Cache.java | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java b/src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java index e67609a..be576e1 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java @@ -28,11 +28,8 @@ class S3CacheInMemory implements S3Cache { S3CacheInMemory(final int maxCacheSize) { - int initialCapacity = 2; - int estimatedAverageSizePerElement = 600; - while ((initialCapacity * estimatedAverageSizePerElement) < maxCacheSize) { - initialCapacity *= 2; - } + int estimatedSizePerElement = 600; + int initialCapacity = (int)Math.pow(2, Math.ceil(Math.log((float)maxCacheSize / estimatedSizePerElement) / Math.log(2))); cache = Collections.synchronizedMap( From c6af41aea75b3700f4e90a6c8e611ea2657bede9 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 2 Oct 2017 18:33:26 +0300 Subject: [PATCH 25/66] some refactoring --- .../fs/JsonMultiFetchStorageResource.java | 124 ++-------------- .../storage/resources/fs/fetcher/Cache.java | 7 + .../fs/fetcher/ConcurrentMultiFetcher.java | 136 ++++++++++++++++++ .../resources/fs/fetcher/DummyCache.java | 14 ++ .../InMemoryCache.java} | 26 +--- .../MillisecStopWatch.java} | 6 +- .../resources/fs/fetcher/MultiFetcher.java | 11 ++ .../fs/fetcher/SequentialMultiFetcher.java | 62 ++++++++ 8 files changed, 245 insertions(+), 141 deletions(-) create mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/fetcher/Cache.java create mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/fetcher/ConcurrentMultiFetcher.java create mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/fetcher/DummyCache.java rename src/main/java/com/takipi/oss/storage/resources/fs/{S3Cache.java => fetcher/InMemoryCache.java} (72%) rename src/main/java/com/takipi/oss/storage/resources/fs/{StopWatch.java => fetcher/MillisecStopWatch.java} (62%) create mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MultiFetcher.java create mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SequentialMultiFetcher.java diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index 74c0a69..507f1d9 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -1,15 +1,11 @@ package com.takipi.oss.storage.resources.fs; import com.codahale.metrics.annotation.Timed; -import com.takipi.oss.storage.data.EncodingType; -import com.takipi.oss.storage.data.RecordWithData; import com.takipi.oss.storage.data.fetch.MultiFetchRequest; 
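// ---------------------------------------------------------------------------
// Illustrative aside, not part of this patch: minimal sketches of the new
// fetcher-package abstractions that the refactored resource below is coded
// against. Cache.java and MultiFetcher.java are created by this commit but
// their bodies are not shown in this excerpt, so the signatures here are
// inferred from the call sites (cache.get/put, multiFetcher.loadData) and
// should be read as assumptions rather than the actual sources. Assumes the
// surrounding file's imports (MultiFetchRequest, MultiFetchResponse,
// Filesystem, Record) are in scope.
interface Cache {
    String get(String key);               // returns null on a cache miss
    String put(String key, String value); // stores the value, possibly evicting older entries
}

interface MultiFetcher {
    // Loads every record in the request, consulting the cache before hitting the filesystem.
    MultiFetchResponse loadData(MultiFetchRequest request, Filesystem<Record> filesystem, Cache cache);
}
// ---------------------------------------------------------------------------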
import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.helper.FilesystemUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import com.takipi.oss.storage.resources.fs.fetcher.*; import javax.ws.rs.Consumes; import javax.ws.rs.POST; @@ -17,38 +13,21 @@ import javax.ws.rs.Produces; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import java.util.*; -import java.util.concurrent.*; -import java.util.concurrent.atomic.AtomicInteger; @Path("/storage/v1/json/multifetch") @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) public class JsonMultiFetchStorageResource { - private static final int MAX_THREADS = 50; private static final int MAX_CACHE_SIZE = 8388608; // 8 MB - private static final Logger logger = LoggerFactory.getLogger(JsonMultiFetchStorageResource.class); - private static final ExecutorService executorService; - private static final AtomicInteger threadCount = new AtomicInteger(); - //private static final S3Cache cache = new S3CacheInMemory(MAX_CACHE_SIZE); - private static final S3Cache cache = new DummyS3Cache(); + private static final Cache cache = new InMemoryCache(MAX_CACHE_SIZE); + //private static final Cache cache = new DummyCache(); + private final Filesystem filesystem; - static { - - executorService = Executors.newFixedThreadPool(MAX_THREADS, new ThreadFactory() { - @Override - public Thread newThread(Runnable r) - { - Thread t = new Thread(r); - t.setDaemon(true); - t.setName("s3_get_thread_" + threadCount.incrementAndGet()); - return t; - } - }); - } + private final MultiFetcher multiFetcher = new ConcurrentMultiFetcher(); + //private final MultiFetcher multiFetcher = new SequentialMultiFetcher(); public JsonMultiFetchStorageResource(Filesystem filesystem) { this.filesystem = filesystem; @@ -58,7 +37,8 @@ public JsonMultiFetchStorageResource(Filesystem filesystem) { @Timed public Response post(MultiFetchRequest request) { try { - MultiFetchResponse response = handleResponse(request); + + MultiFetchResponse response = multiFetcher.loadData(request, filesystem, cache); return Response.ok(response).build(); } @@ -67,92 +47,4 @@ public Response post(MultiFetchRequest request) { } } - private MultiFetchResponse handleResponse(MultiFetchRequest request) throws InterruptedException { - final EncodingType encodingType = request.encodingType; - final List recordsToRetrieve = request.records; - final int count = recordsToRetrieve.size(); - final List> futures = new ArrayList<>(count); - final List recordsWithData = new ArrayList<>(count); - - logger.info("---------- Multi Fetch Request for " + count + " records"); - - StopWatch stopWatch = new StopWatch(); - - for (Record record : recordsToRetrieve) { - String value = cache.get(record.getKey()); - recordsWithData.add(RecordWithData.of(record, value)); - if (value != null) { - logger.info("S3 key " + record.getKey() + " found in cache. 
" + value.length() + " bytes"); - } - else { - Callable callable = new S3Callable(filesystem, record, encodingType); - futures.add(executorService.submit(callable)); - } - } - - for (int i = 0, futureIndex = 0; i < count; ++i) { - RecordWithData recordWithData = recordsWithData.get(i); - if (recordWithData.getData() == null) { - try { - String value = futures.get(futureIndex++).get(20, TimeUnit.SECONDS); - recordWithData.setData(value); - } - catch (Exception e) { - logger.error(e.getMessage()); - } - } - } - - logger.info("---------- Multi Fetch Request for " + count + " records completed in " + stopWatch.elapsed() + " ms"); - - return new MultiFetchResponse(recordsWithData); - } - - private static class S3Callable implements Callable { - final Filesystem filesystem; - final Record record; - final EncodingType encodingType; - - S3Callable(Filesystem filesystem, Record record, EncodingType encodingType) { - this.filesystem = filesystem; - this.record = record; - this.encodingType = encodingType; - } - - @Override - public String call() throws Exception { - StopWatch stopWatch = new StopWatch(); - String value = null; - final int MAX_TRIES = 2; - int count = 0; - - while ((value == null) && (count < MAX_TRIES)) { - - if (count++ > 0) { - logger.warn("Retrying loading S3 key " + record.getKey() + ". Elapsed time = " + stopWatch.elapsed() + " ms"); - stopWatch.reset(); - } - - try { - value = FilesystemUtil.read(filesystem, record, encodingType); - } - catch (Exception e) { - // Need this catch because some exceptions inside FilesystemUtil.read are caught and result in a - // null return value. The code would be simpler if all exceptions were just passed on. - } - } - - if (value != null) { - cache.put(record.getKey(), value); - logger.info("--------------------- " + Thread.currentThread().getName() + " loaded S3 key " + - record.getKey() + " in " + stopWatch.elapsed() + " ms. " + value.length() + " bytes"); - } - else { - logger.error("Failed to load S3 key: " + record.getKey() + ". 
Elapsed time = " + stopWatch.elapsed() + " ms"); - throw new RuntimeException("Failed to load S3 key: " + record.getKey()); - } - - return value; - } - } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/Cache.java b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/Cache.java new file mode 100644 index 0000000..c5da164 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/Cache.java @@ -0,0 +1,7 @@ +package com.takipi.oss.storage.resources.fs.fetcher; + +public interface Cache +{ + String get(String key); + String put(String key, String value); +} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/ConcurrentMultiFetcher.java new file mode 100644 index 0000000..f57491e --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/ConcurrentMultiFetcher.java @@ -0,0 +1,136 @@ +package com.takipi.oss.storage.resources.fs.fetcher; + +import com.takipi.oss.storage.data.EncodingType; +import com.takipi.oss.storage.data.RecordWithData; +import com.takipi.oss.storage.data.fetch.MultiFetchRequest; +import com.takipi.oss.storage.data.fetch.MultiFetchResponse; +import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.helper.FilesystemUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicInteger; + +public class ConcurrentMultiFetcher implements MultiFetcher { + + private static final Logger logger = LoggerFactory.getLogger(ConcurrentMultiFetcher.class); + + private static final int MAX_THREADS = 50; + + private final ExecutorService executorService; + private final AtomicInteger threadCount = new AtomicInteger(); + + public ConcurrentMultiFetcher() { + + executorService = Executors.newFixedThreadPool(MAX_THREADS, new ThreadFactory() + { + @Override + public Thread newThread(Runnable r) + { + Thread t = new Thread(r); + t.setDaemon(true); + t.setName("fetcher_thread_" + threadCount.incrementAndGet()); + return t; + } + }); + } + + @Override + public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem, Cache cache) { + final EncodingType encodingType = request.encodingType; + final List recordsToRetrieve = request.records; + final int count = recordsToRetrieve.size(); + final List> futures = new ArrayList<>(count); + final List recordsWithData = new ArrayList<>(count); + + logger.debug("---------- Starting concurrent multi fetch request for " + count + " records"); + + MillisecStopWatch stopWatch = new MillisecStopWatch(); + + for (Record record : recordsToRetrieve) { + String value = cache.get(record.getKey()); + recordsWithData.add(RecordWithData.of(record, value)); + if (value != null) { + logger.debug("Object for key " + record.getKey() + " found in cache. 
" + value.length() + " bytes"); + } + else { + Callable callable = new FetcherCallable(filesystem, record, encodingType); + futures.add(executorService.submit(callable)); + } + } + + for (int i = 0, futureIndex = 0; i < count; ++i) { + RecordWithData recordWithData = recordsWithData.get(i); + if (recordWithData.getData() == null) { + try { + String value = futures.get(futureIndex++).get(20, TimeUnit.SECONDS); + cache.put(recordWithData.getRecord().getKey(), value); + recordWithData.setData(value); + } + catch (Exception e) { + logger.error(e.getMessage()); + } + } + } + + logger.info("---------- Concurrent multi fetch request for " + count + " records completed in " + stopWatch.elapsed() + " ms"); + + return new MultiFetchResponse(recordsWithData); + } + + private static class FetcherCallable implements Callable { + + static AtomicInteger counter = new AtomicInteger(); + final Filesystem filesystem; + final Record record; + final EncodingType encodingType; + + FetcherCallable(Filesystem filesystem, Record record, EncodingType encodingType) { + this.filesystem = filesystem; + this.record = record; + this.encodingType = encodingType; + } + + @Override + public String call() throws Exception { + MillisecStopWatch stopWatch = new MillisecStopWatch(); + String value = null; + final int MAX_TRIES = 2; + int count = 0; + + while ((value == null) && (count < MAX_TRIES)) { + + if (count++ > 0) { + logger.warn("Retry loading object for key " + record.getKey()); + stopWatch.reset(); + } + + try { + value = FilesystemUtil.read(filesystem, record, encodingType); + } + catch (Exception e) { + // Need this catch because some exceptions inside FilesystemUtil.read are caught and result in a + // null return value, and some are thrown. The code would be simpler if all exceptions were thrown. + } + } + + if (value != null) { + + logger.debug("--------------------- " + Thread.currentThread().getName() + " loaded key " + + record.getKey() + " in " + stopWatch.elapsed() + " ms. " + value.length() + " bytes"); + + return value; + } + else { + + logger.error("Failed to load object for key: " + record.getKey() + ". 
Elapsed time = " + stopWatch.elapsed() + " ms"); + + throw new RuntimeException("Failed to load object for key: " + record.getKey()); + } + } + } +} \ No newline at end of file diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/DummyCache.java b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/DummyCache.java new file mode 100644 index 0000000..204caa0 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/DummyCache.java @@ -0,0 +1,14 @@ +package com.takipi.oss.storage.resources.fs.fetcher; + +public class DummyCache implements Cache +{ + @Override + public String get(String key) { + return null; + } + + @Override + public String put(String key, String value) { + return null; + } +} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/InMemoryCache.java similarity index 72% rename from src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java rename to src/main/java/com/takipi/oss/storage/resources/fs/fetcher/InMemoryCache.java index be576e1..b4cb175 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/S3Cache.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/InMemoryCache.java @@ -1,32 +1,14 @@ -package com.takipi.oss.storage.resources.fs; +package com.takipi.oss.storage.resources.fs.fetcher; import java.util.Collections; import java.util.LinkedHashMap; import java.util.Map; -interface S3Cache { - String get(String key); - String put(String key, String value); -} - -class DummyS3Cache implements S3Cache { - - @Override - public String get(String key) { - return null; - } - - @Override - public String put(String key, String value) { - return null; - } -} - -class S3CacheInMemory implements S3Cache { - +public class InMemoryCache implements Cache +{ private final Map cache; - S3CacheInMemory(final int maxCacheSize) { + public InMemoryCache(final int maxCacheSize) { int estimatedSizePerElement = 600; int initialCapacity = (int)Math.pow(2, Math.ceil(Math.log((float)maxCacheSize / estimatedSizePerElement) / Math.log(2))); diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/StopWatch.java b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MillisecStopWatch.java similarity index 62% rename from src/main/java/com/takipi/oss/storage/resources/fs/StopWatch.java rename to src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MillisecStopWatch.java index cfef6d6..4509994 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/StopWatch.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MillisecStopWatch.java @@ -1,10 +1,10 @@ -package com.takipi.oss.storage.resources.fs; +package com.takipi.oss.storage.resources.fs.fetcher; -class StopWatch +class MillisecStopWatch { private long start; - StopWatch() { + MillisecStopWatch() { reset(); } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MultiFetcher.java new file mode 100644 index 0000000..121cb48 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MultiFetcher.java @@ -0,0 +1,11 @@ +package com.takipi.oss.storage.resources.fs.fetcher; + +import com.takipi.oss.storage.data.fetch.MultiFetchRequest; +import com.takipi.oss.storage.data.fetch.MultiFetchResponse; +import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.api.Filesystem; + +public interface MultiFetcher { + + MultiFetchResponse 
loadData(MultiFetchRequest request, Filesystem filesystem, Cache cache); +} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SequentialMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SequentialMultiFetcher.java new file mode 100644 index 0000000..c1d0668 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SequentialMultiFetcher.java @@ -0,0 +1,62 @@ +package com.takipi.oss.storage.resources.fs.fetcher; + +import com.google.common.collect.Lists; +import com.takipi.oss.storage.data.RecordWithData; +import com.takipi.oss.storage.data.fetch.MultiFetchRequest; +import com.takipi.oss.storage.data.fetch.MultiFetchResponse; +import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.helper.FilesystemUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; + +public class SequentialMultiFetcher implements MultiFetcher { + + private static final Logger logger = LoggerFactory.getLogger(SequentialMultiFetcher.class); + + @Override + public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem, Cache cache) { + + List records = Lists.newArrayList(); + final int count = request.records.size(); + + logger.debug("---------- Starting sequential multi fetch request for " + count + " records"); + MillisecStopWatch stopWatch = new MillisecStopWatch(); + + for (Record record : request.records) { + + final String key = record.getKey(); + + String value = cache.get(key); + + if (value != null) { + logger.debug("Object for key " + record.getKey() + " found in cache. " + value.length() + " bytes"); + records.add(RecordWithData.of(record, value)); + } + else { + + try { + + value = FilesystemUtil.read(filesystem, record, request.encodingType); + + if (value == null) { + logger.warn("Key not found: {}", key); + } + else { + records.add(RecordWithData.of(record, value)); + cache.put(key, value); + } + } + catch (Exception e) { + logger.error("Problem with record " + record, e); + } + } + } + + logger.info("---------- Sequential multi fetch request for " + count + " records completed in " + stopWatch.elapsed() + " ms"); + + return new MultiFetchResponse(records); + } +} From 8750dd922d9355a61752bdd0e74f738129cf56f3 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 2 Oct 2017 18:47:55 +0300 Subject: [PATCH 26/66] base multifetcher type on file system type --- .../fs/JsonMultiFetchStorageResource.java | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index 507f1d9..4caa596 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -5,6 +5,7 @@ import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.s3.S3Filesystem; import com.takipi.oss.storage.resources.fs.fetcher.*; import javax.ws.rs.Consumes; @@ -25,21 +26,26 @@ public class JsonMultiFetchStorageResource { //private static final Cache cache = new DummyCache(); private final Filesystem filesystem; - - private final MultiFetcher multiFetcher = new ConcurrentMultiFetcher(); - //private final MultiFetcher 
multiFetcher = new SequentialMultiFetcher(); + private final MultiFetcher multiFetcher; public JsonMultiFetchStorageResource(Filesystem filesystem) { + this.filesystem = filesystem; + + if (filesystem instanceof S3Filesystem) { + multiFetcher = new ConcurrentMultiFetcher(); + } + else { + multiFetcher = new SequentialMultiFetcher(); + } } @POST @Timed public Response post(MultiFetchRequest request) { + try { - MultiFetchResponse response = multiFetcher.loadData(request, filesystem, cache); - return Response.ok(response).build(); } catch (Exception e) { From a8437da5a14f385a00bf4dcfd7f6ad35fbe012f4 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 2 Oct 2017 21:03:50 +0300 Subject: [PATCH 27/66] some refactoring --- .../com/takipi/oss/storage/fs/api/Filesystem.java | 4 +++- .../{resources/fs/fetcher => fs/cache}/Cache.java | 2 +- .../fs/fetcher => fs/cache}/DummyCache.java | 4 +++- .../fs/fetcher => fs/cache}/InMemoryCache.java | 2 +- .../oss/storage/fs/folder/FolderFilesystem.java | 7 +++++++ .../com/takipi/oss/storage/fs/s3/S3Filesystem.java | 10 ++++++++-- .../resources/fs/JsonMultiFetchStorageResource.java | 12 ++++-------- .../resources/fs/fetcher/ConcurrentMultiFetcher.java | 9 +++++---- .../storage/resources/fs/fetcher/MultiFetcher.java | 1 + .../resources/fs/fetcher/SequentialMultiFetcher.java | 3 ++- .../{MillisecStopWatch.java => SimpleStopWatch.java} | 4 ++-- 11 files changed, 37 insertions(+), 21 deletions(-) rename src/main/java/com/takipi/oss/storage/{resources/fs/fetcher => fs/cache}/Cache.java (63%) rename src/main/java/com/takipi/oss/storage/{resources/fs/fetcher => fs/cache}/DummyCache.java (65%) rename src/main/java/com/takipi/oss/storage/{resources/fs/fetcher => fs/cache}/InMemoryCache.java (95%) rename src/main/java/com/takipi/oss/storage/resources/fs/fetcher/{MillisecStopWatch.java => SimpleStopWatch.java} (82%) diff --git a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java index f31db59..2caa1ab 100644 --- a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java @@ -4,6 +4,7 @@ import java.io.InputStream; import com.takipi.oss.storage.fs.BaseRecord; +import com.takipi.oss.storage.resources.fs.fetcher.MultiFetcher; public interface Filesystem extends FilesystemHealth { /** @@ -87,5 +88,6 @@ public interface Filesystem extends FilesystemHealth { * @return record */ BaseRecord pathToRecord(String path); - + + MultiFetcher getMultiFetcher(); } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/Cache.java b/src/main/java/com/takipi/oss/storage/fs/cache/Cache.java similarity index 63% rename from src/main/java/com/takipi/oss/storage/resources/fs/fetcher/Cache.java rename to src/main/java/com/takipi/oss/storage/fs/cache/Cache.java index c5da164..6288356 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/Cache.java +++ b/src/main/java/com/takipi/oss/storage/fs/cache/Cache.java @@ -1,4 +1,4 @@ -package com.takipi.oss.storage.resources.fs.fetcher; +package com.takipi.oss.storage.fs.cache; public interface Cache { diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/DummyCache.java b/src/main/java/com/takipi/oss/storage/fs/cache/DummyCache.java similarity index 65% rename from src/main/java/com/takipi/oss/storage/resources/fs/fetcher/DummyCache.java rename to src/main/java/com/takipi/oss/storage/fs/cache/DummyCache.java index 204caa0..2259b3c 100644 --- 
a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/DummyCache.java +++ b/src/main/java/com/takipi/oss/storage/fs/cache/DummyCache.java @@ -1,7 +1,9 @@ -package com.takipi.oss.storage.resources.fs.fetcher; +package com.takipi.oss.storage.fs.cache; public class DummyCache implements Cache { + public static DummyCache dummyCache = new DummyCache(); + @Override public String get(String key) { return null; diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/InMemoryCache.java b/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java similarity index 95% rename from src/main/java/com/takipi/oss/storage/resources/fs/fetcher/InMemoryCache.java rename to src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java index b4cb175..81b4c4c 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/InMemoryCache.java +++ b/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java @@ -1,4 +1,4 @@ -package com.takipi.oss.storage.resources.fs.fetcher; +package com.takipi.oss.storage.fs.cache; import java.util.Collections; import java.util.LinkedHashMap; diff --git a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java index bb68daa..ed85265 100644 --- a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java @@ -2,6 +2,8 @@ import com.takipi.oss.storage.fs.BaseRecord; import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.resources.fs.fetcher.MultiFetcher; +import com.takipi.oss.storage.resources.fs.fetcher.SequentialMultiFetcher; import org.apache.commons.io.IOUtils; import java.io.*; @@ -73,6 +75,11 @@ public long size(T record) throws IOException { throw new FileNotFoundException(); } + @Override + public MultiFetcher getMultiFetcher() { + return new SequentialMultiFetcher(); + } + protected void beforePut(File file) { file.getParentFile().mkdirs(); } diff --git a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java index ca15aba..470589e 100644 --- a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java @@ -17,6 +17,8 @@ import com.takipi.oss.storage.fs.api.SearchRequest; import com.takipi.oss.storage.fs.api.SearchResult; import com.takipi.oss.storage.helper.FilesystemUtil; +import com.takipi.oss.storage.resources.fs.fetcher.ConcurrentMultiFetcher; +import com.takipi.oss.storage.resources.fs.fetcher.MultiFetcher; public class S3Filesystem implements Filesystem { @@ -97,7 +99,12 @@ public boolean healthy() { public BaseRecord pathToRecord(String path) { return SimplePathRecord.newRecord(path); } - + + @Override + public MultiFetcher getMultiFetcher() { + return new ConcurrentMultiFetcher(); + } + private String keyOf(T record) { if (this.pathPrefix != null) { return this.pathPrefix + File.separator + record.getPath(); @@ -105,5 +112,4 @@ private String keyOf(T record) { return record.getPath(); } - } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index 4caa596..1ffd6df 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -5,7 +5,8 @@ 
import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.fs.s3.S3Filesystem; +import com.takipi.oss.storage.fs.cache.Cache; +import com.takipi.oss.storage.fs.cache.InMemoryCache; import com.takipi.oss.storage.resources.fs.fetcher.*; import javax.ws.rs.Consumes; @@ -26,18 +27,13 @@ public class JsonMultiFetchStorageResource { //private static final Cache cache = new DummyCache(); private final Filesystem filesystem; + private final MultiFetcher multiFetcher; public JsonMultiFetchStorageResource(Filesystem filesystem) { this.filesystem = filesystem; - - if (filesystem instanceof S3Filesystem) { - multiFetcher = new ConcurrentMultiFetcher(); - } - else { - multiFetcher = new SequentialMultiFetcher(); - } + this.multiFetcher = filesystem.getMultiFetcher(); } @POST diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/ConcurrentMultiFetcher.java index f57491e..0f5e449 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/ConcurrentMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/ConcurrentMultiFetcher.java @@ -6,6 +6,7 @@ import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.cache.Cache; import com.takipi.oss.storage.helper.FilesystemUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -22,12 +23,12 @@ public class ConcurrentMultiFetcher implements MultiFetcher { private static final int MAX_THREADS = 50; private final ExecutorService executorService; + private final AtomicInteger threadCount = new AtomicInteger(); public ConcurrentMultiFetcher() { - executorService = Executors.newFixedThreadPool(MAX_THREADS, new ThreadFactory() - { + executorService = Executors.newFixedThreadPool(MAX_THREADS, new ThreadFactory() { @Override public Thread newThread(Runnable r) { @@ -49,7 +50,7 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem logger.debug("---------- Starting concurrent multi fetch request for " + count + " records"); - MillisecStopWatch stopWatch = new MillisecStopWatch(); + SimpleStopWatch stopWatch = new SimpleStopWatch(); for (Record record : recordsToRetrieve) { String value = cache.get(record.getKey()); @@ -97,7 +98,7 @@ private static class FetcherCallable implements Callable { @Override public String call() throws Exception { - MillisecStopWatch stopWatch = new MillisecStopWatch(); + SimpleStopWatch stopWatch = new SimpleStopWatch(); String value = null; final int MAX_TRIES = 2; int count = 0; diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MultiFetcher.java index 121cb48..e4a06c8 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MultiFetcher.java @@ -4,6 +4,7 @@ import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.cache.Cache; public interface MultiFetcher { diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SequentialMultiFetcher.java 
b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SequentialMultiFetcher.java index c1d0668..229b51d 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SequentialMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SequentialMultiFetcher.java @@ -6,6 +6,7 @@ import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.cache.Cache; import com.takipi.oss.storage.helper.FilesystemUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -23,7 +24,7 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem final int count = request.records.size(); logger.debug("---------- Starting sequential multi fetch request for " + count + " records"); - MillisecStopWatch stopWatch = new MillisecStopWatch(); + SimpleStopWatch stopWatch = new SimpleStopWatch(); for (Record record : request.records) { diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MillisecStopWatch.java b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SimpleStopWatch.java similarity index 82% rename from src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MillisecStopWatch.java rename to src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SimpleStopWatch.java index 4509994..3be1cac 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MillisecStopWatch.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SimpleStopWatch.java @@ -1,10 +1,10 @@ package com.takipi.oss.storage.resources.fs.fetcher; -class MillisecStopWatch +class SimpleStopWatch { private long start; - MillisecStopWatch() { + SimpleStopWatch() { reset(); } From 5a4185601cc1099dceed96d6ff6783ce62fe3270 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 2 Oct 2017 21:41:50 +0300 Subject: [PATCH 28/66] more refactoring --- .../takipi/oss/storage/fs/api/Filesystem.java | 2 +- .../storage/fs/folder/FolderFilesystem.java | 4 +- .../oss/storage/fs/s3/S3Filesystem.java | 4 +- .../fs/JsonMultiFetchStorageResource.java | 4 +- .../resources/fs/fetcher/MultiFetcher.java | 12 ---- .../ConcurrentMultiFetcher.java | 64 ++----------------- .../fs/multifetcher/MultiFetcher.java | 59 +++++++++++++++++ .../SequentialMultiFetcher.java | 31 +++------ .../SimpleStopWatch.java | 2 +- 9 files changed, 83 insertions(+), 99 deletions(-) delete mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MultiFetcher.java rename src/main/java/com/takipi/oss/storage/resources/fs/{fetcher => multifetcher}/ConcurrentMultiFetcher.java (54%) create mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java rename src/main/java/com/takipi/oss/storage/resources/fs/{fetcher => multifetcher}/SequentialMultiFetcher.java (60%) rename src/main/java/com/takipi/oss/storage/resources/fs/{fetcher => multifetcher}/SimpleStopWatch.java (78%) diff --git a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java index 2caa1ab..3020a2c 100644 --- a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java @@ -4,7 +4,7 @@ import java.io.InputStream; import com.takipi.oss.storage.fs.BaseRecord; -import com.takipi.oss.storage.resources.fs.fetcher.MultiFetcher; +import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; public interface Filesystem extends 
FilesystemHealth { /** diff --git a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java index ed85265..12467bc 100644 --- a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java @@ -2,8 +2,8 @@ import com.takipi.oss.storage.fs.BaseRecord; import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.resources.fs.fetcher.MultiFetcher; -import com.takipi.oss.storage.resources.fs.fetcher.SequentialMultiFetcher; +import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; +import com.takipi.oss.storage.resources.fs.multifetcher.SequentialMultiFetcher; import org.apache.commons.io.IOUtils; import java.io.*; diff --git a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java index 470589e..eefda8d 100644 --- a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java @@ -17,8 +17,8 @@ import com.takipi.oss.storage.fs.api.SearchRequest; import com.takipi.oss.storage.fs.api.SearchResult; import com.takipi.oss.storage.helper.FilesystemUtil; -import com.takipi.oss.storage.resources.fs.fetcher.ConcurrentMultiFetcher; -import com.takipi.oss.storage.resources.fs.fetcher.MultiFetcher; +import com.takipi.oss.storage.resources.fs.multifetcher.ConcurrentMultiFetcher; +import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; public class S3Filesystem implements Filesystem { diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index 1ffd6df..e63a354 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -7,7 +7,7 @@ import com.takipi.oss.storage.fs.api.Filesystem; import com.takipi.oss.storage.fs.cache.Cache; import com.takipi.oss.storage.fs.cache.InMemoryCache; -import com.takipi.oss.storage.resources.fs.fetcher.*; +import com.takipi.oss.storage.resources.fs.multifetcher.*; import javax.ws.rs.Consumes; import javax.ws.rs.POST; @@ -24,7 +24,7 @@ public class JsonMultiFetchStorageResource { private static final int MAX_CACHE_SIZE = 8388608; // 8 MB private static final Cache cache = new InMemoryCache(MAX_CACHE_SIZE); - //private static final Cache cache = new DummyCache(); + //private static final Cache cache = DummyCache.dummyCache; private final Filesystem filesystem; diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MultiFetcher.java deleted file mode 100644 index e4a06c8..0000000 --- a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/MultiFetcher.java +++ /dev/null @@ -1,12 +0,0 @@ -package com.takipi.oss.storage.resources.fs.fetcher; - -import com.takipi.oss.storage.data.fetch.MultiFetchRequest; -import com.takipi.oss.storage.data.fetch.MultiFetchResponse; -import com.takipi.oss.storage.fs.Record; -import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.fs.cache.Cache; - -public interface MultiFetcher { - - MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem, Cache cache); -} diff --git 
a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java similarity index 54% rename from src/main/java/com/takipi/oss/storage/resources/fs/fetcher/ConcurrentMultiFetcher.java rename to src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java index 0f5e449..611b1e0 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/ConcurrentMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java @@ -1,4 +1,4 @@ -package com.takipi.oss.storage.resources.fs.fetcher; +package com.takipi.oss.storage.resources.fs.multifetcher; import com.takipi.oss.storage.data.EncodingType; import com.takipi.oss.storage.data.RecordWithData; @@ -7,7 +7,6 @@ import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; import com.takipi.oss.storage.fs.cache.Cache; -import com.takipi.oss.storage.helper.FilesystemUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -16,7 +15,7 @@ import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; -public class ConcurrentMultiFetcher implements MultiFetcher { +public class ConcurrentMultiFetcher extends BaseMultiFetcher { private static final Logger logger = LoggerFactory.getLogger(ConcurrentMultiFetcher.class); @@ -48,7 +47,7 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem final List> futures = new ArrayList<>(count); final List recordsWithData = new ArrayList<>(count); - logger.debug("---------- Starting concurrent multi fetch request for " + count + " records"); + logger.info("---------- Starting concurrent multi fetch request for " + count + " records"); SimpleStopWatch stopWatch = new SimpleStopWatch(); @@ -56,10 +55,11 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem String value = cache.get(record.getKey()); recordsWithData.add(RecordWithData.of(record, value)); if (value != null) { - logger.debug("Object for key " + record.getKey() + " found in cache. " + value.length() + " bytes"); + logger.info("Object for key " + record.getKey() + " found in cache. 
" + value.length() + " bytes"); } else { - Callable callable = new FetcherCallable(filesystem, record, encodingType); + + Callable callable = () -> load(filesystem, record, encodingType); futures.add(executorService.submit(callable)); } } @@ -82,56 +82,4 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem return new MultiFetchResponse(recordsWithData); } - - private static class FetcherCallable implements Callable { - - static AtomicInteger counter = new AtomicInteger(); - final Filesystem filesystem; - final Record record; - final EncodingType encodingType; - - FetcherCallable(Filesystem filesystem, Record record, EncodingType encodingType) { - this.filesystem = filesystem; - this.record = record; - this.encodingType = encodingType; - } - - @Override - public String call() throws Exception { - SimpleStopWatch stopWatch = new SimpleStopWatch(); - String value = null; - final int MAX_TRIES = 2; - int count = 0; - - while ((value == null) && (count < MAX_TRIES)) { - - if (count++ > 0) { - logger.warn("Retry loading object for key " + record.getKey()); - stopWatch.reset(); - } - - try { - value = FilesystemUtil.read(filesystem, record, encodingType); - } - catch (Exception e) { - // Need this catch because some exceptions inside FilesystemUtil.read are caught and result in a - // null return value, and some are thrown. The code would be simpler if all exceptions were thrown. - } - } - - if (value != null) { - - logger.debug("--------------------- " + Thread.currentThread().getName() + " loaded key " + - record.getKey() + " in " + stopWatch.elapsed() + " ms. " + value.length() + " bytes"); - - return value; - } - else { - - logger.error("Failed to load object for key: " + record.getKey() + ". Elapsed time = " + stopWatch.elapsed() + " ms"); - - throw new RuntimeException("Failed to load object for key: " + record.getKey()); - } - } - } } \ No newline at end of file diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java new file mode 100644 index 0000000..2b61d2f --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -0,0 +1,59 @@ +package com.takipi.oss.storage.resources.fs.multifetcher; + +import com.takipi.oss.storage.data.EncodingType; +import com.takipi.oss.storage.data.fetch.MultiFetchRequest; +import com.takipi.oss.storage.data.fetch.MultiFetchResponse; +import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.cache.Cache; +import com.takipi.oss.storage.helper.FilesystemUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public interface MultiFetcher { + + MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem, Cache cache); +} + +abstract class BaseMultiFetcher implements MultiFetcher { + + private static final Logger logger = LoggerFactory.getLogger(ConcurrentMultiFetcher.class); + + static String load(Filesystem filesystem, Record record, EncodingType encodingType) { + + SimpleStopWatch stopWatch = new SimpleStopWatch(); + String value = null; + final int MAX_TRIES = 2; + int count = 0; + + while ((value == null) && (count < MAX_TRIES)) { + + if (count++ > 0) { + logger.warn("Retry loading object for key " + record.getKey()); + stopWatch.reset(); + } + + try { + value = FilesystemUtil.read(filesystem, record, encodingType); + } + catch (Exception e) { + // Need this catch because some 
exceptions inside FilesystemUtil.read are caught and result in a + // null return value, and some are thrown. The code would be simpler if all exceptions were thrown. + } + } + + if (value != null) { + + logger.info("--------------------- " + Thread.currentThread().getName() + " loaded key " + + record.getKey() + " in " + stopWatch.elapsed() + " ms. " + value.length() + " bytes"); + + return value; + } + else { + + logger.error("Failed to load object for key: " + record.getKey() + ". Elapsed time = " + stopWatch.elapsed() + " ms"); + + throw new RuntimeException("Failed to load object for key: " + record.getKey()); + } + } +} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SequentialMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java similarity index 60% rename from src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SequentialMultiFetcher.java rename to src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java index 229b51d..bebbf79 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SequentialMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java @@ -1,19 +1,19 @@ -package com.takipi.oss.storage.resources.fs.fetcher; +package com.takipi.oss.storage.resources.fs.multifetcher; import com.google.common.collect.Lists; +import com.takipi.oss.storage.data.EncodingType; import com.takipi.oss.storage.data.RecordWithData; import com.takipi.oss.storage.data.fetch.MultiFetchRequest; import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; import com.takipi.oss.storage.fs.cache.Cache; -import com.takipi.oss.storage.helper.FilesystemUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.List; -public class SequentialMultiFetcher implements MultiFetcher { +public class SequentialMultiFetcher extends BaseMultiFetcher { private static final Logger logger = LoggerFactory.getLogger(SequentialMultiFetcher.class); @@ -22,36 +22,25 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem List records = Lists.newArrayList(); final int count = request.records.size(); - - logger.debug("---------- Starting sequential multi fetch request for " + count + " records"); + final EncodingType encodingType = request.encodingType; + logger.info("---------- Starting sequential multi fetch request for " + count + " records"); SimpleStopWatch stopWatch = new SimpleStopWatch(); for (Record record : request.records) { - final String key = record.getKey(); - - String value = cache.get(key); + String value = cache.get(record.getKey()); if (value != null) { - logger.debug("Object for key " + record.getKey() + " found in cache. " + value.length() + " bytes"); + logger.info("Object for key " + record.getKey() + " found in cache. 
" + value.length() + " bytes"); records.add(RecordWithData.of(record, value)); } else { - try { - - value = FilesystemUtil.read(filesystem, record, request.encodingType); - - if (value == null) { - logger.warn("Key not found: {}", key); - } - else { - records.add(RecordWithData.of(record, value)); - cache.put(key, value); - } + value = load(filesystem, record, encodingType); + records.add(RecordWithData.of(record, value)); } catch (Exception e) { - logger.error("Problem with record " + record, e); + logger.error(e.getMessage()); } } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SimpleStopWatch.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SimpleStopWatch.java similarity index 78% rename from src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SimpleStopWatch.java rename to src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SimpleStopWatch.java index 3be1cac..17594a6 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/fetcher/SimpleStopWatch.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SimpleStopWatch.java @@ -1,4 +1,4 @@ -package com.takipi.oss.storage.resources.fs.fetcher; +package com.takipi.oss.storage.resources.fs.multifetcher; class SimpleStopWatch { From 8090d3d7beb3e15455037e920ad2ae24a6dc8230 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 2 Oct 2017 21:43:02 +0300 Subject: [PATCH 29/66] more refactoring --- .../resources/fs/multifetcher/ConcurrentMultiFetcher.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java index 611b1e0..9c81036 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java @@ -18,11 +18,9 @@ public class ConcurrentMultiFetcher extends BaseMultiFetcher { private static final Logger logger = LoggerFactory.getLogger(ConcurrentMultiFetcher.class); - private static final int MAX_THREADS = 50; private final ExecutorService executorService; - private final AtomicInteger threadCount = new AtomicInteger(); public ConcurrentMultiFetcher() { @@ -41,6 +39,7 @@ public Thread newThread(Runnable r) @Override public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem, Cache cache) { + final EncodingType encodingType = request.encodingType; final List recordsToRetrieve = request.records; final int count = recordsToRetrieve.size(); From 452170067bb700044171aa87406380d8bd2a945f Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 2 Oct 2017 21:44:40 +0300 Subject: [PATCH 30/66] more refactoring --- .../fs/multifetcher/ConcurrentMultiFetcher.java | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java index 9c81036..db67676 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java @@ -25,15 +25,11 @@ public class ConcurrentMultiFetcher extends BaseMultiFetcher { public ConcurrentMultiFetcher() { - executorService = Executors.newFixedThreadPool(MAX_THREADS, new 
ThreadFactory() { - @Override - public Thread newThread(Runnable r) - { - Thread t = new Thread(r); - t.setDaemon(true); - t.setName("fetcher_thread_" + threadCount.incrementAndGet()); - return t; - } + executorService = Executors.newFixedThreadPool(MAX_THREADS, r -> { + Thread t = new Thread(r); + t.setDaemon(true); + t.setName("fetcher_thread_" + threadCount.incrementAndGet()); + return t; }); } From 936bf9b600d1be5096d686242540b1bfacf802b0 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Tue, 3 Oct 2017 13:07:08 +0300 Subject: [PATCH 31/66] more refactoring --- .../com/takipi/oss/storage/fs/api/Filesystem.java | 3 +++ .../oss/storage/fs/folder/FolderFilesystem.java | 7 +++++++ .../takipi/oss/storage/fs/s3/S3Filesystem.java | 12 +++++++++++- .../fs/JsonMultiFetchStorageResource.java | 10 +--------- .../fs/multifetcher/ConcurrentMultiFetcher.java | 15 +++++++++------ .../resources/fs/multifetcher/MultiFetcher.java | 5 ++--- .../fs/multifetcher/SequentialMultiFetcher.java | 5 +++-- 7 files changed, 36 insertions(+), 21 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java index 3020a2c..967bca0 100644 --- a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java @@ -4,6 +4,7 @@ import java.io.InputStream; import com.takipi.oss.storage.fs.BaseRecord; +import com.takipi.oss.storage.fs.cache.Cache; import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; public interface Filesystem extends FilesystemHealth { @@ -90,4 +91,6 @@ public interface Filesystem extends FilesystemHealth { BaseRecord pathToRecord(String path); MultiFetcher getMultiFetcher(); + + Cache getCache(); } diff --git a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java index 12467bc..f46e6e3 100644 --- a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java @@ -2,6 +2,8 @@ import com.takipi.oss.storage.fs.BaseRecord; import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.cache.Cache; +import com.takipi.oss.storage.fs.cache.DummyCache; import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; import com.takipi.oss.storage.resources.fs.multifetcher.SequentialMultiFetcher; import org.apache.commons.io.IOUtils; @@ -80,6 +82,11 @@ public MultiFetcher getMultiFetcher() { return new SequentialMultiFetcher(); } + @Override + public Cache getCache() { + return DummyCache.dummyCache; + } + protected void beforePut(File file) { file.getParentFile().mkdirs(); } diff --git a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java index eefda8d..48ba811 100644 --- a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java @@ -16,12 +16,17 @@ import com.takipi.oss.storage.fs.api.Filesystem; import com.takipi.oss.storage.fs.api.SearchRequest; import com.takipi.oss.storage.fs.api.SearchResult; +import com.takipi.oss.storage.fs.cache.Cache; +import com.takipi.oss.storage.fs.cache.InMemoryCache; import com.takipi.oss.storage.helper.FilesystemUtil; import com.takipi.oss.storage.resources.fs.multifetcher.ConcurrentMultiFetcher; import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; public class S3Filesystem 
implements Filesystem { - + + private static final int MAX_CACHE_SIZE = 8388608; // 8 MB + private static final Cache cache = new InMemoryCache(MAX_CACHE_SIZE); + private final AmazonS3 amazonS3; private final String bucket; private final String pathPrefix; @@ -105,6 +110,11 @@ public MultiFetcher getMultiFetcher() { return new ConcurrentMultiFetcher(); } + @Override + public Cache getCache() { + return cache; + } + private String keyOf(T record) { if (this.pathPrefix != null) { return this.pathPrefix + File.separator + record.getPath(); diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index e63a354..2190c62 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -5,8 +5,6 @@ import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.fs.cache.Cache; -import com.takipi.oss.storage.fs.cache.InMemoryCache; import com.takipi.oss.storage.resources.fs.multifetcher.*; import javax.ws.rs.Consumes; @@ -21,13 +19,7 @@ @Produces(MediaType.APPLICATION_JSON) public class JsonMultiFetchStorageResource { - private static final int MAX_CACHE_SIZE = 8388608; // 8 MB - - private static final Cache cache = new InMemoryCache(MAX_CACHE_SIZE); - //private static final Cache cache = DummyCache.dummyCache; - private final Filesystem filesystem; - private final MultiFetcher multiFetcher; public JsonMultiFetchStorageResource(Filesystem filesystem) { @@ -41,7 +33,7 @@ public JsonMultiFetchStorageResource(Filesystem filesystem) { public Response post(MultiFetchRequest request) { try { - MultiFetchResponse response = multiFetcher.loadData(request, filesystem, cache); + MultiFetchResponse response = multiFetcher.loadData(request, filesystem); return Response.ok(response).build(); } catch (Exception e) { diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java index db67676..72a8e33 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java @@ -18,29 +18,32 @@ public class ConcurrentMultiFetcher extends BaseMultiFetcher { private static final Logger logger = LoggerFactory.getLogger(ConcurrentMultiFetcher.class); - private static final int MAX_THREADS = 50; + private static final int MAX_THREADS = 30; private final ExecutorService executorService; private final AtomicInteger threadCount = new AtomicInteger(); public ConcurrentMultiFetcher() { - - executorService = Executors.newFixedThreadPool(MAX_THREADS, r -> { + + ThreadFactory threadFactory = r -> { Thread t = new Thread(r); t.setDaemon(true); t.setName("fetcher_thread_" + threadCount.incrementAndGet()); return t; - }); + }; + + executorService = Executors.newFixedThreadPool(MAX_THREADS, threadFactory); } @Override - public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem, Cache cache) { + public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { final EncodingType encodingType = request.encodingType; final List recordsToRetrieve = request.records; 
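/*
 * For context on the ConcurrentMultiFetcher above: per-record reads are fanned
 * out over a fixed pool of named daemon threads and the resulting Futures are
 * joined with a 20-second timeout. Patches 27 and 31 let the Filesystem choose
 * the strategy itself: S3Filesystem returns a ConcurrentMultiFetcher plus a
 * shared InMemoryCache, while FolderFilesystem returns a SequentialMultiFetcher
 * and the no-op DummyCache. The standalone sketch below shows only the executor
 * pattern; the class name and the submitted task body are illustrative
 * stand-ins, not part of the patch series.
 */
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;

class FetcherPoolSketch {

    private static final int MAX_THREADS = 30;
    private static final AtomicInteger threadCount = new AtomicInteger();

    // Daemon threads so an idle pool never blocks JVM shutdown; numbered names
    // make the fetcher threads easy to spot in thread dumps.
    private static final ExecutorService pool = Executors.newFixedThreadPool(MAX_THREADS, r -> {
        Thread t = new Thread(r);
        t.setDaemon(true);
        t.setName("fetcher_thread_" + threadCount.incrementAndGet());
        return t;
    });

    static List<String> fetchAll(List<String> keys) throws InterruptedException {
        List<Future<String>> futures = new ArrayList<>(keys.size());
        for (String key : keys) {
            // Placeholder task; the real code submits a load() of the record.
            futures.add(pool.submit(() -> "value-for-" + key));
        }

        List<String> values = new ArrayList<>(keys.size());
        for (Future<String> future : futures) {
            try {
                values.add(future.get(20, TimeUnit.SECONDS));
            } catch (ExecutionException | TimeoutException e) {
                values.add(null); // a failed fetch leaves its slot empty, as in loadData()
            }
        }
        return values;
    }
}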
final int count = recordsToRetrieve.size(); final List> futures = new ArrayList<>(count); final List recordsWithData = new ArrayList<>(count); + Cache cache = filesystem.getCache(); logger.info("---------- Starting concurrent multi fetch request for " + count + " records"); @@ -50,7 +53,7 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem String value = cache.get(record.getKey()); recordsWithData.add(RecordWithData.of(record, value)); if (value != null) { - logger.info("Object for key " + record.getKey() + " found in cache. " + value.length() + " bytes"); + logger.debug("Object for key " + record.getKey() + " found in cache. " + value.length() + " bytes"); } else { diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index 2b61d2f..cdcc42b 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -5,14 +5,13 @@ import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.fs.cache.Cache; import com.takipi.oss.storage.helper.FilesystemUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public interface MultiFetcher { - MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem, Cache cache); + MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem); } abstract class BaseMultiFetcher implements MultiFetcher { @@ -44,7 +43,7 @@ static String load(Filesystem filesystem, Record record, EncodingType en if (value != null) { - logger.info("--------------------- " + Thread.currentThread().getName() + " loaded key " + + logger.debug("--------------------- " + Thread.currentThread().getName() + " loaded key " + record.getKey() + " in " + stopWatch.elapsed() + " ms. " + value.length() + " bytes"); return value; diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java index bebbf79..cb397bd 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java @@ -18,20 +18,21 @@ public class SequentialMultiFetcher extends BaseMultiFetcher { private static final Logger logger = LoggerFactory.getLogger(SequentialMultiFetcher.class); @Override - public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem, Cache cache) { + public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { List records = Lists.newArrayList(); final int count = request.records.size(); final EncodingType encodingType = request.encodingType; logger.info("---------- Starting sequential multi fetch request for " + count + " records"); SimpleStopWatch stopWatch = new SimpleStopWatch(); + Cache cache = filesystem.getCache(); for (Record record : request.records) { String value = cache.get(record.getKey()); if (value != null) { - logger.info("Object for key " + record.getKey() + " found in cache. " + value.length() + " bytes"); + logger.debug("Object for key " + record.getKey() + " found in cache. 
" + value.length() + " bytes"); records.add(RecordWithData.of(record, value)); } else { From 532a656e2d8e768b9698332f4d343dca1906c93c Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Tue, 3 Oct 2017 14:37:13 +0300 Subject: [PATCH 32/66] more refactoring --- .../multifetcher/ConcurrentMultiFetcher.java | 20 +++++++----------- .../fs/multifetcher/MultiFetcher.java | 20 ++++++++++++++++++ .../multifetcher/SequentialMultiFetcher.java | 21 +++++++------------ 3 files changed, 36 insertions(+), 25 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java index 72a8e33..2f59ae0 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java @@ -42,28 +42,24 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem final List recordsToRetrieve = request.records; final int count = recordsToRetrieve.size(); final List> futures = new ArrayList<>(count); - final List recordsWithData = new ArrayList<>(count); Cache cache = filesystem.getCache(); logger.info("---------- Starting concurrent multi fetch request for " + count + " records"); SimpleStopWatch stopWatch = new SimpleStopWatch(); - for (Record record : recordsToRetrieve) { - String value = cache.get(record.getKey()); - recordsWithData.add(RecordWithData.of(record, value)); - if (value != null) { - logger.debug("Object for key " + record.getKey() + " found in cache. " + value.length() + " bytes"); - } - else { - - Callable callable = () -> load(filesystem, record, encodingType); + final List recordsWithData = loadFromCache(request.records, cache); + + for (RecordWithData recordWithData : recordsWithData) { + if (recordWithData.getData() == null) { + Callable callable = () -> load(filesystem, recordWithData.getRecord(), encodingType); futures.add(executorService.submit(callable)); } } - for (int i = 0, futureIndex = 0; i < count; ++i) { - RecordWithData recordWithData = recordsWithData.get(i); + int futureIndex = 0; + + for (RecordWithData recordWithData : recordsWithData) { if (recordWithData.getData() == null) { try { String value = futures.get(futureIndex++).get(20, TimeUnit.SECONDS); diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index cdcc42b..8fb378b 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -1,14 +1,19 @@ package com.takipi.oss.storage.resources.fs.multifetcher; import com.takipi.oss.storage.data.EncodingType; +import com.takipi.oss.storage.data.RecordWithData; import com.takipi.oss.storage.data.fetch.MultiFetchRequest; import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.cache.Cache; import com.takipi.oss.storage.helper.FilesystemUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; +import java.util.List; + public interface MultiFetcher { MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem); @@ -55,4 +60,19 @@ static String load(Filesystem filesystem, Record record, EncodingType 
en throw new RuntimeException("Failed to load object for key: " + record.getKey()); } } + + static List loadFromCache(List records, Cache cache) { + + List recordsWithData = new ArrayList<>(records.size()); + + for (Record record : records) { + String value = cache.get(record.getKey()); + recordsWithData.add(RecordWithData.of(record, value)); + if (value != null) { + logger.debug("Object for key " + record.getKey() + " found in cache. " + value.length() + " bytes"); + } + } + + return recordsWithData; + } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java index cb397bd..cf94615 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java @@ -1,6 +1,5 @@ package com.takipi.oss.storage.resources.fs.multifetcher; -import com.google.common.collect.Lists; import com.takipi.oss.storage.data.EncodingType; import com.takipi.oss.storage.data.RecordWithData; import com.takipi.oss.storage.data.fetch.MultiFetchRequest; @@ -11,6 +10,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; import java.util.List; public class SequentialMultiFetcher extends BaseMultiFetcher { @@ -20,25 +20,20 @@ public class SequentialMultiFetcher extends BaseMultiFetcher { @Override public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { - List records = Lists.newArrayList(); final int count = request.records.size(); final EncodingType encodingType = request.encodingType; logger.info("---------- Starting sequential multi fetch request for " + count + " records"); SimpleStopWatch stopWatch = new SimpleStopWatch(); Cache cache = filesystem.getCache(); - for (Record record : request.records) { - - String value = cache.get(record.getKey()); + final List recordsWithData = loadFromCache(request.records, cache); + + for (RecordWithData recordWithData : recordsWithData) { - if (value != null) { - logger.debug("Object for key " + record.getKey() + " found in cache. 
" + value.length() + " bytes"); - records.add(RecordWithData.of(record, value)); - } - else { + if (recordWithData.getData() == null) { try { - value = load(filesystem, record, encodingType); - records.add(RecordWithData.of(record, value)); + String value = load(filesystem, recordWithData.getRecord(), encodingType); + recordWithData.setData(value); } catch (Exception e) { logger.error(e.getMessage()); @@ -48,6 +43,6 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem logger.info("---------- Sequential multi fetch request for " + count + " records completed in " + stopWatch.elapsed() + " ms"); - return new MultiFetchResponse(records); + return new MultiFetchResponse(recordsWithData); } } From 448febd95973482e53a07eb4ee4111ad9de0768b Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Tue, 3 Oct 2017 16:00:54 +0300 Subject: [PATCH 33/66] Downgraded code from Java 8 to Java 7 --- .../multifetcher/ConcurrentMultiFetcher.java | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java index 2f59ae0..44a26e4 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java @@ -25,18 +25,22 @@ public class ConcurrentMultiFetcher extends BaseMultiFetcher { public ConcurrentMultiFetcher() { - ThreadFactory threadFactory = r -> { - Thread t = new Thread(r); - t.setDaemon(true); - t.setName("fetcher_thread_" + threadCount.incrementAndGet()); - return t; + ThreadFactory threadFactory = new ThreadFactory() { + @Override + public Thread newThread(Runnable r) + { + Thread t = new Thread(r); + t.setDaemon(true); + t.setName("fetcher_thread_" + threadCount.incrementAndGet()); + return t; + } }; executorService = Executors.newFixedThreadPool(MAX_THREADS, threadFactory); } @Override - public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { + public MultiFetchResponse loadData(final MultiFetchRequest request, final Filesystem filesystem) { final EncodingType encodingType = request.encodingType; final List recordsToRetrieve = request.records; @@ -50,9 +54,16 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem final List recordsWithData = loadFromCache(request.records, cache); - for (RecordWithData recordWithData : recordsWithData) { + for (final RecordWithData recordWithData : recordsWithData) { if (recordWithData.getData() == null) { - Callable callable = () -> load(filesystem, recordWithData.getRecord(), encodingType); + Callable callable = new Callable() + { + @Override + public String call() throws Exception + { + return load(filesystem, recordWithData.getRecord(), encodingType); + } + }; futures.add(executorService.submit(callable)); } } From 24b0a25010690175f7d5c2f010b7b7e87aef1e14 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Sun, 8 Oct 2017 11:16:52 +0300 Subject: [PATCH 34/66] Added cache size and max number of threads to the yml file in order to be configurable. 
--- .../storage/TakipiStorageConfiguration.java | 27 +++++++- .../takipi/oss/storage/TakipiStorageMain.java | 7 +- .../oss/storage/fs/cache/InMemoryCache.java | 25 ++++++- .../oss/storage/fs/s3/S3Filesystem.java | 22 ++++-- .../multifetcher/ConcurrentMultiFetcher.java | 68 ++++++++++++------- .../multifetcher/SequentialMultiFetcher.java | 1 - 6 files changed, 113 insertions(+), 37 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java index 1ce9dc1..b1e5b56 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java @@ -51,12 +51,17 @@ public void setMaxUsedStoragePercentage(double maxUsedStoragePercentage) { private S3Fs s3Fs; public static class S3Fs { + + private Integer concurrencyLevel; + + private Integer maxCacheSize; + @NotEmpty private String bucket; @NotEmpty private String pathPrefix; - + @NotNull @Valid private Credentials credentials; @@ -117,6 +122,26 @@ public Credentials getCredentials() { public void setCredentials(Credentials credentials) { this.credentials = credentials; } + + @JsonProperty + public Integer getConcurrencyLevel() { + return concurrencyLevel; + } + + @JsonProperty + public void setConcurrencyLevel(Integer concurrencyLevel) { + this.concurrencyLevel = concurrencyLevel; + } + + @JsonProperty + public Integer getMaxCacheSize() { + return maxCacheSize; + } + + @JsonProperty + public void setMaxCacheSize(Integer maxCacheSize) { + this.maxCacheSize = maxCacheSize; + } } private boolean enableCors; diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index a1156c7..7c9a5aa 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -115,11 +115,12 @@ private Filesystem configureS3Filesystem(TakipiStorage } // S3 bucket - String bucket = configuration.getS3Fs().getBucket(); - String pathPrefix = configuration.getS3Fs().getPathPrefix(); + TakipiStorageConfiguration.S3Fs s3Fs = configuration.getS3Fs(); + String bucket = s3Fs.getBucket(); + String pathPrefix = s3Fs.getPathPrefix(); log.debug("Using AWS S3 based filesystem with bucket: {}, prefix: {}", bucket, pathPrefix); - return new S3Filesystem(amazonS3, bucket, pathPrefix); + return new S3Filesystem(amazonS3, bucket, pathPrefix, s3Fs.getConcurrencyLevel(), s3Fs.getMaxCacheSize()); } private void enableCors(TakipiStorageConfiguration configuration, Environment environment) { diff --git a/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java b/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java index 81b4c4c..df3570d 100644 --- a/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java +++ b/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java @@ -1,14 +1,34 @@ package com.takipi.oss.storage.fs.cache; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.util.Collections; import java.util.LinkedHashMap; import java.util.Map; public class InMemoryCache implements Cache { + private static final Logger logger = LoggerFactory.getLogger(InMemoryCache.class); + private final Map cache; - public InMemoryCache(final int maxCacheSize) { + public InMemoryCache(int maxCacheSize) { + + int oneMegaByte = 1048576; + int hundredMegaByte = 100 * 1048576; + + if (maxCacheSize < oneMegaByte) { + 
logger.warn("Minimum In Memory Cache size = 1048576"); + maxCacheSize = oneMegaByte; + } + else if (maxCacheSize > hundredMegaByte) { + logger.warn("Maximum allowable In Memory Cache size = ", hundredMegaByte); + maxCacheSize = hundredMegaByte; + } + + final int cacheSizeLimit = maxCacheSize; + logger.info("In Memory Cache maximum size = " + maxCacheSize); int estimatedSizePerElement = 600; int initialCapacity = (int)Math.pow(2, Math.ceil(Math.log((float)maxCacheSize / estimatedSizePerElement) / Math.log(2))); @@ -20,7 +40,7 @@ public InMemoryCache(final int maxCacheSize) { private int cacheSize = 0; @Override protected boolean removeEldestEntry(Map.Entry eldest) { - boolean remove = cacheSize > maxCacheSize; + boolean remove = cacheSize > cacheSizeLimit; if (remove) { cacheSize -= (eldest.getKey().length() + eldest.getValue().length()); } @@ -29,6 +49,7 @@ public InMemoryCache(final int maxCacheSize) { @Override public String put(String key, String value) { cacheSize += (key.length() + value.length()); + logger.debug("InMemoryCache size = " + cacheSize); return super.put(key, value); } }); diff --git a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java index 48ba811..09cf21e 100644 --- a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java @@ -17,24 +17,36 @@ import com.takipi.oss.storage.fs.api.SearchRequest; import com.takipi.oss.storage.fs.api.SearchResult; import com.takipi.oss.storage.fs.cache.Cache; +import com.takipi.oss.storage.fs.cache.DummyCache; import com.takipi.oss.storage.fs.cache.InMemoryCache; import com.takipi.oss.storage.helper.FilesystemUtil; import com.takipi.oss.storage.resources.fs.multifetcher.ConcurrentMultiFetcher; import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; +import com.takipi.oss.storage.resources.fs.multifetcher.SequentialMultiFetcher; public class S3Filesystem implements Filesystem { - private static final int MAX_CACHE_SIZE = 8388608; // 8 MB - private static final Cache cache = new InMemoryCache(MAX_CACHE_SIZE); - + private final Cache cache; + private final MultiFetcher multiFetcher; private final AmazonS3 amazonS3; private final String bucket; private final String pathPrefix; - public S3Filesystem(AmazonS3 amazonS3, String bucket, String pathPrefix) { + public S3Filesystem(AmazonS3 amazonS3, + String bucket, + String pathPrefix, + int multiFetcherConcurrencyLevel, + int maxCacheSize) { + this.amazonS3 = amazonS3; this.bucket = bucket; this.pathPrefix = pathPrefix; + + this.cache = (maxCacheSize > 0) ? new InMemoryCache(maxCacheSize) : DummyCache.dummyCache; + + this.multiFetcher = (multiFetcherConcurrencyLevel > 1) ? 
+ new ConcurrentMultiFetcher(multiFetcherConcurrencyLevel) : + new SequentialMultiFetcher(); } @Override @@ -107,7 +119,7 @@ public BaseRecord pathToRecord(String path) { @Override public MultiFetcher getMultiFetcher() { - return new ConcurrentMultiFetcher(); + return multiFetcher; } @Override diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java index 44a26e4..af0f11f 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java @@ -18,12 +18,22 @@ public class ConcurrentMultiFetcher extends BaseMultiFetcher { private static final Logger logger = LoggerFactory.getLogger(ConcurrentMultiFetcher.class); - private static final int MAX_THREADS = 30; private final ExecutorService executorService; private final AtomicInteger threadCount = new AtomicInteger(); - public ConcurrentMultiFetcher() { + public ConcurrentMultiFetcher(int maxThreads) { + + if (maxThreads > 50) { + logger.warn("ConcurrentMultiFetcher concurrency level can not be greater than 50"); + maxThreads = 50; + } + else if (maxThreads < 1) { + logger.warn("ConcurrentMultiFetcher concurrency level can not be less than 1"); + maxThreads = 1; + } + + logger.info("ConcurrentMultiFetcher maximum number of threads = " + maxThreads); ThreadFactory threadFactory = new ThreadFactory() { @Override @@ -36,7 +46,7 @@ public Thread newThread(Runnable r) } }; - executorService = Executors.newFixedThreadPool(MAX_THREADS, threadFactory); + executorService = Executors.newFixedThreadPool(maxThreads, threadFactory); } @Override @@ -54,31 +64,39 @@ public MultiFetchResponse loadData(final MultiFetchRequest request, final Filesy final List recordsWithData = loadFromCache(request.records, cache); - for (final RecordWithData recordWithData : recordsWithData) { - if (recordWithData.getData() == null) { - Callable callable = new Callable() - { - @Override - public String call() throws Exception - { - return load(filesystem, recordWithData.getRecord(), encodingType); - } - }; - futures.add(executorService.submit(callable)); + // if only 1 record, then no need to initiate a multi-threaded load + if (recordsWithData.size() == 1) { + RecordWithData firstRecord = recordsWithData.get(0); + if (firstRecord.getData() == null) { + logger.debug("Only one record so loading object in calling thread"); + firstRecord.setData(load(filesystem, firstRecord.getRecord(), encodingType)); } } - - int futureIndex = 0; - - for (RecordWithData recordWithData : recordsWithData) { - if (recordWithData.getData() == null) { - try { - String value = futures.get(futureIndex++).get(20, TimeUnit.SECONDS); - cache.put(recordWithData.getRecord().getKey(), value); - recordWithData.setData(value); + else { + for (final RecordWithData recordWithData : recordsWithData) { + if (recordWithData.getData() == null) { + Callable callable = new Callable() { + @Override + public String call() throws Exception { + return load(filesystem, recordWithData.getRecord(), encodingType); + } + }; + futures.add(executorService.submit(callable)); } - catch (Exception e) { - logger.error(e.getMessage()); + } + + int futureIndex = 0; + + for (RecordWithData recordWithData : recordsWithData) { + if (recordWithData.getData() == null) { + try { + String value = futures.get(futureIndex++).get(20, TimeUnit.SECONDS); + 
cache.put(recordWithData.getRecord().getKey(), value); + recordWithData.setData(value); + } + catch (Exception e) { + logger.error(e.getMessage()); + } } } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java index cf94615..7c32bcb 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java @@ -10,7 +10,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; import java.util.List; public class SequentialMultiFetcher extends BaseMultiFetcher { From 6f60d81037627d115425dfe2e72b9dcd3175eed8 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Sun, 8 Oct 2017 11:20:26 +0300 Subject: [PATCH 35/66] Added cache size and max number of threads to the yml file in order to be configurable. --- settings.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/settings.yml b/settings.yml index 70a9dd4..7f0456e 100644 --- a/settings.yml +++ b/settings.yml @@ -3,6 +3,8 @@ corsOrigins: "*" # If using attaching IAM Role to instance leave accessKey and secretKey empty s3Fs: + concurrencyLevel: 30 # maximum number of threads for concurrent multifetch + maxCacheSize: 8388608 # set to zero to disable caching, else use 8388608 (8MB) for example. bucket: pathPrefix: credentials: From c681d98859664ad35b2585543929fb6786ec9f2a Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Sun, 8 Oct 2017 17:27:08 +0300 Subject: [PATCH 36/66] changed some default configuration settings --- settings.yml | 2 +- .../oss/storage/fs/cache/InMemoryCache.java | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/settings.yml b/settings.yml index 7f0456e..3cd2680 100644 --- a/settings.yml +++ b/settings.yml @@ -4,7 +4,7 @@ corsOrigins: "*" # If using attaching IAM Role to instance leave accessKey and secretKey empty s3Fs: concurrencyLevel: 30 # maximum number of threads for concurrent multifetch - maxCacheSize: 8388608 # set to zero to disable caching, else use 8388608 (8MB) for example. + maxCacheSize: 2097152 # Recommended size > 524288. Set to zero to disable caching. 
bucket: pathPrefix: credentials: diff --git a/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java b/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java index df3570d..2782d39 100644 --- a/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java +++ b/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java @@ -15,16 +15,16 @@ public class InMemoryCache implements Cache public InMemoryCache(int maxCacheSize) { - int oneMegaByte = 1048576; - int hundredMegaByte = 100 * 1048576; + int minAllowedCacheSize = 65536; + int maxAllowedCacheSize = 134217728; - if (maxCacheSize < oneMegaByte) { - logger.warn("Minimum In Memory Cache size = 1048576"); - maxCacheSize = oneMegaByte; + if (maxCacheSize < minAllowedCacheSize) { + logger.warn("Minimum allowable In Memory Cache size = ", minAllowedCacheSize); + maxCacheSize = minAllowedCacheSize; } - else if (maxCacheSize > hundredMegaByte) { - logger.warn("Maximum allowable In Memory Cache size = ", hundredMegaByte); - maxCacheSize = hundredMegaByte; + else if (maxCacheSize > maxAllowedCacheSize) { + logger.warn("Maximum allowable In Memory Cache size = ", maxAllowedCacheSize); + maxCacheSize = maxAllowedCacheSize; } final int cacheSizeLimit = maxCacheSize; From 3cb3719febc0f6ae5604ad2921cadc5e3ba56314 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Sun, 8 Oct 2017 17:31:48 +0300 Subject: [PATCH 37/66] changed some default configuration settings --- settings.yml | 2 +- .../resources/fs/multifetcher/ConcurrentMultiFetcher.java | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/settings.yml b/settings.yml index 3cd2680..2ea0ab8 100644 --- a/settings.yml +++ b/settings.yml @@ -3,7 +3,7 @@ corsOrigins: "*" # If using attaching IAM Role to instance leave accessKey and secretKey empty s3Fs: - concurrencyLevel: 30 # maximum number of threads for concurrent multifetch + concurrencyLevel: 30 # maximum number of threads for concurrent multi-fetch. Set to zero to disable concurrent fetching. maxCacheSize: 2097152 # Recommended size > 524288. Set to zero to disable caching. 
bucket: pathPrefix: diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java index af0f11f..0346c7c 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java @@ -28,9 +28,9 @@ public ConcurrentMultiFetcher(int maxThreads) { logger.warn("ConcurrentMultiFetcher concurrency level can not be greater than 50"); maxThreads = 50; } - else if (maxThreads < 1) { - logger.warn("ConcurrentMultiFetcher concurrency level can not be less than 1"); - maxThreads = 1; + else if (maxThreads < 2) { + logger.warn("ConcurrentMultiFetcher concurrency level can not be less than 2"); + maxThreads = 2; } logger.info("ConcurrentMultiFetcher maximum number of threads = " + maxThreads); From 14aa38ac6fe9ed1169b8ea6860abcfba2c557ad5 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Sun, 8 Oct 2017 17:37:37 +0300 Subject: [PATCH 38/66] changed logger class --- .../oss/storage/resources/fs/multifetcher/MultiFetcher.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index 8fb378b..9ca0281 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -21,7 +21,7 @@ public interface MultiFetcher { abstract class BaseMultiFetcher implements MultiFetcher { - private static final Logger logger = LoggerFactory.getLogger(ConcurrentMultiFetcher.class); + private static final Logger logger = LoggerFactory.getLogger(BaseMultiFetcher.class); static String load(Filesystem filesystem, Record record, EncodingType encodingType) { From bd9ba47f26e45bea5ba506632ff447adb566f26a Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 9 Oct 2017 11:38:15 +0300 Subject: [PATCH 39/66] changed default max cache size --- settings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/settings.yml b/settings.yml index 2ea0ab8..b8e510d 100644 --- a/settings.yml +++ b/settings.yml @@ -4,7 +4,7 @@ corsOrigins: "*" # If using attaching IAM Role to instance leave accessKey and secretKey empty s3Fs: concurrencyLevel: 30 # maximum number of threads for concurrent multi-fetch. Set to zero to disable concurrent fetching. - maxCacheSize: 2097152 # Recommended size > 524288. Set to zero to disable caching. + maxCacheSize: 4194304 # Recommended size > 524288. Set to zero to disable caching. 
bucket: pathPrefix: credentials: From 0e7b12190f2a9b7ed0d825fcafdb1ce07ed00524 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Tue, 10 Oct 2017 18:01:54 +0300 Subject: [PATCH 40/66] changed logger level from info to debug in a few places --- .../resources/fs/multifetcher/ConcurrentMultiFetcher.java | 4 ++-- .../resources/fs/multifetcher/SequentialMultiFetcher.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java index 0346c7c..ab45dab 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java @@ -58,7 +58,7 @@ public MultiFetchResponse loadData(final MultiFetchRequest request, final Filesy final List> futures = new ArrayList<>(count); Cache cache = filesystem.getCache(); - logger.info("---------- Starting concurrent multi fetch request for " + count + " records"); + logger.debug("---------- Starting concurrent multi fetch request for " + count + " records"); SimpleStopWatch stopWatch = new SimpleStopWatch(); @@ -101,7 +101,7 @@ public String call() throws Exception { } } - logger.info("---------- Concurrent multi fetch request for " + count + " records completed in " + stopWatch.elapsed() + " ms"); + logger.debug("---------- Concurrent multi fetch request for " + count + " records completed in " + stopWatch.elapsed() + " ms"); return new MultiFetchResponse(recordsWithData); } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java index 7c32bcb..bc2e018 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java @@ -21,7 +21,7 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem final int count = request.records.size(); final EncodingType encodingType = request.encodingType; - logger.info("---------- Starting sequential multi fetch request for " + count + " records"); + logger.debug("---------- Starting sequential multi fetch request for " + count + " records"); SimpleStopWatch stopWatch = new SimpleStopWatch(); Cache cache = filesystem.getCache(); @@ -40,7 +40,7 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem } } - logger.info("---------- Sequential multi fetch request for " + count + " records completed in " + stopWatch.elapsed() + " ms"); + logger.debug("---------- Sequential multi fetch request for " + count + " records completed in " + stopWatch.elapsed() + " ms"); return new MultiFetchResponse(recordsWithData); } From 3648114b842b62135f4f0a93c4a5b2e2aa2b5bfa Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 16 Oct 2017 16:03:09 +0300 Subject: [PATCH 41/66] refactoring and changed default setting to disable caching. 
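For reference, illustrative only and not code from this commit: the InMemoryCache kept by this refactoring is an access-ordered LinkedHashMap whose removeEldestEntry hook evicts least-recently-used entries once the summed length of the stored keys and values exceeds the configured limit. The stand-alone sketch below shows that mechanism; the class name LruStringCache and the initial capacity of 256 are placeholders.

    import java.util.Collections;
    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustrative sketch of a size-bounded LRU string cache built the same way
    // as InMemoryCache: an access-ordered LinkedHashMap that evicts eldest
    // entries once the accumulated character count exceeds the limit.
    // Assumes a key is not re-put while still present (the multi-fetch code
    // checks get() before put(), so this holds for its usage).
    public class LruStringCache {

        private final Map<String, String> cache;

        public LruStringCache(final int maxSizeInChars) {
            cache = Collections.synchronizedMap(new LinkedHashMap<String, String>(256, 0.75f, true) {

                private int currentSize = 0; // sum of key and value lengths currently stored

                @Override
                protected boolean removeEldestEntry(Map.Entry<String, String> eldest) {
                    // Invoked after each put; drop the least recently used entry while over budget.
                    boolean evict = currentSize > maxSizeInChars;
                    if (evict) {
                        currentSize -= (eldest.getKey().length() + eldest.getValue().length());
                    }
                    return evict;
                }

                @Override
                public String put(String key, String value) {
                    currentSize += (key.length() + value.length());
                    return super.put(key, value);
                }
            });
        }

        public String get(String key) {
            return cache.get(key);
        }

        public void put(String key, String value) {
            cache.put(key, value);
        }
    }

Because the map is created with accessOrder = true, every get() moves the entry to the tail of the iteration order, so the entry handed to removeEldestEntry is always the least recently used one.
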
--- settings.yml | 7 +- .../storage/TakipiStorageConfiguration.java | 39 +++++++--- .../takipi/oss/storage/TakipiStorageMain.java | 9 ++- .../oss/storage/fs/cache/InMemoryCache.java | 22 ++++-- .../oss/storage/fs/s3/S3Filesystem.java | 5 +- .../fs/multifetcher/BaseMultiFetcher.java | 71 +++++++++++++++++++ .../multifetcher/ConcurrentMultiFetcher.java | 65 ++++++++--------- .../fs/multifetcher/MultiFetcher.java | 68 ------------------ .../multifetcher/SequentialMultiFetcher.java | 2 +- 9 files changed, 164 insertions(+), 124 deletions(-) create mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java diff --git a/settings.yml b/settings.yml index b8e510d..a7a3615 100644 --- a/settings.yml +++ b/settings.yml @@ -3,14 +3,17 @@ corsOrigins: "*" # If using attaching IAM Role to instance leave accessKey and secretKey empty s3Fs: - concurrencyLevel: 30 # maximum number of threads for concurrent multi-fetch. Set to zero to disable concurrent fetching. - maxCacheSize: 4194304 # Recommended size > 524288. Set to zero to disable caching. bucket: pathPrefix: credentials: accessKey: secretKey: +multifetch: + concurrencyLevel: 30 # maximum number of threads for concurrent multi-fetch. Set to zero to disable concurrent fetching. + maxCacheSize: 0 # Recommended size >= 4194304. Set to zero to disable caching. + cacheLogLevel: debug # info / debug + #folderFs: # folderPath: /opt/takipi-storage/storage # maxUsedStoragePercentage: 0.95 diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java index b1e5b56..2bf986a 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java @@ -51,11 +51,7 @@ public void setMaxUsedStoragePercentage(double maxUsedStoragePercentage) { private S3Fs s3Fs; public static class S3Fs { - - private Integer concurrencyLevel; - private Integer maxCacheSize; - @NotEmpty private String bucket; @@ -122,28 +118,48 @@ public Credentials getCredentials() { public void setCredentials(Credentials credentials) { this.credentials = credentials; } + } + + @JsonProperty + private Multifetch multifetch; + public static class Multifetch { + + private Integer concurrencyLevel; + private Integer maxCacheSize; + private String cacheLogLevel; + @JsonProperty public Integer getConcurrencyLevel() { return concurrencyLevel; } - + @JsonProperty public void setConcurrencyLevel(Integer concurrencyLevel) { this.concurrencyLevel = concurrencyLevel; } - + @JsonProperty public Integer getMaxCacheSize() { return maxCacheSize; } - + @JsonProperty public void setMaxCacheSize(Integer maxCacheSize) { this.maxCacheSize = maxCacheSize; } + + @JsonProperty + public String getCacheLogLevel() { + return cacheLogLevel; + } + + @JsonProperty + public void setCacheLogLevel(String cacheLogLevel) { + this.cacheLogLevel = cacheLogLevel; + } } - + private boolean enableCors; @NotEmpty @@ -188,7 +204,12 @@ public void setFolderFs(FolderFs folderFs) { public S3Fs getS3Fs() { return s3Fs; } - + + @JsonProperty + public TakipiStorageConfiguration.Multifetch getMultifetch() { + return multifetch; + } + @JsonProperty public void setS3Fs(S3Fs s3Fs) { this.s3Fs = s3Fs; diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index 7c9a5aa..9aff6e8 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ 
b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -119,8 +119,13 @@ private Filesystem configureS3Filesystem(TakipiStorage String bucket = s3Fs.getBucket(); String pathPrefix = s3Fs.getPathPrefix(); log.debug("Using AWS S3 based filesystem with bucket: {}, prefix: {}", bucket, pathPrefix); - - return new S3Filesystem(amazonS3, bucket, pathPrefix, s3Fs.getConcurrencyLevel(), s3Fs.getMaxCacheSize()); + + TakipiStorageConfiguration.Multifetch multifetchConfig = configuration.getMultifetch(); + int concurrencyLevel = multifetchConfig.getConcurrencyLevel(); + int maxCacheSize = multifetchConfig.getMaxCacheSize(); + String cacheLogLevel = multifetchConfig.getCacheLogLevel(); + + return new S3Filesystem(amazonS3, bucket, pathPrefix, concurrencyLevel, maxCacheSize, cacheLogLevel); } private void enableCors(TakipiStorageConfiguration configuration, Environment environment) { diff --git a/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java b/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java index 2782d39..1fb1263 100644 --- a/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java +++ b/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java @@ -13,8 +13,7 @@ public class InMemoryCache implements Cache private final Map cache; - public InMemoryCache(int maxCacheSize) { - + public static InMemoryCache create(int maxCacheSize, String cacheLogLevel) { int minAllowedCacheSize = 65536; int maxAllowedCacheSize = 134217728; @@ -27,7 +26,11 @@ else if (maxCacheSize > maxAllowedCacheSize) { maxCacheSize = maxAllowedCacheSize; } - final int cacheSizeLimit = maxCacheSize; + return new InMemoryCache(maxCacheSize, cacheLogLevel); + } + + private InMemoryCache(final int maxCacheSize, final String cacheLogLevel) { + logger.info("In Memory Cache maximum size = " + maxCacheSize); int estimatedSizePerElement = 600; @@ -40,16 +43,25 @@ else if (maxCacheSize > maxAllowedCacheSize) { private int cacheSize = 0; @Override protected boolean removeEldestEntry(Map.Entry eldest) { - boolean remove = cacheSize > cacheSizeLimit; + boolean remove = cacheSize > maxCacheSize; if (remove) { cacheSize -= (eldest.getKey().length() + eldest.getValue().length()); + String logMsg = "InMemoryCache max size exceeded. Count = " + cache.size() + ". Size = " + cacheSize; + switch (cacheLogLevel) { + case "info": logger.info(logMsg); break; + default: logger.debug(logMsg); + } } return remove; } @Override public String put(String key, String value) { cacheSize += (key.length() + value.length()); - logger.debug("InMemoryCache size = " + cacheSize); + String logMsg = "InMemoryCache element inserted. Count = " + cache.size() + ". Size = " + cacheSize; + switch (cacheLogLevel) { + case "info": logger.info(logMsg); break; + default: logger.debug(logMsg); + } return super.put(key, value); } }); diff --git a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java index 09cf21e..d6d961a 100644 --- a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java @@ -36,13 +36,14 @@ public S3Filesystem(AmazonS3 amazonS3, String bucket, String pathPrefix, int multiFetcherConcurrencyLevel, - int maxCacheSize) { + int maxCacheSize, + String cacheLogLevel) { this.amazonS3 = amazonS3; this.bucket = bucket; this.pathPrefix = pathPrefix; - this.cache = (maxCacheSize > 0) ? new InMemoryCache(maxCacheSize) : DummyCache.dummyCache; + this.cache = (maxCacheSize > 0) ? 
InMemoryCache.create(maxCacheSize, cacheLogLevel) : DummyCache.dummyCache; this.multiFetcher = (multiFetcherConcurrencyLevel > 1) ? new ConcurrentMultiFetcher(multiFetcherConcurrencyLevel) : diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java new file mode 100644 index 0000000..da7f1de --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java @@ -0,0 +1,71 @@ +package com.takipi.oss.storage.resources.fs.multifetcher; + +import com.takipi.oss.storage.data.EncodingType; +import com.takipi.oss.storage.data.RecordWithData; +import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.cache.Cache; +import com.takipi.oss.storage.helper.FilesystemUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; + +abstract class BaseMultiFetcher implements MultiFetcher +{ + private static final Logger logger = LoggerFactory.getLogger(BaseMultiFetcher.class); + + static String load(Filesystem filesystem, Record record, EncodingType encodingType) { + + SimpleStopWatch stopWatch = new SimpleStopWatch(); + String value = null; + final int MAX_TRIES = 2; + int count = 0; + + while ((value == null) && (count < MAX_TRIES)) { + + if (count++ > 0) { + logger.warn("Retry loading object for key " + record.getKey()); + stopWatch.reset(); + } + + try { + value = FilesystemUtil.read(filesystem, record, encodingType); + } + catch (Exception e) { + // Need this catch because some exceptions inside FilesystemUtil.read are caught and result in a + // null return value, and some are thrown. The code would be simpler if all exceptions were thrown. + } + } + + if (value != null) { + + logger.debug("--------------------- " + Thread.currentThread().getName() + " loaded key " + + record.getKey() + " in " + stopWatch.elapsed() + " ms. " + value.length() + " bytes"); + + return value; + } + else { + + logger.error("Failed to load object for key: " + record.getKey() + ". Elapsed time = " + stopWatch.elapsed() + " ms"); + + throw new RuntimeException("Failed to load object for key: " + record.getKey()); + } + } + + static List loadFromCache(List records, Cache cache) { + + List recordsWithData = new ArrayList<>(records.size()); + + for (Record record : records) { + String value = cache.get(record.getKey()); + recordsWithData.add(RecordWithData.of(record, value)); + if (value != null) { + logger.debug("Object for key " + record.getKey() + " found in cache. 
" + value.length() + " bytes"); + } + } + + return recordsWithData; + } +} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java index ab45dab..ff1c6e3 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java @@ -18,6 +18,7 @@ public class ConcurrentMultiFetcher extends BaseMultiFetcher { private static final Logger logger = LoggerFactory.getLogger(ConcurrentMultiFetcher.class); + private static final SequentialMultiFetcher sequentialMultiFetcher = new SequentialMultiFetcher(); private final ExecutorService executorService; private final AtomicInteger threadCount = new AtomicInteger(); @@ -52,51 +53,45 @@ public Thread newThread(Runnable r) @Override public MultiFetchResponse loadData(final MultiFetchRequest request, final Filesystem filesystem) { - final EncodingType encodingType = request.encodingType; - final List recordsToRetrieve = request.records; - final int count = recordsToRetrieve.size(); - final List> futures = new ArrayList<>(count); - Cache cache = filesystem.getCache(); + final int count = request.records.size(); + + if (count == 1) { + logger.debug("Only one record so loading object in calling thread"); + return sequentialMultiFetcher.loadData(request, filesystem); + } logger.debug("---------- Starting concurrent multi fetch request for " + count + " records"); + final EncodingType encodingType = request.encodingType; + final List> futures = new ArrayList<>(count); + Cache cache = filesystem.getCache(); SimpleStopWatch stopWatch = new SimpleStopWatch(); final List recordsWithData = loadFromCache(request.records, cache); - // if only 1 record, then no need to initiate a multi-threaded load - if (recordsWithData.size() == 1) { - RecordWithData firstRecord = recordsWithData.get(0); - if (firstRecord.getData() == null) { - logger.debug("Only one record so loading object in calling thread"); - firstRecord.setData(load(filesystem, firstRecord.getRecord(), encodingType)); + for (final RecordWithData recordWithData : recordsWithData) { + if (recordWithData.getData() == null) { + Callable callable = new Callable() { + @Override + public String call() throws Exception { + return load(filesystem, recordWithData.getRecord(), encodingType); + } + }; + futures.add(executorService.submit(callable)); } } - else { - for (final RecordWithData recordWithData : recordsWithData) { - if (recordWithData.getData() == null) { - Callable callable = new Callable() { - @Override - public String call() throws Exception { - return load(filesystem, recordWithData.getRecord(), encodingType); - } - }; - futures.add(executorService.submit(callable)); + + int futureIndex = 0; + + for (RecordWithData recordWithData : recordsWithData) { + if (recordWithData.getData() == null) { + try { + String value = futures.get(futureIndex++).get(20, TimeUnit.SECONDS); + cache.put(recordWithData.getRecord().getKey(), value); + recordWithData.setData(value); } - } - - int futureIndex = 0; - - for (RecordWithData recordWithData : recordsWithData) { - if (recordWithData.getData() == null) { - try { - String value = futures.get(futureIndex++).get(20, TimeUnit.SECONDS); - cache.put(recordWithData.getRecord().getKey(), value); - recordWithData.setData(value); - } - catch (Exception e) { - logger.error(e.getMessage()); - } + catch (Exception e) { + 
logger.error(e.getMessage()); } } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index 9ca0281..f8cc496 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -1,78 +1,10 @@ package com.takipi.oss.storage.resources.fs.multifetcher; -import com.takipi.oss.storage.data.EncodingType; -import com.takipi.oss.storage.data.RecordWithData; import com.takipi.oss.storage.data.fetch.MultiFetchRequest; import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.fs.cache.Cache; -import com.takipi.oss.storage.helper.FilesystemUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.List; public interface MultiFetcher { - MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem); } - -abstract class BaseMultiFetcher implements MultiFetcher { - - private static final Logger logger = LoggerFactory.getLogger(BaseMultiFetcher.class); - - static String load(Filesystem filesystem, Record record, EncodingType encodingType) { - - SimpleStopWatch stopWatch = new SimpleStopWatch(); - String value = null; - final int MAX_TRIES = 2; - int count = 0; - - while ((value == null) && (count < MAX_TRIES)) { - - if (count++ > 0) { - logger.warn("Retry loading object for key " + record.getKey()); - stopWatch.reset(); - } - - try { - value = FilesystemUtil.read(filesystem, record, encodingType); - } - catch (Exception e) { - // Need this catch because some exceptions inside FilesystemUtil.read are caught and result in a - // null return value, and some are thrown. The code would be simpler if all exceptions were thrown. - } - } - - if (value != null) { - - logger.debug("--------------------- " + Thread.currentThread().getName() + " loaded key " + - record.getKey() + " in " + stopWatch.elapsed() + " ms. " + value.length() + " bytes"); - - return value; - } - else { - - logger.error("Failed to load object for key: " + record.getKey() + ". Elapsed time = " + stopWatch.elapsed() + " ms"); - - throw new RuntimeException("Failed to load object for key: " + record.getKey()); - } - } - - static List loadFromCache(List records, Cache cache) { - - List recordsWithData = new ArrayList<>(records.size()); - - for (Record record : records) { - String value = cache.get(record.getKey()); - recordsWithData.add(RecordWithData.of(record, value)); - if (value != null) { - logger.debug("Object for key " + record.getKey() + " found in cache. 
" + value.length() + " bytes"); - } - } - - return recordsWithData; - } -} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java index bc2e018..5ef7399 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java @@ -20,8 +20,8 @@ public class SequentialMultiFetcher extends BaseMultiFetcher { public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { final int count = request.records.size(); - final EncodingType encodingType = request.encodingType; logger.debug("---------- Starting sequential multi fetch request for " + count + " records"); + final EncodingType encodingType = request.encodingType; SimpleStopWatch stopWatch = new SimpleStopWatch(); Cache cache = filesystem.getCache(); From ae0e2fde6b8ffa60cf83c34af598810bee8ae2ff Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Tue, 17 Oct 2017 13:57:52 +0300 Subject: [PATCH 42/66] removed caching and extracted the concurrent task executor to a generic utility object. --- .../takipi/oss/storage/TakipiStorageMain.java | 6 +- .../takipi/oss/storage/fs/api/Filesystem.java | 3 - .../takipi/oss/storage/fs/cache/Cache.java | 7 -- .../oss/storage/fs/cache/DummyCache.java | 16 --- .../oss/storage/fs/cache/InMemoryCache.java | 80 --------------- .../fs/concurrent/ConcurrentTaskExecutor.java | 82 +++++++++++++++ .../fs/concurrent/SequentialTaskExecutor.java | 32 ++++++ .../fs/concurrent/SimpleStopWatch.java | 18 ++++ .../oss/storage/fs/concurrent/Task.java | 5 + .../storage/fs/concurrent/TaskExecutor.java | 7 ++ .../storage/fs/folder/FolderFilesystem.java | 7 -- .../oss/storage/fs/s3/S3Filesystem.java | 15 +-- .../fs/multifetcher/BaseMultiFetcher.java | 69 +++++-------- .../multifetcher/ConcurrentMultiFetcher.java | 99 +------------------ .../fs/multifetcher/S3ObjectFetcherTask.java | 75 ++++++++++++++ .../multifetcher/SequentialMultiFetcher.java | 43 +------- .../fs/multifetcher/SimpleStopWatch.java | 18 ---- 17 files changed, 251 insertions(+), 331 deletions(-) delete mode 100644 src/main/java/com/takipi/oss/storage/fs/cache/Cache.java delete mode 100644 src/main/java/com/takipi/oss/storage/fs/cache/DummyCache.java delete mode 100644 src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java create mode 100644 src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java create mode 100644 src/main/java/com/takipi/oss/storage/fs/concurrent/SequentialTaskExecutor.java create mode 100644 src/main/java/com/takipi/oss/storage/fs/concurrent/SimpleStopWatch.java create mode 100644 src/main/java/com/takipi/oss/storage/fs/concurrent/Task.java create mode 100644 src/main/java/com/takipi/oss/storage/fs/concurrent/TaskExecutor.java create mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java delete mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SimpleStopWatch.java diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index 9aff6e8..6ad68db 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -122,10 +122,8 @@ private Filesystem configureS3Filesystem(TakipiStorage 
TakipiStorageConfiguration.Multifetch multifetchConfig = configuration.getMultifetch(); int concurrencyLevel = multifetchConfig.getConcurrencyLevel(); - int maxCacheSize = multifetchConfig.getMaxCacheSize(); - String cacheLogLevel = multifetchConfig.getCacheLogLevel(); - - return new S3Filesystem(amazonS3, bucket, pathPrefix, concurrencyLevel, maxCacheSize, cacheLogLevel); + + return new S3Filesystem(amazonS3, bucket, pathPrefix, concurrencyLevel); } private void enableCors(TakipiStorageConfiguration configuration, Environment environment) { diff --git a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java index 967bca0..3020a2c 100644 --- a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java @@ -4,7 +4,6 @@ import java.io.InputStream; import com.takipi.oss.storage.fs.BaseRecord; -import com.takipi.oss.storage.fs.cache.Cache; import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; public interface Filesystem extends FilesystemHealth { @@ -91,6 +90,4 @@ public interface Filesystem extends FilesystemHealth { BaseRecord pathToRecord(String path); MultiFetcher getMultiFetcher(); - - Cache getCache(); } diff --git a/src/main/java/com/takipi/oss/storage/fs/cache/Cache.java b/src/main/java/com/takipi/oss/storage/fs/cache/Cache.java deleted file mode 100644 index 6288356..0000000 --- a/src/main/java/com/takipi/oss/storage/fs/cache/Cache.java +++ /dev/null @@ -1,7 +0,0 @@ -package com.takipi.oss.storage.fs.cache; - -public interface Cache -{ - String get(String key); - String put(String key, String value); -} diff --git a/src/main/java/com/takipi/oss/storage/fs/cache/DummyCache.java b/src/main/java/com/takipi/oss/storage/fs/cache/DummyCache.java deleted file mode 100644 index 2259b3c..0000000 --- a/src/main/java/com/takipi/oss/storage/fs/cache/DummyCache.java +++ /dev/null @@ -1,16 +0,0 @@ -package com.takipi.oss.storage.fs.cache; - -public class DummyCache implements Cache -{ - public static DummyCache dummyCache = new DummyCache(); - - @Override - public String get(String key) { - return null; - } - - @Override - public String put(String key, String value) { - return null; - } -} diff --git a/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java b/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java deleted file mode 100644 index 1fb1263..0000000 --- a/src/main/java/com/takipi/oss/storage/fs/cache/InMemoryCache.java +++ /dev/null @@ -1,80 +0,0 @@ -package com.takipi.oss.storage.fs.cache; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.Map; - -public class InMemoryCache implements Cache -{ - private static final Logger logger = LoggerFactory.getLogger(InMemoryCache.class); - - private final Map cache; - - public static InMemoryCache create(int maxCacheSize, String cacheLogLevel) { - int minAllowedCacheSize = 65536; - int maxAllowedCacheSize = 134217728; - - if (maxCacheSize < minAllowedCacheSize) { - logger.warn("Minimum allowable In Memory Cache size = ", minAllowedCacheSize); - maxCacheSize = minAllowedCacheSize; - } - else if (maxCacheSize > maxAllowedCacheSize) { - logger.warn("Maximum allowable In Memory Cache size = ", maxAllowedCacheSize); - maxCacheSize = maxAllowedCacheSize; - } - - return new InMemoryCache(maxCacheSize, cacheLogLevel); - } - - private InMemoryCache(final int maxCacheSize, final String cacheLogLevel) 
{ - - logger.info("In Memory Cache maximum size = " + maxCacheSize); - - int estimatedSizePerElement = 600; - int initialCapacity = (int)Math.pow(2, Math.ceil(Math.log((float)maxCacheSize / estimatedSizePerElement) / Math.log(2))); - - cache = Collections.synchronizedMap( - - new LinkedHashMap(initialCapacity,0.75f, true) { - - private int cacheSize = 0; - - @Override protected boolean removeEldestEntry(Map.Entry eldest) { - boolean remove = cacheSize > maxCacheSize; - if (remove) { - cacheSize -= (eldest.getKey().length() + eldest.getValue().length()); - String logMsg = "InMemoryCache max size exceeded. Count = " + cache.size() + ". Size = " + cacheSize; - switch (cacheLogLevel) { - case "info": logger.info(logMsg); break; - default: logger.debug(logMsg); - } - } - return remove; - } - - @Override public String put(String key, String value) { - cacheSize += (key.length() + value.length()); - String logMsg = "InMemoryCache element inserted. Count = " + cache.size() + ". Size = " + cacheSize; - switch (cacheLogLevel) { - case "info": logger.info(logMsg); break; - default: logger.debug(logMsg); - } - return super.put(key, value); - } - }); - - } - - @Override - public String get(String key) { - return cache.get(key); - } - - @Override - public String put(String key, String value) { - return cache.put(key, value); - } -} diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java new file mode 100644 index 0000000..18cc606 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java @@ -0,0 +1,82 @@ +package com.takipi.oss.storage.fs.concurrent; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicInteger; + +public class ConcurrentTaskExecutor implements TaskExecutor { + + private static final Logger logger = LoggerFactory.getLogger(ConcurrentTaskExecutor.class); + + private final ExecutorService executorService; + private final AtomicInteger threadCount = new AtomicInteger(); + + public ConcurrentTaskExecutor(int maxThreads) { + + if (maxThreads > 50) { + logger.warn("ConcurrentTaskExecutor cannot have more than 50 threads"); + maxThreads = 50; + } + else if (maxThreads < 2) { + logger.warn("ConcurrentTaskExecutor cannot have less than 2 threads"); + maxThreads = 2; + } + + logger.info("ConcurrentTaskExecutor maximum number of threads = " + maxThreads); + + ThreadFactory threadFactory = new ThreadFactory() { + @Override + public Thread newThread(Runnable r) + { + Thread t = new Thread(r); + t.setDaemon(true); + t.setName("conctaskexec_thread_" + threadCount.incrementAndGet()); + return t; + } + }; + + executorService = Executors.newFixedThreadPool(maxThreads, threadFactory); + } + + @Override + public void execute(List tasks) { + + final int count = tasks.size(); + + logger.debug("---------- Starting concurrent task execute for " + count + " tasks"); + + SimpleStopWatch stopWatch = new SimpleStopWatch(); + + if (count == 1) { + try { + tasks.get(0).getRunnable().run(); + } + catch (Exception e) { + logger.error(e.getMessage()); + } + } + else { + + final List> futures = new ArrayList<>(count); + + for (Task command : tasks) { + futures.add(executorService.submit(command.getRunnable())); + } + + for (Future future : futures) { + try { + future.get(1, TimeUnit.MINUTES); + } + catch (Exception e) { + 
logger.error(e.getMessage()); + } + } + } + + logger.debug("---------- Concurrent task executor executed " + count + "tasks in " + stopWatch.elapsed() + " ms"); + } +} diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/SequentialTaskExecutor.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/SequentialTaskExecutor.java new file mode 100644 index 0000000..1a1a0e5 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/fs/concurrent/SequentialTaskExecutor.java @@ -0,0 +1,32 @@ +package com.takipi.oss.storage.fs.concurrent; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; + +public class SequentialTaskExecutor implements TaskExecutor { + + private static final Logger logger = LoggerFactory.getLogger(ConcurrentTaskExecutor.class); + + @Override + public void execute(List tasks) { + + final int count = tasks.size(); + + SimpleStopWatch stopWatch = new SimpleStopWatch(); + + logger.debug("---------- Starting sequential execute for " + count + " tasks"); + + for (Task task : tasks) { + try { + task.getRunnable().run(); + } + catch (Exception e) { + logger.error(e.getMessage()); + } + } + + logger.debug("---------- Sequential task executor executed " + count + "tasks in " + stopWatch.elapsed() + " ms"); + } +} diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/SimpleStopWatch.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/SimpleStopWatch.java new file mode 100644 index 0000000..7faf720 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/fs/concurrent/SimpleStopWatch.java @@ -0,0 +1,18 @@ +package com.takipi.oss.storage.fs.concurrent; + +public class SimpleStopWatch +{ + private long start; + + public SimpleStopWatch() { + reset(); + } + + public long elapsed() { + return System.currentTimeMillis() - start; + } + + public void reset() { + start = System.currentTimeMillis(); + } +} diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/Task.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/Task.java new file mode 100644 index 0000000..3a64104 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/fs/concurrent/Task.java @@ -0,0 +1,5 @@ +package com.takipi.oss.storage.fs.concurrent; + +public interface Task { + Runnable getRunnable(); +} diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/TaskExecutor.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/TaskExecutor.java new file mode 100644 index 0000000..760a84a --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/fs/concurrent/TaskExecutor.java @@ -0,0 +1,7 @@ +package com.takipi.oss.storage.fs.concurrent; + +import java.util.List; + +public interface TaskExecutor { + void execute(List tasks); +} diff --git a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java index f46e6e3..12467bc 100644 --- a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java @@ -2,8 +2,6 @@ import com.takipi.oss.storage.fs.BaseRecord; import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.fs.cache.Cache; -import com.takipi.oss.storage.fs.cache.DummyCache; import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; import com.takipi.oss.storage.resources.fs.multifetcher.SequentialMultiFetcher; import org.apache.commons.io.IOUtils; @@ -82,11 +80,6 @@ public MultiFetcher getMultiFetcher() { return new SequentialMultiFetcher(); } - @Override - 
public Cache getCache() { - return DummyCache.dummyCache; - } - protected void beforePut(File file) { file.getParentFile().mkdirs(); } diff --git a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java index d6d961a..ef435cf 100644 --- a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java @@ -16,9 +16,6 @@ import com.takipi.oss.storage.fs.api.Filesystem; import com.takipi.oss.storage.fs.api.SearchRequest; import com.takipi.oss.storage.fs.api.SearchResult; -import com.takipi.oss.storage.fs.cache.Cache; -import com.takipi.oss.storage.fs.cache.DummyCache; -import com.takipi.oss.storage.fs.cache.InMemoryCache; import com.takipi.oss.storage.helper.FilesystemUtil; import com.takipi.oss.storage.resources.fs.multifetcher.ConcurrentMultiFetcher; import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; @@ -26,7 +23,6 @@ public class S3Filesystem implements Filesystem { - private final Cache cache; private final MultiFetcher multiFetcher; private final AmazonS3 amazonS3; private final String bucket; @@ -35,16 +31,12 @@ public class S3Filesystem implements Filesystem { public S3Filesystem(AmazonS3 amazonS3, String bucket, String pathPrefix, - int multiFetcherConcurrencyLevel, - int maxCacheSize, - String cacheLogLevel) { + int multiFetcherConcurrencyLevel) { this.amazonS3 = amazonS3; this.bucket = bucket; this.pathPrefix = pathPrefix; - this.cache = (maxCacheSize > 0) ? InMemoryCache.create(maxCacheSize, cacheLogLevel) : DummyCache.dummyCache; - this.multiFetcher = (multiFetcherConcurrencyLevel > 1) ? new ConcurrentMultiFetcher(multiFetcherConcurrencyLevel) : new SequentialMultiFetcher(); @@ -123,11 +115,6 @@ public MultiFetcher getMultiFetcher() { return multiFetcher; } - @Override - public Cache getCache() { - return cache; - } - private String keyOf(T record) { if (this.pathPrefix != null) { return this.pathPrefix + File.separator + record.getPath(); diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java index da7f1de..02603f8 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java @@ -2,10 +2,12 @@ import com.takipi.oss.storage.data.EncodingType; import com.takipi.oss.storage.data.RecordWithData; +import com.takipi.oss.storage.data.fetch.MultiFetchRequest; +import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.fs.cache.Cache; -import com.takipi.oss.storage.helper.FilesystemUtil; +import com.takipi.oss.storage.fs.concurrent.Task; +import com.takipi.oss.storage.fs.concurrent.TaskExecutor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -16,56 +18,31 @@ abstract class BaseMultiFetcher implements MultiFetcher { private static final Logger logger = LoggerFactory.getLogger(BaseMultiFetcher.class); - static String load(Filesystem filesystem, Record record, EncodingType encodingType) { + private final TaskExecutor taskExecutor; + + BaseMultiFetcher(TaskExecutor taskExecutor) { + this.taskExecutor = taskExecutor; + } + + @Override + public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { - SimpleStopWatch stopWatch = new 
SimpleStopWatch(); - String value = null; - final int MAX_TRIES = 2; - int count = 0; + final int count = request.records.size(); + final EncodingType encodingType = request.encodingType; + final List tasks = new ArrayList<>(count); - while ((value == null) && (count < MAX_TRIES)) { - - if (count++ > 0) { - logger.warn("Retry loading object for key " + record.getKey()); - stopWatch.reset(); - } - - try { - value = FilesystemUtil.read(filesystem, record, encodingType); - } - catch (Exception e) { - // Need this catch because some exceptions inside FilesystemUtil.read are caught and result in a - // null return value, and some are thrown. The code would be simpler if all exceptions were thrown. - } - } + List recordsWithData = new ArrayList<>(count); - if (value != null) { - - logger.debug("--------------------- " + Thread.currentThread().getName() + " loaded key " + - record.getKey() + " in " + stopWatch.elapsed() + " ms. " + value.length() + " bytes"); - - return value; + for (Record record : request.records) { + RecordWithData recordWithData = RecordWithData.of(record, null); + recordsWithData.add(recordWithData); + tasks.add(new S3ObjectFetcherTask(recordWithData, filesystem, encodingType)); } - else { - - logger.error("Failed to load object for key: " + record.getKey() + ". Elapsed time = " + stopWatch.elapsed() + " ms"); - - throw new RuntimeException("Failed to load object for key: " + record.getKey()); - } - } - - static List loadFromCache(List records, Cache cache) { - List recordsWithData = new ArrayList<>(records.size()); + taskExecutor.execute(tasks); - for (Record record : records) { - String value = cache.get(record.getKey()); - recordsWithData.add(RecordWithData.of(record, value)); - if (value != null) { - logger.debug("Object for key " + record.getKey() + " found in cache. 
" + value.length() + " bytes"); - } - } + logger.debug("Multi fetched completed fetching of " + count + " objects"); - return recordsWithData; + return new MultiFetchResponse(recordsWithData); } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java index ff1c6e3..c98c2d0 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java @@ -1,103 +1,10 @@ package com.takipi.oss.storage.resources.fs.multifetcher; -import com.takipi.oss.storage.data.EncodingType; -import com.takipi.oss.storage.data.RecordWithData; -import com.takipi.oss.storage.data.fetch.MultiFetchRequest; -import com.takipi.oss.storage.data.fetch.MultiFetchResponse; -import com.takipi.oss.storage.fs.Record; -import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.fs.cache.Cache; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.*; -import java.util.concurrent.atomic.AtomicInteger; +import com.takipi.oss.storage.fs.concurrent.ConcurrentTaskExecutor; public class ConcurrentMultiFetcher extends BaseMultiFetcher { - private static final Logger logger = LoggerFactory.getLogger(ConcurrentMultiFetcher.class); - private static final SequentialMultiFetcher sequentialMultiFetcher = new SequentialMultiFetcher(); - - private final ExecutorService executorService; - private final AtomicInteger threadCount = new AtomicInteger(); - public ConcurrentMultiFetcher(int maxThreads) { - - if (maxThreads > 50) { - logger.warn("ConcurrentMultiFetcher concurrency level can not be greater than 50"); - maxThreads = 50; - } - else if (maxThreads < 2) { - logger.warn("ConcurrentMultiFetcher concurrency level can not be less than 2"); - maxThreads = 2; - } - - logger.info("ConcurrentMultiFetcher maximum number of threads = " + maxThreads); - - ThreadFactory threadFactory = new ThreadFactory() { - @Override - public Thread newThread(Runnable r) - { - Thread t = new Thread(r); - t.setDaemon(true); - t.setName("fetcher_thread_" + threadCount.incrementAndGet()); - return t; - } - }; - - executorService = Executors.newFixedThreadPool(maxThreads, threadFactory); - } - - @Override - public MultiFetchResponse loadData(final MultiFetchRequest request, final Filesystem filesystem) { - - final int count = request.records.size(); - - if (count == 1) { - logger.debug("Only one record so loading object in calling thread"); - return sequentialMultiFetcher.loadData(request, filesystem); - } - - logger.debug("---------- Starting concurrent multi fetch request for " + count + " records"); - - final EncodingType encodingType = request.encodingType; - final List> futures = new ArrayList<>(count); - Cache cache = filesystem.getCache(); - SimpleStopWatch stopWatch = new SimpleStopWatch(); - - final List recordsWithData = loadFromCache(request.records, cache); - - for (final RecordWithData recordWithData : recordsWithData) { - if (recordWithData.getData() == null) { - Callable callable = new Callable() { - @Override - public String call() throws Exception { - return load(filesystem, recordWithData.getRecord(), encodingType); - } - }; - futures.add(executorService.submit(callable)); - } - } - - int futureIndex = 0; - - for (RecordWithData recordWithData : recordsWithData) { - if 
(recordWithData.getData() == null) { - try { - String value = futures.get(futureIndex++).get(20, TimeUnit.SECONDS); - cache.put(recordWithData.getRecord().getKey(), value); - recordWithData.setData(value); - } - catch (Exception e) { - logger.error(e.getMessage()); - } - } - } - - logger.debug("---------- Concurrent multi fetch request for " + count + " records completed in " + stopWatch.elapsed() + " ms"); - - return new MultiFetchResponse(recordsWithData); + super(new ConcurrentTaskExecutor(maxThreads)); } -} \ No newline at end of file +} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java new file mode 100644 index 0000000..70fe533 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java @@ -0,0 +1,75 @@ +package com.takipi.oss.storage.resources.fs.multifetcher; + +import com.takipi.oss.storage.data.EncodingType; +import com.takipi.oss.storage.data.RecordWithData; +import com.takipi.oss.storage.fs.Record; +import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.concurrent.SimpleStopWatch; +import com.takipi.oss.storage.fs.concurrent.Task; +import com.takipi.oss.storage.helper.FilesystemUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class S3ObjectFetcherTask implements Task { + + private static final Logger logger = LoggerFactory.getLogger(S3ObjectFetcherTask.class); + + private final RecordWithData recordWithData; + private final Filesystem filesystem; + private final EncodingType encodingType; + + S3ObjectFetcherTask(RecordWithData recordWithData, Filesystem filesystem, EncodingType encodingType) { + this.recordWithData = recordWithData; + this.filesystem = filesystem; + this.encodingType = encodingType; + } + + @Override + public Runnable getRunnable() { + return new Runnable() { + @Override + public void run() { + String result = load(filesystem, recordWithData.getRecord(), encodingType); + recordWithData.setData(result); + } + }; + } + + private static String load(Filesystem filesystem, Record record, EncodingType encodingType) { + + SimpleStopWatch stopWatch = new SimpleStopWatch(); + String value = null; + final int MAX_TRIES = 2; + int count = 0; + + while ((value == null) && (count < MAX_TRIES)) { + + if (count++ > 0) { + logger.warn("Retry loading object for key " + record.getKey()); + stopWatch.reset(); + } + + try { + value = FilesystemUtil.read(filesystem, record, encodingType); + } + catch (Exception e) { + // Need this catch because some exceptions inside FilesystemUtil.read are caught and result in a + // null return value, and some are thrown. The code would be simpler if all exceptions were thrown. + } + } + + if (value != null) { + + logger.debug("--------------------- " + Thread.currentThread().getName() + " loaded key " + + record.getKey() + " in " + stopWatch.elapsed() + " ms. " + value.length() + " bytes"); + + return value; + } + else { + + logger.error("Failed to load object for key: " + record.getKey() + ". 
Elapsed time = " + stopWatch.elapsed() + " ms"); + + throw new RuntimeException("Failed to load object for key: " + record.getKey()); + } + } +} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java index 5ef7399..7b5c12c 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java @@ -1,47 +1,10 @@ package com.takipi.oss.storage.resources.fs.multifetcher; -import com.takipi.oss.storage.data.EncodingType; -import com.takipi.oss.storage.data.RecordWithData; -import com.takipi.oss.storage.data.fetch.MultiFetchRequest; -import com.takipi.oss.storage.data.fetch.MultiFetchResponse; -import com.takipi.oss.storage.fs.Record; -import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.fs.cache.Cache; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; +import com.takipi.oss.storage.fs.concurrent.SequentialTaskExecutor; public class SequentialMultiFetcher extends BaseMultiFetcher { - private static final Logger logger = LoggerFactory.getLogger(SequentialMultiFetcher.class); - - @Override - public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { - - final int count = request.records.size(); - logger.debug("---------- Starting sequential multi fetch request for " + count + " records"); - final EncodingType encodingType = request.encodingType; - SimpleStopWatch stopWatch = new SimpleStopWatch(); - Cache cache = filesystem.getCache(); - - final List recordsWithData = loadFromCache(request.records, cache); - - for (RecordWithData recordWithData : recordsWithData) { - - if (recordWithData.getData() == null) { - try { - String value = load(filesystem, recordWithData.getRecord(), encodingType); - recordWithData.setData(value); - } - catch (Exception e) { - logger.error(e.getMessage()); - } - } - } - - logger.debug("---------- Sequential multi fetch request for " + count + " records completed in " + stopWatch.elapsed() + " ms"); - - return new MultiFetchResponse(recordsWithData); + public SequentialMultiFetcher() { + super(new SequentialTaskExecutor()); } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SimpleStopWatch.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SimpleStopWatch.java deleted file mode 100644 index 17594a6..0000000 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SimpleStopWatch.java +++ /dev/null @@ -1,18 +0,0 @@ -package com.takipi.oss.storage.resources.fs.multifetcher; - -class SimpleStopWatch -{ - private long start; - - SimpleStopWatch() { - reset(); - } - - long elapsed() { - return System.currentTimeMillis() - start; - } - - void reset() { - start = System.currentTimeMillis(); - } -} From 15e298a08db62c8b8eee47ef0ae557ea75f8c3ef Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Tue, 17 Oct 2017 16:54:50 +0300 Subject: [PATCH 43/66] changed tabs to spaces and a small refactoring. 
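
[Editorial note, not part of the original commit message] The "small refactoring" referred to here is the removal of the Task wrapper: TaskExecutor and its implementations now consume plain java.lang.Runnable instances, and S3ObjectFetcherTask implements Runnable directly. A minimal usage sketch under that assumption (the wrapper class name and the printed messages are illustrative only; the List<Runnable> signature is inferred from the for-loops in the diff below):

    import java.util.ArrayList;
    import java.util.List;

    import com.takipi.oss.storage.fs.concurrent.SequentialTaskExecutor;
    import com.takipi.oss.storage.fs.concurrent.TaskExecutor;

    public class TaskExecutorUsageSketch {
        public static void main(String[] args) {
            // Any Runnable can now be submitted; no Task wrapper is required.
            List<Runnable> tasks = new ArrayList<Runnable>();
            tasks.add(new Runnable() {
                @Override
                public void run() {
                    System.out.println("fetching object 1");
                }
            });
            tasks.add(new Runnable() {
                @Override
                public void run() {
                    System.out.println("fetching object 2");
                }
            });

            // ConcurrentTaskExecutor(maxThreads) could be substituted for parallel execution.
            TaskExecutor executor = new SequentialTaskExecutor();
            executor.execute(tasks);
        }
    }
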
--- .../takipi/oss/storage/TakipiStorageMain.java | 12 +++++------- .../takipi/oss/storage/fs/api/Filesystem.java | 3 --- .../fs/concurrent/ConcurrentTaskExecutor.java | 8 ++++---- .../fs/concurrent/SequentialTaskExecutor.java | 6 +++--- .../takipi/oss/storage/fs/concurrent/Task.java | 5 ----- .../storage/fs/concurrent/TaskExecutor.java | 2 +- .../storage/fs/folder/FolderFilesystem.java | 7 ------- .../takipi/oss/storage/fs/s3/S3Filesystem.java | 18 +----------------- .../fs/JsonMultiFetchStorageResource.java | 11 +++++++++-- .../fs/multifetcher/BaseMultiFetcher.java | 11 +++++------ .../fs/multifetcher/S3ObjectFetcherTask.java | 14 ++++---------- 11 files changed, 32 insertions(+), 65 deletions(-) delete mode 100644 src/main/java/com/takipi/oss/storage/fs/concurrent/Task.java diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index 6ad68db..c9eb9f9 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -60,10 +60,11 @@ public void run(TakipiStorageConfiguration configuration, Environment environmen } Filesystem filesystem = configureFilesystem(configuration, environment); - + TakipiStorageConfiguration.Multifetch multifetchConfig = configuration.getMultifetch(); + environment.healthChecks().register("filesystem", new FilesystemHealthCheck(filesystem)); environment.jersey().register(new BinaryStorageResource(filesystem)); - environment.jersey().register(new JsonMultiFetchStorageResource(filesystem)); + environment.jersey().register(new JsonMultiFetchStorageResource(filesystem, multifetchConfig.getConcurrencyLevel())); environment.jersey().register(new JsonMultiDeleteStorageResource(filesystem)); environment.jersey().register(new JsonSimpleFetchStorageResource(filesystem)); environment.jersey().register(new JsonSimpleSearchStorageResource(filesystem)); @@ -119,11 +120,8 @@ private Filesystem configureS3Filesystem(TakipiStorage String bucket = s3Fs.getBucket(); String pathPrefix = s3Fs.getPathPrefix(); log.debug("Using AWS S3 based filesystem with bucket: {}, prefix: {}", bucket, pathPrefix); - - TakipiStorageConfiguration.Multifetch multifetchConfig = configuration.getMultifetch(); - int concurrencyLevel = multifetchConfig.getConcurrencyLevel(); - - return new S3Filesystem(amazonS3, bucket, pathPrefix, concurrencyLevel); + + return new S3Filesystem(amazonS3, bucket, pathPrefix); } private void enableCors(TakipiStorageConfiguration configuration, Environment environment) { diff --git a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java index 3020a2c..18bdd03 100644 --- a/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/api/Filesystem.java @@ -4,7 +4,6 @@ import java.io.InputStream; import com.takipi.oss.storage.fs.BaseRecord; -import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; public interface Filesystem extends FilesystemHealth { /** @@ -88,6 +87,4 @@ public interface Filesystem extends FilesystemHealth { * @return record */ BaseRecord pathToRecord(String path); - - MultiFetcher getMultiFetcher(); } diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java index 18cc606..059c568 100644 --- 
a/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java +++ b/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java @@ -43,7 +43,7 @@ public Thread newThread(Runnable r) } @Override - public void execute(List tasks) { + public void execute(List tasks) { final int count = tasks.size(); @@ -53,7 +53,7 @@ public void execute(List tasks) { if (count == 1) { try { - tasks.get(0).getRunnable().run(); + tasks.get(0).run(); } catch (Exception e) { logger.error(e.getMessage()); @@ -63,8 +63,8 @@ public void execute(List tasks) { final List> futures = new ArrayList<>(count); - for (Task command : tasks) { - futures.add(executorService.submit(command.getRunnable())); + for (Runnable task : tasks) { + futures.add(executorService.submit(task)); } for (Future future : futures) { diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/SequentialTaskExecutor.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/SequentialTaskExecutor.java index 1a1a0e5..289c3fd 100644 --- a/src/main/java/com/takipi/oss/storage/fs/concurrent/SequentialTaskExecutor.java +++ b/src/main/java/com/takipi/oss/storage/fs/concurrent/SequentialTaskExecutor.java @@ -10,7 +10,7 @@ public class SequentialTaskExecutor implements TaskExecutor { private static final Logger logger = LoggerFactory.getLogger(ConcurrentTaskExecutor.class); @Override - public void execute(List tasks) { + public void execute(List tasks) { final int count = tasks.size(); @@ -18,9 +18,9 @@ public void execute(List tasks) { logger.debug("---------- Starting sequential execute for " + count + " tasks"); - for (Task task : tasks) { + for (Runnable task : tasks) { try { - task.getRunnable().run(); + task.run(); } catch (Exception e) { logger.error(e.getMessage()); diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/Task.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/Task.java deleted file mode 100644 index 3a64104..0000000 --- a/src/main/java/com/takipi/oss/storage/fs/concurrent/Task.java +++ /dev/null @@ -1,5 +0,0 @@ -package com.takipi.oss.storage.fs.concurrent; - -public interface Task { - Runnable getRunnable(); -} diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/TaskExecutor.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/TaskExecutor.java index 760a84a..ee85d4d 100644 --- a/src/main/java/com/takipi/oss/storage/fs/concurrent/TaskExecutor.java +++ b/src/main/java/com/takipi/oss/storage/fs/concurrent/TaskExecutor.java @@ -3,5 +3,5 @@ import java.util.List; public interface TaskExecutor { - void execute(List tasks); + void execute(List tasks); } diff --git a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java index 12467bc..bb68daa 100644 --- a/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/folder/FolderFilesystem.java @@ -2,8 +2,6 @@ import com.takipi.oss.storage.fs.BaseRecord; import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; -import com.takipi.oss.storage.resources.fs.multifetcher.SequentialMultiFetcher; import org.apache.commons.io.IOUtils; import java.io.*; @@ -75,11 +73,6 @@ public long size(T record) throws IOException { throw new FileNotFoundException(); } - @Override - public MultiFetcher getMultiFetcher() { - return new SequentialMultiFetcher(); - } - protected void beforePut(File file) { file.getParentFile().mkdirs(); } 
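
[Editorial note, not part of the patch] With getMultiFetcher() removed from the Filesystem interface, neither FolderFilesystem nor S3Filesystem decides how multi-fetch requests are executed any more; that choice moves into JsonMultiFetchStorageResource, as the hunk further below shows. A condensed, compilable view of that selection (the helper class and method name are illustrative; raw Filesystem types are used because the generic parameters are not visible in this diff):

    import com.takipi.oss.storage.fs.api.Filesystem;
    import com.takipi.oss.storage.fs.s3.S3Filesystem;
    import com.takipi.oss.storage.resources.fs.multifetcher.ConcurrentMultiFetcher;
    import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher;
    import com.takipi.oss.storage.resources.fs.multifetcher.SequentialMultiFetcher;

    class MultiFetcherSelectionSketch {
        // Only an S3-backed filesystem with a concurrency level above 1 fetches concurrently;
        // everything else, including the folder-backed filesystem, fetches sequentially.
        static MultiFetcher select(Filesystem filesystem, int concurrencyLevel) {
            if ((filesystem instanceof S3Filesystem) && (concurrencyLevel > 1)) {
                return new ConcurrentMultiFetcher(concurrencyLevel);
            }

            return new SequentialMultiFetcher();
        }
    }
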
diff --git a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java index ef435cf..86e77ec 100644 --- a/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java +++ b/src/main/java/com/takipi/oss/storage/fs/s3/S3Filesystem.java @@ -17,29 +17,18 @@ import com.takipi.oss.storage.fs.api.SearchRequest; import com.takipi.oss.storage.fs.api.SearchResult; import com.takipi.oss.storage.helper.FilesystemUtil; -import com.takipi.oss.storage.resources.fs.multifetcher.ConcurrentMultiFetcher; -import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; -import com.takipi.oss.storage.resources.fs.multifetcher.SequentialMultiFetcher; public class S3Filesystem implements Filesystem { - private final MultiFetcher multiFetcher; private final AmazonS3 amazonS3; private final String bucket; private final String pathPrefix; - public S3Filesystem(AmazonS3 amazonS3, - String bucket, - String pathPrefix, - int multiFetcherConcurrencyLevel) { + public S3Filesystem(AmazonS3 amazonS3, String bucket, String pathPrefix) { this.amazonS3 = amazonS3; this.bucket = bucket; this.pathPrefix = pathPrefix; - - this.multiFetcher = (multiFetcherConcurrencyLevel > 1) ? - new ConcurrentMultiFetcher(multiFetcherConcurrencyLevel) : - new SequentialMultiFetcher(); } @Override @@ -110,11 +99,6 @@ public BaseRecord pathToRecord(String path) { return SimplePathRecord.newRecord(path); } - @Override - public MultiFetcher getMultiFetcher() { - return multiFetcher; - } - private String keyOf(T record) { if (this.pathPrefix != null) { return this.pathPrefix + File.separator + record.getPath(); diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index 2190c62..2ff2eac 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -5,6 +5,7 @@ import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.s3.S3Filesystem; import com.takipi.oss.storage.resources.fs.multifetcher.*; import javax.ws.rs.Consumes; @@ -22,10 +23,16 @@ public class JsonMultiFetchStorageResource { private final Filesystem filesystem; private final MultiFetcher multiFetcher; - public JsonMultiFetchStorageResource(Filesystem filesystem) { + public JsonMultiFetchStorageResource(Filesystem filesystem, int multiFetcherConcurrencyLevel) { this.filesystem = filesystem; - this.multiFetcher = filesystem.getMultiFetcher(); + + if ((filesystem instanceof S3Filesystem) && (multiFetcherConcurrencyLevel > 1)) { + this.multiFetcher = new ConcurrentMultiFetcher(multiFetcherConcurrencyLevel); + } + else { + this.multiFetcher = new SequentialMultiFetcher(); + } } @POST diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java index 02603f8..894684f 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java @@ -6,7 +6,6 @@ import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; 
-import com.takipi.oss.storage.fs.concurrent.Task; import com.takipi.oss.storage.fs.concurrent.TaskExecutor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -14,11 +13,11 @@ import java.util.ArrayList; import java.util.List; -abstract class BaseMultiFetcher implements MultiFetcher -{ - private static final Logger logger = LoggerFactory.getLogger(BaseMultiFetcher.class); +abstract class BaseMultiFetcher implements MultiFetcher { + + private static final Logger logger = LoggerFactory.getLogger(BaseMultiFetcher.class); - private final TaskExecutor taskExecutor; + private final TaskExecutor taskExecutor; BaseMultiFetcher(TaskExecutor taskExecutor) { this.taskExecutor = taskExecutor; @@ -29,7 +28,7 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem final int count = request.records.size(); final EncodingType encodingType = request.encodingType; - final List tasks = new ArrayList<>(count); + final List tasks = new ArrayList<>(count); List recordsWithData = new ArrayList<>(count); diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java index 70fe533..092cef8 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java @@ -5,12 +5,11 @@ import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; import com.takipi.oss.storage.fs.concurrent.SimpleStopWatch; -import com.takipi.oss.storage.fs.concurrent.Task; import com.takipi.oss.storage.helper.FilesystemUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class S3ObjectFetcherTask implements Task { +public class S3ObjectFetcherTask implements Runnable { private static final Logger logger = LoggerFactory.getLogger(S3ObjectFetcherTask.class); @@ -25,14 +24,9 @@ public class S3ObjectFetcherTask implements Task { } @Override - public Runnable getRunnable() { - return new Runnable() { - @Override - public void run() { - String result = load(filesystem, recordWithData.getRecord(), encodingType); - recordWithData.setData(result); - } - }; + public void run() { + String result = load(filesystem, recordWithData.getRecord(), encodingType); + recordWithData.setData(result); } private static String load(Filesystem filesystem, Record record, EncodingType encodingType) { From 17814a405f285d8af778ad8db477827f6555e48d Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Wed, 18 Oct 2017 12:28:26 +0300 Subject: [PATCH 44/66] always execute first task in the calling thread --- .../fs/concurrent/ConcurrentTaskExecutor.java | 77 ++++++++++--------- 1 file changed, 39 insertions(+), 38 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java index 059c568..d8ffa36 100644 --- a/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java +++ b/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java @@ -41,42 +41,43 @@ public Thread newThread(Runnable r) executorService = Executors.newFixedThreadPool(maxThreads, threadFactory); } - - @Override - public void execute(List tasks) { - - final int count = tasks.size(); - - logger.debug("---------- Starting concurrent task execute for " + count + " tasks"); - - SimpleStopWatch stopWatch = new SimpleStopWatch(); - - if 
(count == 1) { - try { - tasks.get(0).run(); - } - catch (Exception e) { - logger.error(e.getMessage()); - } - } - else { - - final List> futures = new ArrayList<>(count); - - for (Runnable task : tasks) { - futures.add(executorService.submit(task)); - } - - for (Future future : futures) { - try { - future.get(1, TimeUnit.MINUTES); - } - catch (Exception e) { - logger.error(e.getMessage()); - } - } - } - - logger.debug("---------- Concurrent task executor executed " + count + "tasks in " + stopWatch.elapsed() + " ms"); - } + + @Override + public void execute(List tasks) { + + final int count = tasks.size(); + + if (count == 0) { + return; + } + + logger.debug("---------- Starting concurrent task execute for " + count + " tasks"); + + SimpleStopWatch stopWatch = new SimpleStopWatch(); + + final List> futures = new ArrayList<>(count - 1); + + for (int i = 1; i < count; ++i) { + Runnable task = tasks.get(i); + futures.add(executorService.submit(task)); + } + + try { + tasks.get(0).run(); + } + catch (Exception e) { + logger.error(e.getMessage()); + } + + for (Future future : futures) { + try { + future.get(); + } + catch (Exception e) { + logger.error(e.getMessage()); + } + } + + logger.debug("---------- Concurrent task executor executed " + count + "tasks in " + stopWatch.elapsed() + " ms"); + } } From 1077a9e50213239d086f201341f02ae1e1a30e6c Mon Sep 17 00:00:00 2001 From: David Levanon Date: Wed, 18 Oct 2017 19:20:19 +0300 Subject: [PATCH 45/66] mini refactor --- .../fs/concurrent/ConcurrentTaskExecutor.java | 13 +- .../fs/concurrent/SequentialTaskExecutor.java | 8 +- .../fs/JsonMultiFetchStorageResource.java | 16 ++- .../fs/multifetcher/BaseMultiFetcher.java | 47 -------- .../multifetcher/ConcurrentMultiFetcher.java | 10 -- .../fs/multifetcher/MultiFetcher.java | 40 ++++++- .../fs/multifetcher/S3ObjectFetcherTask.java | 112 +++++++++--------- .../multifetcher/SequentialMultiFetcher.java | 10 -- 8 files changed, 117 insertions(+), 139 deletions(-) delete mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java delete mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java delete mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java index d8ffa36..ddf78fe 100644 --- a/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java +++ b/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java @@ -26,7 +26,7 @@ else if (maxThreads < 2) { maxThreads = 2; } - logger.info("ConcurrentTaskExecutor maximum number of threads = " + maxThreads); + logger.info("ConcurrentTaskExecutor maximum number of threads = {}", maxThreads); ThreadFactory threadFactory = new ThreadFactory() { @Override @@ -51,7 +51,7 @@ public void execute(List tasks) { return; } - logger.debug("---------- Starting concurrent task execute for " + count + " tasks"); + logger.debug("Starting concurrent task execute for {} tasks", count); SimpleStopWatch stopWatch = new SimpleStopWatch(); @@ -63,10 +63,11 @@ public void execute(List tasks) { } try { - tasks.get(0).run(); + Runnable firstTask = tasks.get(0); + firstTask.run(); } catch (Exception e) { - logger.error(e.getMessage()); + logger.error("Error running task", e); } for (Future future : futures) { @@ -74,10 +75,10 @@ public void 
execute(List tasks) { future.get(); } catch (Exception e) { - logger.error(e.getMessage()); + logger.error("Error running task", e); } } - logger.debug("---------- Concurrent task executor executed " + count + "tasks in " + stopWatch.elapsed() + " ms"); + logger.debug("Concurrent task executor executed {} tasks in {} ms", count, stopWatch.elapsed()); } } diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/SequentialTaskExecutor.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/SequentialTaskExecutor.java index 289c3fd..ecd1eda 100644 --- a/src/main/java/com/takipi/oss/storage/fs/concurrent/SequentialTaskExecutor.java +++ b/src/main/java/com/takipi/oss/storage/fs/concurrent/SequentialTaskExecutor.java @@ -7,7 +7,7 @@ public class SequentialTaskExecutor implements TaskExecutor { - private static final Logger logger = LoggerFactory.getLogger(ConcurrentTaskExecutor.class); + private static final Logger logger = LoggerFactory.getLogger(SequentialTaskExecutor.class); @Override public void execute(List tasks) { @@ -16,17 +16,17 @@ public void execute(List tasks) { SimpleStopWatch stopWatch = new SimpleStopWatch(); - logger.debug("---------- Starting sequential execute for " + count + " tasks"); + logger.debug("Starting sequential execute for {} tasks", count); for (Runnable task : tasks) { try { task.run(); } catch (Exception e) { - logger.error(e.getMessage()); + logger.error("Error running task", e); } } - logger.debug("---------- Sequential task executor executed " + count + "tasks in " + stopWatch.elapsed() + " ms"); + logger.debug("Sequential task executor executed {} tasks in {} ms", count, stopWatch.elapsed()); } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index 2ff2eac..3cc2fa5 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -6,7 +6,10 @@ import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; import com.takipi.oss.storage.fs.s3.S3Filesystem; -import com.takipi.oss.storage.resources.fs.multifetcher.*; +import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; +import com.takipi.oss.storage.fs.concurrent.TaskExecutor; +import com.takipi.oss.storage.fs.concurrent.ConcurrentTaskExecutor; +import com.takipi.oss.storage.fs.concurrent.SequentialTaskExecutor; import javax.ws.rs.Consumes; import javax.ws.rs.POST; @@ -27,12 +30,17 @@ public JsonMultiFetchStorageResource(Filesystem filesystem, int multiFet this.filesystem = filesystem; - if ((filesystem instanceof S3Filesystem) && (multiFetcherConcurrencyLevel > 1)) { - this.multiFetcher = new ConcurrentMultiFetcher(multiFetcherConcurrencyLevel); + TaskExecutor taskExecutor; + + if ((filesystem instanceof S3Filesystem) && + (multiFetcherConcurrencyLevel > 1)) { + taskExecutor = new ConcurrentTaskExecutor(multiFetcherConcurrencyLevel); } else { - this.multiFetcher = new SequentialMultiFetcher(); + taskExecutor = new SequentialTaskExecutor(); } + + this.multiFetcher = new MultiFetcher(taskExecutor); } @POST diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java deleted file mode 100644 index 894684f..0000000 --- 
a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/BaseMultiFetcher.java +++ /dev/null @@ -1,47 +0,0 @@ -package com.takipi.oss.storage.resources.fs.multifetcher; - -import com.takipi.oss.storage.data.EncodingType; -import com.takipi.oss.storage.data.RecordWithData; -import com.takipi.oss.storage.data.fetch.MultiFetchRequest; -import com.takipi.oss.storage.data.fetch.MultiFetchResponse; -import com.takipi.oss.storage.fs.Record; -import com.takipi.oss.storage.fs.api.Filesystem; -import com.takipi.oss.storage.fs.concurrent.TaskExecutor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.List; - -abstract class BaseMultiFetcher implements MultiFetcher { - - private static final Logger logger = LoggerFactory.getLogger(BaseMultiFetcher.class); - - private final TaskExecutor taskExecutor; - - BaseMultiFetcher(TaskExecutor taskExecutor) { - this.taskExecutor = taskExecutor; - } - - @Override - public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { - - final int count = request.records.size(); - final EncodingType encodingType = request.encodingType; - final List tasks = new ArrayList<>(count); - - List recordsWithData = new ArrayList<>(count); - - for (Record record : request.records) { - RecordWithData recordWithData = RecordWithData.of(record, null); - recordsWithData.add(recordWithData); - tasks.add(new S3ObjectFetcherTask(recordWithData, filesystem, encodingType)); - } - - taskExecutor.execute(tasks); - - logger.debug("Multi fetched completed fetching of " + count + " objects"); - - return new MultiFetchResponse(recordsWithData); - } -} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java deleted file mode 100644 index c98c2d0..0000000 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/ConcurrentMultiFetcher.java +++ /dev/null @@ -1,10 +0,0 @@ -package com.takipi.oss.storage.resources.fs.multifetcher; - -import com.takipi.oss.storage.fs.concurrent.ConcurrentTaskExecutor; - -public class ConcurrentMultiFetcher extends BaseMultiFetcher { - - public ConcurrentMultiFetcher(int maxThreads) { - super(new ConcurrentTaskExecutor(maxThreads)); - } -} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index f8cc496..6aa03d1 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -1,10 +1,46 @@ package com.takipi.oss.storage.resources.fs.multifetcher; +import com.takipi.oss.storage.data.EncodingType; +import com.takipi.oss.storage.data.RecordWithData; import com.takipi.oss.storage.data.fetch.MultiFetchRequest; import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.concurrent.TaskExecutor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -public interface MultiFetcher { - MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem); +import java.util.ArrayList; +import java.util.List; + +public class MultiFetcher { + + private static final Logger logger = LoggerFactory.getLogger(MultiFetcher.class); + + private final TaskExecutor 
taskExecutor; + + public MultiFetcher(TaskExecutor taskExecutor) { + this.taskExecutor = taskExecutor; + } + + public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { + + final int count = request.records.size(); + final EncodingType encodingType = request.encodingType; + final List tasks = new ArrayList<>(count); + + List recordsWithData = new ArrayList<>(count); + + for (Record record : request.records) { + RecordWithData recordWithData = RecordWithData.of(record, null); + recordsWithData.add(recordWithData); + tasks.add(new S3ObjectFetcherTask(recordWithData, filesystem, encodingType)); + } + + taskExecutor.execute(tasks); + + logger.debug("Multi fetched completed fetching of {} objects", count); + + return new MultiFetchResponse(recordsWithData); + } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java index 092cef8..b87d004 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java @@ -10,60 +10,60 @@ import org.slf4j.LoggerFactory; public class S3ObjectFetcherTask implements Runnable { - - private static final Logger logger = LoggerFactory.getLogger(S3ObjectFetcherTask.class); - - private final RecordWithData recordWithData; - private final Filesystem filesystem; - private final EncodingType encodingType; - - S3ObjectFetcherTask(RecordWithData recordWithData, Filesystem filesystem, EncodingType encodingType) { - this.recordWithData = recordWithData; - this.filesystem = filesystem; - this.encodingType = encodingType; - } - - @Override - public void run() { - String result = load(filesystem, recordWithData.getRecord(), encodingType); - recordWithData.setData(result); - } - - private static String load(Filesystem filesystem, Record record, EncodingType encodingType) { - - SimpleStopWatch stopWatch = new SimpleStopWatch(); - String value = null; - final int MAX_TRIES = 2; - int count = 0; - - while ((value == null) && (count < MAX_TRIES)) { - - if (count++ > 0) { - logger.warn("Retry loading object for key " + record.getKey()); - stopWatch.reset(); - } - - try { - value = FilesystemUtil.read(filesystem, record, encodingType); - } - catch (Exception e) { - // Need this catch because some exceptions inside FilesystemUtil.read are caught and result in a - // null return value, and some are thrown. The code would be simpler if all exceptions were thrown. - } - } - - if (value != null) { - - logger.debug("--------------------- " + Thread.currentThread().getName() + " loaded key " + - record.getKey() + " in " + stopWatch.elapsed() + " ms. " + value.length() + " bytes"); - - return value; - } - else { - - logger.error("Failed to load object for key: " + record.getKey() + ". 
Elapsed time = " + stopWatch.elapsed() + " ms"); - - throw new RuntimeException("Failed to load object for key: " + record.getKey()); - } - } + + private static final Logger logger = LoggerFactory.getLogger(S3ObjectFetcherTask.class); + + private final RecordWithData recordWithData; + private final Filesystem filesystem; + private final EncodingType encodingType; + + S3ObjectFetcherTask(RecordWithData recordWithData, Filesystem filesystem, EncodingType encodingType) { + this.recordWithData = recordWithData; + this.filesystem = filesystem; + this.encodingType = encodingType; + } + + @Override + public void run() { + String result = load(filesystem, recordWithData.getRecord(), encodingType); + recordWithData.setData(result); + } + + private static String load(Filesystem filesystem, Record record, EncodingType encodingType) { + + SimpleStopWatch stopWatch = new SimpleStopWatch(); + String value = null; + final int MAX_TRIES = 2; + int count = 0; + + while ((value == null) && (count < MAX_TRIES)) { + + if (count++ > 0) { + logger.warn("Retry loading object for key " + record.getKey()); + stopWatch.reset(); + } + + try { + value = FilesystemUtil.read(filesystem, record, encodingType); + } + catch (Exception e) { + // Need this catch because some exceptions inside FilesystemUtil.read are caught and result in a + // null return value, and some are thrown. The code would be simpler if all exceptions were thrown. + } + } + + if (value != null) { + + logger.debug("--------------------- " + Thread.currentThread().getName() + " loaded key " + + record.getKey() + " in " + stopWatch.elapsed() + " ms. " + value.length() + " bytes"); + + return value; + } + else { + + logger.error("Failed to load object for key: " + record.getKey() + ". Elapsed time = " + stopWatch.elapsed() + " ms"); + + throw new RuntimeException("Failed to load object for key: " + record.getKey()); + } + } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java deleted file mode 100644 index 7b5c12c..0000000 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/SequentialMultiFetcher.java +++ /dev/null @@ -1,10 +0,0 @@ -package com.takipi.oss.storage.resources.fs.multifetcher; - -import com.takipi.oss.storage.fs.concurrent.SequentialTaskExecutor; - -public class SequentialMultiFetcher extends BaseMultiFetcher { - - public SequentialMultiFetcher() { - super(new SequentialTaskExecutor()); - } -} From 0dff4386b03fcaac5a173d7eaa4af609f7257b6b Mon Sep 17 00:00:00 2001 From: David Levanon Date: Thu, 19 Oct 2017 10:48:59 +0300 Subject: [PATCH 46/66] logging refactor --- .../resources/fs/multifetcher/S3ObjectFetcherTask.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java index b87d004..03a61ed 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java @@ -39,7 +39,7 @@ private static String load(Filesystem filesystem, Record record, Encodin while ((value == null) && (count < MAX_TRIES)) { if (count++ > 0) { - logger.warn("Retry loading object for key " + record.getKey()); + logger.warn("Retry loading object for key {}", record.getKey()); 
stopWatch.reset(); } @@ -53,15 +53,15 @@ private static String load(Filesystem filesystem, Record record, Encodin } if (value != null) { - - logger.debug("--------------------- " + Thread.currentThread().getName() + " loaded key " + - record.getKey() + " in " + stopWatch.elapsed() + " ms. " + value.length() + " bytes"); + logger.debug("{} loaded key {} int {}ms {} bytes", + Thread.currentThread().getName(), record.getKey(), stopWatch.elapsed(), value.length()); return value; } else { - logger.error("Failed to load object for key: " + record.getKey() + ". Elapsed time = " + stopWatch.elapsed() + " ms"); + logger.error("Failed to load object for key: {}. Elapsed time = {} ms", + record.getKey(), stopWatch.elapsed()); throw new RuntimeException("Failed to load object for key: " + record.getKey()); } From b735c201fed7067384f6ecf517d8d0277ca8dc92 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Sun, 22 Oct 2017 20:41:39 +0300 Subject: [PATCH 47/66] added caching --- settings.yml | 9 +- .../storage/TakipiStorageConfiguration.java | 11 - .../takipi/oss/storage/TakipiStorageMain.java | 2 + .../com/takipi/oss/storage/caching/Cache.java | 79 ++++++ .../oss/storage/caching/CacheDelegator.java | 50 ++++ .../oss/storage/caching/CacheLogger.java | 87 ++++++ .../oss/storage/caching/DiskBackedCache.java | 262 ++++++++++++++++++ .../takipi/oss/storage/caching/HashLock.java | 31 +++ .../oss/storage/caching/InMemoryCache.java | 145 ++++++++++ .../caching/SerializableCacheValue.java | 110 ++++++++ .../oss/storage/caching/StringUtil.java | 11 + .../fs/JsonMultiFetchStorageResource.java | 15 +- .../fs/multifetcher/MultiFetcher.java | 55 +++- .../resources/fs/multifetcher/S3Cache.java | 8 + .../fs/multifetcher/S3CacheImpl.java | 36 +++ 15 files changed, 880 insertions(+), 31 deletions(-) create mode 100644 src/main/java/com/takipi/oss/storage/caching/Cache.java create mode 100644 src/main/java/com/takipi/oss/storage/caching/CacheDelegator.java create mode 100644 src/main/java/com/takipi/oss/storage/caching/CacheLogger.java create mode 100644 src/main/java/com/takipi/oss/storage/caching/DiskBackedCache.java create mode 100644 src/main/java/com/takipi/oss/storage/caching/HashLock.java create mode 100644 src/main/java/com/takipi/oss/storage/caching/InMemoryCache.java create mode 100644 src/main/java/com/takipi/oss/storage/caching/SerializableCacheValue.java create mode 100644 src/main/java/com/takipi/oss/storage/caching/StringUtil.java create mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3Cache.java create mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java diff --git a/settings.yml b/settings.yml index a7a3615..01540eb 100644 --- a/settings.yml +++ b/settings.yml @@ -10,9 +10,12 @@ s3Fs: secretKey: multifetch: - concurrencyLevel: 30 # maximum number of threads for concurrent multi-fetch. Set to zero to disable concurrent fetching. - maxCacheSize: 0 # Recommended size >= 4194304. Set to zero to disable caching. - cacheLogLevel: debug # info / debug + + # maximum number of threads for concurrent multi-fetch. Set to zero to disable concurrent fetching. + concurrencyLevel: 30 + + # Recommended size >= 4194304. Set to zero to disable caching. 
+ maxCacheSize: 67108864 #folderFs: # folderPath: /opt/takipi-storage/storage diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java index 2bf986a..4f3f1ad 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java @@ -127,7 +127,6 @@ public static class Multifetch { private Integer concurrencyLevel; private Integer maxCacheSize; - private String cacheLogLevel; @JsonProperty public Integer getConcurrencyLevel() { @@ -148,16 +147,6 @@ public Integer getMaxCacheSize() { public void setMaxCacheSize(Integer maxCacheSize) { this.maxCacheSize = maxCacheSize; } - - @JsonProperty - public String getCacheLogLevel() { - return cacheLogLevel; - } - - @JsonProperty - public void setCacheLogLevel(String cacheLogLevel) { - this.cacheLogLevel = cacheLogLevel; - } } private boolean enableCors; diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index c9eb9f9..d67fd68 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -5,6 +5,7 @@ import javax.servlet.DispatcherType; import javax.servlet.FilterRegistration; +import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; import org.eclipse.jetty.servlets.CrossOriginFilter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,6 +62,7 @@ public void run(TakipiStorageConfiguration configuration, Environment environmen Filesystem filesystem = configureFilesystem(configuration, environment); TakipiStorageConfiguration.Multifetch multifetchConfig = configuration.getMultifetch(); + MultiFetcher.setMaxCacheSize(multifetchConfig.getMaxCacheSize()); environment.healthChecks().register("filesystem", new FilesystemHealthCheck(filesystem)); environment.jersey().register(new BinaryStorageResource(filesystem)); diff --git a/src/main/java/com/takipi/oss/storage/caching/Cache.java b/src/main/java/com/takipi/oss/storage/caching/Cache.java new file mode 100644 index 0000000..c428377 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/caching/Cache.java @@ -0,0 +1,79 @@ +package com.takipi.oss.storage.caching; + +public class Cache { + private final CacheDelegator cacheDelegator; + + public Cache(CacheDelegator cacheDelegator) { + this.cacheDelegator = cacheDelegator; + } + + public void put(String key, V value, Serializer serializer) { + if (!canCache()) { + return; + } + + cacheDelegator.put(key, new SerializableCacheValue(value, serializer)); + } + + public void put(String key, byte[] value) { + if (!canCache()) { + return; + } + + cacheDelegator.put(key, new SerializableCacheValue(value, ByteArraySerializer.instance)); + } + + public void put(String key, V value, byte[] serializedValue) { + if (!canCache()) { + return; + } + + cacheDelegator.put(key, new SerializableCacheValue(value, serializedValue)); + } + + public byte[] get(String key) { + if (!canCache()) { + return null; + } + + SerializableCacheValue result = + cacheDelegator.get(key, new SerializableCacheValue(byte[].class, ByteArraySerializer.instance)); + + return result.deserialize(key); + } + + public V get(String key, Class valueClass, Serializer serializer) { + if (!canCache()) { + return null; + } + + SerializableCacheValue result = + cacheDelegator.get(key, new SerializableCacheValue(valueClass, serializer)); + + return 
result.deserialize(key); + } + + protected boolean canCache() { + return true; + } + + public static interface Serializer { + public byte[] serialize(V value) throws Exception; + + public V deserialize(byte[] bytes) throws Exception; + } + + public static class ByteArraySerializer implements Serializer { + public static ByteArraySerializer instance = new ByteArraySerializer(); + + @Override + public byte[] serialize(byte[] value) throws Exception { + return value; + } + + @Override + public byte[] deserialize(byte[] bytes) throws Exception { + return bytes; + } + } +} diff --git a/src/main/java/com/takipi/oss/storage/caching/CacheDelegator.java b/src/main/java/com/takipi/oss/storage/caching/CacheDelegator.java new file mode 100644 index 0000000..b22d720 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/caching/CacheDelegator.java @@ -0,0 +1,50 @@ +package com.takipi.oss.storage.caching; + +public abstract class CacheDelegator { + private final CacheDelegator parent; + + public CacheDelegator() { + this(null); + } + + public CacheDelegator(CacheDelegator parent) { + this.parent = parent; + } + + public SerializableCacheValue parentGet(String key, SerializableCacheValue result) { + if (this.parent != null) { + return this.parent.get(key, result); + } + + return result; + } + + public SerializableCacheValue parentPut(String key, SerializableCacheValue value) { + return parentPut(key, value, false); + } + + public SerializableCacheValue parentPut(String key, SerializableCacheValue value, boolean overwrite) { + if (this.parent != null) { + return this.parent.put(key, value, overwrite); + } + + return value; + } + + @Override + public String toString() { + if (this.parent == null) { + return "NUL"; + } + + return this.parent.toString(); + } + + public SerializableCacheValue put(String key, SerializableCacheValue value) { + return put(key, value, false); + } + + public abstract SerializableCacheValue put(String key, SerializableCacheValue value, boolean overwrite); + + public abstract SerializableCacheValue get(String key, SerializableCacheValue result); +} diff --git a/src/main/java/com/takipi/oss/storage/caching/CacheLogger.java b/src/main/java/com/takipi/oss/storage/caching/CacheLogger.java new file mode 100644 index 0000000..571ed72 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/caching/CacheLogger.java @@ -0,0 +1,87 @@ +package com.takipi.oss.storage.caching; + +import java.util.concurrent.atomic.AtomicLong; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class CacheLogger extends CacheDelegator { + private static final Logger logger = LoggerFactory.getLogger("backendcache"); + + private static final AtomicLong counter = new AtomicLong(); + + private final long id; + + public CacheLogger(CacheDelegator parent) { + super(parent); + + this.id = counter.incrementAndGet(); + + logger.info("Cache created (id: {}) {}", this.id, parent); + } + + @Override + public SerializableCacheValue get(String key, SerializableCacheValue result) { + long start = System.currentTimeMillis(); + + try { + return parentGet(key, result); + } + finally { + CacheDelegator delegator = result.getRetriever(); + + if (delegator != null) { + logger.info("{} {}ms {} (retriever: {}) {}", + paddedId(), + paddedDiff(start), + paddedVerb("get"), + paddedDelegator(delegator), + key); + } + } + } + + @Override + public SerializableCacheValue put(String key, SerializableCacheValue value) { + return parentPut(key, value, false); + } + + @Override + public SerializableCacheValue put(String key, 
SerializableCacheValue value, boolean overwrite) { + long start = System.currentTimeMillis(); + + try { + return parentPut(key, value, overwrite); + } + finally { + CacheDelegator delegator = value.getUpdater(); + + if (delegator != null) { + logger.info("{} {}ms {} (updater: {}) {}", + paddedId(), + paddedDiff(start), + paddedVerb("put"), + paddedDelegator(delegator), + key); + } + } + } + + private String paddedId() { + return StringUtil.padRight(Long.toString(id), 8); + } + + private String paddedVerb(String verb) { + return StringUtil.padRight(verb, 10); + } + + private String paddedDiff(long start) { + long diff = System.currentTimeMillis() - start; + + return StringUtil.padLeft(Long.toString(diff), 8); + } + + private String paddedDelegator(CacheDelegator delegator) { + return StringUtil.padLeft(delegator.getClass().getSimpleName(), 15); + } +} diff --git a/src/main/java/com/takipi/oss/storage/caching/DiskBackedCache.java b/src/main/java/com/takipi/oss/storage/caching/DiskBackedCache.java new file mode 100644 index 0000000..58b0956 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/caching/DiskBackedCache.java @@ -0,0 +1,262 @@ +package com.takipi.oss.storage.caching; + +import java.io.File; +import java.util.Arrays; +import java.util.Comparator; + +import com.takipi.oss.storage.fs.concurrent.SimpleStopWatch; +import org.apache.commons.io.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DiskBackedCache extends CacheDelegator { + private static final Logger logger = LoggerFactory.getLogger(DiskBackedCache.class); + + private static final HashLock fileAccessLock = new HashLock(); + private static final int MAX_LOGGED_ERRORS = 10; + private static volatile boolean cleanupInProgress = false; + + private final File rootFile; + + private final long totalDiskSpace; + private final long maxDiskSpace; + + private final double cleanupPercentage; + + public DiskBackedCache(File rootFile, double maxDiskUsage, double cleanupPercentage) { + super(null); + + this.rootFile = rootFile; + this.rootFile.mkdirs(); + + this.totalDiskSpace = rootFile.getTotalSpace(); + this.maxDiskSpace = (long) Math.floor(maxDiskUsage * this.totalDiskSpace); + this.cleanupPercentage = cleanupPercentage; + } + + @Override + public String toString() { + return "Disk Cache (path: " + rootFile.getAbsolutePath() + + ") (max: " + maxDiskSpace + + ") (clean: " + cleanupPercentage + ") -> " + super.toString(); + } + + @Override + public SerializableCacheValue get(String key, SerializableCacheValue result) { + try { + return internalGet(key, result); + } + catch (Exception e) { + logger.error("Error getting from disk cache {}", key, e); + return result; + } + } + + public SerializableCacheValue internalGet(String key, SerializableCacheValue result) throws Exception { + File file = keyToFile(key); + + byte[] data = null; + + synchronized (fileAccessLock.get(file)) { + if (!file.canRead()) { + return result; + } + + data = FileUtils.readFileToByteArray(file); + file.setLastModified(System.currentTimeMillis()); + } + + if (data != null) { + result.setSerializedValue(this, data); + } + + return result; + } + + @Override + public SerializableCacheValue put(String key, SerializableCacheValue value, boolean overwrite) { + try { + return internalPut(key, value, overwrite); + } + catch (Exception e) { + logger.error("Error putting to disk cache {}", key, e); + return value; + } + } + + public SerializableCacheValue internalPut(String key, SerializableCacheValue value, boolean overwrite) + throws 
Exception { + File file = keyToFile(key); + + if (!checkDiskUsage(true)) { + cleanup(); + + if (!checkDiskUsage(true)) { + return value; + } + } + + byte[] data = value.serialize(key); + + if (data == null) { + return value; + } + + synchronized (fileAccessLock.get(file)) { + if ((!overwrite) && + (file.canRead())) { + file.setLastModified(System.currentTimeMillis()); + return value; + } + + checkDiskUsage(false); + + FileUtils.writeByteArrayToFile(file, data); + + if ((!file.setReadable(true, false)) || + (!file.setWritable(true, false))) { + throw new IllegalStateException("Unable to set read/write permissions for local file: " + file.getAbsolutePath()); + } + } + + value.setUpdater(this); + + return value; + } + + public T use(String key, CacheFileCallback callback) { + File file = keyToFile(key); + + if (!file.canRead()) { + return null; + } + + synchronized (fileAccessLock.get(file)) { + if (!file.canRead()) { + return null; + } + + return callback.run(file); + } + } + + private void cleanup() { + if (cleanupPercentage == 0.0) { + logger.info("Disk cleanup is disabled."); + return; + } + + if (cleanupInProgress) { + logger.info("Disk cleanup is already in progress; skipping."); + return; + } + + cleanupInProgress = true; + + logger.info("Starting disk cleanup for {} ({}%).", this, (int) (cleanupPercentage * 100)); + + SimpleStopWatch stopwatch = new SimpleStopWatch(); + + try { + String[] files = rootFile.list(); + + if (files == null) { + return; + } + + logger.info("Sorting {} files by last-modified date.", files.length); + + Arrays.sort(files, new Comparator() { + @Override + public int compare(String filename1, String filename2) { + long date1 = new File(filename1).lastModified(); + long date2 = new File(filename2).lastModified(); + + return Long.valueOf(date1).compareTo(Long.valueOf(date2)); + } + }); + + int deleteCounter = (int) Math.floor(files.length * cleanupPercentage); + + logger.info("About to attempt to delete {} old files.", deleteCounter); + + int errorCounter = 0; + int successCounter = 0; + long sizeCounter = 0l; + + for (int i = 0; i < deleteCounter; i++) { + try { + File file = new File(rootFile, files[i]); + + synchronized (fileAccessLock.get(file)) { + if ((file.canWrite()) && + (!file.isDirectory())) { + long fileSize = file.length(); + + FileUtils.forceDelete(file); + + successCounter++; + sizeCounter += fileSize; + } + } + } + catch (Exception ex) { + errorCounter++; + + if (errorCounter <= MAX_LOGGED_ERRORS) { + logger.error("Deleting {} from disk failed: {}: {}", + files[i], ex.getClass().getSimpleName(), ex.getMessage()); + } + } + } + + if (successCounter == 0) { + logger.warn("No files were deleted from: {}.", rootFile); + } + else { + logger.info("A total of {} files ({} bytes) were deleted from: {}.", + successCounter, sizeCounter, rootFile); + } + + if (errorCounter > 0) { + logger.error("A total of {} files could not be deleted.", errorCounter); + } + } + catch (Exception ex) { + logger.error("Disk cleanup failed.", ex); + } + finally { + logger.info("Disk cleanup completed in {} ms.", stopwatch.elapsed()); + + cleanupInProgress = false; + } + } + + private boolean checkDiskUsage(boolean silent) { + long curFreeSpace = rootFile.getUsableSpace(); + long minFreeSpace = totalDiskSpace - maxDiskSpace; + + if (curFreeSpace <= minFreeSpace) { + if (silent) { + return false; + } + else { + throw new IllegalStateException("Max disk usage limit reached: " + + Double.toString((double) curFreeSpace / totalDiskSpace) + + "% left (limit: " + + 
Double.toString((double) minFreeSpace / totalDiskSpace) + ")"); + } + } + + return true; + } + + private File keyToFile(String key) { + String validFileName = key.replace(File.separator, ""); + return new File(rootFile, validFileName); + } + + public static interface CacheFileCallback { + T run(File file); + } +} diff --git a/src/main/java/com/takipi/oss/storage/caching/HashLock.java b/src/main/java/com/takipi/oss/storage/caching/HashLock.java new file mode 100644 index 0000000..22daaf8 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/caching/HashLock.java @@ -0,0 +1,31 @@ +package com.takipi.oss.storage.caching; + +public class HashLock { + private static final int DEFAULT_SIZE = 32; + + private final Object[] locks; + + public HashLock() { + this(DEFAULT_SIZE); + } + + public HashLock(int size) { + locks = new Object[size]; + + for (int i = 0; i < size; i++) { + locks[i] = new Object(); + } + } + + public Object get(Object key) { + int hashCode = key.hashCode(); + + if (hashCode < 0) { + hashCode = (hashCode + 1) + Integer.MAX_VALUE; + } + + int bucket = hashCode % locks.length; + + return locks[bucket]; + } +} diff --git a/src/main/java/com/takipi/oss/storage/caching/InMemoryCache.java b/src/main/java/com/takipi/oss/storage/caching/InMemoryCache.java new file mode 100644 index 0000000..12d4497 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/caching/InMemoryCache.java @@ -0,0 +1,145 @@ +package com.takipi.oss.storage.caching; + +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; + +import com.google.common.cache.Weigher; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; + +public class InMemoryCache extends CacheDelegator { + private static final Logger logger = LoggerFactory.getLogger(InMemoryCache.class); + + private final Cache cache; + private final String description; + + public static class Builder { + private CacheBuilder cacheBuilder = CacheBuilder.newBuilder(); + private CacheDelegator parent = null; + private String description = "Memory Cache"; + + public Builder setParentDelegator(CacheDelegator parent) { + this.parent = parent; + return this; + } + + public Builder setMaxElementCount(int maxElementCount) { + cacheBuilder.maximumSize(maxElementCount); + description += " (max elements = " + maxElementCount + ")"; + return this; + } + + public Builder setMaxSize(long maxSize) { + Weigher weigher = new Weigher() { + @Override + public int weigh(String s, Object o) { + byte[] bytes = (byte[]) o; + return s.length() + bytes.length; + } + }; + cacheBuilder.weigher(weigher); + cacheBuilder.maximumWeight(maxSize); + description += " (max size = " + maxSize + ")"; + return this; + } + + public Builder setExpiry(int expiry, TimeUnit timeUnit) { + cacheBuilder.expireAfterAccess(expiry, timeUnit); + description += " (expiry = " + expiry + " " + timeUnit + ")"; + return this; + } + + public InMemoryCache build() { + Cache cache = cacheBuilder.build(); + return new InMemoryCache(parent, cache, description); + } + } + + private InMemoryCache(CacheDelegator parent, Cache cache, String description) { + super(parent); + + this.cache = cache; + this.description = description; + } + + @Override + public String toString() { + return description + " -> " + super.toString(); + } + + @Override + public SerializableCacheValue get(String key, SerializableCacheValue result) { + try { + return internalGet(key, result); + } + catch (Exception e) { + 
logger.error("Error getting from in memory cache {}", key, e); + return result; + } + } + + public SerializableCacheValue internalGet(final String key, final SerializableCacheValue result) { + Object value = null; + + try { + value = cache.get(key, new Callable() { + @Override + public Object call() throws Exception { + SerializableCacheValue parentResult = InMemoryCache.this.parentGet(key, result); + + Object resultObject = null; + + if (parentResult != null) { + resultObject = parentResult.deserialize(key); + } + + if (resultObject == null) { + throw new IllegalStateException("Object with key: " + key + "not found in cache"); + } + + return resultObject; + } + }); + } + catch (Exception e) { + } + + if (value != null) { + result.setValue(this, value); + } + + return result; + } + + @Override + public SerializableCacheValue put(String key, SerializableCacheValue value, boolean overwrite) { + try { + // The google cache we are using automatically overwrites values if a key exists + // so we don't need to pass the overwrite param + return internalPut(key, value); + } + catch (Exception e) { + logger.error("Error putting to in memory cache {}", key, e); + + return value; + } + } + + public SerializableCacheValue internalPut(final String key, final SerializableCacheValue value) { + if (cache.getIfPresent(key) != null) { + return value; + } + + value.setUpdater(InMemoryCache.this); + cache.put(key, value.deserialize(key)); + + return InMemoryCache.this.parentPut(key, value); + } + + public long size() { + return cache.size(); + } +} diff --git a/src/main/java/com/takipi/oss/storage/caching/SerializableCacheValue.java b/src/main/java/com/takipi/oss/storage/caching/SerializableCacheValue.java new file mode 100644 index 0000000..daf0987 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/caching/SerializableCacheValue.java @@ -0,0 +1,110 @@ +package com.takipi.oss.storage.caching; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SerializableCacheValue { + private static final Logger logger = LoggerFactory.getLogger(SerializableCacheValue.class); + + private V value; + private Cache.Serializer serializer; + private byte[] serializedValue; + private Class valueClass; + + private CacheDelegator firstRetriever; + private CacheDelegator lastUpdater; + + public SerializableCacheValue(V value, Cache.Serializer serializer) { + this.value = value; + this.serializer = serializer; + } + + public SerializableCacheValue(Class valueClass, Cache.Serializer serializer) { + this.valueClass = valueClass; + this.serializer = serializer; + } + + public SerializableCacheValue(V value, byte[] serializedValue) { + this.value = value; + this.serializedValue = serializedValue; + } + + public CacheDelegator getRetriever() { + return this.firstRetriever; + } + + public CacheDelegator getUpdater() { + return this.lastUpdater; + } + + public void setUpdater(CacheDelegator updater) { + updateUpdater(updater); + } + + public void setSerializedValue(CacheDelegator retriever, byte[] serializedValue) { + updateRetriever(retriever); + this.serializedValue = serializedValue; + } + + public void setValue(CacheDelegator retriever, Object value) { + if (!valueClass.isInstance(value)) { + logger.error("Error setting value, Type mismatch (expected: {}; found: {}).", + valueClass.getSimpleName(), value.getClass().getSimpleName()); + return; + } + + updateRetriever(retriever); + this.value = valueClass.cast(value); + } + + private void updateRetriever(CacheDelegator retriever) { + if (this.firstRetriever 
== null) { + this.firstRetriever = retriever; + } + } + + private void updateUpdater(CacheDelegator updater) { + this.lastUpdater = updater; + } + + public V deserialize(String name) { + try { + if (value == null) { + if (serializedValue == null) { + return null; + } + + value = serializer.deserialize(serializedValue); + + if (!valueClass.isInstance(value)) { + logger.error("Error serializing, Type mismatch for '{}' in cache (expected: {}; found: {}).", + name, valueClass.getSimpleName(), value.getClass().getSimpleName()); + + return null; + } + + return valueClass.cast(value); + } + + return value; + } + catch (Exception e) { + logger.error("Error deserializing '{}'", name, e); + return null; + } + } + + public byte[] serialize(String name) { + try { + if (serializedValue == null) { + serializedValue = serializer.serialize(value); + } + + return serializedValue; + } + catch (Exception e) { + logger.error("Error serializing '{}'", name, e); + return null; + } + } +} diff --git a/src/main/java/com/takipi/oss/storage/caching/StringUtil.java b/src/main/java/com/takipi/oss/storage/caching/StringUtil.java new file mode 100644 index 0000000..9c7a85a --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/caching/StringUtil.java @@ -0,0 +1,11 @@ +package com.takipi.oss.storage.caching; + +public class StringUtil { + public static String padRight(String str, int targetLength) { + return String.format("%1$-" + targetLength + "s", str); + } + + public static String padLeft(String str, int targetLength) { + return String.format("%1$" + targetLength + "s", str); + } +} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index 3cc2fa5..be8845a 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -22,27 +22,26 @@ @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) public class JsonMultiFetchStorageResource { - + private final Filesystem filesystem; private final MultiFetcher multiFetcher; - + public JsonMultiFetchStorageResource(Filesystem filesystem, int multiFetcherConcurrencyLevel) { this.filesystem = filesystem; TaskExecutor taskExecutor; - - if ((filesystem instanceof S3Filesystem) && - (multiFetcherConcurrencyLevel > 1)) { + + if ((filesystem instanceof S3Filesystem) && (multiFetcherConcurrencyLevel > 1)) { taskExecutor = new ConcurrentTaskExecutor(multiFetcherConcurrencyLevel); } else { taskExecutor = new SequentialTaskExecutor(); } - + this.multiFetcher = new MultiFetcher(taskExecutor); } - + @POST @Timed public Response post(MultiFetchRequest request) { @@ -55,5 +54,5 @@ public Response post(MultiFetchRequest request) { return Response.serverError().entity("Problem getting keys").build(); } } - + } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index 6aa03d1..5811338 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -16,31 +16,68 @@ public class MultiFetcher { private static final Logger logger = LoggerFactory.getLogger(MultiFetcher.class); + private static long maxCacheSize = 33554432; private final TaskExecutor taskExecutor; + private final 
static S3Cache cache; + + static { + if (maxCacheSize > 0) { + cache = new S3CacheImpl(maxCacheSize); + } + else { + cache = new S3Cache() { + @Override + public String get(String key) { + return null; + } + + @Override + public void put(String key, String value) { + } + }; + } + } public MultiFetcher(TaskExecutor taskExecutor) { this.taskExecutor = taskExecutor; } + public static void setMaxCacheSize(long maxCacheSize) { + MultiFetcher.maxCacheSize = maxCacheSize; + } + public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { - - final int count = request.records.size(); + + List records = request.records; + final int count = records.size(); final EncodingType encodingType = request.encodingType; - final List tasks = new ArrayList<>(count); + final List recordsWithData = new ArrayList<>(count); + final List recordsToFetch = new ArrayList<>(count); - List recordsWithData = new ArrayList<>(count); - - for (Record record : request.records) { - RecordWithData recordWithData = RecordWithData.of(record, null); + for (Record record : records) { + String value = cache.get(record.getKey()); + RecordWithData recordWithData = RecordWithData.of(record, value); recordsWithData.add(recordWithData); + if (value == null) { + recordsToFetch.add(recordWithData); + } + } + + final List tasks = new ArrayList<>(recordsToFetch.size()); + + for (RecordWithData recordWithData : recordsToFetch) { tasks.add(new S3ObjectFetcherTask(recordWithData, filesystem, encodingType)); } taskExecutor.execute(tasks); - logger.debug("Multi fetched completed fetching of {} objects", count); - + logger.debug("Multi fetcher completed fetching of {} objects", count); + + for (RecordWithData recordWithData : recordsToFetch) { + cache.put(recordWithData.getRecord().getKey(), recordWithData.getData()); + } + return new MultiFetchResponse(recordsWithData); } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3Cache.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3Cache.java new file mode 100644 index 0000000..5cf6a30 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3Cache.java @@ -0,0 +1,8 @@ +package com.takipi.oss.storage.resources.fs.multifetcher; + +interface S3Cache { + + String get(String key); + + void put(String key, String value); +} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java new file mode 100644 index 0000000..bbb4363 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java @@ -0,0 +1,36 @@ +package com.takipi.oss.storage.resources.fs.multifetcher; + +import com.takipi.oss.storage.caching.Cache; +import com.takipi.oss.storage.caching.InMemoryCache; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class S3CacheImpl implements S3Cache { + + private static final Logger logger = LoggerFactory.getLogger(S3CacheImpl.class); + + private final InMemoryCache memoryCache; + private final Cache cache; + + public S3CacheImpl(long maxSize) { + memoryCache = new InMemoryCache.Builder().setMaxSize(maxSize).build(); + cache = new Cache(memoryCache); + } + + public String get(String key) { + + byte[] bytes = cache.get(key); + + try { + return new String(bytes, "UTF-8"); + } + catch (Exception e) { + logger.error(e.getMessage()); + return null; + } + } + + public void put(String key, String value) { + cache.put(key, value.getBytes()); + } 
+} From 04971c3f1d2ca19dd89a8bb84b03d6dbece03e0c Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 23 Oct 2017 12:50:14 +0300 Subject: [PATCH 48/66] added caching --- .../fs/multifetcher/MultiFetcher.java | 13 +++++++--- .../fs/multifetcher/S3ObjectFetcherTask.java | 25 ++++++++----------- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index 5811338..f2e93d8 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -1,11 +1,11 @@ package com.takipi.oss.storage.resources.fs.multifetcher; -import com.takipi.oss.storage.data.EncodingType; import com.takipi.oss.storage.data.RecordWithData; import com.takipi.oss.storage.data.fetch.MultiFetchRequest; import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; import com.takipi.oss.storage.fs.api.Filesystem; +import com.takipi.oss.storage.fs.concurrent.SimpleStopWatch; import com.takipi.oss.storage.fs.concurrent.TaskExecutor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,11 +49,13 @@ public static void setMaxCacheSize(long maxCacheSize) { public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { + SimpleStopWatch stopWatch = new SimpleStopWatch(); List records = request.records; final int count = records.size(); - final EncodingType encodingType = request.encodingType; final List recordsWithData = new ArrayList<>(count); final List recordsToFetch = new ArrayList<>(count); + + logger.debug("------------ Multi fetcher commencing load of {} objects", count); for (Record record : records) { String value = cache.get(record.getKey()); @@ -62,17 +64,20 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem if (value == null) { recordsToFetch.add(recordWithData); } + else { + logger.debug("Multi fetcher found key {} in cache", record.getKey()); + } } final List tasks = new ArrayList<>(recordsToFetch.size()); for (RecordWithData recordWithData : recordsToFetch) { - tasks.add(new S3ObjectFetcherTask(recordWithData, filesystem, encodingType)); + tasks.add(new S3ObjectFetcherTask(recordWithData, filesystem, request.encodingType)); } taskExecutor.execute(tasks); - logger.debug("Multi fetcher completed fetching of {} objects", count); + logger.debug("------------ Multi fetcher completed loading {} objects in {} ms", count, stopWatch.elapsed()); for (RecordWithData recordWithData : recordsToFetch) { cache.put(recordWithData.getRecord().getKey(), recordWithData.getData()); diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java index 03a61ed..cf28f5f 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java @@ -31,7 +31,8 @@ public void run() { private static String load(Filesystem filesystem, Record record, EncodingType encodingType) { - SimpleStopWatch stopWatch = new SimpleStopWatch(); + final SimpleStopWatch stopWatch = new SimpleStopWatch(); + final String key = record.getKey(); String value = null; final int MAX_TRIES = 2; int count = 0; @@ -39,7 +40,7 @@ private static String 
load(Filesystem filesystem, Record record, Encodin while ((value == null) && (count < MAX_TRIES)) { if (count++ > 0) { - logger.warn("Retry loading object for key {}", record.getKey()); + logger.warn("Retry loading object for key {}", key); stopWatch.reset(); } @@ -51,19 +52,15 @@ private static String load(Filesystem filesystem, Record record, Encodin // null return value, and some are thrown. The code would be simpler if all exceptions were thrown. } } + + long elapsed = stopWatch.elapsed(); - if (value != null) { - logger.debug("{} loaded key {} int {}ms {} bytes", - Thread.currentThread().getName(), record.getKey(), stopWatch.elapsed(), value.length()); - - return value; - } - else { - - logger.error("Failed to load object for key: {}. Elapsed time = {} ms", - record.getKey(), stopWatch.elapsed()); - - throw new RuntimeException("Failed to load object for key: " + record.getKey()); + if (value == null) { + logger.error("Failed to load object for key: {}. Elapsed time = {} ms", key, elapsed); + throw new RuntimeException("Failed to load object for key: " + key); } + + logger.debug("{} loaded key {} in {} ms. {} bytes", Thread.currentThread().getName(), key, elapsed, value.length()); + return value; } } From 75920fd9f2efe2a5592468818d0df7487739a5dd Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 23 Oct 2017 13:10:42 +0300 Subject: [PATCH 49/66] added caching --- .../resources/fs/multifetcher/MultiFetcher.java | 2 ++ .../resources/fs/multifetcher/S3CacheImpl.java | 16 ++++++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index f2e93d8..4fe96bb 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -82,6 +82,8 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem for (RecordWithData recordWithData : recordsToFetch) { cache.put(recordWithData.getRecord().getKey(), recordWithData.getData()); } + + logger.debug("Multi fetcher cached {} objects.", recordsToFetch.size()); return new MultiFetchResponse(recordsWithData); } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java index bbb4363..19f32e7 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java @@ -21,13 +21,17 @@ public String get(String key) { byte[] bytes = cache.get(key); - try { - return new String(bytes, "UTF-8"); - } - catch (Exception e) { - logger.error(e.getMessage()); - return null; + if (bytes != null) { + + try { + return new String(bytes, "UTF-8"); + } + catch (Exception e) { + logger.error(e.getMessage()); + } } + + return null; } public void put(String key, String value) { From 69525eb657aae77f54f401b685e1668bf8ae5b00 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Tue, 24 Oct 2017 18:52:24 +0300 Subject: [PATCH 50/66] Increase the concurrent thread executor max threads to 500. Multifetcher loads silver number parts in batch sizes instead of batch counts. 
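
Note: this patch caps each multi-fetch request by an estimated byte budget instead of a fixed record count, using a running average of previously loaded part sizes. The sketch below illustrates the idea only; the names SizeEstimatorSketch, byteBudget, and trimToBudget are illustrative and not part of the project.

    import java.util.List;

    // Illustrative sketch of size-based batching: track a running average of part sizes
    // and trim an incoming batch so its estimated total stays within a byte budget.
    class SizeEstimatorSketch {
        private long totalBytes = 0;
        private long partsSeen = 0;

        // Record the size of a part that was actually loaded.
        synchronized void record(long sizeBytes) {
            totalBytes += sizeBytes;
            partsSeen++;
        }

        // Fall back to a default guess until enough samples exist
        // (the patch uses 1700 bytes until 10 parts have been seen).
        synchronized int estimatedPartSize() {
            return (partsSeen < 10) ? 1700 : (int) (totalBytes / partsSeen);
        }

        // Keep only as many records as the budget allows; the lower bound of 1 is a
        // guard added for this sketch, not something the patch itself does.
        <T> List<T> trimToBudget(List<T> batch, int byteBudget) {
            int maxCount = Math.max(1, byteBudget / estimatedPartSize());
            return (batch.size() > maxCount) ? batch.subList(0, maxCount) : batch;
        }
    }

In the patch, the trimmed list is what loadData goes on to fetch, so records beyond the budget are simply left out of that response.
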
--- .../storage/data/fetch/MultiFetchRequest.java | 1 + .../fs/concurrent/ConcurrentTaskExecutor.java | 8 ++- .../fs/multifetcher/MultiFetcher.java | 57 ++++++++++++++----- 3 files changed, 48 insertions(+), 18 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/data/fetch/MultiFetchRequest.java b/src/main/java/com/takipi/oss/storage/data/fetch/MultiFetchRequest.java index 85efbe9..aad34ea 100644 --- a/src/main/java/com/takipi/oss/storage/data/fetch/MultiFetchRequest.java +++ b/src/main/java/com/takipi/oss/storage/data/fetch/MultiFetchRequest.java @@ -8,4 +8,5 @@ public class MultiFetchRequest { public EncodingType encodingType; public List records; + public int maxBatchSize; } diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java index ddf78fe..d9e6c5c 100644 --- a/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java +++ b/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java @@ -16,10 +16,12 @@ public class ConcurrentTaskExecutor implements TaskExecutor { private final AtomicInteger threadCount = new AtomicInteger(); public ConcurrentTaskExecutor(int maxThreads) { + + int maxAllowedThreads = 500; - if (maxThreads > 50) { - logger.warn("ConcurrentTaskExecutor cannot have more than 50 threads"); - maxThreads = 50; + if (maxThreads > maxAllowedThreads) { + logger.warn("ConcurrentTaskExecutor cannot have more than {} threads", maxAllowedThreads); + maxThreads = maxAllowedThreads; } else if (maxThreads < 2) { logger.warn("ConcurrentTaskExecutor cannot have less than 2 threads"); diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index 4fe96bb..89a539f 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -14,12 +14,27 @@ import java.util.List; public class MultiFetcher { - + + private static class PartSizeEstimator { + private long totalSizeLoaded = 0; + private long numberOfPartsLoaded = 0; + synchronized void updateStats(long size) { + if (numberOfPartsLoaded < 1000000) { + totalSizeLoaded += size; + ++numberOfPartsLoaded; + } + } + synchronized int getEstimatedSizePerPart() { + return (numberOfPartsLoaded < 10) ? 1700 : (int)(totalSizeLoaded / numberOfPartsLoaded); + } + } + private static final Logger logger = LoggerFactory.getLogger(MultiFetcher.class); private static long maxCacheSize = 33554432; private final TaskExecutor taskExecutor; private final static S3Cache cache; + private static PartSizeEstimator partSizeEstimator = new PartSizeEstimator(); static { if (maxCacheSize > 0) { @@ -48,15 +63,24 @@ public static void setMaxCacheSize(long maxCacheSize) { } public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { - - SimpleStopWatch stopWatch = new SimpleStopWatch(); + + int estimatedSizePerPart = partSizeEstimator.getEstimatedSizePerPart(); + final int maxBatchCount = request.maxBatchSize / estimatedSizePerPart; + logger.info("Max batch size = {}. Estimated size per part = {}. Max batch count = {}", + request.maxBatchSize, estimatedSizePerPart, maxBatchCount); + List records = request.records; + records = (records.size() > maxBatchCount) ? 
records.subList(0, maxBatchCount) : records; + + final SimpleStopWatch stopWatch = new SimpleStopWatch(); final int count = records.size(); final List recordsWithData = new ArrayList<>(count); final List recordsToFetch = new ArrayList<>(count); - + logger.debug("------------ Multi fetcher commencing load of {} objects", count); - + + long totalSize = 0; + for (Record record : records) { String value = cache.get(record.getKey()); RecordWithData recordWithData = RecordWithData.of(record, value); @@ -65,26 +89,29 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem recordsToFetch.add(recordWithData); } else { - logger.debug("Multi fetcher found key {} in cache", record.getKey()); + totalSize += value.length(); + logger.debug("Multi fetcher found key {} in cache. {} bytes", record.getKey(), value.length()); } } - + final List tasks = new ArrayList<>(recordsToFetch.size()); - + for (RecordWithData recordWithData : recordsToFetch) { tasks.add(new S3ObjectFetcherTask(recordWithData, filesystem, request.encodingType)); } - + taskExecutor.execute(tasks); - - logger.debug("------------ Multi fetcher completed loading {} objects in {} ms", count, stopWatch.elapsed()); - + for (RecordWithData recordWithData : recordsToFetch) { - cache.put(recordWithData.getRecord().getKey(), recordWithData.getData()); + String value = recordWithData.getData(); + cache.put(recordWithData.getRecord().getKey(), value); + totalSize += value.length(); + partSizeEstimator.updateStats(value.length()); } - logger.debug("Multi fetcher cached {} objects.", recordsToFetch.size()); - + logger.debug("------------ Multi fetcher completed loading {} objects in {} ms. Total bytes fetched = {}", + count, stopWatch.elapsed(), totalSize); + return new MultiFetchResponse(recordsWithData); } } From 1056ac9a7829543b98d7a1bc45ac0f5053cc60a6 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 30 Oct 2017 16:27:02 +0200 Subject: [PATCH 51/66] small refactor --- .../fs/multifetcher/MultiFetcher.java | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index 89a539f..b7d9b8e 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -93,20 +93,23 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem logger.debug("Multi fetcher found key {} in cache. 
{} bytes", record.getKey(), value.length()); } } + + if (!recordsToFetch.isEmpty()) { - final List tasks = new ArrayList<>(recordsToFetch.size()); + final List tasks = new ArrayList<>(recordsToFetch.size()); - for (RecordWithData recordWithData : recordsToFetch) { - tasks.add(new S3ObjectFetcherTask(recordWithData, filesystem, request.encodingType)); - } + for (RecordWithData recordWithData : recordsToFetch) { + tasks.add(new S3ObjectFetcherTask(recordWithData, filesystem, request.encodingType)); + } - taskExecutor.execute(tasks); + taskExecutor.execute(tasks); - for (RecordWithData recordWithData : recordsToFetch) { - String value = recordWithData.getData(); - cache.put(recordWithData.getRecord().getKey(), value); - totalSize += value.length(); - partSizeEstimator.updateStats(value.length()); + for (RecordWithData recordWithData : recordsToFetch) { + String value = recordWithData.getData(); + cache.put(recordWithData.getRecord().getKey(), value); + totalSize += value.length(); + partSizeEstimator.updateStats(value.length()); + } } logger.debug("------------ Multi fetcher completed loading {} objects in {} ms. Total bytes fetched = {}", From aed75819f2302d4f0a1c212b0ef5e340f5429282 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 30 Oct 2017 18:43:01 +0200 Subject: [PATCH 52/66] small refactor --- .../fs/multifetcher/MultiFetcher.java | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index b7d9b8e..4d8e323 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -15,26 +15,36 @@ public class MultiFetcher { + private static final long DEFAULT_MAX_CACHE_SIZE = 33554432; // 32 MB + private static class PartSizeEstimator { + + private static long MIN_LOADED_PARTS_FOR_SIZE_ESTIMATION = 10; + private static long DEFAULT_PART_SIZE_ESTIMATION = 1700; + private static long MAX_TOTAL_SIZE = 1L << 30; + private long totalSizeLoaded = 0; private long numberOfPartsLoaded = 0; + synchronized void updateStats(long size) { - if (numberOfPartsLoaded < 1000000) { + if (totalSizeLoaded < MAX_TOTAL_SIZE) { totalSizeLoaded += size; ++numberOfPartsLoaded; } } + synchronized int getEstimatedSizePerPart() { - return (numberOfPartsLoaded < 10) ? 1700 : (int)(totalSizeLoaded / numberOfPartsLoaded); + return (int)((numberOfPartsLoaded < MIN_LOADED_PARTS_FOR_SIZE_ESTIMATION) ? + DEFAULT_PART_SIZE_ESTIMATION : (totalSizeLoaded / numberOfPartsLoaded)); } } private static final Logger logger = LoggerFactory.getLogger(MultiFetcher.class); - private static long maxCacheSize = 33554432; + private static long maxCacheSize = DEFAULT_MAX_CACHE_SIZE; private final TaskExecutor taskExecutor; private final static S3Cache cache; - private static PartSizeEstimator partSizeEstimator = new PartSizeEstimator(); + private static final PartSizeEstimator partSizeEstimator = new PartSizeEstimator(); static { if (maxCacheSize > 0) { From 570e2160232d79b69a4abb55ca6607c6a1cc3a5f Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Tue, 31 Oct 2017 13:22:38 +0200 Subject: [PATCH 53/66] removed thread limit for ConcurrentTaskExecutor and did a small refactor. 
--- .../fs/concurrent/ConcurrentTaskExecutor.java | 13 +------------ .../resources/fs/multifetcher/MultiFetcher.java | 12 ++++++++---- 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java b/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java index d9e6c5c..daccf37 100644 --- a/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java +++ b/src/main/java/com/takipi/oss/storage/fs/concurrent/ConcurrentTaskExecutor.java @@ -16,18 +16,7 @@ public class ConcurrentTaskExecutor implements TaskExecutor { private final AtomicInteger threadCount = new AtomicInteger(); public ConcurrentTaskExecutor(int maxThreads) { - - int maxAllowedThreads = 500; - - if (maxThreads > maxAllowedThreads) { - logger.warn("ConcurrentTaskExecutor cannot have more than {} threads", maxAllowedThreads); - maxThreads = maxAllowedThreads; - } - else if (maxThreads < 2) { - logger.warn("ConcurrentTaskExecutor cannot have less than 2 threads"); - maxThreads = 2; - } - + logger.info("ConcurrentTaskExecutor maximum number of threads = {}", maxThreads); ThreadFactory threadFactory = new ThreadFactory() { diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index 4d8e323..f9a2719 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -20,7 +20,7 @@ public class MultiFetcher { private static class PartSizeEstimator { private static long MIN_LOADED_PARTS_FOR_SIZE_ESTIMATION = 10; - private static long DEFAULT_PART_SIZE_ESTIMATION = 1700; + private static int DEFAULT_PART_SIZE_ESTIMATION = 1700; private static long MAX_TOTAL_SIZE = 1L << 30; private long totalSizeLoaded = 0; @@ -34,8 +34,12 @@ synchronized void updateStats(long size) { } synchronized int getEstimatedSizePerPart() { - return (int)((numberOfPartsLoaded < MIN_LOADED_PARTS_FOR_SIZE_ESTIMATION) ? - DEFAULT_PART_SIZE_ESTIMATION : (totalSizeLoaded / numberOfPartsLoaded)); + if (numberOfPartsLoaded < MIN_LOADED_PARTS_FOR_SIZE_ESTIMATION) { + return DEFAULT_PART_SIZE_ESTIMATION; + } + else { + return (int)(totalSizeLoaded / numberOfPartsLoaded); + } } } @@ -122,7 +126,7 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem } } - logger.debug("------------ Multi fetcher completed loading {} objects in {} ms. Total bytes fetched = {}", + logger.info("------------ Multi fetcher completed loading {} objects in {} ms. 
Total bytes fetched = {}", count, stopWatch.elapsed(), totalSize); return new MultiFetchResponse(recordsWithData); From 7ab567f1fa186a539f6615838a35b52643e6d27b Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Tue, 31 Oct 2017 13:46:52 +0200 Subject: [PATCH 54/66] modified logs --- .../oss/storage/resources/fs/multifetcher/MultiFetcher.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index f9a2719..579686d 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -91,7 +91,7 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem final List recordsWithData = new ArrayList<>(count); final List recordsToFetch = new ArrayList<>(count); - logger.debug("------------ Multi fetcher commencing load of {} objects", count); + logger.debug("Multi fetcher commencing load of {} objects", count); long totalSize = 0; @@ -126,8 +126,8 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem } } - logger.info("------------ Multi fetcher completed loading {} objects in {} ms. Total bytes fetched = {}", - count, stopWatch.elapsed(), totalSize); + logger.info("Multi fetcher loaded {} parts in {} ms. {} parts found in cache. {} bytes total.", + count, stopWatch.elapsed(), (records.size() - recordsToFetch.size()), totalSize); return new MultiFetchResponse(recordsWithData); } From ff1ce11fd5ddc122f1d4fdeb51a355b1dea85c2a Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Wed, 1 Nov 2017 11:51:55 +0200 Subject: [PATCH 55/66] refactoring. 
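
Note: besides moving StringUtil into the helper package and passing the whole Multifetch config into the resource, this refactor replaces the static cache initializer with a per-instance choice between S3CacheImpl and a no-op S3DummyCache, so callers never have to branch on whether caching is enabled. A minimal standalone sketch of that null-object selection, assuming only the two-method S3Cache interface shown in the diff (the sketch names are illustrative):

    // Minimal sketch of the null-object selection; S3CacheSketch and NoOpCache are illustrative names.
    interface S3CacheSketch {
        String get(String key);
        void put(String key, String value);
    }

    // No-op implementation used when caching is disabled (maxCacheSize <= 0):
    // lookups always miss and writes are silently dropped.
    final class NoOpCache implements S3CacheSketch {
        static final NoOpCache INSTANCE = new NoOpCache();
        private NoOpCache() { }
        @Override public String get(String key) { return null; }
        @Override public void put(String key, String value) { }
    }

    final class CacheSelection {
        // realCache stands in for something like S3CacheImpl; any S3CacheSketch works here.
        static S3CacheSketch choose(int maxCacheSize, S3CacheSketch realCache) {
            return (maxCacheSize > 0) ? realCache : NoOpCache.INSTANCE;
        }
    }
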
--- .../takipi/oss/storage/TakipiStorageMain.java | 3 +- .../oss/storage/caching/CacheLogger.java | 1 + .../{caching => helper}/StringUtil.java | 2 +- .../fs/JsonMultiFetchStorageResource.java | 12 +++++--- .../fs/multifetcher/MultiFetcher.java | 30 +++---------------- .../fs/multifetcher/S3CacheImpl.java | 4 +-- .../fs/multifetcher/S3DummyCache.java | 20 +++++++++++++ 7 files changed, 37 insertions(+), 35 deletions(-) rename src/main/java/com/takipi/oss/storage/{caching => helper}/StringUtil.java (88%) create mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3DummyCache.java diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index d67fd68..7f8eff8 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -62,11 +62,10 @@ public void run(TakipiStorageConfiguration configuration, Environment environmen Filesystem filesystem = configureFilesystem(configuration, environment); TakipiStorageConfiguration.Multifetch multifetchConfig = configuration.getMultifetch(); - MultiFetcher.setMaxCacheSize(multifetchConfig.getMaxCacheSize()); environment.healthChecks().register("filesystem", new FilesystemHealthCheck(filesystem)); environment.jersey().register(new BinaryStorageResource(filesystem)); - environment.jersey().register(new JsonMultiFetchStorageResource(filesystem, multifetchConfig.getConcurrencyLevel())); + environment.jersey().register(new JsonMultiFetchStorageResource(filesystem, multifetchConfig)); environment.jersey().register(new JsonMultiDeleteStorageResource(filesystem)); environment.jersey().register(new JsonSimpleFetchStorageResource(filesystem)); environment.jersey().register(new JsonSimpleSearchStorageResource(filesystem)); diff --git a/src/main/java/com/takipi/oss/storage/caching/CacheLogger.java b/src/main/java/com/takipi/oss/storage/caching/CacheLogger.java index 571ed72..d3cb68e 100644 --- a/src/main/java/com/takipi/oss/storage/caching/CacheLogger.java +++ b/src/main/java/com/takipi/oss/storage/caching/CacheLogger.java @@ -2,6 +2,7 @@ import java.util.concurrent.atomic.AtomicLong; +import com.takipi.oss.storage.helper.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/main/java/com/takipi/oss/storage/caching/StringUtil.java b/src/main/java/com/takipi/oss/storage/helper/StringUtil.java similarity index 88% rename from src/main/java/com/takipi/oss/storage/caching/StringUtil.java rename to src/main/java/com/takipi/oss/storage/helper/StringUtil.java index 9c7a85a..666c708 100644 --- a/src/main/java/com/takipi/oss/storage/caching/StringUtil.java +++ b/src/main/java/com/takipi/oss/storage/helper/StringUtil.java @@ -1,4 +1,4 @@ -package com.takipi.oss.storage.caching; +package com.takipi.oss.storage.helper; public class StringUtil { public static String padRight(String str, int targetLength) { diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index be8845a..bdc0818 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -1,6 +1,7 @@ package com.takipi.oss.storage.resources.fs; import com.codahale.metrics.annotation.Timed; +import com.takipi.oss.storage.TakipiStorageConfiguration; import 
com.takipi.oss.storage.data.fetch.MultiFetchRequest; import com.takipi.oss.storage.data.fetch.MultiFetchResponse; import com.takipi.oss.storage.fs.Record; @@ -26,20 +27,23 @@ public class JsonMultiFetchStorageResource { private final Filesystem filesystem; private final MultiFetcher multiFetcher; - public JsonMultiFetchStorageResource(Filesystem filesystem, int multiFetcherConcurrencyLevel) { + public JsonMultiFetchStorageResource(Filesystem filesystem, + TakipiStorageConfiguration.Multifetch multifetchConfig) { this.filesystem = filesystem; TaskExecutor taskExecutor; + + int maxConcurrencyLevel = multifetchConfig.getConcurrencyLevel(); - if ((filesystem instanceof S3Filesystem) && (multiFetcherConcurrencyLevel > 1)) { - taskExecutor = new ConcurrentTaskExecutor(multiFetcherConcurrencyLevel); + if ((filesystem instanceof S3Filesystem) && (maxConcurrencyLevel > 1)) { + taskExecutor = new ConcurrentTaskExecutor(maxConcurrencyLevel); } else { taskExecutor = new SequentialTaskExecutor(); } - this.multiFetcher = new MultiFetcher(taskExecutor); + this.multiFetcher = new MultiFetcher(taskExecutor, multifetchConfig.getMaxCacheSize()); } @POST diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index 579686d..c9f0e1c 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -44,36 +44,14 @@ synchronized int getEstimatedSizePerPart() { } private static final Logger logger = LoggerFactory.getLogger(MultiFetcher.class); - private static long maxCacheSize = DEFAULT_MAX_CACHE_SIZE; private final TaskExecutor taskExecutor; - private final static S3Cache cache; - private static final PartSizeEstimator partSizeEstimator = new PartSizeEstimator(); + private final S3Cache cache; + private final PartSizeEstimator partSizeEstimator = new PartSizeEstimator(); - static { - if (maxCacheSize > 0) { - cache = new S3CacheImpl(maxCacheSize); - } - else { - cache = new S3Cache() { - @Override - public String get(String key) { - return null; - } - - @Override - public void put(String key, String value) { - } - }; - } - } - - public MultiFetcher(TaskExecutor taskExecutor) { + public MultiFetcher(TaskExecutor taskExecutor, int maxCacheSize) { this.taskExecutor = taskExecutor; - } - - public static void setMaxCacheSize(long maxCacheSize) { - MultiFetcher.maxCacheSize = maxCacheSize; + this.cache = (maxCacheSize > 0) ? 
new S3CacheImpl(maxCacheSize) : S3DummyCache.instance; } public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java index 19f32e7..99bff0b 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java @@ -12,7 +12,7 @@ public class S3CacheImpl implements S3Cache { private final InMemoryCache memoryCache; private final Cache cache; - public S3CacheImpl(long maxSize) { + S3CacheImpl(long maxSize) { memoryCache = new InMemoryCache.Builder().setMaxSize(maxSize).build(); cache = new Cache(memoryCache); } @@ -27,7 +27,7 @@ public String get(String key) { return new String(bytes, "UTF-8"); } catch (Exception e) { - logger.error(e.getMessage()); + logger.error("Failed to convert byte[] to String", e.getMessage()); } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3DummyCache.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3DummyCache.java new file mode 100644 index 0000000..7ab4c49 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3DummyCache.java @@ -0,0 +1,20 @@ +package com.takipi.oss.storage.resources.fs.multifetcher; + +public class S3DummyCache implements S3Cache { + + public static final S3DummyCache instance = new S3DummyCache(); + + private S3DummyCache() { + + } + + @Override + public String get(String key) { + return null; + } + + @Override + public void put(String key, String value) { + + } +} From d37a36d44ce954eab5c609b8abac2b96222cd05a Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Thu, 2 Nov 2017 13:09:10 +0200 Subject: [PATCH 56/66] added option of CacheLogger and some small refactorings. 
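
Note: the new enableCacheLogger flag wires CacheLogger in as an optional decorator around the in-memory delegator, so timing and hit logging can be switched on without touching the cache itself. A generic sketch of that decorator wiring follows; Layer, LoggingLayer, and the log format are illustrative, not the project's classes.

    // Illustrative decorator wiring: an optional logging layer wraps the base layer only
    // when the flag is on, and callers always talk to the outermost layer.
    interface Layer {
        byte[] get(String key);
    }

    class BaseLayer implements Layer {
        @Override public byte[] get(String key) { return null; } // stand-in for the real store
    }

    class LoggingLayer implements Layer {
        private final Layer inner;

        LoggingLayer(Layer inner) { this.inner = inner; }

        @Override public byte[] get(String key) {
            long start = System.currentTimeMillis();
            try {
                return inner.get(key);
            }
            finally {
                System.out.printf("get %s took %d ms%n", key, System.currentTimeMillis() - start);
            }
        }
    }

    class LayerWiring {
        static Layer build(boolean enableLogger) {
            Layer base = new BaseLayer();
            return enableLogger ? new LoggingLayer(base) : base;
        }
    }
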
--- .../oss/storage/TakipiStorageConfiguration.java | 11 +++++++++++ .../takipi/oss/storage/TakipiStorageMain.java | 1 - .../fs/JsonMultiFetchStorageResource.java | 5 ++++- .../resources/fs/multifetcher/MultiFetcher.java | 16 +++++++--------- .../resources/fs/multifetcher/S3Cache.java | 8 -------- .../com/takipi/oss/storage/s3cache/S3Cache.java | 8 ++++++++ .../fs/multifetcher => s3cache}/S3CacheImpl.java | 13 ++++++------- .../multifetcher => s3cache}/S3DummyCache.java | 2 +- 8 files changed, 37 insertions(+), 27 deletions(-) delete mode 100644 src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3Cache.java create mode 100644 src/main/java/com/takipi/oss/storage/s3cache/S3Cache.java rename src/main/java/com/takipi/oss/storage/{resources/fs/multifetcher => s3cache}/S3CacheImpl.java (65%) rename src/main/java/com/takipi/oss/storage/{resources/fs/multifetcher => s3cache}/S3DummyCache.java (84%) diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java index 4f3f1ad..6f7842b 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java @@ -127,6 +127,7 @@ public static class Multifetch { private Integer concurrencyLevel; private Integer maxCacheSize; + private Boolean enableCacheLogger; @JsonProperty public Integer getConcurrencyLevel() { @@ -147,6 +148,16 @@ public Integer getMaxCacheSize() { public void setMaxCacheSize(Integer maxCacheSize) { this.maxCacheSize = maxCacheSize; } + + @JsonProperty + public Boolean getEnableCacheLogger() { + return enableCacheLogger; + } + + @JsonProperty + public void setEnableCacheLogger(Boolean enableCacheLogger) { + this.enableCacheLogger = enableCacheLogger; + } } private boolean enableCors; diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java index 7f8eff8..d9931f7 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageMain.java @@ -5,7 +5,6 @@ import javax.servlet.DispatcherType; import javax.servlet.FilterRegistration; -import com.takipi.oss.storage.resources.fs.multifetcher.MultiFetcher; import org.eclipse.jetty.servlets.CrossOriginFilter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index bdc0818..c1f6587 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -42,8 +42,11 @@ public JsonMultiFetchStorageResource(Filesystem filesystem, else { taskExecutor = new SequentialTaskExecutor(); } + + int maxCacheSize = multifetchConfig.getMaxCacheSize(); + boolean enableCacheLogger = multifetchConfig.getEnableCacheLogger(); - this.multiFetcher = new MultiFetcher(taskExecutor, multifetchConfig.getMaxCacheSize()); + this.multiFetcher = new MultiFetcher(taskExecutor, maxCacheSize, enableCacheLogger); } @POST diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index c9f0e1c..28cecee 100644 --- 
a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -7,6 +7,9 @@ import com.takipi.oss.storage.fs.api.Filesystem; import com.takipi.oss.storage.fs.concurrent.SimpleStopWatch; import com.takipi.oss.storage.fs.concurrent.TaskExecutor; +import com.takipi.oss.storage.s3cache.S3Cache; +import com.takipi.oss.storage.s3cache.S3CacheImpl; +import com.takipi.oss.storage.s3cache.S3DummyCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -15,22 +18,17 @@ public class MultiFetcher { - private static final long DEFAULT_MAX_CACHE_SIZE = 33554432; // 32 MB - private static class PartSizeEstimator { private static long MIN_LOADED_PARTS_FOR_SIZE_ESTIMATION = 10; private static int DEFAULT_PART_SIZE_ESTIMATION = 1700; - private static long MAX_TOTAL_SIZE = 1L << 30; private long totalSizeLoaded = 0; private long numberOfPartsLoaded = 0; synchronized void updateStats(long size) { - if (totalSizeLoaded < MAX_TOTAL_SIZE) { - totalSizeLoaded += size; - ++numberOfPartsLoaded; - } + totalSizeLoaded += size; + ++numberOfPartsLoaded; } synchronized int getEstimatedSizePerPart() { @@ -49,9 +47,9 @@ synchronized int getEstimatedSizePerPart() { private final S3Cache cache; private final PartSizeEstimator partSizeEstimator = new PartSizeEstimator(); - public MultiFetcher(TaskExecutor taskExecutor, int maxCacheSize) { + public MultiFetcher(TaskExecutor taskExecutor, int maxCacheSize, boolean enableCacheLogger) { this.taskExecutor = taskExecutor; - this.cache = (maxCacheSize > 0) ? new S3CacheImpl(maxCacheSize) : S3DummyCache.instance; + this.cache = (maxCacheSize > 0) ? new S3CacheImpl(maxCacheSize, enableCacheLogger) : S3DummyCache.instance; } public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3Cache.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3Cache.java deleted file mode 100644 index 5cf6a30..0000000 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3Cache.java +++ /dev/null @@ -1,8 +0,0 @@ -package com.takipi.oss.storage.resources.fs.multifetcher; - -interface S3Cache { - - String get(String key); - - void put(String key, String value); -} diff --git a/src/main/java/com/takipi/oss/storage/s3cache/S3Cache.java b/src/main/java/com/takipi/oss/storage/s3cache/S3Cache.java new file mode 100644 index 0000000..298a371 --- /dev/null +++ b/src/main/java/com/takipi/oss/storage/s3cache/S3Cache.java @@ -0,0 +1,8 @@ +package com.takipi.oss.storage.s3cache; + +public interface S3Cache { + + String get(String key); + + void put(String key, String value); +} diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java b/src/main/java/com/takipi/oss/storage/s3cache/S3CacheImpl.java similarity index 65% rename from src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java rename to src/main/java/com/takipi/oss/storage/s3cache/S3CacheImpl.java index 99bff0b..f7df848 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3CacheImpl.java +++ b/src/main/java/com/takipi/oss/storage/s3cache/S3CacheImpl.java @@ -1,7 +1,6 @@ -package com.takipi.oss.storage.resources.fs.multifetcher; +package com.takipi.oss.storage.s3cache; -import com.takipi.oss.storage.caching.Cache; -import com.takipi.oss.storage.caching.InMemoryCache; +import com.takipi.oss.storage.caching.*; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -9,12 +8,12 @@ public class S3CacheImpl implements S3Cache { private static final Logger logger = LoggerFactory.getLogger(S3CacheImpl.class); - private final InMemoryCache memoryCache; private final Cache cache; - S3CacheImpl(long maxSize) { - memoryCache = new InMemoryCache.Builder().setMaxSize(maxSize).build(); - cache = new Cache(memoryCache); + public S3CacheImpl(long maxSize, boolean enableCacheLogger) { + InMemoryCache memoryCache = new InMemoryCache.Builder().setMaxSize(maxSize).build(); + CacheDelegator cacheDelegator = enableCacheLogger ? new CacheLogger(memoryCache) : memoryCache; + cache = new Cache(cacheDelegator); } public String get(String key) { diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3DummyCache.java b/src/main/java/com/takipi/oss/storage/s3cache/S3DummyCache.java similarity index 84% rename from src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3DummyCache.java rename to src/main/java/com/takipi/oss/storage/s3cache/S3DummyCache.java index 7ab4c49..3e7ba03 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3DummyCache.java +++ b/src/main/java/com/takipi/oss/storage/s3cache/S3DummyCache.java @@ -1,4 +1,4 @@ -package com.takipi.oss.storage.resources.fs.multifetcher; +package com.takipi.oss.storage.s3cache; public class S3DummyCache implements S3Cache { From f2fce4b17c7e05988cf27bccbac35fc9691b9102 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Thu, 2 Nov 2017 13:31:58 +0200 Subject: [PATCH 57/66] added maven guava dependency --- pom.xml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 5c3b4df..a2bb4e1 100644 --- a/pom.xml +++ b/pom.xml @@ -35,6 +35,11 @@ aws-java-sdk-s3 1.11.13 + + com.google.guava + guava + 17.0 + @@ -103,7 +108,7 @@ org.codehaus.mojo findbugs-maven-plugin - 3.0.4-SNAPSHOT + 3.0.4 From d8e460899313b2370aeefeaee17dc0177ca28e3f Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Sun, 5 Nov 2017 16:36:51 +0200 Subject: [PATCH 58/66] added max batch size and updated settings.yml --- settings.yml | 8 ++++++-- .../oss/storage/TakipiStorageConfiguration.java | 11 +++++++++++ .../oss/storage/data/fetch/MultiFetchRequest.java | 1 - .../resources/fs/JsonMultiFetchStorageResource.java | 3 ++- .../resources/fs/multifetcher/MultiFetcher.java | 8 +++++--- 5 files changed, 24 insertions(+), 7 deletions(-) diff --git a/settings.yml b/settings.yml index 01540eb..6339b15 100644 --- a/settings.yml +++ b/settings.yml @@ -12,10 +12,14 @@ s3Fs: multifetch: # maximum number of threads for concurrent multi-fetch. Set to zero to disable concurrent fetching. - concurrencyLevel: 30 + concurrencyLevel: 100 - # Recommended size >= 4194304. Set to zero to disable caching. + # Recommended size >= 16777216. Set to zero to disable caching. 
maxCacheSize: 67108864 + + enableCacheLogger: false + + maxBatchSize: 4194304 #folderFs: # folderPath: /opt/takipi-storage/storage diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java index 6f7842b..a67f287 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java @@ -128,6 +128,7 @@ public static class Multifetch { private Integer concurrencyLevel; private Integer maxCacheSize; private Boolean enableCacheLogger; + private Integer maxBatchSize; @JsonProperty public Integer getConcurrencyLevel() { @@ -158,6 +159,16 @@ public Boolean getEnableCacheLogger() { public void setEnableCacheLogger(Boolean enableCacheLogger) { this.enableCacheLogger = enableCacheLogger; } + + @JsonProperty + public Integer getMaxBatchSize() { + return maxBatchSize; + } + + @JsonProperty + public void setMaxBatchSize(Integer maxBatchSize) { + this.maxBatchSize = maxBatchSize; + } } private boolean enableCors; diff --git a/src/main/java/com/takipi/oss/storage/data/fetch/MultiFetchRequest.java b/src/main/java/com/takipi/oss/storage/data/fetch/MultiFetchRequest.java index aad34ea..85efbe9 100644 --- a/src/main/java/com/takipi/oss/storage/data/fetch/MultiFetchRequest.java +++ b/src/main/java/com/takipi/oss/storage/data/fetch/MultiFetchRequest.java @@ -8,5 +8,4 @@ public class MultiFetchRequest { public EncodingType encodingType; public List records; - public int maxBatchSize; } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java index c1f6587..2236bb7 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/JsonMultiFetchStorageResource.java @@ -45,8 +45,9 @@ public JsonMultiFetchStorageResource(Filesystem filesystem, int maxCacheSize = multifetchConfig.getMaxCacheSize(); boolean enableCacheLogger = multifetchConfig.getEnableCacheLogger(); + int maxBatchSize = multifetchConfig.getMaxBatchSize(); - this.multiFetcher = new MultiFetcher(taskExecutor, maxCacheSize, enableCacheLogger); + this.multiFetcher = new MultiFetcher(taskExecutor, maxCacheSize, enableCacheLogger, maxBatchSize); } @POST diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index 28cecee..4d17ff3 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -46,18 +46,20 @@ synchronized int getEstimatedSizePerPart() { private final TaskExecutor taskExecutor; private final S3Cache cache; private final PartSizeEstimator partSizeEstimator = new PartSizeEstimator(); + private final int maxBatchSize; - public MultiFetcher(TaskExecutor taskExecutor, int maxCacheSize, boolean enableCacheLogger) { + public MultiFetcher(TaskExecutor taskExecutor, int maxCacheSize, boolean enableCacheLogger, int maxBatchSize) { this.taskExecutor = taskExecutor; this.cache = (maxCacheSize > 0) ? 
new S3CacheImpl(maxCacheSize, enableCacheLogger) : S3DummyCache.instance; + this.maxBatchSize = maxBatchSize; } public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem filesystem) { int estimatedSizePerPart = partSizeEstimator.getEstimatedSizePerPart(); - final int maxBatchCount = request.maxBatchSize / estimatedSizePerPart; + final int maxBatchCount = maxBatchSize / estimatedSizePerPart; logger.info("Max batch size = {}. Estimated size per part = {}. Max batch count = {}", - request.maxBatchSize, estimatedSizePerPart, maxBatchCount); + maxBatchSize, estimatedSizePerPart, maxBatchCount); List records = request.records; records = (records.size() > maxBatchCount) ? records.subList(0, maxBatchCount) : records; From 67d5914201aad8ed5ea8e8f6a5e26c70454d2227 Mon Sep 17 00:00:00 2001 From: mervyn2017 Date: Mon, 6 Nov 2017 14:44:48 +0200 Subject: [PATCH 59/66] code review minor changes --- .../java/com/takipi/oss/storage/caching/CacheLogger.java | 2 +- .../java/com/takipi/oss/storage/s3cache/S3CacheImpl.java | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/caching/CacheLogger.java b/src/main/java/com/takipi/oss/storage/caching/CacheLogger.java index d3cb68e..cb6728a 100644 --- a/src/main/java/com/takipi/oss/storage/caching/CacheLogger.java +++ b/src/main/java/com/takipi/oss/storage/caching/CacheLogger.java @@ -7,7 +7,7 @@ import org.slf4j.LoggerFactory; public class CacheLogger extends CacheDelegator { - private static final Logger logger = LoggerFactory.getLogger("backendcache"); + private static final Logger logger = LoggerFactory.getLogger(CacheLogger.class); private static final AtomicLong counter = new AtomicLong(); diff --git a/src/main/java/com/takipi/oss/storage/s3cache/S3CacheImpl.java b/src/main/java/com/takipi/oss/storage/s3cache/S3CacheImpl.java index f7df848..b1edf39 100644 --- a/src/main/java/com/takipi/oss/storage/s3cache/S3CacheImpl.java +++ b/src/main/java/com/takipi/oss/storage/s3cache/S3CacheImpl.java @@ -1,6 +1,9 @@ package com.takipi.oss.storage.s3cache; -import com.takipi.oss.storage.caching.*; +import com.takipi.oss.storage.caching.Cache; +import com.takipi.oss.storage.caching.CacheDelegator; +import com.takipi.oss.storage.caching.CacheLogger; +import com.takipi.oss.storage.caching.InMemoryCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; From a58ef3d9938ac95ba12a1b7c55d47e4c597a2b66 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Thu, 18 Jan 2018 23:47:18 +0200 Subject: [PATCH 60/66] enable nullable --- .../java/com/takipi/oss/storage/TakipiStorageConfiguration.java | 1 - 1 file changed, 1 deletion(-) diff --git a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java index a67f287..d026a29 100644 --- a/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java +++ b/src/main/java/com/takipi/oss/storage/TakipiStorageConfiguration.java @@ -55,7 +55,6 @@ public static class S3Fs { @NotEmpty private String bucket; - @NotEmpty private String pathPrefix; @NotNull From 8238c56b9d4b05c2397418a3490c253b8ce06937 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Fri, 19 Jan 2018 11:46:05 +0200 Subject: [PATCH 61/66] handle missing keys --- .../resources/fs/multifetcher/MultiFetcher.java | 10 +++++++--- .../resources/fs/multifetcher/S3ObjectFetcherTask.java | 9 +++++++-- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git 
a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index 4d17ff3..b51a347 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -98,9 +98,13 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem for (RecordWithData recordWithData : recordsToFetch) { String value = recordWithData.getData(); - cache.put(recordWithData.getRecord().getKey(), value); - totalSize += value.length(); - partSizeEstimator.updateStats(value.length()); + + if (value != null) + { + cache.put(recordWithData.getRecord().getKey(), value); + totalSize += value.length(); + partSizeEstimator.updateStats(value.length()); + } } } diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java index cf28f5f..6d85f53 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java @@ -26,7 +26,11 @@ public class S3ObjectFetcherTask implements Runnable { @Override public void run() { String result = load(filesystem, recordWithData.getRecord(), encodingType); - recordWithData.setData(result); + + if (result != null) + { + recordWithData.setData(result); + } } private static String load(Filesystem filesystem, Record record, EncodingType encodingType) { @@ -57,7 +61,8 @@ private static String load(Filesystem filesystem, Record record, Encodin if (value == null) { logger.error("Failed to load object for key: {}. Elapsed time = {} ms", key, elapsed); - throw new RuntimeException("Failed to load object for key: " + key); + return null; + // throw new RuntimeException("Failed to load object for key: " + key); } logger.debug("{} loaded key {} in {} ms. 
{} bytes", Thread.currentThread().getName(), key, elapsed, value.length()); From 79f51792cb92fbef414a29c21f632d0bf44ce1bd Mon Sep 17 00:00:00 2001 From: David Levanon Date: Sun, 21 Jan 2018 14:15:31 +0200 Subject: [PATCH 62/66] remove comments --- .../oss/storage/resources/fs/multifetcher/MultiFetcher.java | 3 +-- .../storage/resources/fs/multifetcher/S3ObjectFetcherTask.java | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java index b51a347..373d3eb 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/MultiFetcher.java @@ -99,8 +99,7 @@ public MultiFetchResponse loadData(MultiFetchRequest request, Filesystem for (RecordWithData recordWithData : recordsToFetch) { String value = recordWithData.getData(); - if (value != null) - { + if (value != null) { cache.put(recordWithData.getRecord().getKey(), value); totalSize += value.length(); partSizeEstimator.updateStats(value.length()); diff --git a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java index 6d85f53..ba08fab 100644 --- a/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java +++ b/src/main/java/com/takipi/oss/storage/resources/fs/multifetcher/S3ObjectFetcherTask.java @@ -62,7 +62,6 @@ private static String load(Filesystem filesystem, Record record, Encodin if (value == null) { logger.error("Failed to load object for key: {}. Elapsed time = {} ms", key, elapsed); return null; - // throw new RuntimeException("Failed to load object for key: " + key); } logger.debug("{} loaded key {} in {} ms. 
{} bytes", Thread.currentThread().getName(), key, elapsed, value.length()); From 66e79ef7c4bdf65376a0d5b50bb172625e8da5cb Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Mon, 22 Jan 2018 19:45:10 +0200 Subject: [PATCH 63/66] downgrade --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index e6e783a..a2bb4e1 100644 --- a/pom.xml +++ b/pom.xml @@ -11,7 +11,7 @@ UTF-8 UTF-8 - 1.2.2 + 0.7.1 0.7 From c352bb76d1b13aba252a9a752f023f7e046bc4a4 Mon Sep 17 00:00:00 2001 From: shimonmagal Date: Mon, 22 Jan 2018 19:49:25 +0200 Subject: [PATCH 64/66] downgrade --- src/main/java/com/takipi/oss/storage/fs/Record.java | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/main/java/com/takipi/oss/storage/fs/Record.java b/src/main/java/com/takipi/oss/storage/fs/Record.java index bf18551..ad0abc4 100644 --- a/src/main/java/com/takipi/oss/storage/fs/Record.java +++ b/src/main/java/com/takipi/oss/storage/fs/Record.java @@ -2,8 +2,6 @@ import java.io.File; -import org.apache.commons.lang3.StringUtils; - public class Record implements BaseRecord { private String serviceId; private String type; @@ -63,8 +61,8 @@ public boolean equals(Object obj) { Record objRecord = (Record) obj; - return ((StringUtils.equals(serviceId, objRecord.serviceId)) && - (StringUtils.equals(type, objRecord.type)) && - (StringUtils.equals(key, objRecord.key))); + return ((serviceId.equals(objRecord.serviceId)) && + (type.equals(objRecord.type)) && + (key.equals(objRecord.key))); } } From 7b81bfe3460299ec8a59bfc90decf814b39f277b Mon Sep 17 00:00:00 2001 From: Corey Severino Date: Tue, 12 Jan 2021 16:34:03 -0500 Subject: [PATCH 65/66] [OOE-273] Updating Dockerfile to use consistent rootless docker image. (#48) --- docker/Dockerfile | 44 ++++++++---- docker/Jenkinsfile | 72 +++++++++++++++++++ docker/README.md | 23 ++++-- .../{settings.yml => private/settings.yaml} | 21 ++++-- docker/scripts/run.sh | 3 + 5 files changed, 135 insertions(+), 28 deletions(-) create mode 100644 docker/Jenkinsfile rename docker/{settings.yml => private/settings.yaml} (58%) create mode 100644 docker/scripts/run.sh diff --git a/docker/Dockerfile b/docker/Dockerfile index f45b4af..31c28f2 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,21 +1,35 @@ -# Run me with docker run -v :/opt/takipi-storage/storage -p :$STORAGE_PORT -# Logs are written to /log folder and so should be persistent outside of the running container +FROM openjdk:8-jre-slim +LABEL maintainer="support@overops.com" -FROM java:8 -MAINTAINER Chen harel "https://github.com/chook" +ARG APP_VERSION=latest -ENV VERSION 1.7.0 -ENV TAR_FILENAME takipi-storage-$VERSION.tar.gz -ENV JAR_FILENAME takipi-storage-$VERSION.jar -ENV STORAGE_PORT 8080 +# install curl +RUN apt-get update; apt-get install -y curl -RUN wget https://s3.amazonaws.com/app-takipi-com/deploy/takipi-storage/takipi-s3/$TAR_FILENAME -RUN tar zxvf $TAR_FILENAME -C /tmp && \ - mkdir -p /opt/takipi-storage/lib && \ - cp /tmp/takipi-storage/lib/$JAR_FILENAME /opt/takipi-storage/lib -ADD settings.yml /opt/takipi-storage +# rootless +RUN groupadd --gid 1000 overops +RUN adduser --home /opt/takipi-storage --uid 1000 --gid 1000 overops +USER 1000:1000 -EXPOSE $STORAGE_PORT +# install into the /opt directory +WORKDIR /opt + +# download and install the storage server +RUN curl -sL http://app-takipi-com.s3.amazonaws.com/deployx/s3/deploy/takipi-storage/takipi-storage-${APP_VERSION}.tar.gz | tar -xvzf - + +RUN mkdir /opt/takipi-storage/private +COPY --chown=1000:1000 
"./private/settings.yaml" "/opt/takipi-storage/private/settings.yaml" + +# use mount to make settings.yaml available +VOLUME ["/opt/takipi-storage/private"] WORKDIR /opt/takipi-storage -CMD java -jar /opt/takipi-storage/lib/$JAR_FILENAME server settings.yml \ No newline at end of file + +# copy the run script +COPY --chown=1000:1000 "./scripts/run.sh" "./run.sh" +RUN chmod +x run.sh + +EXPOSE 8080 8081 + +# run the service, printing logs to stdout +CMD ["./run.sh"] diff --git a/docker/Jenkinsfile b/docker/Jenkinsfile new file mode 100644 index 0000000..1c54cc8 --- /dev/null +++ b/docker/Jenkinsfile @@ -0,0 +1,72 @@ +def imageName = 'docker-local/overops-storage-server-s3' +def dockerHubImage = 'overops/storage-server-s3' + +pipeline { + + environment { + registryCred = 'container-registry-build-guy' + dockerhubCred = 'docker-hub' + gitCred = 'build-guy' + } + + parameters { + string(name: 'VERSION', defaultValue: 'latest', description:'Application version') + string(name: 'TAG', defaultValue: 'latest', description:'Image Tag to be used') + booleanParam(name: 'PUBLISH_TO_DOCKERHUB', defaultValue: false, description:'Flag to publish to docker-hub') + } + + agent any + stages { + stage('Cloning Git') { + steps { + git([url: 'https://github.com/takipi/takipi-storage', branch: 's3-storage', credentialsId: gitCred]) + } + } + + stage('Build Docker Image') { + steps { + dir('docker') { + script { + if (params.PUBLISH_TO_DOCKERHUB) { + imageName = dockerHubImage + } + + dockerOptions = ('--label=storage-server-s3-pipeline --build-arg APP_VERSION=' + params.VERSION + ' .') + dockerImage = docker.build(imageName, dockerOptions) + } + } + } + } + + stage('Publish Docker Image') { + steps { + script { + if (params.PUBLISH_TO_DOCKERHUB) { + reg = '' + cred = dockerhubCred + } else { + reg = env.LOCAL_DOCKER_REGISTRY_URL + cred = registryCred + } + + docker.withRegistry(reg, cred) { + dockerImage.push() + + if (params.TAG != 'latest') { + dockerImage.push(params.TAG) + } + } + } + } + } + + stage('Cleanup') { + steps { + script { + sh(script:"docker rmi -f \$(docker images -f label=storage-server-s3-pipeline -q)") + } + cleanWs() + } + } + } +} diff --git a/docker/README.md b/docker/README.md index 2e76112..31e3826 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,8 +1,19 @@ -takipi-storage on docker -======================== +# Deploy the Storage Server - S3 (hybrid installations, AWS S3) -To run takipi-storage for s3: -- configure bucket, access key and secret in settings.yml -- run it with docker run -v :/opt/takipi-storage/storage -p :8080 - (If you wish to have the logs persisted to and they can be found in /logs folder. -p to override default 8080 port) +For hybrid installations, the Storage Server can be installed in your cluster. +This Storage Server [Dockerfile](Dockerfile) is based on the [Installing the Storage Server on AWS S3](https://doc.overops.com/docs/installing-the-storage-server-on-aws-s3) guide, with some minor modifications. + +For complete instructions on performing a hybrid installation, refer to the [Hybrid Installation on Linux](https://doc.overops.com/docs/linux-hybrid-installation) guide. + +The file `settings.yaml` must be mounted into the `/opt/takipi-storage/private` directory to run this container. An example [settings.yaml](private/settings.yaml) can be found in this repo. 
+ +## Quick Start + +This image is on Docker Hub: [overops/storage-server-s3](https://hub.docker.com/r/overops/storage-server-s3) + +### Docker Quick Start + +```console +docker run -d -p 8080:8080 --mount type=bind,source="$(pwd)"/private,target=/opt/takipi-storage/private overops/storage-server-s3 +``` \ No newline at end of file diff --git a/docker/settings.yml b/docker/private/settings.yaml similarity index 58% rename from docker/settings.yml rename to docker/private/settings.yaml index f625ec7..1d0df4a 100644 --- a/docker/settings.yml +++ b/docker/private/settings.yaml @@ -3,13 +3,23 @@ corsOrigins: "*" # If using attaching IAM Role to instance leave accessKey and secretKey empty. s3Fs: - bucket: - pathPrefix: + bucket: + pathPrefix: credentials: - accessKey: - secretKey: + accessKey: + secretKey: + +multifetch: + # maximum number of threads for concurrent multi-fetch. Set to zero to disable concurrent fetching. + concurrencyLevel: 100 + # Recommended size >= 16777216. Set to zero to disable caching. + maxCacheSize: 67108864 + enableCacheLogger: false + maxBatchSize: 4194304 server: + gzip: + includedMethods: [POST, GET] applicationConnectors: - type: http port: 8080 @@ -25,13 +35,10 @@ server: # Logging settings. logging: - # The default level of all loggers. Can be OFF, ERROR, WARN, INFO, DEBUG, TRACE, or ALL. level: INFO - # Logger-specific levels. loggers: - com.takipi: DEBUG appenders: diff --git a/docker/scripts/run.sh b/docker/scripts/run.sh new file mode 100644 index 0000000..7e14e5b --- /dev/null +++ b/docker/scripts/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash +java -jar /opt/takipi-storage/lib/takipi-storage.jar server /opt/takipi-storage/private/settings.yaml &> /opt/takipi-storage/log/takipi-storage.log & +/usr/bin/tail -f /opt/takipi-storage/log/takipi-storage.log From ab4ed1a159779906ecdd573062e1c19179133930 Mon Sep 17 00:00:00 2001 From: Corey Severino Date: Thu, 28 Jan 2021 10:58:48 -0500 Subject: [PATCH 66/66] Update to use Virtual Repository on Jfrog --- docker/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Jenkinsfile b/docker/Jenkinsfile index 1c54cc8..d767ebb 100644 --- a/docker/Jenkinsfile +++ b/docker/Jenkinsfile @@ -1,4 +1,4 @@ -def imageName = 'docker-local/overops-storage-server-s3' +def imageName = 'docker/overops-storage-server-s3' def dockerHubImage = 'overops/storage-server-s3' pipeline {
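
For reference, the multi-fetch batching behaviour introduced in PATCH 58 (the `maxBatchSize` setting) and relaxed in PATCH 61 (missing keys are skipped rather than aborting the request) can be summarised by the sketch below. This is an illustrative sketch only, not code from the patch series: the estimated part size and the sample payload list are assumed values, and `MultiFetchBatchSketch` is a hypothetical class name; only `maxBatchSize: 4194304` mirrors the default shipped in `settings.yml`.

```java
// Illustrative sketch -- not part of the patch series. It mirrors the shape of
// MultiFetcher.loadData after PATCH 58 (cap the batch by maxBatchSize) and
// PATCH 61 (skip objects whose keys failed to load). Numbers are assumed examples.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class MultiFetchBatchSketch {
    public static void main(String[] args) {
        int maxBatchSize = 4194304;        // bytes, multifetch.maxBatchSize in settings.yml
        int estimatedSizePerPart = 16384;  // assumed running average object size

        // Maximum number of records a single multi-fetch request may return.
        int maxBatchCount = maxBatchSize / estimatedSizePerPart;   // 256 with these numbers

        // Pretend these came back from S3; null marks a key that could not be loaded.
        List<String> fetched = Arrays.asList("payload-1", null, "payload-2");

        List<String> served = new ArrayList<String>();
        long totalSize = 0;

        for (String value : fetched) {
            if (served.size() >= maxBatchCount) {
                break;                     // truncate the batch once the cap is reached
            }
            if (value == null) {
                continue;                  // missing key: skip it instead of throwing
            }
            served.add(value);
            totalSize += value.length();
        }

        System.out.println("maxBatchCount=" + maxBatchCount
                + ", served=" + served.size() + ", totalSize=" + totalSize);
    }
}
```

With the assumed figures above, a 4 MiB batch budget divided by a 16 KiB average object size caps a request at 256 objects; in the real server the divisor is recalculated at runtime from observed object sizes.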