diff --git a/filescale_init/voltdb/SetPermissionsV2.java b/filescale_init/voltdb/SetPermissionsV2.java
new file mode 100644
index 0000000000..6aeb8ef6f6
--- /dev/null
+++ b/filescale_init/voltdb/SetPermissionsV2.java
@@ -0,0 +1,19 @@
+import org.voltdb.*;
+import java.io.File;
+
+// https://docs.voltdb.com/tutorial/Part5.php
+public class SetPermissionsV2 extends VoltProcedure {
+
+  public final SQLStmt sql1 = new SQLStmt("UPDATE inodes SET permission = ? WHERE parentName STARTS WITH ?;");
+  public final SQLStmt sql2 = new SQLStmt("UPDATE inodes SET permission = ? WHERE parentName = ? and name = ?;");
+
+  public long run(final String path, final long permission) throws VoltAbortException {
+    File file = new File(path);
+    String parent = file.getParent();
+    String name = file.getName();
+    voltQueueSQL(sql1, permission, path);
+    voltQueueSQL(sql2, permission, parent, name);
+    voltExecuteSQL();
+    return getUniqueId();
+  }
+}
diff --git a/filescale_init/voltdb/UpdateSubtreeV2.java b/filescale_init/voltdb/UpdateSubtreeV2.java
index 32486c3bdd..0b0793762a 100644
--- a/filescale_init/voltdb/UpdateSubtreeV2.java
+++ b/filescale_init/voltdb/UpdateSubtreeV2.java
@@ -19,7 +19,11 @@ public long run(final long dir_id, final long dest_id, final String old_parent_n
     voltQueueSQL(sql1, old_parent_name);
     VoltTable[] res = voltExecuteSQL();
 
-    // 2. update subtree records
+    // 2. delete old subtree records
+    voltQueueSQL(sql3, old_parent_name);
+    voltExecuteSQL();
+
+    // 3. update subtree records
     Long id = null;
     String name = null;
     Long accessTime = null;
@@ -62,10 +66,6 @@ public long run(final long dir_id, final long dest_id, final String old_parent_n
     }
     voltExecuteSQL();
 
-    // 3. delete old subtree records
-    voltQueueSQL(sql3, old_parent_name);
-    voltExecuteSQL();
-
     return getUniqueId();
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-db/src/main/java/org/apache/hadoop/hdfs/db/DatabaseINode.java b/hadoop-hdfs-project/hadoop-hdfs-db/src/main/java/org/apache/hadoop/hdfs/db/DatabaseINode.java
index 6070d9fb8c..1873315382 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-db/src/main/java/org/apache/hadoop/hdfs/db/DatabaseINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-db/src/main/java/org/apache/hadoop/hdfs/db/DatabaseINode.java
@@ -674,6 +674,36 @@ public static long setPermissions(final List<String> parents, final List<String>
     return res;
   }
 
+  public static long setPermissions(final String path, final long permission) {
+    long res = 0;
+    try {
+      DatabaseConnection obj = Database.getInstance().getConnection();
+      String env = System.getenv("DATABASE");
+      if (env.equals("VOLT")) {
+        try {
+          VoltTable[] results = obj.getVoltClient().callProcedure("SetPermissionsV2",
+              path, permission).getResults();
+          VoltTable result = results[0];
+          result.resetRowPosition();
+          while (result.advanceRow()) {
+            res = result.getLong(0);
+          }
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+      } else {
+        throw new SQLException("[UNSUPPORT] Invalid operation ...");
+      }
+      Database.getInstance().retConnection(obj);
+    } catch (SQLException ex) {
+      System.err.println(ex.getMessage());
+    }
+    if (LOG.isInfoEnabled()) {
+      LOG.info("txnId: " + res + " permissions [UPDATE v2]: (" + permission + ")");
+    }
+    return res;
+  }
+
   public static long setPermission(final long id, final long permission) {
     long res = 0;
     try {
@@ -2073,32 +2103,47 @@ public static long batchUpdateINodes(
     try {
       DatabaseConnection obj = Database.getInstance().getConnection();
       String env = System.getenv("DATABASE");
-      if (env.equals("VOLT")) {
-        try {
-          VoltTable[] results = obj.getVoltClient()
-              .callProcedure(
-                  "BatchUpdateINodes",
-                  longAttr.toArray(new Long[longAttr.size()]),
-                  strAttr.toArray(new String[strAttr.size()]),
-                  fileIds.toArray(new Long[fileIds.size()]),
-                  fileAttr.toArray(new String[fileAttr.size()])).getResults();
-          VoltTable result = results[0];
-          result.resetRowPosition();
-          while (result.advanceRow()) {
-            res = result.getLong(0);
-          }
-        } catch (Exception e) {
-          e.printStackTrace();
+      // if (env.equals("VOLT")) {
+      //   try {
+      //     VoltTable[] results = obj.getVoltClient()
+      //         .callProcedure(
+      //             "BatchUpdateINodes",
+      //             longAttr.toArray(new Long[longAttr.size()]),
+      //             strAttr.toArray(new String[strAttr.size()]),
+      //             fileIds.toArray(new Long[fileIds.size()]),
+      //             fileAttr.toArray(new String[fileAttr.size()])).getResults();
+      //     VoltTable result = results[0];
+      //     result.resetRowPosition();
+      //     while (result.advanceRow()) {
+      //       res = result.getLong(0);
+      //     }
+      //   } catch (Exception e) {
+      //     e.printStackTrace();
+      //   }
+      // } else {
+        int size = strAttr.size() / 2;
+        Connection conn = obj.getConnection();
+        String sql = "UPSERT INTO inodes("
+            + "parent, id, name, modificationTime, accessTime, permission, header, parentName"
+            + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?);";
+        PreparedStatement pst = conn.prepareStatement(sql);
+
+        for (int i = 0; i < size; ++i) {
+          int idx = i * 6;
+          int idy = i * 2;
+          pst.setLong(1, longAttr.get(idx));
+          pst.setLong(2, longAttr.get(idx + 1));
+          pst.setString(3, strAttr.get(idy));
+          pst.setLong(4, longAttr.get(idx + 2));
+          pst.setLong(5, longAttr.get(idx + 3));
+          pst.setLong(6, longAttr.get(idx + 4));
+          pst.setLong(7, longAttr.get(idx + 5));
+          pst.setString(8, strAttr.get(idy + 1));
+          pst.addBatch();
         }
-      } else {
-        // Connection conn = obj.getConnection();
-        // PreparedStatement pst = conn.prepareStatement(sql);
-        // pst.setLong(1, childId);
-        // pst.executeUpdate();
-        // pst.close();
-        // TODO: Support batch update in CockroachDB
-        throw new SQLException("[UNSUPPORT] Invalid operation ...");
-      }
+        pst.executeBatch();
+        pst.close();
+      // }
       Database.getInstance().retConnection(obj);
     } catch (SQLException ex) {
       System.err.println(ex.getMessage());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index cb56230549..0ff7e6144a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -994,25 +994,9 @@ public void remoteRename(INode node, String oldName, String oldParent, String ne
           }
         }
 
-        // if (child.getId() != old_id) {
-        //   child.setParent(child.getParentId() + 40000000);
-        //   child.setParentName(newParent + child.getParentName().substring(skip_id));
-        // }
-        // child.setId(child.getId() + 40000000);
-
-        // if (child.isDirectory()) {
-        //   // log: create new diretory
-        //   FSDirectory.getInstance()
-        //       .getEditLog()
-        //       .logMkDir(null, (INodeDirectory)child);
-        // } else {
-        //   // log: create new file
-        //   FSDirectory.getInstance()
-        //       .getEditLog()
-        //       .logOpenFile(null, (INodeFile)child, true, true);
-        // }
-
-        renameSet.add(child);
+        if (child.getId() != old_id) {
+          renameSet.add(child);
+        }
         count++;
         INodeKeyedObjects.getCache().invalidate(child.getPath());
         if (count == dirtyCount) {
@@ -1020,11 +1004,6 @@ public void remoteRename(INode node, String oldName, String oldParent, String ne
           update_subtree(renameSet);
           break;
         }
-        if (database.equals("VOLT")) {
-          if (renameSet.size() >= 5120) {
-            update_subtree(renameSet);
-          }
-        }
       }
     }
     if (count < dirtyCount && renameSet.size() > 0) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
index 9e6a80fdfc..30f899de2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
@@ -512,7 +512,7 @@ public static final void invalidateAndWriteBackDB(String parent, String name) {
     }
   }
 
-  private final void remoteChmod(Set<Pair<String, String>> mpoints) {
+  private final void remoteChmod(String path, Set<Pair<String, String>> mpoints) {
     String database = System.getenv("DATABASE");
     DatabaseConnection conn = Database.getInstance().getConnection();
     BinaryObjectBuilder inodeKeyBuilder = null;
@@ -521,8 +521,6 @@ private final void remoteChmod(Set<Pair<String, String>> mpoints) {
     }
 
     // 1. invalidate cache and write back dirty data
-    List<String> parents = new ArrayList<>();
-    List<String> names = new ArrayList<>();
     Set<BinaryObject> keys = new HashSet<>();
     List> list = new ArrayList<>();
     for (Pair<String, String> pair : mpoints) {
@@ -557,10 +555,7 @@ private final void remoteChmod(Set<Pair<String, String>> mpoints) {
        e.printStackTrace();
      }
 
-      if (database.equals("VOLT")) {
-        parents.add(parent);
-        names.add(name);
-      } else if (database.equals("IGNITE")) {
+      if (database.equals("IGNITE")) {
         keys.add(inodeKeyBuilder.setField("parentName", parent).setField("name", name).build());
       }
     }
@@ -569,10 +564,10 @@ private final void remoteChmod(Set<Pair<String, String>> mpoints) {
 
     // 2. execute distributed txn
     LOG.info("Execute dist txn for chmod");
-    if (parents.size() > 0 || keys.size() > 0) {
+    if (path != null) {
       String start = INodeKeyedObjects.getWalOffset();
       if (database.equals("VOLT")) {
-        INodeKeyedObjects.setWalOffset(DatabaseINode.setPermissions(parents, names, this.permission));
+        INodeKeyedObjects.setWalOffset(DatabaseINode.setPermissions(path, this.permission));
       } else if (database.equals("IGNITE")) {
         IgniteCompute compute = conn.getIgniteClient().compute();
         INodeKeyedObjects.setWalOffset(
@@ -595,7 +590,7 @@ private final void updatePermissionStatus(PermissionStatusFormat f, long n) {
     try {
       Set<Pair<String, String>> mpoints = FSDirectory.getInstance().getMountsManager().resolveSubPaths(getPath());
       LOG.info(getPath() + " has sub-paths that are mounted into: " + mpoints);
-      remoteChmod(mpoints);
+      remoteChmod(getPath(), mpoints);
     } catch (Exception e) {
       e.printStackTrace();
     }
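Note on the batchUpdateINodes change: the new JDBC path assumes the caller packs six numeric attributes per inode into longAttr in the column order parent, id, modificationTime, accessTime, permission, header, and two strings per inode into strAttr (name, then parentName). Row i is therefore read from longAttr[6*i .. 6*i+5] plus strAttr[2*i] and strAttr[2*i+1], and the fileIds/fileAttr arguments are only consumed by the commented-out BatchUpdateINodes procedure call.

For reference, the new SetPermissionsV2 procedure can also be exercised directly with the stock VoltDB Java client, outside the NameNode path that reaches it through DatabaseINode.setPermissions(path, permission). The sketch below is illustrative only and is not part of this patch: the class name, host, sample path, and permission value are placeholders, and a production caller passes the packed permission long maintained by INodeWithAdditionalFields.

import org.voltdb.VoltTable;
import org.voltdb.client.Client;
import org.voltdb.client.ClientConfig;
import org.voltdb.client.ClientFactory;
import org.voltdb.client.ClientResponse;

public class SetPermissionsV2Smoke {
  public static void main(String[] args) throws Exception {
    // Placeholder connection target; point this at a real VoltDB node.
    Client client = ClientFactory.createClient(new ClientConfig());
    client.createConnection("localhost");

    // Same invocation DatabaseINode.setPermissions(path, permission) issues:
    // sql1 updates every inode whose parentName starts with the path (the subtree),
    // sql2 updates the inode for the path itself (parent + name).
    long permission = 0L;  // placeholder; the NameNode passes the packed permission field
    ClientResponse resp = client.callProcedure("SetPermissionsV2", "/dir1/dir2", permission);
    if (resp.getStatus() == ClientResponse.SUCCESS) {
      // A long-returning VoltProcedure comes back as a single-row, single-column table.
      VoltTable result = resp.getResults()[0];
      if (result.advanceRow()) {
        System.out.println("txnId: " + result.getLong(0));
      }
    }
    client.drain();
    client.close();
  }
}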