Tools fixes from CASSANDRA-18239
ekaterinadimitrova2 committed Oct 3, 2024
1 parent 5696964 · commit 76c8dd6
Showing 6 changed files with 77 additions and 32 deletions.
bin/cassandra.in.sh (2 changes: 1 addition & 1 deletion)
@@ -79,7 +79,7 @@ if [ -f "$CASSANDRA_HOME"/lib/jsr223/scala/scala-compiler.jar ] ; then
 fi
 
 # set JVM javaagent opts to avoid warnings/errors
-JAVA_AGENT="$JAVA_AGENT -javaagent:$CASSANDRA_HOME/lib/jamm-0.3.2.jar"
+JAVA_AGENT="$JAVA_AGENT -javaagent:$CASSANDRA_HOME/lib/jamm-0.4.0.jar"
 
 # Added sigar-bin to the java.library.path CASSANDRA-7838
 JAVA_OPTS="$JAVA_OPTS -Djava.library.path=$CASSANDRA_HOME/lib/sigar-bin"
conf/cassandra-env.sh (2 changes: 1 addition & 1 deletion)
@@ -191,7 +191,7 @@ fi
 JVM_OPTS="$JVM_OPTS -XX:CompileCommandFile=$CASSANDRA_CONF/hotspot_compiler"
 
 # add the jamm javaagent
-JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.3.2.jar"
+JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.4.0.jar"
 
 # set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR
 if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then
redhat/cassandra.in.sh (2 changes: 1 addition & 1 deletion)
@@ -40,7 +40,7 @@ CLASSPATH="$CLASSPATH:$EXTRA_CLASSPATH"
 
 
 # set JVM javaagent opts to avoid warnings/errors
-JAVA_AGENT="$JAVA_AGENT -javaagent:$CASSANDRA_HOME/lib/jamm-0.3.2.jar"
+JAVA_AGENT="$JAVA_AGENT -javaagent:$CASSANDRA_HOME/lib/jamm-0.4.0.jar"
 
 
 #
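All three scripts make the same one-line change: the javaagent flag moves from jamm 0.3.2 to 0.4.0 to match the upgraded library. A minimal sketch of why the flag matters, using only the jamm 0.4.0 builder calls that appear later in this commit (the class name and printouts are illustrative, not part of the commit):

import org.github.jamm.MemoryMeter;

public class JammAgentCheck
{
    public static void main(String[] args)
    {
        // Prefer exact sizes via java.lang.instrument, which is available only
        // when the JVM was started with -javaagent:.../jamm-0.4.0.jar; otherwise
        // fall back to Unsafe-based guessing, as the test code in this commit does.
        MemoryMeter meter = MemoryMeter.builder()
                                       .withGuessing(MemoryMeter.Guess.INSTRUMENTATION,
                                                     MemoryMeter.Guess.UNSAFE)
                                       .build();

        System.out.println("shallow size of Object: " + meter.measure(new Object()));
        System.out.println("deep size of a String:  " + meter.measureDeep("hello"));
    }
}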
src/java/org/apache/cassandra/audit/BinAuditLogger.java (7 changes: 6 additions & 1 deletion)
@@ -96,6 +96,11 @@ public void log(AuditLogEntry auditLogEntry)
     @VisibleForTesting
     public static class Message extends BinLog.ReleaseableWriteMarshallable implements WeightedQueue.Weighable
     {
+        /**
+         * The shallow size of a {@code Message} object.
+         */
+        private static final long EMPTY_SIZE = ObjectSizes.measure(new Message(""));
+
         private final String message;
 
         public Message(String message)
@@ -128,7 +133,7 @@ public void release()
         @Override
         public int weight()
         {
-            return Ints.checkedCast(ObjectSizes.sizeOf(message));
+            return Ints.checkedCast(EMPTY_SIZE + ObjectSizes.sizeOf(message));
         }
     }
 }
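The fix follows a simple pattern: measure the wrapper's shallow size once, against a sentinel instance, and add the variable-length payload at weigh time; the old code charged only the string and undercounted the Message object itself. A minimal standalone sketch of the same idea (the StringHolder class and weight() method are hypothetical; ObjectSizes.measure and ObjectSizes.sizeOf are the Cassandra utilities used in the diff):

import org.apache.cassandra.utils.ObjectSizes;

// Hypothetical holder class illustrating the EMPTY_SIZE pattern from the diff.
public final class StringHolder
{
    // Shallow size (object header + fields) measured once against a sentinel
    // instance; the sentinel's content does not matter because
    // ObjectSizes.measure() does not follow references.
    private static final long EMPTY_SIZE = ObjectSizes.measure(new StringHolder(""));

    private final String value;

    public StringHolder(String value)
    {
        this.value = value;
    }

    public long weight()
    {
        // Shallow size of the holder plus the deep size of the wrapped string.
        return EMPTY_SIZE + ObjectSizes.sizeOf(value);
    }
}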
src/java/org/apache/cassandra/fql/FullQueryLogger.java (93 changes: 66 additions & 27 deletions)
@@ -49,10 +49,8 @@
 import org.apache.cassandra.utils.binlog.BinLog;
 import org.apache.cassandra.utils.binlog.BinLogOptions;
 import org.apache.cassandra.utils.concurrent.WeightedQueue;
-import org.github.jamm.MemoryLayoutSpecification;
 
 import static com.google.common.base.Preconditions.checkNotNull;
-import static org.github.jamm.MemoryMeterStrategy.MEMORY_LAYOUT;
 
 /**
  * A logger that logs entire query contents after the query finishes (or times out).
@@ -85,9 +83,6 @@ public class FullQueryLogger implements QueryEvents.Listener
     private static final int EMPTY_LIST_SIZE = Ints.checkedCast(ObjectSizes.measureDeep(new ArrayList<>(0)));
     private static final int EMPTY_BYTEBUF_SIZE;
 
-    private static final int OBJECT_HEADER_SIZE = MEMORY_LAYOUT.getObjectHeaderSize();
-    private static final int OBJECT_REFERENCE_SIZE = MEMORY_LAYOUT.getReferenceSize();
-
     public static final FullQueryLogger instance = new FullQueryLogger();
 
     volatile BinLog binLog;
@@ -332,6 +327,11 @@ public void executeSuccess(CQLStatement statement, String query, QueryOptions options
 
     public static class Query extends AbstractLogEntry
     {
+        /**
+         * The shallow size of a {@code Query} object.
+         */
+        private static final long EMPTY_SIZE = ObjectSizes.measure(new Query());
+
         private final String query;
 
         public Query(String query, QueryOptions queryOptions, QueryState queryState, long queryStartTime)
@@ -340,6 +340,14 @@ public Query(String query, QueryOptions queryOptions, QueryState queryState, long queryStartTime)
             this.query = query;
         }
 
+        /**
+         * Constructor only used to compute this class's shallow size.
+         */
+        private Query()
+        {
+            this.query = null;
+        }
+
         @Override
         protected String type()
         {
@@ -356,12 +364,20 @@ public void writeMarshallablePayload(WireOut wire)
         @Override
         public int weight()
         {
-            return Ints.checkedCast(ObjectSizes.sizeOf(query)) + super.weight();
+            // Object deep size = object's shallow size + the query field's deep size + the deep size of the parent's fields
+            return Ints.checkedCast(EMPTY_SIZE + ObjectSizes.sizeOf(query) + super.fieldsSize());
         }
     }
 
     public static class Batch extends AbstractLogEntry
     {
+        /**
+         * The shallow size of a {@code Batch} object (which includes the primitive fields).
+         */
+        private static final long EMPTY_SIZE = ObjectSizes.measure(new Batch());
+        /**
+         * The weight is pre-computed in the constructor and represents the object's deep size.
+         */
         private final int weight;
         private final BatchStatement.Type batchType;
         private final List<String> queries;
@@ -380,25 +396,37 @@ public Batch(BatchStatement.Type batchType,
             this.values = values;
             this.batchType = batchType;
 
-            int weight = super.weight();
-
-            // weight, queries, values, batch type
-            weight += Integer.BYTES + // cached weight
-                      2 * EMPTY_LIST_SIZE + // queries + values lists
-                      3 * OBJECT_REFERENCE_SIZE; // batchType and two lists references
+            // We assume that all the lists are ArrayLists and that the size of each underlying array is the size of the list
+            // (which is obviously wrong, but no worse than the previous computation, which ignored part of the arrays' size).
+            long queriesSize = EMPTY_LIST_SIZE + ObjectSizes.sizeOfReferenceArray(queries.size());
 
             for (String query : queries)
-                weight += ObjectSizes.sizeOf(checkNotNull(query)) + OBJECT_REFERENCE_SIZE;
+                queriesSize += ObjectSizes.sizeOf(checkNotNull(query));
 
+            long valuesSize = EMPTY_LIST_SIZE + ObjectSizes.sizeOfReferenceArray(values.size());
+
             for (List<ByteBuffer> subValues : values)
             {
-                weight += EMPTY_LIST_SIZE + OBJECT_REFERENCE_SIZE;
-
-                for (ByteBuffer value : subValues)
-                    weight += ObjectSizes.sizeOnHeapOf(value) + OBJECT_REFERENCE_SIZE;
+                valuesSize += EMPTY_LIST_SIZE + ObjectSizes.sizeOfReferenceArray(subValues.size());
+                for (ByteBuffer subValue : subValues)
+                    valuesSize += ObjectSizes.sizeOnHeapOf(subValue);
             }
 
-            this.weight = weight;
+            // No need to add the batch type, which is an enum.
+            this.weight = Ints.checkedCast(EMPTY_SIZE // shallow size of the object
+                                           + super.fieldsSize() // deep size of the parent's non-primitive fields (primitives are included in the shallow size)
+                                           + queriesSize // deep size of the queries field
+                                           + valuesSize); // deep size of the values field
         }
+        /**
+         * Constructor only used to compute this class's shallow size.
+         */
+        private Batch()
+        {
+            this.weight = 0;
+            this.batchType = null;
+            this.queries = null;
+            this.values = null;
+        }
 
         @Override
@@ -483,6 +511,19 @@ private static abstract class AbstractLogEntry extends BinLog.ReleaseableWriteMarshallable
             }
         }
 
+        /**
+         * Constructor only used to compute the subclasses' shallow sizes.
+         */
+        private AbstractLogEntry()
+        {
+            this.queryStartTime = 0;
+            this.protocolVersion = 0;
+            this.queryOptionsBuffer = null;
+            this.generatedTimestamp = 0;
+            this.generatedNowInSeconds = 0;
+            this.keyspace = null;
+        }
+
         @Override
         protected long version()
         {
@@ -508,16 +549,14 @@ public void release()
             queryOptionsBuffer.release();
         }
 
-        @Override
-        public int weight()
+        /**
+         * Returns the sum of the non-primitive fields' deep sizes.
+         * @return the sum of the non-primitive fields' deep sizes.
+         */
+        protected long fieldsSize()
         {
-            return OBJECT_HEADER_SIZE
-                   + Long.BYTES // queryStartTime
-                   + Integer.BYTES // protocolVersion
-                   + OBJECT_REFERENCE_SIZE + EMPTY_BYTEBUF_SIZE + queryOptionsBuffer.capacity() // queryOptionsBuffer
-                   + Long.BYTES // generatedTimestamp
-                   + Integer.BYTES // generatedNowInSeconds
-                   + OBJECT_REFERENCE_SIZE + Ints.checkedCast(ObjectSizes.sizeOf(keyspace)); // keyspace
+            return EMPTY_BYTEBUF_SIZE + queryOptionsBuffer.capacity() // queryOptionsBuffer
+                   + ObjectSizes.sizeOf(keyspace); // keyspace
         }
     }
 
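Taken together, the FullQueryLogger changes replace hand-summed header and reference arithmetic (OBJECT_HEADER_SIZE, OBJECT_REFERENCE_SIZE) with two measured constants plus per-field deep sizes; the private no-arg constructors exist only so each class can measure its own shallow size at class-load time. A standalone sketch of the Batch-style accounting, assuming it runs on a Cassandra classpath with the jamm agent enabled (the class and variable names are illustrative; the ObjectSizes calls are the ones used in the diff):

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

import org.apache.cassandra.utils.ObjectSizes;

public class BatchWeightSketch
{
    // Deep size of an empty ArrayList, measured once, as in FullQueryLogger.
    private static final long EMPTY_LIST_SIZE = ObjectSizes.measureDeep(new ArrayList<>(0));

    public static void main(String[] args)
    {
        List<String> queries = new ArrayList<>(List.of("INSERT ...", "UPDATE ..."));
        List<List<ByteBuffer>> values = new ArrayList<>();
        values.add(new ArrayList<>(List.of(ByteBuffer.allocate(8))));
        values.add(new ArrayList<>());

        // Same accounting as the new Batch constructor: list shell plus backing
        // reference array, then the deep size of every element.
        long queriesSize = EMPTY_LIST_SIZE + ObjectSizes.sizeOfReferenceArray(queries.size());
        for (String query : queries)
            queriesSize += ObjectSizes.sizeOf(query);

        long valuesSize = EMPTY_LIST_SIZE + ObjectSizes.sizeOfReferenceArray(values.size());
        for (List<ByteBuffer> subValues : values)
        {
            valuesSize += EMPTY_LIST_SIZE + ObjectSizes.sizeOfReferenceArray(subValues.size());
            for (ByteBuffer subValue : subValues)
                valuesSize += ObjectSizes.sizeOnHeapOf(subValue);
        }

        System.out.println("queries: " + queriesSize + " bytes, values: " + valuesSize + " bytes");
    }
}

The final weight is then just EMPTY_SIZE + fieldsSize() + queriesSize + valuesSize, cached in a field so the weighted queue never has to re-walk the object graph.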
MemtableSizeTestBase.java (3 changes: 2 additions & 1 deletion)
@@ -45,10 +45,11 @@
 public abstract class MemtableSizeTestBase extends CQLTester
 {
     // The meter in ObjectSizes uses omitSharedBufferOverhead, which counts off-heap data too
-    // Note: To see a printout of the usage for each object, add .enableDebug() here (most useful with a smaller number of
+    // Note: To see a printout of the usage for each object, add .printVisitedTree() here (most useful with a smaller number of
     // partitions).
     private static final MemoryMeter meter = MemoryMeter.builder()
                                                          .withGuessing(MemoryMeter.Guess.INSTRUMENTATION, MemoryMeter.Guess.UNSAFE)
+                                                         // .printVisitedTreeUpTo(1000)
                                                          .build();
 
     static String keyspace;
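For debugging size mismatches, the jamm 0.4.0 builder replaces the old enableDebug() hook with the printVisitedTree variants, as the updated comment notes. A minimal sketch using only the builder calls visible in this diff (the 1000-object cap mirrors the commented-out line above; the sample data is illustrative):

import java.util.ArrayList;
import java.util.List;

import org.github.jamm.MemoryMeter;

public class VisitedTreeDemo
{
    public static void main(String[] args)
    {
        MemoryMeter meter = MemoryMeter.builder()
                                       .withGuessing(MemoryMeter.Guess.INSTRUMENTATION,
                                                     MemoryMeter.Guess.UNSAFE)
                                       // Print every visited object and its size while
                                       // measuring, capped at 1000 objects.
                                       .printVisitedTreeUpTo(1000)
                                       .build();

        List<String> sample = new ArrayList<>(List.of("a", "bb", "ccc"));
        System.out.println("deep size: " + meter.measureDeep(sample));
    }
}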

1 comment on commit 76c8dd6

@cassci-bot

Build rejected: 17 NEW test failure(s) in 2 builds. Build 2: ran 17719 tests with 29 failures and 128 skipped.
Butler analysis done on ds-cassandra-pr-gate/CNDB-11144 vs last 16 runs of ds-cassandra-build-nightly/main.
Showing only first 13 NEW test failures
org.apache.cassandra.index.sai.disk.NodeStartupTest.startupOrderingTest[INDEXABLE_ROWS PER_SSTABLE_CORRUPT PRE_JOIN_RUNS_AFTER_BUILD]: test is constantly failing. No failures on upstream;
branch story: [FF] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.index.sai.disk.NodeStartupTest.startupOrderingTest[INDEXABLE_ROWS PER_SSTABLE_CORRUPT PRE_JOIN_RUNS_MID_BUILD]: test is constantly failing. No failures on upstream;
branch story: [FF] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.index.sai.cql.VectorSiftSmallTest.testMultiSegmentBuild: test failed in the recent build. No failures on upstream;
branch story: [F+] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.index.sai.plan.PlanTest.testLazyAccessPropagation: test is constantly failing. No failures on upstream;
branch story: [FF] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.db.memtable.MemtableSizeHeapBuffersTest.testSize[SkipListMemtable]: test is constantly failing. No failures on upstream;
branch story: [FF] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.index.sai.disk.NodeStartupTest.startupOrderingTest[INDEXABLE_ROWS PER_COLUMN_CORRUPT PRE_JOIN_RUNS_AFTER_BUILD]: test is constantly failing. No failures on upstream;
branch story: [FF] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.index.sai.functional.SnapshotTest.shouldTakeAndRestoreSnapshots: test is constantly failing. No failures on upstream;
branch story: [FF] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.index.sai.disk.NodeStartupTest.startupOrderingTest[INDEXABLE_ROWS PER_SSTABLE_CORRUPT PRE_JOIN_RUNS_BEFORE_BUILD]: test is constantly failing. No failures on upstream;
branch story: [FF] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.index.sai.functional.SnapshotTest.shouldSnapshotAfterIndexBuild: test is constantly failing. No failures on upstream;
branch story: [FF] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.index.sai.disk.NodeStartupTest.startupOrderingTest[INDEXABLE_ROWS PER_COLUMN_CORRUPT PRE_JOIN_RUNS_MID_BUILD]: test is constantly failing. No failures on upstream;
branch story: [FF] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.index.sai.disk.NodeStartupTest.startupOrderingTest[INDEXABLE_ROWS PER_COLUMN_CORRUPT PRE_JOIN_RUNS_BEFORE_BUILD]: test is constantly failing. No failures on upstream;
branch story: [FF] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.db.ClusteringHeapSizeTest.unsharedHeap[org.apache.cassandra.db.BufferClustering@34cdca70]: test is constantly failing. No results on upstream;
branch story: [FF] vs upstream: []; [NEW]
org.apache.cassandra.index.sai.cql.TinySegmentQueryWriteLifecycleTest.testWriteLifecycle[aa_CompoundKeyWithStaticsDataModel{primaryKey=p, c}]: test is constantly failing. No failures on upstream;
branch story: [F] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
butler comparison
