From de244c6cc6ca6a8ef0d0db6abdfdc77b077c5b4f Mon Sep 17 00:00:00 2001 From: Ryo Kanbayashi Date: Thu, 22 Aug 2024 19:37:39 +0900 Subject: [PATCH] implementing bree index: added testcase of BTreeIndex. --- lib/catalog/table_metadata.go | 7 ++ .../btree_index_executor_test.go | 110 ++++++++++++++++++ .../skiplist_index_executor_test.go | 20 ++-- 3 files changed, 127 insertions(+), 10 deletions(-) create mode 100644 lib/execution/executors/executor_test/btree_index_executor_test.go diff --git a/lib/catalog/table_metadata.go b/lib/catalog/table_metadata.go index afdb6dff..3ad4c142 100644 --- a/lib/catalog/table_metadata.go +++ b/lib/catalog/table_metadata.go @@ -63,6 +63,13 @@ func NewTableMetadata(schema *schema.Schema, name string, table *access.TableHea indexes = append(indexes, slIdx) //column_.SetIndexHeaderPageId(slIdx.GetHeaderPageId()) + case index_constants.INDEX_KIND_BTREE: + // currently, BTree Index always use new pages even if relaunch + im := index.NewIndexMetadata(column_.GetColumnName()+"_index", name, schema, []uint32{uint32(idx)}) + // TODO: (SDB) need to add index headae ID argument like HashIndex (NewTableMetadata) + slIdx := index.NewBTreeIndex(im, table.GetBufferPoolManager(), uint32(idx), log_manager) + + indexes = append(indexes, slIdx) default: panic("illegal index kind!") } diff --git a/lib/execution/executors/executor_test/btree_index_executor_test.go b/lib/execution/executors/executor_test/btree_index_executor_test.go new file mode 100644 index 00000000..577d2114 --- /dev/null +++ b/lib/execution/executors/executor_test/btree_index_executor_test.go @@ -0,0 +1,110 @@ +package executor_test + +import ( + "fmt" + "github.com/ryogrid/SamehadaDB/lib/catalog" + "github.com/ryogrid/SamehadaDB/lib/common" + "github.com/ryogrid/SamehadaDB/lib/samehada" + "github.com/ryogrid/SamehadaDB/lib/storage/index/index_constants" + "github.com/ryogrid/SamehadaDB/lib/storage/table/column" + "github.com/ryogrid/SamehadaDB/lib/storage/table/schema" + testingpkg 
"github.com/ryogrid/SamehadaDB/lib/testing/testing_assert" + "github.com/ryogrid/SamehadaDB/lib/types" + "os" + "testing" +) + +func testKeyDuplicateInsertDeleteWithBTreeIndex[T float32 | int32 | string](t *testing.T, keyType types.TypeID) { + if !common.EnableOnMemStorage { + os.Remove(t.Name() + ".db") + os.Remove(t.Name() + ".log") + } + + shi := samehada.NewSamehadaInstance(t.Name(), 500) + shi.GetLogManager().ActivateLogging() + testingpkg.Assert(t, shi.GetLogManager().IsEnabledLogging(), "") + fmt.Println("System logging is active.") + txnMgr := shi.GetTransactionManager() + + txn := txnMgr.Begin(nil) + + c := catalog.BootstrapCatalog(shi.GetBufferPoolManager(), shi.GetLogManager(), shi.GetLockManager(), txn) + + columnA := column.NewColumn("account_id", keyType, true, index_constants.INDEX_KIND_BTREE, types.PageID(-1), nil) + columnB := column.NewColumn("balance", types.Integer, true, index_constants.INDEX_KIND_BTREE, types.PageID(-1), nil) + schema_ := schema.NewSchema([]*column.Column{columnA, columnB}) + tableMetadata := c.CreateTable("test_1", schema_, txn) + + txnMgr.Commit(c, txn) + + txn = txnMgr.Begin(nil) + + var accountId interface{} + switch keyType { + case types.Integer: + accountId = int32(10) + case types.Float: + accountId = float32(-5.2) + case types.Varchar: + accountId = "duplicateTest" + default: + panic("unsupported value type") + } + + insPlan1 := createSpecifiedValInsertPlanNode(accountId.(T), int32(100), c, tableMetadata, keyType) + result := executePlan(c, shi.GetBufferPoolManager(), txn, insPlan1) + insPlan2 := createSpecifiedValInsertPlanNode(accountId.(T), int32(101), c, tableMetadata, keyType) + result = executePlan(c, shi.GetBufferPoolManager(), txn, insPlan2) + insPlan3 := createSpecifiedValInsertPlanNode(accountId.(T), int32(102), c, tableMetadata, keyType) + result = executePlan(c, shi.GetBufferPoolManager(), txn, insPlan3) + + txnMgr.Commit(c, txn) + + txn = txnMgr.Begin(nil) + + // point scans must go through the BTree index under test (columns were created with INDEX_KIND_BTREE above) + scanP := 
createSpecifiedPointScanPlanNode(accountId.(T), c, tableMetadata, keyType, index_constants.INDEX_KIND_BTREE) + result = executePlan(c, shi.GetBufferPoolManager(), txn, scanP) + testingpkg.Assert(t, len(result) == 3, "duplicated key point scan got illegal results.") + rid1 := result[0].GetRID() + val0_1 := result[0].GetValue(tableMetadata.Schema(), 0) + val0_2 := result[0].GetValue(tableMetadata.Schema(), 1) + fmt.Println(val0_1, val0_2) + rid2 := result[1].GetRID() + rid3 := result[2].GetRID() + fmt.Printf("%v %v %v\n", *rid1, *rid2, *rid3) + + indexCol1 := tableMetadata.GetIndex(0) + indexCol2 := tableMetadata.GetIndex(1) + + indexCol1.DeleteEntry(result[0], *rid1, txn) + indexCol2.DeleteEntry(result[0], *rid1, txn) + scanP = createSpecifiedPointScanPlanNode(accountId.(T), c, tableMetadata, keyType, index_constants.INDEX_KIND_BTREE) + result = executePlan(c, shi.GetBufferPoolManager(), txn, scanP) + testingpkg.Assert(t, len(result) == 2, "duplicated key point scan got illegal results.") + + indexCol1.DeleteEntry(result[0], *rid2, txn) + indexCol2.DeleteEntry(result[0], *rid2, txn) + scanP = createSpecifiedPointScanPlanNode(accountId.(T), c, tableMetadata, keyType, index_constants.INDEX_KIND_BTREE) + result = executePlan(c, shi.GetBufferPoolManager(), txn, scanP) + testingpkg.Assert(t, len(result) == 1, "duplicated key point scan got illegal results.") + + indexCol1.DeleteEntry(result[0], *rid3, txn) + indexCol2.DeleteEntry(result[0], *rid3, txn) + scanP = createSpecifiedPointScanPlanNode(accountId.(T), c, tableMetadata, keyType, index_constants.INDEX_KIND_BTREE) + result = executePlan(c, shi.GetBufferPoolManager(), txn, scanP) + testingpkg.Assert(t, len(result) == 0, "duplicated key point scan got illegal results.") + + txnMgr.Commit(c, txn) +} + +func TestKeyDuplicateInsertDeleteWithBTreeIndexInt(t *testing.T) { + testKeyDuplicateInsertDeleteWithBTreeIndex[int32](t, types.Integer) +} + +func TestKeyDuplicateInsertDeleteWithBTreeIndexFloat(t 
*testing.T) { + testKeyDuplicateInsertDeleteWithBTreeIndex[float32](t, types.Float) +} + +func TestKeyDuplicateInsertDeleteWithBTreeIndexVarchar(t *testing.T) { + testKeyDuplicateInsertDeleteWithBTreeIndex[string](t, types.Varchar) +} diff --git a/lib/execution/executors/executor_test/skiplist_index_executor_test.go b/lib/execution/executors/executor_test/skiplist_index_executor_test.go index 7ba51730..c3caf674 100644 --- a/lib/execution/executors/executor_test/skiplist_index_executor_test.go +++ b/lib/execution/executors/executor_test/skiplist_index_executor_test.go @@ -117,7 +117,7 @@ func getNotDupWithAccountRandomPrimitivVal[T int32 | float32 | string](keyType t return retVal } -func testParallelTxnsQueryingSkipListIndexUsedColumns[T int32 | float32 | string](t *testing.T, keyType types.TypeID, stride int32, opTimes int32, seedVal int32, initialEntryNum int32, bpoolSize int32, indexKind index_constants.IndexKind, execType int32, threadNum int) { +func testParallelTxnsQueryingIndexUsedColumns[T int32 | float32 | string](t *testing.T, keyType types.TypeID, stride int32, opTimes int32, seedVal int32, initialEntryNum int32, bpoolSize int32, indexKind index_constants.IndexKind, execType int32, threadNum int) { common.ShPrintf(common.DEBUG_INFO, "start of testParallelTxnsQueryingUniqSkipListIndexUsedColumns stride=%d opTimes=%d seedVal=%d initialEntryNum=%d bpoolSize=%d ====================================================\n", stride, opTimes, seedVal, initialEntryNum, bpoolSize) @@ -1252,24 +1252,24 @@ func testSkipListParallelTxnStrideRoot[T int32 | float32 | string](t *testing.T, //testParallelTxnsQueryingUniqSkipListIndexUsedColumns[T](t, keyType, 400, 30000, 13, 0, bpoolSize, index_constants.INDEX_KIND_UNIQ_SKIP_LIST, PARALLEL_EXEC, 20) //testParallelTxnsQueryingUniqSkipListIndexUsedColumns[T](t, keyType, 400, 30000, 13, 0, bpoolSize, index_constants.INDEX_KIND_UNIQ_SKIP_LIST, PARALLEL_EXEC, 20) - //testParallelTxnsQueryingSkipListIndexUsedColumns[T](t, 
keyType, 400, 30000, 13, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, PARALLEL_EXEC, 20) - //testParallelTxnsQueryingSkipListIndexUsedColumns[T](t, keyType, 400, 30000, 13, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, SERIAL_EXEC, 20) - //testParallelTxnsQueryingSkipListIndexUsedColumns[T](t, keyType, 400, 300, 13, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, SERIAL_EXEC, 20) - //testParallelTxnsQueryingSkipListIndexUsedColumns[T](t, keyType, 400, 3000, 13, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, SERIAL_EXEC, 20) - testParallelTxnsQueryingSkipListIndexUsedColumns[T](t, keyType, 400, 3000, 13, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, PARALLEL_EXEC, 20) + //testParallelTxnsQueryingIndexUsedColumns[T](t, keyType, 400, 30000, 13, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, PARALLEL_EXEC, 20) + //testParallelTxnsQueryingIndexUsedColumns[T](t, keyType, 400, 30000, 13, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, SERIAL_EXEC, 20) + //testParallelTxnsQueryingIndexUsedColumns[T](t, keyType, 400, 300, 13, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, SERIAL_EXEC, 20) + //testParallelTxnsQueryingIndexUsedColumns[T](t, keyType, 400, 3000, 13, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, SERIAL_EXEC, 20) + testParallelTxnsQueryingIndexUsedColumns[T](t, keyType, 400, 3000, 13, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, PARALLEL_EXEC, 20) - //testParallelTxnsQueryingSkipListIndexUsedColumns[T](t, keyType, 400, 3000, 13, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, PARALLEL_EXEC, 20) + //testParallelTxnsQueryingIndexUsedColumns[T](t, keyType, 400, 3000, 13, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, PARALLEL_EXEC, 20) case types.Float: //testParallelTxnsQueryingUniqSkipListIndexUsedColumns[T](t, keyType, 400, 30000, 13, 0, bpoolSize, index_constants.INDEX_KIND_UNIQ_SKIP_LIST, PARALLEL_EXEC, 20) - testParallelTxnsQueryingSkipListIndexUsedColumns[T](t, keyType, 240, 1000, 13, 0, 
bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, PARALLEL_EXEC, 20) + testParallelTxnsQueryingIndexUsedColumns[T](t, keyType, 240, 1000, 13, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, PARALLEL_EXEC, 20) case types.Varchar: //testParallelTxnsQueryingUniqSkipListIndexUsedColumns[T](t, keyType, 400, 400, 13, 0, bpoolSize, index_constants.INDEX_KIND_INVALID, PARALLEL_EXEC, 20) //testParallelTxnsQueryingUniqSkipListIndexUsedColumns[T](t, keyType, 400, 3000, 13, 0, bpoolSize, index_constants.INDEX_KIND_UNIQ_SKIP_LIST, PARALLEL_EXEC, 20) //testParallelTxnsQueryingUniqSkipListIndexUsedColumns[T](t, keyType, 400, 90000, 17, 0, bpoolSize, index_constants.INDEX_KIND_UNIQ_SKIP_LIST, PARALLEL_EXEC, 20) - //testParallelTxnsQueryingSkipListIndexUsedColumns[T](t, keyType, 400, 5000, 17, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, PARALLEL_EXEC, 20) - testParallelTxnsQueryingSkipListIndexUsedColumns[T](t, keyType, 400, 5000, 17, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, PARALLEL_EXEC, 20) + //testParallelTxnsQueryingIndexUsedColumns[T](t, keyType, 400, 5000, 17, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, PARALLEL_EXEC, 20) + testParallelTxnsQueryingIndexUsedColumns[T](t, keyType, 400, 5000, 17, 0, bpoolSize, index_constants.INDEX_KIND_SKIP_LIST, PARALLEL_EXEC, 20) //testParallelTxnsQueryingUniqSkipListIndexUsedColumns[T](t, keyType, 400, 50000, 17, 0, bpoolSize, index_constants.INDEX_KIND_UNIQ_SKIP_LIST, PARALLEL_EXEC, 20) //testParallelTxnsQueryingUniqSkipListIndexUsedColumns[T](t, keyType, 400, 200000, 11, 0, bpoolSize, index_constants.INDEX_KIND_UNIQ_SKIP_LIST, PARALLEL_EXEC, 20)