implementing btree index: debugging (1).
ryogrid committed Aug 23, 2024
1 parent c31f0b2 commit 91bda35
Showing 5 changed files with 78 additions and 63 deletions.
75 changes: 41 additions & 34 deletions lib/execution/executors/executor_test/btree_index_executor_test.go
@@ -53,49 +53,56 @@ func testKeyDuplicateInsertDeleteWithBTreeIndex[T float32 | int32 | string](t *t

insPlan1 := createSpecifiedValInsertPlanNode(accountId.(T), int32(100), c, tableMetadata, keyType)
result := executePlan(c, shi.GetBufferPoolManager(), txn, insPlan1)
fmt.Println(result)
insPlan2 := createSpecifiedValInsertPlanNode(accountId.(T), int32(101), c, tableMetadata, keyType)
result = executePlan(c, shi.GetBufferPoolManager(), txn, insPlan2)
fmt.Println(result)
insPlan3 := createSpecifiedValInsertPlanNode(accountId.(T), int32(102), c, tableMetadata, keyType)
result = executePlan(c, shi.GetBufferPoolManager(), txn, insPlan3)
fmt.Println(result)

txnMgr.Commit(c, txn)

txn = txnMgr.Begin(nil)

scanP := createSpecifiedPointScanPlanNode(accountId.(T), c, tableMetadata, keyType, index_constants.INDEX_KIND_SKIP_LIST)
result = executePlan(c, shi.GetBufferPoolManager(), txn, scanP)
testingpkg.Assert(t, len(result) == 3, "duplicated key point scan got illegal results.")
rid1 := result[0].GetRID()
val0_1 := result[0].GetValue(tableMetadata.Schema(), 0)
val0_2 := result[0].GetValue(tableMetadata.Schema(), 1)
fmt.Println(val0_1, val0_2)
rid2 := result[1].GetRID()
rid3 := result[2].GetRID()
fmt.Printf("%v %v %v\n", *rid1, *rid2, *rid3)

indexCol1 := tableMetadata.GetIndex(0)
indexCol2 := tableMetadata.GetIndex(1)

indexCol1.DeleteEntry(result[0], *rid1, txn)
indexCol2.DeleteEntry(result[0], *rid1, txn)
scanP = createSpecifiedPointScanPlanNode(accountId.(T), c, tableMetadata, keyType, index_constants.INDEX_KIND_SKIP_LIST)
result = executePlan(c, shi.GetBufferPoolManager(), txn, scanP)
testingpkg.Assert(t, len(result) == 2, "duplicated key point scan got illegal results.")

indexCol1.DeleteEntry(result[0], *rid2, txn)
indexCol2.DeleteEntry(result[0], *rid2, txn)
scanP = createSpecifiedPointScanPlanNode(accountId.(T), c, tableMetadata, keyType, index_constants.INDEX_KIND_SKIP_LIST)
result = executePlan(c, shi.GetBufferPoolManager(), txn, scanP)
testingpkg.Assert(t, len(result) == 1, "duplicated key point scan got illegal results.")

indexCol1.DeleteEntry(result[0], *rid3, txn)
indexCol2.DeleteEntry(result[0], *rid3, txn)
scanP = createSpecifiedPointScanPlanNode(accountId.(T), c, tableMetadata, keyType, index_constants.INDEX_KIND_SKIP_LIST)
result = executePlan(c, shi.GetBufferPoolManager(), txn, scanP)
testingpkg.Assert(t, len(result) == 0, "duplicated key point scan got illegal results.")

txnMgr.Commit(c, txn)
shi.Shutdown(samehada.ShutdownPatternCloseFiles)
rangeScanP := createSpecifiedRangeScanPlanNode[T](c, tableMetadata, keyType, 0, nil, nil, index_constants.INDEX_KIND_BTREE)
results := executePlan(c, shi.GetBufferPoolManager(), txn, rangeScanP)
fmt.Println(results)

//scanP := createSpecifiedPointScanPlanNode(accountId.(T), c, tableMetadata, keyType, index_constants.INDEX_KIND_BTREE)
//result = executePlan(c, shi.GetBufferPoolManager(), txn, scanP)
//testingpkg.Assert(t, len(result) == 3, "duplicated key point scan got illegal results.")
//rid1 := result[0].GetRID()
//val0_1 := result[0].GetValue(tableMetadata.Schema(), 0)
//val0_2 := result[0].GetValue(tableMetadata.Schema(), 1)
//fmt.Println(val0_1, val0_2)
//rid2 := result[1].GetRID()
//rid3 := result[2].GetRID()
//fmt.Printf("%v %v %v\n", *rid1, *rid2, *rid3)
//
//indexCol1 := tableMetadata.GetIndex(0)
//indexCol2 := tableMetadata.GetIndex(1)
//
//indexCol1.DeleteEntry(result[0], *rid1, txn)
//indexCol2.DeleteEntry(result[0], *rid1, txn)
//scanP = createSpecifiedPointScanPlanNode(accountId.(T), c, tableMetadata, keyType, index_constants.INDEX_KIND_BTREE)
//result = executePlan(c, shi.GetBufferPoolManager(), txn, scanP)
//testingpkg.Assert(t, len(result) == 2, "duplicated key point scan got illegal results.")
//
//indexCol1.DeleteEntry(result[0], *rid2, txn)
//indexCol2.DeleteEntry(result[0], *rid2, txn)
//scanP = createSpecifiedPointScanPlanNode(accountId.(T), c, tableMetadata, keyType, index_constants.INDEX_KIND_BTREE)
//result = executePlan(c, shi.GetBufferPoolManager(), txn, scanP)
//testingpkg.Assert(t, len(result) == 1, "duplicated key point scan got illegal results.")
//
//indexCol1.DeleteEntry(result[0], *rid3, txn)
//indexCol2.DeleteEntry(result[0], *rid3, txn)
//scanP = createSpecifiedPointScanPlanNode(accountId.(T), c, tableMetadata, keyType, index_constants.INDEX_KIND_BTREE)
//result = executePlan(c, shi.GetBufferPoolManager(), txn, scanP)
//testingpkg.Assert(t, len(result) == 0, "duplicated key point scan got illegal results.")
//
//txnMgr.Commit(c, txn)
//shi.Shutdown(samehada.ShutdownPatternCloseFiles)
}

func TestKeyDuplicateInsertDeleteWithBTreeIndexInt(t *testing.T) {
@@ -684,7 +684,7 @@ func createSpecifiedPointScanPlanNode[T int32 | float32 | string](getKeyVal T, c
switch indexKind {
case index_constants.INDEX_KIND_INVALID:
skipListPointScanP = plans.NewSeqScanPlanNode(c, tm.Schema(), expression_.(*expression.Comparison), tm.OID())
case index_constants.INDEX_KIND_UNIQ_SKIP_LIST, index_constants.INDEX_KIND_SKIP_LIST:
case index_constants.INDEX_KIND_UNIQ_SKIP_LIST, index_constants.INDEX_KIND_SKIP_LIST, index_constants.INDEX_KIND_BTREE:
skipListPointScanP = plans.NewPointScanWithIndexPlanNode(c, tm.Schema(), expression_.(*expression.Comparison), tm.OID())
default:
panic("not implemented!")
@@ -700,7 +700,7 @@ func createSpecifiedRangeScanPlanNode[T int32 | float32 | string](c *catalog.Cat
switch indexKind {
case index_constants.INDEX_KIND_INVALID:
skipListRangeScanP = plans.NewSeqScanPlanNode(c, tm.Schema(), nil, tm.OID())
case index_constants.INDEX_KIND_UNIQ_SKIP_LIST, index_constants.INDEX_KIND_SKIP_LIST:
case index_constants.INDEX_KIND_UNIQ_SKIP_LIST, index_constants.INDEX_KIND_SKIP_LIST, index_constants.INDEX_KIND_BTREE:
if rangeStartKey != nil {
startVal = samehada_util.GetPonterOfValue(types.NewValue(*rangeStartKey))
}
2 changes: 1 addition & 1 deletion lib/samehada/samehada_util/samehada_util.go
@@ -241,7 +241,7 @@ func encodeToDicOrderComparableBytes(orgVal interface{}, valType types.TypeID) [
buf := new(bytes.Buffer)
binary.Write(buf, binary.BigEndian, u)
return buf.Bytes()
case valType:
case types.Integer:
i := orgVal.(int32)
u := uint32(i)
buf := new(bytes.Buffer)
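A note on the one-line fix above: the switch in encodeToDicOrderComparableBytes appears to be over valType, so the old `case valType:` arm compared the switch value against itself and matched regardless of the actual type; `case types.Integer:` restores the intended integer-only path. The purpose of the function, as its name suggests, is to emit bytes whose dictionary order matches the numeric order of the key, which is what a byte-keyed B-tree needs for correct range scans. The sketch below is a minimal, self-contained illustration of that technique for int32; the sign-bit flip used to keep negative values ordered before positive ones is an assumption about how such an encoding is typically completed, not a claim about the collapsed lines of the actual function.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeInt32OrderPreserving returns bytes whose bytewise (dictionary) order
// matches the numeric order of the input. Flipping the sign bit maps int32
// onto uint32 so negatives sort before positives; big-endian layout then
// makes bytes.Compare agree with numeric comparison.
func encodeInt32OrderPreserving(i int32) []byte {
	u := uint32(i) ^ 0x80000000 // sign-bit flip (illustrative assumption)
	buf := new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, u)
	return buf.Bytes()
}

func main() {
	a := encodeInt32OrderPreserving(-5)
	b := encodeInt32OrderPreserving(3)
	fmt.Println(bytes.Compare(a, b) < 0) // true: -5 sorts before 3
}
```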
20 changes: 14 additions & 6 deletions lib/storage/index/btree_index.go
@@ -32,7 +32,7 @@ func (btreeItr *BtreeIndexIterator) Next() (done bool, err error, key *types.Val
}
uintRID := binary.BigEndian.Uint64(packedRID)
unpackedRID := samehada_util.UnpackUint64toRID(uintRID)
decodedKey := samehada_util.ExtractOrgKeyFromDicOrderComparableEncodedVarchar(samehada_util.GetPonterOfValue(types.NewVarchar(string(keyBytes))), btreeItr.valType)
decodedKey := samehada_util.ExtractOrgKeyFromDicOrderComparableEncodedVarchar(types.NewValueFromBytes(keyBytes, types.Varchar), btreeItr.valType)
return false, nil, decodedKey, &unpackedRID
}

@@ -78,7 +78,7 @@ func (btidx *BTreeIndex) insertEntryInner(key *tuple.Tuple, rid page.RID, txn in
packedRID := samehada_util.PackRIDtoUint64(&rid)
var valBuf [8]byte
binary.BigEndian.PutUint64(valBuf[:], packedRID)
btidx.container.InsertKey(convedKeyVal.SerializeOnlyVal(), 0, valBuf, true)
btidx.container.InsertKey(convedKeyVal.Serialize(), 0, valBuf, true)
}

func (btidx *BTreeIndex) InsertEntry(key *tuple.Tuple, rid page.RID, txn interface{}) {
@@ -95,7 +95,7 @@ func (btidx *BTreeIndex) deleteEntryInner(key *tuple.Tuple, rid page.RID, txn in
btidx.updateMtx.RLock()
defer btidx.updateMtx.RUnlock()
}
btidx.container.DeleteKey(convedKeyVal.SerializeOnlyVal(), 0)
btidx.container.DeleteKey(convedKeyVal.Serialize(), 0)
}

func (btidx *BTreeIndex) DeleteEntry(key *tuple.Tuple, rid page.RID, txn interface{}) {
@@ -110,7 +110,7 @@ func (btidx *BTreeIndex) ScanKey(key *tuple.Tuple, txn interface{}) []page.RID {

btidx.updateMtx.RLock()
// Attention: returned itr's containing keys are string type Value which is constructed with byte arr of concatenated original key and value
rangeItr := btidx.container.GetRangeItr(smallestKeyVal.SerializeOnlyVal(), biggestKeyVal.SerializeOnlyVal())
rangeItr := btidx.container.GetRangeItr(smallestKeyVal.Serialize(), biggestKeyVal.Serialize())

retArr := make([]page.RID, 0)
for ok, _, packedRID := rangeItr.Next(); ok; ok, _, packedRID = rangeItr.Next() {
@@ -135,7 +135,6 @@ func (btidx *BTreeIndex) UpdateEntry(oldKey *tuple.Tuple, oldRID page.RID, newKe
// Attention: returned itr's containing keys are string type Value which is constructed with byte arr of concatenated original key and value
func (btidx *BTreeIndex) GetRangeScanIterator(start_key *tuple.Tuple, end_key *tuple.Tuple, transaction interface{}) IndexRangeScanIterator {
tupleSchema_ := btidx.GetTupleSchema()
// TODO: (SDB) need to handle start_key or/and end_key is nil case
var smallestKeyVal *types.Value = nil
if start_key != nil {
orgStartKeyVal := start_key.GetValue(tupleSchema_, btidx.col_idx)
@@ -150,7 +149,16 @@

btidx.updateMtx.RLock()
defer btidx.updateMtx.RUnlock()
return NewBtreeIndexIterator(btidx.container.GetRangeItr(smallestKeyVal.SerializeOnlyVal(), biggestKeyVal.SerializeOnlyVal()), btidx.metadata.tuple_schema.GetColumn(btidx.col_idx).GetType())
var smalledKeyBytes []byte
var biggestKeyBytes []byte

if smallestKeyVal != nil {
smalledKeyBytes = smallestKeyVal.Serialize()
}
if biggestKeyVal != nil {
biggestKeyBytes = biggestKeyVal.Serialize()
}
return NewBtreeIndexIterator(btidx.container.GetRangeItr(smalledKeyBytes, biggestKeyBytes), btidx.metadata.tuple_schema.GetColumn(btidx.col_idx).GetType())
}

// Return the metadata object associated with the index
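For orientation on the btree_index.go changes above: the value stored next to each key is the tuple's RID packed into a uint64; insertEntryInner writes it as 8 big-endian bytes (PackRIDtoUint64 plus binary.BigEndian.PutUint64), and BtreeIndexIterator.Next reverses the steps with binary.BigEndian.Uint64 and UnpackUint64toRID. The sketch below is a self-contained illustration of that round trip; the stand-in rid type and the 32/32-bit split between page id and slot are assumptions made for illustration, not a claim about how PackRIDtoUint64 actually lays out the bits.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// rid is a stand-in for page.RID: a page id plus a slot number within that page.
// The 32-bit field widths are an illustrative assumption.
type rid struct {
	pageID  uint32
	slotNum uint32
}

// pack puts the page id in the high 32 bits and the slot in the low 32 bits,
// mirroring the pack/unpack round trip used by the index code in the diff.
func pack(r rid) uint64 {
	return uint64(r.pageID)<<32 | uint64(r.slotNum)
}

func unpack(u uint64) rid {
	return rid{pageID: uint32(u >> 32), slotNum: uint32(u)}
}

func main() {
	orig := rid{pageID: 7, slotNum: 42}

	// Serialize the packed value as 8 big-endian bytes, the form handed to the
	// B-tree container as the entry's value.
	var valBuf [8]byte
	binary.BigEndian.PutUint64(valBuf[:], pack(orig))

	// The iterator side reads the 8 bytes back and unpacks them.
	got := unpack(binary.BigEndian.Uint64(valBuf[:]))
	fmt.Println(got == orig) // true
}
```

Keeping the RID in the value (rather than folded into the key alone) is what lets ScanKey and the range iterator hand back tuple locations directly; the diff also makes GetRangeScanIterator pass nil byte slices when start_key or end_key is absent, so open-ended range scans are now handled instead of being left as the removed TODO.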
40 changes: 20 additions & 20 deletions lib/types/column_value.go
@@ -332,26 +332,26 @@ func (v Value) Serialize() []byte {
return []byte{}
}

// no length info and isNull info
func (v Value) SerializeOnlyVal() []byte {
switch v.valueType {
case Integer:
buf := new(bytes.Buffer)
binary.Write(buf, binary.LittleEndian, v.ToInteger())
return buf.Bytes()
case Float:
buf := new(bytes.Buffer)
binary.Write(buf, binary.LittleEndian, v.ToFloat())
return buf.Bytes()
case Varchar:
return []byte(v.ToVarchar())
case Boolean:
buf := new(bytes.Buffer)
binary.Write(buf, binary.LittleEndian, v.ToBoolean())
return buf.Bytes()
}
return []byte{}
}
//// no length info and isNull info
//func (v Value) SerializeOnlyVal() []byte {
// switch v.valueType {
// case Integer:
// buf := new(bytes.Buffer)
// binary.Write(buf, binary.LittleEndian, v.ToInteger())
// return buf.Bytes()
// case Float:
// buf := new(bytes.Buffer)
// binary.Write(buf, binary.LittleEndian, v.ToFloat())
// return buf.Bytes()
// case Varchar:
// return []byte(v.ToVarchar())
// case Boolean:
// buf := new(bytes.Buffer)
// binary.Write(buf, binary.LittleEndian, v.ToBoolean())
// return buf.Bytes()
// }
// return []byte{}
//}

// Size returns the size in bytes that the type will occupy inside the tuple
func (v Value) Size() uint32 {
