
Commit 62d2508

removed needless TODO comments.
ryogrid committed Aug 24, 2024
1 parent 132bc13 commit 62d2508
Showing 9 changed files with 7 additions and 21 deletions.
7 changes: 3 additions & 4 deletions lib/catalog/table_metadata.go
@@ -37,9 +37,9 @@ func NewTableMetadata(schema *schema.Schema, name string, table *access.TableHea
if column_.HasIndex() {
switch column_.IndexKind() {
case index_constants.INDEX_KIND_HASH:
- // TODO: (SDB) index bucket size is common.BucketSizeOfHashIndex (auto size extending is needed...)
- // note: one bucket is used pages for storing index key/value pairs for a column.
- // one page can store 512 key/value pair
+ // index bucket size is common.BucketSizeOfHashIndex (auto size extending is needed...)
+ // note: one bucket is used pages for storing index key/value pairs for a column.
+ // one page can store 512 key/value pair
im := index.NewIndexMetadata(column_.GetColumnName()+"_index", name, schema, []uint32{uint32(idx)})
hIdx := index.NewLinearProbeHashTableIndex(im, table.GetBufferPoolManager(), uint32(idx), common.BucketSizeOfHashIndex, column_.IndexHeaderPageId())

@@ -66,7 +66,6 @@ func NewTableMetadata(schema *schema.Schema, name string, table *access.TableHea
//column_.SetIndexHeaderPageId(slIdx.GetHeaderPageId())
case index_constants.INDEX_KIND_BTREE:
im := index.NewIndexMetadata(column_.GetColumnName()+"_index", name, schema, []uint32{uint32(idx)})
- // TODO: (SDB) need to avoid reuse of page zero when system shutdown was not graceful
var pageZeroId *int32 = nil
if column_.IndexHeaderPageId() != -1 && isGracefulShutdown {
pageZeroId = new(int32)
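Note: the graceful-shutdown check in the B-tree branch above decides whether a persisted header page (page zero) may be reused. A minimal, self-contained sketch of that decision, with illustrative names rather than the actual SamehadaDB API:

package main

import "fmt"

// reusablePageZero mirrors the check shown above: the stored header page id is
// reused only when one was persisted (-1 means "none") and the previous
// shutdown was graceful; otherwise nil tells the caller to build the index anew.
func reusablePageZero(storedHeaderPageId int32, isGracefulShutdown bool) *int32 {
    if storedHeaderPageId != -1 && isGracefulShutdown {
        id := storedHeaderPageId
        return &id
    }
    return nil
}

func main() {
    fmt.Println(reusablePageZero(42, true) != nil)  // true: reuse persisted page zero
    fmt.Println(reusablePageZero(42, false) != nil) // false: non-graceful shutdown, rebuild index
}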
1 change: 0 additions & 1 deletion lib/container/hash/linear_probe_hash_table.go
@@ -21,7 +21,6 @@ import (
*/

// Limitation: current implementation contain BlockArraySize(252) * 1020 = 257,040 record info at most
- // TODO: (SDB) LinearProbeHashTable does not dynamically grows...
type LinearProbeHashTable struct {
headerPageId types.PageID
bpm *buffer.BufferPoolManager
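Note: a quick check of the fixed capacity stated in the comment above; the two constants are taken from that comment, not read from the package:

package main

import "fmt"

func main() {
    const blockArraySize = 252 // key/value slots per block page, per the comment above
    const numBlockPages = 1020 // block pages reachable from one header page (assumed from the comment)
    // The table never grows, so this product is the hard limit on stored records.
    fmt.Println(blockArraySize * numBlockPages) // 257040
}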
1 change: 0 additions & 1 deletion lib/execution/executors/executor_test/executor_test.go
@@ -786,7 +786,6 @@ func TestDeleteWithSelctInsert(t *testing.T) {
bpm := buffer.NewBufferPoolManager(uint32(32), diskManager, log_mgr)
txn_mgr := access.NewTransactionManager(access.NewLockManager(access.REGULAR, access.DETECTION), log_mgr)
txn := txn_mgr.Begin(nil)
- // TODO: (SDB) this is a hack to get around the fact that we don't have a recovery manager
txn.SetIsRecoveryPhase(true)

c := catalog.BootstrapCatalog(bpm, log_mgr, access.NewLockManager(access.REGULAR, access.PREVENTION), txn)
2 changes: 1 addition & 1 deletion lib/execution/plans/hash_join.go
@@ -43,7 +43,7 @@ func NewHashJoinPlanNodeWithChilds(left_child Plan, left_hash_keys []expression.
if len(left_hash_keys) != 1 || len(right_hash_keys) != 1 {
panic("NewHashJoinPlanNodeWithChilds supports only one key for left and right now.")
}
- // TODO: (SDB) one key pair only used on join even if multiple key pairs are passed
+ // one key pair only used on join even if multiple key pairs are passed
onPredicate := constructOnExpressionFromKeysInfo(left_hash_keys, right_hash_keys)
output_schema := makeMergedOutputSchema(left_child.OutputSchema(), right_child.OutputSchema())

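Note: NewHashJoinPlanNodeWithChilds above accepts key slices but panics unless each side has exactly one key. A self-contained sketch of that restriction with illustrative types (not the real plan or expression API):

package main

import "fmt"

// singleJoinKeys mirrors the guard above: although the parameters are slices,
// only a single hash key per side is supported for now.
func singleJoinKeys(leftKeys, rightKeys []string) (string, string, error) {
    if len(leftKeys) != 1 || len(rightKeys) != 1 {
        return "", "", fmt.Errorf("only one key per side is supported, got %d and %d", len(leftKeys), len(rightKeys))
    }
    return leftKeys[0], rightKeys[0], nil
}

func main() {
    l, r, err := singleJoinKeys([]string{"a.id"}, []string{"b.id"})
    fmt.Println(l, r, err) // a.id b.id <nil>
}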
2 changes: 0 additions & 2 deletions lib/execution/plans/insert.go
@@ -39,12 +39,10 @@ func (p *InsertPlanNode) GetType() PlanType {
}

func (p *InsertPlanNode) AccessRowCount(c *catalog.Catalog) uint64 {
- // TODO: (SDB) temporal impl
return uint64(len(p.rawValues))
}

func (p *InsertPlanNode) EmitRowCount(c *catalog.Catalog) uint64 {
- // TODO: (SDB) temporal impl
return uint64(len(p.rawValues))
}

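Note: for an INSERT over raw values, both estimates above are simply the number of value rows. A tiny illustrative stand-in (not the real InsertPlanNode):

package main

import "fmt"

type rawInsertPlan struct {
    rawValues [][]interface{}
}

// Access and emit row counts of a raw-values insert both equal the row count.
func (p *rawInsertPlan) accessRowCount() uint64 { return uint64(len(p.rawValues)) }
func (p *rawInsertPlan) emitRowCount() uint64   { return uint64(len(p.rawValues)) }

func main() {
    p := &rawInsertPlan{rawValues: [][]interface{}{{1, "a"}, {2, "b"}, {3, "c"}}}
    fmt.Println(p.accessRowCount(), p.emitRowCount()) // 3 3
}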
6 changes: 1 addition & 5 deletions lib/recovery/log_manager.go
@@ -17,15 +17,11 @@ import (
* happens. When the thread is awakened, the log buffer's content is written into the disk log file.
*/
type LogManager struct {
- // TODO: (SDB) must ensure atomicity if current locking becomes not enough
- offset uint32
- // TODO: (SDB) must ensure atomicity if current locking becomes not enough
+ offset uint32
log_buffer_lsn types.LSN
/** The atomic counter which records the next log sequence number. */
- // TODO: (SDB) must ensure atomicity if current locking becomes not enough
next_lsn types.LSN
/** The log records before and including the persistent lsn have been written to disk. */
- // TODO: (SDB) must ensure atomicity if current locking becomes not enough
persistent_lsn types.LSN
log_buffer []byte
flush_buffer []byte
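Note: the removed TODOs in LogManager concerned the atomicity of the LSN counters. A minimal sketch of how such a counter can be made safe with sync/atomic, purely illustrative of the concern (the real LogManager relies on its own locking):

package main

import (
    "fmt"
    "sync/atomic"
)

// lsnCounter stands in for next_lsn: it hands out monotonically increasing
// log sequence numbers safely even when appended to from multiple goroutines.
type lsnCounter struct {
    next int64
}

func (c *lsnCounter) allocate() int64 {
    // AddInt64 returns the new value, so subtract 1 to get the LSN just assigned.
    return atomic.AddInt64(&c.next, 1) - 1
}

func main() {
    c := &lsnCounter{}
    fmt.Println(c.allocate(), c.allocate(), c.allocate()) // 0 1 2
}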
4 changes: 0 additions & 4 deletions lib/recovery/recovery_test/log_recovery_test.go
@@ -60,10 +60,8 @@ func TestRedo(t *testing.T) {
val1_0 := tuple1_.GetValue(schema_, 0)

rid, _ = test_table.InsertTuple(tuple_, txn, math.MaxUint32, false)
- // TODO: (SDB) insert index entry if needed
testingpkg.Assert(t, rid != nil, "")
rid1, _ = test_table.InsertTuple(tuple1_, txn, math.MaxUint32, false)
- // TODO: (SDB) insert index entry if needed
testingpkg.Assert(t, rid1 != nil, "")

samehada_instance.GetTransactionManager().Commit(nil, txn)
@@ -216,7 +214,6 @@ func TestUndo(t *testing.T) {
tuple3 := ConstructTuple(schema_)
var rid3 *page.RID
rid3, _ = test_table.InsertTuple(tuple3, txn, math.MaxUint32, false)
- // TODO: (SDB) insert index entry if needed
testingpkg.Assert(t, rid3 != nil, "")

af_insert_tuple2, _ := test_table.GetTuple(rid2, txn)
@@ -359,7 +356,6 @@ func TestCheckpoint(t *testing.T) {
if err != nil {
fmt.Println(err)
}
- // TODO: (SDB) insert index entry if needed
testingpkg.Assert(t, rid != nil, "")
}
samehada_instance.GetTransactionManager().Commit(nil, txn1)
4 changes: 2 additions & 2 deletions lib/storage/disk/disk_manager_impl.go
@@ -276,12 +276,12 @@ func (d *DiskManagerImpl) WriteLog(log_data []byte) error {
// if err != nil {
// fmt.Println("I/O error while writing log")
// fmt.Println(err)
- // // TODO: (SDB) SHOULD BE FIXED: statistics update thread's call causes this error rarely
+ // // Note: (SDB) SHOULD BE FIXED: statistics update thread's call causes this error rarely
// return err
// }
//}

- // TODO: (SDB) writing log isn't using direct I/O now
+ // Note: (SDB) writing log isn't using direct I/O now

_, err := d.log.Write(log_data)
if err != nil {
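Note: as the note above says, log writes currently go through the ordinary buffered file API rather than direct I/O. A self-contained sketch of that pattern with an explicit fsync so the record is durable (not the actual DiskManagerImpl code):

package main

import (
    "log"
    "os"
)

func main() {
    f, err := os.OpenFile("wal.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    if _, err := f.Write([]byte("serialized log record")); err != nil {
        log.Fatal(err)
    }
    // Without O_DIRECT the bytes may still sit in the OS page cache, so an
    // explicit Sync (fsync) is needed before the write can be considered durable.
    if err := f.Sync(); err != nil {
        log.Fatal(err)
    }
}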
1 change: 0 additions & 1 deletion lib/types/column_value.go
@@ -398,7 +398,6 @@ func (v Value) ToBoolean() bool {
// NULL value check is needed in general
func (v Value) ToInteger() int32 {
if v.valueType != Integer {
- // TODO: (SDB) temporal modification for Varchar (AddWLatchRecord, RemoveWLatchRecord...)
return math.MaxInt32
}
return *v.integer
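Note: ToInteger above falls back to math.MaxInt32 when the value is not an Integer. A small illustrative sketch of that sentinel convention (simplified types, not the real Value):

package main

import (
    "fmt"
    "math"
)

// asInt32 mirrors the fallback above: a caller that asks for an integer from a
// non-integer value gets math.MaxInt32 back and must treat it as "invalid".
func asInt32(v interface{}) int32 {
    if i, ok := v.(int32); ok {
        return i
    }
    return math.MaxInt32
}

func main() {
    fmt.Println(asInt32(int32(7)))  // 7
    fmt.Println(asInt32("varchar")) // 2147483647 (sentinel for "not an integer")
}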
