Fix catalog drop table for dual storage (linkedin#237)
## Summary

Currently, dropTable in the catalog assumes the cluster default storage is the storage for every table that was created. That assumption no longer holds with the introduction of per-table storage. This PR fixes that bug by retrieving the correct FileIO for the table being dropped and performing the cleanup with that FileIO.

## Changes

- [ ] Client-facing API Changes
- [ ] Internal API Changes
- [x] Bug Fixes
- [ ] New Features
- [ ] Performance Improvements
- [ ] Code Style
- [x] Refactoring
- [ ] Documentation
- [x] Tests

## Testing Done

- [ ] Manually tested on local docker setup. Please include commands run, and their output.
- [x] Added new tests for the changes made.
- [ ] Updated existing tests to reflect the changes made.
- [ ] No tests added or updated. Please explain why. If unsure, please feel free to ask for help.
- [ ] Some other form of testing like staging or soak time in production. Please explain.

Added a DualStorageTest that tests creation and deletion of tables on HDFS and local storage.

## Additional Information

- [ ] Breaking Changes
- [ ] Deprecations
- [ ] Large PR broken into smaller PRs, and PR plan linked in the description.
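To make the summary concrete, here is a minimal sketch of the corrected drop path, assuming a StorageManager that can hand back a storage (and its FileIO) for a given storage type. The lookup names (`StorageType.fromString`, `getStorage`, `getFileIO`) are illustrative assumptions, not the exact OpenHouse internals; `TableMetadataParser.read` and `CatalogUtil.dropTableData` are standard Iceberg API.

```java
// Minimal sketch of the corrected drop path; not the exact OpenHouse code.
// The storage lookup accessors below (fromString, getStorage, getFileIO)
// are assumptions for illustration; only the Iceberg calls are standard API.
import com.linkedin.openhouse.cluster.storage.StorageManager;
import com.linkedin.openhouse.cluster.storage.StorageType;
import com.linkedin.openhouse.internal.catalog.model.HouseTable;
import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableMetadataParser;
import org.apache.iceberg.io.FileIO;

public class DropTableFileIoSketch {

  private final StorageManager storageManager;

  public DropTableFileIoSketch(StorageManager storageManager) {
    this.storageManager = storageManager;
  }

  /** Delete a table's files with the FileIO of the storage it lives on. */
  public void dropTableData(HouseTable table, String metadataLocation) {
    // Before the fix: cleanup always used the cluster default storage's
    // FileIO, which is wrong for tables created on any other storage.
    StorageType.Type type = StorageType.fromString(table.getStorageType()); // assumed lookup
    FileIO fileIo = storageManager.getStorage(type).getFileIO(); // assumed accessors

    // Standard Iceberg utilities: read the table metadata, then delete the
    // data and metadata files through the storage-appropriate FileIO.
    TableMetadata metadata = TableMetadataParser.read(fileIo, metadataLocation);
    CatalogUtil.dropTableData(fileIo, metadata);
  }
}
```

The essential change is that the FileIO is derived from the table's own storageType instead of the cluster default, so cleanup works for tables on either storage.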
1 parent 79e2a9f · commit 2960f85 · showing 6 changed files with 140 additions and 20 deletions.
services/tables/src/test/java/com/linkedin/openhouse/tables/e2e/h2/DualStorageTest.java (84 additions, 0 deletions)
```java
package com.linkedin.openhouse.tables.e2e.h2;

import static com.linkedin.openhouse.tables.model.TableModelConstants.buildGetTableResponseBodyWithDbTbl;
import static com.linkedin.openhouse.tables.model.TableModelConstants.buildTableDto;

import com.linkedin.openhouse.cluster.storage.StorageManager;
import com.linkedin.openhouse.cluster.storage.StorageType;
import com.linkedin.openhouse.internal.catalog.model.HouseTable;
import com.linkedin.openhouse.internal.catalog.model.HouseTablePrimaryKey;
import com.linkedin.openhouse.internal.catalog.repository.HouseTableRepository;
import com.linkedin.openhouse.tables.dto.mapper.TablesMapper;
import com.linkedin.openhouse.tables.mock.properties.CustomClusterPropertiesInitializer;
import com.linkedin.openhouse.tables.model.TableDto;
import com.linkedin.openhouse.tables.model.TableDtoPrimaryKey;
import com.linkedin.openhouse.tables.repository.OpenHouseInternalRepository;
import org.apache.iceberg.catalog.Catalog;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.SpyBean;
import org.springframework.test.context.ContextConfiguration;

@SpringBootTest
@ContextConfiguration(initializers = CustomClusterPropertiesInitializer.class)
public class DualStorageTest {

  @Autowired HouseTableRepository houseTablesRepository;

  @SpyBean @Autowired OpenHouseInternalRepository openHouseInternalRepository;

  @Autowired StorageManager storageManager;

  @Autowired Catalog catalog;

  @Autowired TablesMapper tablesMapper;

  @Test
  public void testCreateDropTableDualStorage() {
    // Test create table
    // db.table should be created on hdfs storage
    TableDto hdfsTableDto = buildTableDto(buildGetTableResponseBodyWithDbTbl("db", "table"));
    openHouseInternalRepository.save(hdfsTableDto);
    TableDtoPrimaryKey hdfsDtoPrimaryKey = tablesMapper.toTableDtoPrimaryKey(hdfsTableDto);
    Assertions.assertTrue(openHouseInternalRepository.existsById(hdfsDtoPrimaryKey));
    HouseTablePrimaryKey hdfsHtsPrimaryKey =
        HouseTablePrimaryKey.builder()
            .databaseId(hdfsDtoPrimaryKey.getDatabaseId())
            .tableId(hdfsDtoPrimaryKey.getTableId())
            .build();
    Assertions.assertTrue(houseTablesRepository.existsById(hdfsHtsPrimaryKey));
    HouseTable houseTable = houseTablesRepository.findById(hdfsHtsPrimaryKey).get();
    // storage type hdfs
    Assertions.assertEquals(StorageType.HDFS.getValue(), houseTable.getStorageType());

    // local_db.table should be created on local storage
    TableDto localTableDto = buildTableDto(buildGetTableResponseBodyWithDbTbl("local_db", "table"));
    openHouseInternalRepository.save(localTableDto);
    TableDtoPrimaryKey localDtoPrimaryKey = tablesMapper.toTableDtoPrimaryKey(localTableDto);
    Assertions.assertTrue(openHouseInternalRepository.existsById(localDtoPrimaryKey));
    HouseTablePrimaryKey localHtsPrimaryKey =
        HouseTablePrimaryKey.builder()
            .databaseId(localDtoPrimaryKey.getDatabaseId())
            .tableId(localDtoPrimaryKey.getTableId())
            .build();
    Assertions.assertTrue(houseTablesRepository.existsById(localHtsPrimaryKey));
    houseTable = houseTablesRepository.findById(localHtsPrimaryKey).get();
    // storage type local
    Assertions.assertEquals(StorageType.LOCAL.getValue(), houseTable.getStorageType());

    // Test Drop Table
    openHouseInternalRepository.deleteById(hdfsDtoPrimaryKey);
    Assertions.assertFalse(openHouseInternalRepository.existsById(hdfsDtoPrimaryKey));
    openHouseInternalRepository.deleteById(localDtoPrimaryKey);
    Assertions.assertFalse(openHouseInternalRepository.existsById(localDtoPrimaryKey));
  }

  @AfterAll
  static void unsetSysProp() {
    System.clearProperty("OPENHOUSE_CLUSTER_CONFIG_PATH");
  }
}
```
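A note on what the test exercises: both tables are saved through OpenHouseInternalRepository, which places `db.table` on HDFS storage and `local_db.table` on local storage; the routing presumably comes from the custom cluster properties loaded by CustomClusterPropertiesInitializer. The deleteById calls at the end then go through the fixed drop path, so each table's files are cleaned up with the FileIO matching its own storage type rather than the cluster default.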