From 48a8e23b9379121db09e1ccd30b5b8fdd5d5f596 Mon Sep 17 00:00:00 2001
From: Runchao Han <me@runchao.rocks>
Date: Mon, 19 Aug 2024 12:29:20 +1000
Subject: [PATCH 1/3] zoneconcierge: rename chain_id to consumer_id
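
Rename chain_id/chain_ids to consumer_id/consumer_ids across the
zoneconcierge module: proto definitions, generated swagger and gateway
code, keeper indexers, gRPC queries, CLI, client query helpers, and
e2e tests. Protobuf field numbers and store key layout are unchanged;
REST paths and JSON field names change accordingly.

Callers constructing query requests need the renamed fields, e.g.
(illustrative sketch using the fields introduced in this patch):

    req := &zctypes.QueryFinalizedChainsInfoRequest{
        ConsumerIds: consumerIds, // previously ChainIds
        Prove:       true,
    }
    resp, err := queryClient.FinalizedChainsInfo(ctx, req)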

---
 client/docs/swagger-ui/swagger.yaml           | 208 +++++------
 client/query/zoneconcierge.go                 |  18 +-
 proto/babylon/zoneconcierge/v1/query.proto    |  30 +-
 .../zoneconcierge/v1/zoneconcierge.proto      |  12 +-
 test/e2e/btc_timestamping_e2e_test.go         |   6 +-
 test/e2e/configurer/chain/queries.go          |   8 +-
 x/zoneconcierge/client/cli/query.go           |  18 +-
 .../keeper/canonical_chain_indexer.go         |  27 +-
 .../keeper/canonical_chain_indexer_test.go    |  24 +-
 x/zoneconcierge/keeper/chain_info_indexer.go  |  49 +--
 .../keeper/epoch_chain_info_indexer.go        |  44 +--
 x/zoneconcierge/keeper/fork_indexer.go        |  19 +-
 x/zoneconcierge/keeper/fork_indexer_test.go   |   4 +-
 x/zoneconcierge/keeper/grpc_query.go          |  84 ++---
 x/zoneconcierge/keeper/grpc_query_test.go     |  90 ++---
 x/zoneconcierge/keeper/header_handler.go      |  22 +-
 x/zoneconcierge/keeper/hooks.go               |   4 +-
 .../keeper/ibc_header_decorator.go            |  19 +-
 .../keeper/ibc_packet_btc_timestamp.go        |  14 +-
 x/zoneconcierge/keeper/keeper_test.go         |  16 +-
 x/zoneconcierge/keeper/proof_btc_timestamp.go |   2 +-
 .../keeper/proof_btc_timestamp_test.go        |   8 +-
 x/zoneconcierge/keeper/query_kvstore.go       |   2 +-
 x/zoneconcierge/types/btc_timestamp.go        |   6 +-
 x/zoneconcierge/types/btc_timestamp_test.go   |   8 +-
 x/zoneconcierge/types/errors.go               |   2 +-
 x/zoneconcierge/types/query.pb.go             | 338 +++++++++---------
 x/zoneconcierge/types/query.pb.gw.go          |  76 ++--
 x/zoneconcierge/types/zoneconcierge.go        |  12 +-
 x/zoneconcierge/types/zoneconcierge.pb.go     | 190 +++++-----
 30 files changed, 683 insertions(+), 677 deletions(-)

diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml
index 00e83b140..a59dd6156 100644
--- a/client/docs/swagger-ui/swagger.yaml
+++ b/client/docs/swagger-ui/swagger.yaml
@@ -4738,7 +4738,7 @@ paths:
           type: boolean
       tags:
         - Query
-  /babylon/zoneconcierge/v1/chain_info/{chain_id}/header/{height}:
+  /babylon/zoneconcierge/v1/chain_info/{consumer_id}/header/{height}:
     get:
       summary: Header queries the CZ header and fork headers at a given height.
       operationId: Header
@@ -4751,9 +4751,9 @@ paths:
               header:
                 type: object
                 properties:
-                  chain_id:
+                  consumer_id:
                     type: string
-                    title: chain_id is the unique ID of the chain
+                    title: consumer_id is the unique ID of the consumer
                   hash:
                     type: string
                     format: byte
@@ -4816,9 +4816,9 @@ paths:
                     items:
                       type: object
                       properties:
-                        chain_id:
+                        consumer_id:
                           type: string
-                          title: chain_id is the unique ID of the chain
+                          title: consumer_id is the unique ID of the consumer
                         hash:
                           type: string
                           format: byte
@@ -5108,7 +5108,7 @@ paths:
                           "value": "1.212s"
                         }
       parameters:
-        - name: chain_id
+        - name: consumer_id
           in: path
           required: true
           type: string
@@ -5129,12 +5129,12 @@ paths:
           schema:
             type: object
             properties:
-              chain_ids:
+              consumer_ids:
                 type: array
                 items:
                   type: string
                 title: >-
-                  chain_ids are IDs of the chains in ascending alphabetical
+                  consumer_ids are IDs of the consumers in ascending alphabetical
                   order
               pagination:
                 title: pagination defines the pagination in the response
@@ -5438,15 +5438,15 @@ paths:
                 items:
                   type: object
                   properties:
-                    chain_id:
+                    consumer_id:
                       type: string
-                      title: chain_id is the ID of the chain
+                      title: consumer_id is the ID of the consumer
                     latest_header:
                       type: object
                       properties:
-                        chain_id:
+                        consumer_id:
                           type: string
-                          title: chain_id is the unique ID of the chain
+                          title: consumer_id is the unique ID of the consumer
                         hash:
                           type: string
                           format: byte
@@ -5512,9 +5512,9 @@ paths:
                           items:
                             type: object
                             properties:
-                              chain_id:
+                              consumer_id:
                                 type: string
-                                title: chain_id is the unique ID of the chain
+                                title: consumer_id is the unique ID of the consumer
                               hash:
                                 type: string
                                 format: byte
@@ -5820,7 +5820,7 @@ paths:
                           "value": "1.212s"
                         }
       parameters:
-        - name: chain_ids
+        - name: consumer_ids
           in: query
           required: false
           type: array
@@ -5846,15 +5846,15 @@ paths:
                 items:
                   type: object
                   properties:
-                    chain_id:
+                    consumer_id:
                       type: string
-                      title: chain_id is the ID of the chain
+                      title: consumer_id is the ID of the consumer
                     latest_header:
                       type: object
                       properties:
-                        chain_id:
+                        consumer_id:
                           type: string
-                          title: chain_id is the unique ID of the chain
+                          title: consumer_id is the unique ID of the consumer
                         hash:
                           type: string
                           format: byte
@@ -5920,9 +5920,9 @@ paths:
                           items:
                             type: object
                             properties:
-                              chain_id:
+                              consumer_id:
                                 type: string
-                                title: chain_id is the unique ID of the chain
+                                title: consumer_id is the unique ID of the consumer
                               hash:
                                 type: string
                                 format: byte
@@ -6236,7 +6236,7 @@ paths:
           required: false
           type: string
           format: uint64
-        - name: chain_ids
+        - name: consumer_ids
           in: query
           required: false
           type: array
@@ -6245,7 +6245,7 @@ paths:
           collectionFormat: multi
       tags:
         - Query
-  /babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}/height/{height}:
+  /babylon/zoneconcierge/v1/finalized_chain_info/{consumer_id}/height/{height}:
     get:
       summary: >-
         FinalizedChainInfoUntilHeight queries the BTC-finalised info no later
@@ -6262,15 +6262,15 @@ paths:
               finalized_chain_info:
                 type: object
                 properties:
-                  chain_id:
+                  consumer_id:
                     type: string
-                    title: chain_id is the ID of the chain
+                    title: consumer_id is the ID of the consumer
                   latest_header:
                     type: object
                     properties:
-                      chain_id:
+                      consumer_id:
                         type: string
-                        title: chain_id is the unique ID of the chain
+                        title: consumer_id is the unique ID of the consumer
                       hash:
                         type: string
                         format: byte
@@ -6336,9 +6336,9 @@ paths:
                         items:
                           type: object
                           properties:
-                            chain_id:
+                            consumer_id:
                               type: string
-                              title: chain_id is the unique ID of the chain
+                              title: consumer_id is the unique ID of the consumer
                             hash:
                               type: string
                               format: byte
@@ -6953,8 +6953,8 @@ paths:
                           "value": "1.212s"
                         }
       parameters:
-        - name: chain_id
-          description: chain_id is the ID of the CZ
+        - name: consumer_id
+          description: consumer_id is the ID of the CZ
           in: path
           required: true
           type: string
@@ -6996,21 +6996,21 @@ paths:
                 items:
                   type: object
                   properties:
-                    chain_id:
+                    consumer_id:
                       type: string
-                      title: chain_id is the ID of the chain
+                      title: consumer_id is the ID of the consumer
                     finalized_chain_info:
                       type: object
                       properties:
-                        chain_id:
+                        consumer_id:
                           type: string
-                          title: chain_id is the ID of the chain
+                          title: consumer_id is the ID of the consumer
                         latest_header:
                           type: object
                           properties:
-                            chain_id:
+                            consumer_id:
                               type: string
-                              title: chain_id is the unique ID of the chain
+                              title: consumer_id is the unique ID of the consumer
                             hash:
                               type: string
                               format: byte
@@ -7077,9 +7077,11 @@ paths:
                               items:
                                 type: object
                                 properties:
-                                  chain_id:
+                                  consumer_id:
                                     type: string
-                                    title: chain_id is the unique ID of the chain
+                                    title: >-
+                                      consumer_id is the unique ID of the
+                                      consumer
                                   hash:
                                     type: string
                                     format: byte
@@ -7706,8 +7708,8 @@ paths:
                           "value": "1.212s"
                         }
       parameters:
-        - name: chain_ids
-          description: chain_ids is the list of ids of CZs.
+        - name: consumer_ids
+          description: consumer_ids is the list of ids of CZs.
           in: query
           required: false
           type: array
@@ -7723,7 +7725,7 @@ paths:
           type: boolean
       tags:
         - Query
-  /babylon/zoneconcierge/v1/headers/{chain_id}:
+  /babylon/zoneconcierge/v1/headers/{consumer_id}:
     get:
       summary: |-
         ListHeaders queries the headers of a chain in Babylon's view, with
@@ -7740,9 +7742,9 @@ paths:
                 items:
                   type: object
                   properties:
-                    chain_id:
+                    consumer_id:
                       type: string
-                      title: chain_id is the unique ID of the chain
+                      title: consumer_id is the unique ID of the consumer
                     hash:
                       type: string
                       format: byte
@@ -8029,7 +8031,7 @@ paths:
                           "value": "1.212s"
                         }
       parameters:
-        - name: chain_id
+        - name: consumer_id
           in: path
           required: true
           type: string
@@ -8091,7 +8093,7 @@ paths:
           type: boolean
       tags:
         - Query
-  /babylon/zoneconcierge/v1/headers/{chain_id}/epochs/{epoch_num}:
+  /babylon/zoneconcierge/v1/headers/{consumer_id}/epochs/{epoch_num}:
     get:
       summary: |-
         ListEpochHeaders queries the headers of a chain timestamped in a given
@@ -8108,9 +8110,9 @@ paths:
                 items:
                   type: object
                   properties:
-                    chain_id:
+                    consumer_id:
                       type: string
-                      title: chain_id is the unique ID of the chain
+                      title: consumer_id is the unique ID of the consumer
                     hash:
                       type: string
                       format: byte
@@ -8368,7 +8370,7 @@ paths:
                           "value": "1.212s"
                         }
       parameters:
-        - name: chain_id
+        - name: consumer_id
           in: path
           required: true
           type: string
@@ -11391,15 +11393,15 @@ definitions:
   babylon.zoneconcierge.v1.ChainInfo:
     type: object
     properties:
-      chain_id:
+      consumer_id:
         type: string
-        title: chain_id is the ID of the chain
+        title: consumer_id is the ID of the consumer
       latest_header:
         type: object
         properties:
-          chain_id:
+          consumer_id:
             type: string
-            title: chain_id is the unique ID of the chain
+            title: consumer_id is the unique ID of the consumer
           hash:
             type: string
             format: byte
@@ -11458,9 +11460,9 @@ definitions:
             items:
               type: object
               properties:
-                chain_id:
+                consumer_id:
                   type: string
-                  title: chain_id is the unique ID of the chain
+                  title: consumer_id is the unique ID of the consumer
                 hash:
                   type: string
                   format: byte
@@ -11560,21 +11562,21 @@ definitions:
   babylon.zoneconcierge.v1.FinalizedChainInfo:
     type: object
     properties:
-      chain_id:
+      consumer_id:
         type: string
-        title: chain_id is the ID of the chain
+        title: consumer_id is the ID of the consumer
       finalized_chain_info:
         type: object
         properties:
-          chain_id:
+          consumer_id:
             type: string
-            title: chain_id is the ID of the chain
+            title: consumer_id is the ID of the consumer
           latest_header:
             type: object
             properties:
-              chain_id:
+              consumer_id:
                 type: string
-                title: chain_id is the unique ID of the chain
+                title: consumer_id is the unique ID of the consumer
               hash:
                 type: string
                 format: byte
@@ -11637,9 +11639,9 @@ definitions:
                 items:
                   type: object
                   properties:
-                    chain_id:
+                    consumer_id:
                       type: string
-                      title: chain_id is the unique ID of the chain
+                      title: consumer_id is the unique ID of the consumer
                     hash:
                       type: string
                       format: byte
@@ -12041,9 +12043,9 @@ definitions:
         items:
           type: object
           properties:
-            chain_id:
+            consumer_id:
               type: string
-              title: chain_id is the unique ID of the chain
+              title: consumer_id is the unique ID of the consumer
             hash:
               type: string
               format: byte
@@ -12124,9 +12126,9 @@ definitions:
   babylon.zoneconcierge.v1.IndexedHeader:
     type: object
     properties:
-      chain_id:
+      consumer_id:
         type: string
-        title: chain_id is the unique ID of the chain
+        title: consumer_id is the unique ID of the consumer
       hash:
         type: string
         format: byte
@@ -12474,11 +12476,11 @@ definitions:
   babylon.zoneconcierge.v1.QueryChainListResponse:
     type: object
     properties:
-      chain_ids:
+      consumer_ids:
         type: array
         items:
           type: string
-        title: chain_ids are IDs of the chains in ascending alphabetical order
+        title: consumer_ids are IDs of the consumers in ascending alphabetical order
       pagination:
         title: pagination defines the pagination in the response
         type: object
@@ -12515,15 +12517,15 @@ definitions:
         items:
           type: object
           properties:
-            chain_id:
+            consumer_id:
               type: string
-              title: chain_id is the ID of the chain
+              title: consumer_id is the ID of the consumer
             latest_header:
               type: object
               properties:
-                chain_id:
+                consumer_id:
                   type: string
-                  title: chain_id is the unique ID of the chain
+                  title: consumer_id is the unique ID of the consumer
                 hash:
                   type: string
                   format: byte
@@ -12586,9 +12588,9 @@ definitions:
                   items:
                     type: object
                     properties:
-                      chain_id:
+                      consumer_id:
                         type: string
-                        title: chain_id is the unique ID of the chain
+                        title: consumer_id is the unique ID of the consumer
                       hash:
                         type: string
                         format: byte
@@ -12705,15 +12707,15 @@ definitions:
         items:
           type: object
           properties:
-            chain_id:
+            consumer_id:
               type: string
-              title: chain_id is the ID of the chain
+              title: consumer_id is the ID of the consumer
             latest_header:
               type: object
               properties:
-                chain_id:
+                consumer_id:
                   type: string
-                  title: chain_id is the unique ID of the chain
+                  title: consumer_id is the unique ID of the consumer
                 hash:
                   type: string
                   format: byte
@@ -12776,9 +12778,9 @@ definitions:
                   items:
                     type: object
                     properties:
-                      chain_id:
+                      consumer_id:
                         type: string
-                        title: chain_id is the unique ID of the chain
+                        title: consumer_id is the unique ID of the consumer
                       hash:
                         type: string
                         format: byte
@@ -12896,15 +12898,15 @@ definitions:
       finalized_chain_info:
         type: object
         properties:
-          chain_id:
+          consumer_id:
             type: string
-            title: chain_id is the ID of the chain
+            title: consumer_id is the ID of the consumer
           latest_header:
             type: object
             properties:
-              chain_id:
+              consumer_id:
                 type: string
-                title: chain_id is the unique ID of the chain
+                title: consumer_id is the unique ID of the consumer
               hash:
                 type: string
                 format: byte
@@ -12967,9 +12969,9 @@ definitions:
                 items:
                   type: object
                   properties:
-                    chain_id:
+                    consumer_id:
                       type: string
-                      title: chain_id is the unique ID of the chain
+                      title: consumer_id is the unique ID of the consumer
                     hash:
                       type: string
                       format: byte
@@ -13373,21 +13375,21 @@ definitions:
         items:
           type: object
           properties:
-            chain_id:
+            consumer_id:
               type: string
-              title: chain_id is the ID of the chain
+              title: consumer_id is the ID of the consumer
             finalized_chain_info:
               type: object
               properties:
-                chain_id:
+                consumer_id:
                   type: string
-                  title: chain_id is the ID of the chain
+                  title: consumer_id is the ID of the consumer
                 latest_header:
                   type: object
                   properties:
-                    chain_id:
+                    consumer_id:
                       type: string
-                      title: chain_id is the unique ID of the chain
+                      title: consumer_id is the unique ID of the consumer
                     hash:
                       type: string
                       format: byte
@@ -13452,9 +13454,9 @@ definitions:
                       items:
                         type: object
                         properties:
-                          chain_id:
+                          consumer_id:
                             type: string
-                            title: chain_id is the unique ID of the chain
+                            title: consumer_id is the unique ID of the consumer
                           hash:
                             type: string
                             format: byte
@@ -13866,9 +13868,9 @@ definitions:
       header:
         type: object
         properties:
-          chain_id:
+          consumer_id:
             type: string
-            title: chain_id is the unique ID of the chain
+            title: consumer_id is the unique ID of the consumer
           hash:
             type: string
             format: byte
@@ -13927,9 +13929,9 @@ definitions:
             items:
               type: object
               properties:
-                chain_id:
+                consumer_id:
                   type: string
-                  title: chain_id is the unique ID of the chain
+                  title: consumer_id is the unique ID of the consumer
                 hash:
                   type: string
                   format: byte
@@ -14023,9 +14025,9 @@ definitions:
         items:
           type: object
           properties:
-            chain_id:
+            consumer_id:
               type: string
-              title: chain_id is the unique ID of the chain
+              title: consumer_id is the unique ID of the consumer
             hash:
               type: string
               format: byte
@@ -14090,9 +14092,9 @@ definitions:
         items:
           type: object
           properties:
-            chain_id:
+            consumer_id:
               type: string
-              title: chain_id is the unique ID of the chain
+              title: consumer_id is the unique ID of the consumer
             hash:
               type: string
               format: byte
diff --git a/client/query/zoneconcierge.go b/client/query/zoneconcierge.go
index 6639d5207..bdc20c193 100644
--- a/client/query/zoneconcierge.go
+++ b/client/query/zoneconcierge.go
@@ -21,12 +21,12 @@ func (c *QueryClient) QueryZoneConcierge(f func(ctx context.Context, queryClient
 }
 
 // FinalizedConnectedChainsInfo queries the zoneconcierge module to get the finalization information for a connected chain
-func (c *QueryClient) FinalizedConnectedChainsInfo(chainIds []string) (*zctypes.QueryFinalizedChainsInfoResponse, error) {
+func (c *QueryClient) FinalizedConnectedChainsInfo(consumerIds []string) (*zctypes.QueryFinalizedChainsInfoResponse, error) {
 	var resp *zctypes.QueryFinalizedChainsInfoResponse
 	err := c.QueryZoneConcierge(func(ctx context.Context, queryClient zctypes.QueryClient) error {
 		var err error
 		req := &zctypes.QueryFinalizedChainsInfoRequest{
-			ChainIds: chainIds,
+			ConsumerIds: consumerIds,
 		}
 		resp, err = queryClient.FinalizedChainsInfo(ctx, req)
 		return err
@@ -36,12 +36,12 @@ func (c *QueryClient) FinalizedConnectedChainsInfo(chainIds []string) (*zctypes.
 }
 
 // ConnectedChainsInfo queries the zoneconcierge module to get information for a connected chain
-func (c *QueryClient) ConnectedChainsInfo(chainIds []string) (*zctypes.QueryChainsInfoResponse, error) {
+func (c *QueryClient) ConnectedChainsInfo(consumerIds []string) (*zctypes.QueryChainsInfoResponse, error) {
 	var resp *zctypes.QueryChainsInfoResponse
 	err := c.QueryZoneConcierge(func(ctx context.Context, queryClient zctypes.QueryClient) error {
 		var err error
 		req := &zctypes.QueryChainsInfoRequest{
-			ChainIds: chainIds,
+			ConsumerIds: consumerIds,
 		}
 		resp, err = queryClient.ChainsInfo(ctx, req)
 		return err
@@ -64,12 +64,12 @@ func (c *QueryClient) ConnectedChainList() (*zctypes.QueryChainListResponse, err
 }
 
 // ConnectedChainHeaders queries the zoneconcierge module for the headers of a connected chain
-func (c *QueryClient) ConnectedChainHeaders(chainID string, pagination *sdkquerytypes.PageRequest) (*zctypes.QueryListHeadersResponse, error) {
+func (c *QueryClient) ConnectedChainHeaders(consumerID string, pagination *sdkquerytypes.PageRequest) (*zctypes.QueryListHeadersResponse, error) {
 	var resp *zctypes.QueryListHeadersResponse
 	err := c.QueryZoneConcierge(func(ctx context.Context, queryClient zctypes.QueryClient) error {
 		var err error
 		req := &zctypes.QueryListHeadersRequest{
-			ChainId:    chainID,
+			ConsumerId: consumerID,
 			Pagination: pagination,
 		}
 		resp, err = queryClient.ListHeaders(ctx, req)
@@ -80,13 +80,13 @@ func (c *QueryClient) ConnectedChainHeaders(chainID string, pagination *sdkquery
 }
 
 // ConnectedChainsEpochInfo queries the zoneconcierge module for the chain information of a connected chain at a particular epoch
-func (c *QueryClient) ConnectedChainsEpochInfo(chainIds []string, epochNum uint64) (*zctypes.QueryEpochChainsInfoResponse, error) {
+func (c *QueryClient) ConnectedChainsEpochInfo(consumerIds []string, epochNum uint64) (*zctypes.QueryEpochChainsInfoResponse, error) {
 	var resp *zctypes.QueryEpochChainsInfoResponse
 	err := c.QueryZoneConcierge(func(ctx context.Context, queryClient zctypes.QueryClient) error {
 		var err error
 		req := &zctypes.QueryEpochChainsInfoRequest{
-			ChainIds: chainIds,
-			EpochNum: epochNum,
+			ConsumerIds: consumerIds,
+			EpochNum:    epochNum,
 		}
 		resp, err = queryClient.EpochChainsInfo(ctx, req)
 		return err
diff --git a/proto/babylon/zoneconcierge/v1/query.proto b/proto/babylon/zoneconcierge/v1/query.proto
index 690ab0182..67f3b4503 100644
--- a/proto/babylon/zoneconcierge/v1/query.proto
+++ b/proto/babylon/zoneconcierge/v1/query.proto
@@ -21,7 +21,7 @@ service Query {
   // Header queries the CZ header and fork headers at a given height.
   rpc Header(QueryHeaderRequest) returns (QueryHeaderResponse) {
     option (google.api.http).get =
-        "/babylon/zoneconcierge/v1/chain_info/{chain_id}/header/{height}";
+        "/babylon/zoneconcierge/v1/chain_info/{consumer_id}/header/{height}";
   }
   // ChainList queries the list of chains that checkpoint to Babylon
   rpc ChainList(QueryChainListRequest) returns (QueryChainListResponse) {
@@ -43,14 +43,14 @@ service Query {
   // pagination support
   rpc ListHeaders(QueryListHeadersRequest) returns (QueryListHeadersResponse) {
     option (google.api.http).get =
-        "/babylon/zoneconcierge/v1/headers/{chain_id}";
+        "/babylon/zoneconcierge/v1/headers/{consumer_id}";
   }
   // ListEpochHeaders queries the headers of a chain timestamped in a given
   // epoch of Babylon, with pagination support
   rpc ListEpochHeaders(QueryListEpochHeadersRequest)
       returns (QueryListEpochHeadersResponse) {
     option (google.api.http).get =
-        "/babylon/zoneconcierge/v1/headers/{chain_id}/epochs/{epoch_num}";
+        "/babylon/zoneconcierge/v1/headers/{consumer_id}/epochs/{epoch_num}";
   }
   // FinalizedChainsInfo queries the BTC-finalised info of chains with given IDs, with proofs
   rpc FinalizedChainsInfo(QueryFinalizedChainsInfoRequest)
@@ -63,7 +63,7 @@ service Query {
   rpc FinalizedChainInfoUntilHeight(QueryFinalizedChainInfoUntilHeightRequest)
       returns (QueryFinalizedChainInfoUntilHeightResponse) {
     option (google.api.http).get =
-        "/babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}/height/"
+        "/babylon/zoneconcierge/v1/finalized_chain_info/{consumer_id}/height/"
         "{height}";
   }
 }
@@ -79,7 +79,7 @@ message QueryParamsResponse {
 
 // QueryHeaderRequest is request type for the Query/Header RPC method.
 message QueryHeaderRequest {
-  string chain_id = 1;
+  string consumer_id = 1;
   uint64 height = 2;
 }
 
@@ -97,14 +97,14 @@ message QueryChainListRequest {
 
 // QueryChainListResponse is response type for the Query/ChainList RPC method
 message QueryChainListResponse {
-  // chain_ids are IDs of the chains in ascending alphabetical order
-  repeated string chain_ids = 1;
+  // consumer_ids are IDs of the consumers in ascending alphabetical order
+  repeated string consumer_ids = 1;
   // pagination defines the pagination in the response
   cosmos.base.query.v1beta1.PageResponse pagination = 2;
 }
 
 // QueryChainsInfoRequest is request type for the Query/ChainsInfo RPC method.
-message QueryChainsInfoRequest { repeated string chain_ids = 1; }
+message QueryChainsInfoRequest { repeated string consumer_ids = 1; }
 
 // QueryChainsInfoResponse is response type for the Query/ChainsInfo RPC method.
 message QueryChainsInfoResponse {
@@ -115,7 +115,7 @@ message QueryChainsInfoResponse {
 // method.
 message QueryEpochChainsInfoRequest {
   uint64 epoch_num = 1;
-  repeated string chain_ids = 2;
+  repeated string consumer_ids = 2;
 }
 
 // QueryEpochChainsInfoResponse is response type for the Query/EpochChainsInfo RPC
@@ -127,7 +127,7 @@ message QueryEpochChainsInfoResponse {
 
 // QueryListHeadersRequest is request type for the Query/ListHeaders RPC method.
 message QueryListHeadersRequest {
-  string chain_id = 1;
+  string consumer_id = 1;
   // pagination defines whether to have the pagination in the request
   cosmos.base.query.v1beta1.PageRequest pagination = 2;
 }
@@ -145,7 +145,7 @@ message QueryListHeadersResponse {
 // RPC method.
 message QueryListEpochHeadersRequest {
   uint64 epoch_num = 1;
-  string chain_id = 2;
+  string consumer_id = 2;
 }
 
 // QueryListEpochHeadersResponse is response type for the Query/ListEpochHeaders
@@ -158,8 +158,8 @@ message QueryListEpochHeadersResponse {
 // QueryFinalizedChainsInfoRequest is request type for the
 // Query/FinalizedChainsInfo RPC method.
 message QueryFinalizedChainsInfoRequest {
-  // chain_ids is the list of ids of CZs
-  repeated string chain_ids = 1;
+  // consumer_ids is the list of ids of CZs
+  repeated string consumer_ids = 1;
   // prove indicates whether the querier wants to get proofs of this timestamp
   bool prove = 2;
 }
@@ -173,8 +173,8 @@ message QueryFinalizedChainsInfoResponse {
 // QueryFinalizedChainInfoUntilHeightRequest is request type for the
 // Query/FinalizedChainInfoUntilHeight RPC method.
 message QueryFinalizedChainInfoUntilHeightRequest {
-  // chain_id is the ID of the CZ
-  string chain_id = 1;
+  // consumer_id is the ID of the CZ
+  string consumer_id = 1;
   // height is the height of the CZ chain
   // such that the returned finalised chain info will be no later than this
   // height
diff --git a/proto/babylon/zoneconcierge/v1/zoneconcierge.proto b/proto/babylon/zoneconcierge/v1/zoneconcierge.proto
index b6c1dde8c..44eeefdb1 100644
--- a/proto/babylon/zoneconcierge/v1/zoneconcierge.proto
+++ b/proto/babylon/zoneconcierge/v1/zoneconcierge.proto
@@ -14,8 +14,8 @@ option go_package = "github.com/babylonlabs-io/babylon/x/zoneconcierge/types";
 
 // IndexedHeader is the metadata of a CZ header
 message IndexedHeader {
-  // chain_id is the unique ID of the chain
-  string chain_id = 1;
+  // consumer_id is the unique ID of the consumer
+  string consumer_id = 1;
   // hash is the hash of this header
   bytes hash = 2;
   // height is the height of this header on CZ ledger
@@ -60,8 +60,8 @@ message Forks {
 
 // ChainInfo is the information of a CZ
 message ChainInfo {
-  // chain_id is the ID of the chain
-  string chain_id = 1;
+  // consumer_id is the ID of the consumer
+  string consumer_id = 1;
   // latest_header is the latest header in CZ's canonical chain
   IndexedHeader latest_header = 2;
   // latest_forks is the latest forks, formed as a series of IndexedHeader (from
@@ -84,8 +84,8 @@ message ChainInfoWithProof {
 
 // FinalizedChainInfo is the information of a CZ that is BTC-finalised
 message FinalizedChainInfo {
-  // chain_id is the ID of the chain
-  string chain_id = 1;
+  // consumer_id is the ID of the consumer
+  string consumer_id = 1;
   // finalized_chain_info is the info of the CZ
   babylon.zoneconcierge.v1.ChainInfo finalized_chain_info = 2;
 
diff --git a/test/e2e/btc_timestamping_e2e_test.go b/test/e2e/btc_timestamping_e2e_test.go
index 8e985113d..f67141503 100644
--- a/test/e2e/btc_timestamping_e2e_test.go
+++ b/test/e2e/btc_timestamping_e2e_test.go
@@ -115,7 +115,7 @@ func (s *BTCTimestampingTestSuite) Test4IbcCheckpointing() {
 	// Query checkpoint chain info for opposing chain
 	chainsInfo, err := nonValidatorNode.QueryChainsInfo([]string{initialization.ChainBID})
 	s.NoError(err)
-	s.Equal(chainsInfo[0].ChainId, initialization.ChainBID)
+	s.Equal(chainsInfo[0].ConsumerId, initialization.ChainBID)
 
 	// Finalize epoch 1, 2, 3, as first headers of opposing chain are in epoch 3
 	var (
@@ -142,7 +142,7 @@ func (s *BTCTimestampingTestSuite) Test4IbcCheckpointing() {
 	// Check we have epoch info for opposing chain and some basic assertions
 	epochChainsInfo, err := nonValidatorNode.QueryEpochChainsInfo(endEpochNum, []string{initialization.ChainBID})
 	s.NoError(err)
-	s.Equal(epochChainsInfo[0].ChainId, initialization.ChainBID)
+	s.Equal(epochChainsInfo[0].ConsumerId, initialization.ChainBID)
 	s.Equal(epochChainsInfo[0].LatestHeader.BabylonEpoch, endEpochNum)
 
 	// Check we have finalized epoch info for opposing chain and some basic assertions
@@ -150,7 +150,7 @@ func (s *BTCTimestampingTestSuite) Test4IbcCheckpointing() {
 	s.NoError(err)
 
 	// TODO Add more assertion here. Maybe check proofs ?
-	s.Equal(finalizedChainsInfo[0].FinalizedChainInfo.ChainId, initialization.ChainBID)
+	s.Equal(finalizedChainsInfo[0].FinalizedChainInfo.ConsumerId, initialization.ChainBID)
 	s.Equal(finalizedChainsInfo[0].EpochInfo.EpochNumber, endEpochNum)
 
 	currEpoch, err := nonValidatorNode.QueryCurrentEpoch()
diff --git a/test/e2e/configurer/chain/queries.go b/test/e2e/configurer/chain/queries.go
index 178349bbf..4e1725cd4 100644
--- a/test/e2e/configurer/chain/queries.go
+++ b/test/e2e/configurer/chain/queries.go
@@ -306,13 +306,13 @@ func (n *NodeConfig) QueryChains() (*[]string, error) {
 	if err := util.Cdc.UnmarshalJSON(bz, &chainsResponse); err != nil {
 		return nil, err
 	}
-	return &chainsResponse.ChainIds, nil
+	return &chainsResponse.ConsumerIds, nil
 }
 
-func (n *NodeConfig) QueryChainsInfo(chainIDs []string) ([]*zctypes.ChainInfo, error) {
+func (n *NodeConfig) QueryChainsInfo(consumerIDs []string) ([]*zctypes.ChainInfo, error) {
 	queryParams := url.Values{}
-	for _, chainId := range chainIDs {
-		queryParams.Add("chain_ids", chainId)
+	for _, consumerId := range consumerIDs {
+		queryParams.Add("consumer_ids", consumerId)
 	}
 
 	bz, err := n.QueryGRPCGateway("/babylon/zoneconcierge/v1/chains_info", queryParams)
diff --git a/x/zoneconcierge/client/cli/query.go b/x/zoneconcierge/client/cli/query.go
index ca0f1b682..7e55d19e9 100644
--- a/x/zoneconcierge/client/cli/query.go
+++ b/x/zoneconcierge/client/cli/query.go
@@ -31,13 +31,13 @@ func GetQueryCmd(queryRoute string) *cobra.Command {
 
 func CmdChainsInfo() *cobra.Command {
 	cmd := &cobra.Command{
-		Use:   "chains-info <chain-ids>",
-		Short: "retrieve the latest info for a given list of chains",
+		Use:   "chains-info <consumer-ids>",
+		Short: "retrieve the latest info for a given list of consumers",
 		Args:  cobra.ArbitraryArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
 			clientCtx := client.GetClientContextFromCmd(cmd)
 			queryClient := types.NewQueryClient(clientCtx)
-			req := types.QueryChainsInfoRequest{ChainIds: args}
+			req := types.QueryChainsInfoRequest{ConsumerIds: args}
 			resp, err := queryClient.ChainsInfo(cmd.Context(), &req)
 			if err != nil {
 				return err
@@ -53,15 +53,15 @@ func CmdChainsInfo() *cobra.Command {
 
 func CmdFinalizedChainsInfo() *cobra.Command {
 	cmd := &cobra.Command{
-		Use:   "finalized-chains-info <chain-ids>",
-		Short: "retrieve the finalized info for a given list of chains",
+		Use:   "finalized-chains-info <consumer-ids>",
+		Short: "retrieve the finalized info for a given list of consumers",
 		Args:  cobra.ArbitraryArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
 			prove, _ := cmd.Flags().GetBool("prove")
 
 			clientCtx := client.GetClientContextFromCmd(cmd)
 			queryClient := types.NewQueryClient(clientCtx)
-			req := types.QueryFinalizedChainsInfoRequest{ChainIds: args, Prove: prove}
+			req := types.QueryFinalizedChainsInfoRequest{ConsumerIds: args, Prove: prove}
 			resp, err := queryClient.FinalizedChainsInfo(cmd.Context(), &req)
 			if err != nil {
 				return err
@@ -79,8 +79,8 @@ func CmdFinalizedChainsInfo() *cobra.Command {
 
 func CmdEpochChainsInfoInfo() *cobra.Command {
 	cmd := &cobra.Command{
-		Use:   "epoch-chains-info <epoch-num> <chain-ids>",
-		Short: "retrieve the latest info for a list of chains in a given epoch",
+		Use:   "epoch-chains-info <epoch-num> <consumer-ids>",
+		Short: "retrieve the latest info for a list of consumers in a given epoch",
 		Args:  cobra.MinimumNArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
 			clientCtx := client.GetClientContextFromCmd(cmd)
@@ -90,7 +90,7 @@ func CmdEpochChainsInfoInfo() *cobra.Command {
 			if err != nil {
 				return err
 			}
-			req := types.QueryEpochChainsInfoRequest{EpochNum: epoch, ChainIds: args[1:]}
+			req := types.QueryEpochChainsInfoRequest{EpochNum: epoch, ConsumerIds: args[1:]}
 			resp, err := queryClient.EpochChainsInfo(cmd.Context(), &req)
 			if err != nil {
 				return err
diff --git a/x/zoneconcierge/keeper/canonical_chain_indexer.go b/x/zoneconcierge/keeper/canonical_chain_indexer.go
index 52d89b965..253c05c5c 100644
--- a/x/zoneconcierge/keeper/canonical_chain_indexer.go
+++ b/x/zoneconcierge/keeper/canonical_chain_indexer.go
@@ -3,6 +3,7 @@ package keeper
 import (
 	"context"
 	"fmt"
+
 	"github.com/cosmos/cosmos-sdk/runtime"
 
 	sdkerrors "cosmossdk.io/errors"
@@ -12,10 +13,10 @@ import (
 )
 
 // FindClosestHeader finds the IndexedHeader that is closest to (but not after) the given height
-func (k Keeper) FindClosestHeader(ctx context.Context, chainID string, height uint64) (*types.IndexedHeader, error) {
-	chainInfo, err := k.GetChainInfo(ctx, chainID)
+func (k Keeper) FindClosestHeader(ctx context.Context, consumerID string, height uint64) (*types.IndexedHeader, error) {
+	chainInfo, err := k.GetChainInfo(ctx, consumerID)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get chain info for chain with ID %s: %w", chainID, err)
+		return nil, fmt.Errorf("failed to get chain info for chain with ID %s: %w", consumerID, err)
 	}
 
 	// if the given height is no lower than the latest header, return the latest header directly
@@ -24,13 +25,13 @@ func (k Keeper) FindClosestHeader(ctx context.Context, chainID string, height ui
 	}
 
 	// the requested height is lower than the latest header, trace back until finding a timestamped header
-	store := k.canonicalChainStore(ctx, chainID)
+	store := k.canonicalChainStore(ctx, consumerID)
 	heightBytes := sdk.Uint64ToBigEndian(height)
 	iter := store.ReverseIterator(nil, heightBytes)
 	defer iter.Close()
 	// if there is no key within range [0, height], return error
 	if !iter.Valid() {
-		return nil, fmt.Errorf("chain with ID %s does not have a timestamped header before height %d", chainID, height)
+		return nil, fmt.Errorf("chain with ID %s does not have a timestamped header before height %d", consumerID, height)
 	}
 	// find the header in bytes, decode and return
 	headerBytes := iter.Value()
@@ -39,8 +40,8 @@ func (k Keeper) FindClosestHeader(ctx context.Context, chainID string, height ui
 	return &header, nil
 }
 
-func (k Keeper) GetHeader(ctx context.Context, chainID string, height uint64) (*types.IndexedHeader, error) {
-	store := k.canonicalChainStore(ctx, chainID)
+func (k Keeper) GetHeader(ctx context.Context, consumerID string, height uint64) (*types.IndexedHeader, error) {
+	store := k.canonicalChainStore(ctx, consumerID)
 	heightBytes := sdk.Uint64ToBigEndian(height)
 	if !store.Has(heightBytes) {
 		return nil, types.ErrHeaderNotFound
@@ -51,23 +52,23 @@ func (k Keeper) GetHeader(ctx context.Context, chainID string, height uint64) (*
 	return &header, nil
 }
 
-func (k Keeper) insertHeader(ctx context.Context, chainID string, header *types.IndexedHeader) error {
+func (k Keeper) insertHeader(ctx context.Context, consumerID string, header *types.IndexedHeader) error {
 	if header == nil {
 		return sdkerrors.Wrapf(types.ErrInvalidHeader, "header is nil")
 	}
 	// NOTE: we can accept header without ancestor since IBC connection can be established at any height
-	store := k.canonicalChainStore(ctx, chainID)
+	store := k.canonicalChainStore(ctx, consumerID)
 	store.Set(sdk.Uint64ToBigEndian(header.Height), k.cdc.MustMarshal(header))
 	return nil
 }
 
 // canonicalChainStore stores the canonical chain of a CZ, formed as a list of IndexedHeader
-// prefix: CanonicalChainKey || chainID
+// prefix: CanonicalChainKey || consumerID
 // key: height
 // value: IndexedHeader
-func (k Keeper) canonicalChainStore(ctx context.Context, chainID string) prefix.Store {
+func (k Keeper) canonicalChainStore(ctx context.Context, consumerID string) prefix.Store {
 	storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx))
 	canonicalChainStore := prefix.NewStore(storeAdapter, types.CanonicalChainKey)
-	chainIDBytes := []byte(chainID)
-	return prefix.NewStore(canonicalChainStore, chainIDBytes)
+	consumerIDBytes := []byte(consumerID)
+	return prefix.NewStore(canonicalChainStore, consumerIDBytes)
 }
diff --git a/x/zoneconcierge/keeper/canonical_chain_indexer_test.go b/x/zoneconcierge/keeper/canonical_chain_indexer_test.go
index af484b32c..c13a7bed5 100644
--- a/x/zoneconcierge/keeper/canonical_chain_indexer_test.go
+++ b/x/zoneconcierge/keeper/canonical_chain_indexer_test.go
@@ -18,27 +18,27 @@ func FuzzCanonicalChainIndexer(f *testing.F) {
 		babylonApp := app.Setup(t, false)
 		zcKeeper := babylonApp.ZoneConciergeKeeper
 		ctx := babylonApp.NewContext(false)
-		czChainID := "test-chainid"
+		czConsumerId := "test-consumerid"
 
 		// simulate a random number of blocks
 		numHeaders := datagen.RandomInt(r, 100) + 1
-		headers := SimulateNewHeaders(ctx, r, &zcKeeper, czChainID, 0, numHeaders)
+		headers := SimulateNewHeaders(ctx, r, &zcKeeper, czConsumerId, 0, numHeaders)
 
 		// check if the canonical chain index is correct or not
 		for i := uint64(0); i < numHeaders; i++ {
-			header, err := zcKeeper.GetHeader(ctx, czChainID, i)
+			header, err := zcKeeper.GetHeader(ctx, czConsumerId, i)
 			require.NoError(t, err)
 			require.NotNil(t, header)
-			require.Equal(t, czChainID, header.ChainId)
+			require.Equal(t, czConsumerId, header.ConsumerId)
 			require.Equal(t, i, header.Height)
 			require.Equal(t, headers[i].Header.AppHash, header.Hash)
 		}
 
 		// check if the chain info is updated or not
-		chainInfo, err := zcKeeper.GetChainInfo(ctx, czChainID)
+		chainInfo, err := zcKeeper.GetChainInfo(ctx, czConsumerId)
 		require.NoError(t, err)
 		require.NotNil(t, chainInfo.LatestHeader)
-		require.Equal(t, czChainID, chainInfo.LatestHeader.ChainId)
+		require.Equal(t, czConsumerId, chainInfo.LatestHeader.ConsumerId)
 		require.Equal(t, numHeaders-1, chainInfo.LatestHeader.Height)
 		require.Equal(t, headers[numHeaders-1].Header.AppHash, chainInfo.LatestHeader.Hash)
 	})
@@ -53,17 +53,17 @@ func FuzzFindClosestHeader(f *testing.F) {
 		babylonApp := app.Setup(t, false)
 		zcKeeper := babylonApp.ZoneConciergeKeeper
 		ctx := babylonApp.NewContext(false)
-		czChainID := "test-chainid"
+		czConsumerId := "test-consumerid"
 
 		// no header at the moment, FindClosestHeader invocation should give error
-		_, err := zcKeeper.FindClosestHeader(ctx, czChainID, 100)
+		_, err := zcKeeper.FindClosestHeader(ctx, czConsumerId, 100)
 		require.Error(t, err)
 
 		// simulate a random number of blocks
 		numHeaders := datagen.RandomInt(r, 100) + 1
-		headers := SimulateNewHeaders(ctx, r, &zcKeeper, czChainID, 0, numHeaders)
+		headers := SimulateNewHeaders(ctx, r, &zcKeeper, czConsumerId, 0, numHeaders)
 
-		header, err := zcKeeper.FindClosestHeader(ctx, czChainID, numHeaders)
+		header, err := zcKeeper.FindClosestHeader(ctx, czConsumerId, numHeaders)
 		require.NoError(t, err)
 		require.Equal(t, headers[len(headers)-1].Header.AppHash, header.Hash)
 
@@ -72,12 +72,12 @@ func FuzzFindClosestHeader(f *testing.F) {
 
 		// simulate a random number of blocks
 		// where the new batch of headers has a gap with the previous batch
-		SimulateNewHeaders(ctx, r, &zcKeeper, czChainID, numHeaders+gap+1, numHeaders)
+		SimulateNewHeaders(ctx, r, &zcKeeper, czConsumerId, numHeaders+gap+1, numHeaders)
 
 		// get a random height that is in this gap
 		randomHeightInGap := datagen.RandomInt(r, int(gap+1)) + numHeaders
 		// find the closest header with the given randomHeightInGap
-		header, err = zcKeeper.FindClosestHeader(ctx, czChainID, randomHeightInGap)
+		header, err = zcKeeper.FindClosestHeader(ctx, czConsumerId, randomHeightInGap)
 		require.NoError(t, err)
 		// the header should be the same as the last header in the last batch
 		require.Equal(t, headers[len(headers)-1].Header.AppHash, header.Hash)
diff --git a/x/zoneconcierge/keeper/chain_info_indexer.go b/x/zoneconcierge/keeper/chain_info_indexer.go
index a3ae402d1..29deeea92 100644
--- a/x/zoneconcierge/keeper/chain_info_indexer.go
+++ b/x/zoneconcierge/keeper/chain_info_indexer.go
@@ -3,6 +3,7 @@ package keeper
 import (
 	"context"
 	"fmt"
+
 	"github.com/cosmos/cosmos-sdk/runtime"
 
 	errorsmod "cosmossdk.io/errors"
@@ -12,20 +13,20 @@ import (
 
 func (k Keeper) setChainInfo(ctx context.Context, chainInfo *types.ChainInfo) {
 	store := k.chainInfoStore(ctx)
-	store.Set([]byte(chainInfo.ChainId), k.cdc.MustMarshal(chainInfo))
+	store.Set([]byte(chainInfo.ConsumerId), k.cdc.MustMarshal(chainInfo))
 }
 
-func (k Keeper) InitChainInfo(ctx context.Context, chainID string) (*types.ChainInfo, error) {
-	if len(chainID) == 0 {
-		return nil, fmt.Errorf("chainID is empty")
+func (k Keeper) InitChainInfo(ctx context.Context, consumerID string) (*types.ChainInfo, error) {
+	if len(consumerID) == 0 {
+		return nil, fmt.Errorf("consumerID is empty")
 	}
 	// ensure chain info has not been initialised yet
-	if k.HasChainInfo(ctx, chainID) {
+	if k.HasChainInfo(ctx, consumerID) {
 		return nil, errorsmod.Wrapf(types.ErrInvalidChainInfo, "chain info has already initialized")
 	}
 
 	chainInfo := &types.ChainInfo{
-		ChainId:      chainID,
+		ConsumerId:   consumerID,
 		LatestHeader: nil,
 		LatestForks: &types.Forks{
 			Headers: []*types.IndexedHeader{},
@@ -40,21 +41,21 @@ func (k Keeper) InitChainInfo(ctx context.Context, chainID string) (*types.Chain
 // HasChainInfo returns whether the chain info exists for a given ID
 // Since IBC does not provide API that allows to initialise chain info right before creating an IBC connection,
 // we can only check its existence every time, and return an empty one if it's not initialised yet.
-func (k Keeper) HasChainInfo(ctx context.Context, chainID string) bool {
+func (k Keeper) HasChainInfo(ctx context.Context, consumerId string) bool {
 	store := k.chainInfoStore(ctx)
-	return store.Has([]byte(chainID))
+	return store.Has([]byte(consumerId))
 }
 
 // GetChainInfo returns the ChainInfo struct for a chain with a given ID
 // Since IBC does not provide API that allows to initialise chain info right before creating an IBC connection,
 // we can only check its existence every time, and return an empty one if it's not initialised yet.
-func (k Keeper) GetChainInfo(ctx context.Context, chainID string) (*types.ChainInfo, error) {
-	if !k.HasChainInfo(ctx, chainID) {
+func (k Keeper) GetChainInfo(ctx context.Context, consumerId string) (*types.ChainInfo, error) {
+	if !k.HasChainInfo(ctx, consumerId) {
 		return nil, types.ErrChainInfoNotFound
 	}
 
 	store := k.chainInfoStore(ctx)
-	chainInfoBytes := store.Get([]byte(chainID))
+	chainInfoBytes := store.Get([]byte(consumerId))
 	var chainInfo types.ChainInfo
 	k.cdc.MustUnmarshal(chainInfoBytes, &chainInfo)
 	return &chainInfo, nil
@@ -66,14 +67,14 @@ func (k Keeper) GetChainInfo(ctx context.Context, chainID string) (*types.ChainI
 // Note that this function is triggered only upon receiving headers from the relayer,
 // and only a subset of headers in CZ are relayed. Thus TimestampedHeadersCount is not
 // equal to the total number of headers in CZ.
-func (k Keeper) updateLatestHeader(ctx context.Context, chainID string, header *types.IndexedHeader) error {
+func (k Keeper) updateLatestHeader(ctx context.Context, consumerId string, header *types.IndexedHeader) error {
 	if header == nil {
 		return errorsmod.Wrapf(types.ErrInvalidHeader, "header is nil")
 	}
-	chainInfo, err := k.GetChainInfo(ctx, chainID)
+	chainInfo, err := k.GetChainInfo(ctx, consumerId)
 	if err != nil {
 		// chain info has not been initialised yet
-		return fmt.Errorf("failed to get chain info of %s: %w", chainID, err)
+		return fmt.Errorf("failed to get chain info of %s: %w", consumerId, err)
 	}
 	chainInfo.LatestHeader = header     // replace the old latest header with the given one
 	chainInfo.TimestampedHeadersCount++ // increment the number of timestamped headers
@@ -87,12 +88,12 @@ func (k Keeper) updateLatestHeader(ctx context.Context, chainID string, header *
 // - If there is a fork header at the same height, add this fork to the set of latest fork headers
 // - If this fork header is newer than the previous one, replace the old fork headers with this fork header
 // - If this fork header is older than the current latest fork, ignore
-func (k Keeper) tryToUpdateLatestForkHeader(ctx context.Context, chainID string, header *types.IndexedHeader) error {
+func (k Keeper) tryToUpdateLatestForkHeader(ctx context.Context, consumerId string, header *types.IndexedHeader) error {
 	if header == nil {
 		return errorsmod.Wrapf(types.ErrInvalidHeader, "header is nil")
 	}
 
-	chainInfo, err := k.GetChainInfo(ctx, chainID)
+	chainInfo, err := k.GetChainInfo(ctx, consumerId)
 	if err != nil {
 		return errorsmod.Wrapf(types.ErrChainInfoNotFound, "cannot insert fork header when chain info is not initialized")
 	}
@@ -117,23 +118,23 @@ func (k Keeper) tryToUpdateLatestForkHeader(ctx context.Context, chainID string,
 	return nil
 }
 
-// GetAllChainIDs gets all chain IDs that integrate Babylon
-func (k Keeper) GetAllChainIDs(ctx context.Context) []string {
-	chainIDs := []string{}
+// GetAllConsumerIDs gets the IDs of all consumers that integrate Babylon
+func (k Keeper) GetAllConsumerIDs(ctx context.Context) []string {
+	consumerIds := []string{}
 	iter := k.chainInfoStore(ctx).Iterator(nil, nil)
 	defer iter.Close()
 
 	for ; iter.Valid(); iter.Next() {
-		chainIDBytes := iter.Key()
-		chainID := string(chainIDBytes)
-		chainIDs = append(chainIDs, chainID)
+		consumerIdBytes := iter.Key()
+		consumerId := string(consumerIdBytes)
+		consumerIds = append(consumerIds, consumerId)
 	}
-	return chainIDs
+	return consumerIds
 }
 
 // msgChainInfoStore stores the information of canonical chains and forks for CZs
 // prefix: ChainInfoKey
-// key: chainID
+// key: consumerId
 // value: ChainInfo
 func (k Keeper) chainInfoStore(ctx context.Context) prefix.Store {
 	storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx))
diff --git a/x/zoneconcierge/keeper/epoch_chain_info_indexer.go b/x/zoneconcierge/keeper/epoch_chain_info_indexer.go
index 5584b008e..942f90001 100644
--- a/x/zoneconcierge/keeper/epoch_chain_info_indexer.go
+++ b/x/zoneconcierge/keeper/epoch_chain_info_indexer.go
@@ -13,12 +13,12 @@ import (
 )
 
 // GetEpochChainInfo gets the latest chain info of a given epoch for a given chain ID
-func (k Keeper) GetEpochChainInfo(ctx context.Context, chainID string, epochNumber uint64) (*types.ChainInfoWithProof, error) {
-	if !k.EpochChainInfoExists(ctx, chainID, epochNumber) {
+func (k Keeper) GetEpochChainInfo(ctx context.Context, consumerID string, epochNumber uint64) (*types.ChainInfoWithProof, error) {
+	if !k.EpochChainInfoExists(ctx, consumerID, epochNumber) {
 		return nil, types.ErrEpochChainInfoNotFound
 	}
 
-	store := k.epochChainInfoStore(ctx, chainID)
+	store := k.epochChainInfoStore(ctx, consumerID)
 	epochNumberBytes := sdk.Uint64ToBigEndian(epochNumber)
 	epochChainInfoBytes := store.Get(epochNumberBytes)
 	var chainInfo types.ChainInfoWithProof
@@ -26,24 +26,24 @@ func (k Keeper) GetEpochChainInfo(ctx context.Context, chainID string, epochNumb
 	return &chainInfo, nil
 }
 
-func (k Keeper) setEpochChainInfo(ctx context.Context, chainID string, epochNumber uint64, chainInfo *types.ChainInfoWithProof) {
-	store := k.epochChainInfoStore(ctx, chainID)
+func (k Keeper) setEpochChainInfo(ctx context.Context, consumerID string, epochNumber uint64, chainInfo *types.ChainInfoWithProof) {
+	store := k.epochChainInfoStore(ctx, consumerID)
 	store.Set(sdk.Uint64ToBigEndian(epochNumber), k.cdc.MustMarshal(chainInfo))
 }
 
 // EpochChainInfoExists checks if the latest chain info exists of a given epoch for a given chain ID
-func (k Keeper) EpochChainInfoExists(ctx context.Context, chainID string, epochNumber uint64) bool {
-	store := k.epochChainInfoStore(ctx, chainID)
+func (k Keeper) EpochChainInfoExists(ctx context.Context, consumerID string, epochNumber uint64) bool {
+	store := k.epochChainInfoStore(ctx, consumerID)
 	epochNumberBytes := sdk.Uint64ToBigEndian(epochNumber)
 	return store.Has(epochNumberBytes)
 }
 
 // GetEpochHeaders gets the headers timestamped in a given epoch, in the ascending order
-func (k Keeper) GetEpochHeaders(ctx context.Context, chainID string, epochNumber uint64) ([]*types.IndexedHeader, error) {
+func (k Keeper) GetEpochHeaders(ctx context.Context, consumerID string, epochNumber uint64) ([]*types.IndexedHeader, error) {
 	headers := []*types.IndexedHeader{}
 
 	// find the last timestamped header of this chain in the epoch
-	epochChainInfoWithProof, err := k.GetEpochChainInfo(ctx, chainID, epochNumber)
+	epochChainInfoWithProof, err := k.GetEpochChainInfo(ctx, consumerID, epochNumber)
 	if err != nil {
 		return nil, err
 	}
@@ -57,7 +57,7 @@ func (k Keeper) GetEpochHeaders(ctx context.Context, chainID string, epochNumber
 	headers = append(headers, epochChainInfo.LatestHeader)
 
 	// append all previous headers until reaching the previous epoch
-	canonicalChainStore := k.canonicalChainStore(ctx, chainID)
+	canonicalChainStore := k.canonicalChainStore(ctx, consumerID)
 	lastHeaderKey := sdk.Uint64ToBigEndian(epochChainInfo.LatestHeader.Height)
 	// NOTE: even in ReverseIterator, start and end should still be specified in ascending order
 	canonicalChainIter := canonicalChainStore.ReverseIterator(nil, lastHeaderKey)
@@ -80,9 +80,9 @@ func (k Keeper) GetEpochHeaders(ctx context.Context, chainID string, epochNumber
 
 // recordEpochChainInfo records the chain info for a given epoch number of given chain ID
 // where the latest chain info is retrieved from the chain info indexer
-func (k Keeper) recordEpochChainInfo(ctx context.Context, chainID string, epochNumber uint64) {
+func (k Keeper) recordEpochChainInfo(ctx context.Context, consumerID string, epochNumber uint64) {
 	// get the latest known chain info
-	chainInfo, err := k.GetChainInfo(ctx, chainID)
+	chainInfo, err := k.GetChainInfo(ctx, consumerID)
 	if err != nil {
 		k.Logger(sdk.UnwrapSDKContext(ctx)).Debug("chain info does not exist yet, nothing to record")
 		return
@@ -93,19 +93,19 @@ func (k Keeper) recordEpochChainInfo(ctx context.Context, chainID string, epochN
 	}
 
 	// NOTE: we can record epoch chain info without ancestor since IBC connection can be established at any height
-	k.setEpochChainInfo(ctx, chainID, epochNumber, chainInfoWithProof)
+	k.setEpochChainInfo(ctx, consumerID, epochNumber, chainInfoWithProof)
 }
 
 // recordEpochChainInfo records the chain info for a given epoch number of given chain ID
 // where the latest chain info is retrieved from the chain info indexer
 func (k Keeper) recordEpochChainInfoProofs(ctx context.Context, epochNumber uint64) {
 	curEpoch := k.GetEpoch(ctx)
-	chainIDs := k.GetAllChainIDs(ctx)
+	consumerIDs := k.GetAllConsumerIDs(ctx)
 
 	// save all inclusion proofs
-	for _, chainID := range chainIDs {
+	for _, consumerID := range consumerIDs {
 		// retrieve chain info with empty proof
-		chainInfo, err := k.GetEpochChainInfo(ctx, chainID, epochNumber)
+		chainInfo, err := k.GetEpochChainInfo(ctx, consumerID, epochNumber)
 		if err != nil {
 			panic(err) // only programming error
 		}
@@ -116,24 +116,24 @@ func (k Keeper) recordEpochChainInfoProofs(ctx context.Context, epochNumber uint
 			proofCZHeaderInEpoch, err := k.ProveCZHeaderInEpoch(ctx, lastHeaderInEpoch, curEpoch)
 			if err != nil {
 				// only programming error is possible here
-				panic(fmt.Errorf("failed to generate proofCZHeaderInEpoch for chain %s: %w", chainID, err))
+				panic(fmt.Errorf("failed to generate proofCZHeaderInEpoch for consumer %s: %w", consumerID, err))
 			}
 
 			chainInfo.ProofHeaderInEpoch = proofCZHeaderInEpoch
 
 			// set chain info with proof back
-			k.setEpochChainInfo(ctx, chainID, epochNumber, chainInfo)
+			k.setEpochChainInfo(ctx, consumerID, epochNumber, chainInfo)
 		}
 	}
 }
 
 // epochChainInfoStore stores each epoch's latest ChainInfo for a CZ
-// prefix: EpochChainInfoKey || chainID
+// prefix: EpochChainInfoKey || consumerID
 // key: epochNumber
 // value: ChainInfoWithProof
-func (k Keeper) epochChainInfoStore(ctx context.Context, chainID string) prefix.Store {
+func (k Keeper) epochChainInfoStore(ctx context.Context, consumerID string) prefix.Store {
 	storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx))
 	epochChainInfoStore := prefix.NewStore(storeAdapter, types.EpochChainInfoKey)
-	chainIDBytes := []byte(chainID)
-	return prefix.NewStore(epochChainInfoStore, chainIDBytes)
+	consumerIDBytes := []byte(consumerID)
+	return prefix.NewStore(epochChainInfoStore, consumerIDBytes)
 }
diff --git a/x/zoneconcierge/keeper/fork_indexer.go b/x/zoneconcierge/keeper/fork_indexer.go
index 68d98243f..936c13ea3 100644
--- a/x/zoneconcierge/keeper/fork_indexer.go
+++ b/x/zoneconcierge/keeper/fork_indexer.go
@@ -3,6 +3,7 @@ package keeper
 import (
 	"bytes"
 	"context"
+
 	"github.com/cosmos/cosmos-sdk/runtime"
 
 	sdkerrors "cosmossdk.io/errors"
@@ -12,8 +13,8 @@ import (
 )
 
 // GetForks returns a list of forked headers at a given height
-func (k Keeper) GetForks(ctx context.Context, chainID string, height uint64) *types.Forks {
-	store := k.forkStore(ctx, chainID)
+func (k Keeper) GetForks(ctx context.Context, consumerID string, height uint64) *types.Forks {
+	store := k.forkStore(ctx, consumerID)
 	heightBytes := sdk.Uint64ToBigEndian(height)
 	// if no fork at the moment, create an empty struct
 	if !store.Has(heightBytes) {
@@ -28,12 +29,12 @@ func (k Keeper) GetForks(ctx context.Context, chainID string, height uint64) *ty
 }
 
 // insertForkHeader inserts a forked header to the list of forked headers at the same height
-func (k Keeper) insertForkHeader(ctx context.Context, chainID string, header *types.IndexedHeader) error {
+func (k Keeper) insertForkHeader(ctx context.Context, consumerID string, header *types.IndexedHeader) error {
 	if header == nil {
 		return sdkerrors.Wrapf(types.ErrInvalidHeader, "header is nil")
 	}
-	store := k.forkStore(ctx, chainID)
-	forks := k.GetForks(ctx, chainID, header.Height) // if no fork at the height, forks will be an empty struct rather than nil
+	store := k.forkStore(ctx, consumerID)
+	forks := k.GetForks(ctx, consumerID, header.Height) // if no fork at the height, forks will be an empty struct rather than nil
 	// if the header is already in forks, discard this header and return directly
 	for _, h := range forks.Headers {
 		if bytes.Equal(h.Hash, header.Hash) {
@@ -47,12 +48,12 @@ func (k Keeper) insertForkHeader(ctx context.Context, chainID string, header *ty
 }
 
 // forkStore stores the forks for each CZ
-// prefix: ForkKey || chainID
+// prefix: ForkKey || consumerID
 // key: height that this fork starts from
 // value: a list of IndexedHeader, representing each header in the fork
-func (k Keeper) forkStore(ctx context.Context, chainID string) prefix.Store {
+func (k Keeper) forkStore(ctx context.Context, consumerID string) prefix.Store {
 	storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx))
 	forkStore := prefix.NewStore(storeAdapter, types.ForkKey)
-	chainIDBytes := []byte(chainID)
-	return prefix.NewStore(forkStore, chainIDBytes)
+	consumerIDBytes := []byte(consumerID)
+	return prefix.NewStore(forkStore, consumerIDBytes)
 }
diff --git a/x/zoneconcierge/keeper/fork_indexer_test.go b/x/zoneconcierge/keeper/fork_indexer_test.go
index c1ef1a522..b54fce894 100644
--- a/x/zoneconcierge/keeper/fork_indexer_test.go
+++ b/x/zoneconcierge/keeper/fork_indexer_test.go
@@ -29,7 +29,7 @@ func FuzzForkIndexer(f *testing.F) {
 		forks := zcKeeper.GetForks(ctx, czChainID, numHeaders-1)
 		require.Equal(t, numForkHeaders, uint64(len(forks.Headers)))
 		for i := range forks.Headers {
-			require.Equal(t, czChainID, forks.Headers[i].ChainId)
+			require.Equal(t, czChainID, forks.Headers[i].ConsumerId)
 			require.Equal(t, numHeaders-1, forks.Headers[i].Height)
 			require.Equal(t, forkHeaders[i].Header.AppHash, forks.Headers[i].Hash)
 		}
@@ -39,7 +39,7 @@ func FuzzForkIndexer(f *testing.F) {
 		require.NoError(t, err)
 		require.Equal(t, numForkHeaders, uint64(len(chainInfo.LatestForks.Headers)))
 		for i := range forks.Headers {
-			require.Equal(t, czChainID, chainInfo.LatestForks.Headers[i].ChainId)
+			require.Equal(t, czChainID, chainInfo.LatestForks.Headers[i].ConsumerId)
 			require.Equal(t, numHeaders-1, chainInfo.LatestForks.Headers[i].Height)
 			require.Equal(t, forkHeaders[i].Header.AppHash, chainInfo.LatestForks.Headers[i].Hash)
 		}
diff --git a/x/zoneconcierge/keeper/grpc_query.go b/x/zoneconcierge/keeper/grpc_query.go
index 5cb71dfca..22e56e8aa 100644
--- a/x/zoneconcierge/keeper/grpc_query.go
+++ b/x/zoneconcierge/keeper/grpc_query.go
@@ -31,11 +31,11 @@ func (k Keeper) ChainList(c context.Context, req *types.QueryChainListRequest) (
 
 	ctx := sdk.UnwrapSDKContext(c)
 
-	chainIDs := []string{}
+	consumerIDs := []string{}
 	store := k.chainInfoStore(ctx)
 	pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error {
-		chainID := string(key)
-		chainIDs = append(chainIDs, chainID)
+		consumerID := string(key)
+		consumerIDs = append(consumerIDs, consumerID)
 		return nil
 	})
 	if err != nil {
@@ -43,8 +43,8 @@ func (k Keeper) ChainList(c context.Context, req *types.QueryChainListRequest) (
 	}
 
 	resp := &types.QueryChainListResponse{
-		ChainIds:   chainIDs,
-		Pagination: pageRes,
+		ConsumerIds: consumerIDs,
+		Pagination:  pageRes,
 	}
 	return resp, nil
 }
@@ -56,24 +56,24 @@ func (k Keeper) ChainsInfo(c context.Context, req *types.QueryChainsInfoRequest)
 	}
 
 	// return if no chain IDs are provided
-	if len(req.ChainIds) == 0 {
+	if len(req.ConsumerIds) == 0 {
 		return nil, status.Error(codes.InvalidArgument, "chain IDs cannot be empty")
 	}
 
 	// return if chain IDs exceed the limit
-	if len(req.ChainIds) > maxQueryChainsInfoLimit {
+	if len(req.ConsumerIds) > maxQueryChainsInfoLimit {
 		return nil, status.Errorf(codes.InvalidArgument, "cannot query more than %d chains", maxQueryChainsInfoLimit)
 	}
 
 	// return if chain IDs contain duplicates or empty strings
-	if err := bbntypes.CheckForDuplicatesAndEmptyStrings(req.ChainIds); err != nil {
-		return nil, status.Error(codes.InvalidArgument, types.ErrInvalidChainIDs.Wrap(err.Error()).Error())
+	if err := bbntypes.CheckForDuplicatesAndEmptyStrings(req.ConsumerIds); err != nil {
+		return nil, status.Error(codes.InvalidArgument, types.ErrInvalidConsumerIDs.Wrap(err.Error()).Error())
 	}
 
 	ctx := sdk.UnwrapSDKContext(c)
 	var chainsInfo []*types.ChainInfo
-	for _, chainID := range req.ChainIds {
-		chainInfo, err := k.GetChainInfo(ctx, chainID)
+	for _, consumerID := range req.ConsumerIds {
+		chainInfo, err := k.GetChainInfo(ctx, consumerID)
 		if err != nil {
 			return nil, err
 		}
@@ -91,17 +91,17 @@ func (k Keeper) Header(c context.Context, req *types.QueryHeaderRequest) (*types
 		return nil, status.Error(codes.InvalidArgument, "invalid request")
 	}
 
-	if len(req.ChainId) == 0 {
+	if len(req.ConsumerId) == 0 {
 		return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty")
 	}
 
 	ctx := sdk.UnwrapSDKContext(c)
 
-	header, err := k.GetHeader(ctx, req.ChainId, req.Height)
+	header, err := k.GetHeader(ctx, req.ConsumerId, req.Height)
 	if err != nil {
 		return nil, err
 	}
-	forks := k.GetForks(ctx, req.ChainId, req.Height)
+	forks := k.GetForks(ctx, req.ConsumerId, req.Height)
 	resp := &types.QueryHeaderResponse{
 		Header:      header,
 		ForkHeaders: forks,
@@ -117,36 +117,36 @@ func (k Keeper) EpochChainsInfo(c context.Context, req *types.QueryEpochChainsIn
 	}
 
 	// return if no chain IDs are provided
-	if len(req.ChainIds) == 0 {
+	if len(req.ConsumerIds) == 0 {
 		return nil, status.Error(codes.InvalidArgument, "chain IDs cannot be empty")
 	}
 
 	// return if chain IDs exceed the limit
-	if len(req.ChainIds) > maxQueryChainsInfoLimit {
+	if len(req.ConsumerIds) > maxQueryChainsInfoLimit {
 		return nil, status.Errorf(codes.InvalidArgument, "cannot query more than %d chains", maxQueryChainsInfoLimit)
 	}
 
 	// return if chain IDs contain duplicates or empty strings
-	if err := bbntypes.CheckForDuplicatesAndEmptyStrings(req.ChainIds); err != nil {
-		return nil, status.Error(codes.InvalidArgument, types.ErrInvalidChainIDs.Wrap(err.Error()).Error())
+	if err := bbntypes.CheckForDuplicatesAndEmptyStrings(req.ConsumerIds); err != nil {
+		return nil, status.Error(codes.InvalidArgument, types.ErrInvalidConsumerIDs.Wrap(err.Error()).Error())
 	}
 
 	ctx := sdk.UnwrapSDKContext(c)
 	var chainsInfo []*types.ChainInfo
-	for _, chainID := range req.ChainIds {
+	for _, consumerID := range req.ConsumerIds {
 		// check if chain ID is valid
-		if !k.HasChainInfo(ctx, chainID) {
-			return nil, status.Error(codes.InvalidArgument, types.ErrChainInfoNotFound.Wrapf("chain ID %s", chainID).Error())
+		if !k.HasChainInfo(ctx, consumerID) {
+			return nil, status.Error(codes.InvalidArgument, types.ErrChainInfoNotFound.Wrapf("chain ID %s", consumerID).Error())
 		}
 
 		// if the chain info is not found in the given epoch, return with empty fields
-		if !k.EpochChainInfoExists(ctx, chainID, req.EpochNum) {
-			chainsInfo = append(chainsInfo, &types.ChainInfo{ChainId: chainID})
+		if !k.EpochChainInfoExists(ctx, consumerID, req.EpochNum) {
+			chainsInfo = append(chainsInfo, &types.ChainInfo{ConsumerId: consumerID})
 			continue
 		}
 
 		// find the chain info of the given epoch
-		chainInfoWithProof, err := k.GetEpochChainInfo(ctx, chainID, req.EpochNum)
+		chainInfoWithProof, err := k.GetEpochChainInfo(ctx, consumerID, req.EpochNum)
 		if err != nil {
 			return nil, err
 		}
@@ -164,14 +164,14 @@ func (k Keeper) ListHeaders(c context.Context, req *types.QueryListHeadersReques
 		return nil, status.Error(codes.InvalidArgument, "invalid request")
 	}
 
-	if len(req.ChainId) == 0 {
+	if len(req.ConsumerId) == 0 {
 		return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty")
 	}
 
 	ctx := sdk.UnwrapSDKContext(c)
 
 	headers := []*types.IndexedHeader{}
-	store := k.canonicalChainStore(ctx, req.ChainId)
+	store := k.canonicalChainStore(ctx, req.ConsumerId)
 	pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error {
 		var header types.IndexedHeader
 		k.cdc.MustUnmarshal(value, &header)
@@ -196,13 +196,13 @@ func (k Keeper) ListEpochHeaders(c context.Context, req *types.QueryListEpochHea
 		return nil, status.Error(codes.InvalidArgument, "invalid request")
 	}
 
-	if len(req.ChainId) == 0 {
+	if len(req.ConsumerId) == 0 {
 		return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty")
 	}
 
 	ctx := sdk.UnwrapSDKContext(c)
 
-	headers, err := k.GetEpochHeaders(ctx, req.ChainId, req.EpochNum)
+	headers, err := k.GetEpochHeaders(ctx, req.ConsumerId, req.EpochNum)
 	if err != nil {
 		return nil, err
 	}
@@ -220,18 +220,18 @@ func (k Keeper) FinalizedChainsInfo(c context.Context, req *types.QueryFinalized
 	}
 
 	// return if no chain IDs are provided
-	if len(req.ChainIds) == 0 {
+	if len(req.ConsumerIds) == 0 {
 		return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty")
 	}
 
 	// return if chain IDs exceed the limit
-	if len(req.ChainIds) > maxQueryChainsInfoLimit {
+	if len(req.ConsumerIds) > maxQueryChainsInfoLimit {
 		return nil, status.Errorf(codes.InvalidArgument, "cannot query more than %d chains", maxQueryChainsInfoLimit)
 	}
 
 	// return if chain IDs contain duplicates or empty strings
-	if err := bbntypes.CheckForDuplicatesAndEmptyStrings(req.ChainIds); err != nil {
-		return nil, status.Error(codes.InvalidArgument, types.ErrInvalidChainIDs.Wrap(err.Error()).Error())
+	if err := bbntypes.CheckForDuplicatesAndEmptyStrings(req.ConsumerIds); err != nil {
+		return nil, status.Error(codes.InvalidArgument, types.ErrInvalidConsumerIDs.Wrap(err.Error()).Error())
 	}
 
 	ctx := sdk.UnwrapSDKContext(c)
@@ -239,22 +239,22 @@ func (k Keeper) FinalizedChainsInfo(c context.Context, req *types.QueryFinalized
 
 	// find the last finalised epoch
 	lastFinalizedEpoch := k.GetLastFinalizedEpoch(ctx)
-	for _, chainID := range req.ChainIds {
+	for _, consumerID := range req.ConsumerIds {
 		// check if chain ID is valid
-		if !k.HasChainInfo(ctx, chainID) {
-			return nil, status.Error(codes.InvalidArgument, types.ErrChainInfoNotFound.Wrapf("chain ID %s", chainID).Error())
+		if !k.HasChainInfo(ctx, consumerID) {
+			return nil, status.Error(codes.InvalidArgument, types.ErrChainInfoNotFound.Wrapf("chain ID %s", consumerID).Error())
 		}
 
-		data := &types.FinalizedChainInfo{ChainId: chainID}
+		data := &types.FinalizedChainInfo{ConsumerId: consumerID}
 
 		// if the chain info is not found in the last finalised epoch, return with empty fields
-		if !k.EpochChainInfoExists(ctx, chainID, lastFinalizedEpoch) {
+		if !k.EpochChainInfoExists(ctx, consumerID, lastFinalizedEpoch) {
 			resp.FinalizedChainsInfo = append(resp.FinalizedChainsInfo, data)
 			continue
 		}
 
 		// find the chain info in the last finalised epoch
-		chainInfoWithProof, err := k.GetEpochChainInfo(ctx, chainID, lastFinalizedEpoch)
+		chainInfoWithProof, err := k.GetEpochChainInfo(ctx, consumerID, lastFinalizedEpoch)
 		if err != nil {
 			return nil, err
 		}
@@ -309,7 +309,7 @@ func (k Keeper) FinalizedChainInfoUntilHeight(c context.Context, req *types.Quer
 		return nil, status.Error(codes.InvalidArgument, "invalid request")
 	}
 
-	if len(req.ChainId) == 0 {
+	if len(req.ConsumerId) == 0 {
 		return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty")
 	}
 
@@ -319,7 +319,7 @@ func (k Keeper) FinalizedChainInfoUntilHeight(c context.Context, req *types.Quer
 	// find the last finalised epoch
 	lastFinalizedEpoch := k.GetLastFinalizedEpoch(ctx)
 	// find the chain info in the last finalised epoch
-	chainInfoWithProof, err := k.GetEpochChainInfo(ctx, req.ChainId, lastFinalizedEpoch)
+	chainInfoWithProof, err := k.GetEpochChainInfo(ctx, req.ConsumerId, lastFinalizedEpoch)
 	if err != nil {
 		return nil, err
 	}
@@ -358,13 +358,13 @@ func (k Keeper) FinalizedChainInfoUntilHeight(c context.Context, req *types.Quer
 		}
 	} else { // the requested height is before the last finalised chain info
 		// starting from the requested height, iterate backward until a timestamped header
-		closestHeader, err := k.FindClosestHeader(ctx, req.ChainId, req.Height)
+		closestHeader, err := k.FindClosestHeader(ctx, req.ConsumerId, req.Height)
 		if err != nil {
 			return nil, err
 		}
 		// assign the finalizedEpoch, and retrieve epoch info, raw ckpt and submission key
 		finalizedEpoch = closestHeader.BabylonEpoch
-		chainInfoWithProof, err := k.GetEpochChainInfo(ctx, req.ChainId, finalizedEpoch)
+		chainInfoWithProof, err := k.GetEpochChainInfo(ctx, req.ConsumerId, finalizedEpoch)
 		if err != nil {
 			return nil, err
 		}
diff --git a/x/zoneconcierge/keeper/grpc_query_test.go b/x/zoneconcierge/keeper/grpc_query_test.go
index 4885659e9..03777faf3 100644
--- a/x/zoneconcierge/keeper/grpc_query_test.go
+++ b/x/zoneconcierge/keeper/grpc_query_test.go
@@ -19,7 +19,7 @@ import (
 )
 
 type chainInfo struct {
-	chainID           string
+	consumerID        string
 	numHeaders        uint64
 	numForkHeaders    uint64
 	headerStartHeight uint64
@@ -39,16 +39,16 @@ func FuzzChainList(f *testing.F) {
 		numHeaders := datagen.RandomInt(r, 100) + 1
 		allChainIDs := []string{}
 		for i := uint64(0); i < numHeaders; i++ {
-			var chainID string
+			var consumerID string
 			// simulate the scenario that some headers belong to the same chain
 			if i > 0 && datagen.OneInN(r, 2) {
-				chainID = allChainIDs[r.Intn(len(allChainIDs))]
+				consumerID = allChainIDs[r.Intn(len(allChainIDs))]
 			} else {
-				chainID = datagen.GenRandomHexStr(r, 30)
-				allChainIDs = append(allChainIDs, chainID)
+				consumerID = datagen.GenRandomHexStr(r, 30)
+				allChainIDs = append(allChainIDs, consumerID)
 			}
-			header := datagen.GenRandomIBCTMHeader(r, chainID, 0)
-			zcKeeper.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), false)
+			header := datagen.GenRandomIBCTMHeader(r, consumerID, 0)
+			zcKeeper.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), consumerID, false)
 		}
 
 		limit := datagen.RandomInt(r, len(allChainIDs)) + 1
@@ -60,12 +60,12 @@ func FuzzChainList(f *testing.F) {
 			},
 		})
 		require.NoError(t, err)
-		actualChainIDs := resp.ChainIds
+		actualConsumerIDs := resp.ConsumerIds
 
-		require.Equal(t, limit, uint64(len(actualChainIDs)))
-		allChainIDs = zcKeeper.GetAllChainIDs(ctx)
+		require.Equal(t, limit, uint64(len(actualConsumerIDs)))
+		allChainIDs = zcKeeper.GetAllConsumerIDs(ctx)
 		for i := uint64(0); i < limit; i++ {
-			require.Equal(t, allChainIDs[i], actualChainIDs[i])
+			require.Equal(t, allChainIDs[i], actualConsumerIDs[i])
 		}
 	})
 }
@@ -81,31 +81,31 @@ func FuzzChainsInfo(f *testing.F) {
 		ctx := babylonApp.NewContext(false)
 
 		var (
-			chainsInfo []chainInfo
-			chainIDs   []string
+			chainsInfo  []chainInfo
+			consumerIDs []string
 		)
 		numChains := datagen.RandomInt(r, 100) + 1
 		for i := uint64(0); i < numChains; i++ {
-			chainID := datagen.GenRandomHexStr(r, 30)
+			consumerID := datagen.GenRandomHexStr(r, 30)
 			numHeaders := datagen.RandomInt(r, 100) + 1
 			numForkHeaders := datagen.RandomInt(r, 10) + 1
-			SimulateNewHeadersAndForks(ctx, r, &zcKeeper, chainID, 0, numHeaders, numForkHeaders)
+			SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, 0, numHeaders, numForkHeaders)
 
-			chainIDs = append(chainIDs, chainID)
+			consumerIDs = append(consumerIDs, consumerID)
 			chainsInfo = append(chainsInfo, chainInfo{
-				chainID:        chainID,
+				consumerID:     consumerID,
 				numHeaders:     numHeaders,
 				numForkHeaders: numForkHeaders,
 			})
 		}
 
 		resp, err := zcKeeper.ChainsInfo(ctx, &zctypes.QueryChainsInfoRequest{
-			ChainIds: chainIDs,
+			ConsumerIds: consumerIDs,
 		})
 		require.NoError(t, err)
 
 		for i, respData := range resp.ChainsInfo {
-			require.Equal(t, chainsInfo[i].chainID, respData.ChainId)
+			require.Equal(t, chainsInfo[i].consumerID, respData.ConsumerId)
 			require.Equal(t, chainsInfo[i].numHeaders-1, respData.LatestHeader.Height)
 			require.Equal(t, chainsInfo[i].numForkHeaders, uint64(len(respData.LatestForks.Headers)))
 		}
@@ -130,13 +130,13 @@ func FuzzHeader(f *testing.F) {
 
 		// find header at a random height and assert correctness against the expected header
 		randomHeight := datagen.RandomInt(r, int(numHeaders-1))
-		resp, err := zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ChainId: czChainID, Height: randomHeight})
+		resp, err := zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ConsumerId: czChainID, Height: randomHeight})
 		require.NoError(t, err)
 		require.Equal(t, headers[randomHeight].Header.AppHash, resp.Header.Hash)
 		require.Len(t, resp.ForkHeaders.Headers, 0)
 
 		// find the last header and fork headers then assert correctness
-		resp, err = zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ChainId: czChainID, Height: numHeaders - 1})
+		resp, err = zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ConsumerId: czChainID, Height: numHeaders - 1})
 		require.NoError(t, err)
 		require.Equal(t, headers[numHeaders-1].Header.AppHash, resp.Header.Hash)
 		require.Len(t, resp.ForkHeaders.Headers, int(numForkHeaders))
@@ -160,10 +160,10 @@ func FuzzEpochChainsInfo(f *testing.F) {
 
 		// generate a random number of chains
 		numChains := datagen.RandomInt(r, 10) + 1
-		var chainIDs []string
+		var consumerIDs []string
 		for j := uint64(0); j < numChains; j++ {
-			chainID := datagen.GenRandomHexStr(r, 30)
-			chainIDs = append(chainIDs, chainID)
+			consumerID := datagen.GenRandomHexStr(r, 30)
+			consumerIDs = append(consumerIDs, consumerID)
 		}
 
 		// generate a random number of epochNums
@@ -180,16 +180,16 @@ func FuzzEpochChainsInfo(f *testing.F) {
 		epochToChainInfo := make(map[uint64]map[string]chainInfo)
 		for _, epochNum := range epochNums {
 			epochToChainInfo[epochNum] = make(map[string]chainInfo)
-			for j, chainID := range chainIDs {
+			for j, consumerID := range consumerIDs {
 				// generate a random number of headers and fork headers for each chain
 				numHeaders := datagen.RandomInt(r, 100) + 1
 				numForkHeaders := datagen.RandomInt(r, 10) + 1
 
 				// trigger hooks to append these headers and fork headers
-				SimulateNewHeadersAndForks(ctx, r, &zcKeeper, chainID, chainHeaderStartHeights[j], numHeaders, numForkHeaders)
+				SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, chainHeaderStartHeights[j], numHeaders, numForkHeaders)
 
-				epochToChainInfo[epochNum][chainID] = chainInfo{
-					chainID:           chainID,
+				epochToChainInfo[epochNum][consumerID] = chainInfo{
+					consumerID:        consumerID,
 					numHeaders:        numHeaders,
 					numForkHeaders:    numForkHeaders,
 					headerStartHeight: chainHeaderStartHeights[j],
@@ -205,14 +205,14 @@ func FuzzEpochChainsInfo(f *testing.F) {
 
 		// assert correctness of best case scenario
 		for _, epochNum := range epochNums {
-			resp, err := zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: epochNum, ChainIds: chainIDs})
+			resp, err := zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: epochNum, ConsumerIds: consumerIDs})
 			require.NoError(t, err)
 			epochChainsInfo := resp.ChainsInfo
 			require.Len(t, epochChainsInfo, int(numChains))
 			for _, info := range epochChainsInfo {
-				require.Equal(t, epochToChainInfo[epochNum][info.ChainId].numForkHeaders, uint64(len(info.LatestForks.Headers)))
+				require.Equal(t, epochToChainInfo[epochNum][info.ConsumerId].numForkHeaders, uint64(len(info.LatestForks.Headers)))
 
-				actualHeight := epochToChainInfo[epochNum][info.ChainId].headerStartHeight + (epochToChainInfo[epochNum][info.ChainId].numHeaders - 1)
+				actualHeight := epochToChainInfo[epochNum][info.ConsumerId].headerStartHeight + (epochToChainInfo[epochNum][info.ConsumerId].numHeaders - 1)
 				require.Equal(t, actualHeight, info.LatestHeader.Height)
 
 			}
@@ -225,21 +225,21 @@ func FuzzEpochChainsInfo(f *testing.F) {
 			maxChainIDs = append(maxChainIDs, datagen.GenRandomHexStr(r, 30))
 		}
 		randomEpochNum := datagen.RandomInt(r, 10) + 1
-		_, err := zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ChainIds: maxChainIDs})
+		_, err := zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ConsumerIds: maxChainIDs})
 		require.Error(t, err)
 
 		// if no input is passed in, query should fail
-		_, err = zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ChainIds: nil})
+		_, err = zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ConsumerIds: nil})
 		require.Error(t, err)
 
 		// if len of chain ids is 0, query should fail
-		_, err = zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ChainIds: []string{}})
+		_, err = zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ConsumerIds: []string{}})
 		require.Error(t, err)
 
 		// if chain ids contain duplicates, query should fail
 		randomChainID := datagen.GenRandomHexStr(r, 30)
-		dupChainIds := []string{randomChainID, randomChainID}
-		_, err = zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ChainIds: dupChainIds})
+		dupConsumerIds := []string{randomChainID, randomChainID}
+		_, err = zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ConsumerIds: dupConsumerIds})
 		require.Error(t, err)
 	})
 }
@@ -263,7 +263,7 @@ func FuzzListHeaders(f *testing.F) {
 		// a request with randomised pagination
 		limit := datagen.RandomInt(r, int(numHeaders)) + 1
 		req := &zctypes.QueryListHeadersRequest{
-			ChainId: czChainID,
+			ConsumerId: czChainID,
 			Pagination: &query.PageRequest{
 				Limit: limit,
 			},
@@ -334,8 +334,8 @@ func FuzzListEpochHeaders(f *testing.F) {
 			epochNum := epochNumList[i]
 			// make request
 			req := &zctypes.QueryListEpochHeadersRequest{
-				ChainId:  czChainID,
-				EpochNum: epochNum,
+				ConsumerId: czChainID,
+				EpochNum:   epochNum,
 			}
 			resp, err := zcKeeper.ListEpochHeaders(ctx, req)
 			require.NoError(t, err)
@@ -400,8 +400,8 @@ func FuzzFinalizedChainInfo(f *testing.F) {
 		hooks := zcKeeper.Hooks()
 
 		var (
-			chainsInfo []chainInfo
-			chainIDs   []string
+			chainsInfo  []chainInfo
+			consumerIDs []string
 		)
 		numChains := datagen.RandomInt(r, 100) + 1
 		for i := uint64(0); i < numChains; i++ {
@@ -413,9 +413,9 @@ func FuzzFinalizedChainInfo(f *testing.F) {
 			numForkHeaders := datagen.RandomInt(r, 10) + 1
 			SimulateNewHeadersAndForks(ctx, r, zcKeeper, czChainID, 0, numHeaders, numForkHeaders)
 
-			chainIDs = append(chainIDs, czChainID)
+			consumerIDs = append(consumerIDs, czChainID)
 			chainsInfo = append(chainsInfo, chainInfo{
-				chainID:        czChainID,
+				consumerID:     czChainID,
 				numHeaders:     numHeaders,
 				numForkHeaders: numForkHeaders,
 			})
@@ -427,10 +427,10 @@ func FuzzFinalizedChainInfo(f *testing.F) {
 		checkpointingKeeper.EXPECT().GetLastFinalizedEpoch(gomock.Any()).Return(epoch.EpochNumber).AnyTimes()
 
 		// check if the chain info of this epoch is recorded or not
-		resp, err := zcKeeper.FinalizedChainsInfo(ctx, &zctypes.QueryFinalizedChainsInfoRequest{ChainIds: chainIDs, Prove: true})
+		resp, err := zcKeeper.FinalizedChainsInfo(ctx, &zctypes.QueryFinalizedChainsInfoRequest{ConsumerIds: consumerIDs, Prove: true})
 		require.NoError(t, err)
 		for i, respData := range resp.FinalizedChainsInfo {
-			require.Equal(t, chainsInfo[i].chainID, respData.FinalizedChainInfo.ChainId)
+			require.Equal(t, chainsInfo[i].consumerID, respData.FinalizedChainInfo.ConsumerId)
 			require.Equal(t, chainsInfo[i].numHeaders-1, respData.FinalizedChainInfo.LatestHeader.Height)
 			require.Equal(t, chainsInfo[i].numForkHeaders, uint64(len(respData.FinalizedChainInfo.LatestForks.Headers)))
 		}
diff --git a/x/zoneconcierge/keeper/header_handler.go b/x/zoneconcierge/keeper/header_handler.go
index e4bfa635d..c96b9347d 100644
--- a/x/zoneconcierge/keeper/header_handler.go
+++ b/x/zoneconcierge/keeper/header_handler.go
@@ -10,11 +10,11 @@ import (
 )
 
 // HandleHeaderWithValidCommit handles a CZ header with a valid QC
-func (k Keeper) HandleHeaderWithValidCommit(ctx context.Context, txHash []byte, header *types.HeaderInfo, isOnFork bool) {
+func (k Keeper) HandleHeaderWithValidCommit(ctx context.Context, txHash []byte, header *types.HeaderInfo, clientID string, isOnFork bool) {
 	sdkCtx := sdk.UnwrapSDKContext(ctx)
 	babylonHeader := sdkCtx.HeaderInfo()
 	indexedHeader := types.IndexedHeader{
-		ChainId:             header.ChainId,
+		ConsumerId:          clientID,
 		Hash:                header.AppHash,
 		Height:              header.Height,
 		Time:                &header.Time,
@@ -30,27 +30,27 @@ func (k Keeper) HandleHeaderWithValidCommit(ctx context.Context, txHash []byte,
 		chainInfo *types.ChainInfo
 		err       error
 	)
-	if !k.HasChainInfo(ctx, indexedHeader.ChainId) {
+	if !k.HasChainInfo(ctx, indexedHeader.ConsumerId) {
 		// chain info does not exist yet, initialise chain info for this chain
-		chainInfo, err = k.InitChainInfo(ctx, indexedHeader.ChainId)
+		chainInfo, err = k.InitChainInfo(ctx, indexedHeader.ConsumerId)
 		if err != nil {
-			panic(fmt.Errorf("failed to initialize chain info of %s: %w", indexedHeader.ChainId, err))
+			panic(fmt.Errorf("failed to initialize chain info of %s: %w", indexedHeader.ConsumerId, err))
 		}
 	} else {
 		// get chain info
-		chainInfo, err = k.GetChainInfo(ctx, indexedHeader.ChainId)
+		chainInfo, err = k.GetChainInfo(ctx, indexedHeader.ConsumerId)
 		if err != nil {
-			panic(fmt.Errorf("failed to get chain info of %s: %w", indexedHeader.ChainId, err))
+			panic(fmt.Errorf("failed to get chain info of %s: %w", indexedHeader.ConsumerId, err))
 		}
 	}
 
 	if isOnFork {
 		// insert header to fork index
-		if err := k.insertForkHeader(ctx, indexedHeader.ChainId, &indexedHeader); err != nil {
+		if err := k.insertForkHeader(ctx, indexedHeader.ConsumerId, &indexedHeader); err != nil {
 			panic(err)
 		}
 		// update the latest fork in chain info
-		if err := k.tryToUpdateLatestForkHeader(ctx, indexedHeader.ChainId, &indexedHeader); err != nil {
+		if err := k.tryToUpdateLatestForkHeader(ctx, indexedHeader.ConsumerId, &indexedHeader); err != nil {
 			panic(err)
 		}
 	} else {
@@ -62,11 +62,11 @@ func (k Keeper) HandleHeaderWithValidCommit(ctx context.Context, txHash []byte,
 		}
 
 		// insert header to canonical chain index
-		if err := k.insertHeader(ctx, indexedHeader.ChainId, &indexedHeader); err != nil {
+		if err := k.insertHeader(ctx, indexedHeader.ConsumerId, &indexedHeader); err != nil {
 			panic(err)
 		}
 		// update the latest canonical header in chain info
-		if err := k.updateLatestHeader(ctx, indexedHeader.ChainId, &indexedHeader); err != nil {
+		if err := k.updateLatestHeader(ctx, indexedHeader.ConsumerId, &indexedHeader); err != nil {
 			panic(err)
 		}
 	}
diff --git a/x/zoneconcierge/keeper/hooks.go b/x/zoneconcierge/keeper/hooks.go
index 8621183dc..f8d4c5031 100644
--- a/x/zoneconcierge/keeper/hooks.go
+++ b/x/zoneconcierge/keeper/hooks.go
@@ -23,8 +23,8 @@ func (k Keeper) Hooks() Hooks { return Hooks{k} }
 func (h Hooks) AfterEpochEnds(ctx context.Context, epoch uint64) {
 	// upon an epoch has ended, index the current chain info for each CZ
 	// TODO: do this together when epoch is sealed?
-	for _, chainID := range h.k.GetAllChainIDs(ctx) {
-		h.k.recordEpochChainInfo(ctx, chainID, epoch)
+	for _, consumerID := range h.k.GetAllConsumerIDs(ctx) {
+		h.k.recordEpochChainInfo(ctx, consumerID, epoch)
 	}
 }
 
diff --git a/x/zoneconcierge/keeper/ibc_header_decorator.go b/x/zoneconcierge/keeper/ibc_header_decorator.go
index 930f33e6e..ecac68f9a 100644
--- a/x/zoneconcierge/keeper/ibc_header_decorator.go
+++ b/x/zoneconcierge/keeper/ibc_header_decorator.go
@@ -22,21 +22,22 @@ func NewIBCHeaderDecorator(k Keeper) *IBCHeaderDecorator {
 	}
 }
 
-func (d *IBCHeaderDecorator) getHeaderAndClientState(ctx sdk.Context, m sdk.Msg) (*types.HeaderInfo, *ibctmtypes.ClientState) {
+func (d *IBCHeaderDecorator) parseMsgUpdateClient(ctx sdk.Context, m sdk.Msg) (*types.HeaderInfo, *ibctmtypes.ClientState, string) {
 	// ensure the message is MsgUpdateClient
 	msgUpdateClient, ok := m.(*clienttypes.MsgUpdateClient)
 	if !ok {
-		return nil, nil
+		return nil, nil, ""
 	}
+	clientID := msgUpdateClient.ClientId
 	// unpack ClientMsg inside MsgUpdateClient
 	clientMsg, err := clienttypes.UnpackClientMessage(msgUpdateClient.ClientMessage)
 	if err != nil {
-		return nil, nil
+		return nil, nil, ""
 	}
 	// ensure the ClientMsg is a Comet header
 	ibctmHeader, ok := clientMsg.(*ibctmtypes.Header)
 	if !ok {
-		return nil, nil
+		return nil, nil, ""
 	}
 
 	// all good, we get the headerInfo
@@ -51,15 +52,15 @@ func (d *IBCHeaderDecorator) getHeaderAndClientState(ctx sdk.Context, m sdk.Msg)
 	// ensure the corresponding clientState exists
 	clientState, exist := d.k.clientKeeper.GetClientState(ctx, msgUpdateClient.ClientId)
 	if !exist {
-		return nil, nil
+		return nil, nil, ""
 	}
 	// ensure the clientState is a Comet clientState
 	cmtClientState, ok := clientState.(*ibctmtypes.ClientState)
 	if !ok {
-		return nil, nil
+		return nil, nil, ""
 	}
 
-	return headerInfo, cmtClientState
+	return headerInfo, cmtClientState, clientID
 }
 
 func (d *IBCHeaderDecorator) PostHandle(ctx sdk.Context, tx sdk.Tx, simulate, success bool, next sdk.PostHandler) (sdk.Context, error) {
@@ -78,7 +79,7 @@ func (d *IBCHeaderDecorator) PostHandle(ctx sdk.Context, tx sdk.Tx, simulate, su
 
 	for _, msg := range tx.GetMsgs() {
 		// try to extract the headerInfo and the client's status
-		headerInfo, clientState := d.getHeaderAndClientState(ctx, msg)
+		headerInfo, clientState, clientID := d.parseMsgUpdateClient(ctx, msg)
 		if headerInfo == nil {
 			continue
 		}
@@ -91,7 +92,7 @@ func (d *IBCHeaderDecorator) PostHandle(ctx sdk.Context, tx sdk.Tx, simulate, su
 		// fail, eventually failing the entire tx. All state updates due to this
 		// failed tx will be rolled back.
 		isOnFork := !clientState.FrozenHeight.IsZero()
-		d.k.HandleHeaderWithValidCommit(ctx, txHash, headerInfo, isOnFork)
+		d.k.HandleHeaderWithValidCommit(ctx, txHash, headerInfo, clientID, isOnFork)
 
 		// unfreeze client (by setting FrozenHeight to zero again) if the client is frozen
 		// due to a fork header
diff --git a/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go b/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go
index 7df7c61ad..428ec16fe 100644
--- a/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go
+++ b/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go
@@ -99,7 +99,7 @@ func (k Keeper) getFinalizedInfo(
 // where the counterparty is a Cosmos zone
 func (k Keeper) createBTCTimestamp(
 	ctx context.Context,
-	chainID string,
+	consumerID string,
 	channel channeltypes.IdentifiedChannel,
 	finalizedInfo *finalizedInfo,
 ) (*types.BTCTimestamp, error) {
@@ -123,9 +123,9 @@ func (k Keeper) createBTCTimestamp(
 	// NOTE: it's possible that this chain does not have chain info at the moment
 	// In this case, skip sending BTC timestamp for this chain at this epoch
 	epochNum := finalizedInfo.EpochInfo.EpochNumber
-	epochChainInfo, err := k.GetEpochChainInfo(ctx, chainID, epochNum)
+	epochChainInfo, err := k.GetEpochChainInfo(ctx, consumerID, epochNum)
 	if err != nil {
-		return nil, fmt.Errorf("no epochChainInfo for chain %s at epoch %d", chainID, epochNum)
+		return nil, fmt.Errorf("no epochChainInfo for consumer %s at epoch %d", consumerID, epochNum)
 	}
 
 	// construct BTC timestamp from everything
@@ -235,16 +235,16 @@ func (k Keeper) BroadcastBTCTimestamps(
 	// for each channel, construct and send BTC timestamp
 	for _, channel := range openZCChannels {
 		// get the ID of the chain under this channel
-		chainID, err := k.getChainID(ctx, channel)
+		consumerID, err := k.getChainID(ctx, channel)
 		if err != nil {
 			k.Logger(sdkCtx).Error("failed to get chain ID, skip sending BTC timestamp for this chain", "channelID", channel.ChannelId, "error", err)
 			continue
 		}
 
 		// generate timestamp for this channel
-		btcTimestamp, err := k.createBTCTimestamp(ctx, chainID, channel, finalizedInfo)
+		btcTimestamp, err := k.createBTCTimestamp(ctx, consumerID, channel, finalizedInfo)
 		if err != nil {
-			k.Logger(sdkCtx).Error("failed to generate BTC timestamp, skip sending BTC timestamp for this chain", "chainID", chainID, "error", err)
+			k.Logger(sdkCtx).Error("failed to generate BTC timestamp, skip sending BTC timestamp for this chain", "consumerID", consumerID, "error", err)
 			continue
 		}
 
@@ -252,7 +252,7 @@ func (k Keeper) BroadcastBTCTimestamps(
 		packet := types.NewBTCTimestampPacketData(btcTimestamp)
 		// send IBC packet
 		if err := k.SendIBCPacket(ctx, channel, packet); err != nil {
-			k.Logger(sdkCtx).Error("failed to send BTC timestamp IBC packet, skip sending BTC timestamp for this chain", "chainID", chainID, "channelID", channel.ChannelId, "error", err)
+			k.Logger(sdkCtx).Error("failed to send BTC timestamp IBC packet, skip sending BTC timestamp for this chain", "consumerID", consumerID, "channelID", channel.ChannelId, "error", err)
 			continue
 		}
 	}
diff --git a/x/zoneconcierge/keeper/keeper_test.go b/x/zoneconcierge/keeper/keeper_test.go
index 1d5392735..aaf972240 100644
--- a/x/zoneconcierge/keeper/keeper_test.go
+++ b/x/zoneconcierge/keeper/keeper_test.go
@@ -11,32 +11,32 @@ import (
 )
 
 // SimulateNewHeaders generates a non-zero number of canonical headers
-func SimulateNewHeaders(ctx context.Context, r *rand.Rand, k *zckeeper.Keeper, chainID string, startHeight uint64, numHeaders uint64) []*ibctmtypes.Header {
+func SimulateNewHeaders(ctx context.Context, r *rand.Rand, k *zckeeper.Keeper, consumerID string, startHeight uint64, numHeaders uint64) []*ibctmtypes.Header {
 	headers := []*ibctmtypes.Header{}
 	// invoke the hook a number of times to simulate a number of blocks
 	for i := uint64(0); i < numHeaders; i++ {
-		header := datagen.GenRandomIBCTMHeader(r, chainID, startHeight+i)
-		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), false)
+		header := datagen.GenRandomIBCTMHeader(r, consumerID, startHeight+i)
+		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), consumerID, false)
 		headers = append(headers, header)
 	}
 	return headers
 }
 
 // SimulateNewHeadersAndForks generates a random non-zero number of canonical headers and fork headers
-func SimulateNewHeadersAndForks(ctx context.Context, r *rand.Rand, k *zckeeper.Keeper, chainID string, startHeight uint64, numHeaders uint64, numForkHeaders uint64) ([]*ibctmtypes.Header, []*ibctmtypes.Header) {
+func SimulateNewHeadersAndForks(ctx context.Context, r *rand.Rand, k *zckeeper.Keeper, consumerID string, startHeight uint64, numHeaders uint64, numForkHeaders uint64) ([]*ibctmtypes.Header, []*ibctmtypes.Header) {
 	headers := []*ibctmtypes.Header{}
 	// invoke the hook a number of times to simulate a number of blocks
 	for i := uint64(0); i < numHeaders; i++ {
-		header := datagen.GenRandomIBCTMHeader(r, chainID, startHeight+i)
-		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), false)
+		header := datagen.GenRandomIBCTMHeader(r, consumerID, startHeight+i)
+		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), consumerID, false)
 		headers = append(headers, header)
 	}
 
 	// generate a number of fork headers
 	forkHeaders := []*ibctmtypes.Header{}
 	for i := uint64(0); i < numForkHeaders; i++ {
-		header := datagen.GenRandomIBCTMHeader(r, chainID, startHeight+numHeaders-1)
-		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), true)
+		header := datagen.GenRandomIBCTMHeader(r, consumerID, startHeight+numHeaders-1)
+		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), consumerID, true)
 		forkHeaders = append(forkHeaders, header)
 	}
 	return headers, forkHeaders
diff --git a/x/zoneconcierge/keeper/proof_btc_timestamp.go b/x/zoneconcierge/keeper/proof_btc_timestamp.go
index 248fe6b1a..5e9924ef7 100644
--- a/x/zoneconcierge/keeper/proof_btc_timestamp.go
+++ b/x/zoneconcierge/keeper/proof_btc_timestamp.go
@@ -13,7 +13,7 @@ import (
 )
 
 func (k Keeper) ProveCZHeaderInEpoch(_ context.Context, header *types.IndexedHeader, epoch *epochingtypes.Epoch) (*cmtcrypto.ProofOps, error) {
-	czHeaderKey := types.GetCZHeaderKey(header.ChainId, header.Height)
+	czHeaderKey := types.GetCZHeaderKey(header.ConsumerId, header.Height)
 	_, _, proof, err := k.QueryStore(types.StoreKey, czHeaderKey, int64(epoch.GetSealerBlockHeight()))
 	if err != nil {
 		return nil, err
diff --git a/x/zoneconcierge/keeper/proof_btc_timestamp_test.go b/x/zoneconcierge/keeper/proof_btc_timestamp_test.go
index d58d3d666..c1f0aa673 100644
--- a/x/zoneconcierge/keeper/proof_btc_timestamp_test.go
+++ b/x/zoneconcierge/keeper/proof_btc_timestamp_test.go
@@ -41,14 +41,14 @@ func FuzzProofCZHeaderInEpoch(f *testing.F) {
 		}
 
 		// handle a random header from a random consumer chain
-		chainID := datagen.GenRandomHexStr(r, 10)
+		consumerID := datagen.GenRandomHexStr(r, 10)
 		height := datagen.RandomInt(r, 100) + 1
-		ibctmHeader := datagen.GenRandomIBCTMHeader(r, chainID, height)
+		ibctmHeader := datagen.GenRandomIBCTMHeader(r, consumerID, height)
 		headerInfo := datagen.HeaderToHeaderInfo(ibctmHeader)
-		zck.HandleHeaderWithValidCommit(h.Ctx, datagen.GenRandomByteArray(r, 32), headerInfo, false)
+		zck.HandleHeaderWithValidCommit(h.Ctx, datagen.GenRandomByteArray(r, 32), headerInfo, consumerID, false)
 
 		// ensure the header is successfully inserted
-		indexedHeader, err := zck.GetHeader(h.Ctx, chainID, height)
+		indexedHeader, err := zck.GetHeader(h.Ctx, consumerID, height)
 		h.NoError(err)
 
 		// enter the 1st block of the next epoch
diff --git a/x/zoneconcierge/keeper/query_kvstore.go b/x/zoneconcierge/keeper/query_kvstore.go
index eb1caf149..6f2568f4c 100644
--- a/x/zoneconcierge/keeper/query_kvstore.go
+++ b/x/zoneconcierge/keeper/query_kvstore.go
@@ -9,7 +9,7 @@ import (
 
 // QueryStore queries a KV pair in the KVStore, where
 // - moduleStoreKey is the store key of a module, e.g., zctypes.StoreKey
-// - key is the key of the queried KV pair, including the prefix, e.g., zctypes.EpochChainInfoKey || chainID in the chain info store
+// - key is the key of the queried KV pair, including the prefix, e.g., zctypes.EpochChainInfoKey || consumerID in the chain info store
 // and returns
 // - key of this KV pair
 // - value of this KV pair
diff --git a/x/zoneconcierge/types/btc_timestamp.go b/x/zoneconcierge/types/btc_timestamp.go
index d54b04355..de88561d5 100644
--- a/x/zoneconcierge/types/btc_timestamp.go
+++ b/x/zoneconcierge/types/btc_timestamp.go
@@ -19,9 +19,9 @@ import (
 	epochingtypes "github.com/babylonlabs-io/babylon/x/epoching/types"
 )
 
-func GetCZHeaderKey(chainID string, height uint64) []byte {
+func GetCZHeaderKey(consumerID string, height uint64) []byte {
 	key := CanonicalChainKey
-	key = append(key, []byte(chainID)...)
+	key = append(key, []byte(consumerID)...)
 	key = append(key, sdk.Uint64ToBigEndian(height)...)
 	return key
 }
@@ -180,7 +180,7 @@ func VerifyCZHeaderInEpoch(header *IndexedHeader, epoch *epochingtypes.Epoch, pr
 		return err
 	}
 
-	if err := VerifyStore(root, StoreKey, GetCZHeaderKey(header.ChainId, header.Height), headerBytes, proof); err != nil {
+	if err := VerifyStore(root, StoreKey, GetCZHeaderKey(header.ConsumerId, header.Height), headerBytes, proof); err != nil {
 		return errorsmod.Wrapf(ErrInvalidMerkleProof, "invalid inclusion proof for CZ header: %v", err)
 	}
 
diff --git a/x/zoneconcierge/types/btc_timestamp_test.go b/x/zoneconcierge/types/btc_timestamp_test.go
index 24033ed88..fc2a9e43b 100644
--- a/x/zoneconcierge/types/btc_timestamp_test.go
+++ b/x/zoneconcierge/types/btc_timestamp_test.go
@@ -59,14 +59,14 @@ func FuzzBTCTimestamp(f *testing.F) {
 		}
 
 		// handle a random header from a random consumer chain
-		chainID := datagen.GenRandomHexStr(r, 10)
+		consumerID := datagen.GenRandomHexStr(r, 10)
 		height := datagen.RandomInt(r, 100) + 1
-		ibctmHeader := datagen.GenRandomIBCTMHeader(r, chainID, height)
+		ibctmHeader := datagen.GenRandomIBCTMHeader(r, consumerID, height)
 		headerInfo := datagen.HeaderToHeaderInfo(ibctmHeader)
-		zck.HandleHeaderWithValidCommit(h.Ctx, datagen.GenRandomByteArray(r, 32), headerInfo, false)
+		zck.HandleHeaderWithValidCommit(h.Ctx, datagen.GenRandomByteArray(r, 32), headerInfo, consumerID, false)
 
 		// ensure the header is successfully inserted
-		indexedHeader, err := zck.GetHeader(h.Ctx, chainID, height)
+		indexedHeader, err := zck.GetHeader(h.Ctx, consumerID, height)
 		h.NoError(err)
 
 		// enter block 21, 1st block of epoch 3
diff --git a/x/zoneconcierge/types/errors.go b/x/zoneconcierge/types/errors.go
index 8d033faf5..5a224e551 100644
--- a/x/zoneconcierge/types/errors.go
+++ b/x/zoneconcierge/types/errors.go
@@ -15,5 +15,5 @@ var (
 	ErrInvalidProofEpochSealed = errorsmod.Register(ModuleName, 1107, "invalid ProofEpochSealed")
 	ErrInvalidMerkleProof      = errorsmod.Register(ModuleName, 1108, "invalid Merkle inclusion proof")
 	ErrInvalidChainInfo        = errorsmod.Register(ModuleName, 1109, "invalid chain info")
-	ErrInvalidChainIDs         = errorsmod.Register(ModuleName, 1110, "chain ids contain duplicates or empty strings")
+	ErrInvalidConsumerIDs      = errorsmod.Register(ModuleName, 1110, "chain ids contain duplicates or empty strings")
 )
diff --git a/x/zoneconcierge/types/query.pb.go b/x/zoneconcierge/types/query.pb.go
index c2f8f49dd..ae44fc9d3 100644
--- a/x/zoneconcierge/types/query.pb.go
+++ b/x/zoneconcierge/types/query.pb.go
@@ -118,8 +118,8 @@ func (m *QueryParamsResponse) GetParams() Params {
 
 // QueryHeaderRequest is request type for the Query/Header RPC method.
 type QueryHeaderRequest struct {
-	ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
-	Height  uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"`
+	ConsumerId string `protobuf:"bytes,1,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"`
+	Height     uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"`
 }
 
 func (m *QueryHeaderRequest) Reset()         { *m = QueryHeaderRequest{} }
@@ -155,9 +155,9 @@ func (m *QueryHeaderRequest) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_QueryHeaderRequest proto.InternalMessageInfo
 
-func (m *QueryHeaderRequest) GetChainId() string {
+func (m *QueryHeaderRequest) GetConsumerId() string {
 	if m != nil {
-		return m.ChainId
+		return m.ConsumerId
 	}
 	return ""
 }
@@ -270,8 +270,8 @@ func (m *QueryChainListRequest) GetPagination() *query.PageRequest {
 
 // QueryChainListResponse is response type for the Query/ChainList RPC method
 type QueryChainListResponse struct {
-	// chain_ids are IDs of the chains in ascending alphabetical order
-	ChainIds []string `protobuf:"bytes,1,rep,name=chain_ids,json=chainIds,proto3" json:"chain_ids,omitempty"`
+	// consumer_ids are IDs of the chains in ascending alphabetical order
+	ConsumerIds []string `protobuf:"bytes,1,rep,name=consumer_ids,json=consumerIds,proto3" json:"consumer_ids,omitempty"`
 	// pagination defines the pagination in the response
 	Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
 }
@@ -309,9 +309,9 @@ func (m *QueryChainListResponse) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_QueryChainListResponse proto.InternalMessageInfo
 
-func (m *QueryChainListResponse) GetChainIds() []string {
+func (m *QueryChainListResponse) GetConsumerIds() []string {
 	if m != nil {
-		return m.ChainIds
+		return m.ConsumerIds
 	}
 	return nil
 }
@@ -325,7 +325,7 @@ func (m *QueryChainListResponse) GetPagination() *query.PageResponse {
 
 // QueryChainsInfoRequest is request type for the Query/ChainsInfo RPC method.
 type QueryChainsInfoRequest struct {
-	ChainIds []string `protobuf:"bytes,1,rep,name=chain_ids,json=chainIds,proto3" json:"chain_ids,omitempty"`
+	ConsumerIds []string `protobuf:"bytes,1,rep,name=consumer_ids,json=consumerIds,proto3" json:"consumer_ids,omitempty"`
 }
 
 func (m *QueryChainsInfoRequest) Reset()         { *m = QueryChainsInfoRequest{} }
@@ -361,9 +361,9 @@ func (m *QueryChainsInfoRequest) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_QueryChainsInfoRequest proto.InternalMessageInfo
 
-func (m *QueryChainsInfoRequest) GetChainIds() []string {
+func (m *QueryChainsInfoRequest) GetConsumerIds() []string {
 	if m != nil {
-		return m.ChainIds
+		return m.ConsumerIds
 	}
 	return nil
 }
@@ -416,8 +416,8 @@ func (m *QueryChainsInfoResponse) GetChainsInfo() []*ChainInfo {
 // QueryEpochChainsInfoRequest is request type for the Query/EpochChainsInfo RPC
 // method.
 type QueryEpochChainsInfoRequest struct {
-	EpochNum uint64   `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"`
-	ChainIds []string `protobuf:"bytes,2,rep,name=chain_ids,json=chainIds,proto3" json:"chain_ids,omitempty"`
+	EpochNum    uint64   `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"`
+	ConsumerIds []string `protobuf:"bytes,2,rep,name=consumer_ids,json=consumerIds,proto3" json:"consumer_ids,omitempty"`
 }
 
 func (m *QueryEpochChainsInfoRequest) Reset()         { *m = QueryEpochChainsInfoRequest{} }
@@ -460,9 +460,9 @@ func (m *QueryEpochChainsInfoRequest) GetEpochNum() uint64 {
 	return 0
 }
 
-func (m *QueryEpochChainsInfoRequest) GetChainIds() []string {
+func (m *QueryEpochChainsInfoRequest) GetConsumerIds() []string {
 	if m != nil {
-		return m.ChainIds
+		return m.ConsumerIds
 	}
 	return nil
 }
@@ -516,7 +516,7 @@ func (m *QueryEpochChainsInfoResponse) GetChainsInfo() []*ChainInfo {
 
 // QueryListHeadersRequest is request type for the Query/ListHeaders RPC method.
 type QueryListHeadersRequest struct {
-	ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
+	ConsumerId string `protobuf:"bytes,1,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"`
 	// pagination defines whether to have the pagination in the request
 	Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
 }
@@ -554,9 +554,9 @@ func (m *QueryListHeadersRequest) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_QueryListHeadersRequest proto.InternalMessageInfo
 
-func (m *QueryListHeadersRequest) GetChainId() string {
+func (m *QueryListHeadersRequest) GetConsumerId() string {
 	if m != nil {
-		return m.ChainId
+		return m.ConsumerId
 	}
 	return ""
 }
@@ -627,8 +627,8 @@ func (m *QueryListHeadersResponse) GetPagination() *query.PageResponse {
 // QueryListEpochHeadersRequest is request type for the Query/ListEpochHeaders
 // RPC method.
 type QueryListEpochHeadersRequest struct {
-	EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"`
-	ChainId  string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
+	EpochNum   uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"`
+	ConsumerId string `protobuf:"bytes,2,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"`
 }
 
 func (m *QueryListEpochHeadersRequest) Reset()         { *m = QueryListEpochHeadersRequest{} }
@@ -671,9 +671,9 @@ func (m *QueryListEpochHeadersRequest) GetEpochNum() uint64 {
 	return 0
 }
 
-func (m *QueryListEpochHeadersRequest) GetChainId() string {
+func (m *QueryListEpochHeadersRequest) GetConsumerId() string {
 	if m != nil {
-		return m.ChainId
+		return m.ConsumerId
 	}
 	return ""
 }
@@ -728,8 +728,8 @@ func (m *QueryListEpochHeadersResponse) GetHeaders() []*IndexedHeader {
 // QueryFinalizedChainsInfoRequest is request type for the
 // Query/FinalizedChainsInfo RPC method.
 type QueryFinalizedChainsInfoRequest struct {
-	// chain_ids is the list of ids of CZs
-	ChainIds []string `protobuf:"bytes,1,rep,name=chain_ids,json=chainIds,proto3" json:"chain_ids,omitempty"`
+	// consumer_ids is the list of ids of CZs
+	ConsumerIds []string `protobuf:"bytes,1,rep,name=consumer_ids,json=consumerIds,proto3" json:"consumer_ids,omitempty"`
 	// prove indicates whether the querier wants to get proofs of this timestamp
 	Prove bool `protobuf:"varint,2,opt,name=prove,proto3" json:"prove,omitempty"`
 }
@@ -767,9 +767,9 @@ func (m *QueryFinalizedChainsInfoRequest) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_QueryFinalizedChainsInfoRequest proto.InternalMessageInfo
 
-func (m *QueryFinalizedChainsInfoRequest) GetChainIds() []string {
+func (m *QueryFinalizedChainsInfoRequest) GetConsumerIds() []string {
 	if m != nil {
-		return m.ChainIds
+		return m.ConsumerIds
 	}
 	return nil
 }
@@ -830,8 +830,8 @@ func (m *QueryFinalizedChainsInfoResponse) GetFinalizedChainsInfo() []*Finalized
 // QueryFinalizedChainInfoUntilHeightRequest is request type for the
 // Query/FinalizedChainInfoUntilHeight RPC method.
 type QueryFinalizedChainInfoUntilHeightRequest struct {
-	// chain_id is the ID of the CZ
-	ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
+	// consumer_id is the ID of the CZ
+	ConsumerId string `protobuf:"bytes,1,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"`
 	// height is the height of the CZ chain
 	// such that the returned finalised chain info will be no later than this
 	// height
@@ -877,9 +877,9 @@ func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_QueryFinalizedChainInfoUntilHeightRequest proto.InternalMessageInfo
 
-func (m *QueryFinalizedChainInfoUntilHeightRequest) GetChainId() string {
+func (m *QueryFinalizedChainInfoUntilHeightRequest) GetConsumerId() string {
 	if m != nil {
-		return m.ChainId
+		return m.ConsumerId
 	}
 	return ""
 }
@@ -1012,81 +1012,81 @@ func init() {
 }
 
 var fileDescriptor_cd665af90102da38 = []byte{
-	// 1182 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcf, 0x6f, 0x1b, 0xc5,
-	0x17, 0xcf, 0xe6, 0x57, 0x93, 0xe7, 0x6f, 0xbf, 0x54, 0x93, 0xb4, 0x98, 0x6d, 0xeb, 0x44, 0x0b,
-	0xa5, 0x69, 0x49, 0x76, 0x71, 0x4a, 0x1b, 0x95, 0x03, 0x55, 0x93, 0x92, 0x34, 0x2a, 0xaa, 0x9a,
-	0x85, 0x80, 0xc4, 0xc5, 0xec, 0xae, 0xc7, 0xf6, 0x2a, 0xf1, 0x8e, 0xbb, 0xb3, 0x76, 0xeb, 0x86,
-	0x70, 0x40, 0xbd, 0x83, 0xc4, 0x05, 0x71, 0xe2, 0xc4, 0x81, 0x43, 0x6f, 0xfc, 0x09, 0x48, 0x3d,
-	0x70, 0xa8, 0xc4, 0x85, 0x13, 0x42, 0x09, 0xff, 0x06, 0x12, 0xda, 0x99, 0x59, 0x7b, 0x7f, 0xda,
-	0xeb, 0x90, 0x9b, 0x67, 0xf6, 0xbd, 0xcf, 0xe7, 0xf3, 0xde, 0xbc, 0x99, 0xf7, 0x0c, 0x6f, 0x99,
-	0x86, 0xd9, 0xdd, 0x27, 0x8e, 0xf6, 0x8c, 0x38, 0xd8, 0x22, 0x8e, 0x65, 0x63, 0xb7, 0x8e, 0xb5,
-	0x4e, 0x59, 0x7b, 0xdc, 0xc6, 0x6e, 0x57, 0x6d, 0xb9, 0xc4, 0x23, 0xa8, 0x28, 0xac, 0xd4, 0x88,
-	0x95, 0xda, 0x29, 0xcb, 0xf3, 0x75, 0x52, 0x27, 0xcc, 0x48, 0xf3, 0x7f, 0x71, 0x7b, 0xf9, 0x52,
-	0x9d, 0x90, 0xfa, 0x3e, 0xd6, 0x8c, 0x96, 0xad, 0x19, 0x8e, 0x43, 0x3c, 0xc3, 0xb3, 0x89, 0x43,
-	0xc5, 0xd7, 0xeb, 0x16, 0xa1, 0x4d, 0x42, 0x35, 0xd3, 0xa0, 0x98, 0xd3, 0x68, 0x9d, 0xb2, 0x89,
-	0x3d, 0xa3, 0xac, 0xb5, 0x8c, 0xba, 0xed, 0x30, 0x63, 0x61, 0xbb, 0x1c, 0xe8, 0x33, 0x3d, 0xcb,
-	0x6a, 0x60, 0x6b, 0xaf, 0x45, 0x6c, 0xc7, 0xf3, 0xf5, 0x45, 0x36, 0x84, 0xf5, 0xb5, 0xc0, 0xba,
-	0xff, 0xc5, 0x76, 0xea, 0xbe, 0x75, 0xc2, 0x54, 0x09, 0x4c, 0x71, 0x8b, 0x58, 0x0d, 0x61, 0x15,
-	0xfc, 0x8e, 0x93, 0x27, 0x92, 0x13, 0xcd, 0x03, 0xb7, 0xbe, 0x92, 0x69, 0xdd, 0x32, 0x5c, 0xa3,
-	0x29, 0xa2, 0x57, 0xe6, 0x01, 0xed, 0xf8, 0x31, 0x3f, 0x62, 0x9b, 0x3a, 0x7e, 0xdc, 0xc6, 0xd4,
-	0x53, 0x76, 0x61, 0x2e, 0xb2, 0x4b, 0x5b, 0xc4, 0xa1, 0x18, 0x7d, 0x00, 0xd3, 0xdc, 0xb9, 0x28,
-	0x2d, 0x4a, 0x4b, 0x85, 0xd5, 0x45, 0x35, 0xeb, 0x24, 0x54, 0xee, 0xb9, 0x3e, 0xf9, 0xf2, 0xcf,
-	0x85, 0x31, 0x5d, 0x78, 0x29, 0x5b, 0x82, 0xec, 0x3e, 0x36, 0xaa, 0xd8, 0x15, 0x64, 0xe8, 0x0d,
-	0x98, 0xb1, 0x1a, 0x86, 0xed, 0x54, 0xec, 0x2a, 0xc3, 0x9d, 0xd5, 0xcf, 0xb0, 0xf5, 0x76, 0x15,
-	0x5d, 0x80, 0xe9, 0x06, 0xb6, 0xeb, 0x0d, 0xaf, 0x38, 0xbe, 0x28, 0x2d, 0x4d, 0xea, 0x62, 0xa5,
-	0xfc, 0x20, 0x09, 0x81, 0x01, 0x92, 0x10, 0x78, 0xc7, 0xb7, 0xf7, 0x77, 0x84, 0xc0, 0xab, 0xd9,
-	0x02, 0xb7, 0x9d, 0x2a, 0x7e, 0x8a, 0xab, 0x02, 0x40, 0xb8, 0xa1, 0x75, 0xf8, 0x5f, 0x8d, 0xb8,
-	0x7b, 0x15, 0xbe, 0xa4, 0x8c, 0xb6, 0xb0, 0xba, 0x90, 0x0d, 0xb3, 0x49, 0xdc, 0x3d, 0xaa, 0x17,
-	0x7c, 0x27, 0x0e, 0x45, 0x95, 0x0a, 0x9c, 0x67, 0xda, 0x36, 0xfc, 0x20, 0x3e, 0xb2, 0xa9, 0x17,
-	0x04, 0xba, 0x09, 0xd0, 0xaf, 0x28, 0xa1, 0xf0, 0x6d, 0x95, 0x97, 0x9f, 0xea, 0x97, 0x9f, 0xca,
-	0xab, 0x5c, 0x94, 0x9f, 0xfa, 0xc8, 0xa8, 0x63, 0xe1, 0xab, 0x87, 0x3c, 0x95, 0xaf, 0xe0, 0x42,
-	0x9c, 0x40, 0xc4, 0x7f, 0x11, 0x66, 0x83, 0x54, 0xfa, 0x67, 0x34, 0xb1, 0x34, 0xab, 0xcf, 0x88,
-	0x5c, 0x52, 0xb4, 0x15, 0xa1, 0x1f, 0x17, 0x09, 0x1a, 0x46, 0xcf, 0x91, 0x23, 0xfc, 0x37, 0xc3,
-	0xfc, 0x74, 0xdb, 0xa9, 0x91, 0x20, 0xc2, 0x41, 0xfc, 0x4a, 0x05, 0x5e, 0x4f, 0xb8, 0x09, 0xdd,
-	0xf7, 0xa0, 0xc0, 0xcc, 0x68, 0xc5, 0x76, 0x6a, 0x84, 0x79, 0x16, 0x56, 0xdf, 0xcc, 0xce, 0x3a,
-	0x83, 0x60, 0x08, 0x60, 0xf5, 0xd0, 0x94, 0xcf, 0xe0, 0x22, 0x23, 0xf8, 0xd0, 0xbf, 0x37, 0xa9,
-	0xe2, 0xd8, 0x8d, 0xaa, 0x38, 0xed, 0x26, 0xcb, 0xfe, 0xa4, 0x3e, 0xc3, 0x36, 0x1e, 0xb6, 0x9b,
-	0x51, 0xe5, 0xe3, 0x31, 0xe5, 0x55, 0xb8, 0x94, 0x0e, 0x7c, 0xaa, 0xf2, 0xbf, 0x14, 0xf9, 0xf1,
-	0x4f, 0x54, 0xd4, 0x52, 0x8e, 0x2b, 0xb2, 0x99, 0x72, 0xaa, 0x27, 0x29, 0xaa, 0x9f, 0x24, 0x28,
-	0x26, 0xe9, 0x45, 0x80, 0x77, 0xe1, 0x4c, 0x70, 0x23, 0x78, 0x70, 0xb9, 0x2f, 0x56, 0xe0, 0x77,
-	0x7a, 0xd5, 0xf7, 0xa9, 0x38, 0x0c, 0x5f, 0x27, 0x3b, 0x90, 0x58, 0xae, 0x06, 0x1e, 0x73, 0x38,
-	0x91, 0xe3, 0x91, 0x44, 0x2a, 0x26, 0x5c, 0xce, 0xc0, 0x3d, 0xb5, 0x24, 0x28, 0x9f, 0xc0, 0x02,
-	0xe3, 0xd8, 0xb4, 0x1d, 0x63, 0xdf, 0x7e, 0x86, 0xab, 0xa3, 0x5d, 0x21, 0x34, 0x0f, 0x53, 0x2d,
-	0x97, 0x74, 0x30, 0xd3, 0x3e, 0xa3, 0xf3, 0x85, 0xf2, 0x5c, 0x82, 0xc5, 0x6c, 0x58, 0xa1, 0xfe,
-	0x0b, 0x38, 0x5f, 0x0b, 0x3e, 0x57, 0x92, 0xd5, 0xba, 0x3c, 0xe0, 0x89, 0x8b, 0xa0, 0x32, 0xd0,
-	0xb9, 0x5a, 0x92, 0x49, 0xf1, 0xe0, 0x5a, 0x8a, 0x0a, 0xff, 0xd3, 0xae, 0xe3, 0xd9, 0xfb, 0xf7,
-	0xd9, 0xd3, 0x7d, 0xf2, 0x47, 0xbf, 0x1f, 0xfc, 0x44, 0x38, 0xf8, 0x17, 0x13, 0x70, 0x3d, 0x0f,
-	0xad, 0x48, 0xc3, 0x2e, 0xcc, 0xc7, 0xd2, 0x10, 0x64, 0x41, 0xca, 0x7b, 0x67, 0x51, 0x2d, 0xc1,
-	0x84, 0x6e, 0x03, 0xf0, 0xa2, 0x63, 0x60, 0xbc, 0xba, 0xe5, 0x1e, 0x58, 0xaf, 0x91, 0x77, 0xca,
-	0x2a, 0x2b, 0x2d, 0x9d, 0x97, 0x28, 0x73, 0x7d, 0x08, 0xff, 0x77, 0x8d, 0x27, 0x95, 0xfe, 0x48,
-	0xc0, 0xe2, 0x0b, 0x57, 0x57, 0x64, 0x7c, 0xf0, 0x31, 0x74, 0xe3, 0xc9, 0x46, 0x6f, 0x4f, 0x3f,
-	0xeb, 0x86, 0x97, 0x68, 0x17, 0x90, 0xe9, 0x59, 0x15, 0xda, 0x36, 0x9b, 0x36, 0xa5, 0x36, 0x71,
-	0x2a, 0x7b, 0xb8, 0x5b, 0x9c, 0x8c, 0x61, 0x46, 0xe7, 0x95, 0x4e, 0x59, 0xfd, 0xb8, 0x67, 0xff,
-	0x00, 0x77, 0xf5, 0x73, 0xa6, 0x67, 0x45, 0x76, 0xd0, 0x16, 0xcb, 0x3e, 0xa9, 0x15, 0xa7, 0x18,
-	0x52, 0x79, 0x40, 0xeb, 0xf7, 0xcd, 0x52, 0x8a, 0x86, 0xfb, 0xaf, 0x3e, 0x3f, 0x0b, 0x53, 0xec,
-	0xc0, 0xd0, 0x37, 0x12, 0x4c, 0xf3, 0x39, 0x01, 0x0d, 0x28, 0xbf, 0xe4, 0x78, 0x22, 0xaf, 0xe4,
-	0xb4, 0xe6, 0x67, 0xae, 0x2c, 0x7d, 0xfd, 0xfb, 0xdf, 0xdf, 0x8d, 0x2b, 0x68, 0x51, 0x1b, 0x32,
-	0x13, 0xa1, 0x17, 0x12, 0x4c, 0xf3, 0x3b, 0x3b, 0x54, 0x51, 0x64, 0x86, 0x19, 0xaa, 0x28, 0x3a,
-	0xa7, 0x28, 0x5b, 0x4c, 0xd1, 0x5d, 0x74, 0x27, 0x5b, 0x51, 0xbf, 0x36, 0xb5, 0x83, 0xe0, 0xa6,
-	0x1c, 0x6a, 0xfc, 0x21, 0xd1, 0x0e, 0xf8, 0x95, 0x38, 0x44, 0xdf, 0x4b, 0x30, 0xdb, 0x1b, 0x03,
-	0x90, 0x36, 0x44, 0x45, 0x7c, 0x22, 0x91, 0xdf, 0xcd, 0xef, 0x90, 0x3f, 0x97, 0xfc, 0x71, 0x41,
-	0x3f, 0x4a, 0x00, 0xfd, 0xd7, 0x01, 0xe5, 0xa2, 0x0a, 0xbf, 0x84, 0x72, 0x79, 0x04, 0x0f, 0xa1,
-	0x6e, 0x85, 0xa9, 0xbb, 0x8a, 0xae, 0x0c, 0x53, 0xc7, 0x12, 0x8b, 0x7e, 0x91, 0xe0, 0xb5, 0x58,
-	0x4f, 0x47, 0x37, 0x87, 0xb0, 0xa6, 0x0f, 0x17, 0xf2, 0xad, 0x51, 0xdd, 0x84, 0xe2, 0x1b, 0x4c,
-	0xf1, 0x0a, 0x7a, 0x27, 0x5b, 0x31, 0x7f, 0x58, 0xc2, 0xba, 0x7f, 0x96, 0xa0, 0x10, 0x6a, 0xd3,
-	0x68, 0x58, 0xa6, 0x92, 0x13, 0x85, 0xbc, 0x3a, 0x8a, 0x8b, 0xd0, 0xfa, 0x1e, 0xd3, 0xaa, 0xa2,
-	0xe5, 0x6c, 0xad, 0xa2, 0xd1, 0x85, 0x4a, 0x16, 0xfd, 0x26, 0xc1, 0xb9, 0x78, 0x4f, 0x45, 0xb7,
-	0x72, 0xd0, 0xa7, 0x34, 0x77, 0x79, 0x6d, 0x64, 0xbf, 0xfc, 0x37, 0x2e, 0xa9, 0x9d, 0xa7, 0x9e,
-	0x6a, 0x07, 0xbd, 0x81, 0xe2, 0x10, 0xfd, 0x2a, 0xc1, 0x5c, 0x4a, 0x9f, 0x45, 0xb7, 0x87, 0x28,
-	0xcb, 0x6e, 0xf9, 0xf2, 0xfb, 0x27, 0x71, 0x15, 0x71, 0xad, 0xb1, 0xb8, 0xca, 0x48, 0xcb, 0x8e,
-	0x2b, 0xb5, 0xed, 0xa3, 0x7f, 0x24, 0xb8, 0x3c, 0xb0, 0x65, 0xa2, 0x8d, 0x91, 0x64, 0xa5, 0xf7,
-	0x79, 0xf9, 0xde, 0x7f, 0x03, 0x11, 0x51, 0xee, 0xb0, 0x28, 0x1f, 0xa0, 0xed, 0xdc, 0x51, 0xa6,
-	0xbc, 0x9c, 0x3e, 0x62, 0xef, 0xe5, 0x5c, 0xdf, 0x79, 0x79, 0x54, 0x92, 0x5e, 0x1d, 0x95, 0xa4,
-	0xbf, 0x8e, 0x4a, 0xd2, 0xb7, 0xc7, 0xa5, 0xb1, 0x57, 0xc7, 0xa5, 0xb1, 0x3f, 0x8e, 0x4b, 0x63,
-	0x9f, 0xaf, 0xd5, 0x6d, 0xaf, 0xd1, 0x36, 0x55, 0x8b, 0x34, 0x03, 0xba, 0x7d, 0xc3, 0xa4, 0x2b,
-	0x36, 0xe9, 0xb1, 0x3f, 0x8d, 0xf1, 0x7b, 0xdd, 0x16, 0xa6, 0xe6, 0x34, 0xfb, 0x4b, 0x7d, 0xe3,
-	0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x09, 0x4f, 0xa5, 0xb2, 0xc6, 0x10, 0x00, 0x00,
+	// 1180 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcd, 0x6f, 0xdc, 0x44,
+	0x14, 0x8f, 0x37, 0x1f, 0x34, 0x6f, 0x5b, 0xa8, 0x26, 0x69, 0x59, 0xb9, 0xed, 0x26, 0x18, 0x4a,
+	0xd3, 0xd2, 0xd8, 0x6c, 0x2a, 0x1a, 0x15, 0x24, 0x10, 0x49, 0x48, 0x1b, 0x28, 0x51, 0x6b, 0xc8,
+	0xa5, 0x02, 0x2d, 0xb6, 0x77, 0x76, 0xd7, 0x4a, 0xd6, 0xb3, 0xf5, 0x78, 0xb7, 0x4d, 0xa2, 0x48,
+	0xa8, 0xe2, 0x4e, 0x25, 0x2e, 0x88, 0x13, 0x27, 0x6e, 0x20, 0x71, 0xe0, 0x4f, 0x40, 0xea, 0x8d,
+	0x4a, 0x5c, 0x38, 0x21, 0x94, 0xf0, 0x87, 0x54, 0x9e, 0x19, 0xef, 0xfa, 0x33, 0x76, 0xd2, 0xdc,
+	0xd6, 0x33, 0xef, 0xfd, 0xde, 0xef, 0x7d, 0xcd, 0x7b, 0x0b, 0x6f, 0x99, 0x86, 0xb9, 0xbd, 0x45,
+	0x1c, 0x6d, 0x87, 0x38, 0xd8, 0x22, 0x8e, 0x65, 0x63, 0xb7, 0x85, 0xb5, 0x7e, 0x4d, 0x7b, 0xd8,
+	0xc3, 0xee, 0xb6, 0xda, 0x75, 0x89, 0x47, 0x50, 0x45, 0x48, 0xa9, 0x11, 0x29, 0xb5, 0x5f, 0x93,
+	0xa7, 0x5b, 0xa4, 0x45, 0x98, 0x90, 0xe6, 0xff, 0xe2, 0xf2, 0xf2, 0xc5, 0x16, 0x21, 0xad, 0x2d,
+	0xac, 0x19, 0x5d, 0x5b, 0x33, 0x1c, 0x87, 0x78, 0x86, 0x67, 0x13, 0x87, 0x8a, 0xdb, 0x6b, 0x16,
+	0xa1, 0x1d, 0x42, 0x35, 0xd3, 0xa0, 0x98, 0x9b, 0xd1, 0xfa, 0x35, 0x13, 0x7b, 0x46, 0x4d, 0xeb,
+	0x1a, 0x2d, 0xdb, 0x61, 0xc2, 0x42, 0xf6, 0x7a, 0xc0, 0xcf, 0xf4, 0x2c, 0xab, 0x8d, 0xad, 0xcd,
+	0x2e, 0xb1, 0x1d, 0xcf, 0xe7, 0x17, 0x39, 0x10, 0xd2, 0x57, 0x03, 0xe9, 0xe1, 0x8d, 0xed, 0xb4,
+	0x7c, 0xe9, 0x84, 0xa8, 0x12, 0x88, 0xe2, 0x2e, 0xb1, 0xda, 0x42, 0x2a, 0xf8, 0x1d, 0x37, 0x9e,
+	0x08, 0x4e, 0x34, 0x0e, 0x5c, 0xfa, 0x72, 0xa6, 0x74, 0xd7, 0x70, 0x8d, 0x8e, 0xf0, 0x5e, 0x99,
+	0x06, 0x74, 0xdf, 0xf7, 0xf9, 0x1e, 0x3b, 0xd4, 0xf1, 0xc3, 0x1e, 0xa6, 0x9e, 0xb2, 0x01, 0x53,
+	0x91, 0x53, 0xda, 0x25, 0x0e, 0xc5, 0xe8, 0x43, 0x98, 0xe0, 0xca, 0x15, 0x69, 0x56, 0x9a, 0x2b,
+	0x2f, 0xcc, 0xaa, 0x59, 0x99, 0x50, 0xb9, 0xe6, 0xd2, 0xd8, 0xb3, 0x7f, 0x67, 0x46, 0x74, 0xa1,
+	0xa5, 0x7c, 0x2e, 0x8c, 0xdd, 0xc1, 0x46, 0x03, 0xbb, 0xc2, 0x18, 0x9a, 0x81, 0xb2, 0x45, 0x1c,
+	0xda, 0xeb, 0x60, 0xb7, 0x6e, 0x37, 0x18, 0xf4, 0xa4, 0x0e, 0xc1, 0xd1, 0x5a, 0x03, 0x9d, 0x87,
+	0x89, 0x36, 0xb6, 0x5b, 0x6d, 0xaf, 0x52, 0x9a, 0x95, 0xe6, 0xc6, 0x74, 0xf1, 0xa5, 0xfc, 0x24,
+	0x09, 0x9a, 0x01, 0x9e, 0xa0, 0xf9, 0x91, 0x2f, 0xef, 0x9f, 0x08, 0x9a, 0x57, 0xb2, 0x69, 0xae,
+	0x39, 0x0d, 0xfc, 0x18, 0x37, 0x04, 0x80, 0x50, 0x43, 0x4b, 0x70, 0xba, 0x49, 0xdc, 0xcd, 0x3a,
+	0xff, 0xa4, 0xcc, 0x6c, 0x79, 0x61, 0x26, 0x1b, 0x66, 0x95, 0xb8, 0x9b, 0x54, 0x2f, 0xfb, 0x4a,
+	0x1c, 0x8a, 0x2a, 0x75, 0x38, 0xc7, 0xb8, 0x2d, 0xb7, 0x0d, 0xdb, 0xb9, 0x6b, 0x53, 0x2f, 0x70,
+	0x77, 0x15, 0x60, 0x58, 0x57, 0x82, 0xe1, 0xdb, 0x2a, 0x2f, 0x42, 0xd5, 0x2f, 0x42, 0x95, 0xd7,
+	0xba, 0x28, 0x42, 0xf5, 0x9e, 0xd1, 0xc2, 0x42, 0x57, 0x0f, 0x69, 0x2a, 0xdf, 0x49, 0x70, 0x3e,
+	0x6e, 0x41, 0x04, 0xe0, 0x0d, 0x38, 0x1d, 0x8a, 0xa8, 0x9f, 0xad, 0xd1, 0xb9, 0x49, 0xbd, 0x3c,
+	0x0c, 0x29, 0x45, 0xb7, 0x23, 0x2c, 0x4a, 0x22, 0x4e, 0x79, 0x2c, 0x38, 0x7e, 0x84, 0xc6, 0x07,
+	0x61, 0x16, 0x74, 0xcd, 0x69, 0x92, 0xc0, 0xd1, 0x7c, 0x16, 0x4a, 0x1d, 0x5e, 0x4f, 0x28, 0x0b,
+	0x1f, 0x56, 0xa0, 0x6c, 0xb1, 0xd3, 0xba, 0xed, 0x34, 0x09, 0x53, 0x2e, 0x2f, 0xbc, 0x99, 0x9d,
+	0x02, 0x06, 0xc1, 0x10, 0xc0, 0x1a, 0xa0, 0x29, 0x5f, 0xc3, 0x05, 0x66, 0xe0, 0x13, 0xbf, 0x95,
+	0x92, 0x14, 0x2f, 0xc0, 0x24, 0x6b, 0xb2, 0xba, 0xd3, 0xeb, 0xb0, 0x54, 0x8c, 0xe9, 0xa7, 0xd8,
+	0xc1, 0x7a, 0xaf, 0x93, 0xe0, 0x5f, 0x4a, 0xf2, 0x6f, 0xc0, 0xc5, 0x74, 0xf8, 0x13, 0x75, 0xe2,
+	0x89, 0x24, 0xc2, 0xe4, 0x27, 0x59, 0xd4, 0x57, 0xe1, 0xe6, 0x59, 0x4d, 0x49, 0xf4, 0x71, 0xca,
+	0xed, 0x17, 0x09, 0x2a, 0x49, 0x12, 0xc2, 0xcf, 0x8f, 0xe1, 0x95, 0xa0, 0x57, 0xb8, 0x8f, 0x85,
+	0x5b, 0x2e, 0xd0, 0x3b, 0xb9, 0x82, 0xfc, 0x4a, 0xe4, 0xc4, 0xe7, 0xc9, 0xf2, 0x12, 0x8b, 0xd8,
+	0xa1, 0x39, 0x8f, 0x85, 0xb3, 0x14, 0x0f, 0xa7, 0x62, 0xc2, 0xa5, 0x0c, 0xf4, 0x13, 0x0b, 0x85,
+	0xf2, 0x00, 0x66, 0x98, 0x8d, 0x55, 0xdb, 0x31, 0xb6, 0xec, 0x1d, 0xdc, 0x38, 0x4e, 0x6f, 0xa1,
+	0x69, 0x18, 0xef, 0xba, 0xa4, 0x8f, 0x99, 0x13, 0xa7, 0x74, 0xfe, 0xe1, 0xbf, 0x1a, 0xb3, 0xd9,
+	0xe0, 0xc2, 0x87, 0x6f, 0xe0, 0x5c, 0x33, 0xb8, 0xae, 0x27, 0x0b, 0xf8, 0xfa, 0x21, 0x0f, 0x61,
+	0x04, 0x95, 0x81, 0x4e, 0x35, 0x93, 0x96, 0x94, 0x1d, 0xb8, 0x9a, 0xc2, 0xc2, 0xbf, 0xda, 0x70,
+	0x3c, 0x7b, 0xeb, 0x0e, 0x7b, 0xe0, 0x5f, 0x76, 0x40, 0x0c, 0x43, 0x30, 0x1a, 0x0e, 0xc1, 0x6f,
+	0xa3, 0x70, 0xad, 0x88, 0x71, 0x11, 0x8c, 0x0d, 0x98, 0x8e, 0x05, 0x23, 0x88, 0x85, 0x54, 0xb4,
+	0x99, 0x51, 0x33, 0x61, 0x09, 0xdd, 0x02, 0xe0, 0x65, 0xc8, 0xc0, 0x78, 0xbd, 0xcb, 0x03, 0xb0,
+	0xc1, 0xe8, 0xef, 0xd7, 0x54, 0x56, 0x66, 0x3a, 0x2f, 0x5a, 0xa6, 0xba, 0x0e, 0xaf, 0xba, 0xc6,
+	0xa3, 0xfa, 0x70, 0x89, 0x60, 0xfe, 0x85, 0x2b, 0x2d, 0xb2, 0x70, 0xf8, 0x18, 0xba, 0xf1, 0x68,
+	0x79, 0x70, 0xa6, 0x9f, 0x71, 0xc3, 0x9f, 0x68, 0x03, 0x90, 0xe9, 0x59, 0x75, 0xda, 0x33, 0x3b,
+	0x36, 0xa5, 0x36, 0x71, 0xea, 0x9b, 0x78, 0xbb, 0x32, 0x16, 0xc3, 0x8c, 0x6e, 0x38, 0xfd, 0x9a,
+	0xfa, 0xc5, 0x40, 0xfe, 0x33, 0xbc, 0xad, 0x9f, 0x35, 0x3d, 0x2b, 0x72, 0x82, 0x6e, 0xb3, 0xe8,
+	0x93, 0x66, 0x65, 0x9c, 0x21, 0xd5, 0x0e, 0x59, 0x16, 0x7c, 0xb1, 0x94, 0xd2, 0xe1, 0xfa, 0x0b,
+	0x4f, 0xcf, 0xc0, 0x38, 0x4b, 0x18, 0xfa, 0x5e, 0x82, 0x09, 0xbe, 0x59, 0xa0, 0x43, 0x8a, 0x30,
+	0xb9, 0xd0, 0xc8, 0xf3, 0x05, 0xa5, 0x79, 0xce, 0x95, 0xb9, 0x27, 0x7f, 0xff, 0xff, 0x43, 0x49,
+	0x41, 0xb3, 0x5a, 0xce, 0x16, 0x85, 0x7e, 0x97, 0x60, 0x82, 0xf7, 0x6f, 0x2e, 0xa3, 0xc8, 0xd6,
+	0x93, 0xcb, 0x28, 0xba, 0xd3, 0x28, 0x9f, 0x32, 0x46, 0x2b, 0x68, 0x29, 0x9b, 0xd1, 0xb0, 0x36,
+	0xb5, 0xdd, 0x50, 0xbf, 0xec, 0x69, 0xfc, 0x5d, 0xd1, 0x76, 0x79, 0x57, 0xec, 0xa1, 0x1f, 0x25,
+	0x98, 0x1c, 0x2c, 0x0d, 0x48, 0xcb, 0x21, 0x12, 0x5f, 0x60, 0xe4, 0x77, 0x8b, 0x2b, 0x14, 0x0f,
+	0x27, 0x7f, 0x65, 0xd0, 0xcf, 0x12, 0xc0, 0xf0, 0x99, 0x40, 0x85, 0x4c, 0x85, 0x1f, 0x46, 0xb9,
+	0x76, 0x04, 0x0d, 0xc1, 0x6e, 0x9e, 0xb1, 0xbb, 0x82, 0x2e, 0xe7, 0xb1, 0x63, 0xb1, 0x45, 0x7f,
+	0x48, 0xf0, 0x5a, 0x6c, 0xde, 0xa3, 0xf7, 0x72, 0xac, 0xa6, 0xaf, 0x1f, 0xf2, 0xcd, 0xa3, 0xaa,
+	0x09, 0xc6, 0x37, 0x18, 0xe3, 0x79, 0xf4, 0x4e, 0x36, 0x63, 0xfe, 0xb6, 0x84, 0x79, 0xff, 0x2a,
+	0x41, 0x39, 0x34, 0xbb, 0x51, 0x5e, 0xa4, 0x92, 0xcb, 0x86, 0xbc, 0x70, 0x14, 0x15, 0xc1, 0x75,
+	0x91, 0x71, 0xad, 0x21, 0x2d, 0x9b, 0xab, 0x98, 0x7b, 0xd1, 0xaa, 0x45, 0x7f, 0x49, 0x70, 0x36,
+	0x3e, 0x65, 0xd1, 0xcd, 0x02, 0x0c, 0x52, 0x86, 0xbe, 0xbc, 0x78, 0x64, 0xbd, 0xe2, 0x7d, 0x97,
+	0x4a, 0x9f, 0x27, 0x80, 0x6a, 0xbb, 0x83, 0x5d, 0x63, 0x0f, 0xfd, 0x29, 0xc1, 0x54, 0xca, 0xd8,
+	0x45, 0xb7, 0x72, 0xc8, 0x65, 0xef, 0x01, 0xf2, 0xfb, 0xc7, 0x51, 0x2d, 0x9e, 0x99, 0xd4, 0x2d,
+	0x00, 0x7d, 0x5b, 0x82, 0x4b, 0x87, 0xce, 0x4e, 0xb4, 0x7c, 0x24, 0x5a, 0xe9, 0x63, 0x5f, 0x5e,
+	0x79, 0x39, 0x10, 0xe1, 0xe5, 0x97, 0xcc, 0xcb, 0x75, 0x74, 0xb7, 0xb0, 0x97, 0xe9, 0x4f, 0xa8,
+	0x0f, 0x3a, 0x78, 0x42, 0x97, 0xee, 0x3f, 0xdb, 0xaf, 0x4a, 0xcf, 0xf7, 0xab, 0xd2, 0x7f, 0xfb,
+	0x55, 0xe9, 0xe9, 0x41, 0x75, 0xe4, 0xf9, 0x41, 0x75, 0xe4, 0x9f, 0x83, 0xea, 0xc8, 0x83, 0xc5,
+	0x96, 0xed, 0xb5, 0x7b, 0xa6, 0x6a, 0x91, 0x4e, 0x60, 0x71, 0xcb, 0x30, 0xe9, 0xbc, 0x4d, 0x06,
+	0x04, 0x1e, 0xc7, 0x28, 0x78, 0xdb, 0x5d, 0x4c, 0xcd, 0x09, 0xf6, 0x87, 0xfc, 0xc6, 0x8b, 0x00,
+	0x00, 0x00, 0xff, 0xff, 0x95, 0x86, 0xeb, 0x57, 0x04, 0x11, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -1564,10 +1564,10 @@ func (m *QueryHeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 		i--
 		dAtA[i] = 0x10
 	}
-	if len(m.ChainId) > 0 {
-		i -= len(m.ChainId)
-		copy(dAtA[i:], m.ChainId)
-		i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId)))
+	if len(m.ConsumerId) > 0 {
+		i -= len(m.ConsumerId)
+		copy(dAtA[i:], m.ConsumerId)
+		i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerId)))
 		i--
 		dAtA[i] = 0xa
 	}
@@ -1688,11 +1688,11 @@ func (m *QueryChainListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error)
 		i--
 		dAtA[i] = 0x12
 	}
-	if len(m.ChainIds) > 0 {
-		for iNdEx := len(m.ChainIds) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.ChainIds[iNdEx])
-			copy(dAtA[i:], m.ChainIds[iNdEx])
-			i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainIds[iNdEx])))
+	if len(m.ConsumerIds) > 0 {
+		for iNdEx := len(m.ConsumerIds) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ConsumerIds[iNdEx])
+			copy(dAtA[i:], m.ConsumerIds[iNdEx])
+			i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerIds[iNdEx])))
 			i--
 			dAtA[i] = 0xa
 		}
@@ -1720,11 +1720,11 @@ func (m *QueryChainsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error)
 	_ = i
 	var l int
 	_ = l
-	if len(m.ChainIds) > 0 {
-		for iNdEx := len(m.ChainIds) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.ChainIds[iNdEx])
-			copy(dAtA[i:], m.ChainIds[iNdEx])
-			i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainIds[iNdEx])))
+	if len(m.ConsumerIds) > 0 {
+		for iNdEx := len(m.ConsumerIds) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ConsumerIds[iNdEx])
+			copy(dAtA[i:], m.ConsumerIds[iNdEx])
+			i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerIds[iNdEx])))
 			i--
 			dAtA[i] = 0xa
 		}
@@ -1789,11 +1789,11 @@ func (m *QueryEpochChainsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, er
 	_ = i
 	var l int
 	_ = l
-	if len(m.ChainIds) > 0 {
-		for iNdEx := len(m.ChainIds) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.ChainIds[iNdEx])
-			copy(dAtA[i:], m.ChainIds[iNdEx])
-			i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainIds[iNdEx])))
+	if len(m.ConsumerIds) > 0 {
+		for iNdEx := len(m.ConsumerIds) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ConsumerIds[iNdEx])
+			copy(dAtA[i:], m.ConsumerIds[iNdEx])
+			i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerIds[iNdEx])))
 			i--
 			dAtA[i] = 0x12
 		}
@@ -1875,10 +1875,10 @@ func (m *QueryListHeadersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error)
 		i--
 		dAtA[i] = 0x12
 	}
-	if len(m.ChainId) > 0 {
-		i -= len(m.ChainId)
-		copy(dAtA[i:], m.ChainId)
-		i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId)))
+	if len(m.ConsumerId) > 0 {
+		i -= len(m.ConsumerId)
+		copy(dAtA[i:], m.ConsumerId)
+		i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerId)))
 		i--
 		dAtA[i] = 0xa
 	}
@@ -1954,10 +1954,10 @@ func (m *QueryListEpochHeadersRequest) MarshalToSizedBuffer(dAtA []byte) (int, e
 	_ = i
 	var l int
 	_ = l
-	if len(m.ChainId) > 0 {
-		i -= len(m.ChainId)
-		copy(dAtA[i:], m.ChainId)
-		i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId)))
+	if len(m.ConsumerId) > 0 {
+		i -= len(m.ConsumerId)
+		copy(dAtA[i:], m.ConsumerId)
+		i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerId)))
 		i--
 		dAtA[i] = 0x12
 	}
@@ -2036,11 +2036,11 @@ func (m *QueryFinalizedChainsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int
 		i--
 		dAtA[i] = 0x10
 	}
-	if len(m.ChainIds) > 0 {
-		for iNdEx := len(m.ChainIds) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.ChainIds[iNdEx])
-			copy(dAtA[i:], m.ChainIds[iNdEx])
-			i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainIds[iNdEx])))
+	if len(m.ConsumerIds) > 0 {
+		for iNdEx := len(m.ConsumerIds) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ConsumerIds[iNdEx])
+			copy(dAtA[i:], m.ConsumerIds[iNdEx])
+			i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerIds[iNdEx])))
 			i--
 			dAtA[i] = 0xa
 		}
@@ -2120,10 +2120,10 @@ func (m *QueryFinalizedChainInfoUntilHeightRequest) MarshalToSizedBuffer(dAtA []
 		i--
 		dAtA[i] = 0x10
 	}
-	if len(m.ChainId) > 0 {
-		i -= len(m.ChainId)
-		copy(dAtA[i:], m.ChainId)
-		i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId)))
+	if len(m.ConsumerId) > 0 {
+		i -= len(m.ConsumerId)
+		copy(dAtA[i:], m.ConsumerId)
+		i = encodeVarintQuery(dAtA, i, uint64(len(m.ConsumerId)))
 		i--
 		dAtA[i] = 0xa
 	}
@@ -2250,7 +2250,7 @@ func (m *QueryHeaderRequest) Size() (n int) {
 	}
 	var l int
 	_ = l
-	l = len(m.ChainId)
+	l = len(m.ConsumerId)
 	if l > 0 {
 		n += 1 + l + sovQuery(uint64(l))
 	}
@@ -2296,8 +2296,8 @@ func (m *QueryChainListResponse) Size() (n int) {
 	}
 	var l int
 	_ = l
-	if len(m.ChainIds) > 0 {
-		for _, s := range m.ChainIds {
+	if len(m.ConsumerIds) > 0 {
+		for _, s := range m.ConsumerIds {
 			l = len(s)
 			n += 1 + l + sovQuery(uint64(l))
 		}
@@ -2315,8 +2315,8 @@ func (m *QueryChainsInfoRequest) Size() (n int) {
 	}
 	var l int
 	_ = l
-	if len(m.ChainIds) > 0 {
-		for _, s := range m.ChainIds {
+	if len(m.ConsumerIds) > 0 {
+		for _, s := range m.ConsumerIds {
 			l = len(s)
 			n += 1 + l + sovQuery(uint64(l))
 		}
@@ -2348,8 +2348,8 @@ func (m *QueryEpochChainsInfoRequest) Size() (n int) {
 	if m.EpochNum != 0 {
 		n += 1 + sovQuery(uint64(m.EpochNum))
 	}
-	if len(m.ChainIds) > 0 {
-		for _, s := range m.ChainIds {
+	if len(m.ConsumerIds) > 0 {
+		for _, s := range m.ConsumerIds {
 			l = len(s)
 			n += 1 + l + sovQuery(uint64(l))
 		}
@@ -2378,7 +2378,7 @@ func (m *QueryListHeadersRequest) Size() (n int) {
 	}
 	var l int
 	_ = l
-	l = len(m.ChainId)
+	l = len(m.ConsumerId)
 	if l > 0 {
 		n += 1 + l + sovQuery(uint64(l))
 	}
@@ -2417,7 +2417,7 @@ func (m *QueryListEpochHeadersRequest) Size() (n int) {
 	if m.EpochNum != 0 {
 		n += 1 + sovQuery(uint64(m.EpochNum))
 	}
-	l = len(m.ChainId)
+	l = len(m.ConsumerId)
 	if l > 0 {
 		n += 1 + l + sovQuery(uint64(l))
 	}
@@ -2445,8 +2445,8 @@ func (m *QueryFinalizedChainsInfoRequest) Size() (n int) {
 	}
 	var l int
 	_ = l
-	if len(m.ChainIds) > 0 {
-		for _, s := range m.ChainIds {
+	if len(m.ConsumerIds) > 0 {
+		for _, s := range m.ConsumerIds {
 			l = len(s)
 			n += 1 + l + sovQuery(uint64(l))
 		}
@@ -2478,7 +2478,7 @@ func (m *QueryFinalizedChainInfoUntilHeightRequest) Size() (n int) {
 	}
 	var l int
 	_ = l
-	l = len(m.ChainId)
+	l = len(m.ConsumerId)
 	if l > 0 {
 		n += 1 + l + sovQuery(uint64(l))
 	}
@@ -2690,7 +2690,7 @@ func (m *QueryHeaderRequest) Unmarshal(dAtA []byte) error {
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumerId", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -2718,7 +2718,7 @@ func (m *QueryHeaderRequest) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ChainId = string(dAtA[iNdEx:postIndex])
+			m.ConsumerId = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
 			if wireType != 0 {
@@ -2999,7 +2999,7 @@ func (m *QueryChainListResponse) Unmarshal(dAtA []byte) error {
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ChainIds", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumerIds", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -3027,7 +3027,7 @@ func (m *QueryChainListResponse) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ChainIds = append(m.ChainIds, string(dAtA[iNdEx:postIndex]))
+			m.ConsumerIds = append(m.ConsumerIds, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {
@@ -3117,7 +3117,7 @@ func (m *QueryChainsInfoRequest) Unmarshal(dAtA []byte) error {
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ChainIds", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumerIds", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -3145,7 +3145,7 @@ func (m *QueryChainsInfoRequest) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ChainIds = append(m.ChainIds, string(dAtA[iNdEx:postIndex]))
+			m.ConsumerIds = append(m.ConsumerIds, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -3302,7 +3302,7 @@ func (m *QueryEpochChainsInfoRequest) Unmarshal(dAtA []byte) error {
 			}
 		case 2:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ChainIds", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumerIds", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -3330,7 +3330,7 @@ func (m *QueryEpochChainsInfoRequest) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ChainIds = append(m.ChainIds, string(dAtA[iNdEx:postIndex]))
+			m.ConsumerIds = append(m.ConsumerIds, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -3468,7 +3468,7 @@ func (m *QueryListHeadersRequest) Unmarshal(dAtA []byte) error {
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumerId", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -3496,7 +3496,7 @@ func (m *QueryListHeadersRequest) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ChainId = string(dAtA[iNdEx:postIndex])
+			m.ConsumerId = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {
@@ -3725,7 +3725,7 @@ func (m *QueryListEpochHeadersRequest) Unmarshal(dAtA []byte) error {
 			}
 		case 2:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumerId", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -3753,7 +3753,7 @@ func (m *QueryListEpochHeadersRequest) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ChainId = string(dAtA[iNdEx:postIndex])
+			m.ConsumerId = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -3891,7 +3891,7 @@ func (m *QueryFinalizedChainsInfoRequest) Unmarshal(dAtA []byte) error {
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ChainIds", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumerIds", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -3919,7 +3919,7 @@ func (m *QueryFinalizedChainsInfoRequest) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ChainIds = append(m.ChainIds, string(dAtA[iNdEx:postIndex]))
+			m.ConsumerIds = append(m.ConsumerIds, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
 		case 2:
 			if wireType != 0 {
@@ -4077,7 +4077,7 @@ func (m *QueryFinalizedChainInfoUntilHeightRequest) Unmarshal(dAtA []byte) error
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumerId", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -4105,7 +4105,7 @@ func (m *QueryFinalizedChainInfoUntilHeightRequest) Unmarshal(dAtA []byte) error
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ChainId = string(dAtA[iNdEx:postIndex])
+			m.ConsumerId = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
 			if wireType != 0 {
diff --git a/x/zoneconcierge/types/query.pb.gw.go b/x/zoneconcierge/types/query.pb.gw.go
index fc22099f6..de98ae8af 100644
--- a/x/zoneconcierge/types/query.pb.gw.go
+++ b/x/zoneconcierge/types/query.pb.gw.go
@@ -62,15 +62,15 @@ func request_Query_Header_0(ctx context.Context, marshaler runtime.Marshaler, cl
 		_   = err
 	)
 
-	val, ok = pathParams["chain_id"]
+	val, ok = pathParams["consumer_id"]
 	if !ok {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id")
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id")
 	}
 
-	protoReq.ChainId, err = runtime.String(val)
+	protoReq.ConsumerId, err = runtime.String(val)
 
 	if err != nil {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err)
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err)
 	}
 
 	val, ok = pathParams["height"]
@@ -100,15 +100,15 @@ func local_request_Query_Header_0(ctx context.Context, marshaler runtime.Marshal
 		_   = err
 	)
 
-	val, ok = pathParams["chain_id"]
+	val, ok = pathParams["consumer_id"]
 	if !ok {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id")
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id")
 	}
 
-	protoReq.ChainId, err = runtime.String(val)
+	protoReq.ConsumerId, err = runtime.String(val)
 
 	if err != nil {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err)
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err)
 	}
 
 	val, ok = pathParams["height"]
@@ -236,7 +236,7 @@ func local_request_Query_EpochChainsInfo_0(ctx context.Context, marshaler runtim
 }
 
 var (
-	filter_Query_ListHeaders_0 = &utilities.DoubleArray{Encoding: map[string]int{"chain_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
+	filter_Query_ListHeaders_0 = &utilities.DoubleArray{Encoding: map[string]int{"consumer_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
 )
 
 func request_Query_ListHeaders_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -250,15 +250,15 @@ func request_Query_ListHeaders_0(ctx context.Context, marshaler runtime.Marshale
 		_   = err
 	)
 
-	val, ok = pathParams["chain_id"]
+	val, ok = pathParams["consumer_id"]
 	if !ok {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id")
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id")
 	}
 
-	protoReq.ChainId, err = runtime.String(val)
+	protoReq.ConsumerId, err = runtime.String(val)
 
 	if err != nil {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err)
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err)
 	}
 
 	if err := req.ParseForm(); err != nil {
@@ -284,15 +284,15 @@ func local_request_Query_ListHeaders_0(ctx context.Context, marshaler runtime.Ma
 		_   = err
 	)
 
-	val, ok = pathParams["chain_id"]
+	val, ok = pathParams["consumer_id"]
 	if !ok {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id")
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id")
 	}
 
-	protoReq.ChainId, err = runtime.String(val)
+	protoReq.ConsumerId, err = runtime.String(val)
 
 	if err != nil {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err)
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err)
 	}
 
 	if err := req.ParseForm(); err != nil {
@@ -318,15 +318,15 @@ func request_Query_ListEpochHeaders_0(ctx context.Context, marshaler runtime.Mar
 		_   = err
 	)
 
-	val, ok = pathParams["chain_id"]
+	val, ok = pathParams["consumer_id"]
 	if !ok {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id")
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id")
 	}
 
-	protoReq.ChainId, err = runtime.String(val)
+	protoReq.ConsumerId, err = runtime.String(val)
 
 	if err != nil {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err)
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err)
 	}
 
 	val, ok = pathParams["epoch_num"]
@@ -356,15 +356,15 @@ func local_request_Query_ListEpochHeaders_0(ctx context.Context, marshaler runti
 		_   = err
 	)
 
-	val, ok = pathParams["chain_id"]
+	val, ok = pathParams["consumer_id"]
 	if !ok {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id")
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id")
 	}
 
-	protoReq.ChainId, err = runtime.String(val)
+	protoReq.ConsumerId, err = runtime.String(val)
 
 	if err != nil {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err)
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err)
 	}
 
 	val, ok = pathParams["epoch_num"]
@@ -420,7 +420,7 @@ func local_request_Query_FinalizedChainsInfo_0(ctx context.Context, marshaler ru
 }
 
 var (
-	filter_Query_FinalizedChainInfoUntilHeight_0 = &utilities.DoubleArray{Encoding: map[string]int{"chain_id": 0, "height": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}}
+	filter_Query_FinalizedChainInfoUntilHeight_0 = &utilities.DoubleArray{Encoding: map[string]int{"consumer_id": 0, "height": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}}
 )
 
 func request_Query_FinalizedChainInfoUntilHeight_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -434,15 +434,15 @@ func request_Query_FinalizedChainInfoUntilHeight_0(ctx context.Context, marshale
 		_   = err
 	)
 
-	val, ok = pathParams["chain_id"]
+	val, ok = pathParams["consumer_id"]
 	if !ok {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id")
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id")
 	}
 
-	protoReq.ChainId, err = runtime.String(val)
+	protoReq.ConsumerId, err = runtime.String(val)
 
 	if err != nil {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err)
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err)
 	}
 
 	val, ok = pathParams["height"]
@@ -479,15 +479,15 @@ func local_request_Query_FinalizedChainInfoUntilHeight_0(ctx context.Context, ma
 		_   = err
 	)
 
-	val, ok = pathParams["chain_id"]
+	val, ok = pathParams["consumer_id"]
 	if !ok {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id")
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "consumer_id")
 	}
 
-	protoReq.ChainId, err = runtime.String(val)
+	protoReq.ConsumerId, err = runtime.String(val)
 
 	if err != nil {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err)
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "consumer_id", err)
 	}
 
 	val, ok = pathParams["height"]
@@ -953,7 +953,7 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
 var (
 	pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "zoneconcierge", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false)))
 
-	pattern_Query_Header_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"babylon", "zoneconcierge", "v1", "chain_info", "chain_id", "header", "height"}, "", runtime.AssumeColonVerbOpt(false)))
+	pattern_Query_Header_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"babylon", "zoneconcierge", "v1", "chain_info", "consumer_id", "header", "height"}, "", runtime.AssumeColonVerbOpt(false)))
 
 	pattern_Query_ChainList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "zoneconcierge", "v1", "chains"}, "", runtime.AssumeColonVerbOpt(false)))
 
@@ -961,13 +961,13 @@ var (
 
 	pattern_Query_EpochChainsInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "zoneconcierge", "v1", "epoch_chains_info"}, "", runtime.AssumeColonVerbOpt(false)))
 
-	pattern_Query_ListHeaders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "headers", "chain_id"}, "", runtime.AssumeColonVerbOpt(false)))
+	pattern_Query_ListHeaders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "headers", "consumer_id"}, "", runtime.AssumeColonVerbOpt(false)))
 
-	pattern_Query_ListEpochHeaders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"babylon", "zoneconcierge", "v1", "headers", "chain_id", "epochs", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false)))
+	pattern_Query_ListEpochHeaders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"babylon", "zoneconcierge", "v1", "headers", "consumer_id", "epochs", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false)))
 
 	pattern_Query_FinalizedChainsInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "zoneconcierge", "v1", "finalized_chains_info"}, "", runtime.AssumeColonVerbOpt(false)))
 
-	pattern_Query_FinalizedChainInfoUntilHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 5}, []string{"babylon", "zoneconcierge", "v1", "finalized_chain_info", "chain_id", "height"}, "", runtime.AssumeColonVerbOpt(false)))
+	pattern_Query_FinalizedChainInfoUntilHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 5}, []string{"babylon", "zoneconcierge", "v1", "finalized_chain_info", "consumer_id", "height"}, "", runtime.AssumeColonVerbOpt(false)))
 )
 
 var (
diff --git a/x/zoneconcierge/types/zoneconcierge.go b/x/zoneconcierge/types/zoneconcierge.go
index 0a052a01a..91c970ebe 100644
--- a/x/zoneconcierge/types/zoneconcierge.go
+++ b/x/zoneconcierge/types/zoneconcierge.go
@@ -46,8 +46,8 @@ func (p *ProofEpochSealed) ValidateBasic() error {
 }
 
 func (ih *IndexedHeader) ValidateBasic() error {
-	if len(ih.ChainId) == 0 {
-		return fmt.Errorf("empty ChainID")
+	if len(ih.ConsumerId) == 0 {
+		return fmt.Errorf("empty ConsumerID")
 	}
 	if len(ih.Hash) == 0 {
 		return fmt.Errorf("empty Hash")
@@ -66,7 +66,7 @@ func (ih *IndexedHeader) Equal(ih2 *IndexedHeader) bool {
 		return false
 	}
 
-	if ih.ChainId != ih2.ChainId {
+	if ih.ConsumerId != ih2.ConsumerId {
 		return false
 	}
 	if !bytes.Equal(ih.Hash, ih2.Hash) {
@@ -92,7 +92,7 @@ func (ci *ChainInfo) Equal(ci2 *ChainInfo) bool {
 		return false
 	}
 
-	if ci.ChainId != ci2.ChainId {
+	if ci.ConsumerId != ci2.ConsumerId {
 		return false
 	}
 	if !ci.LatestHeader.Equal(ci2.LatestHeader) {
@@ -110,8 +110,8 @@ func (ci *ChainInfo) Equal(ci2 *ChainInfo) bool {
 }
 
 func (ci *ChainInfo) ValidateBasic() error {
-	if len(ci.ChainId) == 0 {
-		return ErrInvalidChainInfo.Wrap("ChainID is empty")
+	if len(ci.ConsumerId) == 0 {
+		return ErrInvalidChainInfo.Wrap("ConsumerId is empty")
 	} else if ci.LatestHeader == nil {
 		return ErrInvalidChainInfo.Wrap("LatestHeader is nil")
 	} else if ci.LatestForks == nil {
diff --git a/x/zoneconcierge/types/zoneconcierge.pb.go b/x/zoneconcierge/types/zoneconcierge.pb.go
index 8e3d792b7..cd5023d6d 100644
--- a/x/zoneconcierge/types/zoneconcierge.pb.go
+++ b/x/zoneconcierge/types/zoneconcierge.pb.go
@@ -34,8 +34,8 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 // IndexedHeader is the metadata of a CZ header
 type IndexedHeader struct {
-	// chain_id is the unique ID of the chain
-	ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
+	// consumer_id is the unique ID of the consumer
+	ConsumerId string `protobuf:"bytes,1,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"`
 	// hash is the hash of this header
 	Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
 	// height is the height of this header on CZ ledger
@@ -92,9 +92,9 @@ func (m *IndexedHeader) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_IndexedHeader proto.InternalMessageInfo
 
-func (m *IndexedHeader) GetChainId() string {
+func (m *IndexedHeader) GetConsumerId() string {
 	if m != nil {
-		return m.ChainId
+		return m.ConsumerId
 	}
 	return ""
 }
@@ -211,8 +211,8 @@ func (m *Forks) GetHeaders() []*IndexedHeader {
 
 // ChainInfo is the information of a CZ
 type ChainInfo struct {
-	// chain_id is the ID of the chain
-	ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
+	// consumer_id is the ID of the consumer
+	ConsumerId string `protobuf:"bytes,1,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"`
 	// latest_header is the latest header in CZ's canonical chain
 	LatestHeader *IndexedHeader `protobuf:"bytes,2,opt,name=latest_header,json=latestHeader,proto3" json:"latest_header,omitempty"`
 	// latest_forks is the latest forks, formed as a series of IndexedHeader (from
@@ -256,9 +256,9 @@ func (m *ChainInfo) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_ChainInfo proto.InternalMessageInfo
 
-func (m *ChainInfo) GetChainId() string {
+func (m *ChainInfo) GetConsumerId() string {
 	if m != nil {
-		return m.ChainId
+		return m.ConsumerId
 	}
 	return ""
 }
@@ -343,8 +343,8 @@ func (m *ChainInfoWithProof) GetProofHeaderInEpoch() *crypto.ProofOps {
 
 // FinalizedChainInfo is the information of a CZ that is BTC-finalised
 type FinalizedChainInfo struct {
-	// chain_id is the ID of the chain
-	ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
+	// consumer_id is the ID of the consumer
+	ConsumerId string `protobuf:"bytes,1,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"`
 	// finalized_chain_info is the info of the CZ
 	FinalizedChainInfo *ChainInfo `protobuf:"bytes,2,opt,name=finalized_chain_info,json=finalizedChainInfo,proto3" json:"finalized_chain_info,omitempty"`
 	// epoch_info is the metadata of the last BTC-finalised epoch
@@ -391,9 +391,9 @@ func (m *FinalizedChainInfo) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_FinalizedChainInfo proto.InternalMessageInfo
 
-func (m *FinalizedChainInfo) GetChainId() string {
+func (m *FinalizedChainInfo) GetConsumerId() string {
 	if m != nil {
-		return m.ChainId
+		return m.ConsumerId
 	}
 	return ""
 }
@@ -639,68 +639,68 @@ func init() {
 }
 
 var fileDescriptor_ab886e1868e5c5cd = []byte{
-	// 968 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xdd, 0x6e, 0x1b, 0x45,
-	0x14, 0xce, 0xc6, 0x4e, 0xd2, 0x1c, 0xc7, 0x6d, 0x98, 0xa4, 0xd4, 0x0d, 0xc2, 0xb1, 0x5c, 0xa9,
-	0xb8, 0x88, 0xae, 0x65, 0x83, 0x84, 0xe0, 0x0e, 0x5b, 0x2d, 0x4d, 0x41, 0xfc, 0xac, 0xdd, 0x82,
-	0x10, 0x68, 0xb5, 0x3f, 0x63, 0xef, 0x2a, 0xeb, 0x1d, 0x6b, 0x67, 0xe2, 0xc6, 0x79, 0x8a, 0xbe,
-	0x05, 0xdc, 0xf2, 0x00, 0xdc, 0x73, 0xd9, 0x4b, 0xee, 0x40, 0xc9, 0x2b, 0x70, 0xc3, 0x1d, 0x9a,
-	0x33, 0x33, 0xeb, 0x75, 0x22, 0x93, 0x70, 0x13, 0xed, 0xcc, 0x7c, 0xe7, 0x9c, 0xef, 0x7c, 0xe7,
-	0xc7, 0x81, 0x0f, 0x7c, 0xcf, 0x9f, 0x27, 0x2c, 0x6d, 0x9f, 0xb1, 0x94, 0x06, 0x2c, 0x0d, 0x62,
-	0x9a, 0x8d, 0x69, 0x7b, 0xd6, 0x59, 0xbe, 0xb0, 0xa7, 0x19, 0x13, 0x8c, 0xd4, 0x34, 0xda, 0x5e,
-	0x7e, 0x9c, 0x75, 0x0e, 0xf6, 0xc7, 0x6c, 0xcc, 0x10, 0xd4, 0x96, 0x5f, 0x0a, 0x7f, 0x70, 0x38,
-	0x66, 0x6c, 0x9c, 0xd0, 0x36, 0x9e, 0xfc, 0x93, 0x51, 0x5b, 0xc4, 0x13, 0xca, 0x85, 0x37, 0x99,
-	0x6a, 0xc0, 0xbb, 0x82, 0xa6, 0x21, 0xcd, 0x26, 0x71, 0x2a, 0xda, 0x41, 0x36, 0x9f, 0x0a, 0x26,
-	0xb1, 0x6c, 0xa4, 0x9f, 0x73, 0x76, 0xbe, 0x08, 0x82, 0x88, 0x06, 0xc7, 0x53, 0x26, 0x91, 0xb3,
-	0xce, 0xf2, 0x85, 0x46, 0x3f, 0x34, 0xe8, 0xc5, 0x4b, 0x9c, 0x8e, 0x11, 0x9d, 0x70, 0xf7, 0x98,
-	0xce, 0x35, 0xee, 0xd1, 0x4a, 0xdc, 0x15, 0x97, 0x4d, 0x03, 0xa5, 0x53, 0x16, 0x44, 0x1a, 0x65,
-	0xbe, 0x35, 0xc6, 0x2e, 0x90, 0x4c, 0xe2, 0x71, 0x24, 0xff, 0xd2, 0x9c, 0x65, 0xe1, 0x46, 0xe1,
-	0x9b, 0xbf, 0xad, 0x43, 0xf5, 0x28, 0x0d, 0xe9, 0x29, 0x0d, 0x9f, 0x51, 0x2f, 0xa4, 0x19, 0xb9,
-	0x0f, 0xb7, 0x82, 0xc8, 0x8b, 0x53, 0x37, 0x0e, 0x6b, 0x56, 0xc3, 0x6a, 0x6d, 0x3b, 0x5b, 0x78,
-	0x3e, 0x0a, 0x09, 0x81, 0x72, 0xe4, 0xf1, 0xa8, 0xb6, 0xde, 0xb0, 0x5a, 0x3b, 0x0e, 0x7e, 0x93,
-	0xb7, 0x61, 0x33, 0xa2, 0xd2, 0x6d, 0xad, 0xd4, 0xb0, 0x5a, 0x65, 0x47, 0x9f, 0xc8, 0x47, 0x50,
-	0x96, 0xfa, 0xd6, 0xca, 0x0d, 0xab, 0x55, 0xe9, 0x1e, 0xd8, 0x4a, 0x7c, 0xdb, 0x88, 0x6f, 0x0f,
-	0x8d, 0xf8, 0xbd, 0xf2, 0xeb, 0x3f, 0x0f, 0x2d, 0x07, 0xd1, 0xc4, 0x86, 0x3d, 0x9d, 0x80, 0x1b,
-	0x21, 0x1d, 0x17, 0x03, 0x6e, 0x60, 0xc0, 0xb7, 0xf4, 0x93, 0x22, 0xfa, 0x4c, 0x46, 0xef, 0xc2,
-	0xdd, 0xcb, 0x78, 0x45, 0x66, 0x13, 0xc9, 0xec, 0x2d, 0x5b, 0x28, 0x66, 0x0f, 0xa0, 0x6a, 0x6c,
-	0x50, 0xbc, 0xda, 0x16, 0x62, 0x77, 0xf4, 0xe5, 0x13, 0x79, 0x47, 0x1e, 0xc2, 0x1d, 0x03, 0x12,
-	0xa7, 0x8a, 0xc4, 0x2d, 0x24, 0x61, 0x6c, 0x87, 0xa7, 0x92, 0x40, 0xf3, 0x39, 0x6c, 0x3c, 0x65,
-	0xd9, 0x31, 0x27, 0x9f, 0xc1, 0x96, 0x62, 0xc0, 0x6b, 0xa5, 0x46, 0xa9, 0x55, 0xe9, 0xbe, 0x67,
-	0xaf, 0xea, 0x4f, 0x7b, 0x49, 0x70, 0xc7, 0xd8, 0x35, 0xff, 0xb6, 0x60, 0xbb, 0x8f, 0x52, 0xa7,
-	0x23, 0xf6, 0x5f, 0x75, 0xf8, 0x12, 0xaa, 0x89, 0x27, 0x28, 0x17, 0x3a, 0x69, 0x2c, 0xc8, 0xff,
-	0x88, 0xb8, 0xa3, 0xac, 0x75, 0xc1, 0x7b, 0xa0, 0xcf, 0xee, 0x48, 0x66, 0x82, 0x75, 0xac, 0x74,
-	0x0f, 0x57, 0x3b, 0xc3, 0x84, 0x9d, 0x8a, 0x32, 0x52, 0xd9, 0x7f, 0x0a, 0xf7, 0xf3, 0x69, 0xa2,
-	0xa1, 0xa6, 0xc5, 0xdd, 0x80, 0x9d, 0xa4, 0x02, 0x5b, 0xa0, 0xec, 0xdc, 0x2b, 0x00, 0x54, 0x64,
-	0xde, 0x97, 0xcf, 0xcd, 0x5f, 0x2c, 0x20, 0x79, 0xda, 0xdf, 0xc5, 0x22, 0xfa, 0x46, 0x0e, 0x1d,
-	0xe9, 0x01, 0xe8, 0xfc, 0xd3, 0x11, 0x43, 0x05, 0x2a, 0xdd, 0x07, 0xab, 0x49, 0xe5, 0x1e, 0x9c,
-	0xed, 0x20, 0xd7, 0xf0, 0x2b, 0xb8, 0x8b, 0x13, 0x6c, 0x9a, 0x23, 0x36, 0x25, 0x57, 0x82, 0xbd,
-	0x63, 0x2f, 0x26, 0xde, 0x56, 0x13, 0x6f, 0x63, 0xf0, 0xaf, 0xa7, 0xdc, 0x21, 0x68, 0xa9, 0x98,
-	0x1e, 0xa9, 0xae, 0x68, 0xfe, 0x5a, 0x02, 0xf2, 0x34, 0x4e, 0xbd, 0x24, 0x3e, 0xa3, 0xe1, 0x8d,
-	0x4a, 0xf5, 0x02, 0xf6, 0x47, 0xc6, 0xc0, 0x2d, 0xe4, 0xb3, 0x7e, 0xf3, 0x7c, 0xc8, 0xe8, 0x6a,
-	0xc4, 0x4f, 0x00, 0x30, 0x11, 0xe5, 0xac, 0xa4, 0x67, 0xcc, 0x38, 0xcb, 0x77, 0xc2, 0xac, 0x63,
-	0x23, 0x71, 0x67, 0x1b, 0xaf, 0xb4, 0x26, 0xb7, 0x33, 0xef, 0x95, 0xbb, 0xd8, 0x2e, 0x7a, 0x44,
-	0x17, 0xdd, 0xb3, 0xb4, 0x89, 0xa4, 0x0f, 0xc7, 0x7b, 0xd5, 0xcf, 0xef, 0x9c, 0x6a, 0x56, 0x3c,
-	0x92, 0x17, 0x40, 0x7c, 0x11, 0xb8, 0xfc, 0xc4, 0x9f, 0xc4, 0x9c, 0xc7, 0x2c, 0x95, 0xcb, 0x0d,
-	0x27, 0xb6, 0xe8, 0x73, 0x79, 0x45, 0xce, 0x3a, 0xf6, 0x20, 0xc7, 0x7f, 0x41, 0xe7, 0xce, 0xae,
-	0x2f, 0x82, 0xa5, 0x1b, 0xf2, 0x39, 0x6c, 0x60, 0x01, 0x70, 0x92, 0x2b, 0xdd, 0xce, 0x6a, 0xa5,
-	0xb0, 0x62, 0x57, 0xab, 0xe2, 0x28, 0xfb, 0xe6, 0x3f, 0x16, 0xec, 0x22, 0x04, 0x95, 0x18, 0x50,
-	0x2f, 0xa1, 0x21, 0x71, 0xa0, 0x3a, 0xf3, 0x92, 0x38, 0xf4, 0x04, 0xcb, 0x5c, 0x4e, 0x45, 0xcd,
-	0xc2, 0x99, 0x7d, 0xbc, 0x5a, 0x83, 0x97, 0x06, 0x2e, 0x3b, 0xb4, 0x97, 0x70, 0xc9, 0x7a, 0x27,
-	0xf7, 0x31, 0xa0, 0x82, 0x3c, 0x81, 0x5d, 0xd5, 0x6c, 0x85, 0xca, 0xdc, 0xa0, 0xcf, 0x6e, 0x4f,
-	0x73, 0x72, 0x58, 0x9f, 0xe7, 0xb0, 0x57, 0x74, 0x33, 0xf3, 0x12, 0x24, 0x58, 0xba, 0xde, 0xd3,
-	0xee, 0xc2, 0xd3, 0x4b, 0x2f, 0x19, 0x50, 0xd1, 0xfc, 0x79, 0x1d, 0xee, 0xad, 0x90, 0x87, 0x0c,
-	0xa0, 0xa6, 0xe2, 0x04, 0x67, 0x57, 0xc6, 0xc3, 0xba, 0x3e, 0xd8, 0x3e, 0x1a, 0xf7, 0xcf, 0x96,
-	0x06, 0x84, 0x7c, 0x0f, 0xa4, 0x48, 0x9e, 0xa3, 0xda, 0x5a, 0x85, 0xf7, 0xaf, 0x29, 0x61, 0xa1,
-	0x3e, 0xc5, 0x54, 0x74, 0xc5, 0x7e, 0x32, 0xa3, 0xac, 0x3d, 0xcb, 0x66, 0x11, 0x82, 0x86, 0x7a,
-	0xdb, 0x3e, 0x5a, 0xdd, 0x69, 0xc3, 0xcc, 0x4b, 0xb9, 0x17, 0x88, 0x98, 0xa9, 0xbe, 0xd8, 0x2b,
-	0xf8, 0x36, 0x5e, 0x9a, 0x3f, 0xc2, 0x9d, 0xde, 0xb0, 0x8f, 0xea, 0x0c, 0xe8, 0x78, 0x42, 0x53,
-	0x41, 0x8e, 0xa0, 0x22, 0x1b, 0xdb, 0x6c, 0x75, 0xd5, 0x21, 0xad, 0x62, 0x9c, 0xe2, 0xcf, 0xe9,
-	0xac, 0x63, 0xf7, 0x86, 0x7d, 0xa3, 0xc6, 0x88, 0x39, 0xe0, 0x8b, 0x40, 0xef, 0xb9, 0xde, 0xb7,
-	0xbf, 0x9f, 0xd7, 0xad, 0x37, 0xe7, 0x75, 0xeb, 0xaf, 0xf3, 0xba, 0xf5, 0xfa, 0xa2, 0xbe, 0xf6,
-	0xe6, 0xa2, 0xbe, 0xf6, 0xc7, 0x45, 0x7d, 0xed, 0x87, 0x8f, 0xc7, 0xb1, 0x88, 0x4e, 0x7c, 0x3b,
-	0x60, 0x93, 0xb6, 0xf6, 0x9c, 0x78, 0x3e, 0x7f, 0x1c, 0x33, 0x73, 0x6c, 0x9f, 0x5e, 0xfa, 0x77,
-	0x48, 0xcc, 0xa7, 0x94, 0xfb, 0x9b, 0xf8, 0x4b, 0xfa, 0xe1, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff,
-	0xa3, 0x29, 0xc3, 0x31, 0x34, 0x09, 0x00, 0x00,
+	// 974 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0x5f, 0x6f, 0x1b, 0x45,
+	0x10, 0xcf, 0xc5, 0x4e, 0x4a, 0xc6, 0x71, 0x1b, 0x36, 0x29, 0x35, 0x41, 0x38, 0x96, 0x2b, 0x15,
+	0x17, 0xd1, 0xb3, 0x6c, 0x90, 0x10, 0xbc, 0x61, 0xab, 0xa5, 0x2e, 0x88, 0x3f, 0x67, 0xb7, 0x20,
+	0x04, 0x3a, 0xdd, 0x9f, 0xb5, 0xef, 0x94, 0xf3, 0xad, 0x75, 0xbb, 0x76, 0xe3, 0x7c, 0x8a, 0x7e,
+	0x0b, 0xf8, 0x06, 0x7c, 0x00, 0x5e, 0x78, 0xec, 0x23, 0x6f, 0xa0, 0xe4, 0x53, 0xc0, 0x13, 0xda,
+	0xd9, 0xdd, 0xf3, 0xb9, 0x91, 0x95, 0xf6, 0x25, 0xba, 0x9d, 0xfd, 0xcd, 0xcc, 0x6f, 0xe6, 0x37,
+	0xb3, 0x0e, 0x7c, 0xe4, 0x7b, 0xfe, 0x32, 0x61, 0x69, 0xfb, 0x9c, 0xa5, 0x34, 0x60, 0x69, 0x10,
+	0xd3, 0x6c, 0x42, 0xdb, 0x8b, 0xce, 0xba, 0xc1, 0x9e, 0x65, 0x4c, 0x30, 0x52, 0xd3, 0x68, 0x7b,
+	0xfd, 0x72, 0xd1, 0x39, 0x3e, 0x9a, 0xb0, 0x09, 0x43, 0x50, 0x5b, 0x7e, 0x29, 0xfc, 0xf1, 0xc9,
+	0x84, 0xb1, 0x49, 0x42, 0xdb, 0x78, 0xf2, 0xe7, 0xe3, 0xb6, 0x88, 0xa7, 0x94, 0x0b, 0x6f, 0x3a,
+	0xd3, 0x80, 0xf7, 0x05, 0x4d, 0x43, 0x9a, 0x4d, 0xe3, 0x54, 0xb4, 0x83, 0x6c, 0x39, 0x13, 0x4c,
+	0x62, 0xd9, 0x58, 0x5f, 0xe7, 0xec, 0x7c, 0x11, 0x04, 0x11, 0x0d, 0x4e, 0x67, 0x4c, 0x22, 0x17,
+	0x9d, 0x75, 0x83, 0x46, 0xdf, 0x33, 0xe8, 0xd5, 0x4d, 0x9c, 0x4e, 0x10, 0x9d, 0x70, 0xf7, 0x94,
+	0x2e, 0x35, 0xee, 0xfe, 0x46, 0xdc, 0x95, 0x90, 0x4d, 0x03, 0xa5, 0x33, 0x16, 0x44, 0x1a, 0x65,
+	0xbe, 0x35, 0xc6, 0x2e, 0x90, 0x4c, 0xe2, 0x49, 0x24, 0xff, 0xd2, 0x9c, 0x65, 0xc1, 0xa2, 0xf0,
+	0xcd, 0x3f, 0xb6, 0xa1, 0x3a, 0x48, 0x43, 0x7a, 0x46, 0xc3, 0xc7, 0xd4, 0x0b, 0x69, 0x46, 0x4e,
+	0xa0, 0x12, 0xb0, 0x94, 0xcf, 0xa7, 0x34, 0x73, 0xe3, 0xb0, 0x66, 0x35, 0xac, 0xd6, 0x9e, 0x03,
+	0xc6, 0x34, 0x08, 0x09, 0x81, 0x72, 0xe4, 0xf1, 0xa8, 0xb6, 0xdd, 0xb0, 0x5a, 0xfb, 0x0e, 0x7e,
+	0x93, 0x77, 0x60, 0x37, 0xa2, 0x32, 0x78, 0xad, 0xd4, 0xb0, 0x5a, 0x65, 0x47, 0x9f, 0xc8, 0x27,
+	0x50, 0x96, 0x5d, 0xae, 0x95, 0x1b, 0x56, 0xab, 0xd2, 0x3d, 0xb6, 0x95, 0x04, 0xb6, 0x91, 0xc0,
+	0x1e, 0x19, 0x09, 0x7a, 0xe5, 0x17, 0x7f, 0x9f, 0x58, 0x0e, 0xa2, 0x89, 0x0d, 0x87, 0xba, 0x0c,
+	0x37, 0x42, 0x52, 0x2e, 0x26, 0xdc, 0xc1, 0x84, 0x6f, 0xeb, 0x2b, 0x45, 0xf7, 0xb1, 0xcc, 0xde,
+	0x85, 0xdb, 0xaf, 0xe2, 0x15, 0x99, 0x5d, 0x24, 0x73, 0xb8, 0xee, 0xa1, 0x98, 0xdd, 0x85, 0xaa,
+	0xf1, 0xc1, 0x16, 0xd6, 0x6e, 0x20, 0x76, 0x5f, 0x1b, 0x1f, 0x4a, 0x1b, 0xb9, 0x07, 0xb7, 0x0c,
+	0x48, 0x9c, 0x29, 0x12, 0x6f, 0x21, 0x09, 0xe3, 0x3b, 0x3a, 0x93, 0x04, 0x9a, 0x4f, 0x60, 0xe7,
+	0x11, 0xcb, 0x4e, 0x39, 0xf9, 0x02, 0x6e, 0x28, 0x06, 0xbc, 0x56, 0x6a, 0x94, 0x5a, 0x95, 0xee,
+	0x07, 0xf6, 0xa6, 0x29, 0xb5, 0xd7, 0xda, 0xee, 0x18, 0xbf, 0xe6, 0x7f, 0x16, 0xec, 0xf5, 0x23,
+	0x2f, 0x4e, 0x07, 0xe9, 0x98, 0x5d, 0xaf, 0xc6, 0xd7, 0x50, 0x4d, 0x3c, 0x41, 0xb9, 0xd0, 0xa5,
+	0xa3, 0x2c, 0x6f, 0x90, 0x77, 0x5f, 0x79, 0x6b, 0xf1, 0x7b, 0xa0, 0xcf, 0xee, 0x58, 0xd6, 0x83,
+	0x6a, 0x56, 0xba, 0x27, 0x9b, 0x83, 0x61, 0xd9, 0x4e, 0x45, 0x39, 0xa9, 0x1e, 0x7c, 0x0e, 0xef,
+	0xe6, 0x9b, 0x45, 0x43, 0x4d, 0x8b, 0xbb, 0x01, 0x9b, 0xa7, 0x02, 0x07, 0xa1, 0xec, 0xdc, 0x29,
+	0x00, 0x54, 0x66, 0xde, 0x97, 0xd7, 0xcd, 0xdf, 0x2c, 0x20, 0x79, 0xf1, 0x3f, 0xc4, 0x22, 0xfa,
+	0x4e, 0x2e, 0x20, 0xe9, 0x01, 0x04, 0xd2, 0xea, 0xc6, 0xe9, 0x98, 0x61, 0x13, 0x2a, 0xdd, 0xbb,
+	0x9b, 0x49, 0xe5, 0x11, 0x9c, 0xbd, 0x20, 0xef, 0xe4, 0x37, 0x70, 0x1b, 0xb7, 0xd9, 0x8c, 0x48,
+	0x6c, 0x84, 0x57, 0x0d, 0x7b, 0xcf, 0x5e, 0x6d, 0xbf, 0xad, 0xb6, 0xdf, 0xc6, 0xe4, 0xdf, 0xce,
+	0xb8, 0x43, 0xd0, 0x53, 0x31, 0x1d, 0xa8, 0xd9, 0x68, 0xfe, 0x5e, 0x02, 0xf2, 0x28, 0x4e, 0xbd,
+	0x24, 0x3e, 0xa7, 0xe1, 0x1b, 0x08, 0xf6, 0x14, 0x8e, 0xc6, 0xc6, 0xcd, 0x2d, 0x54, 0xb5, 0xfd,
+	0xfa, 0x55, 0x91, 0xf1, 0xd5, 0xbc, 0x9f, 0x01, 0x60, 0x39, 0x2a, 0x58, 0x49, 0xef, 0x9b, 0x09,
+	0x96, 0xbf, 0x12, 0x8b, 0x8e, 0x8d, 0xf4, 0x9d, 0x3d, 0x34, 0xe9, 0xce, 0xdc, 0xcc, 0xbc, 0xe7,
+	0xee, 0xea, 0xbd, 0xd1, 0xeb, 0xba, 0x9a, 0xa1, 0xb5, 0xb7, 0x49, 0xc6, 0x70, 0xbc, 0xe7, 0xfd,
+	0xdc, 0xe6, 0x54, 0xb3, 0xe2, 0x91, 0x3c, 0x05, 0xe2, 0x8b, 0xc0, 0xe5, 0x73, 0x7f, 0x1a, 0x73,
+	0x1e, 0xb3, 0x54, 0x3e, 0x77, 0xb8, 0xbd, 0xc5, 0x98, 0xeb, 0x8f, 0xe6, 0xa2, 0x63, 0x0f, 0x73,
+	0xfc, 0x57, 0x74, 0xe9, 0x1c, 0xf8, 0x22, 0x58, 0xb3, 0x90, 0x2f, 0x61, 0x07, 0x65, 0xc0, 0xad,
+	0xae, 0x74, 0x3b, 0x9b, 0x3b, 0x85, 0xba, 0x5d, 0xd5, 0xc6, 0x51, 0xfe, 0xcd, 0x7f, 0x2d, 0x38,
+	0x40, 0x08, 0x76, 0x62, 0x48, 0xbd, 0x84, 0x86, 0xc4, 0x81, 0xea, 0xc2, 0x4b, 0xe2, 0xd0, 0x13,
+	0x2c, 0x73, 0x39, 0x15, 0x35, 0x0b, 0xf7, 0xf7, 0xc1, 0xe6, 0x1e, 0x3c, 0x33, 0x70, 0x39, 0xa7,
+	0xbd, 0x84, 0x4b, 0xd6, 0xfb, 0x79, 0x8c, 0x21, 0x15, 0xe4, 0x21, 0x1c, 0xa8, 0x91, 0x2b, 0x28,
+	0xf3, 0x1a, 0xd3, 0x76, 0x73, 0x96, 0x93, 0x43, 0x7d, 0x9e, 0xc0, 0x61, 0x31, 0xcc, 0xc2, 0x4b,
+	0x90, 0x60, 0xe9, 0xfa, 0x48, 0x07, 0xab, 0x48, 0xcf, 0xbc, 0x64, 0x48, 0x45, 0xf3, 0xd7, 0x6d,
+	0xb8, 0xb3, 0xa1, 0x3d, 0x64, 0x08, 0x35, 0x95, 0x27, 0x38, 0xbf, 0xb2, 0x24, 0xd6, 0xf5, 0xc9,
+	0x8e, 0xd0, 0xb9, 0x7f, 0xbe, 0xb6, 0x26, 0xe4, 0x47, 0x20, 0x45, 0xf2, 0x1c, 0xbb, 0xad, 0xbb,
+	0xf0, 0xe1, 0x35, 0x12, 0x16, 0xf4, 0x29, 0x96, 0xa2, 0x15, 0xfb, 0xc5, 0x2c, 0xb4, 0x8e, 0x2c,
+	0x87, 0x45, 0x08, 0x1a, 0xea, 0x97, 0xf7, 0xfe, 0xe6, 0x49, 0x1b, 0x65, 0x5e, 0xca, 0xbd, 0x40,
+	0xc4, 0x4c, 0xcd, 0xc5, 0x61, 0x21, 0xb6, 0x89, 0xd2, 0xfc, 0x19, 0x6e, 0xf5, 0x46, 0x7d, 0xec,
+	0xce, 0x90, 0x4e, 0xa6, 0x34, 0x15, 0x64, 0x00, 0x15, 0x39, 0xd8, 0xe6, 0x85, 0x57, 0x13, 0xd2,
+	0x2a, 0xe6, 0x29, 0xfe, 0xc0, 0x2e, 0x3a, 0x76, 0x6f, 0xd4, 0x37, 0xdd, 0x18, 0x33, 0x07, 0x7c,
+	0x11, 0xe8, 0xd7, 0xae, 0xf7, 0xfd, 0x9f, 0x17, 0x75, 0xeb, 0xe5, 0x45, 0xdd, 0xfa, 0xe7, 0xa2,
+	0x6e, 0xbd, 0xb8, 0xac, 0x6f, 0xbd, 0xbc, 0xac, 0x6f, 0xfd, 0x75, 0x59, 0xdf, 0xfa, 0xe9, 0xd3,
+	0x49, 0x2c, 0xa2, 0xb9, 0x6f, 0x07, 0x6c, 0xda, 0xd6, 0x91, 0x13, 0xcf, 0xe7, 0x0f, 0x62, 0x66,
+	0x8e, 0xed, 0xb3, 0x57, 0xfe, 0x41, 0x12, 0xcb, 0x19, 0xe5, 0xfe, 0x2e, 0xfe, 0xaa, 0x7e, 0xfc,
+	0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, 0xc7, 0xfb, 0xb5, 0x46, 0x09, 0x00, 0x00,
 }
 
 func (m *IndexedHeader) Marshal() (dAtA []byte, err error) {
@@ -769,10 +769,10 @@ func (m *IndexedHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 		i--
 		dAtA[i] = 0x12
 	}
-	if len(m.ChainId) > 0 {
-		i -= len(m.ChainId)
-		copy(dAtA[i:], m.ChainId)
-		i = encodeVarintZoneconcierge(dAtA, i, uint64(len(m.ChainId)))
+	if len(m.ConsumerId) > 0 {
+		i -= len(m.ConsumerId)
+		copy(dAtA[i:], m.ConsumerId)
+		i = encodeVarintZoneconcierge(dAtA, i, uint64(len(m.ConsumerId)))
 		i--
 		dAtA[i] = 0xa
 	}
@@ -865,10 +865,10 @@ func (m *ChainInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 		i--
 		dAtA[i] = 0x12
 	}
-	if len(m.ChainId) > 0 {
-		i -= len(m.ChainId)
-		copy(dAtA[i:], m.ChainId)
-		i = encodeVarintZoneconcierge(dAtA, i, uint64(len(m.ChainId)))
+	if len(m.ConsumerId) > 0 {
+		i -= len(m.ConsumerId)
+		copy(dAtA[i:], m.ConsumerId)
+		i = encodeVarintZoneconcierge(dAtA, i, uint64(len(m.ConsumerId)))
 		i--
 		dAtA[i] = 0xa
 	}
@@ -1002,10 +1002,10 @@ func (m *FinalizedChainInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 		i--
 		dAtA[i] = 0x12
 	}
-	if len(m.ChainId) > 0 {
-		i -= len(m.ChainId)
-		copy(dAtA[i:], m.ChainId)
-		i = encodeVarintZoneconcierge(dAtA, i, uint64(len(m.ChainId)))
+	if len(m.ConsumerId) > 0 {
+		i -= len(m.ConsumerId)
+		copy(dAtA[i:], m.ConsumerId)
+		i = encodeVarintZoneconcierge(dAtA, i, uint64(len(m.ConsumerId)))
 		i--
 		dAtA[i] = 0xa
 	}
@@ -1188,7 +1188,7 @@ func (m *IndexedHeader) Size() (n int) {
 	}
 	var l int
 	_ = l
-	l = len(m.ChainId)
+	l = len(m.ConsumerId)
 	if l > 0 {
 		n += 1 + l + sovZoneconcierge(uint64(l))
 	}
@@ -1241,7 +1241,7 @@ func (m *ChainInfo) Size() (n int) {
 	}
 	var l int
 	_ = l
-	l = len(m.ChainId)
+	l = len(m.ConsumerId)
 	if l > 0 {
 		n += 1 + l + sovZoneconcierge(uint64(l))
 	}
@@ -1282,7 +1282,7 @@ func (m *FinalizedChainInfo) Size() (n int) {
 	}
 	var l int
 	_ = l
-	l = len(m.ChainId)
+	l = len(m.ConsumerId)
 	if l > 0 {
 		n += 1 + l + sovZoneconcierge(uint64(l))
 	}
@@ -1407,7 +1407,7 @@ func (m *IndexedHeader) Unmarshal(dAtA []byte) error {
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumerId", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -1435,7 +1435,7 @@ func (m *IndexedHeader) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ChainId = string(dAtA[iNdEx:postIndex])
+			m.ConsumerId = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {
@@ -1768,7 +1768,7 @@ func (m *ChainInfo) Unmarshal(dAtA []byte) error {
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumerId", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -1796,7 +1796,7 @@ func (m *ChainInfo) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ChainId = string(dAtA[iNdEx:postIndex])
+			m.ConsumerId = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {
@@ -2063,7 +2063,7 @@ func (m *FinalizedChainInfo) Unmarshal(dAtA []byte) error {
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumerId", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -2091,7 +2091,7 @@ func (m *FinalizedChainInfo) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ChainId = string(dAtA[iNdEx:postIndex])
+			m.ConsumerId = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {

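After this rename, zoneconcierge queries identify a consumer by its IBC client ID through the consumer_id field (ConsumerId in the regenerated Go types) rather than chain_id. Below is a minimal sketch of calling the regenerated gRPC client with the renamed field; the endpoint address, module import path, and client ID value are illustrative assumptions, not part of the patch.

```go
// Illustrative sketch only: querying FinalizedChainInfoUntilHeight with the renamed ConsumerId field.
// The gRPC address, import path, and client ID below are assumptions for illustration.
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	zctypes "github.com/babylonlabs-io/babylon/x/zoneconcierge/types" // assumed module path
)

func main() {
	// Connect to a node's gRPC endpoint (address is an assumption).
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := zctypes.NewQueryClient(conn)

	// Requests now carry ConsumerId (the IBC client ID of the consumer)
	// where they previously carried ChainId.
	resp, err := client.FinalizedChainInfoUntilHeight(context.Background(),
		&zctypes.QueryFinalizedChainInfoUntilHeightRequest{
			ConsumerId: "07-tendermint-0", // hypothetical client ID of the consumer's IBC light client
			Height:     100,
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp)
}
```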
From e2aa1accaf5aa2b2a2b877ccb24b4b4af2eabeb5 Mon Sep 17 00:00:00 2001
From: Runchao Han <me@runchao.rocks>
Date: Mon, 19 Aug 2024 15:36:27 +1000
Subject: [PATCH 2/3] fix some e2e

---
 test/e2e/btc_timestamping_e2e_test.go         | 21 +++++--
 .../btc_timestamping_phase2_hermes_test.go    | 11 +++-
 test/e2e/btc_timestamping_phase2_rly_test.go  | 11 +++-
 test/e2e/configurer/chain/queries.go          | 16 +++---
 test/e2e/configurer/chain/queries_ibc.go      | 15 +++++
 testutil/datagen/tendermint.go                | 13 ++---
 x/zoneconcierge/README.md                     | 16 +++---
 .../keeper/epoch_chain_info_indexer_test.go   | 12 ++--
 x/zoneconcierge/keeper/fork_indexer_test.go   | 11 ++--
 x/zoneconcierge/keeper/grpc_query.go          |  4 +-
 x/zoneconcierge/keeper/grpc_query_test.go     | 56 +++++++++----------
 x/zoneconcierge/keeper/header_handler.go      |  4 +-
 .../keeper/ibc_header_decorator.go            | 19 +++----
 .../keeper/ibc_packet_btc_timestamp.go        | 20 +++----
 x/zoneconcierge/keeper/keeper_test.go         | 12 ++--
 .../keeper/proof_btc_timestamp_test.go        |  4 +-
 x/zoneconcierge/types/btc_timestamp_test.go   |  4 +-
 17 files changed, 138 insertions(+), 111 deletions(-)

diff --git a/test/e2e/btc_timestamping_e2e_test.go b/test/e2e/btc_timestamping_e2e_test.go
index f67141503..c344e6e17 100644
--- a/test/e2e/btc_timestamping_e2e_test.go
+++ b/test/e2e/btc_timestamping_e2e_test.go
@@ -112,10 +112,19 @@ func (s *BTCTimestampingTestSuite) Test4IbcCheckpointing() {
 	nonValidatorNode, err := chainA.GetNodeAtIndex(2)
 	s.NoError(err)
 
+	// Query open IBC channels and assert there is only one
+	channels, err := nonValidatorNode.QueryIBCChannels()
+	s.NoError(err)
+	s.Equal(1, len(channels.Channels), "Expected only one open IBC channel")
+	// Get the client ID under this IBC channel
+	channelClientState, err := nonValidatorNode.QueryChannelClientState(channels.Channels[0].ChannelId, channels.Channels[0].PortId)
+	s.NoError(err)
+	clientID := channelClientState.IdentifiedClientState.ClientId
+
 	// Query checkpoint chain info for opposing chain
-	chainsInfo, err := nonValidatorNode.QueryChainsInfo([]string{initialization.ChainBID})
+	chainsInfo, err := nonValidatorNode.QueryChainsInfo([]string{clientID})
 	s.NoError(err)
-	s.Equal(chainsInfo[0].ConsumerId, initialization.ChainBID)
+	s.Equal(chainsInfo[0].ConsumerId, clientID)
 
 	// Finalize epoch 1, 2, 3, as first headers of opposing chain are in epoch 3
 	var (
@@ -140,17 +149,17 @@ func (s *BTCTimestampingTestSuite) Test4IbcCheckpointing() {
 	nonValidatorNode.WaitForNextBlock()
 
 	// Check we have epoch info for opposing chain and some basic assertions
-	epochChainsInfo, err := nonValidatorNode.QueryEpochChainsInfo(endEpochNum, []string{initialization.ChainBID})
+	epochChainsInfo, err := nonValidatorNode.QueryEpochChainsInfo(endEpochNum, []string{clientID})
 	s.NoError(err)
-	s.Equal(epochChainsInfo[0].ConsumerId, initialization.ChainBID)
+	s.Equal(epochChainsInfo[0].ConsumerId, clientID)
 	s.Equal(epochChainsInfo[0].LatestHeader.BabylonEpoch, endEpochNum)
 
 	// Check we have finalized epoch info for opposing chain and some basic assertions
-	finalizedChainsInfo, err := nonValidatorNode.QueryFinalizedChainsInfo([]string{initialization.ChainBID})
+	finalizedChainsInfo, err := nonValidatorNode.QueryFinalizedChainsInfo([]string{clientID})
 	s.NoError(err)
 
 	// TODO Add more assertion here. Maybe check proofs ?
-	s.Equal(finalizedChainsInfo[0].FinalizedChainInfo.ConsumerId, initialization.ChainBID)
+	s.Equal(finalizedChainsInfo[0].FinalizedChainInfo.ConsumerId, clientID)
 	s.Equal(finalizedChainsInfo[0].EpochInfo.EpochNumber, endEpochNum)
 
 	currEpoch, err := nonValidatorNode.QueryCurrentEpoch()
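The same two-step lookup (list the open channel, then fetch the client state under it) is repeated in the Hermes and rly phase-2 suites below. A hedged sketch of a shared helper, assuming it would live in the `test/e2e/configurer/chain` package next to `QueryChannelClientState`; the method name `consumerIDFromChannel` is hypothetical and not part of this patch:

```go
// Hypothetical helper for the e2e suites: resolve the consumer ID, i.e. the IBC
// client ID, backing a given channel/port. It mirrors the inline lookups in the
// tests of this series and is illustrative only.
package chain

func (n *NodeConfig) consumerIDFromChannel(channelID, portID string) (string, error) {
	// QueryChannelClientState is the gRPC-gateway query added in this patch
	// (test/e2e/configurer/chain/queries_ibc.go).
	resp, err := n.QueryChannelClientState(channelID, portID)
	if err != nil {
		return "", err
	}
	return resp.IdentifiedClientState.ClientId, nil
}
```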
diff --git a/test/e2e/btc_timestamping_phase2_hermes_test.go b/test/e2e/btc_timestamping_phase2_hermes_test.go
index b8eb6a1e8..4f656e068 100644
--- a/test/e2e/btc_timestamping_phase2_hermes_test.go
+++ b/test/e2e/btc_timestamping_phase2_hermes_test.go
@@ -4,7 +4,6 @@ import (
 	"time"
 
 	"github.com/babylonlabs-io/babylon/test/e2e/configurer"
-	"github.com/babylonlabs-io/babylon/test/e2e/initialization"
 	ct "github.com/babylonlabs-io/babylon/x/checkpointing/types"
 	"github.com/cosmos/cosmos-sdk/types/query"
 	channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types"
@@ -51,6 +50,8 @@ func (s *BTCTimestampingPhase2HermesTestSuite) TearDownSuite() {
 
 func (s *BTCTimestampingPhase2HermesTestSuite) Test1IbcCheckpointingPhase2Hermes() {
 	chainA := s.configurer.GetChainConfig(0)
+	nonValidatorNode, err := chainA.GetNodeAtIndex(2)
+	s.NoError(err)
 
 	babylonNode, err := chainA.GetNodeAtIndex(2)
 	s.NoError(err)
@@ -98,9 +99,15 @@ func (s *BTCTimestampingPhase2HermesTestSuite) Test1IbcCheckpointingPhase2Hermes
 		return true
 	}, time.Minute, time.Second*2)
 
+	// Get the client ID under this IBC channel
+	channelClientState, err := nonValidatorNode.QueryChannelClientState(babylonChannel.ChannelId, babylonChannel.PortId)
+	s.NoError(err)
+	clientID := channelClientState.IdentifiedClientState.ClientId
+
 	// Query checkpoint chain info for the consumer chain
-	listHeaderResp, err := babylonNode.QueryListHeaders(initialization.ChainBID, &query.PageRequest{Limit: 1})
+	listHeaderResp, err := babylonNode.QueryListHeaders(clientID, &query.PageRequest{Limit: 1})
 	s.NoError(err)
+	s.GreaterOrEqual(len(listHeaderResp.Headers), 1)
 	startEpochNum := listHeaderResp.Headers[0].BabylonEpoch
 	endEpochNum := startEpochNum + 2
 
diff --git a/test/e2e/btc_timestamping_phase2_rly_test.go b/test/e2e/btc_timestamping_phase2_rly_test.go
index 7dc31959e..744fc7d06 100644
--- a/test/e2e/btc_timestamping_phase2_rly_test.go
+++ b/test/e2e/btc_timestamping_phase2_rly_test.go
@@ -4,7 +4,6 @@ import (
 	"time"
 
 	"github.com/babylonlabs-io/babylon/test/e2e/configurer"
-	"github.com/babylonlabs-io/babylon/test/e2e/initialization"
 	ct "github.com/babylonlabs-io/babylon/x/checkpointing/types"
 	"github.com/cosmos/cosmos-sdk/types/query"
 	channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types"
@@ -51,6 +50,8 @@ func (s *BTCTimestampingPhase2RlyTestSuite) TearDownSuite() {
 
 func (s *BTCTimestampingPhase2RlyTestSuite) Test1IbcCheckpointingPhase2Rly() {
 	chainA := s.configurer.GetChainConfig(0)
+	nonValidatorNode, err := chainA.GetNodeAtIndex(2)
+	s.NoError(err)
 
 	babylonNode, err := chainA.GetNodeAtIndex(2)
 	s.NoError(err)
@@ -98,9 +99,15 @@ func (s *BTCTimestampingPhase2RlyTestSuite) Test1IbcCheckpointingPhase2Rly() {
 		return true
 	}, time.Minute, time.Second*2)
 
+	// Get the client ID under this IBC channel
+	channelClientState, err := nonValidatorNode.QueryChannelClientState(babylonChannel.ChannelId, babylonChannel.PortId)
+	s.NoError(err)
+	clientID := channelClientState.IdentifiedClientState.ClientId
+
 	// Query checkpoint chain info for the consumer chain
-	listHeaderResp, err := babylonNode.QueryListHeaders(initialization.ChainBID, &query.PageRequest{Limit: 1})
+	listHeaderResp, err := babylonNode.QueryListHeaders(clientID, &query.PageRequest{Limit: 1})
 	s.NoError(err)
+	s.GreaterOrEqual(len(listHeaderResp.Headers), 1)
 	startEpochNum := listHeaderResp.Headers[0].BabylonEpoch
 	endEpochNum := startEpochNum + 2
 
diff --git a/test/e2e/configurer/chain/queries.go b/test/e2e/configurer/chain/queries.go
index 4e1725cd4..d981974d5 100644
--- a/test/e2e/configurer/chain/queries.go
+++ b/test/e2e/configurer/chain/queries.go
@@ -245,14 +245,14 @@ func (n *NodeConfig) QueryHeaderDepth(hash string) (uint64, error) {
 	return blcResponse.Depth, nil
 }
 
-func (n *NodeConfig) QueryListHeaders(chainID string, pagination *query.PageRequest) (*zctypes.QueryListHeadersResponse, error) {
+func (n *NodeConfig) QueryListHeaders(consumerID string, pagination *query.PageRequest) (*zctypes.QueryListHeadersResponse, error) {
 	queryParams := url.Values{}
 	if pagination != nil {
 		queryParams.Set("pagination.key", base64.URLEncoding.EncodeToString(pagination.Key))
 		queryParams.Set("pagination.limit", strconv.Itoa(int(pagination.Limit)))
 	}
 
-	path := fmt.Sprintf("babylon/zoneconcierge/v1/headers/%s", chainID)
+	path := fmt.Sprintf("babylon/zoneconcierge/v1/headers/%s", consumerID)
 	bz, err := n.QueryGRPCGateway(path, queryParams)
 	require.NoError(n.t, err)
 
@@ -264,10 +264,10 @@ func (n *NodeConfig) QueryListHeaders(chainID string, pagination *query.PageRequ
 	return &resp, nil
 }
 
-func (n *NodeConfig) QueryFinalizedChainsInfo(chainIDs []string) ([]*zctypes.FinalizedChainInfo, error) {
+func (n *NodeConfig) QueryFinalizedChainsInfo(consumerIDs []string) ([]*zctypes.FinalizedChainInfo, error) {
 	queryParams := url.Values{}
-	for _, chainId := range chainIDs {
-		queryParams.Add("chain_ids", chainId)
+	for _, consumerID := range consumerIDs {
+		queryParams.Add("consumer_ids", consumerID)
 	}
 
 	bz, err := n.QueryGRPCGateway("babylon/zoneconcierge/v1/finalized_chains_info", queryParams)
@@ -281,11 +281,11 @@ func (n *NodeConfig) QueryFinalizedChainsInfo(chainIDs []string) ([]*zctypes.Fin
 	return resp.FinalizedChainsInfo, nil
 }
 
-func (n *NodeConfig) QueryEpochChainsInfo(epochNum uint64, chainIDs []string) ([]*zctypes.ChainInfo, error) {
+func (n *NodeConfig) QueryEpochChainsInfo(epochNum uint64, consumerIDs []string) ([]*zctypes.ChainInfo, error) {
 	queryParams := url.Values{}
-	for _, chainId := range chainIDs {
+	for _, consumerID := range consumerIDs {
 		queryParams.Add("epoch_num", fmt.Sprintf("%d", epochNum))
-		queryParams.Add("chain_ids", chainId)
+		queryParams.Add("consumer_ids", consumerID)
 	}
 
 	bz, err := n.QueryGRPCGateway("babylon/zoneconcierge/v1/epoch_chains_info", queryParams)
diff --git a/test/e2e/configurer/chain/queries_ibc.go b/test/e2e/configurer/chain/queries_ibc.go
index f5889b229..39c5cc3e7 100644
--- a/test/e2e/configurer/chain/queries_ibc.go
+++ b/test/e2e/configurer/chain/queries_ibc.go
@@ -23,6 +23,21 @@ func (n *NodeConfig) QueryIBCChannels() (*channeltypes.QueryChannelsResponse, er
 	return &resp, nil
 }
 
+func (n *NodeConfig) QueryChannelClientState(channelID, portID string) (*channeltypes.QueryChannelClientStateResponse, error) {
+	path := fmt.Sprintf("/ibc/core/channel/v1/channels/%s/ports/%s/client_state", channelID, portID)
+	bz, err := n.QueryGRPCGateway(path, url.Values{})
+	if err != nil {
+		return nil, err
+	}
+
+	var resp channeltypes.QueryChannelClientStateResponse
+	if err := util.Cdc.UnmarshalJSON(bz, &resp); err != nil {
+		return nil, err
+	}
+
+	return &resp, nil
+}
+
 func (n *NodeConfig) QueryNextSequenceReceive(channelID, portID string) (*channeltypes.QueryNextSequenceReceiveResponse, error) {
 	path := fmt.Sprintf("/ibc/core/channel/v1/channels/%s/ports/%s/next_sequence", channelID, portID)
 	bz, err := n.QueryGRPCGateway(path, url.Values{})
diff --git a/testutil/datagen/tendermint.go b/testutil/datagen/tendermint.go
index c09368739..82985156d 100644
--- a/testutil/datagen/tendermint.go
+++ b/testutil/datagen/tendermint.go
@@ -21,11 +21,11 @@ func GenRandomTMHeader(r *rand.Rand, chainID string, height uint64) *cmtproto.He
 	}
 }
 
-func GenRandomIBCTMHeader(r *rand.Rand, chainID string, height uint64) *ibctmtypes.Header {
+func GenRandomIBCTMHeader(r *rand.Rand, height uint64) *ibctmtypes.Header {
 	return &ibctmtypes.Header{
 		SignedHeader: &cmtproto.SignedHeader{
 			Header: &cmtproto.Header{
-				ChainID: chainID,
+				ChainID: GenRandomHexStr(r, 10),
 				Height:  int64(height),
 				AppHash: GenRandomByteArray(r, 32),
 			},
@@ -34,13 +34,10 @@ func GenRandomIBCTMHeader(r *rand.Rand, chainID string, height uint64) *ibctmtyp
 }
 
 func GenRandomTMHeaderInfo(r *rand.Rand, chainID string, height uint64) *header.Info {
-	tmHeader := GenRandomIBCTMHeader(r, chainID, height)
 	return &header.Info{
-		Height:  tmHeader.Header.Height,
-		Hash:    tmHeader.Header.DataHash,
-		Time:    tmHeader.Header.Time,
-		ChainID: tmHeader.Header.ChainID,
-		AppHash: tmHeader.Header.AppHash,
+		Height:  int64(height),
+		ChainID: chainID,
+		AppHash: GenRandomByteArray(r, 32),
 	}
 }
 
diff --git a/x/zoneconcierge/README.md b/x/zoneconcierge/README.md
index f873f4030..046c5567a 100644
--- a/x/zoneconcierge/README.md
+++ b/x/zoneconcierge/README.md
@@ -211,9 +211,9 @@ message Params {
 ### ChainInfo
 
 The [chain info storage](./keeper/chain_info_indexer.go) maintains `ChainInfo`
-for each PoS blockchain. The key is the PoS blockchain's `ChainID`, and the
-value is a `ChainInfo` object. The `ChainInfo` is a structure storing the
-information of a PoS blockchain that checkpoints to Babylon.
+for each PoS blockchain. The key is the PoS blockchain's `ConsumerID`, which is the
+ID of the IBC light client. The value is a `ChainInfo` object. The `ChainInfo` is
+a structure storing the information of a PoS blockchain that checkpoints to Babylon.
 
 ```protobuf
 // ChainInfo is the information of a CZ
@@ -235,14 +235,14 @@ message ChainInfo {
 
 The [epoch chain info storage](./keeper/epoch_chain_info_indexer.go) maintains
 `ChainInfo` at the end of each Babylon epoch for each PoS blockchain. The key is
-the PoS blockchain's `ChainID` plus the epoch number, and the value is a
+the PoS blockchain's `ConsumerID` plus the epoch number, and the value is a
 `ChainInfo` object.
 
 ### CanonicalChain
 
 The [canonical chain storage](./keeper/canonical_chain_indexer.go) maintains the
 metadata of canonical IBC headers of a PoS blockchain. The key is the consumer
-chain's `ChainID` plus the height, and the value is a `IndexedHeader` object.
+chain's `ConsumerID` plus the height, and the value is an `IndexedHeader` object.
 `IndexedHeader` is a structure storing IBC header's metadata.
 
 ```protobuf
@@ -277,7 +277,7 @@ message IndexedHeader {
 ### Fork
 
 The [fork storage](./keeper/fork_indexer.go) maintains the metadata of canonical
-IBC headers of a PoS blockchain. The key is the PoS blockchain's `ChainID` plus
+IBC headers of a PoS blockchain. The key is the PoS blockchain's `ConsumerID` plus
 the height, and the value is a list of `IndexedHeader` objects, which represent
 fork headers at that height.
 
@@ -421,9 +421,9 @@ Babylon. The logic is defined at
       is still canonical in the segment to the current tip of the BTC light
       client.
 3. For each of these IBC channels:
-   1. Find the `ChainID` of the counterparty chain (i.e., the PoS blockchain) in
+   1. Find the `ConsumerID` of the counterparty chain (i.e., the PoS blockchain) in
       the IBC channel.
-   2. Get the `ChainInfo` of the `ChainID` at the last finalized epoch.
+   2. Get the `ChainInfo` of the `ConsumerID` at the last finalized epoch.
    3. Get the metadata of the last finalized epoch and its corresponding raw
       checkpoint.
    4. Generate the proof that the last PoS blockchain's canonical header is
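The README hunks above describe four stores that are now keyed by the consumer ID (the IBC client ID) instead of the self-reported chain ID. A hedged sketch of the composite keys involved; this is illustrative only, and the module's actual key encoding in the indexer keepers may differ:

```go
// Illustration only: composite keys shaped as the README describes, keyed by the
// consumer ID (IBC client ID). Not the module's actual key encoding.
package sketch

import "encoding/binary"

// chainInfoKey: ChainInfo indexed by consumer ID.
func chainInfoKey(consumerID string) []byte {
	return []byte(consumerID)
}

// epochChainInfoKey: per-epoch ChainInfo indexed by consumer ID plus epoch number.
func epochChainInfoKey(consumerID string, epochNum uint64) []byte {
	return binary.BigEndian.AppendUint64([]byte(consumerID), epochNum)
}

// canonicalHeaderKey: canonical IndexedHeader metadata indexed by consumer ID plus
// height. The fork store uses the same key shape but maps to a list of headers.
func canonicalHeaderKey(consumerID string, height uint64) []byte {
	return binary.BigEndian.AppendUint64([]byte(consumerID), height)
}
```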
diff --git a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go b/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go
index 59dc52c50..472c76dd5 100644
--- a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go
+++ b/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go
@@ -20,7 +20,7 @@ func FuzzEpochChainInfoIndexer(f *testing.F) {
 		babylonApp := app.Setup(t, false)
 		zcKeeper := babylonApp.ZoneConciergeKeeper
 		ctx := babylonApp.NewContext(false)
-		czChainID := "test-chainid"
+		consumerID := "test-consumerid"
 
 		hooks := zcKeeper.Hooks()
 
@@ -33,13 +33,13 @@ func FuzzEpochChainInfoIndexer(f *testing.F) {
 		// invoke the hook a random number of times to simulate a random number of blocks
 		numHeaders := datagen.RandomInt(r, 100) + 1
 		numForkHeaders := datagen.RandomInt(r, 10) + 1
-		SimulateNewHeadersAndForks(ctx, r, &zcKeeper, czChainID, 0, numHeaders, numForkHeaders)
+		SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, 0, numHeaders, numForkHeaders)
 
 		// end this epoch
 		hooks.AfterEpochEnds(ctx, epochNum)
 
 		// check if the chain info of this epoch is recorded or not
-		chainInfoWithProof, err := zcKeeper.GetEpochChainInfo(ctx, czChainID, epochNum)
+		chainInfoWithProof, err := zcKeeper.GetEpochChainInfo(ctx, consumerID, epochNum)
 		chainInfo := chainInfoWithProof.ChainInfo
 		require.NoError(t, err)
 		require.Equal(t, numHeaders-1, chainInfo.LatestHeader.Height)
@@ -57,7 +57,7 @@ func FuzzGetEpochHeaders(f *testing.F) {
 		babylonApp := app.Setup(t, false)
 		zcKeeper := babylonApp.ZoneConciergeKeeper
 		ctx := babylonApp.NewContext(false)
-		czChainID := "test-chainid"
+		consumerID := "test-consumerid"
 
 		hooks := zcKeeper.Hooks()
 
@@ -87,7 +87,7 @@ func FuzzGetEpochHeaders(f *testing.F) {
 			numHeadersList = append(numHeadersList, datagen.RandomInt(r, 100)+1)
 			numForkHeadersList = append(numForkHeadersList, datagen.RandomInt(r, 10)+1)
 			// trigger hooks to append these headers and fork headers
-			expectedHeaders, _ := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, czChainID, nextHeightList[i], numHeadersList[i], numForkHeadersList[i])
+			expectedHeaders, _ := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, nextHeightList[i], numHeadersList[i], numForkHeadersList[i])
 			expectedHeadersMap[epochNum] = expectedHeaders
 			// prepare nextHeight for the next request
 			nextHeightList = append(nextHeightList, nextHeightList[i]+numHeadersList[i])
@@ -102,7 +102,7 @@ func FuzzGetEpochHeaders(f *testing.F) {
 		for i := uint64(0); i < numReqs; i++ {
 			epochNum := epochNumList[i]
 			// check if the headers are same as expected
-			headers, err := zcKeeper.GetEpochHeaders(ctx, czChainID, epochNum)
+			headers, err := zcKeeper.GetEpochHeaders(ctx, consumerID, epochNum)
 			require.NoError(t, err)
 			require.Equal(t, len(expectedHeadersMap[epochNum]), len(headers))
 			for j := 0; j < len(expectedHeadersMap[epochNum]); j++ {
diff --git a/x/zoneconcierge/keeper/fork_indexer_test.go b/x/zoneconcierge/keeper/fork_indexer_test.go
index b54fce894..e1fb2a954 100644
--- a/x/zoneconcierge/keeper/fork_indexer_test.go
+++ b/x/zoneconcierge/keeper/fork_indexer_test.go
@@ -18,28 +18,27 @@ func FuzzForkIndexer(f *testing.F) {
 		babylonApp := app.Setup(t, false)
 		zcKeeper := babylonApp.ZoneConciergeKeeper
 		ctx := babylonApp.NewContext(false)
-		czChainID := "test-chainid"
+		consumerID := "test-consumerid"
 
 		// invoke the hook a random number of times to simulate a random number of blocks
 		numHeaders := datagen.RandomInt(r, 100) + 1
 		numForkHeaders := datagen.RandomInt(r, 10) + 1
-		_, forkHeaders := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, czChainID, 0, numHeaders, numForkHeaders)
+		_, forkHeaders := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, 0, numHeaders, numForkHeaders)
 
 		// check if the fork is updated or not
-		forks := zcKeeper.GetForks(ctx, czChainID, numHeaders-1)
+		forks := zcKeeper.GetForks(ctx, consumerID, numHeaders-1)
 		require.Equal(t, numForkHeaders, uint64(len(forks.Headers)))
 		for i := range forks.Headers {
-			require.Equal(t, czChainID, forks.Headers[i].ConsumerId)
 			require.Equal(t, numHeaders-1, forks.Headers[i].Height)
 			require.Equal(t, forkHeaders[i].Header.AppHash, forks.Headers[i].Hash)
 		}
 
 		// check if the chain info is updated or not
-		chainInfo, err := zcKeeper.GetChainInfo(ctx, czChainID)
+		chainInfo, err := zcKeeper.GetChainInfo(ctx, consumerID)
 		require.NoError(t, err)
 		require.Equal(t, numForkHeaders, uint64(len(chainInfo.LatestForks.Headers)))
 		for i := range forks.Headers {
-			require.Equal(t, czChainID, chainInfo.LatestForks.Headers[i].ConsumerId)
+			require.Equal(t, consumerID, chainInfo.LatestForks.Headers[i].ConsumerId)
 			require.Equal(t, numHeaders-1, chainInfo.LatestForks.Headers[i].Height)
 			require.Equal(t, forkHeaders[i].Header.AppHash, chainInfo.LatestForks.Headers[i].Hash)
 		}
diff --git a/x/zoneconcierge/keeper/grpc_query.go b/x/zoneconcierge/keeper/grpc_query.go
index 22e56e8aa..5b0e7ee62 100644
--- a/x/zoneconcierge/keeper/grpc_query.go
+++ b/x/zoneconcierge/keeper/grpc_query.go
@@ -57,7 +57,7 @@ func (k Keeper) ChainsInfo(c context.Context, req *types.QueryChainsInfoRequest)
 
 	// return if no chain IDs are provided
 	if len(req.ConsumerIds) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "chain IDs cannot be empty")
+		return nil, status.Error(codes.InvalidArgument, "consumer IDs cannot be empty")
 	}
 
 	// return if chain IDs exceed the limit
@@ -118,7 +118,7 @@ func (k Keeper) EpochChainsInfo(c context.Context, req *types.QueryEpochChainsIn
 
 	// return if no chain IDs are provided
 	if len(req.ConsumerIds) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "chain IDs cannot be empty")
+		return nil, status.Error(codes.InvalidArgument, "consumer IDs cannot be empty")
 	}
 
 	// return if chain IDs exceed the limit
diff --git a/x/zoneconcierge/keeper/grpc_query_test.go b/x/zoneconcierge/keeper/grpc_query_test.go
index 03777faf3..1b5d91e8e 100644
--- a/x/zoneconcierge/keeper/grpc_query_test.go
+++ b/x/zoneconcierge/keeper/grpc_query_test.go
@@ -37,21 +37,21 @@ func FuzzChainList(f *testing.F) {
 
 		// invoke the hook a random number of times with random chain IDs
 		numHeaders := datagen.RandomInt(r, 100) + 1
-		allChainIDs := []string{}
+		allConsumerIDs := []string{}
 		for i := uint64(0); i < numHeaders; i++ {
 			var consumerID string
 			// simulate the scenario that some headers belong to the same chain
 			if i > 0 && datagen.OneInN(r, 2) {
-				consumerID = allChainIDs[r.Intn(len(allChainIDs))]
+				consumerID = allConsumerIDs[r.Intn(len(allConsumerIDs))]
 			} else {
 				consumerID = datagen.GenRandomHexStr(r, 30)
-				allChainIDs = append(allChainIDs, consumerID)
+				allConsumerIDs = append(allConsumerIDs, consumerID)
 			}
-			header := datagen.GenRandomIBCTMHeader(r, consumerID, 0)
-			zcKeeper.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), consumerID, false)
+			header := datagen.GenRandomIBCTMHeader(r, 0)
+			zcKeeper.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), false)
 		}
 
-		limit := datagen.RandomInt(r, len(allChainIDs)) + 1
+		limit := datagen.RandomInt(r, len(allConsumerIDs)) + 1
 
 		// make query to get actual chain IDs
 		resp, err := zcKeeper.ChainList(ctx, &zctypes.QueryChainListRequest{
@@ -63,9 +63,9 @@ func FuzzChainList(f *testing.F) {
 		actualConsumerIDs := resp.ConsumerIds
 
 		require.Equal(t, limit, uint64(len(actualConsumerIDs)))
-		allChainIDs = zcKeeper.GetAllConsumerIDs(ctx)
+		allConsumerIDs = zcKeeper.GetAllConsumerIDs(ctx)
 		for i := uint64(0); i < limit; i++ {
-			require.Equal(t, allChainIDs[i], actualConsumerIDs[i])
+			require.Equal(t, allConsumerIDs[i], actualConsumerIDs[i])
 		}
 	})
 }
@@ -121,22 +121,22 @@ func FuzzHeader(f *testing.F) {
 		babylonApp := app.Setup(t, false)
 		zcKeeper := babylonApp.ZoneConciergeKeeper
 		ctx := babylonApp.NewContext(false)
-		czChainID := "test-chainid"
+		consumerID := "test-consumerid"
 
 		// invoke the hook a random number of times to simulate a random number of blocks
 		numHeaders := datagen.RandomInt(r, 100) + 2
 		numForkHeaders := datagen.RandomInt(r, 10) + 1
-		headers, forkHeaders := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, czChainID, 0, numHeaders, numForkHeaders)
+		headers, forkHeaders := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, 0, numHeaders, numForkHeaders)
 
 		// find header at a random height and assert correctness against the expected header
 		randomHeight := datagen.RandomInt(r, int(numHeaders-1))
-		resp, err := zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ConsumerId: czChainID, Height: randomHeight})
+		resp, err := zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ConsumerId: consumerID, Height: randomHeight})
 		require.NoError(t, err)
 		require.Equal(t, headers[randomHeight].Header.AppHash, resp.Header.Hash)
 		require.Len(t, resp.ForkHeaders.Headers, 0)
 
 		// find the last header and fork headers then assert correctness
-		resp, err = zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ConsumerId: czChainID, Height: numHeaders - 1})
+		resp, err = zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ConsumerId: consumerID, Height: numHeaders - 1})
 		require.NoError(t, err)
 		require.Equal(t, headers[numHeaders-1].Header.AppHash, resp.Header.Hash)
 		require.Len(t, resp.ForkHeaders.Headers, int(numForkHeaders))
@@ -220,12 +220,12 @@ func FuzzEpochChainsInfo(f *testing.F) {
 
 		// if num of chain ids exceed the max limit, query should fail
 		largeNumChains := datagen.RandomInt(r, 10) + 101
-		var maxChainIDs []string
+		var maxConsumerIDs []string
 		for i := uint64(0); i < largeNumChains; i++ {
-			maxChainIDs = append(maxChainIDs, datagen.GenRandomHexStr(r, 30))
+			maxConsumerIDs = append(maxConsumerIDs, datagen.GenRandomHexStr(r, 30))
 		}
 		randomEpochNum := datagen.RandomInt(r, 10) + 1
-		_, err := zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ConsumerIds: maxChainIDs})
+		_, err := zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ConsumerIds: maxConsumerIDs})
 		require.Error(t, err)
 
 		// if no input is passed in, query should fail
@@ -237,8 +237,8 @@ func FuzzEpochChainsInfo(f *testing.F) {
 		require.Error(t, err)
 
 		// if chain ids contain duplicates, query should fail
-		randomChainID := datagen.GenRandomHexStr(r, 30)
-		dupConsumerIds := []string{randomChainID, randomChainID}
+		randomConsumerID := datagen.GenRandomHexStr(r, 30)
+		dupConsumerIds := []string{randomConsumerID, randomConsumerID}
 		_, err = zcKeeper.EpochChainsInfo(ctx, &zctypes.QueryEpochChainsInfoRequest{EpochNum: randomEpochNum, ConsumerIds: dupConsumerIds})
 		require.Error(t, err)
 	})
@@ -253,17 +253,17 @@ func FuzzListHeaders(f *testing.F) {
 		babylonApp := app.Setup(t, false)
 		zcKeeper := babylonApp.ZoneConciergeKeeper
 		ctx := babylonApp.NewContext(false)
-		czChainID := "test-chainid"
+		consumerID := "test-consumerid"
 
 		// invoke the hook a random number of times to simulate a random number of blocks
 		numHeaders := datagen.RandomInt(r, 100) + 1
 		numForkHeaders := datagen.RandomInt(r, 10) + 1
-		headers, _ := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, czChainID, 0, numHeaders, numForkHeaders)
+		headers, _ := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, 0, numHeaders, numForkHeaders)
 
 		// a request with randomised pagination
 		limit := datagen.RandomInt(r, int(numHeaders)) + 1
 		req := &zctypes.QueryListHeadersRequest{
-			ConsumerId: czChainID,
+			ConsumerId: consumerID,
 			Pagination: &query.PageRequest{
 				Limit: limit,
 			},
@@ -287,7 +287,7 @@ func FuzzListEpochHeaders(f *testing.F) {
 		zcKeeper := babylonApp.ZoneConciergeKeeper
 		epochingKeeper := babylonApp.EpochingKeeper
 		ctx := babylonApp.NewContext(false)
-		czChainID := "test-chainid"
+		consumerID := "test-consumerid"
 
 		hooks := zcKeeper.Hooks()
 
@@ -318,7 +318,7 @@ func FuzzListEpochHeaders(f *testing.F) {
 			numHeadersList = append(numHeadersList, datagen.RandomInt(r, 100)+1)
 			numForkHeadersList = append(numForkHeadersList, datagen.RandomInt(r, 10)+1)
 			// trigger hooks to append these headers and fork headers
-			expectedHeaders, _ := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, czChainID, nextHeightList[i], numHeadersList[i], numForkHeadersList[i])
+			expectedHeaders, _ := SimulateNewHeadersAndForks(ctx, r, &zcKeeper, consumerID, nextHeightList[i], numHeadersList[i], numForkHeadersList[i])
 			expectedHeadersMap[epochNum] = expectedHeaders
 			// prepare nextHeight for the next request
 			nextHeightList = append(nextHeightList, nextHeightList[i]+numHeadersList[i])
@@ -334,7 +334,7 @@ func FuzzListEpochHeaders(f *testing.F) {
 			epochNum := epochNumList[i]
 			// make request
 			req := &zctypes.QueryListEpochHeadersRequest{
-				ConsumerId: czChainID,
+				ConsumerId: consumerID,
 				EpochNum:   epochNum,
 			}
 			resp, err := zcKeeper.ListEpochHeaders(ctx, req)
@@ -405,17 +405,17 @@ func FuzzFinalizedChainInfo(f *testing.F) {
 		)
 		numChains := datagen.RandomInt(r, 100) + 1
 		for i := uint64(0); i < numChains; i++ {
-			czChainIDLen := datagen.RandomInt(r, 40) + 10
-			czChainID := string(datagen.GenRandomByteArray(r, czChainIDLen))
+			consumerIDLen := datagen.RandomInt(r, 40) + 10
+			consumerID := string(datagen.GenRandomByteArray(r, consumerIDLen))
 
 			// invoke the hook a random number of times to simulate a random number of blocks
 			numHeaders := datagen.RandomInt(r, 100) + 1
 			numForkHeaders := datagen.RandomInt(r, 10) + 1
-			SimulateNewHeadersAndForks(ctx, r, zcKeeper, czChainID, 0, numHeaders, numForkHeaders)
+			SimulateNewHeadersAndForks(ctx, r, zcKeeper, consumerID, 0, numHeaders, numForkHeaders)
 
-			consumerIDs = append(consumerIDs, czChainID)
+			consumerIDs = append(consumerIDs, consumerID)
 			chainsInfo = append(chainsInfo, chainInfo{
-				consumerID:     czChainID,
+				consumerID:     consumerID,
 				numHeaders:     numHeaders,
 				numForkHeaders: numForkHeaders,
 			})
diff --git a/x/zoneconcierge/keeper/header_handler.go b/x/zoneconcierge/keeper/header_handler.go
index c96b9347d..412664328 100644
--- a/x/zoneconcierge/keeper/header_handler.go
+++ b/x/zoneconcierge/keeper/header_handler.go
@@ -10,11 +10,11 @@ import (
 )
 
 // HandleHeaderWithValidCommit handles a CZ header with a valid QC
-func (k Keeper) HandleHeaderWithValidCommit(ctx context.Context, txHash []byte, header *types.HeaderInfo, clientID string, isOnFork bool) {
+func (k Keeper) HandleHeaderWithValidCommit(ctx context.Context, txHash []byte, header *types.HeaderInfo, isOnFork bool) {
 	sdkCtx := sdk.UnwrapSDKContext(ctx)
 	babylonHeader := sdkCtx.HeaderInfo()
 	indexedHeader := types.IndexedHeader{
-		ConsumerId:          clientID,
+		ConsumerId:          header.ClientId,
 		Hash:                header.AppHash,
 		Height:              header.Height,
 		Time:                &header.Time,
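With the `clientID` parameter dropped, the consumer ID now travels on the `HeaderInfo` itself. A minimal caller-side sketch under that assumption, using the `ClientId` field this series adds to `HeaderInfo` (see `NewZCHeaderInfo` in patch 3); the local variable names are illustrative:

```go
// Sketch only: the header info carries its own client ID, so callers no longer
// thread a separate clientID argument into HandleHeaderWithValidCommit.
headerInfo := &types.HeaderInfo{
	ClientId: clientID, // consumer ID = ID of the IBC client being updated
	ChainId:  ibcHeader.Header.ChainID,
	AppHash:  ibcHeader.Header.AppHash,
	Time:     ibcHeader.Header.Time,
	Height:   uint64(ibcHeader.Header.Height),
}
k.HandleHeaderWithValidCommit(ctx, txHash, headerInfo, false /* isOnFork */)
```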
diff --git a/x/zoneconcierge/keeper/ibc_header_decorator.go b/x/zoneconcierge/keeper/ibc_header_decorator.go
index ecac68f9a..930f33e6e 100644
--- a/x/zoneconcierge/keeper/ibc_header_decorator.go
+++ b/x/zoneconcierge/keeper/ibc_header_decorator.go
@@ -22,22 +22,21 @@ func NewIBCHeaderDecorator(k Keeper) *IBCHeaderDecorator {
 	}
 }
 
-func (d *IBCHeaderDecorator) parseMsgUpdateClient(ctx sdk.Context, m sdk.Msg) (*types.HeaderInfo, *ibctmtypes.ClientState, string) {
+func (d *IBCHeaderDecorator) getHeaderAndClientState(ctx sdk.Context, m sdk.Msg) (*types.HeaderInfo, *ibctmtypes.ClientState) {
 	// ensure the message is MsgUpdateClient
 	msgUpdateClient, ok := m.(*clienttypes.MsgUpdateClient)
 	if !ok {
-		return nil, nil, ""
+		return nil, nil
 	}
-	clientID := msgUpdateClient.ClientId
 	// unpack ClientMsg inside MsgUpdateClient
 	clientMsg, err := clienttypes.UnpackClientMessage(msgUpdateClient.ClientMessage)
 	if err != nil {
-		return nil, nil, ""
+		return nil, nil
 	}
 	// ensure the ClientMsg is a Comet header
 	ibctmHeader, ok := clientMsg.(*ibctmtypes.Header)
 	if !ok {
-		return nil, nil, ""
+		return nil, nil
 	}
 
 	// all good, we get the headerInfo
@@ -52,15 +51,15 @@ func (d *IBCHeaderDecorator) parseMsgUpdateClient(ctx sdk.Context, m sdk.Msg) (*
 	// ensure the corresponding clientState exists
 	clientState, exist := d.k.clientKeeper.GetClientState(ctx, msgUpdateClient.ClientId)
 	if !exist {
-		return nil, nil, ""
+		return nil, nil
 	}
 	// ensure the clientState is a Comet clientState
 	cmtClientState, ok := clientState.(*ibctmtypes.ClientState)
 	if !ok {
-		return nil, nil, ""
+		return nil, nil
 	}
 
-	return headerInfo, cmtClientState, clientID
+	return headerInfo, cmtClientState
 }
 
 func (d *IBCHeaderDecorator) PostHandle(ctx sdk.Context, tx sdk.Tx, simulate, success bool, next sdk.PostHandler) (sdk.Context, error) {
@@ -79,7 +78,7 @@ func (d *IBCHeaderDecorator) PostHandle(ctx sdk.Context, tx sdk.Tx, simulate, su
 
 	for _, msg := range tx.GetMsgs() {
 		// try to extract the headerInfo and the client's status
-		headerInfo, clientState, clientID := d.parseMsgUpdateClient(ctx, msg)
+		headerInfo, clientState := d.getHeaderAndClientState(ctx, msg)
 		if headerInfo == nil {
 			continue
 		}
@@ -92,7 +91,7 @@ func (d *IBCHeaderDecorator) PostHandle(ctx sdk.Context, tx sdk.Tx, simulate, su
 		// fail, eventually failing the entire tx. All state updates due to this
 		// failed tx will be rolled back.
 		isOnFork := !clientState.FrozenHeight.IsZero()
-		d.k.HandleHeaderWithValidCommit(ctx, txHash, headerInfo, clientID, isOnFork)
+		d.k.HandleHeaderWithValidCommit(ctx, txHash, headerInfo, isOnFork)
 
 		// unfreeze client (by setting FrozenHeight to zero again) if the client is frozen
 		// due to a fork header
diff --git a/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go b/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go
index 428ec16fe..218d25ed6 100644
--- a/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go
+++ b/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go
@@ -6,7 +6,6 @@ import (
 
 	sdk "github.com/cosmos/cosmos-sdk/types"
 	channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types"
-	ibctmtypes "github.com/cosmos/ibc-go/v8/modules/light-clients/07-tendermint"
 
 	bbn "github.com/babylonlabs-io/babylon/types"
 	btcctypes "github.com/babylonlabs-io/babylon/x/btccheckpoint/types"
@@ -27,21 +26,16 @@ type finalizedInfo struct {
 	BTCHeaders          []*btclctypes.BTCHeaderInfo
 }
 
-// getChainID gets the ID of the counterparty chain under the given channel
-func (k Keeper) getChainID(ctx context.Context, channel channeltypes.IdentifiedChannel) (string, error) {
+// getClientID gets the ID of the IBC client under the given channel.
+// The client ID is used as the consumer ID to uniquely identify the
+// consumer chain.
+func (k Keeper) getClientID(ctx context.Context, channel channeltypes.IdentifiedChannel) (string, error) {
 	sdkCtx := sdk.UnwrapSDKContext(ctx)
-	// get clientState under this channel
-	_, clientState, err := k.channelKeeper.GetChannelClientState(sdkCtx, channel.PortId, channel.ChannelId)
+	clientID, _, err := k.channelKeeper.GetChannelClientState(sdkCtx, channel.PortId, channel.ChannelId)
 	if err != nil {
 		return "", err
 	}
-	// cast clientState to comet clientState
-	// TODO: support for chains other than Cosmos zones
-	cmtClientState, ok := clientState.(*ibctmtypes.ClientState)
-	if !ok {
-		return "", fmt.Errorf("client must be a Comet client, expected: %T, got: %T", &ibctmtypes.ClientState{}, cmtClientState)
-	}
-	return cmtClientState.ChainId, nil
+	return clientID, nil
 }
 
 // getFinalizedInfo returns metadata and proofs that are identical to all BTC timestamps in the same epoch
@@ -235,7 +229,7 @@ func (k Keeper) BroadcastBTCTimestamps(
 	// for each channel, construct and send BTC timestamp
 	for _, channel := range openZCChannels {
 		// get the ID of the chain under this channel
-		consumerID, err := k.getChainID(ctx, channel)
+		consumerID, err := k.getClientID(ctx, channel)
 		if err != nil {
 			k.Logger(sdkCtx).Error("failed to get chain ID, skip sending BTC timestamp for this chain", "channelID", channel.ChannelId, "error", err)
 			continue
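Switching from the counterparty's self-reported chain ID to the IBC client ID also drops the Comet-only cast, and it keeps consumers distinguishable even when they report the same chain ID. Illustration only, with client IDs following ibc-go's `<client-type>-<counter>` convention:

```go
// Illustration only: two consumers may self-report the same chain ID, but each
// IBC client created on Babylon has a unique ID, so the client ID is the safer
// consumer identifier.
consumerA := "07-tendermint-0" // counterparty chain ID "test-1"
consumerB := "07-tendermint-1" // also reports chain ID "test-1", yet a distinct consumer
_, _ = consumerA, consumerB
```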
diff --git a/x/zoneconcierge/keeper/keeper_test.go b/x/zoneconcierge/keeper/keeper_test.go
index aaf972240..d6a68a57e 100644
--- a/x/zoneconcierge/keeper/keeper_test.go
+++ b/x/zoneconcierge/keeper/keeper_test.go
@@ -15,8 +15,8 @@ func SimulateNewHeaders(ctx context.Context, r *rand.Rand, k *zckeeper.Keeper, c
 	headers := []*ibctmtypes.Header{}
 	// invoke the hook a number of times to simulate a number of blocks
 	for i := uint64(0); i < numHeaders; i++ {
-		header := datagen.GenRandomIBCTMHeader(r, consumerID, startHeight+i)
-		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), consumerID, false)
+		header := datagen.GenRandomIBCTMHeader(r, startHeight+i)
+		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), false)
 		headers = append(headers, header)
 	}
 	return headers
@@ -27,16 +27,16 @@ func SimulateNewHeadersAndForks(ctx context.Context, r *rand.Rand, k *zckeeper.K
 	headers := []*ibctmtypes.Header{}
 	// invoke the hook a number of times to simulate a number of blocks
 	for i := uint64(0); i < numHeaders; i++ {
-		header := datagen.GenRandomIBCTMHeader(r, consumerID, startHeight+i)
-		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), consumerID, false)
+		header := datagen.GenRandomIBCTMHeader(r, startHeight+i)
+		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), false)
 		headers = append(headers, header)
 	}
 
 	// generate a number of fork headers
 	forkHeaders := []*ibctmtypes.Header{}
 	for i := uint64(0); i < numForkHeaders; i++ {
-		header := datagen.GenRandomIBCTMHeader(r, consumerID, startHeight+numHeaders-1)
-		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), consumerID, true)
+		header := datagen.GenRandomIBCTMHeader(r, startHeight+numHeaders-1)
+		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), true)
 		forkHeaders = append(forkHeaders, header)
 	}
 	return headers, forkHeaders
diff --git a/x/zoneconcierge/keeper/proof_btc_timestamp_test.go b/x/zoneconcierge/keeper/proof_btc_timestamp_test.go
index c1f0aa673..535079d8f 100644
--- a/x/zoneconcierge/keeper/proof_btc_timestamp_test.go
+++ b/x/zoneconcierge/keeper/proof_btc_timestamp_test.go
@@ -43,9 +43,9 @@ func FuzzProofCZHeaderInEpoch(f *testing.F) {
 		// handle a random header from a random consumer chain
 		consumerID := datagen.GenRandomHexStr(r, 10)
 		height := datagen.RandomInt(r, 100) + 1
-		ibctmHeader := datagen.GenRandomIBCTMHeader(r, consumerID, height)
+		ibctmHeader := datagen.GenRandomIBCTMHeader(r, height)
 		headerInfo := datagen.HeaderToHeaderInfo(ibctmHeader)
-		zck.HandleHeaderWithValidCommit(h.Ctx, datagen.GenRandomByteArray(r, 32), headerInfo, consumerID, false)
+		zck.HandleHeaderWithValidCommit(h.Ctx, datagen.GenRandomByteArray(r, 32), headerInfo, false)
 
 		// ensure the header is successfully inserted
 		indexedHeader, err := zck.GetHeader(h.Ctx, consumerID, height)
diff --git a/x/zoneconcierge/types/btc_timestamp_test.go b/x/zoneconcierge/types/btc_timestamp_test.go
index fc2a9e43b..a0d8d8d46 100644
--- a/x/zoneconcierge/types/btc_timestamp_test.go
+++ b/x/zoneconcierge/types/btc_timestamp_test.go
@@ -61,9 +61,9 @@ func FuzzBTCTimestamp(f *testing.F) {
 		// handle a random header from a random consumer chain
 		consumerID := datagen.GenRandomHexStr(r, 10)
 		height := datagen.RandomInt(r, 100) + 1
-		ibctmHeader := datagen.GenRandomIBCTMHeader(r, consumerID, height)
+		ibctmHeader := datagen.GenRandomIBCTMHeader(r, height)
 		headerInfo := datagen.HeaderToHeaderInfo(ibctmHeader)
-		zck.HandleHeaderWithValidCommit(h.Ctx, datagen.GenRandomByteArray(r, 32), headerInfo, consumerID, false)
+		zck.HandleHeaderWithValidCommit(h.Ctx, datagen.GenRandomByteArray(r, 32), headerInfo, false)
 
 		// ensure the header is successfully inserted
 		indexedHeader, err := zck.GetHeader(h.Ctx, consumerID, height)

From 1dfb09096a1d9ed08f27d5ddce514b8a22644b7a Mon Sep 17 00:00:00 2001
From: Runchao Han <me@runchao.rocks>
Date: Mon, 19 Aug 2024 15:46:49 +1000
Subject: [PATCH 3/3] fix lint

---
 testutil/datagen/tendermint.go                     | 11 ++++++-----
 x/zoneconcierge/keeper/grpc_query_test.go          |  2 +-
 x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go |  2 +-
 x/zoneconcierge/keeper/keeper_test.go              |  6 +++---
 x/zoneconcierge/keeper/proof_btc_timestamp_test.go |  2 +-
 x/zoneconcierge/types/btc_timestamp_test.go        |  2 +-
 6 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/testutil/datagen/tendermint.go b/testutil/datagen/tendermint.go
index 82985156d..8a40fc275 100644
--- a/testutil/datagen/tendermint.go
+++ b/testutil/datagen/tendermint.go
@@ -41,12 +41,13 @@ func GenRandomTMHeaderInfo(r *rand.Rand, chainID string, height uint64) *header.
 	}
 }
 
-func HeaderToHeaderInfo(header *ibctmtypes.Header) *zctypes.HeaderInfo {
+func NewZCHeaderInfo(header *ibctmtypes.Header, clientID string) *zctypes.HeaderInfo {
 	return &zctypes.HeaderInfo{
-		AppHash: header.Header.AppHash,
-		ChainId: header.Header.ChainID,
-		Time:    header.Header.Time,
-		Height:  uint64(header.Header.Height),
+		ClientId: clientID,
+		AppHash:  header.Header.AppHash,
+		ChainId:  header.Header.ChainID,
+		Time:     header.Header.Time,
+		Height:   uint64(header.Header.Height),
 	}
 }
 
diff --git a/x/zoneconcierge/keeper/grpc_query_test.go b/x/zoneconcierge/keeper/grpc_query_test.go
index 1b5d91e8e..b160119a0 100644
--- a/x/zoneconcierge/keeper/grpc_query_test.go
+++ b/x/zoneconcierge/keeper/grpc_query_test.go
@@ -48,7 +48,7 @@ func FuzzChainList(f *testing.F) {
 				allConsumerIDs = append(allConsumerIDs, consumerID)
 			}
 			header := datagen.GenRandomIBCTMHeader(r, 0)
-			zcKeeper.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), false)
+			zcKeeper.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.NewZCHeaderInfo(header, consumerID), false)
 		}
 
 		limit := datagen.RandomInt(r, len(allConsumerIDs)) + 1
diff --git a/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go b/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go
index 218d25ed6..447051973 100644
--- a/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go
+++ b/x/zoneconcierge/keeper/ibc_packet_btc_timestamp.go
@@ -231,7 +231,7 @@ func (k Keeper) BroadcastBTCTimestamps(
 		// get the ID of the chain under this channel
 		consumerID, err := k.getClientID(ctx, channel)
 		if err != nil {
-			k.Logger(sdkCtx).Error("failed to get chain ID, skip sending BTC timestamp for this chain", "channelID", channel.ChannelId, "error", err)
+			k.Logger(sdkCtx).Error("failed to get client ID, skipping BTC timestamp for this consumer", "channelID", channel.ChannelId, "error", err)
 			continue
 		}
 
diff --git a/x/zoneconcierge/keeper/keeper_test.go b/x/zoneconcierge/keeper/keeper_test.go
index d6a68a57e..a552219b8 100644
--- a/x/zoneconcierge/keeper/keeper_test.go
+++ b/x/zoneconcierge/keeper/keeper_test.go
@@ -16,7 +16,7 @@ func SimulateNewHeaders(ctx context.Context, r *rand.Rand, k *zckeeper.Keeper, c
 	// invoke the hook a number of times to simulate a number of blocks
 	for i := uint64(0); i < numHeaders; i++ {
 		header := datagen.GenRandomIBCTMHeader(r, startHeight+i)
-		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), false)
+		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.NewZCHeaderInfo(header, consumerID), false)
 		headers = append(headers, header)
 	}
 	return headers
@@ -28,7 +28,7 @@ func SimulateNewHeadersAndForks(ctx context.Context, r *rand.Rand, k *zckeeper.K
 	// invoke the hook a number of times to simulate a number of blocks
 	for i := uint64(0); i < numHeaders; i++ {
 		header := datagen.GenRandomIBCTMHeader(r, startHeight+i)
-		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), false)
+		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.NewZCHeaderInfo(header, consumerID), false)
 		headers = append(headers, header)
 	}
 
@@ -36,7 +36,7 @@ func SimulateNewHeadersAndForks(ctx context.Context, r *rand.Rand, k *zckeeper.K
 	forkHeaders := []*ibctmtypes.Header{}
 	for i := uint64(0); i < numForkHeaders; i++ {
 		header := datagen.GenRandomIBCTMHeader(r, startHeight+numHeaders-1)
-		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.HeaderToHeaderInfo(header), true)
+		k.HandleHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(r, 32), datagen.NewZCHeaderInfo(header, consumerID), true)
 		forkHeaders = append(forkHeaders, header)
 	}
 	return headers, forkHeaders
diff --git a/x/zoneconcierge/keeper/proof_btc_timestamp_test.go b/x/zoneconcierge/keeper/proof_btc_timestamp_test.go
index 535079d8f..97ef52fba 100644
--- a/x/zoneconcierge/keeper/proof_btc_timestamp_test.go
+++ b/x/zoneconcierge/keeper/proof_btc_timestamp_test.go
@@ -44,7 +44,7 @@ func FuzzProofCZHeaderInEpoch(f *testing.F) {
 		consumerID := datagen.GenRandomHexStr(r, 10)
 		height := datagen.RandomInt(r, 100) + 1
 		ibctmHeader := datagen.GenRandomIBCTMHeader(r, height)
-		headerInfo := datagen.HeaderToHeaderInfo(ibctmHeader)
+		headerInfo := datagen.NewZCHeaderInfo(ibctmHeader, consumerID)
 		zck.HandleHeaderWithValidCommit(h.Ctx, datagen.GenRandomByteArray(r, 32), headerInfo, false)
 
 		// ensure the header is successfully inserted
diff --git a/x/zoneconcierge/types/btc_timestamp_test.go b/x/zoneconcierge/types/btc_timestamp_test.go
index a0d8d8d46..4d62fe195 100644
--- a/x/zoneconcierge/types/btc_timestamp_test.go
+++ b/x/zoneconcierge/types/btc_timestamp_test.go
@@ -62,7 +62,7 @@ func FuzzBTCTimestamp(f *testing.F) {
 		consumerID := datagen.GenRandomHexStr(r, 10)
 		height := datagen.RandomInt(r, 100) + 1
 		ibctmHeader := datagen.GenRandomIBCTMHeader(r, height)
-		headerInfo := datagen.HeaderToHeaderInfo(ibctmHeader)
+		headerInfo := datagen.NewZCHeaderInfo(ibctmHeader, consumerID)
 		zck.HandleHeaderWithValidCommit(h.Ctx, datagen.GenRandomByteArray(r, 32), headerInfo, false)
 
 		// ensure the header is successfully inserted