From e2f7247b8646232836dc73321271f9aa8555b7c1 Mon Sep 17 00:00:00 2001 From: BESCOND Anthony Date: Fri, 23 Aug 2024 16:16:24 +0200 Subject: [PATCH 1/2] feat: tolerate failed checkins / ready Making checkin process optional (default to activated) --- cmd/kg/main.go | 6 ++-- cmd/kgctl/main.go | 4 ++- docs/kg.md | 1 + pkg/k8s/backend.go | 17 ++++++----- pkg/k8s/backend_test.go | 63 ++++++++++++++++++++++++++++------------- pkg/mesh/backend.go | 10 ++++++- pkg/mesh/mesh.go | 7 ++++- pkg/mesh/mesh_test.go | 63 +++++++++++++++++++++++------------------ 8 files changed, 112 insertions(+), 59 deletions(-) diff --git a/cmd/kg/main.go b/cmd/kg/main.go index c2ad6d5b..ba2cf4bc 100644 --- a/cmd/kg/main.go +++ b/cmd/kg/main.go @@ -102,6 +102,7 @@ var cmd = &cobra.Command{ var ( backend string + checkIn bool cleanUp bool cleanUpIface bool createIface bool @@ -134,6 +135,7 @@ var ( func init() { cmd.Flags().StringVar(&backend, "backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends)) + cmd.Flags().BoolVar(&checkIn, "check-in", true, "Should kilo regularly check-in in backend") cmd.Flags().BoolVar(&cleanUp, "clean-up", true, "Should kilo clean up network modifications on shutdown?") cmd.Flags().BoolVar(&cleanUpIface, "clean-up-interface", false, "Should Kilo delete its interface when it shuts down?") cmd.Flags().BoolVar(&createIface, "create-interface", true, "Should kilo create an interface on startup?") @@ -248,7 +250,7 @@ func runRoot(_ *cobra.Command, _ []string) error { c := kubernetes.NewForConfigOrDie(config) kc := kiloclient.NewForConfigOrDie(config) ec := apiextensions.NewForConfigOrDie(config) - b = k8s.New(c, kc, ec, topologyLabel, log.With(logger, "component", "k8s backend")) + b = k8s.New(c, kc, ec, topologyLabel, checkIn, log.With(logger, "component", "k8s backend")) default: return fmt.Errorf("backend %v unknown; possible values are: %s", backend, availableBackends) } @@ -266,7 +268,7 @@ func runRoot(_ *cobra.Command, _ []string) error { serviceCIDRs = append(serviceCIDRs, s) } - m, err := mesh.New(b, enc, gr, hostname, port, s, local, cni, cniPath, iface, cleanUp, cleanUpIface, createIface, mtu, resyncPeriod, prioritisePrivateAddr, iptablesForwardRule, serviceCIDRs, log.With(logger, "component", "kilo"), registry) + m, err := mesh.New(b, enc, gr, hostname, port, s, local, cni, cniPath, iface, checkIn, cleanUp, cleanUpIface, createIface, mtu, resyncPeriod, prioritisePrivateAddr, iptablesForwardRule, serviceCIDRs, log.With(logger, "component", "kilo"), registry) if err != nil { return fmt.Errorf("failed to create Kilo mesh: %v", err) } diff --git a/cmd/kgctl/main.go b/cmd/kgctl/main.go index 9a6c58ac..8cbc2c3f 100644 --- a/cmd/kgctl/main.go +++ b/cmd/kgctl/main.go @@ -66,6 +66,7 @@ var ( port int } backend string + checkIn bool granularity string kubeconfig string topologyLabel string @@ -94,7 +95,7 @@ func runRoot(c *cobra.Command, _ []string) error { c := kubernetes.NewForConfigOrDie(config) opts.kc = kiloclient.NewForConfigOrDie(config) ec := apiextensions.NewForConfigOrDie(config) - opts.backend = k8s.New(c, opts.kc, ec, topologyLabel, log.NewNopLogger()) + opts.backend = k8s.New(c, opts.kc, ec, topologyLabel, checkIn, log.NewNopLogger()) default: return fmt.Errorf("backend %s unknown; posible values are: %s", backend, availableBackends) } @@ -119,6 +120,7 @@ func main() { SilenceErrors: true, } cmd.PersistentFlags().StringVar(&backend, "backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. 
Possible values: %s", availableBackends)) + cmd.PersistentFlags().BoolVar(&checkIn, "check-in", true, "Should kilo consider check-in (LastSeen) in backend") cmd.PersistentFlags().StringVar(&granularity, "mesh-granularity", string(mesh.AutoGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities)) defaultKubeconfig := os.Getenv("KUBECONFIG") if _, err := os.Stat(defaultKubeconfig); os.IsNotExist(err) { diff --git a/docs/kg.md b/docs/kg.md index 9a9fe8fb..af5956d9 100644 --- a/docs/kg.md +++ b/docs/kg.md @@ -33,6 +33,7 @@ Available Commands: Flags: --backend string The backend for the mesh. Possible values: kubernetes (default "kubernetes") + --check-in Should kilo regularly check-in in backend (default true) --clean-up Should kilo clean up network modifications on shutdown? (default true) --clean-up-interface Should Kilo delete its interface when it shuts down? --cni Should Kilo manage the node's CNI configuration? (default true) diff --git a/pkg/k8s/backend.go b/pkg/k8s/backend.go index e94a6646..f2be9f86 100644 --- a/pkg/k8s/backend.go +++ b/pkg/k8s/backend.go @@ -92,6 +92,7 @@ type nodeBackend struct { informer cache.SharedIndexInformer lister v1listers.NodeLister topologyLabel string + checkIn bool } type peerBackend struct { @@ -103,7 +104,7 @@ type peerBackend struct { } // New creates a new instance of a mesh.Backend. -func New(c kubernetes.Interface, kc kiloclient.Interface, ec apiextensions.Interface, topologyLabel string, l log.Logger) mesh.Backend { +func New(c kubernetes.Interface, kc kiloclient.Interface, ec apiextensions.Interface, topologyLabel string, checkIn bool, l log.Logger) mesh.Backend { ni := v1informers.NewNodeInformer(c, 5*time.Minute, nil) pi := v1alpha1informers.NewPeerInformer(kc, 5*time.Minute, nil) @@ -116,6 +117,7 @@ func New(c kubernetes.Interface, kc kiloclient.Interface, ec apiextensions.Inter informer: ni, lister: v1listers.NewNodeLister(ni.GetIndexer()), topologyLabel: topologyLabel, + checkIn: checkIn, }, &peerBackend{ client: kc, @@ -150,7 +152,7 @@ func (nb *nodeBackend) Get(name string) (*mesh.Node, error) { if err != nil { return nil, err } - return translateNode(n, nb.topologyLabel), nil + return translateNode(n, nb.topologyLabel, nb.checkIn), nil } // Init initializes the backend; for this backend that means @@ -170,7 +172,7 @@ func (nb *nodeBackend) Init(ctx context.Context) error { // Failed to decode Node; ignoring... return } - nb.events <- &mesh.NodeEvent{Type: mesh.AddEvent, Node: translateNode(n, nb.topologyLabel)} + nb.events <- &mesh.NodeEvent{Type: mesh.AddEvent, Node: translateNode(n, nb.topologyLabel, nb.checkIn)} }, UpdateFunc: func(old, obj interface{}) { n, ok := obj.(*v1.Node) @@ -183,7 +185,7 @@ func (nb *nodeBackend) Init(ctx context.Context) error { // Failed to decode Node; ignoring... return } - nb.events <- &mesh.NodeEvent{Type: mesh.UpdateEvent, Node: translateNode(n, nb.topologyLabel), Old: translateNode(o, nb.topologyLabel)} + nb.events <- &mesh.NodeEvent{Type: mesh.UpdateEvent, Node: translateNode(n, nb.topologyLabel, nb.checkIn), Old: translateNode(o, nb.topologyLabel, nb.checkIn)} }, DeleteFunc: func(obj interface{}) { n, ok := obj.(*v1.Node) @@ -191,7 +193,7 @@ func (nb *nodeBackend) Init(ctx context.Context) error { // Failed to decode Node; ignoring... 
return } - nb.events <- &mesh.NodeEvent{Type: mesh.DeleteEvent, Node: translateNode(n, nb.topologyLabel)} + nb.events <- &mesh.NodeEvent{Type: mesh.DeleteEvent, Node: translateNode(n, nb.topologyLabel, nb.checkIn)} }, }, ) @@ -206,7 +208,7 @@ func (nb *nodeBackend) List() ([]*mesh.Node, error) { } nodes := make([]*mesh.Node, len(ns)) for i := range ns { - nodes[i] = translateNode(ns[i], nb.topologyLabel) + nodes[i] = translateNode(ns[i], nb.topologyLabel, nb.checkIn) } return nodes, nil } @@ -265,7 +267,7 @@ func (nb *nodeBackend) Watch() <-chan *mesh.NodeEvent { } // translateNode translates a Kubernetes Node to a mesh.Node. -func translateNode(node *v1.Node, topologyLabel string) *mesh.Node { +func translateNode(node *v1.Node, topologyLabel string, checkIn bool) *mesh.Node { if node == nil { return nil } @@ -354,6 +356,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node { InternalIP: internalIP, Key: key, LastSeen: lastSeen, + CheckLastSeen: checkIn, Leader: leader, Location: location, Name: node.Name, diff --git a/pkg/k8s/backend_test.go b/pkg/k8s/backend_test.go index 8ea51aaa..37336d7a 100644 --- a/pkg/k8s/backend_test.go +++ b/pkg/k8s/backend_test.go @@ -63,7 +63,9 @@ func TestTranslateNode(t *testing.T) { { name: "empty", annotations: nil, - out: &mesh.Node{}, + out: &mesh.Node{ + CheckLastSeen: true, + }, }, { name: "invalid ips", @@ -71,7 +73,9 @@ func TestTranslateNode(t *testing.T) { endpointAnnotationKey: "10.0.0.1", internalIPAnnotationKey: "foo", }, - out: &mesh.Node{}, + out: &mesh.Node{ + CheckLastSeen: true, + }, }, { name: "valid ips", @@ -80,8 +84,9 @@ func TestTranslateNode(t *testing.T) { internalIPAnnotationKey: "10.0.0.2/32", }, out: &mesh.Node{ - Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1").To4(), mesh.DefaultKiloPort), - InternalIP: &net.IPNet{IP: net.ParseIP("10.0.0.2").To4(), Mask: net.CIDRMask(32, 32)}, + Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1").To4(), mesh.DefaultKiloPort), + InternalIP: &net.IPNet{IP: net.ParseIP("10.0.0.2").To4(), Mask: net.CIDRMask(32, 32)}, + CheckLastSeen: true, }, }, { @@ -91,21 +96,25 @@ func TestTranslateNode(t *testing.T) { internalIPAnnotationKey: "ff60::10/64", }, out: &mesh.Node{ - Endpoint: wireguard.NewEndpoint(net.ParseIP("ff10::10").To16(), mesh.DefaultKiloPort), - InternalIP: &net.IPNet{IP: net.ParseIP("ff60::10").To16(), Mask: net.CIDRMask(64, 128)}, + Endpoint: wireguard.NewEndpoint(net.ParseIP("ff10::10").To16(), mesh.DefaultKiloPort), + InternalIP: &net.IPNet{IP: net.ParseIP("ff60::10").To16(), Mask: net.CIDRMask(64, 128)}, + CheckLastSeen: true, }, }, { name: "invalid subnet", annotations: map[string]string{}, - out: &mesh.Node{}, - subnet: "foo", + out: &mesh.Node{ + CheckLastSeen: true, + }, + subnet: "foo", }, { name: "normalize subnet", annotations: map[string]string{}, out: &mesh.Node{ - Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0").To4(), Mask: net.CIDRMask(24, 32)}, + Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0").To4(), Mask: net.CIDRMask(24, 32)}, + CheckLastSeen: true, }, subnet: "10.2.0.1/24", }, @@ -113,7 +122,8 @@ func TestTranslateNode(t *testing.T) { name: "valid subnet", annotations: map[string]string{}, out: &mesh.Node{ - Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0").To4(), Mask: net.CIDRMask(24, 32)}, + Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0").To4(), Mask: net.CIDRMask(24, 32)}, + CheckLastSeen: true, }, subnet: "10.2.1.0/24", }, @@ -123,7 +133,8 @@ func TestTranslateNode(t *testing.T) { RegionLabelKey: "a", }, out: &mesh.Node{ - Location: "a", + 
Location: "a", + CheckLastSeen: true, }, }, { @@ -135,7 +146,8 @@ func TestTranslateNode(t *testing.T) { RegionLabelKey: "a", }, out: &mesh.Node{ - Location: "b", + Location: "b", + CheckLastSeen: true, }, }, { @@ -145,7 +157,8 @@ func TestTranslateNode(t *testing.T) { forceEndpointAnnotationKey: "-10.0.0.2:51821", }, out: &mesh.Node{ - Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1").To4(), mesh.DefaultKiloPort), + Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1").To4(), mesh.DefaultKiloPort), + CheckLastSeen: true, }, }, { @@ -155,7 +168,8 @@ func TestTranslateNode(t *testing.T) { forceEndpointAnnotationKey: "10.0.0.2:51821", }, out: &mesh.Node{ - Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.2").To4(), 51821), + Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.2").To4(), 51821), + CheckLastSeen: true, }, }, { @@ -165,6 +179,7 @@ func TestTranslateNode(t *testing.T) { }, out: &mesh.Node{ PersistentKeepalive: 25 * time.Second, + CheckLastSeen: true, }, }, { @@ -174,8 +189,9 @@ func TestTranslateNode(t *testing.T) { forceInternalIPAnnotationKey: "-10.1.0.2/24", }, out: &mesh.Node{ - InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.1").To4(), Mask: net.CIDRMask(24, 32)}, - NoInternalIP: false, + InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.1").To4(), Mask: net.CIDRMask(24, 32)}, + NoInternalIP: false, + CheckLastSeen: true, }, }, { @@ -185,8 +201,9 @@ func TestTranslateNode(t *testing.T) { forceInternalIPAnnotationKey: "10.1.0.2/24", }, out: &mesh.Node{ - InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2").To4(), Mask: net.CIDRMask(24, 32)}, - NoInternalIP: false, + InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2").To4(), Mask: net.CIDRMask(24, 32)}, + NoInternalIP: false, + CheckLastSeen: true, }, }, { @@ -194,7 +211,9 @@ func TestTranslateNode(t *testing.T) { annotations: map[string]string{ lastSeenAnnotationKey: "foo", }, - out: &mesh.Node{}, + out: &mesh.Node{ + CheckLastSeen: true, + }, }, { name: "complete", @@ -219,6 +238,7 @@ func TestTranslateNode(t *testing.T) { InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2").To4(), Mask: net.CIDRMask(32, 32)}, Key: fooKey, LastSeen: 1000000000, + CheckLastSeen: true, Leader: true, Location: "b", PersistentKeepalive: 25 * time.Second, @@ -250,6 +270,7 @@ func TestTranslateNode(t *testing.T) { InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2"), Mask: net.CIDRMask(32, 32)}, Key: fooKey, LastSeen: 1000000000, + CheckLastSeen: true, Leader: true, Location: "b", PersistentKeepalive: 25 * time.Second, @@ -277,6 +298,7 @@ func TestTranslateNode(t *testing.T) { InternalIP: nil, Key: fooKey, LastSeen: 1000000000, + CheckLastSeen: true, Leader: false, Location: "b", PersistentKeepalive: 25 * time.Second, @@ -306,6 +328,7 @@ func TestTranslateNode(t *testing.T) { InternalIP: nil, Key: fooKey, LastSeen: 1000000000, + CheckLastSeen: true, Leader: false, Location: "b", PersistentKeepalive: 25 * time.Second, @@ -319,7 +342,7 @@ func TestTranslateNode(t *testing.T) { n.ObjectMeta.Annotations = tc.annotations n.ObjectMeta.Labels = tc.labels n.Spec.PodCIDR = tc.subnet - node := translateNode(n, RegionLabelKey) + node := translateNode(n, RegionLabelKey, true) if diff := pretty.Compare(node, tc.out); diff != "" { t.Errorf("test case %q: got diff: %v", tc.name, diff) } diff --git a/pkg/mesh/backend.go b/pkg/mesh/backend.go index 203661d1..a2ae744e 100644 --- a/pkg/mesh/backend.go +++ b/pkg/mesh/backend.go @@ -64,6 +64,8 @@ type Node struct { // LastSeen is a Unix time for the last time // the node confirmed it was live. 
LastSeen int64 + // Whether Ready will check LastSeen value + CheckLastSeen bool // Leader is a suggestion to Kilo that // the node wants to lead its segment. Leader bool @@ -81,11 +83,17 @@ type Node struct { // Ready indicates whether or not the node is ready. func (n *Node) Ready() bool { // Nodes that are not leaders will not have WireGuardIPs, so it is not required. + var checkedIn bool + if (n != nil) && (n.Key != wgtypes.Key{}) && (n.Subnet != nil) && (n.CheckLastSeen) { + checkedIn = time.Now().Unix()-n.LastSeen < int64(checkInPeriod)*2/int64(time.Second) + } else { + checkedIn = true + } return n != nil && n.Endpoint.Ready() && n.Key != wgtypes.Key{} && n.Subnet != nil && - time.Now().Unix()-n.LastSeen < int64(checkInPeriod)*2/int64(time.Second) + checkedIn } // Peer represents a peer in the network. diff --git a/pkg/mesh/mesh.go b/pkg/mesh/mesh.go index 3057d2a2..463bb4d7 100644 --- a/pkg/mesh/mesh.go +++ b/pkg/mesh/mesh.go @@ -50,6 +50,7 @@ const ( // Mesh is able to create Kilo network meshes. type Mesh struct { Backend + checkin bool cleanup bool cleanUpIface bool cni bool @@ -89,7 +90,7 @@ type Mesh struct { } // New returns a new Mesh instance. -func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port int, subnet *net.IPNet, local, cni bool, cniPath, iface string, cleanup bool, cleanUpIface bool, createIface bool, mtu uint, resyncPeriod time.Duration, prioritisePrivateAddr, iptablesForwardRule bool, serviceCIDRs []*net.IPNet, logger log.Logger, registerer prometheus.Registerer) (*Mesh, error) { +func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port int, subnet *net.IPNet, local, cni bool, cniPath, iface string, checkin bool, cleanup bool, cleanUpIface bool, createIface bool, mtu uint, resyncPeriod time.Duration, prioritisePrivateAddr, iptablesForwardRule bool, serviceCIDRs []*net.IPNet, logger log.Logger, registerer prometheus.Registerer) (*Mesh, error) { if err := os.MkdirAll(kiloPath, 0700); err != nil { return nil, fmt.Errorf("failed to create directory to store configuration: %v", err) } @@ -168,6 +169,7 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit } mesh := Mesh{ Backend: backend, + checkin: checkin, cleanup: cleanup, cleanUpIface: cleanUpIface, cni: cni, @@ -269,6 +271,9 @@ func (m *Mesh) Run(ctx context.Context) error { } resync := time.NewTimer(m.resyncPeriod) checkIn := time.NewTimer(checkInPeriod) + if !m.checkin { + checkIn.Stop() + } nw := m.Nodes().Watch() pw := m.Peers().Watch() var ne *NodeEvent diff --git a/pkg/mesh/mesh_test.go b/pkg/mesh/mesh_test.go index f02c3af2..26271d92 100644 --- a/pkg/mesh/mesh_test.go +++ b/pkg/mesh/mesh_test.go @@ -48,74 +48,83 @@ func TestReady(t *testing.T) { ready: false, }, { - name: "empty fields", - node: &Node{}, + name: "empty fields", + node: &Node{ + CheckLastSeen: true, + }, ready: false, }, { name: "empty endpoint", node: &Node{ - InternalIP: internalIP, - Key: key, - Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + InternalIP: internalIP, + Key: key, + Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + CheckLastSeen: true, }, ready: false, }, { name: "empty endpoint IP", node: &Node{ - Endpoint: wireguard.NewEndpoint(nil, DefaultKiloPort), - InternalIP: internalIP, - Key: wgtypes.Key{}, - Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + Endpoint: wireguard.NewEndpoint(nil, DefaultKiloPort), + 
InternalIP: internalIP, + Key: wgtypes.Key{}, + Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + CheckLastSeen: true, }, ready: false, }, { name: "empty endpoint port", node: &Node{ - Endpoint: wireguard.NewEndpoint(externalIP.IP, 0), - InternalIP: internalIP, - Key: wgtypes.Key{}, - Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + Endpoint: wireguard.NewEndpoint(externalIP.IP, 0), + InternalIP: internalIP, + Key: wgtypes.Key{}, + Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + CheckLastSeen: true, }, ready: false, }, { name: "empty internal IP", node: &Node{ - Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort), - Key: wgtypes.Key{}, - Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort), + Key: wgtypes.Key{}, + Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + CheckLastSeen: true, }, ready: false, }, { name: "empty key", node: &Node{ - Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort), - InternalIP: internalIP, - Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort), + InternalIP: internalIP, + Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + CheckLastSeen: true, }, ready: false, }, { name: "empty subnet", node: &Node{ - Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort), - InternalIP: internalIP, - Key: wgtypes.Key{}, + Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort), + InternalIP: internalIP, + Key: wgtypes.Key{}, + CheckLastSeen: true, }, ready: false, }, { name: "valid", node: &Node{ - Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort), - InternalIP: internalIP, - Key: key, - LastSeen: time.Now().Unix(), - Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort), + InternalIP: internalIP, + Key: key, + LastSeen: time.Now().Unix(), + CheckLastSeen: true, + Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, }, ready: true, }, From 86d7b6e56e31445fdef040732bb036a77822453d Mon Sep 17 00:00:00 2001 From: BESCOND Anthony Date: Tue, 8 Oct 2024 06:39:08 +0200 Subject: [PATCH 2/2] fix: flag description --- cmd/kg/main.go | 2 +- cmd/kgctl/main.go | 2 +- docs/kg.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/kg/main.go b/cmd/kg/main.go index ba2cf4bc..9badd389 100644 --- a/cmd/kg/main.go +++ b/cmd/kg/main.go @@ -135,7 +135,7 @@ var ( func init() { cmd.Flags().StringVar(&backend, "backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. 
Possible values: %s", availableBackends)) - cmd.Flags().BoolVar(&checkIn, "check-in", true, "Should kilo regularly check-in in backend") + cmd.Flags().BoolVar(&checkIn, "check-in", true, "Should Kilo regularly check in with the backend") cmd.Flags().BoolVar(&cleanUp, "clean-up", true, "Should kilo clean up network modifications on shutdown?") cmd.Flags().BoolVar(&cleanUpIface, "clean-up-interface", false, "Should Kilo delete its interface when it shuts down?") cmd.Flags().BoolVar(&createIface, "create-interface", true, "Should kilo create an interface on startup?") diff --git a/cmd/kgctl/main.go b/cmd/kgctl/main.go index 8cbc2c3f..23b2f445 100644 --- a/cmd/kgctl/main.go +++ b/cmd/kgctl/main.go @@ -120,7 +120,7 @@ func main() { SilenceErrors: true, } cmd.PersistentFlags().StringVar(&backend, "backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends)) - cmd.PersistentFlags().BoolVar(&checkIn, "check-in", true, "Should kilo consider check-in (LastSeen) in backend") + cmd.PersistentFlags().BoolVar(&checkIn, "check-in", true, "Should Kilo prune nodes that have not checked in with the backend") cmd.PersistentFlags().StringVar(&granularity, "mesh-granularity", string(mesh.AutoGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities)) defaultKubeconfig := os.Getenv("KUBECONFIG") if _, err := os.Stat(defaultKubeconfig); os.IsNotExist(err) { diff --git a/docs/kg.md b/docs/kg.md index af5956d9..cc5655c5 100644 --- a/docs/kg.md +++ b/docs/kg.md @@ -33,7 +33,7 @@ Available Commands: Flags: --backend string The backend for the mesh. Possible values: kubernetes (default "kubernetes") - --check-in Should kilo regularly check-in in backend (default true) + --check-in Should Kilo regularly check in with the backend (default true) --clean-up Should kilo clean up network modifications on shutdown? (default true) --clean-up-interface Should Kilo delete its interface when it shuts down? --cni Should Kilo manage the node's CNI configuration? (default true)