From f18eec0f382361049993129c15937fa5430db87b Mon Sep 17 00:00:00 2001 From: chengshiwen Date: Thu, 5 Sep 2024 00:17:21 +0800 Subject: [PATCH] optimize connection pool and test * fixes data-race between Get() and Close() of channelPool * using sync.RWMutex instead of sync.Mutex in boundedPool * refactor idle timeout in pool * add connection pool test * replace fatih/pool.v2 with custom pool --- DEPENDENCIES.md | 1 - coordinator/client_pool.go | 12 +- coordinator/config.go | 3 - coordinator/pool.go | 211 +++++++++++----- coordinator/pool_test.go | 475 +++++++++++++++++++++++++++++++++++++ go.mod | 1 - go.sum | 2 - 7 files changed, 630 insertions(+), 75 deletions(-) create mode 100644 coordinator/pool_test.go diff --git a/DEPENDENCIES.md b/DEPENDENCIES.md index 3412a08..24628ec 100644 --- a/DEPENDENCIES.md +++ b/DEPENDENCIES.md @@ -45,7 +45,6 @@ - golang.org/x/sys [BSD LICENSE](https://github.com/golang/sys/blob/master/LICENSE) - golang.org/x/text [BSD LICENSE](https://github.com/golang/text/blob/master/LICENSE) - golang.org/x/time [BSD LICENSE](https://github.com/golang/time/blob/master/LICENSE) -- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) - jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt) - github.com/xlab/treeprint [MIT LICENSE](https://github.com/xlab/treeprint/blob/master/LICENSE) diff --git a/coordinator/client_pool.go b/coordinator/client_pool.go index 01a332b..7904803 100644 --- a/coordinator/client_pool.go +++ b/coordinator/client_pool.go @@ -3,28 +3,26 @@ package coordinator import ( "net" "sync" - - "gopkg.in/fatih/pool.v2" ) type clientPool struct { mu sync.RWMutex - pool map[uint64]pool.Pool + pool map[uint64]Pool } func newClientPool() *clientPool { return &clientPool{ - pool: make(map[uint64]pool.Pool), + pool: make(map[uint64]Pool), } } -func (c *clientPool) setPool(nodeID uint64, p pool.Pool) { +func (c *clientPool) setPool(nodeID uint64, p Pool) { c.mu.Lock() c.pool[nodeID] = p c.mu.Unlock() } -func (c *clientPool) getPool(nodeID uint64) (pool.Pool, bool) { +func (c *clientPool) getPool(nodeID uint64) (Pool, bool) { c.mu.RLock() p, ok := c.pool[nodeID] c.mu.RUnlock() @@ -35,7 +33,7 @@ func (c *clientPool) size() int { c.mu.RLock() var size int for _, p := range c.pool { - size += p.Len() + size += p.Size() } c.mu.RUnlock() return size diff --git a/coordinator/config.go b/coordinator/config.go index ef10152..0e39295 100644 --- a/coordinator/config.go +++ b/coordinator/config.go @@ -26,9 +26,6 @@ const ( // remains idle in the connection pool. DefaultPoolMaxIdleTime = time.Minute - // DefaultPoolWaitTimeout is the default timeout waiting for free connection. - DefaultPoolWaitTimeout = 5 * time.Second - // DefaultWriteTimeout is the default timeout for a complete write to succeed. DefaultWriteTimeout = 10 * time.Second diff --git a/coordinator/pool.go b/coordinator/pool.go index 65efa53..6d54dc6 100644 --- a/coordinator/pool.go +++ b/coordinator/pool.go @@ -5,12 +5,36 @@ import ( "fmt" "net" "sync" - "sync/atomic" "time" +) + +var ( + // ErrClosed is the error resulting if the pool is closed via pool.Close(). + ErrClosed = errors.New("pool is closed") - "gopkg.in/fatih/pool.v2" + // PoolWaitTimeout is the timeout waiting for free connection. + PoolWaitTimeout = 5 * time.Second ) +// Pool interface describes a pool implementation. A pool should have maximum +// capacity. An ideal pool is thread-safe and easy to use. +type Pool interface { + // Get returns a new connection from the pool. 
Closing the connections puts + // it back to the Pool. Closing it when the pool is destroyed or full will + // be counted as an error. + Get() (net.Conn, error) + + // Close closes the pool and all its connections. After Close() the pool is + // no longer usable. + Close() + + // Len returns the current number of idle connections of the pool. + Len() int + + // Size returns the total number of alive connections of the pool. + Size() int +} + // idleConn implements idle connection. type idleConn struct { c net.Conn @@ -20,13 +44,11 @@ type idleConn struct { // boundedPool implements the Pool interface based on buffered channels. type boundedPool struct { // storage for our net.Conn connections - mu sync.Mutex + mu sync.RWMutex conns chan *idleConn - idleTimeout time.Duration - waitTimeout time.Duration - - total int32 + total chan struct{} + done chan struct{} // net.Conn generator factory Factory } @@ -35,23 +57,23 @@ type boundedPool struct { type Factory func() (net.Conn, error) // NewBoundedPool returns a new pool based on buffered channels with an initial -// capacity, maximum capacity, idle timeout and timeout to wait for a connection -// from the pool. Factory is used when initial capacity is +// capacity, maximum capacity and maximum idle time that a connection remains +// idle in the connection pool. Factory is used when initial capacity is // greater than zero to fill the pool. A zero initialCap doesn't fill the Pool // until a new Get() is called. During a Get(), If there is no new connection -// available in the pool and total connections is less than the max, a new connection +// available in the pool and total connections is less than maxCap, a new connection // will be created via the Factory() method. Otherwise, the call will block until // a connection is available or the timeout is reached. -func NewBoundedPool(initialCap, maxCap int, idleTimeout, waitTimeout time.Duration, factory Factory) (pool.Pool, error) { +func NewBoundedPool(initialCap, maxCap int, idleTime time.Duration, factory Factory) (Pool, error) { if initialCap < 0 || maxCap <= 0 || initialCap > maxCap { return nil, errors.New("invalid capacity settings") } c := &boundedPool{ - conns: make(chan *idleConn, maxCap), - factory: factory, - idleTimeout: idleTimeout, - waitTimeout: waitTimeout, + conns: make(chan *idleConn, maxCap), + total: make(chan struct{}, maxCap), + done: make(chan struct{}), + factory: factory, } // create initial connections, if something goes wrong, @@ -63,67 +85,66 @@ func NewBoundedPool(initialCap, maxCap int, idleTimeout, waitTimeout time.Durati return nil, fmt.Errorf("factory is not able to fill the pool: %s", err) } c.conns <- &idleConn{c: conn, t: time.Now()} - atomic.AddInt32(&c.total, 1) + c.total <- struct{}{} } + go c.pruneIdleConns(idleTime) return c, nil } -func (c *boundedPool) getConns() chan *idleConn { - c.mu.Lock() +func (c *boundedPool) getConnsAndFactory() (chan *idleConn, Factory) { + c.mu.RLock() conns := c.conns - c.mu.Unlock() - return conns + factory := c.factory + c.mu.RUnlock() + return conns, factory } // Get implements the Pool interfaces Get() method. If there is no new // connection available in the pool, a new connection will be created via the // Factory() method. 
func (c *boundedPool) Get() (net.Conn, error) { - conns := c.getConns() + conns, factory := c.getConnsAndFactory() if conns == nil { - return nil, pool.ErrClosed + return nil, ErrClosed } // Try and grab a connection from the pool - for { + // Wrap our connections with our custom net.Conn implementation (wrapConn + // method) that puts the connection back to the pool if it's closed. + select { + case conn := <-conns: + if conn == nil { + return nil, ErrClosed + } + return c.wrapConn(conn.c), nil + default: + // Could not get connection, can we create a new one? + c.mu.RLock() select { - case conn := <-conns: - if conn == nil { - return nil, pool.ErrClosed + case c.total <- struct{}{}: + c.mu.RUnlock() + conn, err := factory() + if err != nil { + <-c.total + return nil, err } - if timeout := c.idleTimeout; timeout > 0 { - if conn.t.Add(timeout).Before(time.Now()) { - // Close the connection when idle longer than the specified duration - conn.c.Close() - atomic.AddInt32(&c.total, -1) - continue - } - } - return c.wrapConn(conn.c), nil + return c.wrapConn(conn), nil default: - // Could not get connection, can we create a new one? - if atomic.LoadInt32(&c.total) < maxConnections { - conn, err := c.factory() - if err != nil { - return nil, err - } - atomic.AddInt32(&c.total, 1) - return c.wrapConn(conn), nil - } + c.mu.RUnlock() } + } - // The pool was empty and we couldn't create a new one to - // retry until one is free or we timeout - select { - case conn := <-conns: - if conn == nil { - return nil, pool.ErrClosed - } - return c.wrapConn(conn.c), nil - case <-time.After(c.waitTimeout): - return nil, fmt.Errorf("timed out waiting for free connection") + // The pool was empty and we couldn't create a new one to + // retry until one is free or we timeout + select { + case conn := <-conns: + if conn == nil { + return nil, ErrClosed } + return c.wrapConn(conn.c), nil + case <-time.After(PoolWaitTimeout): + return nil, errors.New("timed out waiting for free connection") } } @@ -134,8 +155,8 @@ func (c *boundedPool) put(conn net.Conn) error { return errors.New("connection is nil. rejecting") } - c.mu.Lock() - defer c.mu.Unlock() + c.mu.RLock() + defer c.mu.RUnlock() if c.conns == nil { // pool is closed, close passed connection @@ -149,15 +170,17 @@ func (c *boundedPool) put(conn net.Conn) error { return nil default: // pool is full, close passed connection - atomic.AddInt32(&c.total, -1) + <-c.total return conn.Close() } } func (c *boundedPool) Close() { c.mu.Lock() - conns := c.conns + conns, total, done := c.conns, c.total, c.done c.conns = nil + c.total = nil + c.done = nil c.factory = nil c.mu.Unlock() @@ -169,11 +192,71 @@ func (c *boundedPool) Close() { for conn := range conns { conn.c.Close() } + close(total) + close(done) +} + +func (c *boundedPool) Len() int { + conns, _ := c.getConnsAndFactory() + return len(conns) +} + +func (c *boundedPool) Size() int { + c.mu.RLock() + defer c.mu.RUnlock() + return len(c.total) } -func (c *boundedPool) Len() int { return len(c.getConns()) } +// pruneIdleConns prunes idle connections. 
+func (c *boundedPool) pruneIdleConns(idleTime time.Duration) { + if idleTime <= 0 { + return + } + ticker := time.NewTicker(idleTime) + defer ticker.Stop() + for { + c.mu.RLock() + done := c.done + c.mu.RUnlock() + select { + case <-done: + return + case <-ticker.C: + conns, _ := c.getConnsAndFactory() + if conns == nil { + return + } + if len(conns) == 0 { + continue + } + var newConns []*idleConn + for { + select { + case conn := <-conns: + if conn.t.Add(idleTime).Before(time.Now()) { + <-c.total + conn.c.Close() + } else { + newConns = append(newConns, conn) + } + default: + goto DONE + } + } + DONE: + if len(newConns) > 0 { + c.mu.RLock() + for _, conn := range newConns { + c.conns <- conn + } + c.mu.RUnlock() + newConns = nil + } + } + } +} -// newConn wraps a standard net.Conn to a poolConn net.Conn. +// wrapConn wraps a standard net.Conn to a poolConn net.Conn. func (c *boundedPool) wrapConn(conn net.Conn) net.Conn { p := &pooledConn{c: c} p.Conn = conn @@ -196,6 +279,7 @@ func (p *pooledConn) Close() error { if p.unusable { if p.Conn != nil { + <-p.c.total return p.Conn.Close() } return nil @@ -208,5 +292,10 @@ func (p *pooledConn) MarkUnusable() { p.mu.Lock() p.unusable = true p.mu.Unlock() - atomic.AddInt32(&p.c.total, -1) +} + +func MarkUnusable(conn net.Conn) { + if pc, ok := conn.(*pooledConn); ok { + pc.MarkUnusable() + } } diff --git a/coordinator/pool_test.go b/coordinator/pool_test.go new file mode 100644 index 0000000..105061a --- /dev/null +++ b/coordinator/pool_test.go @@ -0,0 +1,475 @@ +package coordinator + +import ( + "log" + "math/rand" + "net" + "sync" + "sync/atomic" + "testing" + "time" +) + +var ( + InitialCap = 5 + MaximumCap = 30 + IdleTime = 60 * time.Second + network = "tcp" + address = "127.0.0.1:7777" + factory = func() (net.Conn, error) { return net.Dial(network, address) } +) + +func init() { + // used for factory function + go simpleTCPServer() + time.Sleep(time.Millisecond * 300) // wait until tcp server has been settled + + rand.New(rand.NewSource(time.Now().UTC().UnixNano())) +} + +func TestNew(t *testing.T) { + p, err := newBoundedPool() + if err != nil { + t.Errorf("New pool error: %s", err) + } + defer p.Close() +} + +func TestPool_Get_Impl(t *testing.T) { + p, _ := newBoundedPool() + defer p.Close() + + conn, err := p.Get() + if err != nil { + t.Errorf("Get error: %s", err) + } + + _, ok := conn.(*pooledConn) + if !ok { + t.Errorf("Conn is not of type poolConn") + } +} + +func TestPool_Get(t *testing.T) { + p, _ := newBoundedPool() + defer p.Close() + + _, err := p.Get() + if err != nil { + t.Errorf("Get error: %s", err) + } + + // after one get, current capacity should be lowered by one. + if p.Len() != (InitialCap - 1) { + t.Errorf("Get error. Expecting %d, got %d", InitialCap-1, p.Len()) + } + + // get them all + var wg sync.WaitGroup + for i := 0; i < (InitialCap - 1); i++ { + wg.Add(1) + go func() { + defer wg.Done() + _, err := p.Get() + if err != nil { + t.Errorf("Get error: %s", err) + } + }() + } + wg.Wait() + + if p.Len() != 0 { + t.Errorf("Get error. 
Expecting %d, got %d", InitialCap-1, p.Len()) + } + + _, err = p.Get() + if err != nil { + t.Errorf("Get error: %s", err) + } +} + +func TestPool_Put(t *testing.T) { + p, err := NewBoundedPool(0, 30, IdleTime, factory) + if err != nil { + t.Error(err) + } + defer p.Close() + + // get/create from the pool + conns := make([]net.Conn, MaximumCap) + for i := 0; i < MaximumCap; i++ { + conn, _ := p.Get() + conns[i] = conn + } + + // now put them all back + for _, conn := range conns { + conn.Close() + } + + if p.Len() != MaximumCap { + t.Errorf("Put error len. Expecting %d, got %d", 1, p.Len()) + } + + conn, _ := p.Get() + p.Close() // close pool + + conn.Close() // try to put into a full pool + if p.Len() != 0 { + t.Errorf("Put error. Closed pool shouldn't allow to put connections.") + } +} + +func TestPool_PutUnusableConn(t *testing.T) { + p, _ := newBoundedPool() + defer p.Close() + + // ensure pool is not empty + conn, _ := p.Get() + conn.Close() + + poolSize := p.Len() + conn, _ = p.Get() + conn.Close() + if p.Len() != poolSize { + t.Errorf("Pool size is expected to be equal to initial size") + } + + conn, _ = p.Get() + MarkUnusable(conn) + conn.Close() + if p.Len() != poolSize-1 { + t.Errorf("Pool size is expected to be initial_size - 1. Expecting %d, got %d", poolSize-1, p.Len()) + } +} + +func TestPool_UsedCapacity(t *testing.T) { + p, _ := newBoundedPool() + defer p.Close() + + if p.Len() != InitialCap { + t.Errorf("InitialCap error. Expecting %d, got %d", InitialCap, p.Len()) + } +} + +func TestPool_Close(t *testing.T) { + p, _ := newBoundedPool() + + // now close it and test all cases we are expecting. + p.Close() + + c := p.(*boundedPool) + + if c.conns != nil { + t.Errorf("Close error, conns channel should be nil") + } + + if c.factory != nil { + t.Errorf("Close error, factory should be nil") + } + + _, err := p.Get() + if err == nil { + t.Errorf("Close error, get conn should return an error") + } + + if p.Len() != 0 { + t.Errorf("Close error used capacity. 
Expecting 0, got %d", p.Len()) + } +} + +func TestPoolConcurrent(t *testing.T) { + p, _ := newBoundedPool() + pipe := make(chan net.Conn, 0) + + go func() { + p.Close() + }() + + for i := 0; i < MaximumCap; i++ { + go func() { + conn, _ := p.Get() + pipe <- conn + }() + + go func() { + conn := <-pipe + if conn == nil { + return + } + conn.Close() + }() + } +} + +func TestPoolWriteRead(t *testing.T) { + p, _ := NewBoundedPool(0, 30, IdleTime, factory) + defer p.Close() + + conn, _ := p.Get() + + msg := "hello" + _, err := conn.Write([]byte(msg)) + if err != nil { + t.Error(err) + } +} + +func TestPoolConcurrent2(t *testing.T) { + p, _ := NewBoundedPool(0, 30, IdleTime, factory) + defer p.Close() + + var wg sync.WaitGroup + + go func() { + for i := 0; i < 10; i++ { + wg.Add(1) + go func(i int) { + conn, _ := p.Get() + time.Sleep(time.Millisecond * time.Duration(rand.Intn(100))) + conn.Close() + wg.Done() + }(i) + } + }() + + for i := 0; i < 10; i++ { + wg.Add(1) + go func(i int) { + conn, _ := p.Get() + time.Sleep(time.Millisecond * time.Duration(rand.Intn(100))) + conn.Close() + wg.Done() + }(i) + } + + wg.Wait() +} + +func TestPoolConcurrent3(t *testing.T) { + p, _ := NewBoundedPool(0, 1, IdleTime, factory) + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + p.Close() + wg.Done() + }() + + if conn, err := p.Get(); err == nil { + conn.Close() + } + + wg.Wait() +} + +func TestPool_GetWaitTimeout(t *testing.T) { + maxCap := 1 + p, err := NewBoundedPool(0, maxCap, IdleTime, factory) + if err != nil { + t.Errorf("New pool error: %s", err) + } + defer p.Close() + + for i := 0; i < maxCap+1; i++ { + _, err := p.Get() + if i < maxCap { + if err != nil { + t.Errorf("Expected nil, got: %s", err) + } + } else { + if err == nil || err.Error() != "timed out waiting for free connection" { + t.Errorf("Get timeout error. 
Expecting ‘timed out waiting for free connection’, got %s", err) + } + } + } +} + +func TestPool_PruneIdleConns(t *testing.T) { + p, _ := NewBoundedPool(InitialCap, MaximumCap, 1*time.Second, factory) + defer p.Close() + + var wg sync.WaitGroup + numWorkers := MaximumCap * 2 + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + conn, err := p.Get() + if err != nil { + return + } + defer conn.Close() + time.Sleep(time.Millisecond * time.Duration(rand.Intn(1000))) + }() + } + + wg.Wait() + time.Sleep(2 * time.Second) + + if p.Len() != 0 { + t.Errorf("Idle connections expected: 0, got: %d", p.Len()) + } + + if p.Size() != 0 { + t.Errorf("Opened connections expected: 0, got %d", p.Size()) + } +} + +func TestPoolMaximumCapacity(t *testing.T) { + var success int32 + p, _ := NewBoundedPool(InitialCap, MaximumCap, IdleTime, factory) + defer p.Close() + + var wg sync.WaitGroup + numWorkers := MaximumCap * 2 + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + _, err := p.Get() + if err != nil { + return + } + atomic.AddInt32(&success, 1) + }() + } + + wg.Wait() + + if atomic.LoadInt32(&success) != int32(MaximumCap) { + t.Errorf("Opened success connections expected: %d, got: %d", MaximumCap, success) + } + + if p.Size() != MaximumCap { + t.Errorf("Opened connections expected: %d, got %d", MaximumCap, p.Size()) + } +} + +func TestPoolMaximumCapacity_Close(t *testing.T) { + var success int32 + p, _ := NewBoundedPool(InitialCap, MaximumCap, IdleTime, factory) + defer p.Close() + + var wg sync.WaitGroup + numWorkers := MaximumCap * 2 + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + conn, err := p.Get() + if err != nil { + t.Errorf("Expected nil, got: %s", err) + return + } + defer conn.Close() + + atomic.AddInt32(&success, 1) + time.Sleep(time.Millisecond * time.Duration(rand.Intn(1000))) + }() + } + + wg.Wait() + + if atomic.LoadInt32(&success) != int32(numWorkers) { + t.Errorf("Opened success connections expected: %d, got: %d", numWorkers, atomic.LoadInt32(&success)) + } +} + +func TestPool_Get1(t *testing.T) { + p, _ := NewBoundedPool(0, 1, IdleTime, factory) + defer p.Close() + + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + conn, err := p.Get() + if err != nil { + t.Errorf("Expected nil, got: %s", err) + return + } + defer conn.Close() + + time.Sleep(time.Millisecond * 100) + }() + } + wg.Wait() +} + +func TestPool_ClosedConnectionMarkUnusable(t *testing.T) { + p, _ := NewBoundedPool(1, 1, IdleTime, factory) + defer p.Close() + + conn, _ := p.Get() + + conn.(*pooledConn).MarkUnusable() + conn.Close() + + conn, err := p.Get() + if err != nil { + t.Errorf("Expected nil, got: %s", err) + } + if conn == nil { + t.Errorf("Expected non-nil connection") + } + conn.Close() +} + +func TestPool_FailedFactoryNotOpenConnections(t *testing.T) { + factory := func() (net.Conn, error) { + return net.DialTimeout("tcp", "localhost:1234", time.Millisecond) + } + + maxCap := 2 + p, err := NewBoundedPool(0, maxCap, IdleTime, factory) + if err != nil { + t.Errorf("New pool error: %s", err) + } + defer p.Close() + + for i := 0; i < maxCap+1; i++ { + conn, err := p.Get() + if err == nil && conn != nil { + conn.Close() + } + } + + if p.Size() != 0 { + t.Errorf("Failed factory shouldn't open connections. 
Expecting 0, got %d", p.Size()) + } +} + +func TestConn_Impl(t *testing.T) { + var _ net.Conn = new(pooledConn) +} + +func newBoundedPool() (Pool, error) { + return NewBoundedPool(InitialCap, MaximumCap, IdleTime, factory) +} + +func simpleTCPServer() { + l, err := net.Listen(network, address) + if err != nil { + log.Fatal(err) + } + defer l.Close() + + for { + conn, err := l.Accept() + if err != nil { + log.Fatal(err) + } + + go func() { + buffer := make([]byte, 256) + conn.Read(buffer) + }() + } +} diff --git a/go.mod b/go.mod index 43ec609..894974a 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,6 @@ require ( golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba golang.org/x/tools v0.16.0 google.golang.org/grpc v1.56.3 - gopkg.in/fatih/pool.v2 v2.0.0 ) require ( diff --git a/go.sum b/go.sum index 7768a70..e809e15 100644 --- a/go.sum +++ b/go.sum @@ -886,8 +886,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= -gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=