Add support for multihop payments #132
Changes from all commits: bc05e65, 9169827, 2275d89, 53b4e21, f02334b, 237db2f, 9d9f766, 6382274, f1d69bb, fcf4120, 0699615
@@ -49,7 +49,7 @@ func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) err
	if err != nil {
		panic(err)
	}

	// Test
	role := peer.GetRole(seq, runConfig)
	// We use the sequence in the random source so we generate a unique key even if another client is running at the same time
	privateKey, err := crypto.GenerateKey()
@@ -66,10 +66,10 @@ func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) err
	ms := p2pms.NewMessageService(ipAddress, port, pk)
	client.MustSignalAndWait(ctx, "msStarted", runEnv.TestInstanceCount)

-	mePeerInfo := peer.PeerInfo{PeerInfo: p2pms.PeerInfo{Address: address, IpAddress: ipAddress, Port: port, Id: ms.Id()}, Role: role}
+	mePeerInfo := peer.PeerInfo{PeerInfo: p2pms.PeerInfo{Address: address, IpAddress: ipAddress, Port: port, Id: ms.Id()}, Role: role, Seq: seq}
	me := peer.MyInfo{PeerInfo: mePeerInfo, PrivateKey: *privateKey}

-	runEnv.RecordMessage("I am %+v", me)
+	runEnv.RecordMessage("I am address:%s role:%d seq:%d", me.Address, me.Role, me.Seq)

	utils.RecordRunInfo(me, runConfig, runEnv.R())
@@ -104,13 +104,8 @@ func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) err

	client.MustSignalAndWait(ctx, "message service connected", runEnv.TestInstanceCount)

-	ledgerIds := []types.Destination{}
-
-	if me.Role != peer.Hub {
-		// Create ledger channels with all the hubs
-		ledgerIds = utils.CreateLedgerChannels(nClient, cm, utils.FINNEY_IN_WEI, me.PeerInfo, peers)
-	}
+	// Create ledger channels with all the hubs
+	ledgerIds := utils.CreateLedgerChannels(nClient, cm, utils.FINNEY_IN_WEI, me.PeerInfo, peers)

	client.MustSignalAndWait(ctx, sync.State("ledgerDone"), runEnv.TestInstanceCount)
@@ -121,7 +116,7 @@ func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) err
	payees = append(payees, peer.FilterByRole(peers, peer.PayerPayee)...)

	createVirtualPaymentsJob := func() {
-		randomHub := utils.SelectRandom(hubs)
+		selectedHubs := utils.SelectRandomHubs(hubs, int(runConfig.NumIntermediaries))
		randomPayee := utils.SelectRandom(payees)

		var channelId types.Destination
@@ -140,12 +135,12 @@ func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) err
			},
		}}

-		r := nClient.CreateVirtualPaymentChannel([]types.Address{randomHub.Address}, randomPayee.Address, 0, outcome)
+		r := nClient.CreateVirtualPaymentChannel(selectedHubs, randomPayee.Address, 0, outcome)

		channelId = r.ChannelId
		cm.WaitForObjectivesToComplete([]protocols.ObjectiveId{r.Id})

-		runEnv.RecordMessage("Opened virtual channel %s with %s using hub %s", utils.Abbreviate(channelId), utils.Abbreviate(randomPayee.Address), utils.Abbreviate(randomHub.Address))
+		runEnv.RecordMessage("Opened virtual channel %s with %s using hubs %s", utils.Abbreviate(channelId), utils.Abbreviate(randomPayee.Address), utils.AbbreviateSlice(selectedHubs))

		paymentAmount := big.NewInt(utils.KWEI_IN_WEI)
		nClient.Pay(r.ChannelId, paymentAmount)
@@ -178,16 +173,29 @@ func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) err

		// Run the job(s)
		utils.RunJobs(createVirtualPaymentsJob, runConfig.PaymentTestDuration, int64(runConfig.ConcurrentPaymentJobs))

+		// We wait to allow hubs to finish processing messages and close out their channels.
+		// The duration we wait is based on the payment test duration and the amount of concurrent jobs.
+		toSleep := runConfig.PaymentTestDuration / 10 * time.Duration(runConfig.ConcurrentPaymentJobs)
+		// Restrict the sleep duration to be between 1 and 30 seconds
+		if toSleep > 30*time.Second {
+			toSleep = 30 * time.Second
+		}
+		if toSleep < 1*time.Second {
+			toSleep = 1 * time.Second
+		}
+		runEnv.RecordMessage("Waiting %s before closing ledger channels", toSleep)
+		time.Sleep(toSleep)
	}
	client.MustSignalAndWait(ctx, "paymentsDone", runEnv.TestInstanceCount)

-	if me.Role != peer.Hub {
-		// TODO: Closing a ledger channel too soon after closing a virtual channel seems to fail.
-		time.Sleep(time.Duration(250 * time.Millisecond))
+	// TODO: Closing as a hub seems to fail: https://github.com/statechannels/go-nitro-testground/issues/134
+	// For now we avoid closing as a hub
+	if len(ledgerIds) > 0 && me.Role != peer.Hub {
		// Close all the ledger channels with the hub
		oIds := []protocols.ObjectiveId{}
		for _, ledgerId := range ledgerIds {
-			runEnv.RecordMessage("Closing ledger %s", utils.Abbreviate(ledgerId))
Why drop this?

I was cleaning up some debug log statements and got a little overzealous. I may leave this out though, as I found it a bit spammy.
			oId := nClient.CloseLedgerChannel(ledgerId)
			oIds = append(oIds, oId)
		}
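For a rough sense of the wait computed in the hunk above, here is a self-contained restatement of the clamp as a hypothetical helper (clampSleep is not part of this PR, and the values passed in main are example assumptions rather than values from the run config):

package main

import (
	"fmt"
	"time"
)

// clampSleep mirrors the wait logic added above: a tenth of the payment test
// duration per concurrent job, restricted to the range [1s, 30s].
func clampSleep(paymentTestDuration time.Duration, concurrentJobs int64) time.Duration {
	toSleep := paymentTestDuration / 10 * time.Duration(concurrentJobs)
	if toSleep > 30*time.Second {
		toSleep = 30 * time.Second
	}
	if toSleep < 1*time.Second {
		toSleep = 1 * time.Second
	}
	return toSleep
}

func main() {
	// Example values only; they are not taken from the test's run config.
	fmt.Println(clampSleep(60*time.Second, 4)) // 24s, within the bounds
	fmt.Println(clampSleep(5*time.Second, 1))  // 1s, clamped up from 500ms
	fmt.Println(clampSleep(5*time.Minute, 8))  // 30s, clamped down from 4m0s
}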
@@ -7,6 +7,7 @@ import (
	"math/big"
	"math/rand"
	"runtime/debug"
+	"sort"

	s "sync"
	"sync/atomic"
@@ -95,6 +96,19 @@ func RunJobs(job func(), duration time.Duration, concurrencyTarget int64) {
	wg.Wait()
}

+// AbbreviateSlice returns a string with abbreviated elements of the given slice.
+func AbbreviateSlice[U ~[]T, T fmt.Stringer](col U) string {
+	abbreviated := ""
+	for i, s := range col {
+		if i > 0 {
+			abbreviated += ", "
+		}
+		abbreviated += s.String()[0:8]
+	}
+
+	return abbreviated
+}
+
// Abbreviate shortens a string to 8 characters and adds an ellipsis.
func Abbreviate(s fmt.Stringer) string {
	return s.String()[0:8] + ".."
@@ -128,16 +142,51 @@ func SelectRandom[U ~[]T, T any](collection U) T {
	return collection[randomIndex]
}

+// SelectRandomHubs selects numHub hubs randomly from hubs
+func SelectRandomHubs(hubs []peer.PeerInfo, numHubs int) []types.Address {
+	// Copy and shuffle the slice of hubs
+	shuffled := make([]peer.PeerInfo, len(hubs))
+	copy(shuffled, hubs)
+	rand.Shuffle(len(shuffled),
+		func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
+
+	// Select the amount of hubs we want
+	selected := make([]peer.PeerInfo, numHubs)
+	for i := 0; i < numHubs; i++ {
+		selected[i] = shuffled[i]
+	}
+
+	// TODO: Virtual defunding seems to fail if intermediaries are not in "order".
To co-ordinate who initiates the ledger channel between two hubs I've introduced a simple rule: the participant with the lowest sequence number of the two hubs is responsible for creating the ledger channel. It seems that the intermediaries need to be passed in the correct order, based on the ledger initiator (I think?). If I don't do this I get this error:

Let's get an issue open to track this.
+	// The order seems to be determined by the initiator of the ledger channel.
+	// Since we use the sequence number to determine the initiator, we can just sort on sequence number.
+	sort.Slice(selected, func(i, j int) bool {
+		return selected[i].Seq < selected[j].Seq
+	})
+
+	// Convert to addresses for the callers convenience
+	selectedAddresses := make([]types.Address, numHubs)
+	for i, hub := range selected {
+		selectedAddresses[i] = hub.Address
+	}
+	return selectedAddresses
+}

// CreateLedgerChannels creates a directly funded ledger channel with each hub in hubs.
// The funding for each channel will be set to amount for both participants.
// This function blocks until all ledger channels have successfully been created.
+// If the participant is a hub we use the participant's Seq to determine the initiator of the ledger channel.
func CreateLedgerChannels(client nitro.Client, cm *CompletionMonitor, amount uint, me peer.PeerInfo, peers []peer.PeerInfo) []types.Destination {
	ids := []protocols.ObjectiveId{}
	cIds := []types.Destination{}
-	for _, p := range peers {
-		if p.Role != peer.Hub {
+	hubs := peer.FilterByRole(peers, peer.Hub)
+	for _, p := range hubs {
+
+		// To co-ordinate creating ledger channels between hubs a hub will
+		// only create a channel with another hub if it has a greater sequence number.
+		if me.Role == peer.Hub && p.Seq <= me.Seq {
+			continue
+		}

		outcome := outcome.Exit{outcome.SingleAssetExit{
			Allocations: outcome.Allocations{
				outcome.Allocation{
@@ -150,7 +199,6 @@ func CreateLedgerChannels(client nitro.Client, cm *CompletionMonitor, amount uin
				},
			},
		}}
-
		r := client.CreateLedgerChannel(p.Address, 0, outcome)
		cIds = append(cIds, r.ChannelId)
		ids = append(ids, r.Id)
Let's have an issue to track this? I agree with @NiloCK that the issues should live in the go-nitro repo.

I think we could use statechannels/go-nitro#883
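To make the ordering question in the TODO and review discussion above easier to follow, here is a minimal sketch of the "lowest sequence number initiates" rule. It is not code from this PR: hubInfo is a simplified stand-in for peer.PeerInfo, and initiatesLedgerChannel is a hypothetical helper expressing the rule.

package main

import (
	"fmt"
	"sort"
)

// hubInfo is a simplified, hypothetical stand-in for peer.PeerInfo:
// just an address label and a testground sequence number.
type hubInfo struct {
	Address string
	Seq     int64
}

// initiatesLedgerChannel expresses the rule from the review comment: of two
// hubs, the one with the lower Seq creates the ledger channel between them.
func initiatesLedgerChannel(a, b hubInfo) bool {
	return a.Seq < b.Seq
}

func main() {
	// Three hubs chosen as intermediaries, in arbitrary (shuffled) order.
	selected := []hubInfo{{"0xCCC", 5}, {"0xAAA", 1}, {"0xBBB", 3}}

	// Sorting by Seq lines the intermediaries up in the same direction the
	// hub-to-hub ledger channels were created, since the lower-Seq hub
	// initiated each of them.
	sort.Slice(selected, func(i, j int) bool { return selected[i].Seq < selected[j].Seq })
	fmt.Println(selected) // [{0xAAA 1} {0xBBB 3} {0xCCC 5}]

	// After sorting, every adjacent pair has its ledger-channel initiator on the left.
	for i := 0; i+1 < len(selected); i++ {
		fmt.Println(initiatesLedgerChannel(selected[i], selected[i+1])) // true, true
	}
}

Under this rule, sorting the selected intermediaries by Seq is what SelectRandomHubs relies on to keep them "in order".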