more verbose logging surrounding payments #11

Merged 1 commit on May 23, 2024

2 changes: 1 addition & 1 deletion htlcswitch/link.go
@@ -2731,7 +2731,7 @@ func (l *channelLink) canSendHtlc(policy models.ForwardingPolicy,
return NewLinkError(&lnwire.FailExpiryTooFar{})
}

l.log.Errorf("Trying to send amt: %v over the channel: %v with bandwidth: %v", amt, l.channel.ChanPoint, l.Bandwidth())
l.log.Infof("Trying to send amt: %v over the channel: %v with bandwidth: %v", amt, l.channel.ChanPoint, l.Bandwidth())
// Check to see if there is enough balance in this channel.
if amt > l.Bandwidth() {
l.log.Warnf("insufficient bandwidth to route htlc: %v is "+
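For context, a minimal sketch of the pattern this hunk demonstrates: the routine "trying to send" message is demoted from error to info level, while a genuine shortfall would still be surfaced at warning level. The sketch uses the standard library's log/slog rather than lnd's subsystem logger, and checkBandwidth with its parameters is a hypothetical stand-in, not code from this repository.

```go
package main

import (
	"errors"
	"log/slog"
	"os"
)

// checkBandwidth mirrors the shape of canSendHtlc's balance check: announcing
// the attempt is informational, an actual shortfall is warned about.
func checkBandwidth(logger *slog.Logger, amtMsat, bandwidthMsat uint64) error {
	logger.Info("trying to send over channel",
		"amt_msat", amtMsat, "bandwidth_msat", bandwidthMsat)

	if amtMsat > bandwidthMsat {
		logger.Warn("insufficient bandwidth to route htlc",
			"amt_msat", amtMsat, "bandwidth_msat", bandwidthMsat)

		return errors.New("insufficient bandwidth")
	}

	return nil
}

func main() {
	// With the handler level at info, the attempt line is emitted without
	// having to enable debug logging for the whole subsystem.
	logger := slog.New(slog.NewTextHandler(os.Stderr,
		&slog.HandlerOptions{Level: slog.LevelInfo}))

	_ = checkBandwidth(logger, 150_000, 100_000)
}
```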
11 changes: 6 additions & 5 deletions routing/pathfind.go
@@ -380,6 +380,7 @@ func getOutgoingBalance(node route.Vertex, outgoingChans map[uint64]struct{},
bandwidthHints bandwidthHints,
g routingGraph) (lnwire.MilliSatoshi, lnwire.MilliSatoshi, error) {

log.Infof("bandwidthHints: %#v", bandwidthHints)
var max, total lnwire.MilliSatoshi
cb := func(channel *channeldb.DirectedChannel) error {
if !channel.OutPolicySet {
@@ -398,7 +399,7 @@ func getOutgoingBalance(node route.Vertex, outgoingChans map[uint64]struct{},
bandwidth, ok := bandwidthHints.availableChanBandwidth(
chanID, 0,
)
log.Errorf("bandwidthHints: %#v, chanID: %v, bandwidth: %v, ok: %v", bandwidthHints, bandwidth, ok)
log.Infof("chanID: %v, bandwidth: %v, ok: %v", chanID, bandwidth, ok)

// If the bandwidth is not available, use the channel capacity.
// This can happen when a channel is added to the graph after
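The comment in this hunk describes the fallback the new log line makes visible: use the bandwidth hint for the channel when one is available, otherwise fall back to the channel's capacity. A hedged sketch of that idea, with a plain map standing in for the bandwidthHints interface (the real availableChanBandwidth call above also takes a second argument):

```go
// bandwidthOrCapacity is a hypothetical helper: return the hinted bandwidth
// for chanID if one exists, otherwise fall back to the channel's capacity,
// e.g. when the channel joined the graph after the hints were gathered.
func bandwidthOrCapacity(hints map[uint64]uint64, chanID, capacityMsat uint64) uint64 {
	if bandwidth, ok := hints[chanID]; ok {
		return bandwidth
	}

	return capacityMsat
}
```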
@@ -973,10 +974,10 @@ func findPath(g *graphParams, r *RestrictParams, cfg *PathFindingConfig,
pathEdges = append(pathEdges, currentNodeWithDist.nextHop)

if splitNeeded && currentNode != source {
log.Errorf("Removing fees - curentNode(dist): %v (%v) nextHop: %v", currentNodeWithDist.node.String(), currentNodeWithDist.dist, currentNodeWithDist.nextHop.ToNodePubKey().String())
log.Errorf("Next hop channel id: %v, FeeBaseMSat: %v, FeeProportionalMillionths: %v", (currentNodeWithDist.nextHop).ChannelID, (currentNodeWithDist.nextHop).FeeBaseMSat, (currentNodeWithDist.nextHop).FeeProportionalMillionths)
log.Infof("Removing fees - curentNode(dist): %v (%v) nextHop: %v", currentNodeWithDist.node.String(), currentNodeWithDist.dist, currentNodeWithDist.nextHop.ToNodePubKey().String())
log.Infof("Next hop channel id: %v, FeeBaseMSat: %v, FeeProportionalMillionths: %v", (currentNodeWithDist.nextHop).ChannelID, (currentNodeWithDist.nextHop).FeeBaseMSat, (currentNodeWithDist.nextHop).FeeProportionalMillionths)
fees := (currentNodeWithDist.nextHop).ComputeFeeFromIncoming(amt)
log.Errorf("Removing fees - before: %v fee: %v after: %v", amt, fees, amt-fees)
log.Infof("Removing fees - before: %v fee: %v after: %v", amt, fees, amt-fees)
amt -= fees + 1
}
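The fields logged here, FeeBaseMSat and FeeProportionalMillionths, are the two components of the standard BOLT #7 forwarding fee. A hedged sketch of that formula follows; ComputeFeeFromIncoming in the hunk works backwards from the incoming amount, so this illustrates the fee components rather than reproducing that method. For example, with a base fee of 1,000 msat and a rate of 100 ppm, forwarding 200,000,000 msat costs 1,000 + 20,000 = 21,000 msat.

```go
// forwardingFee computes the BOLT #7 fee a hop charges to forward
// amtToForwardMsat. Amounts are in millisatoshis; ratePPM is in parts per
// million of the forwarded amount.
func forwardingFee(baseMsat, ratePPM, amtToForwardMsat uint64) uint64 {
	return baseMsat + amtToForwardMsat*ratePPM/1_000_000
}
```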

@@ -1004,7 +1005,7 @@ func findPath(g *graphParams, r *RestrictParams, cfg *PathFindingConfig,
// findPath, and avoid using ChannelEdgePolicy altogether.
pathEdges[len(pathEdges)-1].ToNodeFeatures = features

log.Debugf("Found route: probability=%v, hops=%v, fee=%v",
log.Infof("Found route: probability=%v, hops=%v, fee=%v",
distance[source].probability, len(pathEdges),
distance[source].amountToReceive-amt)
var e error
35 changes: 24 additions & 11 deletions routing/payment_lifecycle.go
@@ -128,7 +128,7 @@ func (p *paymentLifecycle) resumePayment() ([32]byte, *route.Route, error) {
paySession: p.paySession,
}

log.Errorf("Resuming payment for hash %v", p.identifier)
log.Infof("Resuming payment for hash %v", p.identifier)

// When the payment lifecycle loop exits, we make sure to signal any
// sub goroutine of the shardHandler to exit, then wait for them to
@@ -140,13 +140,14 @@ func (p *paymentLifecycle) resumePayment() ([32]byte, *route.Route, error) {
// lifecycle loop below.
payment, _, err := p.fetchPaymentState()
if err != nil {
log.Errorf("Failed to fetch payment state: %v", err)
return [32]byte{}, nil, err
}

for _, a := range payment.InFlightHTLCs() {
a := a

log.Errorf("Resuming payment shard %v for hash %v",
log.Infof("Resuming payment shard %v for hash %v",
a.AttemptID, p.identifier)

shardHandler.collectResultAsync(&a.HTLCAttemptInfo)
@@ -159,6 +160,7 @@ lifecycle:
// Start by quickly checking if there are any outcomes already
// available to handle before we reevaluate our state.
if err := shardHandler.checkShards(); err != nil {
log.Warnf("Got error checking shards: %v", err)
return [32]byte{}, nil, err
}

@@ -169,10 +171,11 @@ lifecycle:
// state is consistent as a whole.
payment, currentState, err := p.fetchPaymentState()
if err != nil {
log.Errorf("Failed to fetch payment state: %v", err)
return [32]byte{}, nil, err
}

log.Debugf("Payment %v in state terminate=%v, "+
log.Infof("Payment %v in state terminate=%v, "+
"active_shards=%v, rem_value=%v, fee_limit=%v",
p.identifier, currentState.terminate,
currentState.numShardsInFlight,
@@ -205,6 +208,7 @@ lifecycle:
return a.Settle.Preimage, &a.Route, nil
}

log.Infof("Payment failed with reason: %v", *payment.FailureReason)
// Payment failed.
return [32]byte{}, nil, *payment.FailureReason

@@ -216,6 +220,7 @@ lifecycle:
// outcome to be available before re-evaluating our
// state.
if err := shardHandler.waitForShard(); err != nil {
log.Warnf("Got error waiting for shard: %v", err)
return [32]byte{}, nil, err
}
continue lifecycle
@@ -271,7 +276,7 @@ lifecycle:
// send the payment, so mark it failed with no route.
if currentState.numShardsInFlight == 0 {
failureCode := routeErr.FailureReason()
log.Debugf("Marking payment %v permanently "+
log.Infof("Marking payment %v permanently "+
"failed with no route: %v",
p.identifier, failureCode)

@@ -288,6 +293,7 @@ lifecycle:
// We still have active shards, we'll wait for an
// outcome to be available before retrying.
if err := shardHandler.waitForShard(); err != nil {
log.Warnf("Got error waiting for shard: %v", err)
return [32]byte{}, nil, err
}
continue lifecycle
@@ -313,6 +319,9 @@ lifecycle:
continue lifecycle

case err != nil:
log.Warnf("Launch shard failed %v for "+
"payment %v: %v", attempt.AttemptID,
p.identifier, err)
return [32]byte{}, nil, err
}

@@ -825,7 +834,7 @@ func (p *shardHandler) handleSendError(attempt *channeldb.HTLCAttemptInfo,
}

if sendErr == htlcswitch.ErrUnreadableFailureMessage {
log.Tracef("Unreadable failure when sending htlc")
log.Infof("Unreadable failure when sending htlc")

return reportFail(nil, nil)
}
@@ -863,8 +872,8 @@ func (p *shardHandler) handleSendError(attempt *channeldb.HTLCAttemptInfo,
return failPayment(&internalErrorReason, sendErr)
}

log.Tracef("Node=%v reported failure when sending htlc",
failureSourceIdx)
log.Infof("Node=%v reported failure when sending htlc, code: %v",
failureSourceIdx, failureMessage.Code())

return reportFail(&failureSourceIdx, failureMessage)
}
Expand Down Expand Up @@ -928,16 +937,20 @@ func (p *shardHandler) handleFailureMessage(rt *route.Route,
if !p.paySession.UpdateAdditionalEdge(
update, errSource, policy) {

log.Debugf("Invalid channel update received: node=%v",
errVertex)
log.Infof("Invalid channel update received: node=%v, "+
"scid=%v, base=%v, rate=%v, cltv=%v", errVertex,
update.ShortChannelID.String(), update.BaseFee,
update.FeeRate, update.TimeLockDelta)
}
return nil
}

// Apply channel update to the channel edge policy in our db.
if !p.router.applyChannelUpdate(update) {
log.Debugf("Invalid channel update received: node=%v",
errVertex)
log.Infof("Invalid channel update received: node=%v, "+
"scid=%v, base=%v, rate=%v, cltv=%v", errVertex,
update.ShortChannelID.String(), update.BaseFee,
update.FeeRate, update.TimeLockDelta)
}
return nil
}
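The last hunk above widens the invalid-channel-update message from just the node to the update's short channel ID, fee parameters, and CLTV delta. Below is a self-contained sketch of that structured log line, using a stand-in struct for the handful of lnwire.ChannelUpdate fields the diff reads; the field comments are assumptions for illustration, not lnd documentation.

```go
package example

import "log/slog"

// channelUpdateSummary stands in for the lnwire.ChannelUpdate fields the new
// log lines read; it is not the real wire type.
type channelUpdateSummary struct {
	ShortChannelID string // human-readable scid
	BaseFee        uint32 // base forwarding fee, millisatoshis
	FeeRate        uint32 // proportional fee, parts per million
	TimeLockDelta  uint16 // CLTV delta, blocks
}

// logInvalidUpdate emits the same fields the hunk above adds, so a rejected
// update can be correlated with the advertising node and channel.
func logInvalidUpdate(logger *slog.Logger, node string, u channelUpdateSummary) {
	logger.Info("invalid channel update received",
		"node", node,
		"scid", u.ShortChannelID,
		"base_msat", u.BaseFee,
		"rate_ppm", u.FeeRate,
		"cltv_delta", u.TimeLockDelta)
}
```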
21 changes: 12 additions & 9 deletions routing/payment_session.go
@@ -294,7 +294,7 @@ func (p *paymentSession) RequestRoute(maxAmt, feeLimit lnwire.MilliSatoshi,
// client-side MTU that we'll attempt to respect at all times.
maxShardActive := p.payment.MaxShardAmt != nil
if maxShardActive && maxAmt > *p.payment.MaxShardAmt {
p.log.Debug("Clamping payment attempt from %v to %v due to "+
p.log.Infof("Clamping payment attempt from %v to %v due to "+
"max shard size of %v", maxAmt,
*p.payment.MaxShardAmt, maxAmt)

@@ -319,7 +319,7 @@ func (p *paymentSession) RequestRoute(maxAmt, feeLimit lnwire.MilliSatoshi,
return nil, err
}

p.log.Errorf("pathfinding for amt=%v", maxAmt)
p.log.Infof("pathfinding for amt=%v, feelimit=%v", maxAmt, feeLimit)

sourceVertex := routingGraph.sourceNode()

@@ -343,14 +343,14 @@ func (p *paymentSession) RequestRoute(maxAmt, feeLimit lnwire.MilliSatoshi,
// Don't split if this is a legacy payment without mpp
// record.
if p.payment.PaymentAddr == nil {
p.log.Debugf("not splitting because payment " +
p.log.Infof("not splitting because payment " +
"address is unspecified")

return nil, errNoPathFound
}

if p.payment.DestFeatures == nil {
p.log.Debug("Not splitting because " +
p.log.Infof("Not splitting because " +
"destination DestFeatures is nil")
return nil, errNoPathFound
}
@@ -359,7 +359,7 @@ func (p *paymentSession) RequestRoute(maxAmt, feeLimit lnwire.MilliSatoshi,
if !destFeatures.HasFeature(lnwire.MPPOptional) &&
!destFeatures.HasFeature(lnwire.AMPOptional) {

p.log.Debug("not splitting because " +
p.log.Infof("not splitting because " +
"destination doesn't declare MPP or AMP")

return nil, errNoPathFound
@@ -368,7 +368,7 @@ func (p *paymentSession) RequestRoute(maxAmt, feeLimit lnwire.MilliSatoshi,
// No splitting if this is the last shard.
isLastShard := activeShards+1 >= p.payment.MaxParts
if isLastShard {
p.log.Errorf("not splitting because shard "+
p.log.Infof("not splitting because shard "+
"limit %v has been reached",
p.payment.MaxParts)

@@ -377,10 +377,10 @@

var e extendedNoRouteError
if errors.As(err, &e) {
p.log.Errorf("Splitting amount: %v and using the smaller amount: %v", maxAmt, e.amt)
p.log.Infof("Splitting amount: %v and using the smaller amount: %v", maxAmt, e.amt)
maxAmt = e.amt
} else {
p.log.Errorf("Splitting amount: %v and divide by 2: %v", maxAmt, maxAmt/2)
p.log.Infof("Splitting amount: %v and divide by 2: %v", maxAmt, maxAmt/2)
// This is where the magic happens. If we can't find a
// route, try it for half the amount.
maxAmt /= 2
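Most of the log lines in this file instrument RequestRoute's splitting behaviour: when pathfinding fails, retry with the smaller amount suggested by the error if one is available, otherwise halve the amount, until a route is found or the shard limit is reached. Below is a simplified, hypothetical sketch of that loop; splitUntilRoutable, findRoute, and maxParts are stand-in names, and the real code also checks MPP/AMP support and local balance.

```go
// splitUntilRoutable is a simplified model of the retry-with-smaller-amount
// behaviour described by the log lines above; it is not lnd's implementation.
func splitUntilRoutable(amtMsat uint64, activeShards, maxParts int,
	findRoute func(amt uint64) (found bool, suggested uint64)) (uint64, bool) {

	for amtMsat > 0 {
		found, suggested := findRoute(amtMsat)
		if found {
			return amtMsat, true
		}

		// No splitting once the shard limit is reached.
		if activeShards+1 >= maxParts {
			return 0, false
		}

		if suggested > 0 && suggested < amtMsat {
			// The pathfinding error hinted at a smaller feasible amount.
			amtMsat = suggested
		} else {
			// Otherwise try again for half the amount.
			amtMsat /= 2
		}
	}

	return 0, false
}
```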
@@ -402,12 +402,13 @@ func (p *paymentSession) RequestRoute(maxAmt, feeLimit lnwire.MilliSatoshi,
// any case, but the sent out partial payments would be held by
// the receiver until the mpp timeout.
case errors.Is(err, errInsufficientBalance):
p.log.Errorf("not splitting because local balance " +
p.log.Infof("not splitting because local balance " +
"is insufficient")

return nil, errInsufficientBalance

case err != nil:
p.log.Infof("Failed to find path: %v", err)
return nil, err
}

@@ -426,9 +427,11 @@ func (p *paymentSession) RequestRoute(maxAmt, feeLimit lnwire.MilliSatoshi,
},
)
if err != nil {
log.Warnf("Failed to construct new route: %v", err)
return nil, err
}

log.Infof("Constructed a new route: %v", spew.Sdump(route.Hops))
return route, err
}
}