diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 5d8c47377c7..4cce9fcd2cd 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -4,7 +4,7 @@ //! here. See also the chanmon_fail_consistency fuzz test. use chain::transaction::OutPoint; -use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSendFailure}; +use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure}; use ln::channelmonitor::ChannelMonitorUpdateErr; use ln::features::InitFeatures; use ln::msgs; @@ -1731,3 +1731,63 @@ fn during_funding_monitor_fail() { do_during_funding_monitor_fail(true, false); do_during_funding_monitor_fail(false, false); } + +#[test] +fn test_path_paused_mpp() { + // Simple test of sending a multi-part payment where one path is currently blocked awaiting + // monitor update + let chanmon_cfgs = create_chanmon_cfgs(4); + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).0.contents.short_channel_id; + let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::supported(), InitFeatures::supported()); + let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::supported(), InitFeatures::supported()).0.contents.short_channel_id; + let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::supported(), InitFeatures::supported()).0.contents.short_channel_id; + + let (payment_preimage, payment_hash) = get_payment_preimage_hash!(&nodes[0]); + let payment_secret = PaymentSecret([0xdb; 32]); + let mut route = nodes[0].router.get_route(&nodes[3].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap(); + + // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3: + let path = route.paths[0].clone(); + route.paths.push(path); + route.paths[0][0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0][0].short_channel_id = chan_1_id; + route.paths[0][1].short_channel_id = chan_3_id; + route.paths[1][0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1][0].short_channel_id = chan_2_ann.contents.short_channel_id; + route.paths[1][1].short_channel_id = chan_4_id; + + // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second + // (for the path 0 -> 2 -> 3) fails. + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); + *nodes[0].chan_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + + // Now check that we get the right return value, indicating that the first path succeeded but + // the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as + // some paths succeeded, preventing retry. 
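The assertions that follow pin down the caller-facing contract of that return value. Wallet-side handling looks roughly like the following sketch, which assumes this patch's send_payment signature and uses `manager` as a stand-in for a ChannelManager handle (everything else is taken from types shown in this patch):

    match manager.send_payment(&route, payment_hash, &Some(payment_secret)) {
        Ok(()) => {
            // All paths were committed; await PaymentSent/PaymentFailed events.
        },
        Err(PaymentSendFailure::PartialFailure(results)) => {
            // At least one path was irrevocably committed, so the payment must
            // NOT be retried wholesale. Paths that returned
            // APIError::MonitorUpdateFailed will have their commitment update
            // re-sent once channel_monitor_updated() restores the monitor.
            for per_path_result in results.iter() {
                if let Err(APIError::MonitorUpdateFailed) = per_path_result {
                    // Nothing to do but wait for the monitor to come back.
                }
            }
        },
        Err(_) => {
            // No HTLC was committed on any path; retrying is safe.
        },
    }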
+ if let Err(PaymentSendFailure::PartialFailure(results)) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { + assert_eq!(results.len(), 2); + if let Ok(()) = results[0] {} else { panic!(); } + if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); } + } else { panic!(); } + check_added_monitors!(nodes[0], 2); + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); + + // Pass the first HTLC of the payment along to nodes[3]. + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false); + + // And check that, after we successfully update the monitor for chan_2, we can pass the second + // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. + let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone(); + nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true); + + claim_payment_along_route_with_secret(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage, Some(payment_secret), 200_000); +} diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index ac072c0ea4f..f44109432a5 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -18,7 +18,7 @@ use secp256k1; use ln::features::{ChannelFeatures, InitFeatures}; use ln::msgs; use ln::msgs::{DecodeError, OptionalField, DataLossProtect}; -use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep}; +use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER}; use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT}; use ln::chan_utils::{CounterpartyCommitmentSecrets, LocalCommitmentTransaction, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys}; use ln::chan_utils; @@ -3154,13 +3154,33 @@ impl Channel { self.network_sync == UpdateStatus::DisabledMarked } - /// Called by channelmanager based on chain blocks being connected. - /// Note that we only need to use this to detect funding_signed, anything else is handled by - /// the channel_monitor. - /// In case of Err, the channel may have been closed, at which point the standard requirements - /// apply - no calls may be made except those explicitly stated to be allowed post-shutdown. + /// When we receive a new block, we (a) check whether the block contains the funding + /// transaction (which would start us counting blocks until we send the funding_locked), and + /// (b) check the height of the block against outbound holding cell HTLCs in case we need to + /// give up on them prematurely and time them out. Everything else (e.g. commitment + /// transaction broadcasts, channel closure detection, HTLC transaction broadcasting, etc) is + /// handled by the ChannelMonitor. 
+ /// + /// If we return Err, the channel may have been closed, at which point the standard + /// requirements apply - no calls may be made except those explicitly stated to be allowed + /// post-shutdown. /// Only returns an ErrorAction of DisconnectPeer, if Err. - pub fn block_connected(&mut self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) -> Result, msgs::ErrorMessage> { + /// + /// May return some HTLCs (and their payment_hash) which have timed out and should be failed + /// back. + pub fn block_connected(&mut self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> { + let mut timed_out_htlcs = Vec::new(); + self.holding_cell_htlc_updates.retain(|htlc_update| { + match htlc_update { + &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => { + if *cltv_expiry <= height + HTLC_FAIL_BACK_BUFFER { + timed_out_htlcs.push((source.clone(), payment_hash.clone())); + false + } else { true } + }, + _ => true + } + }); let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS); if header.bitcoin_hash() != self.last_block_connected { if self.funding_tx_confirmations > 0 { @@ -3243,19 +3263,19 @@ impl Channel { if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 { let next_per_commitment_secret = self.build_local_commitment_secret(self.cur_local_commitment_transaction_number); let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &next_per_commitment_secret); - return Ok(Some(msgs::FundingLocked { + return Ok((Some(msgs::FundingLocked { channel_id: self.channel_id, next_per_commitment_point: next_per_commitment_point, - })); + }), timed_out_htlcs)); } else { self.monitor_pending_funding_locked = true; - return Ok(None); + return Ok((None, timed_out_htlcs)); } } } } } - Ok(None) + Ok((None, timed_out_htlcs)) } /// Called by channelmanager based on chain blocks being disconnected. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 0acc59df62e..cbcccb82f93 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -28,7 +28,7 @@ use secp256k1; use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator}; use chain::transaction::OutPoint; use ln::channel::{Channel, ChannelError}; -use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; +use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER}; use ln::features::{InitFeatures, NodeFeatures}; use ln::router::{Route, RouteHop}; use ln::msgs; @@ -76,6 +76,7 @@ enum PendingHTLCRouting { }, Receive { payment_data: Option, + incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed }, } @@ -129,6 +130,7 @@ struct ClaimableHTLC { /// payment_secret which prevents path-probing attacks and can associate different HTLCs which /// are part of the same payment. 
payment_data: Option<msgs::FinalOnionHopData>, + cltv_expiry: u32, } /// Tracks the inbound corresponding to an outbound HTLC @@ -296,8 +298,6 @@ pub(super) struct ChannelHolder { /// Note that while this is held in the same mutex as the channels themselves, no consistency /// guarantees are made about the channels given here actually existing anymore by the time you /// go to read them! - /// TODO: We need to time out HTLCs sitting here which are waiting on other AMP HTLCs to - /// arrive. claimable_htlcs: HashMap<(PaymentHash, Option<PaymentSecret>), Vec<ClaimableHTLC>>, /// Messages to send to peers - pushed to in the same lock that they are generated in (except /// for broadcast messages, where ordering isn't as strict). @@ -1063,7 +1063,10 @@ impl ChannelMan // delay) once they've send us a commitment_signed! PendingHTLCStatus::Forward(PendingHTLCInfo { - routing: PendingHTLCRouting::Receive { payment_data }, + routing: PendingHTLCRouting::Receive { + payment_data, + incoming_cltv_expiry: msg.cltv_expiry, + }, payment_hash: msg.payment_hash.clone(), incoming_shared_secret: shared_secret, amt_to_forward: next_hop_data.amt_to_forward, @@ -1222,6 +1225,80 @@ impl ChannelMan }) } + // Only public for testing, this should otherwise never be called directly + pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32) -> Result<(), APIError> { + log_trace!(self, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); + let (session_priv, prng_seed) = self.keys_manager.get_onion_rand(); + + let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv) + .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?; + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height)?; + if onion_utils::route_size_insane(&onion_payloads) { + return Err(APIError::RouteError{err: "Route size too large considering onion data"}); + } + let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash); + + let _ = self.total_consistency_lock.read().unwrap(); + + let err: Result<(), _> = loop { + let mut channel_lock = self.channel_state.lock().unwrap(); + let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) { + None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}), + Some(id) => id.clone(), + }; + + let channel_state = &mut *channel_lock; + if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) { + match { + if chan.get().get_their_node_id() != path.first().unwrap().pubkey { + return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}); + } + if !chan.get().is_live() { + return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"}); + } + break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { + path: path.clone(), + session_priv: session_priv.clone(), + first_hop_htlc_msat: htlc_msat, + }, onion_packet), channel_state, chan) + } { + Some((update_add, commitment_signed, monitor_update)) => { + if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { + maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true); + // Note that MonitorUpdateFailed here indicates 
(per function docs) + // that we will resend the commitment update once monitor updating + // is restored. Therefore, we must return an error indicating that + // it is unsafe to retry the payment wholesale, which we do in the + // send_payment check for MonitorUpdateFailed, below. + return Err(APIError::MonitorUpdateFailed); + } + + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: path.first().unwrap().pubkey, + updates: msgs::CommitmentUpdate { + update_add_htlcs: vec![update_add], + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + }, + }); + }, + None => {}, + } + } else { unreachable!(); } + return Ok(()); + }; + + match handle_error!(self, err, path.first().unwrap().pubkey) { + Ok(_) => unreachable!(), + Err(e) => { + Err(APIError::ChannelUnavailable { err: e.err }) + }, + } + } + /// Sends a payment along a given route. /// /// Value parameters are provided via the last hop in route, see documentation for RouteHop @@ -1294,89 +1371,8 @@ impl ChannelMan let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1; let mut results = Vec::new(); - 'path_loop: for path in route.paths.iter() { - macro_rules! check_res_push { - ($res: expr) => { match $res { - Ok(r) => r, - Err(e) => { - results.push(Err(e)); - continue 'path_loop; - }, - } - } - } - - log_trace!(self, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); - let (session_priv, prng_seed) = self.keys_manager.get_onion_rand(); - - let onion_keys = check_res_push!(onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv) - .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})); - let (onion_payloads, htlc_msat, htlc_cltv) = check_res_push!(onion_utils::build_onion_payloads(&path, total_value, payment_secret, cur_height)); - if onion_utils::route_size_insane(&onion_payloads) { - check_res_push!(Err(APIError::RouteError{err: "Route size too large considering onion data"})); - } - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, &payment_hash); - - let _ = self.total_consistency_lock.read().unwrap(); - - let err: Result<(), _> = loop { - let mut channel_lock = self.channel_state.lock().unwrap(); - let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) { - None => check_res_push!(Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"})), - Some(id) => id.clone(), - }; - - let channel_state = &mut *channel_lock; - if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) { - match { - if chan.get().get_their_node_id() != path.first().unwrap().pubkey { - check_res_push!(Err(APIError::RouteError{err: "Node ID mismatch on first hop!"})); - } - if !chan.get().is_live() { - check_res_push!(Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"})); - } - break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { - path: path.clone(), - session_priv: session_priv.clone(), - first_hop_htlc_msat: htlc_msat, - }, onion_packet), channel_state, chan) - } { - Some((update_add, commitment_signed, monitor_update)) => { - if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { - maybe_break_monitor_err!(self, e, channel_state, 
chan, RAACommitmentOrder::CommitmentFirst, false, true); - // Note that MonitorUpdateFailed here indicates (per function docs) - // that we will resend the commitment update once monitor updating - // is restored. Therefore, we must return an error indicating that - // it is unsafe to retry the payment wholesale, which we do in the - // next check for MonitorUpdateFailed, below. - check_res_push!(Err(APIError::MonitorUpdateFailed)); - } - - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: path.first().unwrap().pubkey, - updates: msgs::CommitmentUpdate { - update_add_htlcs: vec![update_add], - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, - }, - }); - }, - None => {}, - } - } else { unreachable!(); } - results.push(Ok(())); - continue 'path_loop; - }; - - match handle_error!(self, err, path.first().unwrap().pubkey) { - Ok(_) => unreachable!(), - Err(e) => { - check_res_push!(Err(APIError::ChannelUnavailable { err: e.err })); - }, - } + for path in route.paths.iter() { + results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height)); } let mut has_ok = false; let mut has_err = false; @@ -1686,7 +1682,7 @@ impl ChannelMan for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { - routing: PendingHTLCRouting::Receive { payment_data }, + routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry }, incoming_shared_secret, payment_hash, amt_to_forward, .. }, } => { let prev_hop = HTLCPreviousHopData { short_channel_id: prev_short_channel_id, @@ -1703,6 +1699,7 @@ impl ChannelMan prev_hop, value: amt_to_forward, payment_data: payment_data.clone(), + cltv_expiry: incoming_cltv_expiry, }); if let &Some(ref data) = &payment_data { for htlc in htlcs.iter() { @@ -2958,29 +2955,39 @@ impl= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { + let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); + htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height)); + timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason { + failure_code: 0x4000 | 15, + data: htlc_msat_height_data + })); + false + } else { true } + }); + !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry. + }); } for failure in failed_channels.drain(..) { self.finish_force_close_channel(failure); } + + for (source, payment_hash, reason) in timed_out_htlcs.drain(..) 
{ + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason); + } self.latest_block_height.store(height as usize, Ordering::Release); *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash; loop { @@ -3320,9 +3350,10 @@ impl Writeable for PendingHTLCInfo { onion_packet.write(writer)?; short_channel_id.write(writer)?; }, - &PendingHTLCRouting::Receive { ref payment_data } => { + &PendingHTLCRouting::Receive { ref payment_data, ref incoming_cltv_expiry } => { 1u8.write(writer)?; payment_data.write(writer)?; + incoming_cltv_expiry.write(writer)?; }, } self.incoming_shared_secret.write(writer)?; @@ -3343,6 +3374,7 @@ impl Readable for PendingHTLCInfo { }, 1u8 => PendingHTLCRouting::Receive { payment_data: Readable::read(reader)?, + incoming_cltv_expiry: Readable::read(reader)?, }, _ => return Err(DecodeError::InvalidValue), }, @@ -3415,7 +3447,8 @@ impl_writeable!(HTLCPreviousHopData, 0, { impl_writeable!(ClaimableHTLC, 0, { prev_hop, value, - payment_data + payment_data, + cltv_expiry }); impl Writeable for HTLCSource { diff --git a/lightning/src/ln/channelmonitor.rs b/lightning/src/ln/channelmonitor.rs index 7e961a129eb..c8e6b8260ad 100644 --- a/lightning/src/ln/channelmonitor.rs +++ b/lightning/src/ln/channelmonitor.rs @@ -383,6 +383,28 @@ pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3; /// solved by a previous claim tx. What we want to avoid is reorg evicting our claim tx and us not /// keeping bumping another claim tx to solve the outpoint. pub(crate) const ANTI_REORG_DELAY: u32 = 6; +/// Number of blocks before confirmation at which we fail back an un-relayed HTLC or at which we +/// refuse to accept a new HTLC. +/// +/// This is used for a few separate purposes: +/// 1) if we've received an MPP HTLC to us and it expires within this many blocks and we are +/// waiting on additional parts (or waiting on the preimage for any HTLC from the user), we will +/// fail this HTLC, +/// 2) if we receive an HTLC within this many blocks of its expiry (plus one to avoid a race +/// condition with the above), we will fail this HTLC without telling the user we received it, +/// 3) if we are waiting on a connection or a channel state update to send an HTLC to a peer, and +/// that HTLC expires within this many blocks, we will simply fail the HTLC instead. +/// +/// (1) is all about protecting us - we need enough time to update the channel state before we hit +/// CLTV_CLAIM_BUFFER, at which point we'd go on chain to claim the HTLC with the preimage. +/// +/// (2) is the same, but with an additional buffer to avoid accepting an HTLC which is immediately +/// in a race condition between the user connecting a block (which would fail it) and the user +/// providing us the preimage (which would claim it). +/// +/// (3) is about our counterparty - we don't want to relay an HTLC to a counterparty when they may +/// end up force-closing the channel on us to claim it. +pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS; #[derive(Clone, PartialEq)] struct LocalSignedTx { diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 55f734a32af..91365d5a29b 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -717,7 +717,7 @@ macro_rules! get_payment_preimage_hash { } } -macro_rules! expect_pending_htlcs_forwardable { +macro_rules! 
expect_pending_htlcs_forwardable_ignore { ($node: expr) => {{ let events = $node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -725,6 +725,12 @@ macro_rules! expect_pending_htlcs_forwardable { Event::PendingHTLCsForwardable { .. } => { }, _ => panic!("Unexpected event"), }; + }} +} + +macro_rules! expect_pending_htlcs_forwardable { + ($node: expr) => {{ + expect_pending_htlcs_forwardable_ignore!($node); $node.node.process_pending_htlc_forwards(); }} } @@ -758,13 +764,19 @@ macro_rules! expect_payment_sent { } macro_rules! expect_payment_failed { - ($node: expr, $expected_payment_hash: expr, $rejected_by_dest: expr) => { + ($node: expr, $expected_payment_hash: expr, $rejected_by_dest: expr $(, $expected_error_code: expr, $expected_error_data: expr)*) => { let events = $node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentFailed { ref payment_hash, rejected_by_dest, .. } => { + Event::PaymentFailed { ref payment_hash, rejected_by_dest, ref error_code, ref error_data } => { assert_eq!(*payment_hash, $expected_payment_hash); assert_eq!(rejected_by_dest, $rejected_by_dest); + assert!(error_code.is_some()); + assert!(error_data.is_some()); + $( + assert_eq!(error_code.unwrap(), $expected_error_code); + assert_eq!(&error_data.as_ref().unwrap()[..], $expected_error_data); + )* }, _ => panic!("Unexpected event"), } @@ -774,49 +786,57 @@ macro_rules! expect_payment_failed { pub fn send_along_route_with_secret<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, route: Route, expected_paths: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option) { origin_node.node.send_payment(&route, our_payment_hash, &our_payment_secret).unwrap(); check_added_monitors!(origin_node, expected_paths.len()); + pass_along_route(origin_node, expected_paths, recv_value, our_payment_hash, our_payment_secret); +} - let mut events = origin_node.node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), expected_paths.len()); - for (path_idx, (ev, expected_route)) in events.drain(..).zip(expected_paths.iter()).enumerate() { - let mut payment_event = SendEvent::from_event(ev); - let mut prev_node = origin_node; - - for (idx, &node) in expected_route.iter().enumerate() { - assert_eq!(node.node.get_our_node_id(), payment_event.node_id); - - node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(node, 0); - commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false); - - expect_pending_htlcs_forwardable!(node); - - if idx == expected_route.len() - 1 { - let events_2 = node.node.get_and_clear_pending_events(); - // Once we've gotten through all the HTLCs, the last one should result in a - // PaymentReceived (but each previous one should not!). 
- if path_idx == expected_paths.len() - 1 { - assert_eq!(events_2.len(), 1); - match events_2[0] { - Event::PaymentReceived { ref payment_hash, ref payment_secret, amt } => { - assert_eq!(our_payment_hash, *payment_hash); - assert_eq!(our_payment_secret, *payment_secret); - assert_eq!(amt, recv_value); - }, - _ => panic!("Unexpected event"), - } - } else { - assert!(events_2.is_empty()); +pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option<PaymentSecret>, ev: MessageSendEvent, payment_received_expected: bool) { + let mut payment_event = SendEvent::from_event(ev); + let mut prev_node = origin_node; + + for (idx, &node) in expected_path.iter().enumerate() { + assert_eq!(node.node.get_our_node_id(), payment_event.node_id); + + node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]); + check_added_monitors!(node, 0); + commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false); + + expect_pending_htlcs_forwardable!(node); + + if idx == expected_path.len() - 1 { + let events_2 = node.node.get_and_clear_pending_events(); + if payment_received_expected { + assert_eq!(events_2.len(), 1); + match events_2[0] { + Event::PaymentReceived { ref payment_hash, ref payment_secret, amt } => { + assert_eq!(our_payment_hash, *payment_hash); + assert_eq!(our_payment_secret, *payment_secret); + assert_eq!(amt, recv_value); + }, + _ => panic!("Unexpected event"), } } else { - let mut events_2 = node.node.get_and_clear_pending_msg_events(); - assert_eq!(events_2.len(), 1); - check_added_monitors!(node, 1); - payment_event = SendEvent::from_event(events_2.remove(0)); - assert_eq!(payment_event.msgs.len(), 1); + assert!(events_2.is_empty()); } - - prev_node = node; + } else { + let mut events_2 = node.node.get_and_clear_pending_msg_events(); + assert_eq!(events_2.len(), 1); + check_added_monitors!(node, 1); + payment_event = SendEvent::from_event(events_2.remove(0)); + assert_eq!(payment_event.msgs.len(), 1); } + + prev_node = node; + } +} + +pub fn pass_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option<PaymentSecret>) { + let mut events = origin_node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), expected_route.len()); + for (path_idx, (ev, expected_path)) in events.drain(..).zip(expected_route.iter()).enumerate() { + // Once we've gotten through all the HTLCs, the last one should result in a + // PaymentReceived (but each previous one should not!). 
+ let expect_payment = path_idx == expected_route.len() - 1; + pass_along_path(origin_node, expected_path, recv_value, our_payment_hash.clone(), our_payment_secret, ev, expect_payment); } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 9d8a0bc2465..267d4bbdc06 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -17,7 +17,7 @@ use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures}; use ln::msgs; use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, ErrorAction}; use util::enforcing_trait_impls::EnforcingChannelKeys; -use util::test_utils; +use util::{byte_utils, test_utils}; use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider}; use util::errors::APIError; use util::ser::{Writeable, Writer, ReadableArgs}; @@ -949,15 +949,7 @@ fn htlc_fail_async_shutdown() { nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, .. } => { - assert_eq!(our_payment_hash, *payment_hash); - assert!(!rejected_by_dest); - }, - _ => panic!("Unexpected event"), - } + expect_payment_failed!(nodes[0], our_payment_hash, false); let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 2); @@ -1352,15 +1344,7 @@ fn holding_cell_htlc_counting() { _ => panic!("Unexpected event"), } - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => { - assert_eq!(payment_hash, payment_hash_2); - assert!(!rejected_by_dest); - }, - _ => panic!("Unexpected event"), - } + expect_payment_failed!(nodes[0], payment_hash_2, false); // Now forward all the pending HTLCs and claim them back nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]); @@ -2250,15 +2234,7 @@ fn claim_htlc_outputs_shared_tx() { nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); check_added_monitors!(nodes[1], 1); connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash()); - - let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { payment_hash, .. } => { - assert_eq!(payment_hash, payment_hash_2); - }, - _ => panic!("Unexpected event"), - } + expect_payment_failed!(nodes[1], payment_hash_2, true); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 3); // ChannelMonitor: penalty tx, ChannelManager: local commitment + HTLC-timeout @@ -2320,16 +2296,10 @@ fn claim_htlc_outputs_single_tx() { check_added_monitors!(nodes[0], 1); nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200); check_added_monitors!(nodes[1], 1); - connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash()); + expect_pending_htlcs_forwardable_ignore!(nodes[0]); - let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { payment_hash, .. 
} => { - assert_eq!(payment_hash, payment_hash_2); - }, - _ => panic!("Unexpected event"), - } + connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash()); + expect_payment_failed!(nodes[1], payment_hash_2, true); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 9); @@ -2683,7 +2653,7 @@ fn test_simple_commitment_revoked_fail_backward() { // Revoke the old state claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage, 3_000_000); - route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + let (_, payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); @@ -2712,12 +2682,7 @@ fn test_simple_commitment_revoked_fail_backward() { MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {}, _ => panic!("Unexpected event"), } - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { .. } => {}, - _ => panic!("Unexpected event"), - } + expect_payment_failed!(nodes[0], payment_hash, false); }, _ => panic!("Unexpected event"), } @@ -3653,6 +3618,143 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000); } +fn do_test_htlc_timeout(send_partial_mpp: bool) { + // If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them + // to avoid our counterparty failing the channel. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + + let our_payment_hash = if send_partial_mpp { + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap(); + let (_, our_payment_hash) = get_payment_preimage_hash!(&nodes[0]); + let payment_secret = PaymentSecret([0xdb; 32]); + // Use the utility function send_payment_along_path to send the payment with MPP data which + // indicates there are more HTLCs coming. + nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, CHAN_CONFIRM_DEPTH).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + // Now do the relevant commitment_signed/RAA dances along the path, noting that the final + // hop should *not* yet generate any PaymentReceived event(s). 
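No PaymentReceived fires here because the onion advertised a total_value of 200000 msat while this path delivered only 100000, so nodes[1] parks the HTLC in claimable_htlcs until the remaining part arrives. The scan added to ChannelManager::block_connected above is what eventually gives up on such an HTLC; restated as a standalone predicate it is roughly the following (an illustrative helper, not LDK API; the CLTV_CLAIM_BUFFER value of 6 is an assumption, as this patch only shows LATENCY_GRACE_PERIOD_BLOCKS = 3):

    /// An unclaimed inbound HTLC is failed back with 0x4000 | 15 (carrying
    /// `htlc_msat || height` as failure data, per the encoding added in
    /// ChannelManager::block_connected) once the chain tip comes within
    /// HTLC_FAIL_BACK_BUFFER blocks of the HTLC's expiry, leaving enough time
    /// to commit the failure before a counterparty could force-close.
    fn should_time_out_unclaimed_htlc(height: u32, cltv_expiry: u32) -> bool {
        const CLTV_CLAIM_BUFFER: u32 = 6; // assumed value; defined in channelmonitor.rs
        const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
        let htlc_fail_back_buffer = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;
        height >= cltv_expiry - htlc_fail_back_buffer
    }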
+ pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false); + our_payment_hash + } else { + route_payment(&nodes[0], &[&nodes[1]], 100000).1 + }; + + let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[0].block_notifier.block_connected_checked(&header, 101, &[], &[]); + nodes[1].block_notifier.block_connected_checked(&header, 101, &[], &[]); + for i in 102..TEST_FINAL_CLTV + 100 + 1 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS { + header.prev_blockhash = header.bitcoin_hash(); + nodes[0].block_notifier.block_connected_checked(&header, i, &[], &[]); + nodes[1].block_notifier.block_connected_checked(&header, i, &[], &[]); + } + + expect_pending_htlcs_forwardable!(nodes[1]); + + check_added_monitors!(nodes[1], 1); + let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(htlc_timeout_updates.update_add_htlcs.is_empty()); + assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1); + assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty()); + assert!(htlc_timeout_updates.update_fee.is_none()); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false); + // 100_000 msat as u64, followed by a height of 123 as u32 + let mut expected_failure_data = byte_utils::be64_to_array(100_000).to_vec(); + expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(123)); + expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]); +} + +#[test] +fn test_htlc_timeout() { + do_test_htlc_timeout(true); + do_test_htlc_timeout(false); +} + +fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { + // Tests that HTLCs in the holding cell are timed out after the requisite number of blocks. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported()); + + // Route a first payment to get the 1 -> 2 channel in awaiting_raa... 
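Pinning the 1 -> 2 channel in AwaitingRAA matters because the second HTLC below then never reaches a commitment transaction; it waits in holding_cell_htlc_updates, and the retain() added to Channel::block_connected earlier in this patch is what drops it. Its predicate is the sender-side mirror of the recipient rule above, roughly (illustrative sketch, not LDK API):

    /// Returns whether a queued (holding-cell) outbound Add should be kept.
    /// Once its expiry is within HTLC_FAIL_BACK_BUFFER of the tip it is
    /// dropped, and failed back if it was a forwarded HTLC.
    fn keep_queued_add(cltv_expiry: u32, height: u32, htlc_fail_back_buffer: u32) -> bool {
        cltv_expiry > height + htlc_fail_back_buffer
    }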
+ let route = nodes[1].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap(); + let (_, first_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[1].node.send_payment(&route, first_payment_hash, &None).unwrap(); + assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1); + check_added_monitors!(nodes[1], 1); + + // Now attempt to route a second payment, which should be placed in the holding cell + let (_, second_payment_hash) = get_payment_preimage_hash!(nodes[0]); + if forwarded_htlc { + let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap(); + nodes[0].node.send_payment(&route, second_payment_hash, &None).unwrap(); + check_added_monitors!(nodes[0], 1); + let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 0); + } else { + nodes[1].node.send_payment(&route, second_payment_hash, &None).unwrap(); + check_added_monitors!(nodes[1], 0); + } + + let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[1].block_notifier.block_connected_checked(&header, 101, &[], &[]); + for i in 102..TEST_FINAL_CLTV + 100 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS { + header.prev_blockhash = header.bitcoin_hash(); + nodes[1].block_notifier.block_connected_checked(&header, i, &[], &[]); + } + + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + + header.prev_blockhash = header.bitcoin_hash(); + nodes[1].block_notifier.block_connected_checked(&header, TEST_FINAL_CLTV + 100 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS, &[], &[]); + + if forwarded_htlc { + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + let fail_commit = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(fail_commit.len(), 1); + match fail_commit[0] { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => { + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true); + }, + _ => unreachable!(), + } + expect_payment_failed!(nodes[0], second_payment_hash, false); + if let &MessageSendEvent::PaymentFailureNetworkUpdate { ref update } = &nodes[0].node.get_and_clear_pending_msg_events()[0] { + match update { + &HTLCFailChannelUpdate::ChannelUpdateMessage { .. 
} => {}, + _ => panic!("Unexpected event"), + } + } else { + panic!("Unexpected event"); + } + } else { + expect_payment_failed!(nodes[1], second_payment_hash, true); + } +} + +#[test] +fn test_holding_cell_htlc_add_timeouts() { + do_test_holding_cell_htlc_add_timeouts(false); + do_test_holding_cell_htlc_add_timeouts(true); +} + #[test] fn test_invalid_channel_announcement() { //Test BOLT 7 channel_announcement msg requirement for final node, gather data to build customed channel_announcement msgs @@ -4276,14 +4378,7 @@ fn test_static_spendable_outputs_timeout_tx() { let header_1 = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[1].block_notifier.block_connected(&Block { header: header_1, txdata: vec![node_txn[0].clone()] }, 1); connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash()); - let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { payment_hash, .. } => { - assert_eq!(payment_hash, our_payment_hash); - }, - _ => panic!("Unexpected event"), - } + expect_payment_failed!(nodes[1], our_payment_hash, true); let spend_txn = check_spendable_outputs!(nodes[1], 1); assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote (*2), timeout_tx.output (*1) @@ -4629,13 +4724,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { _ => { panic!("Unexpected event"); } } } - let events = nodes[0].node.get_and_clear_pending_events(); - match events[0] { - Event::PaymentFailed { ref payment_hash, .. } => { - assert_eq!(*payment_hash, duplicate_payment_hash); - } - _ => panic!("Unexpected event"), - } + expect_payment_failed!(nodes[0], duplicate_payment_hash, false); // Solve 2nd HTLC by broadcasting on B's chain HTLC-Success Tx from C nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![htlc_success_txn[0].clone()] }, 200); @@ -4993,14 +5082,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[0].block_notifier.block_connected(&Block { header: header_201, txdata: vec![htlc_timeout.clone()] }, 201); connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 201, true, header_201.bitcoin_hash()); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { payment_hash, .. } => { - assert_eq!(payment_hash, our_payment_hash); - }, - _ => panic!("Unexpected event"), - } + expect_payment_failed!(nodes[0], our_payment_hash, true); // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor let spend_txn = check_spendable_outputs!(nodes[0], 1); @@ -5151,15 +5233,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no check_closed_broadcast!(nodes[0], false); check_added_monitors!(nodes[0], 1); } else { - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { payment_hash, rejected_by_dest, .. 
} => { - assert_eq!(payment_hash, our_payment_hash); - assert!(rejected_by_dest); - }, - _ => panic!("Unexpected event"), - } + expect_payment_failed!(nodes[0], our_payment_hash, true); } } @@ -6522,14 +6596,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone()); let parent_hash = connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash()); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { payment_hash, .. } => { - assert_eq!(payment_hash, dust_hash); - }, - _ => panic!("Unexpected event"), - } + expect_payment_failed!(nodes[0], dust_hash, true); assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx let header_2 = BlockHeader { version: 0x20000000, prev_blockhash: parent_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; @@ -6537,14 +6604,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { nodes[0].block_notifier.block_connected(&Block { header: header_2, txdata: vec![timeout_tx[0].clone()]}, 7); let header_3 = BlockHeader { version: 0x20000000, prev_blockhash: header_2.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 8, true, header_3.bitcoin_hash()); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { payment_hash, .. } => { - assert_eq!(payment_hash, non_dust_hash); - }, - _ => panic!("Unexpected event"), - } + expect_payment_failed!(nodes[0], non_dust_hash, true); } else { // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![bs_commitment_tx[0].clone()]}, 1); @@ -6555,28 +6615,14 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { let parent_hash = connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash()); let header_2 = BlockHeader { version: 0x20000000, prev_blockhash: parent_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; if !revoked { - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { payment_hash, .. 
} => { - assert_eq!(payment_hash, dust_hash); - }, - _ => panic!("Unexpected event"), - } + expect_payment_failed!(nodes[0], dust_hash, true); assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // We fail non-dust-HTLC 2 by broadcast of local timeout tx on remote commitment tx nodes[0].block_notifier.block_connected(&Block { header: header_2, txdata: vec![timeout_tx[0].clone()]}, 7); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); let header_3 = BlockHeader { version: 0x20000000, prev_blockhash: header_2.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 8, true, header_3.bitcoin_hash()); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { payment_hash, .. } => { - assert_eq!(payment_hash, non_dust_hash); - }, - _ => panic!("Unexpected event"), - } + expect_payment_failed!(nodes[0], non_dust_hash, true); } else { // If revoked, both dust & non-dust HTLCs should have been failed after ANTI_REORG_DELAY confs of revoked // commitment tx @@ -6882,7 +6928,7 @@ fn test_check_htlc_underpaying() { // Create some initial channels create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); - let (payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000); + let (payment_preimage, payment_hash) = route_payment(&nodes[0], &[&nodes[1]], 10_000); // Node 3 is expecting payment of 100_000 but receive 10_000, // fail htlc like we didn't know the preimage. @@ -6907,25 +6953,10 @@ fn test_check_htlc_underpaying() { nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let &Event::PaymentFailed { payment_hash:_, ref rejected_by_dest, ref error_code, ref error_data } = &events[0] { - assert_eq!(*rejected_by_dest, true); - assert_eq!(error_code.unwrap(), 0x4000|15); - // 10_000 msat as u64, followed by a height of 99 as u32 - assert_eq!(&error_data.as_ref().unwrap()[..], &[ - ((10_000u64 >> 7*8) & 0xff) as u8, - ((10_000u64 >> 6*8) & 0xff) as u8, - ((10_000u64 >> 5*8) & 0xff) as u8, - ((10_000u64 >> 4*8) & 0xff) as u8, - ((10_000u64 >> 3*8) & 0xff) as u8, - ((10_000u64 >> 2*8) & 0xff) as u8, - ((10_000u64 >> 1*8) & 0xff) as u8, - ((10_000u64 >> 0*8) & 0xff) as u8, - 0, 0, 0, 99]); - } else { - panic!("Unexpected event"); - } + // 10_000 msat as u64, followed by a height of 99 as u32 + let mut expected_failure_data = byte_utils::be64_to_array(10_000).to_vec(); + expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(99)); + expect_payment_failed!(nodes[0], payment_hash, true, 0x4000|15, &expected_failure_data[..]); nodes[1].node.get_and_clear_pending_events(); } @@ -7140,6 +7171,8 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { // Broadcast set of revoked txn on A let header_128 = connect_blocks(&nodes[0].block_notifier, 128, 0, true, header.bitcoin_hash()); + expect_pending_htlcs_forwardable_ignore!(nodes[0]); + let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[0].block_notifier.block_connected(&Block { header: header_129, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone(), 
revoked_htlc_txn[1].clone()] }, 129); let first; @@ -7472,6 +7505,8 @@ fn test_bump_txn_sanitize_tracking_maps() { // Broadcast set of revoked txn on A let header_128 = connect_blocks(&nodes[0].block_notifier, 128, 0, false, Default::default()); + expect_pending_htlcs_forwardable_ignore!(nodes[0]); + let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[0].block_notifier.block_connected(&Block { header: header_129, txdata: vec![revoked_local_txn[0].clone()] }, 129); check_closed_broadcast!(nodes[0], false); diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs index 8f34605675c..ca6355af001 100644 --- a/lightning/src/util/events.rs +++ b/lightning/src/util/events.rs @@ -55,6 +55,9 @@ pub enum Event { /// ChannelManager::fail_htlc_backwards to free up resources for this HTLC. /// The amount paid should be considered 'incorrect' when it is less than or more than twice /// the amount expected. + /// If you fail to call either ChannelManager::claim_funds or + /// ChannelManager::fail_htlc_backwards within the HTLC's timeout, the HTLC will be + /// automatically failed. PaymentReceived { /// The hash for which the preimage should be handed to the ChannelManager. payment_hash: PaymentHash, diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 4fcaf803e7e..905a53e6648 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -51,6 +51,9 @@ pub struct TestChannelMonitor<'a> { pub latest_monitor_update_id: Mutex>, pub simple_monitor: channelmonitor::SimpleManyChannelMonitor, pub update_ret: Mutex>, + // If this is set to Some(), after the next return, we'll always return this until update_ret + // is changed: + pub next_update_ret: Mutex>>, } impl<'a> TestChannelMonitor<'a> { pub fn new(chain_monitor: Arc, broadcaster: &'a chaininterface::BroadcasterInterface, logger: Arc, fee_estimator: &'a TestFeeEstimator) -> Self { @@ -59,6 +62,7 @@ impl<'a> TestChannelMonitor<'a> { latest_monitor_update_id: Mutex::new(HashMap::new()), simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, fee_estimator), update_ret: Mutex::new(Ok(())), + next_update_ret: Mutex::new(None), } } } @@ -74,7 +78,12 @@ impl<'a> channelmonitor::ManyChannelMonitor for TestChanne self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id())); self.added_monitors.lock().unwrap().push((funding_txo, monitor)); assert!(self.simple_monitor.add_monitor(funding_txo, new_monitor).is_ok()); - self.update_ret.lock().unwrap().clone() + + let ret = self.update_ret.lock().unwrap().clone(); + if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() { + *self.update_ret.lock().unwrap() = next_ret; + } + ret } fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { @@ -96,7 +105,12 @@ impl<'a> channelmonitor::ManyChannelMonitor for TestChanne &mut ::std::io::Cursor::new(&w.0), Arc::new(TestLogger::new())).unwrap().1; assert!(new_monitor == *monitor); self.added_monitors.lock().unwrap().push((funding_txo, new_monitor)); - self.update_ret.lock().unwrap().clone() + + let ret = self.update_ret.lock().unwrap().clone(); + if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() { + *self.update_ret.lock().unwrap() = next_ret; + } + ret } fn 
get_and_clear_pending_htlcs_updated(&self) -> Vec {
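The next_update_ret plumbing above is what lets test_path_paused_mpp fail exactly one of two monitor updates; arming it follows the shape used by the new test at the top of this patch:

    // The first update returns update_ret (Ok(())); next_update_ret is then
    // moved into update_ret, so the second update observes TemporaryFailure.
    // Setting update_ret back to Ok(()) afterwards lets the channel recover
    // via ChannelManager::channel_monitor_updated().
    *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
    *nodes[0].chan_monitor.next_update_ret.lock().unwrap() =
        Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));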