//! The logic to monitor for on-chain transactions and create the relevant claim responses lives
//! here.
//!
//! ChannelMonitor objects are generated by ChannelManager in response to relevant
//! messages/actions, and MUST be persisted to disk (and, preferably, remotely) before progress
//! can be made in responding to certain messages; see ManyChannelMonitor for more.
//!
//! Note that ChannelMonitors are an important part of the lightning trust model and a copy of the
//! latest ChannelMonitor must always be actively monitoring for chain updates (and no out-of-date
//! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other
//! security-domain-separated system design, you should consider having multiple paths for
//! ChannelMonitors to get out of the HSM and onto monitoring devices.
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::{TxOut,Transaction};
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::script::{Script, Builder};
use bitcoin::blockdata::opcodes;
use bitcoin::consensus::encode;
use bitcoin::util::hash::BitcoinHash;
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
use bitcoin::secp256k1::{Secp256k1,Signature};
use bitcoin::secp256k1::key::{SecretKey,PublicKey};
use bitcoin::secp256k1;
use ln::msgs::DecodeError;
use ln::chan_utils;
use ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, LocalCommitmentTransaction, HTLCType};
use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash};
use ln::onchaintx::{OnchainTxHandler, InputDescriptors};
use chain::chaininterface::{ChainListener, ChainWatchInterface, BroadcasterInterface, FeeEstimator};
use chain::transaction::OutPoint;
use chain::keysinterface::{SpendableOutputDescriptor, ChannelKeys};
use util::logger::Logger;
use util::ser::{Readable, MaybeReadable, Writer, Writeable, U48};
use util::{byte_utils, events};
use std::collections::{HashMap, hash_map};
use std::sync::Mutex;
use std::{hash,cmp, mem};
use std::ops::Deref;
/// An update generated by the underlying Channel itself which contains some new information the
/// ChannelMonitor should be made aware of.
#[cfg_attr(test, derive(PartialEq))]
#[derive(Clone)]
#[must_use]
pub struct ChannelMonitorUpdate {
pub(super) updates: Vec<ChannelMonitorUpdateStep>,
/// The sequence number of this update. Updates *must* be replayed in-order according to this
/// sequence number (and updates may panic if they are not). The update_id values are strictly
/// increasing and increase by one for each new update.
///
/// This sequence number is also used to track up to which points updates which returned
/// ChannelMonitorUpdateErr::TemporaryFailure have been applied to all copies of a given
/// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
pub update_id: u64,
}
impl Writeable for ChannelMonitorUpdate {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
self.update_id.write(w)?;
(self.updates.len() as u64).write(w)?;
for update_step in self.updates.iter() {
update_step.write(w)?;
}
Ok(())
}
}
impl Readable for ChannelMonitorUpdate {
fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
let update_id: u64 = Readable::read(r)?;
let len: u64 = Readable::read(r)?;
let mut updates = Vec::with_capacity(cmp::min(len as usize, MAX_ALLOC_SIZE / ::std::mem::size_of::<ChannelMonitorUpdateStep>()));
for _ in 0..len {
updates.push(Readable::read(r)?);
}
Ok(Self { update_id, updates })
}
}
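// The framing above (update_id, then a length-prefixed list of steps) makes updates easy to
// journal: write each one out as it arrives and replay them in update_id order at reload. A
// minimal round-trip sketch; `VecWriter` is a local test helper, not a crate type, and this
// assumes the util::ser::Writer trait requires only write_all/size_hint.
#[cfg(test)]
mod channel_monitor_update_ser_sketch {
	use super::*;

	struct VecWriter(Vec<u8>);
	impl Writer for VecWriter {
		fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
			self.0.extend_from_slice(buf);
			Ok(())
		}
		fn size_hint(&mut self, _size: usize) {}
	}

	#[test]
	fn update_round_trips() {
		// Serialize an (empty) update and read it back, checking the id survives.
		let update = ChannelMonitorUpdate { update_id: 42, updates: Vec::new() };
		let mut w = VecWriter(Vec::new());
		update.write(&mut w).unwrap();
		let read: ChannelMonitorUpdate = Readable::read(&mut ::std::io::Cursor::new(&w.0)).unwrap();
		assert_eq!(read.update_id, 42);
		assert!(read.updates.is_empty());
	}
}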
/// An error enum representing a failure to persist a channel monitor update.
#[derive(Clone)]
pub enum ChannelMonitorUpdateErr {
/// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
/// our state failed, but is expected to succeed at some point in the future).
///
/// Such a failure will "freeze" a channel, preventing us from revoking old states or
/// submitting new commitment transactions to the remote party. Once the update(s) which failed
/// have been successfully applied, ChannelManager::channel_monitor_updated can be used to
/// restore the channel to an operational state.
///
/// Note that a given ChannelManager will *never* re-generate a given ChannelMonitorUpdate. If
/// you return a TemporaryFailure you must ensure that it is written to disk safely before
/// writing out the latest ChannelManager state.
///
/// Even when a channel has been "frozen", updates to the ChannelMonitor can continue to occur
/// (eg if an inbound HTLC which we forwarded was claimed upstream, resulting in us attempting
/// to claim it on this channel) and those updates must be applied wherever they can be. At
/// least one such updated ChannelMonitor must be persisted; otherwise, PermanentFailure should
/// be returned to get things on-chain ASAP using only the in-memory copy. Obviously, updates to
/// the channel which would invalidate previous ChannelMonitors are not made when a channel has
/// been "frozen".
///
/// Note that even if updates made after TemporaryFailure succeed you must still call
/// channel_monitor_updated to ensure you have the latest monitor and re-enable normal channel
/// operation.
///
/// Note that the update being processed here will not be replayed for you when you call
/// ChannelManager::channel_monitor_updated, so you must store the update itself along
/// with the persisted ChannelMonitor on your own local disk prior to returning a
/// TemporaryFailure. You may, of course, employ a journaling approach, storing only the
/// ChannelMonitorUpdate on disk without updating the monitor itself, replaying the journal at
/// reload-time.
///
/// For deployments where a copy of ChannelMonitors and other local state are backed up in a
/// remote location (with local copies persisted immediately), it is anticipated that all
/// updates will return TemporaryFailure until the remote copies have been updated.
TemporaryFailure,
/// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
/// different watchtower and cannot update with all watchtowers that were previously informed
/// of this channel). This will force-close the channel in question (which will generate one
/// final ChannelMonitorUpdate which must be delivered to at least one ChannelMonitor copy).
///
/// Should also be used to indicate a failure to update the local persisted copy of the channel
/// monitor.
PermanentFailure,
}
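// One way an implementor might map persistence outcomes onto the variants above: journal the
// serialized update locally *before* returning (required if TemporaryFailure is returned, per
// the docs), treating a failed local write as fatal and failed remote replication as temporary.
// A commented sketch only; `journal_update_locally` and `replicate_to_watchtower` are
// hypothetical application helpers, not part of this crate:
//
// fn persist(update: &ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
// 	journal_update_locally(update).map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)?;
// 	replicate_to_watchtower(update).map_err(|_| ChannelMonitorUpdateErr::TemporaryFailure)
// }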
/// General Err type for ChannelMonitor actions. Generally, this implies that the data provided is
/// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::update_monitor this
/// means you tried to update a monitor for a different channel or the ChannelMonitorUpdate was
/// corrupted.
/// Contains a human-readable error message.
#[derive(Debug)]
pub struct MonitorUpdateError(pub &'static str);
/// Simple structure sent back by ManyChannelMonitor when an HTLC from a forward channel is
/// detected on-chain, carrying the information needed to update the HTLC in the backward channel.
#[derive(Clone, PartialEq)]
pub struct HTLCUpdate {
pub(super) payment_hash: PaymentHash,
pub(super) payment_preimage: Option<PaymentPreimage>,
pub(super) source: HTLCSource
}
impl_writeable!(HTLCUpdate, 0, { payment_hash, payment_preimage, source });
/// Simple trait indicating ability to track a set of ChannelMonitors and multiplex events between
/// them. Generally should be implemented by keeping a local SimpleManyChannelMonitor and passing
/// events to it, while also taking any add/update_monitor events and passing them to some remote
/// server(s).
///
/// In general, you must always have at least one local copy in memory, which must never fail to
/// update (as it is responsible for broadcasting the latest state in case the channel is closed),
/// and then persist it to various on-disk locations. If, for some reason, the in-memory copy fails
/// to update (eg out-of-memory or some other condition), you must immediately shut down without
/// taking any further action such as writing the current state to disk. This should likely be
/// accomplished via panic!() or abort().
///
/// Note that any updates to a channel's monitor *must* be applied to each instance of the
/// channel's monitor everywhere (including remote watchtowers) *before* this function returns. If
/// an update occurs and a remote watchtower is left with old state, it may broadcast transactions
/// which we have revoked, allowing our counterparty to claim all funds in the channel!
///
/// Users need to notify implementors of ManyChannelMonitor when a new block is connected or
/// disconnected using their `block_connected` and `block_disconnected` methods. However, rather
/// than calling these methods directly, the user should register implementors as listeners to the
/// BlockNotifier and call the BlockNotifier's `block_(dis)connected` methods, which will notify
/// all registered listeners in one go.
pub trait ManyChannelMonitor<ChanSigner: ChannelKeys>: Send + Sync {
/// Adds a monitor for the given `funding_txo`.
///
/// Implementer must also ensure that the funding_txo txid *and* outpoint are registered with
/// any relevant ChainWatchInterfaces such that the provided monitor receives block_connected
/// callbacks with the funding transaction, or any spends of it.
///
/// Further, the implementer must also ensure that each output returned in
/// monitor.get_outputs_to_watch() is registered to ensure that the provided monitor learns about
/// any spends of any of the outputs.
///
/// Any spends of outputs which should have been registered which aren't passed to
/// ChannelMonitors via block_connected may result in FUNDS LOSS.
fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr>;
/// Updates a monitor for the given `funding_txo`.
///
/// Implementer must also ensure that the funding_txo txid *and* outpoint are registered with
/// any relevant ChainWatchInterfaces such that the provided monitor receives block_connected
/// callbacks with the funding transaction, or any spends of it.
///
/// Further, the implementer must also ensure that each output returned in
/// monitor.get_outputs_to_watch() is registered to ensure that the provided monitor learns about
/// any spends of any of the outputs.
///
/// Any spends of outputs which should have been registered which aren't passed to
/// ChannelMonitors via block_connected may result in FUNDS LOSS.
fn update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
/// Used by ChannelManager to get the list of HTLCs resolved on-chain which need to be updated
/// with success or failure.
///
/// You should probably just call through to
/// ChannelMonitor::get_and_clear_pending_htlcs_updated() for each ChannelMonitor and return
/// the full list.
fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate>;
}
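// As the trait docs above suggest, a replicated deployment can wrap a local
// SimpleManyChannelMonitor and mirror every add/update to remote server(s) before the local
// copy is touched, so no watchtower is ever left holding newer state than us. A commented
// sketch; `send_to_watchtower` is a hypothetical transport, not part of this crate:
//
// fn update_monitor(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate)
// 	-> Result<(), ChannelMonitorUpdateErr>
// {
// 	// Remote copies *must* be current before this returns (see the trait docs above).
// 	send_to_watchtower(&funding_txo, &update).map_err(|_| ChannelMonitorUpdateErr::TemporaryFailure)?;
// 	self.local.update_monitor(funding_txo, update)
// }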
/// A simple implementation of a ManyChannelMonitor and ChainListener. Can be used to create a
/// watchtower or watch our own channels.
///
/// Note that you must provide your own key by which to refer to channels.
///
/// If you're accepting remote monitors (ie are implementing a watchtower), you must verify that
/// users cannot overwrite a given channel by providing a duplicate key. ie you should probably
/// index by a PublicKey which is required to sign any updates.
///
/// If you're using this for local monitoring of your own channels, you probably want to use
/// `OutPoint` as the key, which will give you a ManyChannelMonitor implementation.
pub struct SimpleManyChannelMonitor<Key, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref, C: Deref>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
C::Target: ChainWatchInterface,
{
#[cfg(test)] // Used in ChannelManager tests to manipulate channels directly
pub monitors: Mutex<HashMap<Key, ChannelMonitor<ChanSigner>>>,
#[cfg(not(test))]
monitors: Mutex<HashMap<Key, ChannelMonitor<ChanSigner>>>,
chain_monitor: C,
broadcaster: T,
logger: L,
fee_estimator: F
}
impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send, C: Deref + Sync + Send>
ChainListener for SimpleManyChannelMonitor<Key, ChanSigner, T, F, L, C>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
C::Target: ChainWatchInterface,
{
fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], _indexes_of_txn_matched: &[u32]) {
let block_hash = header.bitcoin_hash();
{
let mut monitors = self.monitors.lock().unwrap();
for monitor in monitors.values_mut() {
let txn_outputs = monitor.block_connected(txn_matched, height, &block_hash, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
for (ref txid, ref outputs) in txn_outputs {
for (idx, output) in outputs.iter().enumerate() {
self.chain_monitor.install_watch_outpoint((txid.clone(), idx as u32), &output.script_pubkey);
}
}
}
}
}
fn block_disconnected(&self, header: &BlockHeader, disconnected_height: u32) {
let block_hash = header.bitcoin_hash();
let mut monitors = self.monitors.lock().unwrap();
for monitor in monitors.values_mut() {
monitor.block_disconnected(disconnected_height, &block_hash, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
}
}
}
impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref, C: Deref> SimpleManyChannelMonitor<Key, ChanSigner, T, F, L, C>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
C::Target: ChainWatchInterface,
{
/// Creates a new object which can be used to monitor several channels given the chain
/// interface with which to register to receive notifications.
pub fn new(chain_monitor: C, broadcaster: T, logger: L, feeest: F) -> SimpleManyChannelMonitor<Key, ChanSigner, T, F, L, C> {
SimpleManyChannelMonitor {
monitors: Mutex::new(HashMap::new()),
chain_monitor,
broadcaster,
logger,
fee_estimator: feeest,
}
}
/// Adds or updates the monitor which monitors the channel referred to by the given key.
pub fn add_monitor_by_key(&self, key: Key, monitor: ChannelMonitor<ChanSigner>) -> Result<(), MonitorUpdateError> {
let mut monitors = self.monitors.lock().unwrap();
let entry = match monitors.entry(key) {
hash_map::Entry::Occupied(_) => return Err(MonitorUpdateError("Channel monitor for given key is already present")),
hash_map::Entry::Vacant(e) => e,
};
log_trace!(self.logger, "Got new Channel Monitor for channel {}", log_bytes!(monitor.funding_info.0.to_channel_id()[..]));
self.chain_monitor.install_watch_tx(&monitor.funding_info.0.txid, &monitor.funding_info.1);
self.chain_monitor.install_watch_outpoint((monitor.funding_info.0.txid, monitor.funding_info.0.index as u32), &monitor.funding_info.1);
for (txid, outputs) in monitor.get_outputs_to_watch().iter() {
for (idx, script) in outputs.iter().enumerate() {
self.chain_monitor.install_watch_outpoint((*txid, idx as u32), script);
}
}
entry.insert(monitor);
Ok(())
}
/// Updates the monitor which monitors the channel referred to by the given key.
pub fn update_monitor_by_key(&self, key: Key, update: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
let mut monitors = self.monitors.lock().unwrap();
match monitors.get_mut(&key) {
Some(orig_monitor) => {
log_trace!(self.logger, "Updating Channel Monitor for channel {}", log_funding_info!(orig_monitor));
orig_monitor.update_monitor(update, &self.broadcaster, &self.logger)
},
None => Err(MonitorUpdateError("No such monitor registered"))
}
}
}
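// Typical local monitoring usage, keyed by funding OutPoint as the struct docs recommend. A
// commented sketch; chain_watch, tx_broadcaster, logger and fee_estimator stand for whatever
// ChainWatchInterface/BroadcasterInterface/Logger/FeeEstimator implementors the application
// already holds (behind Arc or another Deref):
//
// let monitors = SimpleManyChannelMonitor::new(chain_watch, tx_broadcaster, logger, fee_estimator);
// monitors.add_monitor_by_key(funding_outpoint, channel_monitor)?;
// // Register the monitor set with a BlockNotifier so block_(dis)connected reaches every monitor.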
impl<ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send, C: Deref + Sync + Send> ManyChannelMonitor<ChanSigner> for SimpleManyChannelMonitor<OutPoint, ChanSigner, T, F, L, C>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
C::Target: ChainWatchInterface,
{
fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr> {
match self.add_monitor_by_key(funding_txo, monitor) {
Ok(_) => Ok(()),
Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
}
}
fn update_monitor(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
match self.update_monitor_by_key(funding_txo, update) {
Ok(_) => Ok(()),
Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
}
}
fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
let mut pending_htlcs_updated = Vec::new();
for chan in self.monitors.lock().unwrap().values_mut() {
pending_htlcs_updated.append(&mut chan.get_and_clear_pending_htlcs_updated());
}
pending_htlcs_updated
}
}
impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref, C: Deref> events::EventsProvider for SimpleManyChannelMonitor<Key, ChanSigner, T, F, L, C>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
C::Target: ChainWatchInterface,
{
fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
let mut pending_events = Vec::new();
for chan in self.monitors.lock().unwrap().values_mut() {
pending_events.append(&mut chan.get_and_clear_pending_events());
}
pending_events
}
}
/// If an HTLC expires within this many blocks, don't try to claim it in a shared transaction,
/// instead claiming it in its own individual transaction.
pub(crate) const CLTV_SHARED_CLAIM_BUFFER: u32 = 12;
/// If an HTLC expires within this many blocks, force-close the channel to broadcast the
/// HTLC-Success transaction.
/// In other words, this is an upper bound on how many blocks we think it can take us to get a
/// transaction confirmed (and we use it in a few more, equivalent, places).
pub(crate) const CLTV_CLAIM_BUFFER: u32 = 6;
/// Number of blocks by which point we expect our counterparty to have seen new blocks on the
/// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our
/// copies of ChannelMonitors, including watchtowers). We could enforce the contract by failing
/// at the CLTV expiration height, but giving our peer a grace period may be profitable for us
/// if they can still provide a late preimage. Nevertheless, the grace period has to be accounted
/// for in our CLTV_EXPIRY_DELTA to be secure. Following this policy, we may decrease the rate of
/// channel failures due to expiration, but we increase the cost of funds being locked up longer
/// in case of failure.
/// This delay also covers a low-power peer being slow to process blocks and so being behind us
/// on the accurate block height.
/// In case an onchain failure needs to be passed backward, we may see the last block of
/// ANTI_REORG_DELAY at worst this many blocks late, so this value serves not only as mercy for
/// our peer but also as a safeguard giving ourselves enough time to act.
pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
/// Number of blocks we wait on seeing an HTLC output being solved before we fail the
/// corresponding inbound HTLCs. This prevents us from failing backwards and then getting a reorg
/// resulting in us losing money.
/// We also use this delay to be sure we can remove our in-flight claim txn from the bump
/// candidates buffer. It may cause spurious generation of bumped claim txn but that's alright
/// given the outpoint is already solved by a previous claim tx. What we want to avoid is a reorg
/// evicting our claim tx and us then failing to keep bumping another claim tx to solve the
/// outpoint.
pub(crate) const ANTI_REORG_DELAY: u32 = 6;
/// Number of blocks before confirmation at which we fail back an un-relayed HTLC or at which we
/// refuse to accept a new HTLC.
///
/// This is used for a few separate purposes:
/// 1) if we've received an MPP HTLC to us and it expires within this many blocks and we are
/// waiting on additional parts (or waiting on the preimage for any HTLC from the user), we will
/// fail this HTLC,
/// 2) if we receive an HTLC within this many blocks of its expiry (plus one to avoid a race
/// condition with the above), we will fail this HTLC without telling the user we received it,
/// 3) if we are waiting on a connection or a channel state update to send an HTLC to a peer, and
/// that HTLC expires within this many blocks, we will simply fail the HTLC instead.
///
/// (1) is all about protecting us - we need enough time to update the channel state before we hit
/// CLTV_CLAIM_BUFFER, at which point we'd go on chain to claim the HTLC with the preimage.
///
/// (2) is the same, but with an additional buffer to avoid accepting an HTLC which is immediately
/// in a race condition between the user connecting a block (which would fail it) and the user
/// providing us the preimage (which would claim it).
///
/// (3) is about our counterparty - we don't want to relay an HTLC to a counterparty when they may
/// end up force-closing the channel on us to claim it.
pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;
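// With the constants above this works out to 6 + 3 = 9 blocks: once the chain tip is within 9
// blocks of an HTLC's cltv_expiry we fail it back (or refuse to accept/relay it) rather than
// risk having to rush a claim on-chain.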
#[derive(Clone, PartialEq)]
struct LocalSignedTx {
/// txid of the commitment transaction, just used to make comparison faster
txid: Txid,
revocation_key: PublicKey,
a_htlc_key: PublicKey,
b_htlc_key: PublicKey,
delayed_payment_key: PublicKey,
per_commitment_point: PublicKey,
feerate_per_kw: u64,
htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
}
/// We use this to track remote commitment transactions and HTLC outputs and
/// use it to generate any justice or 2nd-stage preimage/timeout transactions.
#[derive(PartialEq)]
struct RemoteCommitmentTransaction {
remote_delayed_payment_base_key: PublicKey,
remote_htlc_base_key: PublicKey,
on_remote_tx_csv: u16,
per_htlc: HashMap<Txid, Vec<HTLCOutputInCommitment>>
}
impl Writeable for RemoteCommitmentTransaction {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
self.remote_delayed_payment_base_key.write(w)?;
self.remote_htlc_base_key.write(w)?;
w.write_all(&byte_utils::be16_to_array(self.on_remote_tx_csv))?;
w.write_all(&byte_utils::be64_to_array(self.per_htlc.len() as u64))?;
for (ref txid, ref htlcs) in self.per_htlc.iter() {
w.write_all(&txid[..])?;
w.write_all(&byte_utils::be64_to_array(htlcs.len() as u64))?;
for &ref htlc in htlcs.iter() {
htlc.write(w)?;
}
}
Ok(())
}
}
impl Readable for RemoteCommitmentTransaction {
fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
let remote_commitment_transaction = {
let remote_delayed_payment_base_key = Readable::read(r)?;
let remote_htlc_base_key = Readable::read(r)?;
let on_remote_tx_csv: u16 = Readable::read(r)?;
let per_htlc_len: u64 = Readable::read(r)?;
let mut per_htlc = HashMap::with_capacity(cmp::min(per_htlc_len as usize, MAX_ALLOC_SIZE / 64));
for _ in 0..per_htlc_len {
let txid: Txid = Readable::read(r)?;
let htlcs_count: u64 = Readable::read(r)?;
let mut htlcs = Vec::with_capacity(cmp::min(htlcs_count as usize, MAX_ALLOC_SIZE / 32));
for _ in 0..htlcs_count {
let htlc = Readable::read(r)?;
htlcs.push(htlc);
}
if let Some(_) = per_htlc.insert(txid, htlcs) {
return Err(DecodeError::InvalidValue);
}
}
RemoteCommitmentTransaction {
remote_delayed_payment_base_key,
remote_htlc_base_key,
on_remote_tx_csv,
per_htlc,
}
};
Ok(remote_commitment_transaction)
}
}
/// When ChannelMonitor discovers an onchain outpoint which is part of a channel and for which
/// it needs to generate a tx to push the channel state forward, we cache the outpoint-solving tx
/// material to build a new bumped version in case of a lengthy confirmation delay.
#[derive(Clone, PartialEq)]
pub(crate) enum InputMaterial {
Revoked {
per_commitment_point: PublicKey,
remote_delayed_payment_base_key: PublicKey,
remote_htlc_base_key: PublicKey,
per_commitment_key: SecretKey,
input_descriptor: InputDescriptors,
amount: u64,
htlc: Option<HTLCOutputInCommitment>,
on_remote_tx_csv: u16,
},
RemoteHTLC {
per_commitment_point: PublicKey,
remote_delayed_payment_base_key: PublicKey,
remote_htlc_base_key: PublicKey,
preimage: Option<PaymentPreimage>,
htlc: HTLCOutputInCommitment
},
LocalHTLC {
preimage: Option<PaymentPreimage>,
amount: u64,
},
Funding {
funding_redeemscript: Script,
}
}
impl Writeable for InputMaterial {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
match self {
&InputMaterial::Revoked { ref per_commitment_point, ref remote_delayed_payment_base_key, ref remote_htlc_base_key, ref per_commitment_key, ref input_descriptor, ref amount, ref htlc, ref on_remote_tx_csv} => {
writer.write_all(&[0; 1])?;
per_commitment_point.write(writer)?;
remote_delayed_payment_base_key.write(writer)?;
remote_htlc_base_key.write(writer)?;
writer.write_all(&per_commitment_key[..])?;
input_descriptor.write(writer)?;
writer.write_all(&byte_utils::be64_to_array(*amount))?;
htlc.write(writer)?;
on_remote_tx_csv.write(writer)?;
},
&InputMaterial::RemoteHTLC { ref per_commitment_point, ref remote_delayed_payment_base_key, ref remote_htlc_base_key, ref preimage, ref htlc} => {
writer.write_all(&[1; 1])?;
per_commitment_point.write(writer)?;
remote_delayed_payment_base_key.write(writer)?;
remote_htlc_base_key.write(writer)?;
preimage.write(writer)?;
htlc.write(writer)?;
},
&InputMaterial::LocalHTLC { ref preimage, ref amount } => {
writer.write_all(&[2; 1])?;
preimage.write(writer)?;
writer.write_all(&byte_utils::be64_to_array(*amount))?;
},
&InputMaterial::Funding { ref funding_redeemscript } => {
writer.write_all(&[3; 1])?;
funding_redeemscript.write(writer)?;
}
}
Ok(())
}
}
impl Readable for InputMaterial {
fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
let input_material = match <u8 as Readable>::read(reader)? {
0 => {
let per_commitment_point = Readable::read(reader)?;
let remote_delayed_payment_base_key = Readable::read(reader)?;
let remote_htlc_base_key = Readable::read(reader)?;
let per_commitment_key = Readable::read(reader)?;
let input_descriptor = Readable::read(reader)?;
let amount = Readable::read(reader)?;
let htlc = Readable::read(reader)?;
let on_remote_tx_csv = Readable::read(reader)?;
InputMaterial::Revoked {
per_commitment_point,
remote_delayed_payment_base_key,
remote_htlc_base_key,
per_commitment_key,
input_descriptor,
amount,
htlc,
on_remote_tx_csv
}
},
1 => {
let per_commitment_point = Readable::read(reader)?;
let remote_delayed_payment_base_key = Readable::read(reader)?;
let remote_htlc_base_key = Readable::read(reader)?;
let preimage = Readable::read(reader)?;
let htlc = Readable::read(reader)?;
InputMaterial::RemoteHTLC {
per_commitment_point,
remote_delayed_payment_base_key,
remote_htlc_base_key,
preimage,
htlc
}
},
2 => {
let preimage = Readable::read(reader)?;
let amount = Readable::read(reader)?;
InputMaterial::LocalHTLC {
preimage,
amount,
}
},
3 => {
InputMaterial::Funding {
funding_redeemscript: Readable::read(reader)?,
}
}
_ => return Err(DecodeError::InvalidValue),
};
Ok(input_material)
}
}
/// ClaimRequest is a descriptor structure to communicate between the detection
/// and reaction modules. They are generated by ChannelMonitor while parsing
/// onchain txn leaked from a channel, and handed over to OnchainTxHandler, which
/// is responsible for opportunistic aggregation, selecting and enforcing
/// bumping logic, and building and signing transactions.
pub(crate) struct ClaimRequest {
// Block height before which claiming is exclusive to one party;
// once it is reached, claiming may be contentious.
pub(crate) absolute_timelock: u32,
// A timeout tx must have nLocktime set, which means aggregating multiple
// of them must use the highest nLocktime among them to satisfy all of them.
// Sadly this has a few pitfalls: a) it takes longer to get funds back, and b) the
// CLTV_DELTA of a sooner HTLC could be swallowed by the highest nLocktime of the HTLC set.
// To simplify, we mark them as non-aggregable.
pub(crate) aggregable: bool,
// Basic bitcoin outpoint (txid, vout)
pub(crate) outpoint: BitcoinOutPoint,
// Depending on the outpoint type, the set of data needed to generate the transaction digest
// and satisfy the witness program.
pub(crate) witness_data: InputMaterial
}
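// For instance, a claim of a revoked to_local output can be marked aggregable and batched with
// other justice claims, while an HTLC-timeout claim carries its own nLocktime and is marked
// non-aggregable per the note above. A commented sketch; `commitment_txid`, `vout` and
// `material` are placeholders for values derived while parsing the on-chain transaction:
//
// let request = ClaimRequest {
// 	absolute_timelock: htlc.cltv_expiry, // claiming may be contentious past this height
// 	aggregable: false,                   // timeout claims must satisfy their own nLocktime
// 	outpoint: BitcoinOutPoint { txid: commitment_txid, vout },
// 	witness_data: material,              // one of the InputMaterial variants above
// };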
/// When ChannelMonitor discovers certain classes of onchain tx, we may have to take action
/// once they mature to enough confirmations (ANTI_REORG_DELAY)
#[derive(Clone, PartialEq)]
enum OnchainEvent {
/// An HTLC output getting solved by a timeout; at maturation we pass the upstream payment source
/// information to fail the inbound HTLC on the backward channel. Note that in the preimage case
/// we pass the info upstream without delay, as we can only win from doing so; thus it is never an
/// OnchainEvent.
HTLCUpdate {
htlc_update: (HTLCSource, PaymentHash),
},
MaturingOutput {
descriptor: SpendableOutputDescriptor,
},
}
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
#[cfg_attr(test, derive(PartialEq))]
#[derive(Clone)]
pub(super) enum ChannelMonitorUpdateStep {
LatestLocalCommitmentTXInfo {
commitment_tx: LocalCommitmentTransaction,
htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
},
LatestRemoteCommitmentTXInfo {
unsigned_commitment_tx: Transaction, // TODO: We should actually only need the txid here
htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
commitment_number: u64,
their_revocation_point: PublicKey,
},
PaymentPreimage {
payment_preimage: PaymentPreimage,
},
CommitmentSecret {
idx: u64,
secret: [u8; 32],
},
/// Used to indicate that no future updates will occur, and likely that the latest local
/// commitment transaction(s) should be broadcast, as the channel has been force-closed.
ChannelForceClosed {
/// If set to false, we shouldn't broadcast the latest local commitment transaction as we
/// think we've fallen behind!
should_broadcast: bool,
},
}
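// A force-close, for example, arrives as one final update holding a single step (in-crate
// sketch; the fields are pub(super), so this shape is only constructible within `ln`):
//
// let update = ChannelMonitorUpdate {
// 	update_id: last_update_id + 1, // update_ids are strictly increasing, one per update
// 	updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
// };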
impl Writeable for ChannelMonitorUpdateStep {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
match self {
&ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo { ref commitment_tx, ref htlc_outputs } => {
0u8.write(w)?;
commitment_tx.write(w)?;
(htlc_outputs.len() as u64).write(w)?;
for &(ref output, ref signature, ref source) in htlc_outputs.iter() {
output.write(w)?;
signature.write(w)?;
source.write(w)?;
}
}
&ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo { ref unsigned_commitment_tx, ref htlc_outputs, ref commitment_number, ref their_revocation_point } => {
1u8.write(w)?;
unsigned_commitment_tx.write(w)?;
commitment_number.write(w)?;
their_revocation_point.write(w)?;
(htlc_outputs.len() as u64).write(w)?;
for &(ref output, ref source) in htlc_outputs.iter() {
output.write(w)?;
source.as_ref().map(|b| b.as_ref()).write(w)?;
}
},
&ChannelMonitorUpdateStep::PaymentPreimage { ref payment_preimage } => {
2u8.write(w)?;
payment_preimage.write(w)?;
},
&ChannelMonitorUpdateStep::CommitmentSecret { ref idx, ref secret } => {
3u8.write(w)?;
idx.write(w)?;
secret.write(w)?;
},
&ChannelMonitorUpdateStep::ChannelForceClosed { ref should_broadcast } => {
4u8.write(w)?;
should_broadcast.write(w)?;
},
}
Ok(())
}
}
impl Readable for ChannelMonitorUpdateStep {
fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
match Readable::read(r)? {
0u8 => {
Ok(ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo {
commitment_tx: Readable::read(r)?,
htlc_outputs: {
let len: u64 = Readable::read(r)?;
let mut res = Vec::new();
for _ in 0..len {
res.push((Readable::read(r)?, Readable::read(r)?, Readable::read(r)?));
}
res
},
})
},
1u8 => {
Ok(ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo {
unsigned_commitment_tx: Readable::read(r)?,
commitment_number: Readable::read(r)?,
their_revocation_point: Readable::read(r)?,
htlc_outputs: {
let len: u64 = Readable::read(r)?;
let mut res = Vec::new();
for _ in 0..len {
res.push((Readable::read(r)?, <Option<HTLCSource> as Readable>::read(r)?.map(|o| Box::new(o))));
}
res
},
})
},
2u8 => {
Ok(ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage: Readable::read(r)?,
})
},
3u8 => {
Ok(ChannelMonitorUpdateStep::CommitmentSecret {
idx: Readable::read(r)?,
secret: Readable::read(r)?,
})
},
4u8 => {
Ok(ChannelMonitorUpdateStep::ChannelForceClosed {
should_broadcast: Readable::read(r)?
})
},
_ => Err(DecodeError::InvalidValue),
}
}
}
/// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates
/// on-chain transactions to ensure no loss of funds occurs.
///
/// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date
/// information and are actively monitoring the chain.
///
/// Pending Events or updated HTLCs which have not yet been read out by
/// get_and_clear_pending_htlcs_updated or get_and_clear_pending_events are serialized to disk
/// and reloaded at deserialize-time. Thus, you must ensure that, when handling events, all
/// events retrieved are fully handled before re-serializing the new state.
pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
latest_update_id: u64,
commitment_transaction_number_obscure_factor: u64,
destination_script: Script,
broadcasted_local_revokable_script: Option<(Script, PublicKey, PublicKey)>,
remote_payment_script: Script,
shutdown_script: Script,
keys: ChanSigner,
funding_info: (OutPoint, Script),
current_remote_commitment_txid: Option<Txid>,
prev_remote_commitment_txid: Option<Txid>,
remote_tx_cache: RemoteCommitmentTransaction,
funding_redeemscript: Script,
channel_value_satoshis: u64,
// first is the idx of the first of the two revocation points
their_cur_revocation_points: Option<(u64, PublicKey, Option<PublicKey>)>,
on_local_tx_csv: u16,
commitment_secrets: CounterpartyCommitmentSecrets,
remote_claimable_outpoints: HashMap<Txid, Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>,
/// We cannot identify HTLC-Success or HTLC-Timeout transactions by themselves on the chain.
/// Nor can we figure out their commitment numbers without the commitment transaction they are
/// spending. Thus, in order to claim them via revocation key, we track all the remote
/// commitment transactions which we find on-chain, mapping them to the commitment number which
/// can be used to derive the revocation key and claim the transactions.
remote_commitment_txn_on_chain: HashMap<Txid, (u64, Vec<Script>)>,
/// Cache used to make pruning of payment_preimages faster.
/// Maps payment_hash values to commitment numbers for non-revoked remote transactions
/// (ie should remain pretty small).
/// Serialized to disk but should generally not be sent to Watchtowers.
remote_hash_commitment_number: HashMap<PaymentHash, u64>,
// We store two local commitment transactions to avoid any race conditions where we may update
// some monitors (potentially on watchtowers) but then fail to update others, resulting in the
// various monitors for one channel being out of sync, and us broadcasting a local
// transaction for which we have deleted claim information on some watchtowers.
prev_local_signed_commitment_tx: Option<LocalSignedTx>,
current_local_commitment_tx: LocalSignedTx,
// Used just for ChannelManager to make sure it has the latest channel data during
// deserialization
current_remote_commitment_number: u64,
// Used just for ChannelManager to make sure it has the latest channel data during
// deserialization
current_local_commitment_number: u64,
payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
pending_htlcs_updated: Vec<HTLCUpdate>,
pending_events: Vec<events::Event>,
// Used to track onchain events, i.e. transactions which are part of channels confirmed on
// chain, on which we have to take action once they reach enough confs. The key is a block
// height timer, i.e. we enforce actions when we receive a block with the given height. Actions
// depend on the OnchainEvent type.
onchain_events_waiting_threshold_conf: HashMap<u32, Vec<OnchainEvent>>,
// If we get serialized out and re-read, we need to make sure that the chain monitoring
// interface knows about the TXOs that we want to be notified of spends of. We could probably
// be smart and derive them from the above storage fields, but it's much simpler and more
// Obviously Correct (tm) if we just keep track of them explicitly.
outputs_to_watch: HashMap<Txid, Vec<Script>>,
#[cfg(test)]
pub onchain_tx_handler: OnchainTxHandler<ChanSigner>,
#[cfg(not(test))]
onchain_tx_handler: OnchainTxHandler<ChanSigner>,
// This is set when the Channel[Manager] generated a ChannelMonitorUpdate which indicated the
// channel has been force-closed. After this is set, no further local commitment transaction
// updates may occur, and we panic!() if one is provided.
lockdown_from_offchain: bool,
// Set once we've signed a local commitment transaction and handed it over to our
// OnchainTxHandler. After this is set, no future updates to our local commitment transactions
// may occur, and we fail any such monitor updates.
local_tx_signed: bool,
// We simply modify last_block_hash in Channel's block_connected so that serialization is
// consistent, but hopefully the users' copy handles block_connected in a consistent way.
// (We do *not*, however, update it in update_monitor, to ensure any local user copies keep
// their last_block_hash from their own state and not from updated copies that didn't run
// through the full block_connected.)
pub(crate) last_block_hash: BlockHash,
secp_ctx: Secp256k1<secp256k1::All>, //TODO: dedup this a bit...
}
#[cfg(any(test, feature = "fuzztarget"))]
/// Used only in testing and fuzztarget to check serialization roundtrips don't change the
/// underlying object
impl<ChanSigner: ChannelKeys> PartialEq for ChannelMonitor<ChanSigner> {
fn eq(&self, other: &Self) -> bool {
if self.latest_update_id != other.latest_update_id ||
self.commitment_transaction_number_obscure_factor != other.commitment_transaction_number_obscure_factor ||
self.destination_script != other.destination_script ||
self.broadcasted_local_revokable_script != other.broadcasted_local_revokable_script ||
self.remote_payment_script != other.remote_payment_script ||
self.keys.pubkeys() != other.keys.pubkeys() ||
self.funding_info != other.funding_info ||
self.current_remote_commitment_txid != other.current_remote_commitment_txid ||
self.prev_remote_commitment_txid != other.prev_remote_commitment_txid ||
self.remote_tx_cache != other.remote_tx_cache ||
self.funding_redeemscript != other.funding_redeemscript ||
self.channel_value_satoshis != other.channel_value_satoshis ||
self.their_cur_revocation_points != other.their_cur_revocation_points ||
self.on_local_tx_csv != other.on_local_tx_csv ||
self.commitment_secrets != other.commitment_secrets ||
self.remote_claimable_outpoints != other.remote_claimable_outpoints ||
self.remote_commitment_txn_on_chain != other.remote_commitment_txn_on_chain ||
self.remote_hash_commitment_number != other.remote_hash_commitment_number ||
self.prev_local_signed_commitment_tx != other.prev_local_signed_commitment_tx ||
self.current_remote_commitment_number != other.current_remote_commitment_number ||
self.current_local_commitment_number != other.current_local_commitment_number ||
self.current_local_commitment_tx != other.current_local_commitment_tx ||
self.payment_preimages != other.payment_preimages ||
self.pending_htlcs_updated != other.pending_htlcs_updated ||
self.pending_events.len() != other.pending_events.len() || // We trust events to round-trip properly
self.onchain_events_waiting_threshold_conf != other.onchain_events_waiting_threshold_conf ||
self.outputs_to_watch != other.outputs_to_watch ||
self.lockdown_from_offchain != other.lockdown_from_offchain ||
self.local_tx_signed != other.local_tx_signed
{
false
} else {
true
}
}
}
impl<ChanSigner: ChannelKeys + Writeable> ChannelMonitor<ChanSigner> {
/// Writes this monitor into the given writer, suitable for writing to disk.
///
/// Note that the deserializer is only implemented for (Sha256dHash, ChannelMonitor), which
/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
/// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the
/// returned block hash and the current chain and then reconnecting blocks to get to the
/// best chain) upon deserializing the object!
pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
//TODO: We still write out all the serialization here manually instead of using the fancy
//serialization framework we have; we should migrate things over to it.
writer.write_all(&[SERIALIZATION_VERSION; 1])?;
writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
self.latest_update_id.write(writer)?;
// Set in initial Channel-object creation, so should always be set by now:
U48(self.commitment_transaction_number_obscure_factor).write(writer)?;
self.destination_script.write(writer)?;
if let Some(ref broadcasted_local_revokable_script) = self.broadcasted_local_revokable_script {
writer.write_all(&[0; 1])?;
broadcasted_local_revokable_script.0.write(writer)?;
broadcasted_local_revokable_script.1.write(writer)?;
broadcasted_local_revokable_script.2.write(writer)?;
} else {
writer.write_all(&[1; 1])?;
}
self.remote_payment_script.write(writer)?;
self.shutdown_script.write(writer)?;
self.keys.write(writer)?;
writer.write_all(&self.funding_info.0.txid[..])?;
writer.write_all(&byte_utils::be16_to_array(self.funding_info.0.index))?;
self.funding_info.1.write(writer)?;
self.current_remote_commitment_txid.write(writer)?;
self.prev_remote_commitment_txid.write(writer)?;
self.remote_tx_cache.write(writer)?;
self.funding_redeemscript.write(writer)?;
self.channel_value_satoshis.write(writer)?;
match self.their_cur_revocation_points {
Some((idx, pubkey, second_option)) => {
writer.write_all(&byte_utils::be48_to_array(idx))?;
writer.write_all(&pubkey.serialize())?;
match second_option {
Some(second_pubkey) => {
writer.write_all(&second_pubkey.serialize())?;
},
None => {
writer.write_all(&[0; 33])?;
},
}
},
None => {
writer.write_all(&byte_utils::be48_to_array(0))?;
},
}
writer.write_all(&byte_utils::be16_to_array(self.on_local_tx_csv))?;
self.commitment_secrets.write(writer)?;
macro_rules! serialize_htlc_in_commitment {
($htlc_output: expr) => {
writer.write_all(&[$htlc_output.offered as u8; 1])?;
writer.write_all(&byte_utils::be64_to_array($htlc_output.amount_msat))?;
writer.write_all(&byte_utils::be32_to_array($htlc_output.cltv_expiry))?;
writer.write_all(&$htlc_output.payment_hash.0[..])?;
$htlc_output.transaction_output_index.write(writer)?;
}
}
writer.write_all(&byte_utils::be64_to_array(self.remote_claimable_outpoints.len() as u64))?;
for (ref txid, ref htlc_infos) in self.remote_claimable_outpoints.iter() {
writer.write_all(&txid[..])?;
writer.write_all(&byte_utils::be64_to_array(htlc_infos.len() as u64))?;
for &(ref htlc_output, ref htlc_source) in htlc_infos.iter() {
serialize_htlc_in_commitment!(htlc_output);
htlc_source.as_ref().map(|b| b.as_ref()).write(writer)?;
}
}
writer.write_all(&byte_utils::be64_to_array(self.remote_commitment_txn_on_chain.len() as u64))?;