// for providing examples only, don't run without understanding
sdfsdfs_NOT_SAFE_TO_RUN_dsfsdfsdf
import fs from 'fs' // comes with nodejs, to read/write log files
import dns from 'dns' // comes with nodejs, to check if there's internet access
// import os from 'os' // comes with nodejs, system stuff like memory checks
// my scripts
import bos from './bos.js' // my wrapper for bos, needs to be in same folder
import htlcLimiter from './htlcLimiter.js' // can limit number of htlcs per channel
import getBattery from './getBattery.js' // measure battery
const { min, max, trunc, floor, abs, random, log2, pow, ceil, exp, PI } = Math // useful Math
const copy = item => JSON.parse(JSON.stringify(item)) // copy values to new item, useful
// let it adjust fees and max htlc sizes and update peer records (just simulates visually otherwise)
const ADJUST_POLICIES = true
// let it run bos.reconnect() at all (tries to wake up some disabled channels by disconnecting them)
// const ALLOW_BOS_RECONNECT = true (REMOVED)
// try simple reconnecting to inactive or offline peers (safer to run frequently)
const ALLOW_SIMPLE_RECONNECT = true
// let it rebalance (just simulates visually otherwise)
const ALLOW_REBALANCING = true
// let it request resetting node from another process to fix connections (by creating a file for resetHandler.js to see)
const ALLOW_NODE_RESET = true
// let it actively limit number of htlcs per channel
const ALLOW_HTLC_LIMITER = true
// if battery below 50%, request node shutdown (by creating a file for resetHandler.js to see)
const ALLOW_NODE_SHUTDOWN_ON_LOW_BATTERY = true
// backup payments in jsons & then remove from database for speed
const ALLOW_DB_CLEANUP = true
// restart node every day (requires ALLOW_NODE_RESET and resetHandler running)
const ALLOW_DAILY_RESET = true
const MIN_DAYS_BETWEEN_RESETS = 3
// shut down on system memory leak found (WIP), shell command: free | grep Mem | awk '{print $3/$2 * 100.0}'
// const ALLOW_SHUTDOWN_ON_LOW_RAM = false
// time to sleep between bot cycles of the above operations
const MINUTES_BETWEEN_STEPS = 11
// print out acceptance/rejection of htlc requests
const SHOW_HTLC_REQUESTS = false
// store in log files htlc requests
const LOG_HTLC_REQUESTS = true
// memory handling
const SHOW_RAM_USAGE = true
// shut down node if free memory on system falls below this threshold for any reason
// const LOW_RAM_THRESHOLD_MB = 100
// within what UTC hour to reset node (0-23h) if ALLOW_DAILY_RESET
const UTC_HOUR_FOR_RESTART = 6
// how often to move payments from db to backup logs
const DAYS_BETWEEN_DB_CLEANING = 3
// rebalance with faster keysends after bos rebalance works
// (faster but higher risk of stuck sats so I send less)
const USE_KEYSENDS_AFTER_BALANCE = true
// only use keysends (makes above irrelevant)
const ONLY_USE_KEYSENDS = false
// show rebalancing printouts (very wordy routing info)
const SHOW_REBALANCE_LOG = false
// suspect it might cause tor issues if too much bandwidth is being used
// setting to 1 makes it try just 1 rebalance at a time
const MAX_PARALLEL_REBALANCES = 5
// how many days back to look for routing stats, must be longer than any other DAYS setting
const DAYS_FOR_STATS = 7
// how many days back do we keep data for, to prune what we store or care about at all
const MAX_DAYS_FOR_STATS = 7 * 4 * 3 // ~3 months
// how many days back for active ppm region
const MAX_DAYS_FOR_PPM_LOOKBACK = MAX_DAYS_FOR_STATS / 2
const MIN_DAYS_FOR_PPM_LOOKBACK = 1
// hours between running bos reconnect (it disconnects/reconnects online peers that disable to me)
// const MINUTES_BETWEEN_BOS_RECONNECTS = 13.37 * 60 // (REMOVED) offset from 24 hours to shift around when
// hours between running basic offline/inactive reconnect
const MINUTES_BETWEEN_SIMPLE_RECONNECTS = 69
// minimum sats away from 0.5 balance to consider off-balance
const MIN_SATS_OFF_BALANCE = 420e3
// below this many unbalanced sats, rebalancing can stop (bos rebalance requires >50k)
const MIN_REBALANCE_SATS = 69e3
// smallest amount of sats necessary to consider a side not drained
const MIN_SATS_PER_SIDE = 1e6
// local sats below this means channel is drained
const SATS_PER_SIDE_DRAINED_LIMIT = MIN_SATS_PER_SIDE * 0.25
// wait at least _ minutes for node to finish restarting before checking again
// has to include recompacting time if used!!!
const MIN_WAIT_MINUTES_FOR_NODE_RESTART = 21
// array of public key strings to avoid in paths (avoids from settings.json added to it)
const AVOID_LIST = []
// limit of sats to balance per attempt
// larger = faster rebalances, less for channels.db to store
// smaller = can use smaller liquidity/channels for cheaper/easier rebalances
// bos rebalance does probing + size up htlc strategy
// (bos rebalance requires >50k)
const MAX_REBALANCE_SATS = 212121 * 3
// sats to balance via keysends
const MAX_REBALANCE_SATS_KEYSEND = 212121 * 3
// fuzzy the amount being rebalanced to blend in better
const fuzzyAmount = (amount, fraction = 0.21 * 2) => trunc(amount * (1 - fraction * random()))
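// worked example (hypothetical amount): with the default fraction of 0.42,
// fuzzyAmount(100000) returns a random integer between ~58,000 and 100,000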
// whether to average in the earned (routed-out) fee rate measured over DAYS_FOR_STATS
// to determine what fee rate to use for rebalance
const INCLUDE_EARNED_FEE_RATE_FOR_REBALANCE = true
// channels smaller than this not necessary to balance or adjust fees for
// usually special cases anyway
// (maybe use proportional fee policy for them instead)
// >2m for now
const MIN_CHAN_SIZE = MIN_SATS_OFF_BALANCE * 2 + MIN_SATS_PER_SIDE * 2 // 2.84e6
// multiplier for proportional safety ppm margin
const SAFETY_MARGIN = 1.21 // 1.618 // 1.12345 //
// maximum flat safety ppm margin (proportional via SAFETY_MARGIN below this value)
const SAFETY_MARGIN_FLAT_MAX = 272 // 222 //
// how often to update fees and max htlc sizes (keep high to minimize network gossip)
// also time span of flow to look back at for deciding if and by how much to increase each fee rate
const MINUTES_BETWEEN_FEE_CHANGES = (21 * 60) / 2 // 630
// max size of fee adjustment upward
// const NUDGE_UP_MAX_PER_DAY = 0.042
// const NUDGE_UP = NUDGE_UP_MAX_PER_DAY / ((24 * 60) / MINUTES_BETWEEN_FEE_CHANGES)
const NUDGE_UP = 0.042
// max size of fee adjustment downward
const NUDGE_DOWN_PER_DAY = 0.01337
const NUDGE_DOWN = NUDGE_DOWN_PER_DAY / ((24 * 60) / MINUTES_BETWEEN_FEE_CHANGES)
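// worked numbers from the constants above: MINUTES_BETWEEN_FEE_CHANGES = 630,
// so ~2.3 fee updates per day and NUDGE_DOWN works out to roughly 0.0059 per update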
// increase NUDGE_DOWN by this factor when channel has never been seen routing out
const NUDGE_DOWN_INACTIVE_MULTIPLIER = 3
// how much internal ppm setpoint has to change by to update what public sees as new fee rate
const FEE_CHANGE_TOLERANCE_FRACTION = 0.21 // by this fraction
const FEE_CHANGE_TOLERANCE_FLAT = 21 // or by this flat amount in ppm
// min days of no routing activity before allowing reduction in fees
const DAYS_FOR_FEE_REDUCTION = 0 // 0.25
// minimum ppm ever possible for a fee policy setting
const MIN_PPM_ABSOLUTE = 0
// max ppm ever possible for fee policy setting
const MAX_PPM_ABSOLUTE = 4999
// rebalancing fee rates below this aren't considered for rebalancing
const MIN_FEE_RATE_FOR_REBALANCE = 21
// max fee rate for rebalancing even if channel earns more
const MAX_FEE_RATE_FOR_REBALANCE = 1500
// fee rate to stop forwards out of drained channel
const ROUTING_STOPPING_FEE_RATE = 3333
// max minutes to spend per rebalance try
const MINUTES_FOR_REBALANCE = 6
// max minutes to spend per keysend try
const MINUTES_FOR_KEYSEND = 5
// number of times to retry a rebalance on probe timeout while
// increasing fee for last hop to skip all depleted channels
// only applies specifically on ProbeTimeout so unsearched routes remain
const RETRIES_ON_TIMEOUTS_REBALANCE = 3
const RETRIES_ON_TIMEOUTS_SEND = 1
// time between retrying same good pair
const MIN_MINUTES_BETWEEN_SAME_PAIR = (MINUTES_BETWEEN_STEPS + MINUTES_FOR_REBALANCE) * 2
// max rebalance repeats while successful
// if realized rebalance rate is > 1/2 max rebalance rate
// this will just limit repeats when there's no major discounts
const MAX_REBALANCE_REPEATS = 12 // without major discount
const MAX_REBALANCE_REPEATS_ANY = 21 // with even discounts
// multiply max ppm rate after each rebalance for repeats by this
const REPEAT_MAX_RATE_RATIO = 0.98
// ms to put between each rebalance launch for safety
const STAGGERED_LAUNCH_MS = 1111
// as the 0-profit fee rate increases, the fee rate where the proportional
// margin takes over the flat one is where
// (break-even fee rate) * SAFETY_MARGIN = SAFETY_MARGIN_FLAT_MAX
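// worked number from the relation above: 272 / 1.21 ≈ 225 ppm is roughly the
// break-even fee rate at which the proportional margin reaches the flat cap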
// how much error to use for balance calcs
// const BALANCE_DEV = 0.1
// weight multiplier for rebalancing rates that were actually used vs suggested
// const WORKED_WEIGHT = 5
// min sample size before using rebalancing ppm rates for anything
// const MIN_SAMPLE_SIZE = 3
// fraction of peers that need to be offline to restart tor service
// const PEERS_OFFLINE_PERCENT_MAXIMUM = 11 (removed)
// const INCLUDE_RECONNECTED_IN_OFFLINE = false (removed)
// show everything
const VERBOSE = true
const DEBUG = true
// what to weight random selection by
const WEIGHT_OPTIONS = {}
// WEIGHT_OPTIONS.FLAT = () => 1 // no preferences, totally random
// 2x more sats from balance is 2x more likely to be selected
// WEIGHT_OPTIONS.UNBALANCED_SATS = peer => peer.unbalancedSats
// 2x more sats from balance is ~1.4x more likely to be selected
// better for trying more channel combinations while still favoring unbalanced
// WEIGHT_OPTIONS.UNBALANCED_SATS_SQRT = peer => trunc(sqrt(peer.unbalancedSats))
// WEIGHT_OPTIONS.UNBALANCED_SATS_SQRTSQRT = peer => trunc(sqrt(sqrt(peer.unbalancedSats)))
// WEIGHT_OPTIONS.CHANNEL_SIZE = peer => peer.totalSats
// ensure highest priority if below MIN_SATS_PER_SIDE on any side and decay to 0 when balanced
// prettier-ignore
WEIGHT_OPTIONS.MIN_LIQUIDITY = peer =>
1 - exp(-2 * pow(PI, 2) * pow((peer.outbound_liquidity - 0.5 * peer.capacity) / (peer.capacity - 2 * MIN_SATS_PER_SIDE), 2))
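// worked examples for a hypothetical 4M-capacity channel (MIN_SATS_PER_SIDE = 1M):
// outbound 2.0M (balanced) -> weight 0; outbound 1.0M -> weight ≈ 0.99; outbound 0 -> weight ≈ 1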
const WEIGHT = WEIGHT_OPTIONS.MIN_LIQUIDITY // default weight
// just to prioritize more profitable channels including fee rate in random sorting
// give highest priority to higher fee rates on scale 0 - 1
WEIGHT_OPTIONS.FEE_RATE = peer => 1 - exp((-2 * PI * peer.fee_rate) / MAX_FEE_RATE_FOR_REBALANCE)
// combine fee rate and liquidity functions just for remote sorting
const WEIGHT_REMOTE = peer => 0.5 * WEIGHT_OPTIONS.MIN_LIQUIDITY(peer) + 0.5 * WEIGHT_OPTIONS.FEE_RATE(peer)
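// worked example of the FEE_RATE term: 0 ppm -> 0, 250 ppm -> ≈ 0.65, 750 ppm -> ≈ 0.96,
// so WEIGHT_REMOTE blends how drained a remote-heavy peer is with how well it pays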
// const WEIGHT_REMOTE = peer => WEIGHT_OPTIONS.MIN_LIQUIDITY(peer) * WEIGHT_OPTIONS.FEE_RATE(peer)
// experimental - fake small flowrate to be ready to expect
// const MIN_FLOWRATE_PER_DAY = 10000 // sats/day
// full path to db from /, can't use ~/
const DB_PATH = '/home/me/Umbrel/lnd/data/graph/mainnet/channel.db'
const SNAPSHOTS_PATH = './snapshots'
const PEERS_LOG_PATH = './peers'
const LOG_FILES = './logs'
const TIMERS_PATH = 'timers.json'
const SETTINGS_PATH = 'settings.json'
const LAST_SEEN_PATH = `${LOG_FILES}/lastSeen.json`
const DEFAULT_TIMERS = {
// lastReconnect: 0, // last bos reconnect timestamp (removed)
lastSimpleReconnect: 0, // last simple reconnect timestamp
lastFeeUpdate: 0, // last fee update timestamp
lastCleaningUpdate: 0, // last payment backup and cleaning timestamp
lastDailyReset: 0, // last time node containers were restarted via daily scheduled time
lastNodeReset: 0 // last time node containers were restarted at all
}
// global node info
const mynode = {
scriptStarted: Date.now(),
public_key: '',
restartFailures: 0,
// offlineLimitPercentage: PEERS_OFFLINE_PERCENT_MAXIMUM,
peers: [],
htlcLimiter: {},
timers: copy(DEFAULT_TIMERS)
}
const runBot = async () => {
logDim('runBot()')
// force clean up memory if gc exposed with --expose-gc
global?.gc?.()
printMemoryUsage('(at start of runBot cycle)')
// check battery
await checkBattery()
// check if need to restart node (scheduled daily)
await runNodeRestartCheck()
// check if time for bos reconnect
// await runBotReconnectCheck()
// check if time for updating fees
await runUpdateFeesCheck()
// runCleaningCheck
await runCleaningCheck()
// simple reconnect
await runSimpleReconnect()
// do rebalancing
await runBotRebalanceOrganizer()
// long pause
await sleep(MINUTES_BETWEEN_STEPS * minutes)
// restart
runBot()
}
// starts everything
const initialize = async () => {
// get authorized access to node
const auth = await bos.initializeAuth()
// get your own public key
const identity = await bos.callAPI('getIdentity')
if (!identity.public_key || identity.public_key.length < 10) {
console.log()
throw new Error('unknown public key for this node')
}
mynode.public_key = identity.public_key
const feeUpdatesPerDay = +((60 * 24) / MINUTES_BETWEEN_FEE_CHANGES).toFixed(1)
const feeRateIncreaseString = (NUDGE_UP * 100).toFixed(2)
const feeRateDecreaseString = (NUDGE_DOWN * 100).toFixed(2)
const feeRateToleranceString = (FEE_CHANGE_TOLERANCE_FRACTION * 100).toFixed(0)
const feeRateToleranceFlatString = FEE_CHANGE_TOLERANCE_FLAT.toFixed(0)
const maxUpFeeChangePerDay = ((1 + NUDGE_UP) ** feeUpdatesPerDay - 1) * 100
const maxDownFeeChangePerDay = (1 - (1 - NUDGE_DOWN) ** feeUpdatesPerDay) * 100
const hoursBetweenFeeChanges = (MINUTES_BETWEEN_FEE_CHANGES / 60).toFixed(1)
// roughly how many decreases it takes to undo one increase
const decreasesToUndo = ceil(NUDGE_UP / NUDGE_DOWN)
// how much time is that
const minutesToUndo = (decreasesToUndo + 2) * MINUTES_BETWEEN_FEE_CHANGES
const daysToUndoString = (minutesToUndo / 60 / 24).toFixed(1)
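// worked numbers with the default constants above (fee changes every 630 minutes):
// ~2.3 updates/day, max up ≈ +9.9%/day, max down ≈ -1.3%/day,
// and one full +4.2% increase takes ~8 decreases (~4.4 days) to undo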
console.log(`${getDate()}
========================================================
this node's public key:
"${mynode.public_key}"
There are at most ${feeUpdatesPerDay} fee updates per day.
UP: Channel fee rate set-point increases by max of +${feeRateIncreaseString}%
every ${hoursBetweenFeeChanges} hours or more,
increasing more when its outflow at that rate is closer to ${MIN_SATS_PER_SIDE} sats per day.
At constant high outflow absolute max is +${maxUpFeeChangePerDay.toFixed(1)}% / day.
DOWN: Fee rate set-point decreases by max of -${feeRateDecreaseString}%
every ${hoursBetweenFeeChanges} hours or more
Channel must also have more than ${pretty(MIN_SATS_PER_SIDE)} sats local
to allow a fee rate decrease, otherwise outflow numbers are unreliable.
At continuous 0 outflow, max decrease per day is -${maxDownFeeChangePerDay.toFixed(1)}%.
If never logged as outflowing, the decrease rate is increased by a factor of ${NUDGE_DOWN_INACTIVE_MULTIPLIER}x.
One high increase in fee rate takes ${daysToUndoString} days to undo with decreases.
Actual fee rate in policy is only updated when internal set-point is more
than ${feeRateToleranceString}% or ${feeRateToleranceFlatString} ppm away from current public policy fee rate.
If channel has under ${pretty(SATS_PER_SIDE_DRAINED_LIMIT)} sats local
it's also considered drained and policy fee rate is temporarily increased
to ${ROUTING_STOPPING_FEE_RATE} ppm to discourage additional routing.
IF THIS IS INCORRECT, ctrl + c
========================================================
`)
// small pause for friendly stop
await sleep(5 * seconds)
// make folders for all the files I use
if (!fs.existsSync(PEERS_LOG_PATH)) {
fs.mkdirSync(PEERS_LOG_PATH, { recursive: true })
}
if (!fs.existsSync(SNAPSHOTS_PATH)) {
fs.mkdirSync(SNAPSHOTS_PATH, { recursive: true })
}
if (!fs.existsSync(LOG_FILES)) {
fs.mkdirSync(LOG_FILES, { recursive: true })
}
// load settings file
if (fs.existsSync(SETTINGS_PATH)) {
mynode.settings = JSON.parse(fs.readFileSync(SETTINGS_PATH))
// add to avoid list from there
if (mynode.settings?.avoid?.length) {
mynode.settings.avoid.forEach(pk => {
if (!pk.startsWith('//')) AVOID_LIST.push(pk)
})
console.log(`${getDate()}`, { AVOID_LIST })
}
}
// timers
initializeBotTimers()
// generate snapshots at start to ensure recent data
await generatePeersSnapshots()
// small pause for friendly stop
await sleep(5 * seconds)
// initialize forwarding request limiter if used
if (ALLOW_HTLC_LIMITER) mynode.htlcLimiter = htlcLimiter(SHOW_HTLC_REQUESTS, LOG_HTLC_REQUESTS, auth)
// start bot loop
runBot()
}
// my own reconnect method that doesn't disconnect channels based on just disables
// so can run frequently without risking instability
const runSimpleReconnect = async () => {
if (!ALLOW_SIMPLE_RECONNECT) return null
logDim('runSimpleReconnect()')
await sleep(5 * seconds)
const minutesSinceLast = minutesAgo(mynode.timers.lastSimpleReconnect || 0)
if (minutesSinceLast > MINUTES_BETWEEN_SIMPLE_RECONNECTS) {
logDim('runSimpleReconnect(): time to run')
updateBotTimers({ lastSimpleReconnect: Date.now() })
} else {
const timeUntil = (MINUTES_BETWEEN_SIMPLE_RECONNECTS - minutesSinceLast).toFixed(0)
logDim(`runSimpleReconnect(): not time to run yet. (Scheduled in ${timeUntil}+ minutes)`)
return null
}
// key to alias table
const pkToAlias = await bos.getPublicKeyToAliasTable()
// get offline peers pubkeys
const peers = await bos.peers({})
const peersTotal = peers?.length
const offline = peers?.filter(p => p.is_offline).map(p => p.public_key) || []
// get inactive peers pubkeys
const channels = (await bos.callAPI('getChannels'))?.channels || []
const inactive = unique(channels.filter(c => !c.is_active).map(c => c.partner_public_key))
// combine offline and inactive list
const listToReconnect = unique([...offline, ...inactive])
const finalReconnected = []
const finalOffline = []
// reconnect each one
const reconnectionTasks = []
for (const public_key of listToReconnect) {
const reconnection = bos.addPeer({ public_key }).then(res => {
if (res) {
finalReconnected.push(public_key)
logDim(`Reconnected to ${pkToAlias[public_key]} | ${public_key.slice(0, 20)}`)
} else {
finalOffline.push(public_key)
logDim(`Failed to reconnect to ${pkToAlias[public_key]} | ${public_key.slice(0, 20)}`)
}
})
reconnectionTasks.push(reconnection)
// launch in parallel but space apart starting reconnects by a few seconds
const STAGGERED_RECONNECTS_MS = 7 * seconds
await sleep(STAGGERED_RECONNECTS_MS, { quiet: true })
}
// wait until all the reconnection tasks are complete
await Promise.all(reconnectionTasks)
// make a nice summary of results
const lastSeen = updateLastSeenList(peers)
const peersDisabledToMe = peers.filter(p => p.is_inbound_disabled).map(p => p.public_key)
// sort by offline time
finalOffline.sort((a, b) => (lastSeen[a] || 0) - (lastSeen[b] || 0))
const offlinePeerInfoList = []
for (const public_key of finalOffline) {
const alias = ca(pkToAlias[public_key]) || public_key.slice(0, 20)
const { countPeers, countDisabled } = await getPeersDisabledTowards({ public_key })
const percent = countPeers ? ((countDisabled / countPeers) * 100).toFixed(0) + '%' : ''
// keep numeric days offline separate from the display string so the >1 day check works
const daysOfflineNum = lastSeen[public_key] ? daysAgo(lastSeen[public_key]) : null
const daysOffline = daysOfflineNum !== null ? daysOfflineNum.toFixed(1) + 'd' : ''
const isReallyOffline = daysOfflineNum > 1 || (countPeers && countDisabled / countPeers > 0.33)
const icon = isReallyOffline ? '🚫' : '🕑'
offlinePeerInfoList.push(`${alias} ${icon} ${percent} ${daysOffline}`)
}
const offlinePeerInfo = offlinePeerInfoList.join('\n ') || 'n/a'
const message =
peers !== null
? `🔍 Simple reconnect done (every ${MINUTES_BETWEEN_SIMPLE_RECONNECTS} minutes).\n\n` +
// give overall statistics on offline
`<b>Offline peers</b>: ${finalOffline.length}/${peersTotal}` +
` (${((finalOffline.length / peersTotal) * 100).toFixed(0)}%)\n\n` +
// write out offline peers
` ${offlinePeerInfo}\n\n` +
// write out overall statistics on reconnected
`<b>Reconnected peers</b>: ${finalReconnected.length}/${peersTotal}` +
` (${((finalReconnected.length / peersTotal) * 100).toFixed(0)}%)\n\n` +
// write out reconnected peers
` ${finalReconnected.map(pk => ca(pkToAlias[pk]) || pk.slice(0, 20)).join(', ') || 'n/a'}\n\n` +
`<b>Disabled-towards-me peers</b>: ${peersDisabledToMe.length}/${peersTotal}` +
` (${((peersDisabledToMe.length / peersTotal) * 100).toFixed(0)}%)\n\n` +
// write out peers that disabled towards me
` ${peersDisabledToMe.map(pk => ca(pkToAlias[pk]) || pk.slice(0, 20)).join(', ') || 'n/a'}\n`
: 'bos/lnd issue detected'
// update user about offline peers just in case
console.log(`${getDate()} ${message.replaceAll(/<\/?.>/g, '')}`)
await telegramLog(message)
}
// restart node if requested
const runNodeRestartCheck = async () => {
if (!(ALLOW_DAILY_RESET && ALLOW_NODE_RESET)) return null
logDim('runNodeRestartCheck()')
await sleep(5 * seconds)
const now = Date.now()
const timers = mynode.timers
const thisHour = new Date(now).getUTCHours()
// check if right hour
const isRightHour = UTC_HOUR_FOR_RESTART === thisHour
// check if at least 4 hours since last daily reset or
// at least MIN_HOURS_SINCE_RESET hours since last reset from other sources
const HOURS_DELTA = 4
const MIN_HOURS_SINCE_RESET = MIN_DAYS_BETWEEN_RESETS * 24
const hoursSinceDailyReset = (now - timers.lastDailyReset) / hours
const hoursSinceReset = (now - timers.lastNodeReset) / hours
const beenLongEnough = hoursSinceDailyReset > HOURS_DELTA && hoursSinceReset > MIN_HOURS_SINCE_RESET // just in case, checking both
const isResetting = isRightHour && beenLongEnough
// prettier-ignore
logDim(`runNodeRestartCheck() ${isRightHour && beenLongEnough ? 'resetting node processes' : 'not right time'}
${thisHour} UTC hour ${isRightHour ? 'matches' : 'is not'} the specified ${UTC_HOUR_FOR_RESTART} UTC hour for timed node reset.
It has been ${hoursSinceReset > MIN_HOURS_SINCE_RESET ? `over ${MIN_HOURS_SINCE_RESET} hours` : hoursSinceReset.toFixed(1) + ' hours (<' + MIN_HOURS_SINCE_RESET + ')'} since last known reset.
It has been ${hoursSinceDailyReset > HOURS_DELTA ? `over ${HOURS_DELTA} hours` : hoursSinceDailyReset.toFixed(1) + ' hours (<' + HOURS_DELTA + ')'} since last daily reset.
`)
if (!isResetting) return null
// seems time to restart node
logDim('runNodeRestartCheck() - right hour and been long enough so restarting node processes')
// update timers
updateBotTimers({ lastDailyReset: now })
await restartNodeProcess() // lastNodeReset updated inside
// run reconnect script to ensure everything is ready again
await runSimpleReconnect()
// await runBotReconnect()
}
const initializeBotTimers = () => {
if (!fs.existsSync(TIMERS_PATH)) {
// if no timer file, just generate timers file to keep track between runs
console.log(`${getDate()} creating timers file at ${TIMERS_PATH}`)
} else {
// if timer file exists, overwrite defaults with whatever is available in file
try {
const timersOnFile = JSON.parse(fs.readFileSync(TIMERS_PATH))
mynode.timers = { ...mynode.timers, ...(timersOnFile ?? {}) }
console.log(`${getDate()} found & updating timers file at ${TIMERS_PATH}`)
} catch (e) {
console.log(`${getDate()} timers file unreadable, writing to ${TIMERS_PATH}`)
}
}
console.log(
`${getDate()} current UTC timestamps: ${JSON.stringify(
Object.keys(mynode.timers).map(timerName => `${timerName.padStart(20)}: ${getDate(mynode.timers[timerName])}`),
null,
2
)}`
)
fs.writeFileSync(TIMERS_PATH, JSON.stringify(mynode.timers))
}
// setting/updating both bot global and written to file timers with newItems object item(s)
// getting is just mynode.timers as it's updated from defaults + file during start up and with updates
const updateBotTimers = newItems => {
mynode.timers = {
...mynode.timers,
...newItems
}
console.log(`${getDate()} Updated ${TIMERS_PATH}`)
fs.writeFileSync(TIMERS_PATH, JSON.stringify(mynode.timers))
}
// carefully shut down node if low on battery
const checkBattery = async () => {
if (!ALLOW_NODE_SHUTDOWN_ON_LOW_BATTERY) return null
logDim('checkBattery()')
await sleep(5 * seconds)
const battery = await getBattery()
logDim(`checkBattery(): ${battery ? battery + '%' : 'n/a'}`)
if (battery && +battery < 50) {
console.log(`${getDate()} checkBattery(): battery below 50%`)
// check internet connection
const isInternetConnected = await dns.promises
.lookup('google.com')
.then(() => true)
.catch(() => false)
if (isInternetConnected && ALLOW_HTLC_LIMITER) {
// if internet still connected can wait a little for existing forwards to clear
console.log(`${getDate()} checkBattery(): requesting blocking of all new forward requests`)
// if HTLClimiter used, should signal it to reject all NEW forward requests until node is down
mynode.htlcLimiter.stop = true
// giving it 2 min to clear old htlcs
await sleep(2 * minutes)
}
console.log(`${getDate()} checkBattery(): requesting node shut down`)
// now signaling node shut down, picked up by resetHandler.js
const requestTime = Date.now()
const SHUTDOWN_REQUEST_PATH = 'shutdownRequest.json'
fs.writeFileSync(SHUTDOWN_REQUEST_PATH, JSON.stringify({ requestTime }))
// giving lightning node 5 min to shut down
await sleep(5 * minutes)
// exit this bot
console.log(`${getDate()} checkBattery(): terminating bot processes`)
process.exit(0)
}
}
// experimental parallel rebalancing function (unsplit, wip)
const runBotRebalanceOrganizer = async () => {
logDim('runBotRebalanceOrganizer()')
await sleep(5 * seconds)
// match up peers
// high weight lets channels pick good peers first (not always, so it occasionally searches for better matches)
// get active peers
const peers = await runBotGetPeers()
// make a list of remote heavy and local heavy peers via balance check
const remoteHeavyPeers = rndWeightedSort(
peers.filter(includeForRemoteHeavyRebalance),
// this one includes fee rate in weight so more profitable channels more likely to be tried more often
WEIGHT_REMOTE
)
const localHeavyPeers = rndWeightedSort(peers.filter(includeForLocalHeavyRebalance), WEIGHT)
// grab original number of peers for each side
const [nRHP, nLHP] = [remoteHeavyPeers.length, localHeavyPeers.length]
// print out all options of peers & their weight
/*
if (VERBOSE) {
console.log(`${getDate()} Peer weight / balance / alias. Weight function: ${WEIGHT}`)
for (const p of localHeavyPeers) {
const weight = WEIGHT(p).toFixed(5)
const w = weight.padStart(13)
const b = p.balance.toFixed(2)
const local = (p.outbound_liquidity / 1e6).toFixed(1).padStart(4) + 'M'
const remote = (p.inbound_liquidity / 1e6).toFixed(1).padStart(4) + 'M'
console.log(`Local-heavy: ${ca(p.alias).padEnd(30)} ${w}w ${b}b ${local}|${remote}`)
}
console.log('')
for (const p of remoteHeavyPeers) {
const weight = WEIGHT(p).toFixed(5)
const w = weight.padStart(12)
const b = p.balance.toFixed(2)
const local = (p.outbound_liquidity / 1e6).toFixed(1).padStart(4) + 'M'
const remote = (p.inbound_liquidity / 1e6).toFixed(1).padStart(4) + 'M'
console.log(`Remote-heavy: ${ca(p.alias).padEnd(30)} ${w}w ${b}b ${local}|${remote}`)
}
console.log('')
}
*/
// assemble list of matching peers and how much to rebalance
const matchups = []
// keep taking peers out of arrays to match until one side empty
while (localHeavyPeers.length > 0 && remoteHeavyPeers.length > 0) {
// get top lucky remote channel
const remoteHeavy = remoteHeavyPeers[0]
// try to see if there's good match in locals for this peer
// just do it half the time to discover more
const localHeavyIndexIdeal =
random() < 0.5 ? findGoodPeerMatch({ remoteChannel: remoteHeavy, peerOptions: localHeavyPeers }) : -1
// use localHeavyIndexIdeal if it returns an index, otherwise use top local channel
const isGoodPeer = localHeavyIndexIdeal > -1
const localHeavyIndexUsed = isGoodPeer ? localHeavyIndexIdeal : 0
const localHeavy = localHeavyPeers[localHeavyIndexUsed]
// max amount to rebalance is the smaller sats off-balance between the two
const maxSatsToRebalance = trunc(min(localHeavy.unbalancedSats, remoteHeavy.unbalancedSats))
// can also calculate fee rate used this week for routing instead of just current fee rate
// round down fees to nearest sat to get rid of base fee
const routedOut = remoteHeavy.routed_out_msats / 1000
const earnedOut = remoteHeavy.routed_out_fees_msats / 1000
// const capacity = remoteHeavy.capacity
// const remoteSats = remoteHeavy.inbound_liquidity
// grab my outgoing fee for remote heavy peer (from record if available)
const rateNowOutgoing = trunc(getReferenceFee(remoteHeavy))
// near MIN_SATS_PER_SIDE (wO ~ 1) will use fee from routing events, otherwise (wO ~ 0) channel setting
const routedOutFactor = 1 - exp((-routedOut * PI) / MIN_SATS_PER_SIDE)
// actual earning rate (how else to handle very small amounts giving incorrect fee rate?)
const effectiveFeeRate = trunc((floor(earnedOut) / routedOut) * 1e6) || 0
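// worked example (hypothetical numbers): a channel that routed out 500k sats earning 600 sats
// gives effectiveFeeRate = trunc(600 / 500000 * 1e6) = 1200 ppm and
// routedOutFactor = 1 - exp(-0.5 * PI) ≈ 0.79, so the blend below leans mostly
// on the observed 1200 ppm rather than the posted channel fee rate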
// longer timeframe estimated fee rate (DEBUGGING values for now)
// const { summedRouted, routingWeightedPpm, lowestPpm } = analyzeForwardingSummaries(
// readRecord(remoteHeavy).forwarded,
// MAX_DAYS_FOR_PPM_LOOKBACK
// )
// console.log({
// alias: remoteHeavy.alias,
// rateNowOutgoing,
// summedRouted,
// routingWeightedPpm,
// lowestPpm,
// routedOut,
// effectiveFeeRate,
// routedOutFactor
// })
// the more I route out the more reliable calculated fee rate is vs current channel fee rate
const usedRefFeeRate = trunc(effectiveFeeRate * routedOutFactor + rateNowOutgoing * (1 - routedOutFactor) || 0)
// start calculating rebalance rate
const feeRateUsedForCalc = !INCLUDE_EARNED_FEE_RATE_FOR_REBALANCE
? rateNowOutgoing
: min(rateNowOutgoing, usedRefFeeRate)
// level of emergency decided by highest need of either channel 0-1
const weightRemote = WEIGHT(remoteHeavy)
const weightLocal = WEIGHT(localHeavy)
const levelOfEmergency = max(weightRemote, weightLocal)
// time dependence starts at 0 and ~1 after DAYS_FOR_STATS
const channelsAgeRemote = min(...(remoteHeavy.ids?.map(c => c.channel_age_days || 0) || [0]))
if (DEBUG && !remoteHeavy.ids) console.log('unknown channel ids on remote heavy peer', remoteHeavy)
const timeFactor = 1 - exp((-PI * channelsAgeRemote) / DAYS_FOR_STATS)
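// worked example: a 1-day-old channel gives timeFactor ≈ 0.36, a 7-day-old one ≈ 0.96,
// so brand-new remote-heavy channels get a much smaller rebalance fee budget below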
// fee via simple subtraction & division from reference
const safeRateBaseline = subtractSafety(feeRateUsedForCalc)
// new remoteHeavy channels can wait to be rebalanced
const safeRateForAge = trunc(timeFactor * feeRateUsedForCalc)
// low levels of emergency will try less hard
// high level of emergency will go as high as subtractSafety allows
// fee via weights from 0.33-1x of reference ppm
const rateBasedOnEmergency = trunc((0.33 + 0.67 * levelOfEmergency) * feeRateUsedForCalc)
// use smallest of 3 rebalance fee rate limits
const safeRate = min(rateBasedOnEmergency, safeRateBaseline, safeRateForAge)
// check against the absolute highest rebalance rate allowed
const maxRebalanceRate = min(safeRate, MAX_FEE_RATE_FOR_REBALANCE)
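// worked example (hypothetical inputs): with feeRateUsedForCalc = 1000 ppm, levelOfEmergency = 0.5
// and timeFactor = 0.96, rateBasedOnEmergency = 665, safeRateForAge = 960, and safeRateBaseline
// is whatever subtractSafety(1000) returns; the smallest of the three, capped at
// MAX_FEE_RATE_FOR_REBALANCE = 1500, becomes maxRebalanceRate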
// console.log(remoteHeavy.alias, { effectiveFeeRate, rateNowOutgoing, maxRebalanceRate })
// check if rebalance rate is below absolute min fee rate for rebalance allowed or below inbound fee rate
if (maxRebalanceRate < MIN_FEE_RATE_FOR_REBALANCE || maxRebalanceRate < remoteHeavy.inbound_fee_rate) {
remoteHeavyPeers.splice(0, 1) // drop remote-heavy peer from consideration
continue // move onto next peer
}
// add this peer pair to matchups
// run keeps track of n times matchup ran
// done keeps track of done tasks
// startedAt keeps track of start time (to measure time taken)
// results keeps 1+ return values from bos function
matchups.push({
localHeavy,
remoteHeavy,
maxSatsToRebalance,
maxRebalanceRate,
run: 1,
done: false,
startedAt: Date.now(),
results: [],
isGoodPeer,
rateNowOutgoing,
usedRefFeeRate,
effectiveFeeRate,
routedOut,
earnedOut,
routedOutFactor,
channelsAgeRemote,
timeFactor,
weightRemote,
weightLocal,
levelOfEmergency,
rateBasedOnEmergency,
safeRateBaseline,
safeRate,
safeRateForAge
})
// remove these peers from peer lists
localHeavyPeers.splice(localHeavyIndexUsed, 1)
remoteHeavyPeers.splice(0, 1)
// stop if limit reached
if (matchups.length >= MAX_PARALLEL_REBALANCES) break
}
if (VERBOSE) {
console.log(
`${getDate()} ${matchups.length} rebalance matchups from ${nRHP} remote-heavy & ${nLHP} local-heavy peers
sorted with offbalance-weighted randomness of ${WEIGHT}
${dim}weighting factors: wL = local-offbalance, wR = remote-offbalance, wE = emergency level, wT = aged weight, wO = outflow weight${undim}
${dim}rebalance ppm's considered: eff = effective, safe = max safe, rush = offbalance emergency${undim}
`
)
for (const match of matchups) {
const outOf = ca(match.localHeavy.alias).padStart(30)
const into = ca(match.remoteHeavy.alias).padEnd(30)
const meAtLH = (match.localHeavy.outbound_liquidity / 1e6).toFixed(1).padStart(5) + 'M'
const remAtLH = (match.localHeavy.inbound_liquidity / 1e6).toFixed(1).padStart(5) + 'M'
const meAtRH = (match.remoteHeavy.outbound_liquidity / 1e6).toFixed(1).padStart(5) + 'M'
const remAtRH = (match.remoteHeavy.inbound_liquidity / 1e6).toFixed(1).padStart(5) + 'M'
// show the ppm regularly used for routing in the channel, not the temporary high ppm used on very drained channels, as the former is the rebalancing reference
// const myFeeAtLH = `(${match.localHeavy.fee_rate})`.padStart(6)
const myFeeAtLH = `(${getReferenceFee(match.localHeavy)})`.padStart(6)
const remFeeAtLH = `(${match.localHeavy.inbound_fee_rate})`.padEnd(6)
// const myFeeAtRH = `(${match.remoteHeavy.fee_rate})`.padEnd(6)
const myFeeAtRH = `(${getReferenceFee(match.remoteHeavy)})`.padEnd(6)
const remFeeAtRH = `(${match.remoteHeavy.inbound_fee_rate})`.padStart(6)
const factorsUsed = [
`${match.weightLocal.toFixed(1)}wL`,
`${match.weightRemote.toFixed(1)}wR`,
`${match.levelOfEmergency.toFixed(1)}wE`,
`${match.timeFactor.toFixed(1)}wT`,
`${match.routedOutFactor.toFixed(1)}wO`,
`${match.usedRefFeeRate}eff`.padStart(7),
`${match.safeRateBaseline}safe`.padStart(8),
`${match.rateBasedOnEmergency}rush`.padStart(8)
].join(' ')
const isGoodPeer = match.isGoodPeer ? '💚' : ''
console.log(
` me☂️ ${dim}${myFeeAtLH} ${meAtLH} [ ||||-> ] ${remAtLH} ${remFeeAtLH}${undim} ${outOf} ${dim}--> ?` +
` -->${undim} ${into} ${dim}${remFeeAtRH} ${remAtRH} [ ||||-> ] ${meAtRH} ${myFeeAtRH}${undim} me☂️ ` +
`${dim}${factorsUsed}${undim} ${isGoodPeer}`
)
}
console.log('')
}
// if not actually rebalancing we end here
if (!ALLOW_REBALANCING) return null
// to keep track of list of launched rebalancing tasks
const rebalanceTasks = []
// function to launch every rebalance task for a matched pair with
const handleRebalance = async matchedPair => {
const { localHeavy, remoteHeavy, maxSatsToRebalance, maxRebalanceRate, run, startedAt, lastAvoid } = matchedPair
const localString = ca(localHeavy.alias).padStart(30)
const remoteString = ca(remoteHeavy.alias).padEnd(30)
const maxRebalanceRateString = ('<' + maxRebalanceRate + ' ppm').padStart(9)
// ONLY_USE_KEYSENDS - always does bos send instead of bos rebalance
// USE_KEYSENDS_AFTER_BALANCE - always does bos send after 1 bos rebalance works
const useRegularRebalance = !(run > 1 && USE_KEYSENDS_AFTER_BALANCE) && !ONLY_USE_KEYSENDS
const maxSatsToRebalanceAfterRules = useRegularRebalance
? fuzzyAmount(min(maxSatsToRebalance, MAX_REBALANCE_SATS))
: fuzzyAmount(min(maxSatsToRebalance, MAX_REBALANCE_SATS_KEYSEND))
// task launch message
console.log(
`${getDate()} Starting ${localString} --> ${remoteString} run #${run}` +
` rebalance @ ${maxRebalanceRateString}, ${pretty(maxSatsToRebalance).padStart(10)} sats left to balance ` +
`${dim}(${useRegularRebalance ? 'via bos rebalance' : 'via bos send'})${undim}`
)
const resBalance = useRegularRebalance
? await bos.rebalance(
{
fromChannel: localHeavy.public_key,
toChannel: remoteHeavy.public_key,
// bos rebalance probes with small # of sats and then increases
// amount up to this value until probe fails
// so then it uses the largest size that worked
maxSats: maxSatsToRebalanceAfterRules,
maxMinutes: MINUTES_FOR_REBALANCE,
maxFeeRate: maxRebalanceRate,
avoid: copy(lastAvoid ?? AVOID_LIST), // avoid these nodes in paths
retryAvoidsOnTimeout: RETRIES_ON_TIMEOUTS_REBALANCE
},
undefined,
// {} // no terminal output, too many things happening
{ details: SHOW_REBALANCE_LOG }
)
: await bos.keysendRebalance(
{
destination: mynode.public_key,
fromChannel: localHeavy.public_key,
toChannel: remoteHeavy.public_key,
// add randomness to amt (downward only)
sats: maxSatsToRebalanceAfterRules,
maxMinutes: MINUTES_FOR_KEYSEND,
maxFeeRate: maxRebalanceRate,
avoid: copy(lastAvoid ?? AVOID_LIST), // avoid these nodes in paths
retryAvoidsOnTimeout: RETRIES_ON_TIMEOUTS_SEND
},
// {} // no terminal output, too many things happening
{ details: SHOW_REBALANCE_LOG }
)
const taskLength = ((Date.now() - startedAt) / minutes).toFixed(1) + ' minutes'
matchedPair.results.push(resBalance)
// handle failure and success
if (resBalance.failed) {
// fail:
matchedPair.done = true
const tasksDone = matchups.reduce((count, m) => (m.done ? count + 1 : count), 0)
const reason = resBalance.msg[1] // 2nd item in error array from bos
const reasonString = resBalance.ppmSuggested
? `(Reason: needed ${String(resBalance.ppmSuggested).padStart(4)} ppm) `
: `(Reason: ${reason}) `
console.log(
`${getDate()} Stopping ${localString} --> ${remoteString} run #${run} ${maxRebalanceRateString} ` +
`rebalance failed ${reasonString}` +
`${dim}(${tasksDone}/${matchups.length} done after ${taskLength})${undim}`
)
// update records
// unless it's probe timeout which is completely inconclusive
if (reason !== 'ProbeTimeout') {
const now = Date.now()
changeRecord({
peer: remoteHeavy,
newRecord: {
rebalance: [
// new element added to start
{
t: now,
UTC: getDate(now),
ppm: resBalance.ppmSuggested,
maxRebalanceRate: maxRebalanceRate,
failed: true,
failedReason: reason,
peer: localHeavy.public_key,
peerAlias: localHeavy.alias,
sats: maxSatsToRebalanceAfterRules
},
// old elements kept
...(readRecord(remoteHeavy)?.rebalance || [])
]
}
})
changeRecord({
peer: localHeavy,
newRecord: {
rebalanceLocalHeavy: [
// new element added to start
{
t: now,
UTC: getDate(now),
ppm: resBalance.ppmSuggested,
maxRebalanceRate: maxRebalanceRate,
failed: true,
failedReason: reason,
peer: remoteHeavy.public_key,
peerAlias: remoteHeavy.alias,
sats: maxSatsToRebalanceAfterRules
},
// old elements kept
...(readRecord(localHeavy)?.rebalanceLocalHeavy || [])
]
}
})
}
// if successful rebalance
} else {
// just in case both fields are missing for some reason in the response, let's stop
if (!resBalance.rebalanced && !resBalance.sent) {
console.error(`${getDate()} shouldn't happen: missing resBalance.rebalanced & resBalance.sent`)
return matchedPair
}
const rebalanced = resBalance.rebalanced ?? resBalance.sent
// success:
matchedPair.maxSatsToRebalance -= rebalanced
matchedPair.run++
// update records
const now = Date.now()
changeRecord({
peer: remoteHeavy,