Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,12 @@
# 0.7.0-rc.25 (Synonym Fork)
# 0.7.0-rc.27 (Synonym Fork)

## Bug Fixes

- Fixed `PeerStore::add_peer` silently ignoring address updates for existing peers. When a peer's
IP address changes (e.g., LSP node migration), `add_peer` now upserts the socket address and
re-persists, instead of returning early. This fixes the issue where ldk-node's reconnection loop
would indefinitely use a stale cached IP after an LSP node IP change.
(See [upstream issue #700](https://github.com/lightningdevkit/ldk-node/issues/700))
- Backported upstream Electrum sync fix (PR #4341): Skip unconfirmed `get_history` entries in
`ElectrumSyncClient`. Previously, mempool entries (height=0 or -1) were incorrectly treated as
confirmed, causing `get_merkle` to fail for 0-conf channel funding transactions.
Expand Down
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ exclude = ["bindings/uniffi-bindgen"]

[package]
name = "ldk-node"
version = "0.7.0-rc.25"
version = "0.7.0-rc.27"
authors = ["Elias Rohrer <dev@tnull.de>"]
homepage = "https://lightningdevkit.org/"
license = "MIT OR Apache-2.0"
Expand Down
4 changes: 2 additions & 2 deletions Package.swift
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,8 @@

import PackageDescription

let tag = "v0.7.0-rc.25"
let checksum = "3abf83a20d41a79337b9eae1c86da375b49423d5fe5176e4876b76285fde44ee"
let tag = "v0.7.0-rc.27"
let checksum = "c18449b57c5da535b56d1575505c26d68ecca4440c41db9edc23522747742034"
let url = "https://github.com/synonymdev/ldk-node/releases/download/\(tag)/LDKNodeFFI.xcframework.zip"

let package = Package(
Expand Down
2 changes: 1 addition & 1 deletion bindings/kotlin/ldk-node-android/gradle.properties
Original file line number Diff line number Diff line change
Expand Up @@ -3,4 +3,4 @@ android.useAndroidX=true
android.enableJetifier=true
kotlin.code.style=official
group=com.synonym
version=0.7.0-rc.25
version=0.7.0-rc.27
Binary file not shown.
Binary file not shown.
Binary file not shown.
2 changes: 1 addition & 1 deletion bindings/kotlin/ldk-node-jvm/gradle.properties
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
org.gradle.jvmargs=-Xmx1536m
kotlin.code.style=official
group=com.synonym
version=0.7.0-rc.25
version=0.7.0-rc.27
Binary file not shown.
Binary file not shown.
2 changes: 1 addition & 1 deletion bindings/python/pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "ldk_node"
version = "0.7.0-rc.25"
version = "0.7.0-rc.27"
authors = [
{ name="Elias Rohrer", email="dev@tnull.de" },
]
Expand Down
10 changes: 6 additions & 4 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1107,6 +1107,12 @@ impl Node {

let peer_info = PeerInfo { node_id, address };

// Persist first so the address is updated even if the connection attempt
// races with an in-flight reconnection loop attempt at the old address.
if persist {
self.peer_store.add_peer(peer_info.clone())?;
}

let con_node_id = peer_info.node_id;
let con_addr = peer_info.address.clone();
let con_cm = Arc::clone(&self.connection_manager);
Expand All @@ -1119,10 +1125,6 @@ impl Node {

log_info!(self.logger, "Connected to peer {}@{}. ", peer_info.node_id, peer_info.address);

if persist {
self.peer_store.add_peer(peer_info)?;
}

Ok(())
}

Expand Down
66 changes: 63 additions & 3 deletions src/peer_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ use crate::io::{
PEER_INFO_PERSISTENCE_KEY, PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE,
PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE,
};
use crate::logger::{log_error, LdkLogger};
use crate::logger::{log_error, log_info, LdkLogger};
use crate::types::DynStore;
use crate::{Error, SocketAddress};

Expand All @@ -43,8 +43,17 @@ where
pub(crate) fn add_peer(&self, peer_info: PeerInfo) -> Result<(), Error> {
let mut locked_peers = self.peers.write().unwrap();

if locked_peers.contains_key(&peer_info.node_id) {
return Ok(());
if let Some(existing) = locked_peers.get(&peer_info.node_id) {
if existing.address == peer_info.address {
return Ok(());
}
log_info!(
self.logger,
"Updating socket address for peer {}: {} -> {}",
peer_info.node_id,
existing.address,
peer_info.address
);
}

locked_peers.insert(peer_info.node_id, peer_info);
Expand Down Expand Up @@ -194,4 +203,55 @@ mod tests {
assert_eq!(peers[0], expected_peer_info);
assert_eq!(deser_peer_store.get_peer(&node_id), Some(expected_peer_info));
}

#[test]
fn peer_address_updated_on_readd() {
    // Fresh in-memory store + logger backing the peer store under test.
    let kv_store: Arc<DynStore> = Arc::new(InMemoryStore::new());
    let test_logger = Arc::new(TestLogger::new());
    let peer_store = PeerStore::new(Arc::clone(&kv_store), Arc::clone(&test_logger));

    let node_id = PublicKey::from_str(
        "0276607124ebe6a6c9338517b6f485825b27c2dcc0b9fc2aa6a4c0df91194e5993",
    )
    .unwrap();
    let initial_addr = SocketAddress::from_str("34.65.186.40:9735").unwrap();
    let updated_addr = SocketAddress::from_str("34.65.153.174:9735").unwrap();

    // Insert the peer at its first address and confirm it is retrievable.
    peer_store.add_peer(PeerInfo { node_id, address: initial_addr.clone() }).unwrap();
    assert_eq!(peer_store.get_peer(&node_id).unwrap().address, initial_addr);

    // Re-adding the same node_id with a different address must upsert,
    // not be silently ignored.
    peer_store.add_peer(PeerInfo { node_id, address: updated_addr.clone() }).unwrap();
    assert_eq!(peer_store.get_peer(&node_id).unwrap().address, updated_addr);

    // The upsert must replace the entry rather than create a duplicate.
    assert_eq!(peer_store.list_peers().len(), 1);

    // Round-trip through the persistence layer: the serialized state must
    // also carry the updated address, not the stale one.
    let raw = KVStoreSync::read(
        &*kv_store,
        PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE,
        PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE,
        PEER_INFO_PERSISTENCE_KEY,
    )
    .unwrap();
    let reloaded =
        PeerStore::read(&mut &raw[..], (Arc::clone(&kv_store), test_logger)).unwrap();
    assert_eq!(reloaded.get_peer(&node_id).unwrap().address, updated_addr);
}

#[test]
fn peer_same_address_skips_persist() {
    // Re-adding a peer with an identical address must hit the early-return
    // (no-op) path in `add_peer`: no duplicate entry, no change to the
    // stored record. Previously this test only checked the peer count, so
    // a corrupted/cleared entry after the second add would have slipped by.
    let store: Arc<DynStore> = Arc::new(InMemoryStore::new());
    let logger = Arc::new(TestLogger::new());
    let peer_store = PeerStore::new(Arc::clone(&store), Arc::clone(&logger));

    let node_id = PublicKey::from_str(
        "0276607124ebe6a6c9338517b6f485825b27c2dcc0b9fc2aa6a4c0df91194e5993",
    )
    .unwrap();
    let address = SocketAddress::from_str("127.0.0.1:9738").unwrap();

    peer_store.add_peer(PeerInfo { node_id, address: address.clone() }).unwrap();

    // Second add with the exact same address: must succeed and be a no-op.
    peer_store.add_peer(PeerInfo { node_id, address: address.clone() }).unwrap();
    assert_eq!(peer_store.list_peers().len(), 1);
    // The stored entry must be untouched by the no-op re-add.
    assert_eq!(peer_store.get_peer(&node_id).unwrap().address, address);
}
}
60 changes: 60 additions & 0 deletions tests/integration_tests_rust.rs
Original file line number Diff line number Diff line change
Expand Up @@ -911,6 +911,66 @@ async fn do_connection_restart_behavior(persist: bool) {
}
}

// Regression test: a failed `connect(..., persist=true)` must still persist
// (and upsert) the peer's address, so the background reconnection loop uses
// the caller-supplied address rather than a stale cached one.
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn peer_address_persisted_on_connect_failure() {
    let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd();
    let chain_source = TestChainSource::Esplora(&electrsd);
    let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false);

    let node_id_b = node_b.node_id();
    let real_addr_b = node_b.listening_addresses().unwrap().first().unwrap().clone();

    // Stop node_b so all connection attempts to it will fail.
    node_b.stop().unwrap();

    // An address nothing is listening on; port chosen to avoid the test nodes'
    // own listeners.
    let fake_addr: lightning::ln::msgs::SocketAddress = "127.0.0.1:19999".parse().unwrap();

    // Attempt to connect with persist=true to an unreachable address. The connection
    // will fail, but the peer address must still be persisted. This is a regression test
    // for a bug where add_peer was called AFTER the connection attempt, meaning a failed
    // connect would skip persistence entirely.
    let res = node_a.connect(node_id_b, fake_addr.clone(), true);
    assert!(res.is_err());

    let peers_a = node_a.list_peers();
    let peer = peers_a
        .iter()
        .find(|p| p.node_id == node_id_b)
        .expect("Peer must be in store even after failed connection when persist=true");
    assert!(peer.is_persisted);
    assert!(!peer.is_connected);
    assert_eq!(peer.address, fake_addr);

    // Now "update" to the real address (still unreachable since node_b is stopped).
    // This verifies the upsert: even though connect fails again, the stored address
    // should be updated to the new one.
    let res = node_a.connect(node_id_b, real_addr_b.clone(), true);
    assert!(res.is_err());

    let peers_a = node_a.list_peers();
    let peer = peers_a
        .iter()
        .find(|p| p.node_id == node_id_b)
        .expect("Peer must still be in store after second failed connection");
    assert_eq!(peer.address, real_addr_b, "Stored address must be updated to the new one");

    // Restart node_b and node_a to verify the persisted address survives restart
    // and the reconnection loop uses the correct (updated) address.
    node_b.start().unwrap();
    node_a.stop().unwrap();
    node_a.start().unwrap();

    // Give node_a's background reconnection loop time to re-establish the
    // connection. NOTE(review): 5s is an empirical allowance — presumably
    // longer than the loop's retry interval; confirm against the loop's
    // configured cadence if this flakes.
    tokio::time::sleep(std::time::Duration::from_secs(5)).await;

    let peers_a = node_a.list_peers();
    let peer = peers_a
        .iter()
        .find(|p| p.node_id == node_id_b)
        .expect("Peer must reconnect after restart using persisted address");
    assert!(peer.is_connected);
    assert!(peer.is_persisted);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn concurrent_connections_succeed() {
let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd();
Expand Down
Loading