diff --git a/CHANGELOG.md b/CHANGELOG.md index c9a88002e..78eab4eb4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,12 @@ -# 0.7.0-rc.25 (Synonym Fork) +# 0.7.0-rc.27 (Synonym Fork) ## Bug Fixes +- Fixed `PeerStore::add_peer` silently ignoring address updates for existing peers. When a peer's + IP address changes (e.g., LSP node migration), `add_peer` now upserts the socket address and + re-persists, instead of returning early. This fixes the issue where ldk-node's reconnection loop + would indefinitely use a stale cached IP after an LSP node IP change. + (See [upstream issue #700](https://github.com/lightningdevkit/ldk-node/issues/700)) - Backported upstream Electrum sync fix (PR #4341): Skip unconfirmed `get_history` entries in `ElectrumSyncClient`. Previously, mempool entries (height=0 or -1) were incorrectly treated as confirmed, causing `get_merkle` to fail for 0-conf channel funding transactions. diff --git a/Cargo.toml b/Cargo.toml index 6d360d968..0707cd75a 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ exclude = ["bindings/uniffi-bindgen"] [package] name = "ldk-node" -version = "0.7.0-rc.25" +version = "0.7.0-rc.27" authors = ["Elias Rohrer <dev@tnull.de>"] homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" diff --git a/Package.swift b/Package.swift index a6a805327..8af5d2404 100644 --- a/Package.swift +++ b/Package.swift @@ -3,8 +3,8 @@ import PackageDescription -let tag = "v0.7.0-rc.25" -let checksum = "3abf83a20d41a79337b9eae1c86da375b49423d5fe5176e4876b76285fde44ee" +let tag = "v0.7.0-rc.27" +let checksum = "c18449b57c5da535b56d1575505c26d68ecca4440c41db9edc23522747742034" let url = "https://github.com/synonymdev/ldk-node/releases/download/\(tag)/LDKNodeFFI.xcframework.zip" let package = Package( diff --git a/bindings/kotlin/ldk-node-android/gradle.properties b/bindings/kotlin/ldk-node-android/gradle.properties index f1ac84414..d82a1818f 100644 --- a/bindings/kotlin/ldk-node-android/gradle.properties +++ 
b/bindings/kotlin/ldk-node-android/gradle.properties @@ -3,4 +3,4 @@ android.useAndroidX=true android.enableJetifier=true kotlin.code.style=official group=com.synonym -version=0.7.0-rc.25 +version=0.7.0-rc.27 diff --git a/bindings/kotlin/ldk-node-android/lib/src/main/jniLibs/arm64-v8a/libldk_node.so b/bindings/kotlin/ldk-node-android/lib/src/main/jniLibs/arm64-v8a/libldk_node.so index 5f939988c..c9e9c0279 100755 Binary files a/bindings/kotlin/ldk-node-android/lib/src/main/jniLibs/arm64-v8a/libldk_node.so and b/bindings/kotlin/ldk-node-android/lib/src/main/jniLibs/arm64-v8a/libldk_node.so differ diff --git a/bindings/kotlin/ldk-node-android/lib/src/main/jniLibs/armeabi-v7a/libldk_node.so b/bindings/kotlin/ldk-node-android/lib/src/main/jniLibs/armeabi-v7a/libldk_node.so index 4a6da127f..48bb1822f 100755 Binary files a/bindings/kotlin/ldk-node-android/lib/src/main/jniLibs/armeabi-v7a/libldk_node.so and b/bindings/kotlin/ldk-node-android/lib/src/main/jniLibs/armeabi-v7a/libldk_node.so differ diff --git a/bindings/kotlin/ldk-node-android/lib/src/main/jniLibs/x86_64/libldk_node.so b/bindings/kotlin/ldk-node-android/lib/src/main/jniLibs/x86_64/libldk_node.so index 965e35f91..bbc20b5a6 100755 Binary files a/bindings/kotlin/ldk-node-android/lib/src/main/jniLibs/x86_64/libldk_node.so and b/bindings/kotlin/ldk-node-android/lib/src/main/jniLibs/x86_64/libldk_node.so differ diff --git a/bindings/kotlin/ldk-node-jvm/gradle.properties b/bindings/kotlin/ldk-node-jvm/gradle.properties index cc08292e6..2e28ecdcf 100644 --- a/bindings/kotlin/ldk-node-jvm/gradle.properties +++ b/bindings/kotlin/ldk-node-jvm/gradle.properties @@ -1,4 +1,4 @@ org.gradle.jvmargs=-Xmx1536m kotlin.code.style=official group=com.synonym -version=0.7.0-rc.25 +version=0.7.0-rc.27 diff --git a/bindings/kotlin/ldk-node-jvm/lib/src/main/resources/darwin-aarch64/libldk_node.dylib b/bindings/kotlin/ldk-node-jvm/lib/src/main/resources/darwin-aarch64/libldk_node.dylib index f83adfc83..54fc1a5a7 100644 Binary files 
a/bindings/kotlin/ldk-node-jvm/lib/src/main/resources/darwin-aarch64/libldk_node.dylib and b/bindings/kotlin/ldk-node-jvm/lib/src/main/resources/darwin-aarch64/libldk_node.dylib differ diff --git a/bindings/kotlin/ldk-node-jvm/lib/src/main/resources/darwin-x86-64/libldk_node.dylib b/bindings/kotlin/ldk-node-jvm/lib/src/main/resources/darwin-x86-64/libldk_node.dylib index 43a37f022..1d7e300f4 100644 Binary files a/bindings/kotlin/ldk-node-jvm/lib/src/main/resources/darwin-x86-64/libldk_node.dylib and b/bindings/kotlin/ldk-node-jvm/lib/src/main/resources/darwin-x86-64/libldk_node.dylib differ diff --git a/bindings/python/pyproject.toml b/bindings/python/pyproject.toml index 210b1e43f..b0d1d02d8 100644 --- a/bindings/python/pyproject.toml +++ b/bindings/python/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ldk_node" -version = "0.7.0-rc.25" +version = "0.7.0-rc.27" authors = [ { name="Elias Rohrer", email="dev@tnull.de" }, ] diff --git a/src/lib.rs b/src/lib.rs index 751d02160..8e3bbf7c8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1107,6 +1107,12 @@ impl Node { let peer_info = PeerInfo { node_id, address }; + // Persist first so the address is updated even if the connection attempt + // races with an in-flight reconnection loop attempt at the old address. + if persist { + self.peer_store.add_peer(peer_info.clone())?; + } + let con_node_id = peer_info.node_id; let con_addr = peer_info.address.clone(); let con_cm = Arc::clone(&self.connection_manager); @@ -1119,10 +1125,6 @@ impl Node { log_info!(self.logger, "Connected to peer {}@{}. 
", peer_info.node_id, peer_info.address); - if persist { - self.peer_store.add_peer(peer_info)?; - } - Ok(()) } diff --git a/src/peer_store.rs b/src/peer_store.rs index 59cd3d94f..fc0ea090b 100644 --- a/src/peer_store.rs +++ b/src/peer_store.rs @@ -18,7 +18,7 @@ use crate::io::{ PEER_INFO_PERSISTENCE_KEY, PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, }; -use crate::logger::{log_error, LdkLogger}; +use crate::logger::{log_error, log_info, LdkLogger}; use crate::types::DynStore; use crate::{Error, SocketAddress}; @@ -43,8 +43,17 @@ where pub(crate) fn add_peer(&self, peer_info: PeerInfo) -> Result<(), Error> { let mut locked_peers = self.peers.write().unwrap(); - if locked_peers.contains_key(&peer_info.node_id) { - return Ok(()); + if let Some(existing) = locked_peers.get(&peer_info.node_id) { + if existing.address == peer_info.address { + return Ok(()); + } + log_info!( + self.logger, + "Updating socket address for peer {}: {} -> {}", + peer_info.node_id, + existing.address, + peer_info.address + ); } locked_peers.insert(peer_info.node_id, peer_info); @@ -194,4 +203,55 @@ mod tests { assert_eq!(peers[0], expected_peer_info); assert_eq!(deser_peer_store.get_peer(&node_id), Some(expected_peer_info)); } + + #[test] + fn peer_address_updated_on_readd() { + let store: Arc = Arc::new(InMemoryStore::new()); + let logger = Arc::new(TestLogger::new()); + let peer_store = PeerStore::new(Arc::clone(&store), Arc::clone(&logger)); + + let node_id = PublicKey::from_str( + "0276607124ebe6a6c9338517b6f485825b27c2dcc0b9fc2aa6a4c0df91194e5993", + ) + .unwrap(); + let old_address = SocketAddress::from_str("34.65.186.40:9735").unwrap(); + let new_address = SocketAddress::from_str("34.65.153.174:9735").unwrap(); + + peer_store.add_peer(PeerInfo { node_id, address: old_address.clone() }).unwrap(); + assert_eq!(peer_store.get_peer(&node_id).unwrap().address, old_address); + + peer_store.add_peer(PeerInfo { node_id, address: new_address.clone() 
}).unwrap(); + assert_eq!(peer_store.get_peer(&node_id).unwrap().address, new_address); + + assert_eq!(peer_store.list_peers().len(), 1); + + let persisted_bytes = KVStoreSync::read( + &*store, + PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + PEER_INFO_PERSISTENCE_KEY, + ) + .unwrap(); + let deser_peer_store = + PeerStore::read(&mut &persisted_bytes[..], (Arc::clone(&store), logger)).unwrap(); + assert_eq!(deser_peer_store.get_peer(&node_id).unwrap().address, new_address); + } + + #[test] + fn peer_same_address_skips_persist() { + let store: Arc<DynStore> = Arc::new(InMemoryStore::new()); + let logger = Arc::new(TestLogger::new()); + let peer_store = PeerStore::new(Arc::clone(&store), Arc::clone(&logger)); + + let node_id = PublicKey::from_str( + "0276607124ebe6a6c9338517b6f485825b27c2dcc0b9fc2aa6a4c0df91194e5993", + ) + .unwrap(); + let address = SocketAddress::from_str("127.0.0.1:9738").unwrap(); + + peer_store.add_peer(PeerInfo { node_id, address: address.clone() }).unwrap(); + + peer_store.add_peer(PeerInfo { node_id, address }).unwrap(); + assert_eq!(peer_store.list_peers().len(), 1); + } } diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index abc53d572..21ae14d4f 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -911,6 +911,66 @@ async fn do_connection_restart_behavior(persist: bool) { } } +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn peer_address_persisted_on_connect_failure() { + let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); + + let node_id_b = node_b.node_id(); + let real_addr_b = node_b.listening_addresses().unwrap().first().unwrap().clone(); + + // Stop node_b so all connection attempts to it will fail. 
+ node_b.stop().unwrap(); + + let fake_addr: lightning::ln::msgs::SocketAddress = "127.0.0.1:19999".parse().unwrap(); + + // Attempt to connect with persist=true to an unreachable address. The connection + // will fail, but the peer address must still be persisted. This is a regression test + // for a bug where add_peer was called AFTER the connection attempt, meaning a failed + // connect would skip persistence entirely. + let res = node_a.connect(node_id_b, fake_addr.clone(), true); + assert!(res.is_err()); + + let peers_a = node_a.list_peers(); + let peer = peers_a + .iter() + .find(|p| p.node_id == node_id_b) + .expect("Peer must be in store even after failed connection when persist=true"); + assert!(peer.is_persisted); + assert!(!peer.is_connected); + assert_eq!(peer.address, fake_addr); + + // Now "update" to the real address (still unreachable since node_b is stopped). + // This verifies the upsert: even though connect fails again, the stored address + // should be updated to the new one. + let res = node_a.connect(node_id_b, real_addr_b.clone(), true); + assert!(res.is_err()); + + let peers_a = node_a.list_peers(); + let peer = peers_a + .iter() + .find(|p| p.node_id == node_id_b) + .expect("Peer must still be in store after second failed connection"); + assert_eq!(peer.address, real_addr_b, "Stored address must be updated to the new one"); + + // Restart node_b and node_a to verify the persisted address survives restart + // and the reconnection loop uses the correct (updated) address. 
+ node_b.start().unwrap(); + node_a.stop().unwrap(); + node_a.start().unwrap(); + + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let peers_a = node_a.list_peers(); + let peer = peers_a + .iter() + .find(|p| p.node_id == node_id_b) + .expect("Peer must reconnect after restart using persisted address"); + assert!(peer.is_connected); + assert!(peer.is_persisted); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn concurrent_connections_succeed() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd();