p2p, swarm: fix node up races by granular locking (#18976)
* swarm/network: DRY out repeated giga comment

  I don't necessarily agree with the way we wait for event propagation,
  but I truly disagree with having duplicated giga comments.

* p2p/simulations: encapsulate Node.Up field so we avoid data races

  The Node.Up field was accessed concurrently without "proper" locking.
  There was a lock on Network, and sometimes it was used to access the
  field. Other times the locking was missed and we had a data race.
  For example: https://github.com/ethereum/go-ethereum/pull/18464

  The case above was solved, but there were still intermittent, hard to
  reproduce races. So let's solve the issue permanently: hide the field
  behind its own lock, as in the sketch below.

  resolves: ethersphere/go-ethereum#1146
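  A minimal sketch of that encapsulation, assuming a dedicated
  sync.RWMutex guards the now-unexported field; the Up/SetUp names match
  the diff below, but the exact struct layout here is illustrative:

```go
package simulations

import "sync"

// Node keeps its running state behind its own lock, so every reader
// must go through Up() and every writer through SetUp().
type Node struct {
	up   bool
	upMu sync.RWMutex
}

// Up returns whether the node is currently running.
func (n *Node) Up() bool {
	n.upMu.RLock()
	defer n.upMu.RUnlock()
	return n.up
}

// SetUp sets the node's running state.
func (n *Node) SetUp(up bool) {
	n.upMu.Lock()
	defer n.upMu.Unlock()
	n.up = up
}
```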
* p2p/simulations: fix unmarshal of simulations.Node

  Making the Node.Up field private in 13292ee897 broke TestHTTPNetwork
  and TestHTTPSnapshot, because the default UnmarshalJSON does not
  handle unexported fields.

  Important: the fix is partial and not proper to my taste. But I cut
  scope, as I think a proper fix may require a change to the current
  serialization format. New ticket:
  https://github.com/ethersphere/go-ethereum/issues/1177

* p2p/simulations: add a sanity test case for Node.Config UnmarshalJSON
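  For context: encoding/json silently skips unexported fields, so a
  custom UnmarshalJSON has to decode into an exported auxiliary struct
  and copy the value over. A hedged sketch building on the Node type
  above; the serialized shape (a single "up" key) is an assumption, not
  necessarily the exact upstream format:

```go
package simulations

import "encoding/json"

// UnmarshalJSON restores the unexported up field from the "up" JSON
// key. Without this, json.Unmarshal would leave up == false.
func (n *Node) UnmarshalJSON(data []byte) error {
	// Decode into an exported mirror of the serialized fields first.
	var aux struct {
		Up bool `json:"up"`
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	n.SetUp(aux.Up)
	return nil
}
```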
* p2p/simulations: revert back to defer Unlock() pattern for Network

  It's a good pattern to call `defer Unlock()` right after `Lock()` so
  (new) error cases won't fail to unlock. Let's get back to that
  pattern. The pattern was abandoned in 85a79b3ad3 while fixing a data
  race. That data race does not exist anymore, since the Node.Up field
  got hidden behind its own lock.

* p2p/simulations: consistent naming for test providers Node.UnmarshalJSON

* p2p/simulations: remove JSON annotation from private fields of Node

  As unexported fields are not serialized.

* p2p/simulations: fix deadlock in Network.GetRandomDownNode()

  Problem: GetRandomDownNode() locks -> getDownNodeIDs() -> GetNodes()
  tries to lock -> deadlock.

  On the Network type, unexported functions must assume that `net.lock`
  is already acquired and should not call exported functions, which
  might try to lock again. (See the locking sketch after this list.)

* p2p/simulations: ensure method conformity for Network

  Connect* methods were moved to p2p/simulations.Network from
  swarm/network/simulation. However, these new methods did not follow
  the pattern of Network methods, i.e., every exported method locks the
  whole Network, either for read or for write.

* p2p/simulations: fix deadlock during network shutdown

  `TestDiscoveryPersistenceSimulationSimAdapter` often got into
  deadlock. The execution was stuck on two locks, i.e., `Kademlia.lock`
  and `p2p/simulations.Network.lock`. Usually the test got stuck once in
  every 20 executions, with high confidence. `Kademlia` was stuck in
  `Kademlia.EachAddr()` and `Network` in `Network.Stop()`.

  Solution: in `Network.Stop()`, `net.lock` must be released before
  calling `node.Stop()`, as stopping a node (somehow - I did not find
  the exact code path) causes `Network.InitConn()` to be called from
  `Kademlia.SuggestPeer()`, and that blocks on `net.lock`.

  Related ticket: https://github.com/ethersphere/go-ethereum/issues/1223

* swarm/state: simplify if statement in DBStore.Put()

* p2p/simulations: remove faulty godoc from private function

  The comment started with the wrong method name. The method is simple
  and self-explanatory. Also, it's private. => Let's just remove the
  comment.
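  A hedged sketch of the locking discipline described above, not the
  literal p2p/simulations code: exported methods take net.lock with an
  immediate defer Unlock, unexported helpers assume the lock is already
  held, and Stop releases the lock before stopping nodes. The getNodes
  helper and the Stop stub on Node are illustrative:

```go
package simulations

import "sync"

// Network is a stripped-down stand-in for p2p/simulations.Network.
type Network struct {
	lock  sync.RWMutex
	Nodes []*Node
}

// GetNodes is exported, so it acquires the lock itself, using the
// defer-right-after-Lock pattern so no return path misses the unlock.
func (net *Network) GetNodes() []*Node {
	net.lock.RLock()
	defer net.lock.RUnlock()
	return net.getNodes()
}

// getNodes is unexported: it assumes net.lock is already held and must
// not call an exported method, or the lock would be taken twice.
func (net *Network) getNodes() []*Node {
	nodes := make([]*Node, len(net.Nodes))
	copy(nodes, net.Nodes)
	return nodes
}

// Stop copies the node list under the lock, then releases the lock
// before calling node.Stop(), because stopping a node can re-enter
// Network (e.g. via InitConn) and block on net.lock.
func (net *Network) Stop() {
	net.lock.Lock()
	nodes := net.getNodes()
	net.lock.Unlock()
	for _, node := range nodes {
		node.Stop() // must run without holding net.lock
	}
}

// Stop is a stub for the real node shutdown; here it only flips the flag.
func (n *Node) Stop() {
	n.SetUp(false)
}
```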
commit 50b872bf05
parent 12ca3b172a
committed by Viktor Trón
@@ -44,7 +44,7 @@ func (s *Simulation) NodeIDs() (ids []enode.ID) {
 func (s *Simulation) UpNodeIDs() (ids []enode.ID) {
 	nodes := s.Net.GetNodes()
 	for _, node := range nodes {
-		if node.Up {
+		if node.Up() {
 			ids = append(ids, node.ID())
 		}
 	}
@@ -55,7 +55,7 @@ func (s *Simulation) UpNodeIDs() (ids []enode.ID) {
 func (s *Simulation) DownNodeIDs() (ids []enode.ID) {
 	nodes := s.Net.GetNodes()
 	for _, node := range nodes {
-		if !node.Up {
+		if !node.Up() {
 			ids = append(ids, node.ID())
 		}
 	}
@@ -54,7 +54,7 @@ func TestUpDownNodeIDs(t *testing.T) {
 	gotIDs = sim.UpNodeIDs()
 
 	for _, id := range gotIDs {
-		if !sim.Net.GetNode(id).Up {
+		if !sim.Net.GetNode(id).Up() {
 			t.Errorf("node %s should not be down", id)
 		}
 	}
@@ -66,7 +66,7 @@ func TestUpDownNodeIDs(t *testing.T) {
 	gotIDs = sim.DownNodeIDs()
 
 	for _, id := range gotIDs {
-		if sim.Net.GetNode(id).Up {
+		if sim.Net.GetNode(id).Up() {
 			t.Errorf("node %s should not be up", id)
 		}
 	}
@@ -112,7 +112,7 @@ func TestAddNode(t *testing.T) {
 		t.Fatal("node not found")
 	}
 
-	if !n.Up {
+	if !n.Up() {
 		t.Error("node not started")
 	}
 }
@@ -327,7 +327,7 @@ func TestStartStopNode(t *testing.T) {
 	if n == nil {
 		t.Fatal("node not found")
 	}
-	if !n.Up {
+	if !n.Up() {
 		t.Error("node not started")
 	}
 
@@ -335,26 +335,17 @@ func TestStartStopNode(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if n.Up {
+	if n.Up() {
 		t.Error("node not stopped")
 	}
 
-	// Sleep here to ensure that Network.watchPeerEvents defer function
-	// has set the `node.Up = false` before we start the node again.
-	// p2p/simulations/network.go:215
-	//
-	// The same node is stopped and started again, and upon start
-	// watchPeerEvents is started in a goroutine. If the node is stopped
-	// and then very quickly started, that goroutine may be scheduled later
-	// then start and force `node.Up = false` in its defer function.
-	// This will make this test unreliable.
-	time.Sleep(time.Second)
+	waitForPeerEventPropagation()
 
 	err = sim.StartNode(id)
 	if err != nil {
 		t.Fatal(err)
 	}
-	if !n.Up {
+	if !n.Up() {
 		t.Error("node not started")
 	}
 }
@@ -377,7 +368,7 @@ func TestStartStopRandomNode(t *testing.T) {
 	if n == nil {
 		t.Fatal("node not found")
 	}
-	if n.Up {
+	if n.Up() {
 		t.Error("node not stopped")
 	}
 
@@ -386,16 +377,7 @@ func TestStartStopRandomNode(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// Sleep here to ensure that Network.watchPeerEvents defer function
-	// has set the `node.Up = false` before we start the node again.
-	// p2p/simulations/network.go:215
-	//
-	// The same node is stopped and started again, and upon start
-	// watchPeerEvents is started in a goroutine. If the node is stopped
-	// and then very quickly started, that goroutine may be scheduled later
-	// then start and force `node.Up = false` in its defer function.
-	// This will make this test unreliable.
-	time.Sleep(time.Second)
+	waitForPeerEventPropagation()
 
 	idStarted, err := sim.StartRandomNode()
 	if err != nil {
@@ -426,21 +408,12 @@ func TestStartStopRandomNodes(t *testing.T) {
 		if n == nil {
 			t.Fatal("node not found")
 		}
-		if n.Up {
+		if n.Up() {
 			t.Error("node not stopped")
 		}
 	}
 
-	// Sleep here to ensure that Network.watchPeerEvents defer function
-	// has set the `node.Up = false` before we start the node again.
-	// p2p/simulations/network.go:215
-	//
-	// The same node is stopped and started again, and upon start
-	// watchPeerEvents is started in a goroutine. If the node is stopped
-	// and then very quickly started, that goroutine may be scheduled later
-	// then start and force `node.Up = false` in its defer function.
-	// This will make this test unreliable.
-	time.Sleep(time.Second)
+	waitForPeerEventPropagation()
 
 	ids, err = sim.StartRandomNodes(2)
 	if err != nil {
@@ -452,8 +425,20 @@ func TestStartStopRandomNodes(t *testing.T) {
 		if n == nil {
 			t.Fatal("node not found")
 		}
-		if !n.Up {
+		if !n.Up() {
 			t.Error("node not started")
 		}
 	}
 }
+
+func waitForPeerEventPropagation() {
+	// Sleep here to ensure that Network.watchPeerEvents defer function
+	// has set the `node.Up() = false` before we start the node again.
+	//
+	// The same node is stopped and started again, and upon start
+	// watchPeerEvents is started in a goroutine. If the node is stopped
+	// and then very quickly started, that goroutine may be scheduled later
+	// then start and force `node.Up() = false` in its defer function.
+	// This will make this test unreliable.
+	time.Sleep(1 * time.Second)
+}
@@ -52,7 +52,7 @@ func (s *Simulation) Services(name string) (services map[enode.ID]node.Service)
 	nodes := s.Net.GetNodes()
 	services = make(map[enode.ID]node.Service)
 	for _, node := range nodes {
-		if !node.Up {
+		if !node.Up() {
 			continue
 		}
 		simNode, ok := node.Node.(*adapters.SimNode)
@@ -124,7 +124,7 @@ func TestClose(t *testing.T) {
 
 	var upNodeCount int
 	for _, n := range sim.Net.GetNodes() {
-		if n.Up {
+		if n.Up() {
 			upNodeCount++
 		}
 	}
@@ -140,7 +140,7 @@ func TestClose(t *testing.T) {
 
 	upNodeCount = 0
 	for _, n := range sim.Net.GetNodes() {
-		if n.Up {
+		if n.Up() {
 			upNodeCount++
 		}
 	}