// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#pragma once

#include <atomic>
#include <cstddef>

#include <lockdep/common.h>

namespace lockdep {

14 // A lock-free, wait-free hash set that tracks the set of lock classes that have
15 // been acquired prior the lock class that owns the set. Each lock class
16 // maintains its own dependency set.
17 //
18 // Implementation note: This hash set makes use of relaxed atomic operations.
19 // This approach is appropriate because the only variables communicated between
20 // threads are the values of the atomic variables themselves, other loads and
21 // stores are not published. Additionally, sequential consistency within a
22 // thread is ensured due to control dependencies only on the atomic variables.
23 class LockDependencySet {
24 public:
25     LockDependencySet() = default;
26     LockDependencySet(const LockDependencySet&) = delete;
27     void operator=(const LockDependencySet&) = delete;
28     LockDependencySet(LockDependencySet&&) = delete;
29     void operator=(LockDependencySet&&) = delete;
30 
31     // Checks the dependency hash set for the given lock class. This method may
32     // safely race with AddLockClass(), converging on the correct answer by the
33     // next check.
HasLockClass(LockClassId id)34     bool HasLockClass(LockClassId id) const {
35         for (size_t i = 0; i < kMaxLockDependencies; i++) {
36             const auto& entry = GetEntry(id, i);
37             const LockClassId entry_id = entry.load(std::memory_order_relaxed);
38             if (entry_id == id)
39                 return true;
40             else if (entry_id == kInvalidLockClassId)
41                 return false;
42         }
43         return false;
44     }
45 
46     // Adds the given lock class id to the dependency set if not already present.
47     // Updates the set using the following lock free approach:
48     //   1. The dependency set is fixed size and all entries start out empty.
49     //   2. New entries are added using open addressing with linear probing.
50     //   3. An entry may only change from empty to holding a lock class id.
51     //   4. To add an entry the set is probed linearly until either:
52     //      a) The id to add appears in the set already.
53     //      b) The first empty entry is found.
54     //      c) The entire set has been probed; return max dependencies error.
55     //   5. Attempt to compare-exchange the empty entry with the lock class id:
56     //      a) If the update succeeds return success.
57     //      b) If the update does not succeed but the winning value is the same
58     //         lock class id return with success.
59     //      c) If the update does not succeed and the winning value is a different
60     //         lock class id proceed to the next entry and continue the probe from
61     //         step #4.
AddLockClass(LockClassId id)62     LockResult AddLockClass(LockClassId id) {
63         for (size_t i = 0; i < kMaxLockDependencies; i++) {
64             auto& entry = GetEntry(id, i);
65             LockClassId entry_id = entry.load(std::memory_order_relaxed);
66             if (entry_id == id)
67                 return LockResult::DependencyExists;
68             while (entry_id == kInvalidLockClassId) {
69                 const bool result =
70                     entry.compare_exchange_weak(entry_id, id,
71                                                 std::memory_order_relaxed,
72                                                 std::memory_order_relaxed);
73                 if (result) {
74                     return LockResult::Success;
75                 } else if (entry_id == id) {
76                     return LockResult::DependencyExists;
77                 } else {
78                     // Continue to find an unused slot, moving back to the outer
79                     // loop because of the while loop condition.
80                 }
81             }
82         }
83 
84         return LockResult::MaxLockDependencies;
85     }
86 
87     // Iterator type to traverse the set of populated lock classes. Entries
88     // added after the iterator is created may or may not be returned, depending
89     // on where they land in the hash set relative to the index member.
90     struct Iterator {
91         const LockDependencySet* set;
92         size_t index;
93 
94         LockClassId operator*() const {
95             // TODO(eieio): See if it makes sense to add a memory order param
96             // to the iterator.
97             return set->list_[index].load(std::memory_order_relaxed);
98         }
99 
100         Iterator operator++() {
101             while (index != kMaxLockDependencies) {
102                 index++;
103                 if (**this != kInvalidLockClassId)
104                     break;
105             }
106             return *this;
107         }
108 
109         bool operator!=(const Iterator& other) const {
110             return set != other.set || index != other.index;
111         }
112     };
113 
114     // Iterator accessors.
begin()115     Iterator begin() const {
116         Iterator iter{this, 0};
117         if (*iter == kInvalidLockClassId)
118             ++iter;
119         return iter;
120     }
121 
end()122     Iterator end() const { return {this, kMaxLockDependencies}; }
123 
124     // Clears the dependency set. This method is not used by the main algorithm
125     // but may be useful for unit tests and benchmarking. Until lock sequence
126     // memoization is implemented it is generally safe to call this method at
127     // any time, resulting in dependency set being rebuilt at runtime. Once
128     // lock sequence memoization is implemented it is necessary to clear the
129     // memoization table whenever any dependency set is cleared so that the
130     // dependency set can be rebuilt; failure to do so could result in missing
131     // new lock violations.
clear()132     void clear() {
133         for (size_t i = 0; i < kMaxLockDependencies; i++)
134             list_[i].store(kInvalidLockClassId, std::memory_order_relaxed);
135     }
136 
137 private:
138     // Returns a reference to an entry by computing a trivial hash of the given id
139     // and a linear probe offset.
GetEntry(LockClassId id,size_t offset)140     std::atomic<LockClassId>& GetEntry(LockClassId id, size_t offset) {
141         const size_t index = (id + offset) % kMaxLockDependencies;
142         return list_[index];
143     }
GetEntry(LockClassId id,size_t offset)144     const std::atomic<LockClassId>& GetEntry(LockClassId id, size_t offset) const {
145         const size_t index = (id + offset) % kMaxLockDependencies;
146         return list_[index];
147     }
148 
149     // The list of atomic variables that make up the hash set. Initialized to
150     // kInvalidLockClassId  (0).
151     std::atomic<LockClassId> list_[kMaxLockDependencies]{};
152 };

} // namespace lockdep