// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <digest/digest.h>
#include <fbl/auto_call.h>
#include <fbl/auto_lock.h>
#include <fbl/ref_ptr.h>
#include <zircon/status.h>

#include <utility>

#include <blobfs/blob-cache.h>

using digest::Digest;

namespace blobfs {

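// A summary of the caching scheme implemented below:
//
// - |open_hash_| tracks nodes that currently have external strong references.
//   It holds raw pointers; the strong references are owned by callers.
// - |closed_hash_| tracks nodes with no external references. Each entry holds
//   one deliberately leaked reference (see Downgrade), which is re-adopted
//   when the node is reopened (see UpgradeLocked).
// - |hash_lock_| guards both maps. |release_cvar_| wakes waiters whenever a
//   node that was being released is finally evicted or downgraded.
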
BlobCache::BlobCache() = default;
BlobCache::~BlobCache() {
    Reset();
}

void BlobCache::Reset() {
    ForAllOpenNodes([this](fbl::RefPtr<CacheNode> node) {
        // If another caller races with Reset and evicts an open node before we
        // do, a status other than ZX_OK may be returned. This is allowed.
        __UNUSED zx_status_t status = Evict(node);
    });

    fbl::AutoLock lock(&hash_lock_);
    ResetLocked();
}

void BlobCache::ResetLocked() {
    // All nodes in closed_hash_ have been leaked. If we're attempting to reset the
    // cache, these nodes must be explicitly deleted.
    CacheNode* node = nullptr;
    while ((node = closed_hash_.pop_front()) != nullptr) {
        delete node;
    }
}

void BlobCache::ForAllOpenNodes(NextNodeCallback callback) {
    fbl::RefPtr<CacheNode> old_vnode = nullptr;
    fbl::RefPtr<CacheNode> vnode = nullptr;

    while (true) {
        // Scope the lock to avoid running fbl::RefPtr<CacheNode> destructors
        // while it is held.
        {
            fbl::AutoLock lock(&hash_lock_);
            if (open_hash_.is_empty()) {
                return;
            }

            CacheNode* raw_vnode = nullptr;
            if (old_vnode == nullptr) {
                // Acquire the first node from the front of the cache...
                raw_vnode = &open_hash_.front();
            } else {
                // ... Acquire all subsequent nodes by iterating from the lower
                // bound of the current node. Resuming by key (rather than by
                // iterator) is resilient to |old_vnode| having been evicted
                // while the lock was dropped for the callback.
                auto current = open_hash_.lower_bound(old_vnode->GetKey());
                if (current == open_hash_.end()) {
                    return;
                } else if (current.CopyPointer() != old_vnode.get()) {
                    raw_vnode = current.CopyPointer();
                } else {
                    auto next = ++current;
                    if (next == open_hash_.end()) {
                        return;
                    }
                    raw_vnode = next.CopyPointer();
                }
            }
            vnode = fbl::MakeRefPtrUpgradeFromRaw(raw_vnode, hash_lock_);
            if (vnode == nullptr) {
                // The vnode is actively being released. Wait until it has been
                // evicted or downgraded, then resume the traversal.
                release_cvar_.Wait(&hash_lock_);
                continue;
            }
        }
        callback(vnode);
        old_vnode = std::move(vnode);
    }
}
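
// Note on the traversal above: because |hash_lock_| is dropped while the
// callback runs, the set of open nodes may change between iterations. The
// key-based resume visits every node that stays open for the whole traversal
// exactly once; nodes inserted or evicted mid-traversal may or may not be
// visited.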

zx_status_t BlobCache::Lookup(const Digest& digest, fbl::RefPtr<CacheNode>* out) {
    TRACE_DURATION("blobfs", "BlobCache::Lookup");
    const uint8_t* key = digest.AcquireBytes();
    auto release = fbl::MakeAutoCall([&digest]() { digest.ReleaseBytes(); });

    // Look up the blob in the maps.
    fbl::RefPtr<CacheNode> vnode = nullptr;
    // Avoid releasing a reference to |vnode| while holding |hash_lock_|.
    {
        fbl::AutoLock lock(&hash_lock_);
        zx_status_t status = LookupLocked(key, &vnode);
        if (status != ZX_OK) {
            return status;
        }
    }
    ZX_DEBUG_ASSERT(vnode != nullptr);

    if (out != nullptr) {
        *out = std::move(vnode);
    }
    return ZX_OK;
}
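
// A minimal usage sketch (hypothetical caller). Passing nullptr for |out|
// checks for existence without returning a reference:
//
//   fbl::RefPtr<CacheNode> node;
//   if (cache.Lookup(digest, &node) == ZX_OK) {
//       // |node| now holds a strong reference, and the node has been moved
//       // (back) into |open_hash_|.
//   }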

zx_status_t BlobCache::LookupLocked(const uint8_t* key, fbl::RefPtr<CacheNode>* out) {
    ZX_DEBUG_ASSERT(out != nullptr);

    // Try to acquire the node from the open hash, if possible.
    while (true) {
        auto raw_vnode = open_hash_.find(key).CopyPointer();
        if (raw_vnode != nullptr) {
            *out = fbl::MakeRefPtrUpgradeFromRaw(raw_vnode, hash_lock_);
            if (*out == nullptr) {
                // This condition is only possible if:
                // - The raw pointer to the Vnode exists in the open map,
                // with refcount == 0.
                // - Another thread is fbl_recycling this Vnode, but has not
                // yet resurrected/evicted it.
                // - The vnode is being moved to the closed cache, and is
                // not yet purged.
                //
                // It is not safe for us to attempt to Resurrect the Vnode. If
                // we do so, then the caller of Lookup may unlink, purge, and
                // destroy the Vnode concurrently before the original caller of
                // "fbl_recycle" completes.
                //
                // Since the window of time for this condition is extremely
                // small (between Release and the resurrection of the Vnode),
                // and only contains a single flag check, we use a condition
                // variable to wait until it is released, and try again.
                release_cvar_.Wait(&hash_lock_);
                continue;
            }
            return ZX_OK;
        }
        break;
    }

    // If the node doesn't exist in the open hash, acquire it from the closed hash.
    *out = UpgradeLocked(key);
    if (*out == nullptr) {
        return ZX_ERR_NOT_FOUND;
    }
    return ZX_OK;
}

zx_status_t BlobCache::Add(const fbl::RefPtr<CacheNode>& vnode) {
    TRACE_DURATION("blobfs", "BlobCache::Add");

    const uint8_t* key = vnode->GetKey();
    // Avoid running the old_node destructor while holding the lock.
    fbl::RefPtr<CacheNode> old_node;
    {
        fbl::AutoLock lock(&hash_lock_);
        if (LookupLocked(key, &old_node) == ZX_OK) {
            return ZX_ERR_ALREADY_EXISTS;
        }
        open_hash_.insert(vnode.get());
    }
    return ZX_OK;
}
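
// A typical create path pairs Lookup and Add (sketch; |MakeCacheNode| is a
// hypothetical factory for a concrete CacheNode subclass):
//
//   fbl::RefPtr<CacheNode> node;
//   if (cache.Lookup(digest, &node) == ZX_ERR_NOT_FOUND) {
//       node = MakeCacheNode(digest);
//       if (cache.Add(node) == ZX_ERR_ALREADY_EXISTS) {
//           // Another thread inserted the same blob first; Lookup again.
//       }
//   }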

zx_status_t BlobCache::Evict(const fbl::RefPtr<CacheNode>& vnode) {
    TRACE_DURATION("blobfs", "BlobCache::Evict");

    return EvictUnsafe(vnode.get());
}

zx_status_t BlobCache::EvictUnsafe(CacheNode* vnode, bool from_recycle) {
    fbl::AutoLock lock(&hash_lock_);

    // If this node isn't in any container, we have nothing to evict.
    if (!vnode->InContainer()) {
        return ZX_ERR_NOT_FOUND;
    }

    ZX_ASSERT(open_hash_.erase(*vnode) != nullptr);
    ZX_ASSERT(closed_hash_.find(vnode->GetKey()).CopyPointer() == nullptr);

    // If we successfully evicted the node from a container, we may have been invoked
    // from fbl_recycle. In this case, callers of |Lookup| may be blocked waiting
    // until this "open node" is evicted.
    //
    // For this reason, they should be signalled.
    if (from_recycle) {
        release_cvar_.Broadcast();
    }
    return ZX_OK;
}
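
// The Broadcast above pairs with the release_cvar_.Wait calls in LookupLocked
// and ForAllOpenNodes: both wait for a node whose refcount has reached zero
// to be either evicted (here) or downgraded (below) before retrying.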

void BlobCache::Downgrade(CacheNode* raw_vnode) {
    fbl::AutoLock lock(&hash_lock_);
    // We must resurrect the vnode while holding the lock to prevent it from being
    // concurrently accessed in Lookup, and gaining a strong reference before
    // being erased from open_hash_.
    raw_vnode->ResurrectRef();
    fbl::RefPtr<CacheNode> vnode = fbl::internal::MakeRefPtrNoAdopt(raw_vnode);

    // If the node has already been evicted, destroy it instead of caching it.
    //
    // Delete it explicitly to prevent repeatedly calling fbl_recycle.
    if (!vnode->InContainer()) {
        delete vnode.leak_ref();
        return;
    }

    ZX_ASSERT(open_hash_.erase(*raw_vnode) != nullptr);
    release_cvar_.Broadcast();
    ZX_ASSERT(closed_hash_.insert_or_find(vnode.get()));

    // While in the closed cache, the blob may either be destroyed or kept in an
    // inactive state. The policy chosen here trades memory usage against
    // performance.
    switch (cache_policy_) {
    case CachePolicy::EvictImmediately:
        vnode->ActivateLowMemory();
        break;
    case CachePolicy::NeverEvict:
        break;
    default:
        ZX_ASSERT_MSG(false, "Unexpected cache policy");
    }

    // To exist in the closed_hash_, this RefPtr must be leaked.
    // See the complement of this leak in UpgradeLocked.
    __UNUSED auto leak = vnode.leak_ref();
}
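
// Downgrade and UpgradeLocked form a matched pair: Downgrade leaks one
// reference when a node moves into |closed_hash_|, and UpgradeLocked adopts
// that same reference when the node moves back out. The leaked reference is
// what keeps a closed, externally unreferenced node alive while it sits in
// the cache.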

fbl::RefPtr<CacheNode> BlobCache::UpgradeLocked(const uint8_t* key) {
    ZX_DEBUG_ASSERT(open_hash_.find(key).CopyPointer() == nullptr);
    CacheNode* raw_vnode = closed_hash_.erase(key);
    if (raw_vnode == nullptr) {
        return nullptr;
    }
    open_hash_.insert(raw_vnode);
    // To have existed in the closed_hash_, this RefPtr must have been leaked.
    // See the complement of this adoption in Downgrade.
    return fbl::internal::MakeRefPtrNoAdopt(raw_vnode);
}

} // namespace blobfs