Lines matching refs: vnode

Each entry below gives the source line number, the matching source line, and the enclosing function; a trailing "argument" marks a line where vnode is declared as a function parameter, and "local" a line where it is declared as a local variable of that function.

16 static void afs_next_locker(struct afs_vnode *vnode, int error);
25 static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state) in afs_set_lock_state() argument
27 _debug("STATE %u -> %u", vnode->lock_state, state); in afs_set_lock_state()
28 vnode->lock_state = state; in afs_set_lock_state()
36 void afs_lock_may_be_available(struct afs_vnode *vnode) in afs_lock_may_be_available() argument
38 _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); in afs_lock_may_be_available()
40 spin_lock(&vnode->lock); in afs_lock_may_be_available()
41 if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB) in afs_lock_may_be_available()
42 afs_next_locker(vnode, 0); in afs_lock_may_be_available()
43 trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0); in afs_lock_may_be_available()
44 spin_unlock(&vnode->lock); in afs_lock_may_be_available()
51 static void afs_schedule_lock_extension(struct afs_vnode *vnode) in afs_schedule_lock_extension() argument
56 expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2); in afs_schedule_lock_extension()
64 queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j); in afs_schedule_lock_extension()
74 struct afs_vnode *vnode = op->file[0].vnode; in afs_lock_op_done() local
77 spin_lock(&vnode->lock); in afs_lock_op_done()
78 trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0); in afs_lock_op_done()
79 vnode->locked_at = call->issue_time; in afs_lock_op_done()
80 afs_schedule_lock_extension(vnode); in afs_lock_op_done()
81 spin_unlock(&vnode->lock); in afs_lock_op_done()
90 static void afs_grant_locks(struct afs_vnode *vnode) in afs_grant_locks() argument
93 bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE); in afs_grant_locks()
95 list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) { in afs_grant_locks()
99 list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks); in afs_grant_locks()
101 trace_afs_flock_op(vnode, p, afs_flock_op_grant); in afs_grant_locks()
111 static void afs_next_locker(struct afs_vnode *vnode, int error) in afs_next_locker() argument
114 struct key *key = vnode->lock_key; in afs_next_locker()
119 if (vnode->lock_type == AFS_LOCK_WRITE) in afs_next_locker()
122 list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) { in afs_next_locker()
137 vnode->lock_key = NULL; in afs_next_locker()
141 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING); in afs_next_locker()
143 trace_afs_flock_op(vnode, next, afs_flock_op_wake); in afs_next_locker()
146 afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE); in afs_next_locker()
147 trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0); in afs_next_locker()
157 static void afs_kill_lockers_enoent(struct afs_vnode *vnode) in afs_kill_lockers_enoent() argument
161 afs_set_lock_state(vnode, AFS_VNODE_LOCK_DELETED); in afs_kill_lockers_enoent()
163 while (!list_empty(&vnode->pending_locks)) { in afs_kill_lockers_enoent()
164 p = list_entry(vnode->pending_locks.next, in afs_kill_lockers_enoent()
171 key_put(vnode->lock_key); in afs_kill_lockers_enoent()
172 vnode->lock_key = NULL; in afs_kill_lockers_enoent()
191 static int afs_set_lock(struct afs_vnode *vnode, struct key *key, in afs_set_lock() argument
197 vnode->volume->name, in afs_set_lock()
198 vnode->fid.vid, in afs_set_lock()
199 vnode->fid.vnode, in afs_set_lock()
200 vnode->fid.unique, in afs_set_lock()
203 op = afs_alloc_operation(key, vnode->volume); in afs_set_lock()
207 afs_op_set_vnode(op, 0, vnode); in afs_set_lock()
223 static int afs_extend_lock(struct afs_vnode *vnode, struct key *key) in afs_extend_lock() argument
228 vnode->volume->name, in afs_extend_lock()
229 vnode->fid.vid, in afs_extend_lock()
230 vnode->fid.vnode, in afs_extend_lock()
231 vnode->fid.unique, in afs_extend_lock()
234 op = afs_alloc_operation(key, vnode->volume); in afs_extend_lock()
238 afs_op_set_vnode(op, 0, vnode); in afs_extend_lock()
254 static int afs_release_lock(struct afs_vnode *vnode, struct key *key) in afs_release_lock() argument
259 vnode->volume->name, in afs_release_lock()
260 vnode->fid.vid, in afs_release_lock()
261 vnode->fid.vnode, in afs_release_lock()
262 vnode->fid.unique, in afs_release_lock()
265 op = afs_alloc_operation(key, vnode->volume); in afs_release_lock()
269 afs_op_set_vnode(op, 0, vnode); in afs_release_lock()
283 struct afs_vnode *vnode = in afs_lock_work() local
288 _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); in afs_lock_work()
290 spin_lock(&vnode->lock); in afs_lock_work()
293 _debug("wstate %u for %p", vnode->lock_state, vnode); in afs_lock_work()
294 switch (vnode->lock_state) { in afs_lock_work()
296 afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING); in afs_lock_work()
297 trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0); in afs_lock_work()
298 spin_unlock(&vnode->lock); in afs_lock_work()
302 ret = afs_release_lock(vnode, vnode->lock_key); in afs_lock_work()
303 if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) { in afs_lock_work()
304 trace_afs_flock_ev(vnode, NULL, afs_flock_release_fail, in afs_lock_work()
308 vnode->fid.vid, vnode->fid.vnode, ret); in afs_lock_work()
311 spin_lock(&vnode->lock); in afs_lock_work()
313 afs_kill_lockers_enoent(vnode); in afs_lock_work()
315 afs_next_locker(vnode, 0); in afs_lock_work()
316 spin_unlock(&vnode->lock); in afs_lock_work()
325 ASSERT(!list_empty(&vnode->granted_locks)); in afs_lock_work()
327 key = key_get(vnode->lock_key); in afs_lock_work()
328 afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING); in afs_lock_work()
329 trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0); in afs_lock_work()
330 spin_unlock(&vnode->lock); in afs_lock_work()
332 ret = afs_extend_lock(vnode, key); /* RPC */ in afs_lock_work()
336 trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail, in afs_lock_work()
339 vnode->fid.vid, vnode->fid.vnode, ret); in afs_lock_work()
342 spin_lock(&vnode->lock); in afs_lock_work()
345 afs_kill_lockers_enoent(vnode); in afs_lock_work()
346 spin_unlock(&vnode->lock); in afs_lock_work()
350 if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING) in afs_lock_work()
352 afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED); in afs_lock_work()
355 queue_delayed_work(afs_lock_manager, &vnode->lock_work, in afs_lock_work()
357 spin_unlock(&vnode->lock); in afs_lock_work()
369 afs_next_locker(vnode, 0); in afs_lock_work()
370 spin_unlock(&vnode->lock); in afs_lock_work()
374 afs_kill_lockers_enoent(vnode); in afs_lock_work()
375 spin_unlock(&vnode->lock); in afs_lock_work()
380 spin_unlock(&vnode->lock); in afs_lock_work()
392 static void afs_defer_unlock(struct afs_vnode *vnode) in afs_defer_unlock() argument
394 _enter("%u", vnode->lock_state); in afs_defer_unlock()
396 if (list_empty(&vnode->granted_locks) && in afs_defer_unlock()
397 (vnode->lock_state == AFS_VNODE_LOCK_GRANTED || in afs_defer_unlock()
398 vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) { in afs_defer_unlock()
399 cancel_delayed_work(&vnode->lock_work); in afs_defer_unlock()
401 afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK); in afs_defer_unlock()
402 trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0); in afs_defer_unlock()
403 queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0); in afs_defer_unlock()
411 static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key, in afs_do_setlk_check() argument
420 ret = afs_validate(vnode, key); in afs_do_setlk_check()
427 ret = afs_check_permit(vnode, key, &access); in afs_do_setlk_check()
454 struct afs_vnode *vnode = AFS_FS_I(inode); in afs_do_setlk() local
465 vnode->fid.vid, vnode->fid.vnode, in afs_do_setlk()
477 ret = afs_do_setlk_check(vnode, key, mode, type); in afs_do_setlk()
481 trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock); in afs_do_setlk()
497 spin_lock(&vnode->lock); in afs_do_setlk()
498 list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks); in afs_do_setlk()
501 if (vnode->lock_state == AFS_VNODE_LOCK_DELETED) in afs_do_setlk()
508 _debug("try %u", vnode->lock_state); in afs_do_setlk()
509 if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) { in afs_do_setlk()
512 list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); in afs_do_setlk()
517 if (vnode->lock_type == AFS_LOCK_WRITE) { in afs_do_setlk()
519 list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); in afs_do_setlk()
525 if (vnode->lock_state == AFS_VNODE_LOCK_NONE && in afs_do_setlk()
529 if (vnode->status.lock_count == -1) in afs_do_setlk()
532 if (vnode->status.lock_count != 0) in afs_do_setlk()
537 if (vnode->lock_state != AFS_VNODE_LOCK_NONE) in afs_do_setlk()
549 trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0); in afs_do_setlk()
550 vnode->lock_key = key_get(key); in afs_do_setlk()
551 vnode->lock_type = type; in afs_do_setlk()
552 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING); in afs_do_setlk()
553 spin_unlock(&vnode->lock); in afs_do_setlk()
555 ret = afs_set_lock(vnode, key, type); /* RPC */ in afs_do_setlk()
557 spin_lock(&vnode->lock); in afs_do_setlk()
565 trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret); in afs_do_setlk()
567 afs_next_locker(vnode, ret); in afs_do_setlk()
572 trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret); in afs_do_setlk()
574 afs_kill_lockers_enoent(vnode); in afs_do_setlk()
579 trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret); in afs_do_setlk()
581 afs_next_locker(vnode, 0); in afs_do_setlk()
589 ASSERT(list_empty(&vnode->granted_locks)); in afs_do_setlk()
590 ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link); in afs_do_setlk()
594 afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED); in afs_do_setlk()
595 trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type); in afs_do_setlk()
596 afs_grant_locks(vnode); in afs_do_setlk()
601 spin_unlock(&vnode->lock); in afs_do_setlk()
608 trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0); in afs_do_setlk()
610 trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret); in afs_do_setlk()
618 afs_validate(vnode, key); in afs_do_setlk()
625 afs_next_locker(vnode, 0); in afs_do_setlk()
630 afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB); in afs_do_setlk()
631 trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret); in afs_do_setlk()
632 queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5); in afs_do_setlk()
640 spin_unlock(&vnode->lock); in afs_do_setlk()
642 trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0); in afs_do_setlk()
645 trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret); in afs_do_setlk()
648 spin_lock(&vnode->lock); in afs_do_setlk()
660 ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB); in afs_do_setlk()
661 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING); in afs_do_setlk()
671 spin_unlock(&vnode->lock); in afs_do_setlk()
687 spin_lock(&vnode->lock); in afs_do_setlk()
689 afs_defer_unlock(vnode); in afs_do_setlk()
692 spin_unlock(&vnode->lock); in afs_do_setlk()
703 struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); in afs_do_unlk() local
706 _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, in afs_do_unlk()
709 trace_afs_flock_op(vnode, fl, afs_flock_op_unlock); in afs_do_unlk()
715 _leave(" = %d [%u]", ret, vnode->lock_state); in afs_do_unlk()
724 struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); in afs_do_getlk() local
730 if (vnode->lock_state == AFS_VNODE_LOCK_DELETED) in afs_do_getlk()
739 ret = afs_fetch_status(vnode, key, false, NULL); in afs_do_getlk()
743 lock_count = READ_ONCE(vnode->status.lock_count); in afs_do_getlk()
766 struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); in afs_lock() local
771 vnode->fid.vid, vnode->fid.vnode, cmd, in afs_lock()
779 trace_afs_flock_op(vnode, fl, afs_flock_op_lock); in afs_lock()
792 trace_afs_flock_op(vnode, fl, op); in afs_lock()
801 struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); in afs_flock() local
806 vnode->fid.vid, vnode->fid.vnode, cmd, in afs_flock()
820 trace_afs_flock_op(vnode, fl, afs_flock_op_flock); in afs_flock()
834 trace_afs_flock_op(vnode, fl, op); in afs_flock()
846 struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->c.flc_file)); in afs_fl_copy_lock() local
852 spin_lock(&vnode->lock); in afs_fl_copy_lock()
853 trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock); in afs_fl_copy_lock()
855 spin_unlock(&vnode->lock); in afs_fl_copy_lock()
864 struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->c.flc_file)); in afs_fl_release_private() local
868 spin_lock(&vnode->lock); in afs_fl_release_private()
870 trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock); in afs_fl_release_private()
872 if (list_empty(&vnode->granted_locks)) in afs_fl_release_private()
873 afs_defer_unlock(vnode); in afs_fl_release_private()
875 _debug("state %u for %p", vnode->lock_state, vnode); in afs_fl_release_private()
876 spin_unlock(&vnode->lock); in afs_fl_release_private()
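
The references above all fall in the AFS file-locking code (the listing appears to be drawn from fs/afs/flock.c in the Linux kernel). A pattern runs through every entry: vnode->lock_state is only read or changed while holding the vnode->lock spinlock, and transitions such as WAITING_FOR_CB -> SETTING or GRANTED -> NEED_UNLOCK happen under that lock. Below is a minimal userspace sketch of that pattern, not the kernel code itself: the struct, the pthread mutex standing in for the spinlock, and the simplified state-advance logic are assumptions for illustration, and the state names merely mirror those seen in the listing.

/*
 * Minimal userspace sketch (an illustration, not the kernel implementation):
 * the lock-state field is only inspected or changed while holding the
 * per-vnode lock, and a callback break only matters if we were waiting for
 * one.  A pthread mutex stands in for spin_lock(&vnode->lock); the real
 * afs_next_locker() also walks the pending_locks list, which is omitted.
 */
#include <pthread.h>
#include <stdio.h>

enum lock_state {
	LOCK_NONE,		/* mirrors AFS_VNODE_LOCK_NONE */
	LOCK_WAITING_FOR_CB,	/* mirrors AFS_VNODE_LOCK_WAITING_FOR_CB */
	LOCK_SETTING,		/* mirrors AFS_VNODE_LOCK_SETTING */
	LOCK_GRANTED,		/* mirrors AFS_VNODE_LOCK_GRANTED */
};

struct vnode_sketch {
	pthread_mutex_t lock;	/* stands in for the vnode->lock spinlock */
	enum lock_state lock_state;
};

/* Same shape as afs_set_lock_state(): log and record the transition. */
static void set_lock_state(struct vnode_sketch *v, enum lock_state state)
{
	printf("STATE %u -> %u\n", v->lock_state, state);
	v->lock_state = state;
}

/*
 * Same shape as afs_lock_may_be_available(): take the lock, and only if the
 * vnode was blocked waiting for a callback break let the next waiter try
 * again (the real code calls afs_next_locker() at this point).
 */
static void lock_may_be_available(struct vnode_sketch *v)
{
	pthread_mutex_lock(&v->lock);
	if (v->lock_state == LOCK_WAITING_FOR_CB)
		set_lock_state(v, LOCK_SETTING);
	pthread_mutex_unlock(&v->lock);
}

int main(void)
{
	struct vnode_sketch v = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.lock_state = LOCK_WAITING_FOR_CB,
	};

	lock_may_be_available(&v);	/* prints "STATE 1 -> 2" */
	return 0;
}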