/fs/bcachefs/

journal_reclaim.c
    90   u64 seq; in journal_dev_space_available() local
    107  seq++) { in journal_dev_space_available()
    431  pin->seq = seq; in bch2_journal_pin_set_locked()
    448  u64 seq = READ_ONCE(src->seq); in bch2_journal_pin_copy() local
    560  u64 seq; in journal_flush_pins() local
    848  u64 seq; in journal_pins_still_flushing() local
    962  seq = 0; in bch2_journal_flush_device_pins()
    967  seq = max(seq, journal_last_seq(j)); in bch2_journal_flush_device_pins()
    972  seq++; in bch2_journal_flush_device_pins()
    999  *seq = max(*seq, j->pin.front); in bch2_journal_seq_pins_to_text()
    [all …]
|
journal_reclaim.h
    24  return pin->seq != 0; in journal_pin_active()
    28  journal_seq_pin(struct journal *j, u64 seq) in journal_seq_pin() argument
    30  EBUG_ON(seq < j->pin.front || seq >= j->pin.back); in journal_seq_pin()
    32  return &j->pin.data[seq & j->pin.mask]; in journal_seq_pin()
    43  static inline void bch2_journal_pin_add(struct journal *j, u64 seq, in bch2_journal_pin_add() argument
    47  if (unlikely(!journal_pin_active(pin) || pin->seq > seq)) in bch2_journal_pin_add()
    48  bch2_journal_pin_set(j, seq, pin, flush_fn); in bch2_journal_pin_add()
    56  static inline void bch2_journal_pin_update(struct journal *j, u64 seq, in bch2_journal_pin_update() argument
    60  if (unlikely(!journal_pin_active(pin) || pin->seq < seq)) in bch2_journal_pin_update()
    61  bch2_journal_pin_set(j, seq, pin, flush_fn); in bch2_journal_pin_update()
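
The two helpers above only ever move a pin in one direction: bch2_journal_pin_add() repins to an older sequence number, bch2_journal_pin_update() to a newer one. A minimal usage sketch; bch2_journal_pin_drop() for release and the acceptability of a NULL flush callback are assumptions, not taken from the listing:

```c
#include "journal_reclaim.h"	/* bcachefs-internal header shown above */

/* Sketch only: a zeroed pin is inactive; NULL flush_fn assumed valid here. */
static struct journal_entry_pin example_pin;

static void example_hold_journal_entry(struct journal *j, u64 seq)
{
	/* keep reclaim from discarding entry `seq`; only ever moves the pin backwards */
	bch2_journal_pin_add(j, seq, &example_pin, NULL);
}

static void example_release_journal_entry(struct journal *j)
{
	bch2_journal_pin_drop(j, &example_pin);
}
```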
|
seqmutex.h
    9   u32 seq; member
    22  lock->seq++; in seqmutex_lock()
    27  u32 seq = lock->seq; in seqmutex_unlock() local
    29  return seq; in seqmutex_unlock()
    32  static inline bool seqmutex_relock(struct seqmutex *lock, u32 seq) in seqmutex_relock() argument
    34  if (lock->seq != seq || !mutex_trylock(&lock->lock)) in seqmutex_relock()
    37  if (lock->seq != seq) { in seqmutex_relock()
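
seqmutex.h implements a mutex whose sequence counter is bumped on every acquisition, so a caller can drop it and later detect whether anyone else took it in between. A minimal sketch of that pattern using only the helpers shown above:

```c
#include "seqmutex.h"	/* bcachefs-local header shown above */

/* Walk some lock-protected state, drop the mutex mid-way, then revalidate. */
static void example_walk(struct seqmutex *lock)
{
	u32 seq;

	seqmutex_lock(lock);
	/* ... inspect part of the protected structure ... */
	seq = seqmutex_unlock(lock);	/* returns the sequence at unlock time */

	/* ... sleep, allocate, or otherwise run without the mutex held ... */

	if (!seqmutex_relock(lock, seq))
		return;			/* someone else took the lock; cached state is stale */

	/* sequence unchanged: safe to continue from where we left off */
	seqmutex_unlock(lock);
}
```

Note that seqmutex_relock() re-checks the sequence after mutex_trylock() succeeds, so a racing acquisition between the compare and the trylock is still caught.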
|
journal_io.c
    1291  BUG_ON(seq > le64_to_cpu(i->j.seq)); in bch2_journal_check_for_missing()
    1293  while (seq < le64_to_cpu(i->j.seq)) { in bch2_journal_check_for_missing()
    1294  while (seq < le64_to_cpu(i->j.seq) && in bch2_journal_check_for_missing()
    1298  if (seq == le64_to_cpu(i->j.seq)) in bch2_journal_check_for_missing()
    1303  while (seq < le64_to_cpu(i->j.seq) && in bch2_journal_check_for_missing()
    1329  seq++; in bch2_journal_check_for_missing()
    1346  u64 seq; in bch2_journal_read() local
    1457  seq = le64_to_cpu(i->j.seq); in bch2_journal_read()
    1699  u64 seq = le64_to_cpu(w->data->seq); in CLOSURE_CALLBACK() local
    1947  u64 seq = le64_to_cpu(jset->seq); in bch2_journal_write_prep() local
    [all …]
|
journal.h
    133  return atomic64_read(&j->seq); in journal_cur_seq()
    162  union journal_res_state s, u64 seq) in journal_state_seq_count() argument
    254  if (j->seq != j->last_seq) in journal_entry_empty()
    285  unsigned idx = seq & JOURNAL_STATE_BUF_MASK; in __bch2_journal_buf_put()
    290  bch2_journal_buf_put_final(j, seq); in __bch2_journal_buf_put()
    295  unsigned idx = seq & JOURNAL_STATE_BUF_MASK; in bch2_journal_buf_put()
    301  bch2_journal_buf_put_final(j, seq); in bch2_journal_buf_put()
    324  bch2_journal_buf_put(j, res->seq); in bch2_journal_res_put()
    382  res->seq = journal_cur_seq(j); in journal_res_get_fast()
    383  res->seq -= (res->seq - old.idx) & JOURNAL_STATE_BUF_MASK; in journal_res_get_fast()
    [all …]
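
__bch2_journal_buf_put() and journal_res_get_fast() both map a journal sequence number to an in-flight buffer by masking its low bits, which only works because the number of buffers is a power of two. A tiny illustrative sketch; EXAMPLE_BUF_NR and example_buf_idx() are made-up names, not from the source:

```c
#include <linux/types.h>

#define EXAMPLE_BUF_NR		4U			/* must be a power of two */
#define EXAMPLE_BUF_MASK	(EXAMPLE_BUF_NR - 1)

static inline unsigned example_buf_idx(u64 seq)
{
	/* consecutive journal sequence numbers cycle through the buffers */
	return seq & EXAMPLE_BUF_MASK;
}
```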
|
journal.c
    26    return seq > j->seq_ondisk; in journal_seq_unwritten()
    98    seq++) in bch2_journal_bufs_to_text()
    194   seq++) { in bch2_journal_do_writes()
    841   seq = max(seq, journal_last_unwritten_seq(j)); in bch2_journal_flush_seq_async()
    862   seq = res.seq; in bch2_journal_flush_seq_async()
    886   seq++; in bch2_journal_flush_seq_async()
    1073  seq++) { in __bch2_next_write_buffer_flush_journal_buf()
    1428  u64 seq; in bch2_journal_writing_to_device() local
    1433  seq++) { in bch2_journal_writing_to_device()
    1529  u64 seq; in bch2_fs_journal_start() local
    [all …]
|
rcu_pending.c
    90   rcu_gp_poll_state_t seq; member
    96   rcu_gp_poll_state_t seq; member
    175  if (i->head && __poll_state_synchronize_rcu(p->parent->srcu, i->seq)) in merge_expired_lists()
    330  get_object_radix(struct rcu_pending_pcpu *p, rcu_gp_poll_state_t seq) in get_object_radix() argument
    333  if (rcu_gp_poll_cookie_eq(objs->seq, seq)) in get_object_radix()
    336  if (darray_push_gfp(&p->objs, ((struct rcu_pending_seq) { .seq = seq }), GFP_ATOMIC)) in get_object_radix()
    378  if (rcu_gp_poll_cookie_eq(i->seq, seq)) { in rcu_pending_enqueue_list()
    387  i->seq = seq; in rcu_pending_enqueue_list()
    450  objs = get_object_radix(p, seq); in __rcu_pending_enqueue()
    477  start_gp = rcu_pending_enqueue_list(p, seq, head, ptr, &flags); in __rcu_pending_enqueue()
    [all …]
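
rcu_pending keys each batch of deferred objects by an RCU grace-period poll cookie and processes a batch once that grace period has elapsed. A simplified sketch of the idea using the plain-RCU polling API; the real code is generic over RCU/SRCU flavours and keeps per-CPU structures, and all names here are made up:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Each deferred object is stamped with a grace-period cookie when queued. */
struct example_pending {
	struct example_pending	*next;
	unsigned long		gp_cookie;
};

static void example_defer(struct example_pending **list, struct example_pending *obj)
{
	obj->gp_cookie = start_poll_synchronize_rcu();	/* start/record a grace period */
	obj->next = *list;
	*list = obj;
}

static void example_reap(struct example_pending **list)
{
	struct example_pending **p = list;

	while (*p) {
		if (poll_state_synchronize_rcu((*p)->gp_cookie)) {
			/* grace period elapsed: no reader can still see this object */
			struct example_pending *done = *p;

			*p = done->next;
			kfree(done);
		} else {
			p = &(*p)->next;
		}
	}
}
```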
|
six.h
    138  u32 seq; member
    195  return lock->seq; in six_lock_seq()
    277  unsigned seq, unsigned long ip);
    289  unsigned seq) in six_relock_type() argument
    291  return six_relock_ip(lock, type, seq, _THIS_IP_); in six_relock_type()
    341  static inline bool six_relock_ip_##type(struct six_lock *lock, u32 seq, unsigned long ip)\
    343  return six_relock_ip(lock, SIX_LOCK_##type, seq, ip); \
    346  static inline bool six_relock_##type(struct six_lock *lock, u32 seq) \
    348  return six_relock_ip(lock, SIX_LOCK_##type, seq, _THIS_IP_); \
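
The six_relock_*() helpers let a caller record a lock's sequence number, drop the lock, and later re-take it only if no conflicting acquisition happened in between. A hedged sketch of that pattern; the single-argument six_trylock_read()/six_unlock_read() forms are assumed from the same macro family:

```c
#include "six.h"	/* bcachefs six-state lock implementation shown above */

static bool example_peek_then_revalidate(struct six_lock *lock)
{
	u32 seq;

	if (!six_trylock_read(lock))
		return false;
	/* ... read some state guarded by the lock ... */
	seq = six_lock_seq(lock);		/* capture while still held */
	six_unlock_read(lock);

	/* ... do work that must not hold the lock ... */

	if (!six_relock_read(lock, seq))
		return false;			/* seq changed; cached state may be stale */

	/* cached state is still valid */
	six_unlock_read(lock);
	return true;
}
```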
|
/fs/

d_path.c
    167  seq = 0; in prepend_path()
    173  if (!(seq & 1)) in prepend_path()
    176  seq = 1; in prepend_path()
    241  unsigned seq; in get_fs_root_rcu() local
    244  seq = read_seqbegin(&fs->seq); in get_fs_root_rcu()
    246  } while (read_seqretry(&fs->seq, seq)); in get_fs_root_rcu()
    335  int seq = 0; in __dentry_path() local
    353  seq = 1; in __dentry_path()
    385  unsigned seq; in get_fs_root_and_pwd_rcu() local
    388  seq = read_seqbegin(&fs->seq); in get_fs_root_and_pwd_rcu()
    [all …]
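
get_fs_root_rcu() and get_fs_root_and_pwd_rcu() are plain seqlock read-retry loops against fs->seq. A minimal sketch of the same pattern; since no reference is taken, the copied path is only usable inside an RCU read-side critical section:

```c
#include <linux/fs_struct.h>
#include <linux/path.h>
#include <linux/seqlock.h>

/* Lockless snapshot of fs->root, retried if a writer raced with us. */
static void example_get_root(struct fs_struct *fs, struct path *root)
{
	unsigned seq;

	do {
		seq = read_seqbegin(&fs->seq);
		*root = fs->root;		/* no reference taken: RCU-only use */
	} while (read_seqretry(&fs->seq, seq));
}
```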
|
fs_struct.c
    20   write_seqlock(&fs->seq); in set_fs_root()
    23   write_sequnlock(&fs->seq); in set_fs_root()
    37   write_seqlock(&fs->seq); in set_fs_pwd()
    40   write_sequnlock(&fs->seq); in set_fs_pwd()
    66   write_seqlock(&fs->seq); in chroot_fs_refs()
    73   write_sequnlock(&fs->seq); in chroot_fs_refs()
    96   read_seqlock_excl(&fs->seq); in exit_fs()
    113  seqlock_init(&fs->seq); in copy_fs_struct()
    116  read_seqlock_excl(&old->seq); in copy_fs_struct()
    136  read_seqlock_excl(&fs->seq); in unshare_fs_struct()
    [all …]
|
namespace.c
    1014  u64 seq; in check_anonymous_mnt() local
    1020  return !seq || (seq == current->nsproxy->mnt_ns->seq); in check_anonymous_mnt()
    5342  seq->buf[seq->count] = '\0'; in statmount_mnt_root()
    5391  seq->buf[seq->count] = '\0'; in statmount_sb_source()
    5430  memmove(seq->buf + start, seq->buf + start + 1, in statmount_mnt_opts()
    5445  buf_end = seq->buf + seq->count; in statmount_opt_process()
    5462  seq->count = dst - 1 - seq->buf; in statmount_opt_process()
    5551  struct seq_file *seq = &s->seq; in statmount_string() local
    5625  seq->buf[seq->count++] = '\0'; in statmount_string()
    5634  struct seq_file *seq = &s->seq; in copy_statmount_to_user() local
    [all …]
|
/fs/nfsd/

stats.c
    30  static int nfsd_show(struct seq_file *seq, void *v) in nfsd_show() argument
    32  struct net *net = pde_data(file_inode(seq->file)); in nfsd_show()
    36  seq_printf(seq, "rc %lld %lld %lld\nfh %lld 0 0 0 0\nio %lld %lld\n", in nfsd_show()
    45  seq_printf(seq, "th %u 0", atomic_read(&nfsd_th_cnt)); in nfsd_show()
    49  seq_puts(seq, " 0.000"); in nfsd_show()
    52  seq_puts(seq, "\nra 0 0 0 0 0 0 0 0 0 0 0 0\n"); in nfsd_show()
    55  svc_seq_show(seq, &nn->nfsd_svcstats); in nfsd_show()
    60  seq_printf(seq, "proc4ops %u", LAST_NFS4_OP + 1); in nfsd_show()
    62  seq_printf(seq, " %lld", in nfsd_show()
    65  seq_printf(seq, "\nwdeleg_getattr %lld", in nfsd_show()
    [all …]
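
nfsd_show() is an ordinary seq_file ->show() callback wired to a procfs entry. A self-contained sketch of the same shape, with a made-up file name and counters:

```c
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* ->show() formats the whole (small) virtual file in one go. */
static int example_stats_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "rc 0 0 0\n");
	seq_printf(seq, "th %u 0\n", 8);
	return 0;
}

static void example_stats_register(void)
{
	/* single-show proc file: no custom seq_operations needed */
	proc_create_single("example_stats", 0444, NULL, example_stats_show);
}
```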
|
/fs/ocfs2/cluster/

netdebug.c
    109  static int nst_seq_show(struct seq_file *seq, void *v) in nst_seq_show() argument
    126  seq_printf(seq, "%p:\n" in nst_seq_show()
    178  struct seq_file *seq = file->private_data; in nst_fop_release() local
    236  struct o2net_sock_debug *sd = seq->private; in sc_seq_start()
    248  struct o2net_sock_debug *sd = seq->private; in sc_seq_next()
    315  seq_printf(seq, "%p:\n" in sc_show_sock_container()
    349  struct o2net_sock_debug *sd = seq->private; in sc_seq_show()
    357  sc_show_sock_container(seq, sc); in sc_seq_show()
    359  sc_show_sock_stats(seq, sc); in sc_seq_show()
    403  struct seq_file *seq = file->private_data; in sc_fop_release() local
    [all …]
|
/fs/dlm/

debug_fs.c
    386  print_format1(rsb, seq); in table_seq_show()
    388  print_format2(rsb, seq); in table_seq_show()
    390  print_format3(rsb, seq); in table_seq_show()
    392  print_format4(rsb, seq); in table_seq_show()
    475  struct seq_file *seq; in table_open1() local
    482  seq = file->private_data; in table_open1()
    489  struct seq_file *seq; in table_open2() local
    496  seq = file->private_data; in table_open2()
    532  struct seq_file *seq; in table_open3() local
    539  seq = file->private_data; in table_open3()
    [all …]
|
rcom.c
    32   uint64_t seq) in _create_rcom() argument
    52   uint64_t seq) in create_rcom() argument
    172  uint64_t seq) in dlm_rcom_status() argument
    189  seq); in dlm_rcom_status()
    225  uint64_t seq) in receive_rcom_status() argument
    318  &rc, &mh, seq); in dlm_rcom_names()
    349  &rc, &mh, seq); in receive_rcom_names()
    368  &rc, &mh, seq); in dlm_send_rcom_lookup()
    396  seq); in receive_rcom_lookup()
    459  seq); in dlm_send_rcom_lock()
    [all …]
|
recover.c
    96   int save_slots, uint64_t seq) in wait_status_all() argument
    129  uint32_t status_flags, uint64_t seq) in wait_status_low() argument
    160  error = wait_status_all(ls, status, 0, seq); in wait_status()
    203  DLM_RSF_NEED_SLOTS, seq); in dlm_recover_members_wait()
    215  return wait_status(ls, DLM_RS_DIR, seq); in dlm_recover_directory_wait()
    220  return wait_status(ls, DLM_RS_LOCKS, seq); in dlm_recover_locks_wait()
    225  return wait_status(ls, DLM_RS_DONE, seq); in dlm_recover_done_wait()
    549  error = recover_master(r, &count, seq); in dlm_recover_masters()
    619  uint64_t seq) in recover_locks_queue() argument
    625  error = dlm_send_rcom_lock(r, lkb, seq); in recover_locks_queue()
    [all …]
|
recoverd.c
    99   if (ls->ls_recover_seq == seq) { in enable_locking()
    180  error = dlm_recover_members_wait(ls, rv->seq); in ls_recover()
    194  error = dlm_recover_directory(ls, rv->seq); in ls_recover()
    203  error = dlm_recover_directory_wait(ls, rv->seq); in ls_recover()
    255  error = dlm_recover_locks_wait(ls, rv->seq); in ls_recover()
    279  error = dlm_recover_locks_wait(ls, rv->seq); in ls_recover()
    298  error = dlm_recover_done_wait(ls, rv->seq); in ls_recover()
    308  error = enable_locking(ls, rv->seq); in ls_recover()
    355  if (rv && ls->ls_recover_seq == rv->seq) in do_ls_recovery()
    373  __func__, (unsigned long long)rv->seq); in do_ls_recovery()
    [all …]
|
recover.h
    18  int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq);
    19  int dlm_recover_directory_wait(struct dlm_ls *ls, uint64_t seq);
    20  int dlm_recover_locks_wait(struct dlm_ls *ls, uint64_t seq);
    21  int dlm_recover_done_wait(struct dlm_ls *ls, uint64_t seq);
    22  int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq,
    25  int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq,
|
midcomms.c
    204  uint32_t seq; member
    456  if (before(mh->seq, seq)) { in dlm_receive_ack()
    467  if (before(mh->seq, seq)) { in dlm_receive_ack()
    520  uint32_t seq) in dlm_midcomms_receive_buffer() argument
    527  is_expected_seq = (oval == seq); in dlm_midcomms_receive_buffer()
    599  if (seq < oval) in dlm_midcomms_receive_buffer()
    603  seq, oval, node->nodeid); in dlm_midcomms_receive_buffer()
    661  uint32_t seq; in dlm_midcomms_receive_buffer_3_2() local
    773  dlm_receive_ack(node, seq); in dlm_midcomms_receive_buffer_3_2()
    948  mh->seq, node->nodeid); in dlm_midcomms_unack_msg_resend()
    [all …]
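
dlm_receive_ack() compares 32-bit message sequence numbers with before(), which only makes sense if the comparison is wrap-safe. A sketch of the usual serial-number-arithmetic form such a helper takes; the exact definition lives in midcomms.c and is assumed, not quoted:

```c
#include <linux/types.h>

/* Wrap-safe "a is earlier than b" for 32-bit sequence numbers. */
static inline bool example_seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}
```

The signed subtraction keeps the comparison correct across wrap-around as long as the two sequence numbers are within 2^31 of each other.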
|
/fs/hfsplus/

options.c
    146  int hfsplus_show_options(struct seq_file *seq, struct dentry *root) in hfsplus_show_options() argument
    151  seq_show_option_n(seq, "creator", (char *)&sbi->creator, 4); in hfsplus_show_options()
    153  seq_show_option_n(seq, "type", (char *)&sbi->type, 4); in hfsplus_show_options()
    154  seq_printf(seq, ",umask=%o,uid=%u,gid=%u", sbi->umask, in hfsplus_show_options()
    158  seq_printf(seq, ",part=%u", sbi->part); in hfsplus_show_options()
    160  seq_printf(seq, ",session=%u", sbi->session); in hfsplus_show_options()
    162  seq_printf(seq, ",nls=%s", sbi->nls->charset); in hfsplus_show_options()
    164  seq_puts(seq, ",nodecompose"); in hfsplus_show_options()
    166  seq_puts(seq, ",nobarrier"); in hfsplus_show_options()
|
/fs/f2fs/

sysfs.c
    1605  seq_printf(seq, "%-10d", i); in segment_info_seq_show()
    1608  seq_putc(seq, '\n'); in segment_info_seq_show()
    1610  seq_putc(seq, ' '); in segment_info_seq_show()
    1631  seq_printf(seq, "%-10d", i); in segment_bits_seq_show()
    1636  seq_putc(seq, '\n'); in segment_bits_seq_show()
    1653  seq_printf(seq, "%-10d", i); in victim_bits_seq_show()
    1656  seq_putc(seq, '\n'); in victim_bits_seq_show()
    1658  seq_putc(seq, ' '); in victim_bits_seq_show()
    1690  seq_puts(seq, " ."); in discard_plist_seq_show()
    1692  seq_putc(seq, '\n'); in discard_plist_seq_show()
    [all …]
|
/fs/btrfs/

tree-mod-log.c
    18    u64 seq; member
    75    if (!elem->seq) { in btrfs_get_tree_mod_seq()
    82    return elem->seq; in btrfs_get_tree_mod_seq()
    100   elem->seq = 0; in btrfs_put_tree_mod_seq()
    165   else if (cur->seq < tm->seq) in tree_mod_log_insert()
    167   else if (cur->seq > tm->seq) in tree_mod_log_insert()
    533   BUG_ON(found->seq > cur->seq); in __tree_mod_log_search()
    539   BUG_ON(found->seq < cur->seq); in __tree_mod_log_search()
    1071  ASSERT(tm2 == tm || tm2->seq > tm->seq); in btrfs_get_old_root()
    1072  if (!tm2 || tm2->seq < tm->seq) { in btrfs_get_old_root()
    [all …]
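
tree_mod_log_insert() keeps tree-mod-log entries in an rbtree ordered by their 64-bit seq, branching on cur->seq versus tm->seq. A generic sketch of that kind of insertion with stand-in type and function names:

```c
#include <linux/rbtree.h>
#include <linux/types.h>

struct example_mod_entry {
	struct rb_node	node;
	u64		seq;
};

/* Insert an entry into an rbtree ordered by seq; reject duplicate seqs. */
static bool example_mod_log_insert(struct rb_root *root, struct example_mod_entry *tm)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct example_mod_entry *cur =
			rb_entry(*new, struct example_mod_entry, node);

		parent = *new;
		if (tm->seq < cur->seq)
			new = &(*new)->rb_left;
		else if (tm->seq > cur->seq)
			new = &(*new)->rb_right;
		else
			return false;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, root);
	return true;
}
```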
|
/fs/ext4/

mmp.c
    143  u32 seq = 0; in kmmpd() local
    171  if (++seq > EXT4_MMP_SEQ_MAX) in kmmpd()
    172  seq = 1; in kmmpd()
    174  mmp->mmp_seq = cpu_to_le32(seq); in kmmpd()
    285  u32 seq; in ext4_multi_mount_protect() local
    313  seq = le32_to_cpu(mmp->mmp_seq); in ext4_multi_mount_protect()
    314  if (seq == EXT4_MMP_SEQ_CLEAN) in ext4_multi_mount_protect()
    317  if (seq == EXT4_MMP_SEQ_FSCK) { in ext4_multi_mount_protect()
    341  if (seq != le32_to_cpu(mmp->mmp_seq)) { in ext4_multi_mount_protect()
    352  seq = mmp_new_seq(); in ext4_multi_mount_protect()
    [all …]
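
ext4_multi_mount_protect() reads mmp_seq, treats the CLEAN/FSCK sentinels specially, waits, and re-reads; if the sequence moved, another node is updating the MMP block. A heavily hedged sketch of that flow: read_mmp(), mmp_wait() and write_mmp_seq() are hypothetical stand-ins for the static helpers in mmp.c, and the return values are illustrative only.

```c
/* Hypothetical helpers stand in for the static functions in fs/ext4/mmp.c. */
static int example_mmp_protect(struct super_block *sb)
{
	u32 seq = read_mmp(sb);			/* le32_to_cpu(mmp->mmp_seq) */

	if (seq == EXT4_MMP_SEQ_CLEAN)
		return 0;			/* unmounted cleanly, safe to proceed */
	if (seq == EXT4_MMP_SEQ_FSCK)
		return -EBUSY;			/* fsck holds the filesystem */

	mmp_wait(sb);				/* sleep past the updater's interval */
	if (seq != read_mmp(sb))
		return -EBUSY;			/* seq moved: another node is alive */

	write_mmp_seq(sb, mmp_new_seq());	/* claim the block with a fresh random seq */
	return 0;
}
```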
|
/fs/ceph/

snap.c
    365  realm->cached_context->seq == realm->seq && in build_snap_context()
    367  realm->cached_context->seq >= parent->cached_context->seq)) { in build_snap_context()
    385  snapc->seq = realm->seq; in build_snap_context()
    396  if (parent->cached_context->seq > snapc->seq) in build_snap_context()
    397  snapc->seq = parent->cached_context->seq; in build_snap_context()
    513  return n->snaps[0] > o->seq; in has_new_snaps()
    827  if (le64_to_cpu(ri->seq) > realm->seq) { in ceph_update_snap_trace()
    829  realm, realm->seq, le64_to_cpu(ri->seq)); in ceph_update_snap_trace()
    831  realm->seq = le64_to_cpu(ri->seq); in ceph_update_snap_trace()
    853  realm->seq); in ceph_update_snap_trace()
    [all …]
|
/fs/gfs2/

glock.c
    1453  if (seq) { in gfs2_print_dbg()
    1454  seq_vprintf(seq, fmt, args); in gfs2_print_dbg()
    2172  gfs2_dump_glock(seq, gl, fsid); in dump_glock()
    2413  dump_holder(seq, gh, fs_id_buf); in gfs2_dump_glock()
    2482  seq_printf(seq, " %15u", i); in gfs2_sbstats_seq_show()
    2487  seq_putc(seq, '\n'); in gfs2_sbstats_seq_show()
    2602  dump_glock(seq, iter_ptr, false); in gfs2_glock_seq_show()
    2661  if (seq->buf) in __gfs2_glocks_open()
    2662  seq->size = GFS2_SEQ_GOODSIZE; in __gfs2_glocks_open()
    2821  seq_printf(seq, "%d %u %u/%llx\n", in gfs2_glockfd_seq_show_flock()
    [all …]
|