/kernel/
cred.c
      178  if (!new)                                    in cred_alloc_blank()
      185  return new;                                  in cred_alloc_blank()
      213  if (!new)                                    in prepare_creds()
      238  new->ucounts = get_ucounts(new->ucounts);    in prepare_creds()
      245  return new;                                  in prepare_creds()
      262  if (!new)                                    in prepare_exec_creds()
      275  new->suid = new->fsuid = new->euid;          in prepare_exec_creds()
      276  new->sgid = new->fsgid = new->egid;          in prepare_exec_creds()
      313  if (!new)                                    in copy_creds()
      588  if (!new)                                    in prepare_kernel_cred()
      [all …]
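Read together, these hits trace the kernel's copy-modify-commit credential lifecycle: prepare_creds() duplicates the caller's credentials, the private copy is edited, and commit_creds() publishes it (abort_creds() throws it away on error). A minimal sketch of a caller using that API; the helper name set_task_fsuid() is hypothetical:

```c
#include <linux/cred.h>
#include <linux/errno.h>

/* Hypothetical helper showing the prepare/commit pattern the hits above
 * come from: mutate a private copy, never the live credentials. */
static int set_task_fsuid(kuid_t fsuid)
{
	struct cred *new = prepare_creds();	/* copy of current's creds */

	if (!new)
		return -ENOMEM;

	new->fsuid = fsuid;			/* edit the copy only */

	return commit_creds(new);		/* publish; consumes @new */
}
```

Because the live credentials are never written in place, other tasks observing them under RCU always see a complete, consistent set.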
|
ucount.c
      166  new = kzalloc(sizeof(*new), GFP_KERNEL);     in alloc_ucounts()
      167  if (!new)                                    in alloc_ucounts()
      170  new->ns = ns;                                in alloc_ucounts()
      178  kfree(new);                                  in alloc_ucounts()
      185  return new;                                  in alloc_ucounts()
      255  if (new < 0 || new > max)                    in inc_rlimit_ucounts()
      258  ret = new;                                   in inc_rlimit_ucounts()
      272  new = dec;                                   in dec_rlimit_ucounts()
      305  if (new < 0 || new > max)                    in inc_rlimit_get_ucounts()
      308  ret = new;                                   in inc_rlimit_get_ucounts()
      [all …]
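The `new < 0 || new > max` tests are the saturation guard in the rlimit charging helpers: a negative result means the signed counter wrapped, anything above `max` breaches the limit, and either way the caller is told the charge failed. A simplified single-counter model of that check (the in-tree code walks the user-namespace chain with atomic ops; the function name charge_rlimit() is made up):

```c
#include <limits.h>

/* Simplified model of the guard in inc_rlimit_ucounts(): report LONG_MAX
 * on wraparound or limit breach so the caller can fail the charge. */
static long charge_rlimit(long *counter, long inc, long max)
{
	long new = *counter + inc;

	if (new < 0 || new > max)
		return LONG_MAX;	/* refused: overflow or over limit */

	*counter = new;
	return new;
}
```

The in-tree version charges first with an atomic add and uncharges on failure; this sketch refuses the charge up front, which is the same contract with one fewer step.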
|
sys.c
      430  if (!new)                                              in __sys_setregid()
      455  new->sgid = new->egid;                                 in __sys_setregid()
      456  new->fsgid = new->egid;                                in __sys_setregid()
      498  new->gid = new->egid = new->sgid = new->fsgid = kgid;  in __sys_setgid()
      500  new->egid = new->fsgid = kgid;                         in __sys_setgid()
      616  new->suid = new->euid;                                 in __sys_setreuid()
      617  new->fsuid = new->euid;                                in __sys_setreuid()
      670  new->suid = new->uid = kuid;                           in __sys_setuid()
      680  new->fsuid = new->euid = kuid;                         in __sys_setuid()
      765  new->fsuid = new->euid;                                in __sys_setresuid()
      [all …]
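These assignments encode the POSIX rule that the saved and filesystem ids shadow the effective id: setreuid() folds the new euid into suid and fsuid, while setuid() moves all four ids for a privileged caller but only euid/fsuid for an unprivileged one. A condensed plain-C sketch of that branch; the struct and the `privileged` flag stand in for the real `struct cred` and `ns_capable()` check:

```c
#include <stdbool.h>
#include <sys/types.h>

/* Stripped-down id set standing in for the relevant struct cred fields. */
struct id_set { uid_t uid, euid, suid, fsuid; };

/* Condensed model of __sys_setuid(): a privileged caller rewrites the
 * real and saved ids as well; an unprivileged caller may only switch to
 * its real or saved id, and only euid and fsuid change. */
static bool apply_setuid(struct id_set *c, uid_t kuid, bool privileged)
{
	if (privileged)
		c->suid = c->uid = kuid;
	else if (kuid != c->uid && kuid != c->suid)
		return false;	/* the real syscall returns -EPERM here */

	c->fsuid = c->euid = kuid;
	return true;
}
```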
|
resource.c
      197  new->sibling = tmp;                                             in __request_resource()
      198  *p = new;                                                       in __request_resource()
      199  new->parent = root;                                             in __request_resource()
      789  *old = new;                                                     in reallocate_resource()
      881  if ((first->start > new->start) || (first->end < new->end))    in __insert_resource()
      883  if ((first->start == new->start) && (first->end == new->end))  in __insert_resource()
      889  if (next->start < new->start || next->end > new->end)          in __insert_resource()
      899  new->child = first;                                             in __insert_resource()
      971  if (new->parent)                                                in insert_resource_expand_to_fit()
     1598  *ptr = new;                                                     in devm_request_resource()
      [all …]
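The three stores at lines 197-199 are the whole insertion step of __request_resource(): resources form a tree where each level is a singly linked, address-ordered sibling list, and a new node is spliced in front of the first sibling that lies beyond it. A sketch of that splice over a reduced node type; field names follow `struct resource`, and the list walk that finds `p` and `tmp` is omitted:

```c
/* Reduced stand-in for struct resource: just the tree linkage. */
struct res_node {
	unsigned long start, end;
	struct res_node *parent, *sibling, *child;
};

/* Splice @new before @tmp, where @p is the link that currently points at
 * @tmp (either root->child or a predecessor's ->sibling pointer). */
static void splice_resource(struct res_node *root, struct res_node **p,
			    struct res_node *tmp, struct res_node *new)
{
	new->sibling = tmp;	/* the rest of the list now hangs off new */
	*p = new;		/* predecessor link points at new */
	new->parent = root;	/* and new is owned by root */
}
```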
|
user.c
      206  struct user_struct *up, *new;                    in alloc_uid() local
      214  if (!new)                                        in alloc_uid()
      217  new->uid = uid;                                  in alloc_uid()
      218  refcount_set(&new->__count, 1);                  in alloc_uid()
      219  if (user_epoll_alloc(new)) {                     in alloc_uid()
      220  kmem_cache_free(uid_cachep, new);                in alloc_uid()
      223  ratelimit_state_init(&new->ratelimit, HZ, 100);  in alloc_uid()
      233  user_epoll_free(new);                            in alloc_uid()
      234  kmem_cache_free(uid_cachep, new);                in alloc_uid()
      236  uid_hash_insert(new, hashent);                   in alloc_uid()
      [all …]
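alloc_uid() is a textbook optimistic allocation: build and initialize the candidate outside the lock, then re-run the hash lookup under the lock and discard the candidate if another CPU inserted first. A sketch of that shape; `struct obj`, lookup_locked() and insert_locked() are hypothetical stand-ins for user_struct and the uidhash helpers:

```c
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/refcount.h>

struct obj {
	int key;
	refcount_t ref;
};

/* Hypothetical hash helpers; both must be called with obj_lock held. */
struct obj *lookup_locked(int key);
void insert_locked(struct obj *o);

static DEFINE_SPINLOCK(obj_lock);

/* Optimistic allocate-then-recheck, as in alloc_uid(). */
static struct obj *get_or_create(int key)
{
	struct obj *up, *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return NULL;
	new->key = key;
	refcount_set(&new->ref, 1);	/* the caller's reference */

	spin_lock(&obj_lock);
	up = lookup_locked(key);
	if (up) {
		refcount_inc(&up->ref);	/* lost the race: use the winner */
		kfree(new);
		new = up;
	} else {
		insert_locked(new);
	}
	spin_unlock(&obj_lock);
	return new;
}
```

Allocating before taking the lock keeps GFP_KERNEL sleeps out of the critical section at the cost of a wasted allocation on the (rare) racing path.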
|
groups.c
      118  void set_groups(struct cred *new, struct group_info *group_info)  in set_groups() argument
      120  put_group_info(new->group_info);                 in set_groups()
      122  new->group_info = group_info;                    in set_groups()
      136  struct cred *new;                                in set_current_groups() local
      140  new = prepare_creds();                           in set_current_groups()
      141  if (!new)                                        in set_current_groups()
      146  set_groups(new, group_info);                     in set_current_groups()
      148  retval = security_task_fix_setgroups(new, old);  in set_current_groups()
      152  return commit_creds(new);                        in set_current_groups()
      155  abort_creds(new);                                in set_current_groups()
|
umh.c
       66  struct cred *new;                                                   in call_usermodehelper_exec_async() local
       88  new = prepare_kernel_cred(current);                                 in call_usermodehelper_exec_async()
       89  if (!new)                                                           in call_usermodehelper_exec_async()
       93  new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);  in call_usermodehelper_exec_async()
       94  new->cap_inheritable = cap_intersect(usermodehelper_inheritable,    in call_usermodehelper_exec_async()
       95  new->cap_inheritable);                                              in call_usermodehelper_exec_async()
       99  retval = sub_info->init(sub_info, new);                             in call_usermodehelper_exec_async()
      101  abort_creds(new);                                                   in call_usermodehelper_exec_async()
      106  commit_creds(new);                                                  in call_usermodehelper_exec_async()
      357  int (*init)(struct subprocess_info *info, struct cred *new),        in call_usermodehelper_setup() argument
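Before a usermode helper ever executes, its capabilities are clamped: the fresh kernel credentials are intersected with the global usermodehelper_bset and usermodehelper_inheritable masks, so a helper can never exceed what the administrator allowed. A minimal sketch of that setup step, with the masks passed in as parameters rather than read from the real globals:

```c
#include <linux/cred.h>
#include <linux/capability.h>
#include <linux/errno.h>

/* Sketch of the cred setup in call_usermodehelper_exec_async(): start
 * from fresh kernel creds, then AND the capability sets down to the
 * administrator-configured usermodehelper masks. */
static int setup_umh_creds(kernel_cap_t bset, kernel_cap_t inheritable)
{
	struct cred *new = prepare_kernel_cred(current);

	if (!new)
		return -ENOMEM;

	new->cap_bset        = cap_intersect(bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(inheritable,
					     new->cap_inheritable);

	return commit_creds(new);	/* the helper now runs clamped */
}
```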
|
tracepoint.c
      137  struct tracepoint_func *old, *new;         in func_add() local
      159  new = allocate_probes(nr_probes + 2);      in func_add()
      160  if (new == NULL)                           in func_add()
      170  new[nr_probes++] = old[iter_probes];       in func_add()
      179  new[pos] = *tp_func;                       in func_add()
      180  new[nr_probes].func = NULL;                in func_add()
      181  *funcs = new;                              in func_add()
      190  struct tracepoint_func *old, *new;         in func_remove() local
      222  if (new) {                                 in func_remove()
      227  new[j++] = old[i];                         in func_remove()
      [all …]
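Tracepoint probe arrays are immutable once published under RCU, so func_add() never edits in place: it allocates a new array two slots larger (one for the probe, one for the NULL terminator), copies the old entries around the insertion point, and publishes the new array with a single pointer store. A sketch of the copy-and-publish step; `struct probe`, probe_array_add() and the plain `*funcs` store are simplifications of the in-tree types and rcu_assign_pointer():

```c
#include <linux/slab.h>
#include <linux/errno.h>

typedef void (*probe_fn)(void *data);

struct probe {
	probe_fn func;
	void *data;
};

/* Copy-on-write insert, as in func_add(): build a NULL-terminated copy
 * with @tp inserted at @pos, then swing *funcs to it in one store. The
 * caller frees the old array after an RCU grace period. */
static int probe_array_add(struct probe **funcs, int nr_probes,
			   const struct probe *tp, int pos)
{
	struct probe *old = *funcs;
	struct probe *new = kcalloc(nr_probes + 2, sizeof(*new), GFP_KERNEL);
	int i, j = 0;

	if (!new)
		return -ENOMEM;

	for (i = 0; i < nr_probes; i++) {
		if (i == pos)
			new[j++] = *tp;
		new[j++] = old[i];
	}
	if (pos == nr_probes)
		new[j++] = *tp;
	new[nr_probes + 1].func = NULL;	/* terminator (kcalloc zeroes anyway) */

	*funcs = new;	/* single-store publish; rcu_assign_pointer in-tree */
	return 0;
}
```

Readers walking the old array under RCU keep seeing a consistent, NULL-terminated snapshot; only the pointer swap is ever visible.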
|
capability.c
      221  struct cred *new;                        in SYSCALL_DEFINE2() local
      247  new = prepare_creds();                   in SYSCALL_DEFINE2()
      248  if (!new)                                in SYSCALL_DEFINE2()
      251  ret = security_capset(new, current_cred(),  in SYSCALL_DEFINE2()
      256  audit_log_capset(new, current_cred());   in SYSCALL_DEFINE2()
      258  return commit_creds(new);                in SYSCALL_DEFINE2()
      261  abort_creds(new);                        in SYSCALL_DEFINE2()
|
audit_watch.c
      206  struct audit_watch *new;                                           in audit_dupe_watch() local
      212  new = audit_init_watch(path);                                      in audit_dupe_watch()
      213  if (IS_ERR(new)) {                                                 in audit_dupe_watch()
      218  new->dev = old->dev;                                               in audit_dupe_watch()
      219  new->ino = old->ino;                                               in audit_dupe_watch()
      221  new->parent = old->parent;                                         in audit_dupe_watch()
      224  return new;                                                        in audit_dupe_watch()
      509  int audit_dupe_exe(struct audit_krule *new, struct audit_krule *old)  in audit_dupe_exe() argument
      518  audit_mark = audit_alloc_mark(new, pathname, strlen(pathname));    in audit_dupe_exe()
      523  new->exe = audit_mark;                                             in audit_dupe_exe()
|
audit_tree.c
      298  new->key = old->key;                         in replace_chunk()
      301  owner->root = new;                           in replace_chunk()
      308  new->owners[i].owner = owner;                in replace_chunk()
      315  replace_mark_chunk(old->mark, new);          in replace_chunk()
      322  list_replace_rcu(&old->hash, &new->hash);    in replace_chunk()
      351  struct audit_chunk *new;                     in untag_chunk() local
      377  new = alloc_chunk(size);                     in untag_chunk()
      378  if (!new)                                    in untag_chunk()
      386  replace_chunk(new, chunk);                   in untag_chunk()
      874  int audit_tag_tree(char *old, char *new)     in audit_tag_tree() argument
      [all …]
|
auditfilter.c
      825  struct audit_krule *new;       in audit_dupe_rule() local
      833  new = &entry->rule;            in audit_dupe_rule()
      834  new->flags = old->flags;       in audit_dupe_rule()
      835  new->pflags = old->pflags;     in audit_dupe_rule()
      836  new->listnr = old->listnr;     in audit_dupe_rule()
      837  new->action = old->action;     in audit_dupe_rule()
      840  new->prio = old->prio;         in audit_dupe_rule()
      852  new->tree = old->tree;         in audit_dupe_rule()
      877  new->filterkey = fk;           in audit_dupe_rule()
      884  if (new->exe)                  in audit_dupe_rule()
      [all …]
|
nsproxy.c
      229  void switch_task_namespaces(struct task_struct *p, struct nsproxy *new)  in switch_task_namespaces() argument
      237  p->nsproxy = new;                                              in switch_task_namespaces()
      252  struct nsproxy *new;                                           in exec_task_namespaces() local
      257  new = create_new_namespaces(0, tsk, current_user_ns(), tsk->fs);  in exec_task_namespaces()
      258  if (IS_ERR(new))                                               in exec_task_namespaces()
      259  return PTR_ERR(new);                                           in exec_task_namespaces()
      261  timens_on_fork(new, tsk);                                      in exec_task_namespaces()
      262  switch_task_namespaces(tsk, new);                              in exec_task_namespaces()
|
/kernel/printk/
nbcon.c
      274  new.atom = cur->atom;    in nbcon_context_try_acquire_direct()
      278  new.cpu = cpu;           in nbcon_context_try_acquire_direct()
      371  new.atom = cur->atom;    in nbcon_context_try_acquire_requested()
      372  new.prio = ctxt->prio;   in nbcon_context_try_acquire_requested()
      375  new.cpu = cpu;           in nbcon_context_try_acquire_requested()
      471  new.atom = cur->atom;    in nbcon_context_try_acquire_handover()
      476  cur->atom = new.atom;    in nbcon_context_try_acquire_handover()
      565  new.cpu = cpu;           in nbcon_context_try_acquire_hostile()
      683  new.atom = cur.atom;     in nbcon_context_release()
      844  new.atom = cur.atom;     in __nbcon_context_update_unsafe()
      [all …]
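Every `new.atom = cur->atom` here reflects the nbcon ownership protocol's working style: the console state is a single word exposed both as an atomic integer (`atom`) and as bitfields (`prio`, `cpu`, ...), so an acquire snapshots the word, edits the fields, and tries to cmpxchg the whole word back. A reduced sketch of that union-plus-cmpxchg idiom; the field layout is illustrative, not the real `struct nbcon_state`:

```c
#include <linux/atomic.h>

/* Illustrative layout: one word viewed as fields or as a whole. */
struct owner_state {
	union {
		unsigned int atom;
		struct {
			unsigned int prio :  4;
			unsigned int cpu  : 28;
		};
	};
};

/* Snapshot, edit fields, publish atomically, as the nbcon acquire paths
 * do. On failure try_cmpxchg() refreshes cur->atom with the current word
 * so the caller can re-evaluate and retry or back off. */
static bool try_take_ownership(unsigned int *state, struct owner_state *cur,
			       unsigned int prio, unsigned int cpu)
{
	struct owner_state new;

	new.atom = cur->atom;	/* start from the observed word */
	new.prio = prio;
	new.cpu  = cpu;

	return try_cmpxchg(state, &cur->atom, new.atom);
}
```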
|
/kernel/irq/
manage.c
     1318  if (new->handler && new->thread_fn) {                     in irq_setup_forced_threading()
     1324  new->secondary->thread_fn = new->thread_fn;               in irq_setup_forced_threading()
     1325  new->secondary->dev_id = new->dev_id;                     in irq_setup_forced_threading()
     1326  new->secondary->irq = new->irq;                           in irq_setup_forced_threading()
     1327  new->secondary->name = new->name;                         in irq_setup_forced_threading()
     1331  new->thread_fn = new->handler;                            in irq_setup_forced_threading()
     1450  new->irq = irq;                                           in __setup_irq()
     1736  *old_ptr = new;                                           in __setup_irq()
     1763  new->dir = NULL;                                          in __setup_irq()
     1770  irq, new->flags, new->name, old->flags, old->name);       in __setup_irq()
      [all …]
|
/kernel/bpf/
local_storage.c
      108  while (*new) {                                                       in cgroup_storage_insert()
      113  parent = *new;                                                       in cgroup_storage_insert()
      116  new = &((*new)->rb_left);                                            in cgroup_storage_insert()
      119  new = &((*new)->rb_right);                                           in cgroup_storage_insert()
      126  rb_link_node(&storage->node, parent, new);                           in cgroup_storage_insert()
      148  struct bpf_storage_buffer *new;                                      in cgroup_storage_update_elem() local
      167  new = bpf_map_kmalloc_node(map, struct_size(new, data, map->value_size),  in cgroup_storage_update_elem()
      170  if (!new)                                                            in cgroup_storage_update_elem()
      174  check_and_init_map_value(map, new->data);                            in cgroup_storage_update_elem()
      176  new = xchg(&storage->buf, new);                                      in cgroup_storage_update_elem()
      [all …]
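cgroup_storage_insert() is the standard rbtree descent: walk `struct rb_node **new` down the tree remembering the parent, branch left or right on key comparison, then link and recolor (trace_stat.c below uses the identical walk in insert_stat()). A generic sketch of that idiom with an integer key; the real code compares `bpf_cgroup_storage_key`s and the `struct item` here is made up:

```c
#include <linux/rbtree.h>

struct item {
	struct rb_node node;
	int key;
};

/* Classic rbtree insert: descend to the NULL child slot that should hold
 * @it, then link and rebalance. Returns false on a duplicate key. */
static bool item_insert(struct rb_root *root, struct item *it)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct item *this = rb_entry(*new, struct item, node);

		parent = *new;
		if (it->key < this->key)
			new = &(*new)->rb_left;
		else if (it->key > this->key)
			new = &(*new)->rb_right;
		else
			return false;	/* key already present */
	}

	rb_link_node(&it->node, parent, new);	/* attach at the found slot */
	rb_insert_color(&it->node, root);	/* rebalance and recolor */
	return true;
}
```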
|
dispatcher.c
      108  void *new, *tmp;                                                     in bpf_dispatcher_update() local
      114  new = d->num_progs ? d->image + noff : NULL;                         in bpf_dispatcher_update()
      116  if (new) {                                                           in bpf_dispatcher_update()
      120  if (bpf_dispatcher_prepare(d, new, tmp))                             in bpf_dispatcher_update()
      122  if (IS_ERR(bpf_arch_text_copy(new, tmp, PAGE_SIZE / 2)))             in bpf_dispatcher_update()
      126  __BPF_DISPATCHER_UPDATE(d, new ?: (void *)&bpf_dispatcher_nop_func); in bpf_dispatcher_update()
      133  if (new)                                                             in bpf_dispatcher_update()
|
rqspinlock.h
       30  u32 old, new;                                              in try_cmpxchg_tail() local
       42  new = (old & _Q_LOCKED_PENDING_MASK) | new_tail;           in try_cmpxchg_tail()
       43  } while (!atomic_try_cmpxchg_relaxed(&lock->val, &old, new));  in try_cmpxchg_tail()
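try_cmpxchg_tail() shows the canonical lockless read-modify-write loop: read the lock word, compose a new value that swaps in the new tail while preserving the locked/pending bits, and retry the cmpxchg until no other CPU raced in between (qspinlock's xchg_tail() further down is the same loop). A standalone C11 model of that loop, assuming the kernel's field split where the low 16 bits hold the locked and pending bytes:

```c
#include <stdatomic.h>
#include <stdint.h>

#define _Q_LOCKED_PENDING_MASK 0x0000ffffu	/* low bits: locked + pending */

/* C11 model of xchg_tail()/try_cmpxchg_tail(): replace the tail bits,
 * keep the locked/pending bits, loop until the CAS wins. Returns the
 * previous tail. */
static uint32_t xchg_tail(_Atomic uint32_t *lock, uint32_t new_tail)
{
	uint32_t old = atomic_load_explicit(lock, memory_order_relaxed);
	uint32_t new;

	do {
		new = (old & _Q_LOCKED_PENDING_MASK) | new_tail;
		/* on failure, 'old' is reloaded with the current word */
	} while (!atomic_compare_exchange_weak_explicit(lock, &old, new,
							memory_order_relaxed,
							memory_order_relaxed));

	return old & ~_Q_LOCKED_PENDING_MASK;
}
```

Relaxed ordering suffices here, matching the `_relaxed` variant in the hits: the tail swap itself carries no acquire or release semantics.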
|
/kernel/futex/
requeue.c
      102  int old, new;                                              in futex_requeue_pi_prepare() local
      125  new = Q_REQUEUE_PI_IN_PROGRESS;                            in futex_requeue_pi_prepare()
      126  } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));  in futex_requeue_pi_prepare()
      134  int old, new;                                              in futex_requeue_pi_complete() local
      145  new = Q_REQUEUE_PI_DONE + locked;                          in futex_requeue_pi_complete()
      148  new = Q_REQUEUE_PI_NONE;                                   in futex_requeue_pi_complete()
      152  new = Q_REQUEUE_PI_IGNORE;                                 in futex_requeue_pi_complete()
      154  } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));  in futex_requeue_pi_complete()
      165  int old, new;                                              in futex_requeue_pi_wakeup_sync() local
      177  new = Q_REQUEUE_PI_WAIT;                                   in futex_requeue_pi_wakeup_sync()
      [all …]
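The futex requeue_state is a small atomic state machine: each transition reads the current state, validates it, picks the successor (Q_REQUEUE_PI_IN_PROGRESS, _DONE, _IGNORE, ...), and commits with atomic_try_cmpxchg(), looping if a waker or waiter moved the state first. A reduced model of one transition, with made-up state names in place of the real Q_REQUEUE_PI_* set:

```c
#include <linux/atomic.h>

enum { STATE_NONE, STATE_IN_PROGRESS, STATE_DONE };

/* Model of futex_requeue_pi_prepare(): only NONE may move to
 * IN_PROGRESS; any other observed state makes the caller back off. */
static bool try_start(atomic_t *state)
{
	int old = atomic_read(state);

	do {
		if (old != STATE_NONE)
			return false;	/* someone else transitioned first */
		/* attempt NONE -> IN_PROGRESS; on failure old is reloaded */
	} while (!atomic_try_cmpxchg(state, &old, STATE_IN_PROGRESS));

	return true;
}
```

Validating `old` inside the loop is what makes this a state machine rather than a blind swap: every transition is checked against the value actually observed.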
|
core.c
      246  mm->futex_phash_new = new;         in __futex_pivot_hash()
      252  new->state = FR_PERCPU;            in __futex_pivot_hash()
      511  if (WARN_ON_ONCE(!new))            in get_inode_sequence_number()
      517  return new;                        in get_inode_sequence_number()
     1830  new = mm->futex_phash_new;         in futex_hash_allocate()
     1844  if (cur && !new) {                 in futex_hash_allocate()
     1853  if (new) {                         in futex_hash_allocate()
     1858  free = new;                        in futex_hash_allocate()
     1859  new = fph;                         in futex_hash_allocate()
     1864  new = fph;                         in futex_hash_allocate()
      [all …]
|
/kernel/trace/
trace_stat.c
       77  struct rb_node **new = &(root->rb_node), *parent = NULL;  in insert_stat() local
       89  while (*new) {                                     in insert_stat()
       93  this = container_of(*new, struct stat_node, node); in insert_stat()
       96  parent = *new;                                     in insert_stat()
       98  new = &((*new)->rb_left);                          in insert_stat()
      100  new = &((*new)->rb_right);                         in insert_stat()
      103  rb_link_node(&data->node, parent, new);            in insert_stat()
|
/kernel/rcu/
rcu.h
      183  static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)  in rcu_seq_completed_gp() argument
      185  return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);          in rcu_seq_completed_gp()
      191  static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)  in rcu_seq_new_gp() argument
      194  new);                                                          in rcu_seq_new_gp()
      201  static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)  in rcu_seq_diff() argument
      205  if (old == new)                                                in rcu_seq_diff()
      211  rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -                      in rcu_seq_diff()
      213  ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));   in rcu_seq_diff()
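These helpers do wraparound-safe arithmetic on grace-period sequence numbers: the low RCU_SEQ_STATE_MASK bits carry phase state, the remaining bits count grace periods, and comparisons go through ULONG_CMP_LT() so the math stays correct when the counter wraps. A self-contained model of the completed-GP test under those assumptions (the ULONG_CMP_LT form below is equivalent to, not copied from, the kernel's macro):

```c
#include <stdbool.h>

#define RCU_SEQ_CTR_SHIFT   2
#define RCU_SEQ_STATE_MASK  ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* Wraparound-safe "a < b" on unsigned counters: correct as long as the
 * two values are within half the counter space of each other. */
#define ULONG_CMP_LT(a, b)  ((long)((a) - (b)) < 0)

/* Model of rcu_seq_completed_gp(): a grace period in flight at snapshot
 * 'old' has completed once the counter part of 'new' passes it. */
static inline bool seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}
```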
|
/kernel/kcsan/
report.c
      391  u64 old, u64 new, u64 mask)                                in print_report() argument
      474  u64 diff = old ^ new;                                      in print_report()
      481  hex_len, old, hex_len, new);                               in print_report()
      668  int watchpoint_idx, u64 old, u64 new, u64 mask)            in kcsan_report_known_origin() argument
      692  print_report(value_change, &ai, other_info, old, new, mask);  in kcsan_report_known_origin()
      701  unsigned long ip, u64 old, u64 new, u64 mask)              in kcsan_report_unknown_origin() argument
      710  print_report(KCSAN_VALUE_CHANGE_TRUE, &ai, NULL, old, new, mask);  in kcsan_report_unknown_origin()
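For value-change reports KCSAN prints the old and new values and, since `old ^ new` has a bit set exactly where the two words differ, it can also restrict the report to the bits an access mask actually covers. A tiny standalone model of that diff computation; the output format is illustrative, not KCSAN's:

```c
#include <stdint.h>
#include <stdio.h>

/* Model of the diff logic in print_report(): XOR exposes changed bits;
 * the mask keeps only the bits the access actually touched. */
static void report_value_change(uint64_t old, uint64_t new, uint64_t mask)
{
	uint64_t diff = old ^ new;

	if (mask)
		diff &= mask;	/* only in-mask changes are interesting */
	if (diff)
		printf("value changed: 0x%016llx -> 0x%016llx (bits 0x%llx)\n",
		       (unsigned long long)old, (unsigned long long)new,
		       (unsigned long long)diff);
}
```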
|
/kernel/locking/
qspinlock.h
      160  u32 old, new;                                              in xchg_tail() local
      164  new = (old & _Q_LOCKED_PENDING_MASK) | tail;               in xchg_tail()
      170  } while (!atomic_try_cmpxchg_relaxed(&lock->val, &old, new));  in xchg_tail()
|
rwsem.c
      607  long count, new;                             in rwsem_try_write_lock() local
      625  new = count;                                 in rwsem_try_write_lock()
      637  new |= RWSEM_FLAG_HANDOFF;                   in rwsem_try_write_lock()
      639  new |= RWSEM_WRITER_LOCKED;                  in rwsem_try_write_lock()
      640  new &= ~RWSEM_FLAG_HANDOFF;                  in rwsem_try_write_lock()
      643  new &= ~RWSEM_FLAG_WAITERS;                  in rwsem_try_write_lock()
      652  if (new & RWSEM_FLAG_HANDOFF) {              in rwsem_try_write_lock()
      746  struct task_struct *new, *owner;             in rwsem_spin_on_owner() local
      764  new = rwsem_owner_flags(sem, &new_flags);    in rwsem_spin_on_owner()
      765  if ((new != owner) || (new_flags != flags)) { in rwsem_spin_on_owner()
      [all …]
|