/net/sunrpc/auth_gss/

gss_krb5_wrap.c
    61  BUG_ON(shift > LOCAL_BUF_LEN);   in rotate_buf_a_little()
    63  read_bytes_from_xdr_buf(buf, 0, head, shift);   in rotate_buf_a_little()
    64  for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {   in rotate_buf_a_little()
    65  this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));   in rotate_buf_a_little()
    66  read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);   in rotate_buf_a_little()
    69  write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);   in rotate_buf_a_little()
    72  static void _rotate_left(struct xdr_buf *buf, unsigned int shift)   in _rotate_left() (argument)
    77  shift %= buf->len;   in _rotate_left()
    78  while (shifted < shift) {   in _rotate_left()
    79  this_shift = min(shift - shifted, LOCAL_BUF_LEN);   in _rotate_left()
    [all …]
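The rotate_buf_a_little() and _rotate_left() lines above rotate a scattered struct xdr_buf left through a small bounce buffer. Below is a hedged userspace sketch of the same rotation pattern on a flat byte array; plain memcpy() stands in for read_bytes_from_xdr_buf()/write_bytes_to_xdr_buf(), LOCAL_BUF_LEN is shrunk, and all names are mine rather than kernel code.

```c
/* Hedged sketch: rotate buf[0..len) left by "shift" bytes using only a
 * small bounce buffer, the way rotate_buf_a_little() walks an xdr_buf. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define LOCAL_BUF_LEN 8   /* deliberately small scratch buffer */

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

/* Rotate left by shift, where shift <= LOCAL_BUF_LEN. */
static void rotate_a_little(unsigned char *buf, size_t len, size_t shift)
{
    unsigned char head[LOCAL_BUF_LEN], tmp[LOCAL_BUF_LEN];
    size_t i, this_len;

    assert(shift <= LOCAL_BUF_LEN && shift <= len);
    memcpy(head, buf, shift);                     /* save the bytes that wrap */
    for (i = 0; i + shift < len; i += LOCAL_BUF_LEN) {
        this_len = min_sz(LOCAL_BUF_LEN, len - (i + shift));
        memcpy(tmp, buf + i + shift, this_len);   /* pull the next chunk forward */
        memcpy(buf + i, tmp, this_len);
    }
    memcpy(buf + len - shift, head, shift);       /* wrapped bytes go to the tail */
}

/* Arbitrary rotation: repeat small rotations until "shift" is consumed. */
static void rotate_left(unsigned char *buf, size_t len, size_t shift)
{
    size_t shifted = 0, this_shift;

    if (!len)
        return;
    shift %= len;
    while (shifted < shift) {
        this_shift = min_sz(shift - shifted, LOCAL_BUF_LEN);
        rotate_a_little(buf, len, this_shift);
        shifted += this_shift;
    }
}

int main(void)
{
    unsigned char s[] = "abcdefghijklmnop";

    rotate_left(s, strlen((char *)s), 5);
    printf("%s\n", s);   /* prints "fghijklmnopabcde" */
    return 0;
}
```

Rotating in LOCAL_BUF_LEN-sized steps keeps the scratch space constant no matter how long the buffer is, which is the point of doing it "a little" at a time.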
|
/net/sunrpc/

xdr.c
    666  if (!shift || !len)   in xdr_buf_pages_shift_right()
    674  shift);   in xdr_buf_pages_shift_right()
    684  if (!shift)   in xdr_buf_head_shift_right()
    688  shift);   in xdr_buf_head_shift_right()
    693  shift);   in xdr_buf_head_shift_right()
    725  if (shift > base) {   in xdr_buf_tail_copy_left()
    854  buf->len -= shift;   in xdr_shrink_bufhead()
    855  return shift;   in xdr_shrink_bufhead()
    883  buf->len -= shift;   in xdr_shrink_pagelen()
    885  return shift;   in xdr_shrink_pagelen()
    [all …]
|
/net/netfilter/

nf_conntrack_sip.c
    206  const char *limit, int *shift)   in skp_epaddr_len() (argument)
    209  int s = *shift;   in skp_epaddr_len()
    216  (*shift)++;   in skp_epaddr_len()
    222  (*shift)++;   in skp_epaddr_len()
    225  *shift = s;   in skp_epaddr_len()
    245  int shift = 0;   in ct_sip_parse_request() (local)
    266  dptr += shift;   in ct_sip_parse_request()
    374  int shift = 0;   in ct_sip_get_header() (local)
    442  int shift = 0;   in ct_sip_next_header() (local)
    459  *matchoff += shift;   in ct_sip_next_header()
    [all …]
|
nft_bitwise.c
    41  u32 shift = priv->data.data[0];   in nft_bitwise_eval_lshift() (local)
    46  dst[i - 1] = (src[i - 1] << shift) | carry;   in nft_bitwise_eval_lshift()
    47  carry = src[i - 1] >> (BITS_PER_TYPE(u32) - shift);   in nft_bitwise_eval_lshift()
    54  u32 shift = priv->data.data[0];   in nft_bitwise_eval_rshift() (local)
    59  dst[i] = carry | (src[i] >> shift);   in nft_bitwise_eval_rshift()
    60  carry = src[i] << (BITS_PER_TYPE(u32) - shift);   in nft_bitwise_eval_rshift()
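The nft_bitwise_eval_lshift()/_rshift() hits shift a register that spans several u32 words, carrying spilled bits between adjacent words. Here is a hedged standalone sketch of that carry pattern, treating word 0 as the most significant word, which is how the carries in the snippet line up; the function names, the WORD_BITS stand-in for BITS_PER_TYPE(u32), and the demo are mine.

```c
#include <stdint.h>
#include <stdio.h>

#define WORD_BITS 32u   /* stand-in for BITS_PER_TYPE(u32) */

/* dst = src << shift over nwords u32 words, word 0 most significant.
 * Caller guarantees 0 < shift < 32 (shifting a u32 by 32 is undefined). */
static void lshift_words(uint32_t *dst, const uint32_t *src,
                         unsigned int nwords, unsigned int shift)
{
    uint32_t carry = 0;
    unsigned int i;

    for (i = nwords; i > 0; i--) {                  /* least significant word first */
        dst[i - 1] = (src[i - 1] << shift) | carry;
        carry = src[i - 1] >> (WORD_BITS - shift);  /* bits that spill upward */
    }
}

/* dst = src >> shift, same layout, 0 < shift < 32. */
static void rshift_words(uint32_t *dst, const uint32_t *src,
                         unsigned int nwords, unsigned int shift)
{
    uint32_t carry = 0;
    unsigned int i;

    for (i = 0; i < nwords; i++) {                  /* most significant word first */
        dst[i] = carry | (src[i] >> shift);
        carry = src[i] << (WORD_BITS - shift);      /* bits that spill downward */
    }
}

int main(void)
{
    uint32_t src[2] = { 0x00000001, 0x80000000 };   /* the 64-bit value 0x1_80000000 */
    uint32_t dst[2];

    lshift_words(dst, src, 2, 1);
    printf("%08x %08x\n", dst[0], dst[1]);   /* 00000003 00000000 */
    rshift_words(dst, src, 2, 4);
    printf("%08x %08x\n", dst[0], dst[1]);   /* 00000000 18000000 */
    return 0;
}
```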
|
nf_conntrack_h323_asn1.c
    215  unsigned int v, l, shift, bytes;   in get_bitmap() (local)
    229  for (bytes = l >> 3, shift = 24, v = 0; bytes;   in get_bitmap()
    230  bytes--, shift -= 8)   in get_bitmap()
    231  v |= (unsigned int)(*bs->cur++) << shift;   in get_bitmap()
    234  v |= (unsigned int)(*bs->cur) << shift;   in get_bitmap()
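get_bitmap() gathers up to 32 bits of an ASN.1 bitmap into a u32, OR-ing whole bytes in at shifts of 24, 16, 8 and 0 and then the trailing partial byte. A hedged flat-buffer sketch of that gathering loop follows; the real parser tracks a bit cursor in its bitstream structure, which is omitted here, and read_bitmap32() is my name.

```c
#include <stdint.h>
#include <stdio.h>

/* Pack the first "nbits" (<= 32) bits of a byte stream into a u32,
 * left-aligned at bit 31, by OR-ing bytes in at decreasing shifts. */
static uint32_t read_bitmap32(const uint8_t *buf, unsigned int nbits)
{
    uint32_t v = 0;
    unsigned int bytes, shift;

    /* whole bytes: bit 31 downward, 8 bits at a time */
    for (bytes = nbits >> 3, shift = 24; bytes; bytes--, shift -= 8)
        v |= (uint32_t)(*buf++) << shift;

    /* trailing partial byte, then mask off anything past nbits */
    if (nbits & 7)
        v |= (uint32_t)(*buf) << shift;
    if (nbits && nbits < 32)
        v &= ~0u << (32 - nbits);
    return v;
}

int main(void)
{
    const uint8_t data[] = { 0xAB, 0xCD, 0xEF, 0x12 };

    printf("%08x\n", read_bitmap32(data, 12));  /* abc00000 */
    printf("%08x\n", read_bitmap32(data, 32));  /* abcdef12 */
    return 0;
}
```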
|
/net/mac80211/

airtime.c
    34  #define MCS_DURATION_S(shift, streams, sgi, bps) \   (argument)
    35  ((u16)((MCS_DURATION(streams, sgi, bps) >> shift)))
    55  #define HE_DURATION_S(shift, streams, gi, bps) \   (argument)
    56  (HE_DURATION(streams, gi, bps) >> shift)
    65  #define EHT_DURATION_S(shift, streams, gi, bps) \   (argument)
    66  HE_DURATION_S(shift, streams, gi, bps)
    117  .shift = _s, \
    148  .shift = _s, \
    190  .shift = _s, \
    238  .shift = _s, \
    [all …]
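The MCS_DURATION_S()/HE_DURATION_S() macros above, together with the per-group .shift fields and the later "duration <<= mg->shift" lines in the minstrel files, store precomputed airtimes right-shifted so they fit in a u16. Below is a hedged, self-contained sketch of that pack/unpack idea; the struct rate_group layout, pick_shift(), and the nanosecond values are my own illustration, not mac80211's tables.

```c
#include <stdint.h>
#include <stdio.h>

struct rate_group {
    uint16_t duration[4];   /* right-shifted tx durations, one per rate */
    uint8_t shift;          /* how far they were shifted to fit in u16 */
};

/* Choose the smallest shift that lets the largest duration fit in 16 bits. */
static uint8_t pick_shift(const uint32_t *dur, unsigned int n)
{
    uint32_t max = 0;
    uint8_t shift = 0;
    unsigned int i;

    for (i = 0; i < n; i++)
        if (dur[i] > max)
            max = dur[i];
    while ((max >> shift) > 0xffff)
        shift++;
    return shift;
}

static void pack_group(struct rate_group *g, const uint32_t *dur, unsigned int n)
{
    unsigned int i;

    g->shift = pick_shift(dur, n);
    for (i = 0; i < n; i++)
        g->duration[i] = (uint16_t)(dur[i] >> g->shift);
}

/* Readers recover an approximate duration, as the debugfs dumps do. */
static uint32_t group_duration(const struct rate_group *g, unsigned int idx)
{
    return (uint32_t)g->duration[idx] << g->shift;
}

int main(void)
{
    const uint32_t dur[4] = { 1500000, 750000, 375000, 187500 };  /* ns, made up */
    struct rate_group g;

    pack_group(&g, dur, 4);
    printf("shift=%u, rate0 ~ %u ns (was %u)\n",
           (unsigned)g.shift, (unsigned)group_duration(&g, 0), (unsigned)dur[0]);
    return 0;
}
```

The trade-off is the usual one: halving the storage per rate at the cost of losing the low "shift" bits of precision when the value is recovered.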
|
rc80211_minstrel_ht_debugfs.c
    120  duration <<= mg->shift;   in minstrel_ht_stats_dump()
    263  duration <<= mg->shift;   in minstrel_ht_stats_csv_dump()
|
rc80211_minstrel_ht.c
    61  .shift = _s, \
    98  .shift = _s, \
    152  .shift = _s, \
    185  .shift = _s, \
    467  return duration << group->shift;   in minstrel_get_duration()
    517  minstrel_mcs_groups[group].shift;   in minstrel_ht_get_tp_avg()
    1513  duration <<= g->shift;   in minstrel_ht_get_max_amsdu_len()
|
rc80211_minstrel_ht.h
    101  u8 shift;   (member)
|
sta_info.c
    2523  unsigned int shift;   in sta_stats_decode_rate() (local)
    2534  shift = 2;   in sta_stats_decode_rate()
    2536  shift = 1;   in sta_stats_decode_rate()
    2538  shift = 0;   in sta_stats_decode_rate()
    2539  rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);   in sta_stats_decode_rate()
|
/net/rxrpc/

proc.c
    317  unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);   in rxrpc_peer_seq_start() (local)
    325  n = *_pos & ((1U << shift) - 1);   in rxrpc_peer_seq_start()
    326  bucket = *_pos >> shift;   in rxrpc_peer_seq_start()
    344  *_pos = (bucket << shift) | n;   in rxrpc_peer_seq_start()
    352  unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);   in rxrpc_peer_seq_next() (local)
    358  bucket = *_pos >> shift;   in rxrpc_peer_seq_next()
    367  *_pos = (bucket << shift) | n;   in rxrpc_peer_seq_next()
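rxrpc_peer_seq_start()/_next() pack a hash-bucket number and an index within that bucket into the single seq-file position, using shift = 32 - HASH_BITS(peer_hash). A hedged sketch of that cursor encoding, with an invented PEER_HASH_BITS value and helper names:

```c
#include <stdint.h>
#include <stdio.h>

#define PEER_HASH_BITS 10u                       /* pretend HASH_BITS(peer_hash) == 10 */
#define CURSOR_SHIFT   (32u - PEER_HASH_BITS)    /* low 22 bits hold the index */

static uint32_t cursor_make(uint32_t bucket, uint32_t n)
{
    return (bucket << CURSOR_SHIFT) | n;         /* bucket in the high bits */
}

static void cursor_split(uint32_t pos, uint32_t *bucket, uint32_t *n)
{
    *n = pos & ((1u << CURSOR_SHIFT) - 1);       /* intra-bucket index */
    *bucket = pos >> CURSOR_SHIFT;               /* which hash bucket */
}

int main(void)
{
    uint32_t pos = cursor_make(3, 17);
    uint32_t bucket, n;

    cursor_split(pos, &bucket, &n);
    printf("pos=%#x bucket=%u n=%u\n", pos, bucket, n);  /* bucket=3 n=17 */
    return 0;
}
```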
|
conn_client.c
    342  unsigned int shift = slot * RXRPC_MAXCALLS;   in rxrpc_add_conn_to_bundle() (local)
    360  conn->bundle_shift = shift;   in rxrpc_add_conn_to_bundle()
    364  set_bit(shift + i, &bundle->avail_chans);   in rxrpc_add_conn_to_bundle()
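rxrpc_add_conn_to_bundle() gives each connection slot a block of RXRPC_MAXCALLS consecutive bits in the bundle's avail_chans bitmap, so shift = slot * RXRPC_MAXCALLS is the slot's base bit. A hedged userspace sketch with plain bit arithmetic in place of set_bit():

```c
#include <stdio.h>

#define RXRPC_MAXCALLS 4   /* four call channels per connection */

/* Mark every channel of one connection slot as available. */
static void mark_slot_available(unsigned long *avail_chans, unsigned int slot)
{
    unsigned int shift = slot * RXRPC_MAXCALLS;   /* base bit of this slot */
    unsigned int i;

    for (i = 0; i < RXRPC_MAXCALLS; i++)
        *avail_chans |= 1UL << (shift + i);
}

int main(void)
{
    unsigned long avail_chans = 0;

    mark_slot_available(&avail_chans, 0);
    mark_slot_available(&avail_chans, 2);
    printf("%#lx\n", avail_chans);   /* 0xf0f: channels of slots 0 and 2 */
    return 0;
}
```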
|
/net/sched/

em_meta.c
    714  int shift = v->hdr.shift;   in meta_var_apply_extras() (local)
    716  if (shift && shift < dst->len)   in meta_var_apply_extras()
    717  dst->len -= shift;   in meta_var_apply_extras()
    765  if (v->hdr.shift)   in meta_int_apply_extras()
    766  dst->value >>= v->hdr.shift;   in meta_int_apply_extras()
|
act_pedit.c
    267  nparms->tcfp_keys[i].shift = min_t(size_t,   in tcf_pedit_init()
    269  nparms->tcfp_keys[i].shift);   in tcf_pedit_init()
    274  cur += (0xff & offmask) >> nparms->tcfp_keys[i].shift;   in tcf_pedit_init()
    447  offset += (*d & tkey->offmask) >> tkey->shift;   in tcf_pedit_act()
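tcf_pedit_act() can offset an edit by a value read from the packet itself: a byte at the key's position is masked with offmask and shifted right by shift before being added to the target offset (tcf_pedit_init() clamps the shift beforehand). A hedged sketch of that lookup; struct pedit_key, the field values, and the sample packet are mine.

```c
#include <stdint.h>
#include <stdio.h>

struct pedit_key {
    uint32_t at;       /* where to read the length-carrying byte */
    uint8_t  offmask;  /* which bits of that byte matter */
    uint8_t  shift;    /* how far to shift them down */
    uint32_t off;      /* base offset of the field to edit */
};

/* Compute the packet-dependent edit offset for one key. */
static uint32_t pedit_offset(const uint8_t *pkt, const struct pedit_key *k)
{
    uint8_t d = pkt[k->at];

    /* e.g. offmask 0x0f with shift 0 picks out the IPv4 IHL nibble */
    return k->off + ((d & k->offmask) >> k->shift);
}

int main(void)
{
    uint8_t pkt[64] = { 0x45 };                     /* IPv4, IHL = 5 */
    struct pedit_key k = { .at = 0, .offmask = 0x0f, .shift = 0, .off = 12 };

    /* offset = 12 + (0x45 & 0x0f) = 17 */
    printf("edit offset = %u\n", pedit_offset(pkt, &k));
    return 0;
}
```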
|
sch_generic.c
    1526  static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)   in psched_ratecfg_precompute__() (argument)
    1531  *shift = 0;   in psched_ratecfg_precompute__()
    1541  (*shift)++;   in psched_ratecfg_precompute__()
    1554  psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);   in psched_ratecfg_precompute()
    1561  psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);   in psched_ppscfg_precompute()
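psched_ratecfg_precompute__() chooses a mult/shift pair so that the transmit time of a packet can later be computed as (len * mult) >> shift instead of a 64-bit division per packet. The sketch below is a hedged reconstruction of that general pattern, written from memory rather than the exact kernel code, so details such as the overflow guards may differ from the in-kernel helper.

```c
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ull

static void rate_precompute(uint64_t rate_bytes_per_sec, uint32_t *mult, uint8_t *shift)
{
    uint64_t factor = NSEC_PER_SEC;

    *mult = 1;
    *shift = 0;
    if (!rate_bytes_per_sec)
        return;

    /* Scale the factor up until mult uses most of its 32 bits, which
     * maximises the precision of the later (len * mult) >> shift. */
    for (;;) {
        *mult = (uint32_t)(factor / rate_bytes_per_sec);
        if ((*mult & (1u << 31)) || (factor & (1ull << 63)))
            break;
        factor <<= 1;
        (*shift)++;
    }
}

/* Fast path: nanoseconds needed to send "len" bytes at the precomputed rate. */
static uint64_t len_to_ns(uint32_t len, uint32_t mult, uint8_t shift)
{
    return ((uint64_t)len * mult) >> shift;
}

int main(void)
{
    uint32_t mult;
    uint8_t shift;
    uint64_t rate = 125000000;   /* 1 Gbit/s in bytes per second */

    rate_precompute(rate, &mult, &shift);
    /* 1500 bytes at 1 Gbit/s should come out at ~12000 ns */
    printf("mult=%u shift=%u, 1500 bytes -> %llu ns\n",
           mult, (unsigned)shift, (unsigned long long)len_to_ns(1500, mult, shift));
    return 0;
}
```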
|
sch_qfq.c
    739  static inline u64 qfq_round_down(u64 ts, unsigned int shift)   in qfq_round_down() (argument)
    741  return ts & ~((1ULL << shift) - 1);   in qfq_round_down()
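qfq_round_down() rounds a 64-bit timestamp down to a multiple of 2^shift by clearing its low bits. A minimal hedged sketch with an arbitrary demo value:

```c
#include <stdint.h>
#include <stdio.h>

/* Clear the low "shift" bits: round ts down to a multiple of 2^shift. */
static inline uint64_t round_down_pow2(uint64_t ts, unsigned int shift)
{
    return ts & ~((1ull << shift) - 1);   /* e.g. shift=10 rounds to multiples of 1024 */
}

int main(void)
{
    printf("%llu\n", (unsigned long long)round_down_pow2(1234567, 10));  /* 1233920 */
    return 0;
}
```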
|
sch_cake.c
    1354  static u64 cake_ewma(u64 avg, u64 sample, u32 shift)   in cake_ewma() (argument)
    1356  avg -= avg >> shift;   in cake_ewma()
    1357  avg += sample >> shift;   in cake_ewma()
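cake_ewma() maintains an exponentially weighted moving average with two shifts and no division: the new average is avg - avg/2^shift + sample/2^shift, so a larger shift gives a slower, smoother average. A hedged standalone sketch; the constant-sample demo is mine.

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t ewma_shift(uint64_t avg, uint64_t sample, uint32_t shift)
{
    avg -= avg >> shift;      /* keep (1 - 1/2^shift) of the old average */
    avg += sample >> shift;   /* blend in 1/2^shift of the new sample */
    return avg;
}

int main(void)
{
    uint64_t avg = 0;
    int i;

    /* feed a constant sample; the average converges toward it */
    for (i = 0; i < 64; i++)
        avg = ewma_shift(avg, 1000, 3);
    printf("avg ~ %llu\n", (unsigned long long)avg);   /* close to 1000 */
    return 0;
}
```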
|
/net/netfilter/ipvs/

ip_vs_mh.c
    347  int mw, shift;   in ip_vs_mh_shift_weight() (local)
    368  shift = fls(mw) - IP_VS_MH_TAB_BITS;   in ip_vs_mh_shift_weight()
    369  return (shift >= 0) ? shift : 0;   in ip_vs_mh_shift_weight()
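ip_vs_mh_shift_weight() scales destination weights down by fls(max_weight) - IP_VS_MH_TAB_BITS, clamped at zero, so the largest weight stays within the resolution of the 2^IP_VS_MH_TAB_BITS lookup table. A hedged sketch with a portable fls() stand-in and invented weights; any additional normalisation the real function does before this step is omitted here.

```c
#include <stdio.h>

#define MH_TAB_BITS 8   /* a table of 256 slots, standing in for IP_VS_MH_TAB_BITS */

/* find-last-set: 1-based index of the highest set bit, 0 for x == 0 */
static int fls_portable(unsigned int x)
{
    int r = 0;

    while (x) {
        x >>= 1;
        r++;
    }
    return r;
}

static int weight_shift(unsigned int max_weight)
{
    int shift = fls_portable(max_weight) - MH_TAB_BITS;

    return shift >= 0 ? shift : 0;   /* never shift left, only clamp down */
}

int main(void)
{
    unsigned int weights[] = { 100, 3000, 70000 };
    int shift = weight_shift(70000);   /* largest configured weight */
    unsigned int i;

    /* every weight is scaled by the same shift before filling the table */
    for (i = 0; i < 3; i++)
        printf("weight %u -> %u (shift %d)\n",
               weights[i], weights[i] >> shift, shift);
    return 0;
}
```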
|
/net/ipv4/

tcp_cubic.c
    169  u32 x, b, shift;   in cubic_root() (local)
    196  shift = (a >> (b * 3));   in cubic_root()
    198  x = ((u32)(((u32)v[shift] + 10) << b)) >> 6;   in cubic_root()
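cubic_root() computes an integer cube root for CUBIC's window calculation by seeding a Newton-Raphson step from a small lookup table v[] indexed by a shift of the operand. The table itself is not visible here, so the sketch below deliberately swaps in a plain bisection that returns the same quantity, floor(cbrt(a)), exactly; cbrt64_bisect() and the demo values are mine, not the kernel's method.

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t cbrt64_bisect(uint64_t a)
{
    /* floor(cbrt(2^64 - 1)) == 2642245, so mid*mid*mid never overflows u64 */
    uint32_t lo = 0, hi = 2642245;

    while (lo < hi) {
        uint32_t mid = lo + (hi - lo + 1) / 2;

        if ((uint64_t)mid * mid * mid <= a)
            lo = mid;      /* mid^3 still fits under a: move up */
        else
            hi = mid - 1;  /* overshoot: move down */
    }
    return lo;
}

int main(void)
{
    printf("%u %u %u\n",
           cbrt64_bisect(27),                  /* 3 */
           cbrt64_bisect(1000000),             /* 100 */
           cbrt64_bisect(1728000000000ull));   /* 12000 */
    return 0;
}
```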
|
tcp_timer.c
    108  int shift = 0;   in tcp_out_of_resources() (local)
    113  shift++;   in tcp_out_of_resources()
    117  shift++;   in tcp_out_of_resources()
    119  if (tcp_check_oom(sk, shift)) {   in tcp_out_of_resources()
|
fib_trie.c
    373  unsigned int shift = pos + bits;   in tnode_new() (local)
    378  BUG_ON(!bits || (shift > KEYLENGTH));   in tnode_new()
    393  tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;   in tnode_new()
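tnode_new() keeps only the key bits above pos + bits, and the (shift < KEYLENGTH) test matters because shifting a value by its full width is undefined in C. A hedged sketch of that masking expression; KEYLENGTH is assumed to be 32 here and the demo key is invented.

```c
#include <stdint.h>
#include <stdio.h>

#define KEYLENGTH (8 * sizeof(uint32_t))   /* 32-bit keys, as in the IPv4 trie */

static uint32_t tnode_key(uint32_t key, unsigned int pos, unsigned int bits)
{
    unsigned int shift = pos + bits;

    /* shifting by >= the type width is undefined, so treat it as "keep nothing" */
    return (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
}

int main(void)
{
    /* 192.168.23.42 with the low 8 bits cleared -> 192.168.23.0 */
    uint32_t key = 0xc0a8172a;

    printf("%08x\n", tnode_key(key, 3, 5));    /* c0a81700 */
    printf("%08x\n", tnode_key(key, 24, 8));   /* 00000000 */
    return 0;
}
```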
|
tcp.c
    3072  static bool tcp_too_many_orphans(int shift)   in tcp_too_many_orphans() (argument)
    3074  return READ_ONCE(tcp_orphan_cache) << shift >   in tcp_too_many_orphans()
    3086  bool tcp_check_oom(const struct sock *sk, int shift)   in tcp_check_oom() (argument)
    3090  too_many_orphans = tcp_too_many_orphans(shift);   in tcp_check_oom()
|
/net/xfrm/

xfrm_iptfs.c
    1287  static void __vec_shift(struct xfrm_iptfs_data *xtfs, u32 shift)   in __vec_shift() (argument)
    1291  if (shift > savedlen)   in __vec_shift()
    1292  shift = savedlen;   in __vec_shift()
    1293  if (shift != savedlen)   in __vec_shift()
    1294  memcpy(xtfs->w_saved, xtfs->w_saved + shift,   in __vec_shift()
    1295  (savedlen - shift) * sizeof(*xtfs->w_saved));   in __vec_shift()
    1296  memset(xtfs->w_saved + savedlen - shift, 0,   in __vec_shift()
    1297  shift * sizeof(*xtfs->w_saved));   in __vec_shift()
    1298  xtfs->w_savedlen -= shift;   in __vec_shift()
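__vec_shift() slides the IP-TFS saved-packet window left by shift entries: the survivors are copied to the front and the vacated tail is zeroed. A hedged userspace sketch with an invented entry type; memmove() replaces the kernel's memcpy() because the source and destination regions overlap in ordinary userspace C.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct win_entry {
    uint64_t seq;
};

struct window {
    struct win_entry saved[8];
    uint32_t savedlen;        /* number of valid entries at the front */
};

/* Drop the first "shift" entries; the rest slide to the front. */
static void window_shift(struct window *w, uint32_t shift)
{
    uint32_t savedlen = w->savedlen;

    if (shift > savedlen)
        shift = savedlen;
    if (shift != savedlen)                         /* something survives */
        memmove(w->saved, w->saved + shift,
                (savedlen - shift) * sizeof(*w->saved));
    memset(w->saved + savedlen - shift, 0,         /* clear the freed slots */
           shift * sizeof(*w->saved));
    w->savedlen -= shift;
}

int main(void)
{
    struct window w = { .saved = { {1}, {2}, {3}, {4}, {5} }, .savedlen = 5 };
    uint32_t i;

    window_shift(&w, 2);                           /* drop the two oldest */
    for (i = 0; i < w.savedlen; i++)
        printf("%llu ", (unsigned long long)w.saved[i].seq);   /* 3 4 5 */
    printf("(len=%u)\n", w.savedlen);
    return 0;
}
```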
|
/net/netlink/

af_netlink.c
    1747  int pos, idx, shift, err = 0;   in netlink_getsockopt() (local)
    1755  shift = (pos % sizeof(unsigned long)) * 8;   in netlink_getsockopt()
    1756  if (put_user((u32)(nlk->groups[idx] >> shift),   in netlink_getsockopt()
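netlink_getsockopt() hands the socket's multicast group memberships back to userspace 32 bits at a time even though they are stored in an array of unsigned long, so a byte position is split into an array index and a bit shift. A hedged sketch of that extraction; the bitmap contents and the 64-bit-long assumption in the demo are mine.

```c
#include <stdint.h>
#include <stdio.h>

/* Return the 32-bit word of the bitmap that starts at byte offset "pos". */
static uint32_t groups_word(const unsigned long *groups, unsigned int pos)
{
    unsigned int idx = pos / sizeof(unsigned long);      /* which long */
    unsigned int shift = (pos % sizeof(unsigned long)) * 8; /* which bits of it */

    return (uint32_t)(groups[idx] >> shift);
}

int main(void)
{
    unsigned long groups[2] = { 0x1122334455667788UL, 0x99aabbccddeeff00UL };

    /* On a 64-bit box this walks the bitmap in 32-bit halves of each long. */
    printf("%08x %08x %08x\n",
           groups_word(groups, 0),
           groups_word(groups, 4),
           groups_word(groups, 8));
    return 0;
}
```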
|
/net/core/

filter.c
    2650  u32 first_sge, last_sge, i, shift, bytes_sg_total;   in BPF_CALL_4() (local)
    2728  shift = last_sge > first_sge ?   in BPF_CALL_4()
    2731  if (!shift)   in BPF_CALL_4()
    2739  if (i + shift >= NR_MSG_FRAG_IDS)   in BPF_CALL_4()
    2740  move_from = i + shift - NR_MSG_FRAG_IDS;   in BPF_CALL_4()
    2742  move_from = i + shift;   in BPF_CALL_4()
    2753  msg->sg.end = msg->sg.end - shift > msg->sg.end ?   in BPF_CALL_4()
    2754  msg->sg.end - shift + NR_MSG_FRAG_IDS :   in BPF_CALL_4()
    2755  msg->sg.end - shift;   in BPF_CALL_4()
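This BPF msg-pull helper walks scatterlist slots that live in a fixed ring of NR_MSG_FRAG_IDS entries, so moving an index forward by shift needs an explicit wrap test, and moving the end marker backward relies on detecting unsigned underflow. A hedged sketch of just those two wraparound idioms; RING_SLOTS and the demo values are stand-ins, not the kernel constants.

```c
#include <stdio.h>

#define RING_SLOTS 8u   /* stand-in for NR_MSG_FRAG_IDS */

/* index of the slot "shift" positions ahead of i, wrapping at RING_SLOTS */
static unsigned int ring_forward(unsigned int i, unsigned int shift)
{
    return (i + shift >= RING_SLOTS) ? i + shift - RING_SLOTS : i + shift;
}

/* move the end marker back by "shift"; the comparison catches unsigned
 * underflow the same way the kernel's "end - shift > end" test does */
static unsigned int ring_backward(unsigned int end, unsigned int shift)
{
    return (end - shift > end) ? end - shift + RING_SLOTS : end - shift;
}

int main(void)
{
    printf("%u %u\n", ring_forward(6, 3), ring_backward(1, 3));  /* 1 6 */
    return 0;
}
```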
|