Lines matching refs: jmp_table

51 static struct hid_bpf_jmp_table jmp_table;  variable
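
Every reference below goes through this one static table. A plausible reconstruction of its layout, inferred from the tail/head, entries[], progs[], enabled and map accesses in the listing; the exact field types and array sizes are assumptions, not taken from the source:

	struct hid_bpf_prog_entry {
		struct bpf_prog *prog;
		struct hid_device *hdev;
		enum hid_bpf_prog_type type;
		u16 idx;		/* slot in progs[] and bit number in enabled[] */
	};

	struct hid_bpf_jmp_table {
		struct bpf_map *map;	/* preloaded hid_jmp_table prog array, held with a uref */
		struct hid_bpf_prog_entry entries[HID_BPF_MAX_PROGS];	/* circular buffer */
		int tail, head;
		struct bpf_prog *progs[HID_BPF_MAX_PROGS];	/* idx -> attached prog */
		unsigned long enabled[BITS_TO_LONGS(HID_BPF_MAX_PROGS)];	/* idx may be run */
	};
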
84 FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) { in hid_bpf_program_count()
85 struct hid_bpf_prog_entry *entry = &jmp_table.entries[i]; in hid_bpf_program_count()
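
hid_bpf_program_count() walks only the live window of the circular buffer, from tail up to (but not including) head. A minimal sketch of that pattern, assuming FOR_ENTRIES()/NEXT()/PREV() wrap around a power-of-two table size (the macro bodies and the function's exact signature are assumptions):

	#define NEXT(idx)	(((idx) + 1) & (HID_BPF_MAX_PROGS - 1))
	#define PREV(idx)	(((idx) - 1) & (HID_BPF_MAX_PROGS - 1))
	#define FOR_ENTRIES(__i, __start, __end) \
		for (__i = __start; CIRC_CNT(__end, __i, HID_BPF_MAX_PROGS); __i = NEXT(__i))

	static int hid_bpf_program_count(struct hid_device *hdev, enum hid_bpf_prog_type type)
	{
		int i, n = 0;

		FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
			struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];

			if (entry->hdev == hdev && entry->type == type)
				n++;
		}

		return n;
	}
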
122 if (!test_bit(idx, jmp_table.enabled)) in hid_bpf_prog_run()
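
At run time (line 122) a slot is only used if its bit is still set in enabled, which is how a detached-but-not-yet-released program gets skipped. A hedged sketch of that guard; the prog_list layout and the way the dispatcher program is actually invoked are assumptions:

	for (i = 0; i < prog_list->prog_cnt; i++) {
		idx = prog_list->prog_idx[i];

		if (!test_bit(idx, jmp_table.enabled))
			continue;	/* detached, waiting for release/compaction */

		/* run the entrypoint prog, which bpf_tail_call()s into
		 * jmp_table.map at slot idx */
	}
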
177 FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) { in hid_bpf_populate_hdev()
178 struct hid_bpf_prog_entry *entry = &jmp_table.entries[i]; in hid_bpf_populate_hdev()
181 test_bit(entry->idx, jmp_table.enabled)) in hid_bpf_populate_hdev()
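
hid_bpf_populate_hdev() (lines 177-181) rebuilds a device's list of runnable slot indices with the same window walk, keeping only entries that belong to the device and are still enabled. Sketch; new_list and prog_cnt are assumed names:

	FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
		struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];

		if (entry->hdev == hdev && entry->type == type &&
		    test_bit(entry->idx, jmp_table.enabled))
			new_list->prog_idx[new_list->prog_cnt++] = entry->idx;
	}
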
193 jmp_table.progs[idx] = NULL; in __hid_bpf_do_release_prog()
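
__hid_bpf_do_release_prog() (line 193) is where a slot is actually handed back: the element is removed from the BPF prog array through the map fd, which drops the map's reference on the program, and the kernel-side pointer is cleared. Sketch, assuming a skel_map_delete_elem()-style helper alongside the skel_map_get_fd_by_id() calls seen in the listing:

	static void __hid_bpf_do_release_prog(int map_fd, unsigned int prog_table_idx)
	{
		/* removing the element drops the map's reference on the prog */
		skel_map_delete_elem(map_fd, &prog_table_idx);

		jmp_table.progs[prog_table_idx] = NULL;
	}
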
200 if (!jmp_table.map) in hid_bpf_release_progs()
204 map_fd = skel_map_get_fd_by_id(jmp_table.map->id); in hid_bpf_release_progs()
211 FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) { in hid_bpf_release_progs()
212 struct hid_bpf_prog_entry *entry = &jmp_table.entries[i]; in hid_bpf_release_progs()
216 if (test_bit(entry->idx, jmp_table.enabled)) in hid_bpf_release_progs()
227 FOR_ENTRIES(j, i, jmp_table.head) { in hid_bpf_release_progs()
230 next = &jmp_table.entries[j]; in hid_bpf_release_progs()
232 if (test_bit(next->idx, jmp_table.enabled)) in hid_bpf_release_progs()
246 FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) { in hid_bpf_release_progs()
247 struct hid_bpf_prog_entry *entry = &jmp_table.entries[i]; in hid_bpf_release_progs()
249 if (test_bit(entry->idx, jmp_table.enabled)) in hid_bpf_release_progs()
257 n = jmp_table.tail; in hid_bpf_release_progs()
258 FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) { in hid_bpf_release_progs()
259 struct hid_bpf_prog_entry *entry = &jmp_table.entries[i]; in hid_bpf_release_progs()
261 if (!test_bit(entry->idx, jmp_table.enabled)) in hid_bpf_release_progs()
264 jmp_table.entries[n] = jmp_table.entries[i]; in hid_bpf_release_progs()
268 jmp_table.head = n; in hid_bpf_release_progs()
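
The tail end of hid_bpf_release_progs() (lines 257-268) compacts the circular buffer: still-enabled entries are copied down toward tail and head is pulled back, so released entries stop being walked. Sketch; the n = NEXT(n) step between the copy and the new head is assumed, since that line does not appear in the listing:

	/* compact the entries list, keeping only the still-enabled programs */
	n = jmp_table.tail;
	FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
		struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];

		if (!test_bit(entry->idx, jmp_table.enabled))
			continue;	/* released above, drop it from the window */

		jmp_table.entries[n] = jmp_table.entries[i];
		n = NEXT(n);
	}

	jmp_table.head = n;
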
281 map_fd = skel_map_get_fd_by_id(jmp_table.map->id); in hid_bpf_release_prog_at()
299 map_fd = skel_map_get_fd_by_id(jmp_table.map->id); in hid_bpf_insert_prog()
308 if (!jmp_table.progs[i] && index < 0) { in hid_bpf_insert_prog()
310 jmp_table.progs[i] = prog; in hid_bpf_insert_prog()
312 __set_bit(i, jmp_table.enabled); in hid_bpf_insert_prog()
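
hid_bpf_insert_prog() (lines 299-312) linearly scans progs[] for a free slot, claims it, marks it enabled, and then presumably writes the program into the prog array map through the fd obtained from skel_map_get_fd_by_id(). Hedged sketch; the skel_map_update_elem() call and the error handling are assumptions:

	/* find the first free slot in the jump table */
	for (i = 0; i < HID_BPF_MAX_PROGS; i++) {
		if (!jmp_table.progs[i] && index < 0) {
			/* claim the slot */
			jmp_table.progs[i] = prog;
			index = i;
			__set_bit(i, jmp_table.enabled);
		}
	}
	if (index < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* publish the program in the BPF prog array */
	err = skel_map_update_elem(map_fd, &index, &prog_fd, 0);
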
363 __clear_bit(hid_link->hid_table_index, jmp_table.enabled); in hid_bpf_link_release()
436 jmp_table.tail = PREV(jmp_table.tail); in __hid_bpf_attach_prog()
437 prog_entry = &jmp_table.entries[jmp_table.tail]; in __hid_bpf_attach_prog()
440 prog_entry = &jmp_table.entries[jmp_table.head]; in __hid_bpf_attach_prog()
441 jmp_table.head = NEXT(jmp_table.head); in __hid_bpf_attach_prog()
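
__hid_bpf_attach_prog() (lines 436-441) grows the window at either end: an "insert at head" request takes the slot just before tail so the new program runs first, otherwise the entry is appended at head and runs last. Sketch; the flag check is assumed to use HID_BPF_FLAG_INSERT_HEAD:

	if (flags & HID_BPF_FLAG_INSERT_HEAD) {
		/* take the slot just before the current tail: runs first */
		jmp_table.tail = PREV(jmp_table.tail);
		prog_entry = &jmp_table.entries[jmp_table.tail];
	} else {
		/* append after the current head: runs last */
		prog_entry = &jmp_table.entries[jmp_table.head];
		jmp_table.head = NEXT(jmp_table.head);
	}
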
490 __clear_bit(prog_list->prog_idx[i], jmp_table.enabled); in __hid_bpf_destroy_device()
512 if (jmp_table.map) in hid_bpf_free_links_and_skel()
513 bpf_map_put_with_uref(jmp_table.map); in hid_bpf_free_links_and_skel()
553 jmp_table.map = bpf_map_get_with_uref(skel->maps.hid_jmp_table.map_fd); in hid_bpf_preload_skel()
554 if (IS_ERR(jmp_table.map)) { in hid_bpf_preload_skel()
555 err = PTR_ERR(jmp_table.map); in hid_bpf_preload_skel()
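
hid_bpf_preload_skel() (lines 553-555) keeps the preloaded hid_jmp_table map alive beyond the loader by taking a reference with bpf_map_get_with_uref(); hid_bpf_free_links_and_skel() (lines 512-513) drops it again with bpf_map_put_with_uref(). Sketch of the acquire path; the error label is an assumption:

	/* hold the preloaded prog array for the lifetime of the module;
	 * released in hid_bpf_free_links_and_skel() */
	jmp_table.map = bpf_map_get_with_uref(skel->maps.hid_jmp_table.map_fd);
	if (IS_ERR(jmp_table.map)) {
		err = PTR_ERR(jmp_table.map);
		goto out;
	}
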