| /linux/drivers/net/ethernet/mellanox/mlx5/core/lib/ |
| A D | crypto.c |
    23  ({ typeof(bulk) _bulk = (bulk); \
   296  bulk = kzalloc(sizeof(*bulk), GFP_KERNEL);  in mlx5_crypto_dek_bulk_create()
   297  if (!bulk)  in mlx5_crypto_dek_bulk_create()
   324  return bulk;  in mlx5_crypto_dek_bulk_create()
   329  kfree(bulk);  in mlx5_crypto_dek_bulk_create()
   379  if (bulk) {  in mlx5_crypto_dek_pool_pop()
   408  bulk->avail_start = bulk->num_deks;  in mlx5_crypto_dek_pool_pop()
   429  struct mlx5_crypto_dek_bulk *bulk = dek->bulk;  in mlx5_crypto_dek_free_locked() local
   443  if (!bulk->avail_deks && !bulk->in_use_deks)  in mlx5_crypto_dek_free_locked()
   572  bulk->avail_deks = bulk->num_deks;  in mlx5_crypto_dek_pool_reset_synced()
   [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| A D | fs_counters.c |
   230  if (counter->bulk)  in mlx5_fc_release()
   548  counter->bulk = bulk;  in mlx5_fc_init()
   554  return bitmap_weight(bulk->bitmask, bulk->bulk_len);  in mlx5_fc_bulk_get_free_fcs_amount()
   569  bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);  in mlx5_fc_bulk_create()
   570  if (!bulk)  in mlx5_fc_bulk_create()
   585  mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);  in mlx5_fc_bulk_create()
   589  return bulk;  in mlx5_fc_bulk_create()
   594  kvfree(bulk);  in mlx5_fc_bulk_create()
   602  if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {  in mlx5_fc_bulk_destroy()
   609  kvfree(bulk);  in mlx5_fc_bulk_destroy()
   [all …]
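
The fs_counters.c hits revolve around one allocation idiom: a bulk header followed by a flexible array of counters, sized in a single kvzalloc() call with struct_size(). A minimal sketch of that idiom with hypothetical names (the real mlx5_fc/mlx5_fc_bulk layouts are not reproduced here):

  #include <linux/mm.h>        /* kvzalloc(), kvfree() */
  #include <linux/overflow.h>  /* struct_size() */
  #include <linux/slab.h>
  #include <linux/types.h>

  struct demo_counter {
          u32 id;
  };

  struct demo_counter_bulk {
          u32 base_id;
          int bulk_len;
          struct demo_counter fcs[];      /* flexible array, one slot per counter */
  };

  static struct demo_counter_bulk *demo_bulk_create(u32 base_id, int bulk_len)
  {
          struct demo_counter_bulk *bulk;
          int i;

          /* struct_size() computes header + bulk_len elements with overflow checking. */
          bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
          if (!bulk)
                  return NULL;

          bulk->base_id = base_id;
          bulk->bulk_len = bulk_len;
          for (i = 0; i < bulk_len; i++)
                  bulk->fcs[i].id = base_id + i;

          return bulk;
  }

The whole bulk is released with a single kvfree(bulk), which is exactly what the mlx5_fc_bulk_create()/mlx5_fc_bulk_destroy() hits above show.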
|
| /linux/drivers/gpu/drm/ttm/ |
| A D | ttm_resource.c |
    42  cursor->bulk = NULL;  in ttm_resource_cursor_clear_bulk()
    54  if (WARN_ON_ONCE(bulk != cursor->bulk)) {  in ttm_resource_cursor_move_bulk_tail()
   106  memset(bulk, 0, sizeof(*bulk));  in ttm_lru_bulk_move_init()
   107  INIT_LIST_HEAD(&bulk->cursor_list);  in ttm_lru_bulk_move_init()
   120  struct ttm_lru_bulk_move *bulk)  in ttm_lru_bulk_move_fini() argument
   123  ttm_bulk_move_drop_cursors(bulk);  in ttm_lru_bulk_move_fini()
   140  ttm_bulk_move_adjust_cursors(bulk);  in ttm_lru_bulk_move_tail()
   564  bulk = bo->bulk_move;  in ttm_resource_cursor_check_bulk()
   566  if (cursor->bulk != bulk) {  in ttm_resource_cursor_check_bulk()
   567  if (bulk) {  in ttm_resource_cursor_check_bulk()
   [all …]
|
| /linux/drivers/staging/vc04_services/interface/ |
| A D | TESTING |
    49  Testing bulk transfer for alignment.
    50  Testing bulk transfer at PAGE_SIZE.
    61  vchi bulk (size 0, 0 async, 0 oneway) -> 546.000000us
    62  vchi bulk (size 0, 0 oneway) -> 230.000000us
    65  vchi bulk (size 0, 0 async, 0 oneway) -> 296.000000us
    66  vchi bulk (size 0, 0 oneway) -> 266.000000us
    68  vchi bulk (size 0, 0 oneway) -> 456.000000us
    70  vchi bulk (size 0, 0 oneway) -> 640.000000us
    72  vchi bulk (size 0, 0 oneway) -> 2309.000000us
    78  vchi bulk (size 0, 0 oneway) -> nanus
    [all …]
|
| /linux/drivers/staging/vc04_services/vchiq-mmal/ |
| A D | mmal-vchiq.c |
   150  } bulk; /* bulk data */  member
   270  msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,  in buffer_work_cb()
   271  msg_context->u.bulk.port,  in buffer_work_cb()
   367  msg_context->u.bulk.buffer_used =  in inline_receive()
   397  msg_context->u.bulk.port = port;  in buffer_from_host()
   398  msg_context->u.bulk.buffer = buf;  in buffer_from_host()
   477  msg_context->u.bulk.mmal_flags =  in buffer_to_host_cb()
   490  msg_context->u.bulk.status =  in buffer_to_host_cb()
   498  msg_context->u.bulk.status = 0;  in buffer_to_host_cb()
   503  msg_context->u.bulk.status =  in buffer_to_host_cb()
   [all …]
|
| /linux/drivers/gpu/drm/msm/ |
| A D | msm_io_utils.c |
    17  struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,  in msm_clk_bulk_get_clock() argument
    25  for (i = 0; bulk && i < count; i++) {  in msm_clk_bulk_get_clock()
    26  if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))  in msm_clk_bulk_get_clock()
    27  return bulk[i].clk;  in msm_clk_bulk_get_clock()
|
| A D | msm_mdss.c |
   406  struct clk_bulk_data *bulk;  in mdp5_mdss_parse_clock() local
   413  bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);  in mdp5_mdss_parse_clock()
   414  if (!bulk)  in mdp5_mdss_parse_clock()
   417  bulk[num_clocks++].id = "iface";  in mdp5_mdss_parse_clock()
   418  bulk[num_clocks++].id = "bus";  in mdp5_mdss_parse_clock()
   419  bulk[num_clocks++].id = "vsync";  in mdp5_mdss_parse_clock()
   421  ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);  in mdp5_mdss_parse_clock()
   425  *clocks = bulk;  in mdp5_mdss_parse_clock()
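
The clk_bulk pattern these hits use is small enough to show whole. A minimal sketch, assuming a platform driver with three hypothetical clock names (the demo_* function and variable names are illustrative, not taken from msm_mdss.c):

  #include <linux/clk.h>
  #include <linux/device.h>
  #include <linux/platform_device.h>
  #include <linux/types.h>

  static int demo_parse_clocks(struct platform_device *pdev,
                               struct clk_bulk_data **clocks, u32 *num)
  {
          struct clk_bulk_data *bulk;
          u32 num_clocks = 0;
          int ret;

          bulk = devm_kcalloc(&pdev->dev, 3, sizeof(*bulk), GFP_KERNEL);
          if (!bulk)
                  return -ENOMEM;

          bulk[num_clocks++].id = "iface";
          bulk[num_clocks++].id = "bus";
          bulk[num_clocks++].id = "vsync";

          /* "optional": clocks missing from the DT node come back as NULL, not as an error. */
          ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
          if (ret)
                  return ret;

          *clocks = bulk;
          *num = num_clocks;
          return 0;
  }

The caller can then switch the whole set at once with clk_bulk_prepare_enable(num, bulk) and clk_bulk_disable_unprepare(num, bulk).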
|
| /linux/Documentation/w1/masters/ |
| A D | ds2490.rst |
    49  - The ds2490 specification doesn't cover short bulk in reads in
    51  available, the bulk read will return an error and the hardware will
    52  clear the entire bulk in buffer. It would be possible to read the
    63  most of the time one of the bulk out or in, and usually the bulk in
    64  would fail. qemu sets a 50ms timeout and the bulk in would timeout
    65  even when the status shows data available. A bulk out write would
|
| /linux/drivers/media/usb/uvc/ |
| A D | uvc_video.c |
  1346  nbytes = min(stream->bulk.max_payload_size - stream->bulk.payload_size,  in uvc_video_encode_data()
  1546  if (stream->bulk.header_size == 0 && !stream->bulk.skip_payload) {  in uvc_video_decode_bulk()
  1582  stream->bulk.payload_size >= stream->bulk.max_payload_size) {  in uvc_video_decode_bulk()
  1585  stream->bulk.payload_size);  in uvc_video_decode_bulk()
  1590  stream->bulk.header_size = 0;  in uvc_video_decode_bulk()
  1591  stream->bulk.skip_payload = 0;  in uvc_video_decode_bulk()
  1626  stream->bulk.payload_size == stream->bulk.max_payload_size) {  in uvc_video_encode_bulk()
  1635  stream->bulk.header_size = 0;  in uvc_video_encode_bulk()
  1982  stream->bulk.header_size = 0;  in uvc_video_start_transfer()
  1983  stream->bulk.skip_payload = 0;  in uvc_video_start_transfer()
  [all …]
|
| /linux/drivers/staging/vc04_services/interface/vchiq_arm/ |
| A D | vchiq_core.c |
  1319  bulk->actual);  in notify_bulks()
  1323  bulk->actual);  in notify_bulks()
  1441  bulk->remote_size = 0;  in abort_outstanding_bulks()
  1452  service->remoteport, bulk->size, bulk->remote_size);  in abort_outstanding_bulks()
  1455  bulk->data = 0;  in abort_outstanding_bulks()
  1456  bulk->size = 0;  in abort_outstanding_bulks()
  1756  localport, bulk->actual, &bulk->data);  in parse_message()
  2708  bulk->mode = mode;  in vchiq_bulk_xfer_queue_msg_interruptible()
  2709  bulk->dir = dir;  in vchiq_bulk_xfer_queue_msg_interruptible()
  2711  bulk->size = size;  in vchiq_bulk_xfer_queue_msg_interruptible()
  [all …]
|
| A D | vchiq_arm.c |
   652  bulk->data = pagelistinfo->dma_addr;  in vchiq_prepare_bulk_data()
   658  bulk->remote_data = pagelistinfo;  in vchiq_prepare_bulk_data()
   666  if (bulk && bulk->remote_data && bulk->actual)  in vchiq_complete_bulk()
   668  bulk->actual);  in vchiq_complete_bulk()
   951  struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;  in vchiq_blocking_bulk_transfer() local
   953  if (bulk) {  in vchiq_blocking_bulk_transfer()
   956  if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {  in vchiq_blocking_bulk_transfer()
   962  bulk->userdata = NULL;  in vchiq_blocking_bulk_transfer()
   975  struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;  in vchiq_blocking_bulk_transfer() local
   977  if (bulk) {  in vchiq_blocking_bulk_transfer()
   [all …]
|
| /linux/Documentation/ABI/testing/ |
| A D | sysfs-driver-w1_therm |
    75  * If a bulk read has been triggered, it will directly
    76  return the temperature computed when the bulk read
    80  * If no bulk read has been triggered, it will trigger
   115  (RW) trigger a bulk read conversion. read the status
   124  no bulk operation. Reading temperature will
   128  'trigger': trigger a bulk read on all supporting
   131  Note that if a bulk read is sent but one sensor is not read
   134  of the bulk read command (not the current temperature).
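
From userspace, the trigger described in that ABI file is just a sysfs write followed by re-reads of the same attribute. A hedged sketch, assuming a single bus master exposed as w1_bus_master1 (adjust the path for your system):

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
          const char *attr = "/sys/bus/w1/devices/w1_bus_master1/therm_bulk_read";
          char status[4] = "";
          int fd;

          /* Start a conversion on every slave that supports bulk read. */
          fd = open(attr, O_WRONLY);
          if (fd < 0)
                  return 1;
          if (write(fd, "trigger", strlen("trigger")) < 0) {
                  close(fd);
                  return 1;
          }
          close(fd);

          /* Re-read the attribute: 1 = all conversions done, 0 = no bulk op, -1 = still pending. */
          fd = open(attr, O_RDONLY);
          if (fd < 0)
                  return 1;
          read(fd, status, sizeof(status) - 1);
          close(fd);
          printf("bulk read status: %s\n", status);
          return 0;
  }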
|
| A D | configfs-usb-gadget-sourcesink | 14 bulk_qlen depth of queue for bulk
|
| /linux/include/drm/ttm/ |
| A D | ttm_resource.h |
   323  struct ttm_lru_bulk_move *bulk;  member
   416  void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
   417  void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
   419  struct ttm_lru_bulk_move *bulk);
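
These declarations are the public surface of TTM's bulk-move mechanism: a driver keeps one struct ttm_lru_bulk_move per VM, attaches its buffer objects to it, and then bumps the whole range to the LRU tail in one call. A hedged sketch loosely modelled on the in-tree GPU drivers (struct demo_vm and the demo_* helpers are invented; locking details vary per driver):

  #include <drm/ttm/ttm_bo.h>
  #include <drm/ttm/ttm_device.h>
  #include <drm/ttm/ttm_resource.h>

  struct demo_vm {
          struct ttm_lru_bulk_move lru_bulk_move;
  };

  static void demo_vm_init(struct demo_vm *vm)
  {
          ttm_lru_bulk_move_init(&vm->lru_bulk_move);
  }

  static void demo_vm_attach_bo(struct demo_vm *vm, struct ttm_buffer_object *bo)
  {
          /* Caller must hold bo->base.resv; ttm_bo_set_bulk_move() asserts it.
           * From now on this BO's resources move on the LRU as part of the bulk. */
          ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
  }

  static void demo_vm_move_to_lru_tail(struct ttm_device *bdev, struct demo_vm *vm)
  {
          /* Bulk-move everything tracked by this VM to the end of its LRUs. */
          spin_lock(&bdev->lru_lock);
          ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
          spin_unlock(&bdev->lru_lock);
  }

At teardown, ttm_lru_bulk_move_fini() (the third declaration above) undoes the init once every BO has been detached from the bulk.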
|
| /linux/drivers/remoteproc/ |
| A D | qcom_wcnss.c |
   431  struct regulator_bulk_data *bulk;  in wcnss_init_regulators() local
   445  bulk = devm_kcalloc(wcnss->dev,  in wcnss_init_regulators()
   448  if (!bulk)  in wcnss_init_regulators()
   452  bulk[i].supply = info[i].name;  in wcnss_init_regulators()
   454  ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk);  in wcnss_init_regulators()
   460  regulator_set_voltage(bulk[i].consumer,  in wcnss_init_regulators()
   465  regulator_set_load(bulk[i].consumer, info[i].load_uA);  in wcnss_init_regulators()
   468  wcnss->vregs = bulk;  in wcnss_init_regulators()
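
The same bulk idiom exists for regulators, which is what the qcom_wcnss.c hits show. A hedged sketch with made-up supply names (the per-SoC info[] table, voltages and load votes of the real driver are not reproduced here):

  #include <linux/device.h>
  #include <linux/kernel.h>
  #include <linux/regulator/consumer.h>

  /* Illustrative supply names; the real driver takes them from a per-SoC table. */
  static const char * const demo_supplies[] = { "vddmx", "vddcx", "vddpx" };

  static int demo_init_regulators(struct device *dev,
                                  struct regulator_bulk_data **out)
  {
          struct regulator_bulk_data *bulk;
          int ret, i;

          bulk = devm_kcalloc(dev, ARRAY_SIZE(demo_supplies), sizeof(*bulk),
                              GFP_KERNEL);
          if (!bulk)
                  return -ENOMEM;

          for (i = 0; i < ARRAY_SIZE(demo_supplies); i++)
                  bulk[i].supply = demo_supplies[i];

          /* Resolves every supply in one call; fails if any of them is absent. */
          ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(demo_supplies), bulk);
          if (ret)
                  return ret;

          *out = bulk;
          return 0;
  }

regulator_bulk_enable()/regulator_bulk_disable() then switch the whole set at once, which is why the driver only needs to keep the bulk array (wcnss->vregs) around.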
|
| /linux/drivers/media/usb/dvb-usb-v2/ |
| A D | usb_urb.c |
   155  stream->props.u.bulk.buffersize,  in usb_urb_alloc_bulk_urbs()
   260  buf_size = stream->props.u.bulk.buffersize;  in usb_urb_reconfig()
   281  props->u.bulk.buffersize ==  in usb_urb_reconfig()
   282  stream->props.u.bulk.buffersize)  in usb_urb_reconfig()
   326  stream->props.u.bulk.buffersize);  in usb_urb_initv2()
|
| /linux/Documentation/driver-api/usb/ |
| A D | bulk-streams.rst |
     1  USB bulk streams
     8  device driver to overload a bulk endpoint so that multiple transfers can be
    41  ID for the bulk IN and OUT endpoints used in a Bi-directional command sequence.
    46  declares how many stream IDs it can support, and each bulk endpoint on a
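
A hedged sketch of the stream API that document describes, assuming the driver has already resolved its bulk IN/OUT endpoints. usb_alloc_streams(), usb_free_streams() and urb->stream_id are the real interfaces; everything named demo_* is illustrative:

  #include <linux/kernel.h>
  #include <linux/usb.h>

  static int demo_enable_streams(struct usb_interface *intf,
                                 struct usb_host_endpoint *bulk_in,
                                 struct usb_host_endpoint *bulk_out,
                                 unsigned int num_streams)
  {
          struct usb_host_endpoint *eps[] = { bulk_in, bulk_out };
          int ret;

          /* Returns the number of streams actually allocated, or a negative errno. */
          ret = usb_alloc_streams(intf, eps, ARRAY_SIZE(eps), num_streams,
                                  GFP_KERNEL);
          if (ret < 0)
                  return ret;

          return 0;
  }

  static void demo_tag_urb(struct urb *urb, unsigned int stream_id)
  {
          /* Stream IDs start at 1; 0 means the default queue (no stream). */
          urb->stream_id = stream_id;
  }

Before the interface is released, usb_free_streams() with the same endpoint array returns the endpoints to ordinary bulk operation.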
|
| A D | index.rst | 10 bulk-streams
|
| /linux/drivers/media/usb/dvb-usb/ |
| A D | dtt200u.c |
   213  .bulk = {
   265  .bulk = {
   317  .bulk = {
   369  .bulk = {
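
The ".bulk = {" hits here and in the neighbouring dvb-usb drivers are stream descriptors inside each device's properties table. A hedged sketch of what one such initializer looks like (endpoint address, URB count and buffer size are illustrative values, not the ones used by dtt200u.c):

  #include "dvb-usb.h"

  /* Illustrative bulk stream configuration for a dvb-usb (v1) adapter. */
  static const struct usb_data_stream_properties demo_stream = {
          .type = USB_BULK,
          .count = 7,             /* URBs kept in flight */
          .endpoint = 0x02,       /* bulk endpoint address */
          .u = {
                  .bulk = {
                          .buffersize = 4096,     /* bytes per URB */
                  }
          },
  };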
|
| A D | dibusb-mb.c |
   224  .bulk = {
   314  .bulk = {
   383  .bulk = {
   445  .bulk = {
|
| A D | cxusb.c |
  1744  .bulk = {
  1796  .bulk = {
  1855  .bulk = {
  1923  .bulk = {
  1982  .bulk = {
  2034  .bulk = {
  2088  .bulk = {
  2145  .bulk = {
  2199  .bulk = {
  2245  .bulk = {
  [all …]
|
| /linux/lib/ |
| A D | test_objpool.c |
    72  int bulk[2]; /* for thread and irq */  member
   208  item->bulk[0] = test->bulk_normal;  in ot_init_cpu_item()
   209  item->bulk[1] = test->bulk_irq;  in ot_init_cpu_item()
   330  for (i = 0; i < item->bulk[irq]; i++)  in ot_bulk_sync()
   511  for (i = 0; i < item->bulk[irq]; i++)  in ot_bulk_async()
|
| /linux/Documentation/usb/ |
| A D | ehci.rst |
    58  At this writing the driver should comfortably handle all control, bulk,
   125  and bulk transfers. Shows each active qh and the qtds
   161  good to keep in mind that bulk transfers are always in 512 byte packets,
   165  So more than 50 MByte/sec is available for bulk transfers, when both
   195  you issue a control or bulk request you can often expect to learn that
   203  or using bulk queuing if a series of small requests needs to be issued.
   213  I/O be efficient, it's better to just queue up several (bulk) requests
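
The "queue up several (bulk) requests" advice translates to submitting more than one URB against the same endpoint so the host controller always has work ready. A sketch under the assumption that the caller has already allocated the URBs and DMA-able buffers (all demo_* names are made up):

  #include <linux/usb.h>

  static void demo_complete(struct urb *urb)
  {
          /* Resubmit or recycle the URB here to keep the endpoint queue full. */
  }

  static int demo_queue_bulk(struct usb_device *udev, unsigned int ep,
                             void *bufs[], int buf_len,
                             struct urb *urbs[], int n)
  {
          int i, ret;

          for (i = 0; i < n; i++) {
                  usb_fill_bulk_urb(urbs[i], udev, usb_sndbulkpipe(udev, ep),
                                    bufs[i], buf_len, demo_complete, NULL);
                  /* GFP_KERNEL assumes process context; use GFP_ATOMIC in completion paths. */
                  ret = usb_submit_urb(urbs[i], GFP_KERNEL);
                  if (ret)
                          return ret;
          }
          return 0;
  }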
|
| /linux/drivers/interconnect/ |
| A D | Makefile | 4 icc-core-objs := core.o bulk.o debugfs-client.o
|
| /linux/drivers/usb/core/ |
| A D | devio.c |
  1297  len1 = bulk->len;  in do_proc_bulk()
  1301  if (bulk->ep & USB_DIR_IN)  in do_proc_bulk()
  1333  tmo = bulk->timeout;  in do_proc_bulk()
  1334  if (bulk->ep & 0x80) {  in do_proc_bulk()
  1372  struct usbdevfs_bulktransfer bulk;  in proc_bulk() local
  1374  if (copy_from_user(&bulk, arg, sizeof(bulk)))  in proc_bulk()
  1376  return do_proc_bulk(ps, &bulk);  in proc_bulk()
  2151  struct usbdevfs_bulktransfer bulk;  in proc_bulk_compat() local
  2154  if (get_user(bulk.ep, &p32->ep) ||  in proc_bulk_compat()
  2159  bulk.data = compat_ptr(addr);  in proc_bulk_compat()
  [all …]
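
do_proc_bulk() is the kernel side of the USBDEVFS_BULK ioctl, so the matching userspace looks roughly like the hedged sketch below (device path, endpoint number and timeout are placeholders):

  #include <fcntl.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/usbdevice_fs.h>

  int demo_bulk_out(const char *devpath, unsigned char ep,
                    void *buf, unsigned int len)
  {
          struct usbdevfs_bulktransfer bulk;
          int fd, ret;

          fd = open(devpath, O_RDWR);     /* e.g. /dev/bus/usb/001/004 */
          if (fd < 0)
                  return -1;

          memset(&bulk, 0, sizeof(bulk));
          bulk.ep = ep;                   /* OR in USB_DIR_IN (0x80) for a bulk IN read */
          bulk.len = len;
          bulk.timeout = 1000;            /* milliseconds */
          bulk.data = buf;

          /* Returns the number of bytes transferred, or -1 with errno set. */
          ret = ioctl(fd, USBDEVFS_BULK, &bulk);
          close(fd);
          return ret;
  }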
|