/linux-6.3-rc2/drivers/dma/idxd/

init.c
    169  wq->idxd = idxd;   in idxd_setup_wqs()
    239  engine->idxd = idxd;   in idxd_setup_engines()
    286  group->idxd = idxd;   in idxd_setup_groups()
    425  idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);   in idxd_read_caps()
    434  idxd->max_groups = idxd->hw.group_cap.num_groups;   in idxd_read_caps()
    436  idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;   in idxd_read_caps()
    438  idxd->nr_rdbufs = idxd->max_rdbufs;   in idxd_read_caps()
    450  idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;   in idxd_read_caps()
    452  idxd->max_wqs = idxd->hw.wq_cap.num_wqs;   in idxd_read_caps()
    459  idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +   in idxd_read_caps()
    [all …]
device.c
    110  struct idxd_device *idxd = wq->idxd;   in idxd_wq_alloc_resources() local
    180  struct idxd_device *idxd = wq->idxd;   in idxd_wq_enable() local
    205  struct idxd_device *idxd = wq->idxd;   in idxd_wq_disable() local
    234  struct idxd_device *idxd = wq->idxd;   in idxd_wq_drain() local
    250  struct idxd_device *idxd = wq->idxd;   in idxd_wq_reset() local
    266  struct idxd_device *idxd = wq->idxd;   in idxd_wq_map_portal() local
    304  struct idxd_device *idxd = wq->idxd;   in __idxd_wq_set_priv_locked() local
    319  struct idxd_device *idxd = wq->idxd;   in __idxd_wq_set_pasid_locked() local
    352  struct idxd_device *idxd = wq->idxd;   in idxd_wq_disable_pasid() local
    378  struct idxd_device *idxd = wq->idxd;   in idxd_wq_disable_cleanup() local
    [all …]
sysfs.c
    36   struct idxd_device *idxd = engine->idxd;   in engine_group_id_store() local
    137  struct idxd_device *idxd = group->idxd;   in group_read_buffers_reserved_store() local
    203  struct idxd_device *idxd = group->idxd;   in group_read_buffers_allowed_store() local
    266  struct idxd_device *idxd = group->idxd;   in group_use_read_buffer_limit_store() local
    632  struct idxd_device *idxd = wq->idxd;   in wq_group_id_store() local
    684  struct idxd_device *idxd = wq->idxd;   in wq_mode_store() local
    735  struct idxd_device *idxd = wq->idxd;   in wq_size_store() local
    772  struct idxd_device *idxd = wq->idxd;   in wq_priority_store() local
    808  struct idxd_device *idxd = wq->idxd;   in wq_block_on_fault_store() local
    850  struct idxd_device *idxd = wq->idxd;   in wq_threshold_store() local
    [all …]
irq.c
    27   struct idxd_device *idxd;   member
    36   idxd_device_reset(idxd);   in idxd_device_reinit()
    72   struct idxd_device *idxd = wq->idxd;   in idxd_int_handle_revoke_drain() local
    132  struct idxd_device *idxd = revoke->idxd;   in idxd_int_handle_revoke() local
    234  idxd->sw_err.bits[i] = ioread64(idxd->reg_base +   in process_misc_interrupts()
    240  if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {   in process_misc_interrupts()
    273  revoke->idxd = idxd;   in process_misc_interrupts()
    279  idxd_wqs_quiesce(idxd);   in process_misc_interrupts()
    317  queue_work(idxd->wq, &idxd->work);   in process_misc_interrupts()
    320  idxd_wqs_quiesce(idxd);   in process_misc_interrupts()
    [all …]
perfmon.c
    126  struct idxd_device *idxd = idxd_pmu->idxd;   in perfmon_assign_hw_event() local
    200  struct idxd_device *idxd;   in perfmon_pmu_event_init() local
    233  struct idxd_device *idxd;   in perfmon_pmu_read_counter() local
    328  struct idxd_device *idxd;   in perfmon_pmu_event_start() local
    388  idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i];   in perfmon_pmu_event_stop()
    497  if (!idxd->idxd_pmu)   in perfmon_pmu_remove()
    502  kfree(idxd->idxd_pmu);   in perfmon_pmu_remove()
    503  idxd->idxd_pmu = NULL;   in perfmon_pmu_remove()
    567  idxd_pmu->idxd = idxd;   in perfmon_pmu_init()
    582  perfmon_reset(idxd);   in perfmon_pmu_init()
    [all …]
perfmon.h
    38   return idxd_pmu->idxd;   in event_to_idxd()
    47   return idxd_pmu->idxd;   in pmu_to_idxd()
    88   (PERFMON_TABLE_OFFSET(idxd) + (offset))
    90   #define PERFCAP_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFCAP_OFFSET))   argument
    91   #define PERFRST_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFRST_OFFSET))   argument
    92   #define OVFSTATUS_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_OVFSTATUS_OFFSET))   argument
    93   #define PERFFRZ_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFFRZ_OFFSET))   argument
    98   #define CNTRCFG_REG(idxd, cntr) \   argument
    100  #define CNTRDATA_REG(idxd, cntr) \   argument
    102  #define CNTRCAP_REG(idxd, cntr) \   argument
    [all …]
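The perfmon.h hits are a family of register-offset macros layered on a per-device base. Their full definitions are truncated in this listing, so the following is only a hedged reconstruction of how such a layering typically composes: the EX_-prefixed names, the idxd->perfmon_offset field used as the base, and the wrapper function are assumptions for illustration; only PERFMON_REG_OFFSET, PERFCAP_REG and IDXD_PERFCAP_OFFSET come from the listing itself.

#include <linux/io.h>

#include "idxd.h"	/* driver-internal header; the sketch assumes it builds inside the driver */

/*
 * Hedged reconstruction of the macro layering hinted at by perfmon.h lines
 * 88-93: a per-device perfmon table base plus fixed register offsets.
 * PERFMON_TABLE_OFFSET()'s real body is truncated above, so the address math
 * and the idxd->perfmon_offset field used here are assumptions.
 */
#define EX_PERFMON_TABLE_OFFSET(idxd)    ((idxd)->reg_base + (idxd)->perfmon_offset)
#define EX_PERFMON_REG_OFFSET(idxd, off) (EX_PERFMON_TABLE_OFFSET(idxd) + (off))
#define EX_PERFCAP_REG(idxd)             EX_PERFMON_REG_OFFSET(idxd, IDXD_PERFCAP_OFFSET)

/* Reading the capability register then reduces to a plain MMIO read: */
static inline u64 ex_read_perfcap(struct idxd_device *idxd)
{
	return ioread64(EX_PERFCAP_REG(idxd));
}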
cdev.c
    74   struct idxd_device *idxd;   in idxd_cdev_open() local
    82   idxd = wq->idxd;   in idxd_cdev_open()
    83   dev = &idxd->pdev->dev;   in idxd_cdev_open()
    143  struct idxd_device *idxd = wq->idxd;   in idxd_cdev_release() local
    193  struct idxd_device *idxd = wq->idxd;   in idxd_cdev_mmap() local
    219  struct idxd_device *idxd = wq->idxd;   in idxd_cdev_poll() local
    224  if (idxd->sw_err.valid)   in idxd_cdev_poll()
    246  struct idxd_device *idxd = wq->idxd;   in idxd_wq_add_cdev() local
    275  rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id);   in idxd_wq_add_cdev()
    308  struct idxd_device *idxd = wq->idxd;   in idxd_user_drv_probe() local
    [all …]
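cdev.c exposes user-type work queues as character devices; the dev_set_name() call at line 275 produces node names following "%s/wq%u.%u", e.g. /dev/dsa/wq0.0 for a DSA device. Below is a hedged user-space sketch of opening such a node and mapping its submission portal through the mmap handler listed above. The device path, the 4 KiB mapping size and the open flags are assumptions about a typical DSA setup; actual descriptor submission is left out.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Node name follows the "%s/wq%u.%u" pattern from idxd_wq_add_cdev();
	 * "dsa" as the prefix and wq0.0 as the instance are assumptions. */
	int fd = open("/dev/dsa/wq0.0", O_RDWR);
	if (fd < 0) {
		perror("open wq");
		return 1;
	}

	/* Map the wq submission portal exposed by idxd_cdev_mmap();
	 * a single 4 KiB page is assumed here. */
	void *portal = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, 0);
	if (portal == MAP_FAILED) {
		perror("mmap portal");
		close(fd);
		return 1;
	}

	/* Descriptors would now be written into the portal with MOVDIR64B/ENQCMD. */

	munmap(portal, 4096);
	close(fd);
	return 0;
}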
idxd.h
    89   struct idxd_device *idxd;   member
    104  struct idxd_device *idxd;   member
    188  struct idxd_device *idxd;   member
    223  struct idxd_device *idxd;   member
    251  struct idxd_device *idxd;   member
    349  #define idxd_confdev(idxd) &idxd->idxd_dev.conf_dev   argument
    406  return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie;   in idxd_get_ie()
    560  idxd->max_batch_size = 0;   in idxd_set_max_batch_size()
    562  idxd->max_batch_size = max_batch_size;   in idxd_set_max_batch_size()
    599  void idxd_wqs_quiesce(struct idxd_device *idxd);
    [all …]
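Most hits in idxd.h (and in device.c and sysfs.c above) are the same shape: work queues, engines, groups and the PMU each carry a struct idxd_device *idxd back-pointer, and helpers start by pulling the device out of the object they were handed. A minimal sketch of that pattern follows; every name other than the idxd member is invented here for illustration and is not the driver's real layout.

#include <linux/pci.h>
#include <linux/errno.h>

/* Illustrative stand-ins, not the real struct idxd_device / struct idxd_wq. */
struct example_idxd_device {
	struct pci_dev *pdev;	/* parent PCI device; field name assumed */
	bool enabled;		/* stand-in for the real idxd->state checks */
};

struct example_idxd_wq {
	int id;
	struct example_idxd_device *idxd;	/* the back-pointer every hit above revolves around */
};

static int example_wq_enable(struct example_idxd_wq *wq)
{
	struct example_idxd_device *idxd = wq->idxd;	/* same opening idiom as device.c:180 */

	if (!idxd->enabled)
		return -ENXIO;
	/* ... program the work queue through the device's MMIO registers ... */
	return 0;
}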
dma.c
    27   struct idxd_device *idxd = desc->wq->idxd;   in idxd_dma_complete_txd() local
    111  struct idxd_device *idxd = wq->idxd;   in idxd_dma_submit_memcpy() local
    117  if (len > idxd->max_xfer_bytes)   in idxd_dma_submit_memcpy()
    200  struct device *dev = &idxd->pdev->dev;   in idxd_register_dma_device()
    233  idxd_dma->idxd = idxd;   in idxd_register_dma_device()
    238  idxd->idxd_dma = idxd_dma;   in idxd_register_dma_device()
    249  struct idxd_device *idxd = wq->idxd;   in idxd_register_dma_channel() local
    251  struct device *dev = &idxd->pdev->dev;   in idxd_register_dma_channel()
    301  struct idxd_device *idxd = wq->idxd;   in idxd_dmaengine_drv_probe() local
    304  if (idxd->state != IDXD_DEV_ENABLED)   in idxd_dmaengine_drv_probe()
    [all …]
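dma.c registers each enabled work queue as a dmaengine channel (idxd_register_dma_channel(), idxd_dmaengine_drv_probe()), so generic dmaengine clients can use it without knowing about idxd. The sketch below is such a client written purely against the stock dmaengine API; it is not taken from the driver, and whether an idxd wq actually services it depends on which memcpy-capable channels are available.

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int example_memcpy_via_dmaengine(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Any memcpy-capable channel will do; an idxd wq is one possibility. */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* src/dst are assumed to be already DMA-mapped for chan->device->dev. */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		dma_release_channel(chan);
		return -EIO;
	}

	dma_async_issue_pending(chan);
	dma_sync_wait(chan, cookie);	/* polling wait; good enough for a sketch */

	dma_release_channel(chan);
	return 0;
}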
submit.c
    14   struct idxd_device *idxd = wq->idxd;   in __get_desc() local
    18   memset(desc->completion, 0, idxd->data->compl_size);   in __get_desc()
    21   if (device_pasid_enabled(idxd))   in __get_desc()
    22   desc->hw->pasid = idxd->pasid;   in __get_desc()
    30   struct idxd_device *idxd = wq->idxd;   in idxd_alloc_desc() local
    35   if (idxd->state != IDXD_DEV_ENABLED)   in idxd_alloc_desc()
    168  struct idxd_device *idxd = wq->idxd;   in idxd_submit_desc() local
    174  if (idxd->state != IDXD_DEV_ENABLED)   in idxd_submit_desc()
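The submit.c hits outline the internal flow: idxd_alloc_desc() refuses devices that are not IDXD_DEV_ENABLED, __get_desc() zeroes the completion record and stamps the PASID, and idxd_submit_desc() pushes the descriptor to the work queue. A hedged sketch of a caller of that pair follows; the descriptor fields, the NOOP opcode and the release helper used on failure are illustrative assumptions about the surrounding structures, not code from the driver.

#include <linux/err.h>

#include "idxd.h"	/* driver-internal header; this sketch assumes it is compiled inside the driver */

static int example_submit_noop(struct idxd_wq *wq)
{
	struct idxd_desc *desc;
	int rc;

	/* Blocking allocation; fails once the device leaves IDXD_DEV_ENABLED. */
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Field names below are assumptions about the hardware descriptor layout. */
	desc->hw->opcode = DSA_OPCODE_NOOP;				/* simplest possible operation */
	desc->hw->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;	/* request a completion record */
	desc->hw->completion_addr = desc->compl_dma;			/* record was zeroed in __get_desc() */

	rc = idxd_submit_desc(wq, desc);
	if (rc < 0)
		idxd_free_desc(wq, desc);	/* assumed release helper on failure */

	return rc;
}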
Makefile
    3  obj-$(CONFIG_INTEL_IDXD) += idxd.o
    4  idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
    6  idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
/linux-6.3-rc2/drivers/dma/

Makefile
    46  obj-y += idxd/
Kconfig
    309  bool "Legacy behavior for idxd driver"
    326  # support shared virtual memory for the devices supported by idxd.
/linux-6.3-rc2/Documentation/admin-guide/

kernel-parameters.txt
    1849  idxd.sva= [HW]
    1852  support for the idxd driver. By default it is set to
    1855  idxd.tc_override= [HW]
/linux-6.3-rc2/

MAINTAINERS
    10321  F: drivers/dma/idxd/*
    10322  F: include/uapi/linux/idxd.h