| /kernel/time/ |
| clockevents.c |
    104  return dev->set_state_shutdown(dev);  in __clockevents_switch_state()
    112  return dev->set_state_periodic(dev);  in __clockevents_switch_state()
    120  return dev->set_state_oneshot(dev);  in __clockevents_switch_state()
    131  return dev->set_state_oneshot_stopped(dev);  in __clockevents_switch_state()
    186  ret = dev->tick_resume(dev);  in clockevents_tick_resume()
    222  dev->name ? dev->name : "?",  in clockevents_increase_min_delta()
    324  return dev->set_next_ktime(expires, dev);  in clockevents_program_event()
    499  dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);  in clockevents_config()
    500  dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);  in clockevents_config()
    604  dev->suspend(dev);  in clockevents_suspend()
    [all …]
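
The sites above are where the clockevents core calls back into a driver-supplied `struct clock_event_device`. For orientation, a minimal sketch of how a timer driver typically fills in and registers such a device; the device, callbacks, frequency, and delta limits here are hypothetical, chosen only to illustrate the interface exercised by the code listed above.

```c
/* Hypothetical timer driver: illustrative sketch, not an in-tree device. */
#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

static int my_timer_set_next_event(unsigned long delta,
				   struct clock_event_device *dev)
{
	/* Program the hardware comparator 'delta' ticks into the future. */
	return 0;
}

static int my_timer_shutdown(struct clock_event_device *dev)
{
	/* Stop the hardware timer. */
	return 0;
}

static int my_timer_set_oneshot(struct clock_event_device *dev)
{
	/* Put the hardware into one-shot mode. */
	return 0;
}

static struct clock_event_device my_timer_clockevent = {
	.name			= "my-timer",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 300,
	.set_next_event		= my_timer_set_next_event,
	.set_state_shutdown	= my_timer_shutdown,
	.set_state_oneshot	= my_timer_set_oneshot,
};

static void my_timer_register(unsigned int irq, u32 freq)
{
	my_timer_clockevent.cpumask = cpumask_of(smp_processor_id());
	my_timer_clockevent.irq = irq;

	/*
	 * clockevents_config_and_register() derives min/max_delta_ns via
	 * cev_delta2ns() (see clockevents_config() above) and later drives
	 * the device through the set_state_*() callbacks listed above.
	 */
	clockevents_config_and_register(&my_timer_clockevent, freq,
					0xf, 0x7fffffff);
}
```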
|
| tick-internal.h |
    26  extern void tick_handle_periodic(struct clock_event_device *dev);
    27  extern void tick_check_new_device(struct clock_event_device *dev);
    34  extern void tick_install_replacement(struct clock_event_device *dev);
    38  extern int clockevents_tick_resume(struct clock_event_device *dev);
    42  return !(dev->features & CLOCK_EVT_FEAT_DUMMY);  in tick_device_is_functional()
    47  return dev->state_use_accessors;  in clockevent_get_state()
    53  dev->state_use_accessors = state;  in clockevent_set_state()
    56  extern void clockevents_shutdown(struct clock_event_device *dev);
    59  extern void clockevents_switch_state(struct clock_event_device *dev,
    61  extern int clockevents_program_event(struct clock_event_device *dev,
    [all …]
|
| tick-oneshot.c |
    32  dev->next_event = KTIME_MAX;  in tick_program_event()
    36  if (unlikely(clockevent_state_oneshot_stopped(dev))) {  in tick_program_event()
    55  clockevents_program_event(dev, ktime_get(), true);  in tick_resume_oneshot()
    76  struct clock_event_device *dev = td->evtdev;  in tick_switch_to_oneshot() local
    78  if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||  in tick_switch_to_oneshot()
    79  !tick_device_is_functional(dev)) {  in tick_switch_to_oneshot()
    82  if (!dev) {  in tick_switch_to_oneshot()
    85  if (!tick_device_is_functional(dev))  in tick_switch_to_oneshot()
    86  pr_cont(" %s is not functional.\n", dev->name);  in tick_switch_to_oneshot()
    89  dev->name);  in tick_switch_to_oneshot()
    [all …]
|
| tick-broadcast.c |
    173  if (!try_module_get(dev->owner))  in tick_install_broadcast_device()
    211  return (dev && tick_broadcast_device.evtdev == dev);  in tick_is_broadcast_device()
    234  if (!dev->broadcast)  in tick_device_setup_broadcast_func()
    236  if (!dev->broadcast) {  in tick_device_setup_broadcast_func()
    238  dev->name);  in tick_device_setup_broadcast_func()
    239  dev->broadcast = err_broadcast;  in tick_device_setup_broadcast_func()
    448  dev = td->evtdev;  in tick_broadcast_control()
    453  if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))  in tick_broadcast_control()
    480  clockevents_shutdown(dev);  in tick_broadcast_control()
    698  dev->next_event = KTIME_MAX;  in tick_handle_oneshot_broadcast()
    [all …]
|
| tick-common.c |
    76  if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))  in tick_is_oneshot_available()
    78  if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))  in tick_is_oneshot_available()
    111  ktime_t next = dev->next_event;  in tick_handle_periodic()
    123  if (!clockevent_state_oneshot(dev))  in tick_handle_periodic()
    153  tick_set_periodic_handler(dev, broadcast);  in tick_setup_periodic()
    156  if (!tick_device_is_functional(dev))  in tick_setup_periodic()
    159  if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&  in tick_setup_periodic()
    423  struct clock_event_device *dev = td->evtdev;  in tick_shutdown() local
    426  if (dev) {  in tick_shutdown()
    432  clockevents_exchange_device(dev, NULL);  in tick_shutdown()
    [all …]
|
| timer_list.c |
    195  if (!dev) {  in print_tickdevice()
    199  SEQ_printf(m, "%s\n", dev->name);  in print_tickdevice()
    212  if (dev->set_state_shutdown)  in print_tickdevice()
    214  dev->set_state_shutdown);  in print_tickdevice()
    216  if (dev->set_state_periodic)  in print_tickdevice()
    218  dev->set_state_periodic);  in print_tickdevice()
    220  if (dev->set_state_oneshot)  in print_tickdevice()
    222  dev->set_state_oneshot);  in print_tickdevice()
    226  dev->set_state_oneshot_stopped);  in print_tickdevice()
    228  if (dev->tick_resume)  in print_tickdevice()
    [all …]
|
| /kernel/dma/ |
| mapping.c |
    114  devres_add(dev, dr);  in dmam_alloc_attrs()
    123  if (use_dma_iommu(dev))  in dma_go_direct()
    130  if (dev->dma_ops_bypass)  in dma_go_direct()
    146  return dma_go_direct(dev, dev->coherent_dma_mask, ops);  in dma_alloc_direct()
    152  return dma_go_direct(dev, *dev->dma_mask, ops);  in dma_map_direct()
    456  if (!dma_map_direct(dev, get_dma_ops(dev)))  in dma_need_unmap()
    468  if (dma_map_direct(dev, ops) || use_dma_iommu(dev))  in dma_setup_need_sync()
    474  dev->dma_skip_sync = dev_is_dma_coherent(dev);  in dma_setup_need_sync()
    904  if (!dev->dma_mask || !dma_supported(dev, mask))  in dma_set_mask()
    908  *dev->dma_mask = mask;  in dma_set_mask()
    [all …]
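
dma_set_mask() and dmam_alloc_attrs() above are the kernel side of the driver-facing DMA API. A minimal sketch of the usual call sequence from a driver probe path; the device, mask width, and buffer size are hypothetical, but the calls are the standard entry points that land in the mapping.c code listed above.

```c
#include <linux/dma-mapping.h>

static int my_probe(struct device *dev)
{
	void *buf;
	dma_addr_t dma_handle;

	/*
	 * Declare which addresses the device can generate; validated by
	 * dma_set_mask()/dma_supported() shown above.
	 */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -EIO;

	/*
	 * Coherent allocation; with no dma_map_ops installed this is
	 * routed to dma-direct (see dma_alloc_direct()/dma_go_direct()
	 * above).
	 */
	buf = dma_alloc_coherent(dev, 4096, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... program 'dma_handle' into the device ... */

	dma_free_coherent(dev, 4096, buf, dma_handle);
	return 0;
}
```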
|
| direct.c |
    51  dev->coherent_dma_mask,  in dma_direct_optimal_gfp_mask()
    52  dev->bus_dma_limit);  in dma_direct_optimal_gfp_mask()
    77  min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);  in dma_coherent_ok()
    220  !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))  in dma_direct_alloc()
    321  !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {  in dma_direct_free()
    355  __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);  in dma_direct_free()
    364  if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))  in dma_direct_alloc_pages()
    511  dev_err_once(dev,  in dma_direct_map_resource()
    513  &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);  in dma_direct_map_resource()
    639  (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))  in dma_direct_max_mapping_size()
    [all …]
|
| direct.h |
    16  bool dma_direct_can_mmap(struct device *dev);
    59  phys_addr_t paddr = dma_to_phys(dev, addr);  in dma_direct_sync_single_for_device()
    63  if (!dev_is_dma_coherent(dev))  in dma_direct_sync_single_for_device()
    70  phys_addr_t paddr = dma_to_phys(dev, addr);  in dma_direct_sync_single_for_cpu()
    72  if (!dev_is_dma_coherent(dev)) {  in dma_direct_sync_single_for_cpu()
    88  dma_addr_t dma_addr = phys_to_dma(dev, phys);  in dma_direct_map_page()
    90  if (is_swiotlb_force_bounce(dev)) {  in dma_direct_map_page()
    100  if (is_swiotlb_active(dev))  in dma_direct_map_page()
    103  dev_WARN_ONCE(dev, 1,  in dma_direct_map_page()
    105  &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);  in dma_direct_map_page()
    [all …]
|
| debug.c |
    279  if (a->dev != b->dev)  in containing_match()
    528  if (!dev || dev == entry->dev) {  in debug_dma_dump_mappings()
    1220  entry->dev = dev;  in debug_dma_map_page()
    1249  ref.dev = dev;  in debug_dma_mapping_error()
    1282  .dev = dev,  in debug_dma_unmap_page()
    1413  entry->dev = dev;  in debug_dma_alloc_coherent()
    1427  .dev = dev,  in debug_dma_free_coherent()
    1459  entry->dev = dev;  in debug_dma_map_resource()
    1474  .dev = dev,  in debug_dma_unmap_resource()
    1495  ref.dev = dev;  in debug_dma_sync_single_for_cpu()
    [all …]
|
| debug.h |
    12  extern void debug_dma_map_page(struct device *dev, struct page *page,
    17  extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
    27  extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
    31  extern void debug_dma_free_coherent(struct device *dev, size_t size,
    42  extern void debug_dma_sync_single_for_cpu(struct device *dev,
    46  extern void debug_dma_sync_single_for_device(struct device *dev,
    50  extern void debug_dma_sync_sg_for_cpu(struct device *dev,
    54  extern void debug_dma_sync_sg_for_device(struct device *dev,
    76  static inline void debug_dma_unmap_sg(struct device *dev,  in debug_dma_unmap_sg() argument
    100  static inline void debug_dma_unmap_resource(struct device *dev,  in debug_dma_unmap_resource() argument
    [all …]
|
| coherent.c |
    25  if (dev && dev->dma_mem)  in dev_get_coherent_memory()
    26  return dev->dma_mem;  in dev_get_coherent_memory()
    90  if (!dev)  in dma_assign_coherent_memory()
    93  if (dev->dma_mem)  in dma_assign_coherent_memory()
    96  dev->dma_mem = mem;  in dma_assign_coherent_memory()
    135  if (dev) {  in dma_release_coherent_memory()
    137  dev->dma_mem = NULL;  in dma_release_coherent_memory()
    351  min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit))  in rmem_dma_device_init()
    359  struct device *dev)  in rmem_dma_device_release() argument
    361  if (dev)  in rmem_dma_device_release()
    [all …]
|
| swiotlb.c |
    621  if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) {  in swiotlb_alloc_tlb()
    889  dev_WARN_ONCE(dev, 1,  in swiotlb_bounce()
    1210  phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);  in swiotlb_find_slots()
    1383  dev_warn_ratelimited(dev,  in swiotlb_tbl_map_single()
    1416  dma_reset_need_sync(dev);  in swiotlb_tbl_map_single()
    1582  trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);  in swiotlb_map()
    1594  dev_WARN_ONCE(dev, 1,  in swiotlb_map()
    1596  &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);  in swiotlb_map()
    1793  struct device *dev)  in rmem_swiotlb_device_init() argument
    1850  dev->dma_io_tlb_mem = mem;  in rmem_swiotlb_device_init()
    [all …]
|
| ops_helpers.c |
    65  const struct dma_map_ops *ops = get_dma_ops(dev);  in dma_common_alloc_pages()
    68  page = dma_alloc_contiguous(dev, size, gfp);  in dma_common_alloc_pages()
    74  if (use_dma_iommu(dev))  in dma_common_alloc_pages()
    75  *dma_handle = iommu_dma_map_page(dev, page, 0, size, dir,  in dma_common_alloc_pages()
    78  *dma_handle = ops->map_page(dev, page, 0, size, dir,  in dma_common_alloc_pages()
    81  dma_free_contiguous(dev, page, size);  in dma_common_alloc_pages()
    92  const struct dma_map_ops *ops = get_dma_ops(dev);  in dma_common_free_pages()
    94  if (use_dma_iommu(dev))  in dma_common_free_pages()
    95  iommu_dma_unmap_page(dev, dma_handle, size, dir,  in dma_common_free_pages()
    98  ops->unmap_page(dev, dma_handle, size, dir,  in dma_common_free_pages()
    [all …]
|
| map_benchmark.c |
    24  struct device *dev;  member
    65  dev_name(map->dev));  in map_benchmark_thread()
    128  get_device(map->dev);  in do_map_benchmark()
    194  put_device(map->dev);  in do_map_benchmark()
    256  old_dma_mask = dma_get_mask(map->dev);  in map_benchmark_ioctl()
    258  ret = dma_set_mask(map->dev,  in map_benchmark_ioctl()
    262  dev_name(map->dev));  in map_benchmark_ioctl()
    274  dma_set_mask(map->dev, old_dma_mask);  in map_benchmark_ioctl()
    310  map->dev = dev;  in __map_benchmark_probe()
    333  return __map_benchmark_probe(&pdev->dev);  in map_benchmark_platform_probe()
    [all …]
|
| contiguous.c |
    306  struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,  in dma_alloc_from_contiguous() argument
    312  return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);  in dma_alloc_from_contiguous()
    328  return cma_release(dev_get_cma_area(dev), pages, count);  in dma_release_from_contiguous()
    356  int nid = dev_to_node(dev);  in dma_alloc_contiguous()
    362  if (dev->cma_area)  in dma_alloc_contiguous()
    363  return cma_alloc_aligned(dev->cma_area, size, gfp);  in dma_alloc_contiguous()
    408  if (dev->cma_area) {  in dma_free_contiguous()
    409  if (cma_release(dev->cma_area, page, count))  in dma_free_contiguous()
    444  dev->cma_area = rmem->priv;  in rmem_cma_device_init()
    449  struct device *dev)  in rmem_cma_device_release() argument
    [all …]
|
| /kernel/power/ |
| energy_model.c |
    135  em_dbg = devm_kcalloc(dev, dev->em_pd->nr_perf_states,  in em_debug_create_pd()
    308  if (!dev)  in em_dev_update_perf_domain()
    314  if (!dev->em_pd) {  in em_dev_update_perf_domain()
    318  pd = dev->em_pd;  in em_dev_update_perf_domain()
    439  dev->em_pd = pd;  in em_create_pd()
    506  return dev->em_pd;  in em_pd_get()
    570  if (dev->em_pd) {  in em_dev_register_perf_domain()
    654  if (IS_ERR_OR_NULL(dev) || !dev->em_pd)  in em_dev_unregister_perf_domain()
    671  kfree(dev->em_pd);  in em_dev_unregister_perf_domain()
    672  dev->em_pd = NULL;  in em_dev_unregister_perf_domain()
    [all …]
|
| console.c |
    25  struct device *dev;  member
    47  void pm_vt_switch_required(struct device *dev, bool required)  in pm_vt_switch_required() argument
    53  if (tmp->dev == dev) {  in pm_vt_switch_required()
    65  entry->dev = dev;  in pm_vt_switch_required()
    79  void pm_vt_switch_unregister(struct device *dev)  in pm_vt_switch_unregister() argument
    85  if (tmp->dev == dev) {  in pm_vt_switch_unregister()
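
pm_vt_switch_required() and pm_vt_switch_unregister() are the only external entry points in this file, with the signatures shown above. A sketch of how a hypothetical display-style driver might use them, assuming the declarations come from <linux/pm.h>; the probe/remove helpers are made up for illustration.

```c
#include <linux/pm.h>
#include <linux/device.h>

/*
 * Hypothetical display driver that can restore its own framebuffer
 * state, so no VT switch to a text console is needed around
 * suspend/resume. This registers an entry on the list walked above.
 */
static int my_display_probe(struct device *dev)
{
	pm_vt_switch_required(dev, false);
	return 0;
}

static void my_display_remove(struct device *dev)
{
	/* Drop the entry matched by the tmp->dev == dev checks above. */
	pm_vt_switch_unregister(dev);
}
```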
|
| suspend_test.c |
    81  printk(err_readtime, dev_name(&rtc->dev), status);  in test_wakealarm()
    92  printk(err_wakealarm, dev_name(&rtc->dev), status);  in test_wakealarm()
    128  static int __init has_wakealarm(struct device *dev, const void *data)  in has_wakealarm() argument
    130  struct rtc_device *candidate = to_rtc_device(dev);  in has_wakealarm()
    134  if (!device_may_wakeup(candidate->dev.parent))  in has_wakealarm()
    185  struct device *dev;  in test_suspend() local
    204  dev = class_find_device(&rtc_class, NULL, NULL, has_wakealarm);  in test_suspend()
    205  if (dev) {  in test_suspend()
    206  rtc = rtc_class_open(dev_name(dev));  in test_suspend()
    207  put_device(dev);  in test_suspend()
|
| /kernel/irq/ |
| msi.c |
    84  desc->dev = dev;  in msi_alloc_desc()
    315  if (dev->msi.data)  in msi_setup_device_data()
    337  if (dev->msi.domain && !irq_domain_is_msi_parent(dev->msi.domain))  in msi_setup_device_data()
    341  dev->msi.data = md;  in msi_setup_device_data()
    342  devres_add(dev, md);  in msi_setup_device_data()
    459  if (!dev->msi.data)  in msi_domain_get_virq()
    506  bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;  in msi_mode_show()
    892  domain->dev = info->dev;  in __msi_create_irq_domain()
    1055  bundle->info.dev = dev;  in msi_create_device_irq_domain()
    1581  struct device *dev = domain->dev;  in msi_device_domain_alloc_wired() local
    [all …]
|
| devres.c |
    19  static void devm_irq_release(struct device *dev, void *res)  in devm_irq_release() argument
    26  static int devm_irq_match(struct device *dev, void *res, void *data)  in devm_irq_match() argument
    66  devname = dev_name(dev);  in devm_request_threaded_irq()
    77  devres_add(dev, dr);  in devm_request_threaded_irq()
    113  devname = dev_name(dev);  in devm_request_any_context_irq()
    123  devres_add(dev, dr);  in devm_request_any_context_irq()
    144  WARN_ON(devres_release(dev, devm_irq_release, devm_irq_match,  in devm_free_irq()
    154  static void devm_irq_desc_release(struct device *dev, void *res)  in devm_irq_desc_release() argument
    197  devres_add(dev, dr);  in __devm_irq_alloc_descs()
    279  devres_add(dev, dr);  in devm_irq_setup_generic_chip()
    [all …]
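
devm_request_threaded_irq() above ties the IRQ's lifetime to the device through devres_add(), and falls back to dev_name(dev) when no name is given. A minimal sketch of the driver-side call; the device, IRQ number, and handler names are hypothetical.

```c
#include <linux/interrupt.h>
#include <linux/device.h>

static irqreturn_t my_hard_handler(int irq, void *data)
{
	/* Quick hardware acknowledgement; defer real work to the thread. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_thread_handler(int irq, void *data)
{
	/* Slow-path processing, runs in a kernel thread. */
	return IRQ_HANDLED;
}

static int my_probe(struct device *dev, int irq)
{
	/*
	 * The devres entry added by devres_add() above releases the IRQ
	 * automatically when 'dev' is unbound, so no explicit free_irq()
	 * is needed on the error or remove paths. Passing NULL as the
	 * name makes the core use dev_name(dev), as seen above.
	 */
	return devm_request_threaded_irq(dev, irq, my_hard_handler,
					 my_thread_handler, IRQF_ONESHOT,
					 NULL, dev);
}
```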
|
| /kernel/bpf/ |
| devmap.c |
    228  dev_put(dev->dev);  in dev_map_free()
    244  dev_put(dev->dev);  in dev_map_free()
    378  struct net_device *dev = bq->dev;  in bq_xmit_all() local
    505  struct xdp_txq_info txq = { .dev = dst->dev };  in dev_map_bpf_prog_run_skb()
    543  struct net_device *dev = dst->dev;  in dev_map_enqueue() local
    696  skb->dev = dst->dev;  in dev_map_generic_redirect()
    816  dev_put(dev->dev);  in __dev_map_entry_free()
    874  if (!dev->dev)  in __dev_map_alloc_node()
    901  dev_put(dev->dev);  in __dev_map_alloc_node()
    1090  if (netdev != dev->dev)  in dev_map_hash_remove_netdev()
    [all …]
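
devmap.c implements BPF_MAP_TYPE_DEVMAP, the map an XDP program redirects frames into; dev_map_enqueue() and bq_xmit_all() above are the kernel half of that path. A small BPF-side sketch for context; the map name, size, and key choice are made up for illustration.

```c
// SPDX-License-Identifier: GPL-2.0
/* Illustrative XDP program; not tied to any particular in-tree sample. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/*
 * Values are target ifindexes; updating the map resolves them to
 * net_device pointers (the dev->dev fields seen in devmap.c above).
 */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_ports(struct xdp_md *ctx)
{
	__u32 key = 0;	/* illustrative: always redirect via slot 0 */

	/*
	 * On success the frame is queued for the target device via
	 * dev_map_enqueue() and later flushed by bq_xmit_all(); on a
	 * missing map entry the packet falls back to XDP_PASS.
	 */
	return bpf_redirect_map(&tx_ports, key, XDP_PASS);
}

char _license[] SEC("license") = "GPL";
```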
|
| tcx.c |
    21  if (!dev) {  in tcx_prog_attach()
    69  if (!dev) {  in tcx_prog_detach()
    131  if (!dev) {  in tcx_prog_query()
    147  struct net_device *dev = tcx->dev;  in tcx_link_prog_attach() local
    178  dev = tcx->dev;  in tcx_link_release()
    179  if (!dev)  in tcx_link_release()
    213  dev = tcx->dev;  in tcx_link_update()
    214  if (!dev) {  in tcx_link_update()
    257  if (tcx->dev)  in tcx_link_fdinfo()
    306  tcx->dev = dev;  in tcx_link_init()
    [all …]
|
| /kernel/trace/ |
| trace_mmiotrace.c |
    20  struct pci_dev *dev;  member
    68  dev->bus->number, dev->devfn,  in mmio_print_pcidev()
    69  dev->vendor, dev->device, dev->irq);  in mmio_print_pcidev()
    71  start = dev->resource[i].start;  in mmio_print_pcidev()
    77  start = dev->resource[i].start;  in mmio_print_pcidev()
    78  end = dev->resource[i].end;  in mmio_print_pcidev()
    80  dev->resource[i].start < dev->resource[i].end ?  in mmio_print_pcidev()
    93  pci_dev_put(hiter->dev);  in destroy_header_iter()
    152  mmio_print_pcidev(s, hiter->dev);  in mmio_read()
    153  hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);  in mmio_read()
    [all …]
|
| /kernel/sched/ |
| idle.c |
    134  struct cpuidle_device *dev)  in call_cpuidle_s2idle() argument
    139  return cpuidle_enter_s2idle(drv, dev);  in call_cpuidle_s2idle()
    150  dev->last_residency_ns = 0;  in call_cpuidle()
    160  return cpuidle_enter(drv, dev, next_state);  in call_cpuidle()
    174  struct cpuidle_device *dev = cpuidle_get_device();  in cpuidle_idle_call() local
    187  if (cpuidle_not_available(drv, dev)) {  in cpuidle_idle_call()
    209  entered_state = call_cpuidle_s2idle(drv, dev);  in cpuidle_idle_call()
    221  call_cpuidle(drv, dev, next_state);  in cpuidle_idle_call()
    228  next_state = cpuidle_select(drv, dev, &stop_tick);  in cpuidle_idle_call()
    235  entered_state = call_cpuidle(drv, dev, next_state);  in cpuidle_idle_call()
    [all …]
|