/linux/tools/lib/api/fs/

cgroup.c
    17  static struct cgroupfs_cache_entry cached;    (variable)
    27  if (!strcmp(cached.subsys, subsys)) {    in cgroupfs_find_mountpoint()
    28  if (strlen(cached.mountpoint) < maxlen) {    in cgroupfs_find_mountpoint()
    29  strcpy(buf, cached.mountpoint);    in cgroupfs_find_mountpoint()
    94  strncpy(cached.subsys, subsys, sizeof(cached.subsys) - 1);    in cgroupfs_find_mountpoint()
    95  strcpy(cached.mountpoint, mountpoint);    in cgroupfs_find_mountpoint()
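The cgroup.c hits implement a one-entry lookup cache: the last subsystem/mountpoint pair that was resolved is remembered in a file-scope struct and returned directly the next time the same subsystem is asked for. A minimal userspace sketch of the same idea follows; the fake slow path and all names are illustrative, not the libapi code.

    /* One-entry string cache keyed by a name; sketch only. */
    #include <stdio.h>
    #include <string.h>

    struct mount_cache_entry {
        char subsys[32];
        char mountpoint[256];
    };

    static struct mount_cache_entry cached;

    /* Stand-in for the expensive lookup (parsing /proc/mounts, etc.). */
    static int slow_lookup(const char *subsys, char *buf, size_t maxlen)
    {
        return snprintf(buf, maxlen, "/sys/fs/cgroup/%s", subsys) < (int)maxlen ? 0 : -1;
    }

    static int find_mountpoint(const char *subsys, char *buf, size_t maxlen)
    {
        /* Fast path: same subsystem as last time, copy the cached answer. */
        if (!strcmp(cached.subsys, subsys)) {
            if (strlen(cached.mountpoint) < maxlen) {
                strcpy(buf, cached.mountpoint);
                return 0;
            }
            return -1;
        }

        if (slow_lookup(subsys, buf, maxlen))
            return -1;

        /* Remember the result for the next call. */
        strncpy(cached.subsys, subsys, sizeof(cached.subsys) - 1);
        strncpy(cached.mountpoint, buf, sizeof(cached.mountpoint) - 1);
        return 0;
    }

    int main(void)
    {
        char buf[256];

        if (!find_mountpoint("memory", buf, sizeof(buf)))
            printf("%s\n", buf);    /* a second call would hit the cache */
        return 0;
    }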
|
/linux/fs/lockd/

mon.c
    340  if (cached != NULL) {    in nsm_get_handle()
    341  refcount_inc(&cached->sm_count);    in nsm_get_handle()
    345  "cnt %d\n", cached->sm_name,    in nsm_get_handle()
    346  cached->sm_addrbuf,    in nsm_get_handle()
    348  return cached;    in nsm_get_handle()
    379  struct nsm_handle *cached;    in nsm_reboot_lookup() (local)
    385  if (unlikely(cached == NULL)) {    in nsm_reboot_lookup()
    389  return cached;    in nsm_reboot_lookup()
    392  refcount_inc(&cached->sm_count);    in nsm_reboot_lookup()
    396  cached->sm_name, cached->sm_addrbuf,    in nsm_reboot_lookup()
    [all …]
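The lockd monitor code returns a previously created nsm_handle after bumping its reference count instead of allocating a new one. Below is a rough userspace sketch of that refcounted-cache pattern, with a plain counter under a mutex standing in for the kernel's refcount_t and locking; the struct and function names are hypothetical.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct handle {
        int refcount;
        char name[64];
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct handle *cached_handle;    /* single-slot cache for the example */

    static struct handle *get_handle(const char *name)
    {
        struct handle *cached;

        /* Fast path: hand out another reference to the cached object. */
        pthread_mutex_lock(&lock);
        cached = cached_handle;
        if (cached && !strcmp(cached->name, name)) {
            cached->refcount++;
            pthread_mutex_unlock(&lock);
            return cached;
        }
        pthread_mutex_unlock(&lock);

        /* Slow path: create a new handle and publish it as the cached one. */
        cached = calloc(1, sizeof(*cached));
        if (!cached)
            return NULL;
        cached->refcount = 2;    /* one for the cache, one for the caller */
        snprintf(cached->name, sizeof(cached->name), "%s", name);

        pthread_mutex_lock(&lock);
        cached_handle = cached;    /* a previous entry would leak; fine for a sketch */
        pthread_mutex_unlock(&lock);
        return cached;
    }

    static void put_handle(struct handle *h)
    {
        pthread_mutex_lock(&lock);
        if (--h->refcount == 0) {    /* nothing drops the cache's own ref here */
            if (cached_handle == h)
                cached_handle = NULL;
            free(h);
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        struct handle *h = get_handle("peer0");

        put_handle(h);
        return 0;
    }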
|
/linux/lib/zstd/compress/

hist.c
    91  { U32 cached = MEM_read32(ip); ip += 4;    in HIST_count_parallel_wksp() (local)
    93  U32 c = cached; cached = MEM_read32(ip); ip += 4;    in HIST_count_parallel_wksp()
    98  c = cached; cached = MEM_read32(ip); ip += 4;    in HIST_count_parallel_wksp()
    103  c = cached; cached = MEM_read32(ip); ip += 4;    in HIST_count_parallel_wksp()
    108  c = cached; cached = MEM_read32(ip); ip += 4;    in HIST_count_parallel_wksp()
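In zstd's histogram routine the local named cached always holds the next 32-bit word, loaded one step ahead of the word whose bytes are being counted, so the memory load overlaps the table updates. A simplified, self-contained version of that loop is sketched below with a single count table (the kernel copy interleaves several tables to reduce store conflicts).

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t read32(const uint8_t *p)
    {
        uint32_t v;

        memcpy(&v, p, sizeof(v));    /* alignment-safe load, like MEM_read32() */
        return v;
    }

    static void hist_count(uint32_t count[256], const uint8_t *src, size_t size)
    {
        const uint8_t *ip = src;
        const uint8_t *const end = src + size;

        memset(count, 0, 256 * sizeof(count[0]));
        if (size < 8) {    /* tiny input: plain byte loop */
            while (ip < end)
                count[*ip++]++;
            return;
        }

        {
            uint32_t cached = read32(ip); ip += 4;    /* prime the pipeline */

            while (ip <= end - 4) {
                /* count the previous word while the next one is loading */
                uint32_t c = cached; cached = read32(ip); ip += 4;
                count[(uint8_t)c]++;
                count[(uint8_t)(c >> 8)]++;
                count[(uint8_t)(c >> 16)]++;
                count[c >> 24]++;
            }
            /* count the last pre-loaded word */
            count[(uint8_t)cached]++;
            count[(uint8_t)(cached >> 8)]++;
            count[(uint8_t)(cached >> 16)]++;
            count[cached >> 24]++;
        }
        while (ip < end)    /* trailing 0-3 bytes */
            count[*ip++]++;
    }

    int main(void)
    {
        uint32_t count[256];
        const char msg[] = "abracadabra";

        hist_count(count, (const uint8_t *)msg, sizeof(msg) - 1);
        printf("'a' occurs %u times\n", (unsigned)count['a']);    /* prints 5 */
        return 0;
    }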
|
/linux/tools/perf/util/

smt.c
    10  static bool cached;    in smt_on() (local)
    15  if (cached)    in smt_on()
    42  cached = true;    in smt_on()
    46  if (!cached) {    in smt_on()
    49  cached = true;    in smt_on()
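smt_on() computes its answer once and latches it behind a static cached flag; sysctl__nmi_watchdog_enabled() in util.c below uses the same idiom. A small userspace sketch of that compute-once pattern follows; the sysfs path and helper are stand-ins, and like the original it is not thread-safe.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the expensive probe (reading sysfs, parsing topology, ...). */
    static bool expensive_probe(void)
    {
        FILE *f = fopen("/sys/devices/system/cpu/smt/active", "r");
        int v = 0;

        if (f) {
            if (fscanf(f, "%d", &v) != 1)
                v = 0;
            fclose(f);
        }
        return v == 1;
    }

    static bool smt_active(void)
    {
        static bool cached;    /* has the probe already run? */
        static bool value;     /* the remembered answer */

        if (cached)
            return value;

        value = expensive_probe();
        cached = true;
        return value;
    }

    int main(void)
    {
        printf("SMT active: %s\n", smt_active() ? "yes" : "no");
        return 0;
    }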
|
util.c
    60  static bool cached;    in sysctl__nmi_watchdog_enabled() (local)
    64  if (cached)    in sysctl__nmi_watchdog_enabled()
    71  cached = true;    in sysctl__nmi_watchdog_enabled()
|
/linux/drivers/mtd/maps/

pxa2xx-flash.c
    28  unsigned long start = (unsigned long)map->cached + from;    in pxa2xx_map_inval_cache()
    71  info->map.cached = ioremap_cache(info->map.phys, info->map.size);    in pxa2xx_flash_probe()
    72  if (!info->map.cached)    in pxa2xx_flash_probe()
    88  if (info->map.cached)    in pxa2xx_flash_probe()
    89  iounmap(info->map.cached);    in pxa2xx_flash_probe()
    109  if (info->map.cached)    in pxa2xx_flash_remove()
    110  iounmap(info->map.cached);    in pxa2xx_flash_remove()
|
/linux/fs/btrfs/

extent-io-tree.h
    108  struct extent_state **cached);
    131  struct extent_state **cached);
    134  struct extent_state **cached, gfp_t mask,
    143  u64 end, struct extent_state **cached)    in unlock_extent_cached() (argument)
    145  return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,    in unlock_extent_cached()
    150  u64 start, u64 end, struct extent_state **cached)    in unlock_extent_cached_atomic() (argument)
    152  return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,    in unlock_extent_cached_atomic()
    198  u64 end, struct extent_state **cached)    in clear_extent_dirty() (argument)
    202  EXTENT_DO_ACCOUNTING, 0, 0, cached);    in clear_extent_dirty()
|
block-group.h
    106  int cached;    (member)
    325  return cache->cached == BTRFS_CACHE_FINISHED ||    in btrfs_block_group_done()
    326  cache->cached == BTRFS_CACHE_ERROR;    in btrfs_block_group_done()
|
/linux/fs/proc/

meminfo.c
    36  long cached;    in meminfo_proc_show() (local)
    46  cached = global_node_page_state(NR_FILE_PAGES) -    in meminfo_proc_show()
    48  if (cached < 0)    in meminfo_proc_show()
    49  cached = 0;    in meminfo_proc_show()
    62  show_val_kb(m, "Cached: ", cached);    in meminfo_proc_show()
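The meminfo hits compute the "Cached:" figure as the file-page count minus pages accounted elsewhere (the swap cache and, in the real code, buffers), clamped so a racy negative intermediate value is never reported. A tiny sketch of that clamped subtraction, with placeholder counter names and an assumed 4 KiB page size:

    #include <stdio.h>

    static long cached_kb(long file_pages, long swap_cache_pages, long buffer_pages)
    {
        long cached = file_pages - swap_cache_pages - buffer_pages;

        if (cached < 0)    /* counters are sampled racily and may transiently underflow */
            cached = 0;
        return cached * 4;    /* pages -> KiB, assuming 4 KiB pages */
    }

    int main(void)
    {
        printf("Cached: %ld kB\n", cached_kb(250000, 1200, 3000));
        return 0;
    }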
|
/linux/drivers/gpu/drm/msm/dsi/phy/

dsi_phy_10nm.c
    472  struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;    in dsi_10nm_pll_save_state() (local)
    476  cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +    in dsi_10nm_pll_save_state()
    478  cached->pll_out_div &= 0x3;    in dsi_10nm_pll_save_state()
    481  cached->bit_clk_div = cmn_clk_cfg0 & 0xf;    in dsi_10nm_pll_save_state()
    482  cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;    in dsi_10nm_pll_save_state()
    485  cached->pll_mux = cmn_clk_cfg1 & 0x3;    in dsi_10nm_pll_save_state()
    488  pll_10nm->phy->id, cached->pll_out_div, cached->bit_clk_div,    in dsi_10nm_pll_save_state()
    489  cached->pix_clk_div, cached->pll_mux);    in dsi_10nm_pll_save_state()
    502  val |= cached->pll_out_div;    in dsi_10nm_pll_restore_state()
    506  cached->bit_clk_div | (cached->pix_clk_div << 4));    in dsi_10nm_pll_restore_state()
    [all …]
|
dsi_phy_7nm.c
    498  struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;    in dsi_7nm_pll_save_state() (local)
    502  cached->pll_out_div = dsi_phy_read(pll_7nm->phy->pll_base +    in dsi_7nm_pll_save_state()
    504  cached->pll_out_div &= 0x3;    in dsi_7nm_pll_save_state()
    507  cached->bit_clk_div = cmn_clk_cfg0 & 0xf;    in dsi_7nm_pll_save_state()
    508  cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;    in dsi_7nm_pll_save_state()
    511  cached->pll_mux = cmn_clk_cfg1 & 0x3;    in dsi_7nm_pll_save_state()
    514  pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div,    in dsi_7nm_pll_save_state()
    515  cached->pix_clk_div, cached->pll_mux);    in dsi_7nm_pll_save_state()
    528  val |= cached->pll_out_div;    in dsi_7nm_pll_restore_state()
    532  cached->bit_clk_div | (cached->pix_clk_div << 4));    in dsi_7nm_pll_restore_state()
    [all …]
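Both PHY drivers save divider settings by masking individual fields out of packed configuration registers and restore them by packing the fields back in. Below is a compact sketch of that save/restore shape, with a plain array standing in for MMIO; the register names are invented and the field layout simply follows the masks visible above.

    #include <stdint.h>
    #include <stdio.h>

    enum { REG_PLL_OUT_DIV, REG_CLK_CFG0, REG_CLK_CFG1, NUM_REGS };

    static uint32_t regs[NUM_REGS];    /* stand-in for memory-mapped registers */

    struct pll_cached_state {
        uint32_t pll_out_div;
        uint32_t bit_clk_div;
        uint32_t pix_clk_div;
        uint32_t pll_mux;
    };

    static void pll_save_state(struct pll_cached_state *cached)
    {
        uint32_t cfg0 = regs[REG_CLK_CFG0];
        uint32_t cfg1 = regs[REG_CLK_CFG1];

        /* unpack each field from the packed registers */
        cached->pll_out_div = regs[REG_PLL_OUT_DIV] & 0x3;
        cached->bit_clk_div = cfg0 & 0xf;
        cached->pix_clk_div = (cfg0 & 0xf0) >> 4;
        cached->pll_mux = cfg1 & 0x3;
    }

    static void pll_restore_state(const struct pll_cached_state *cached)
    {
        uint32_t val = regs[REG_PLL_OUT_DIV] & ~0x3u;

        /* pack the saved fields back into the registers */
        regs[REG_PLL_OUT_DIV] = val | cached->pll_out_div;
        regs[REG_CLK_CFG0] = cached->bit_clk_div | (cached->pix_clk_div << 4);
        regs[REG_CLK_CFG1] = (regs[REG_CLK_CFG1] & ~0x3u) | cached->pll_mux;
    }

    int main(void)
    {
        struct pll_cached_state state;

        regs[REG_CLK_CFG0] = 0x5a;    /* bit_clk_div = 0xa, pix_clk_div = 0x5 */
        pll_save_state(&state);
        pll_restore_state(&state);
        printf("cfg0 restored to 0x%x\n", regs[REG_CLK_CFG0]);
        return 0;
    }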
|
/linux/Documentation/filesystems/

fuse-io.rst
    10  - cached
    20  In cached mode reads may be satisfied from the page cache, and data may be
    24  The cached mode has two sub modes controlling how writes are handled. The
    30  WRITE requests, as well as updating any cached pages (and caching previously
|
/linux/arch/arm/mach-omap2/

sram.c
    122  int cached = 1;    in omap2_map_sram() (local)
    132  cached = 0;    in omap2_map_sram()
    136  omap_sram_skip, cached);    in omap2_map_sram()
|
/linux/drivers/staging/media/atomisp/pci/hmm/

hmm_bo.c
    696  bool cached,    in alloc_private_pages() (argument)
    728  cached);    in alloc_private_pages()
    744  cached);    in alloc_private_pages()
    807  if (!cached) {    in alloc_private_pages()
    881  const void __user *userptr, bool cached)    in alloc_user_pages() (argument)
    982  const void __user *userptr, bool cached)    in hmm_bo_alloc_pages() (argument)
    997  cached, &dynamic_pool, &reserved_pool);    in hmm_bo_alloc_pages()
    999  ret = alloc_user_pages(bo, userptr, cached);    in hmm_bo_alloc_pages()
    1236  if (((bo->status & HMM_BO_VMAPED) && !cached) ||    in hmm_bo_vmap()
    1237  ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {    in hmm_bo_vmap()
    [all …]
|
hmm.c
    230  bool cached = attrs & ATOMISP_MAP_FLAG_CACHED;    in hmm_alloc() (local)
    253  ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);    in hmm_alloc()
    273  __func__, bo->start, bytes, type, from_highmem, userptr, cached);    in hmm_alloc()
    623  void *hmm_vmap(ia_css_ptr virt, bool cached)    in hmm_vmap() (argument)
    636  ptr = hmm_bo_vmap(bo, cached);    in hmm_vmap()
    716  void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached)    in hmm_isp_vaddr_to_host_vaddr() (argument)
    718  return hmm_vmap(ptr, cached);    in hmm_isp_vaddr_to_host_vaddr()
|
/linux/Documentation/admin-guide/device-mapper/

writecache.rst
    6  doesn't cache reads because reads are supposed to be cached in page cache
    17  2. the underlying device that will be cached
    57  new writes (however, writes to already cached blocks are
    59  writes) and it will gradually writeback any cached
    61  process with "dmsetup status". When the number of cached
|
/linux/drivers/staging/media/atomisp/include/hmm/

hmm.h
    62  void *hmm_vmap(ia_css_ptr virt, bool cached);
    75  void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached);
|
hmm_bo.h
    238  const void __user *userptr, bool cached);
    259  void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached);
|
/linux/arch/arm/plat-omap/

sram.c
    98  unsigned long skip, int cached)    in omap_map_sram() (argument)
    109  omap_sram_base = __arm_ioremap_exec(start, size, cached);    in omap_map_sram()
|
/linux/Documentation/ABI/testing/

sysfs-class-iommu-intel-iommu
    15  The cached hardware capability register value
    23  The cached hardware extended capability register
|
/linux/drivers/block/

ps3vram.c
    428  unsigned int cached, count;    in ps3vram_read() (local)
    449  cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;    in ps3vram_read()
    453  (unsigned int)from, cached, offset, avail, count);    in ps3vram_read()
    457  memcpy(buf, priv->xdr_buf + cached, avail);    in ps3vram_read()
    472  unsigned int cached, count;    in ps3vram_write() (local)
    490  cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;    in ps3vram_write()
    494  cached, offset, avail, count);    in ps3vram_write()
    498  memcpy(priv->xdr_buf + cached, buf, avail);    in ps3vram_write()
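ps3vram_read() and ps3vram_write() locate data inside a bounce buffer by adding a fixed cache base, the selected cache entry times the cache page size, and the offset within that page. A sketch of that address arithmetic follows; the constants and the trivial entry lookup are illustrative only.

    #include <stddef.h>
    #include <stdio.h>

    #define CACHE_OFFSET    4096           /* bytes reserved before the cache pages */
    #define CACHE_PAGE_SIZE (256 * 1024)
    #define CACHE_PAGES     2

    /* The real driver searches for (or loads) the cache entry covering 'from'. */
    static unsigned int lookup_entry(size_t from)
    {
        return (unsigned int)((from / CACHE_PAGE_SIZE) % CACHE_PAGES);
    }

    /* Byte position of a device offset inside the bounce buffer. */
    static size_t cache_position(size_t from)
    {
        unsigned int entry = lookup_entry(from);
        size_t offset = from % CACHE_PAGE_SIZE;

        return CACHE_OFFSET + (size_t)entry * CACHE_PAGE_SIZE + offset;
    }

    int main(void)
    {
        printf("device offset 0x%zx maps to bounce buffer offset 0x%zx\n",
               (size_t)0x42010, cache_position(0x42010));
        return 0;
    }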
|
/linux/drivers/gpu/drm/ttm/

ttm_agp_backend.c
    55  int ret, cached = ttm->caching == ttm_cached;    in ttm_agp_bind() (local)
    77  mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;    in ttm_agp_bind()
|
/linux/sound/usb/

mixer_us16x08.c
    229  elem->cached |= 1 << index;    in snd_us16x08_route_put()
    288  elem->cached |= 1 << index;    in snd_us16x08_master_put()
    329  elem->cached |= 1;    in snd_us16x08_bus_put()
    397  elem->cached |= 1 << index;    in snd_us16x08_channel_put()
    475  elem->cached |= 1 << index;    in snd_us16x08_comp_put()
    534  elem->cached |= 1 << index;    in snd_us16x08_eqswitch_put()
    598  elem->cached |= 1 << index;    in snd_us16x08_eq_put()
    1315  elem->cached = 0xff;    in snd_us16x08_controls_create()
    1337  elem->cached = 1;    in snd_us16x08_controls_create()
    1357  elem->cached = 0xffff;    in snd_us16x08_controls_create()
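The US-16x08 mixer callbacks set one bit per control index in elem->cached to record which stored values are valid and can be reported without asking the hardware again. A small sketch of such a validity bitmask; the structure and helpers are invented for the example.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mixer_elem {
        uint16_t cached;    /* bit N set: value[N] holds valid data */
        int value[16];
    };

    static void elem_store(struct mixer_elem *elem, int index, int val)
    {
        elem->value[index] = val;
        elem->cached |= 1u << index;    /* mark this slot as valid */
    }

    static bool elem_get(const struct mixer_elem *elem, int index, int *val)
    {
        if (!(elem->cached & (1u << index)))
            return false;    /* not cached yet: caller must query the device */
        *val = elem->value[index];
        return true;
    }

    int main(void)
    {
        struct mixer_elem elem = { 0 };
        int v = 0;

        elem_store(&elem, 3, 42);
        printf("cached=%s value=%d\n", elem_get(&elem, 3, &v) ? "yes" : "no", v);
        return 0;
    }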
|
/linux/arch/s390/appldata/

appldata_mem.c
    54  u64 cached; /* size of (used) cache, w/o buffers */    (member)
    102  mem_data->cached = P2K(global_node_page_state(NR_FILE_PAGES)    in appldata_get_mem_data()
|
/linux/tools/perf/Documentation/

perf-buildid-cache.txt
    45  Remove a cached binary which has same build-id of specified file
    49  Purge all cached binaries including older caches which have specified
    53  Purge all cached binaries. This will flush out entire cache.
|