/linux/drivers/md/dm-vdo/

logical-zone.c
    67  zone->zones = zones;  in initialize_zone()
    91  zone_count_t zone;  in vdo_make_logical_zones()  [local]
    104  for (zone = 0; zone < zone_count; zone++) {  in vdo_make_logical_zones()
    155  if (!vdo_is_state_draining(&zone->state) || zone->notifying ||  in check_for_drain_complete()
    252  zone->zone_number, (unsigned long long) zone->flush_generation,  in vdo_increment_logical_zone_flush_generation()
    267  struct logical_zone *zone = data_vio->logical.zone;  in vdo_acquire_flush_generation_lock()  [local]
    305  if (zone->oldest_active_generation <= zone->notification_generation) {  in attempt_generation_complete_notification()
    312  zone->notification_generation = zone->oldest_active_generation;  in attempt_generation_complete_notification()
    327  struct logical_zone *zone = data_vio->logical.zone;  in vdo_release_flush_generation_lock()  [local]
    340  if (!update_oldest_active_generation(zone) || zone->notifying)  in vdo_release_flush_generation_lock()
    [all …]

block-map.c
    1446  a, b, zone->oldest_generation, zone->generation);  in is_not_older()
    1468  (zone->oldest_generation != zone->generation))  in release_generation()
    1573  .zone = zone,  in finish_page_write()
    1705  zone = data_vio->logical.zone->block_map_zone;  in release_page_lock()
    2097  set_generation(zone, tree_page, zone->generation);  in finish_block_map_allocation()
    2330  set_generation(zone, page, zone->generation);  in vdo_write_tree_page()
    2753  VIO_PRIORITY_METADATA, zone, &zone->vio_pool);  in initialize_block_map_zone()
    2759  zone->page_cache.zone = zone;  in initialize_block_map_zone()
    2847  for (zone = 0; zone < map->zone_count; zone++)  in vdo_free_block_map()
    2896  for (zone = 0; zone < map->zone_count; zone++) {  in vdo_decode_block_map()
    [all …]

physical-zone.c
    337  vdo_int_map_free(zone->pbn_operations);  in initialize_zone()
    341  zone->zone_number = zone_number;  in initialize_zone()
    347  free_pbn_lock_pool(vdo_forget(zone->lock_pool));  in initialize_zone()
    348  vdo_int_map_free(zone->pbn_operations);  in initialize_zone()
    399  struct physical_zone *zone = &zones->zones[index];  in vdo_free_physical_zones()  [local]
    401  free_pbn_lock_pool(vdo_forget(zone->lock_pool));  in vdo_free_physical_zones()
    418  return ((zone == NULL) ? NULL : vdo_int_map_get(zone->pbn_operations, pbn));  in vdo_get_physical_zone_pbn_lock()
    542  struct physical_zone *zone = allocation->zone;  in continue_allocating()  [local]
    574  allocation->zone = zone->next;  in continue_allocating()
    634  return_pbn_lock_to_pool(zone->lock_pool, lock);  in vdo_release_physical_zone_pbn_lock()
    [all …]

/linux/drivers/block/null_blk/

zoned.c
    136  zone->capacity = zone->len;  in null_init_zoned_dev()
    137  zone->wp = zone->start + zone->len;  in null_init_zoned_dev()
    158  zone->wp = zone->start + zone->capacity;  in null_init_zoned_dev()
    161  zone->wp = zone->start;  in null_init_zoned_dev()
    387  zone->wp + nr_sectors > zone->start + zone->capacity) {  in null_zone_write()
    420  if (zone->wp == zone->start + zone->capacity) {  in null_zone_write()
    530  if (zone->wp > zone->start)  in null_close_zone()
    536  if (zone->wp == zone->start)  in null_close_zone()
    590  zone->wp = zone->start + zone->len;  in null_finish_zone()
    626  zone->wp = zone->start;  in null_reset_zone()
    [all …]

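The zoned.c hits above sketch out null_blk's write-pointer bookkeeping: a reset returns wp to zone->start, finishing a zone parks wp at zone->start + zone->len, and a sequential write may only advance wp up to zone->start + zone->capacity. A minimal stand-alone sketch of that arithmetic, using a hypothetical struct toy_zone rather than the driver's struct nullb_zone, might look like this:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical, simplified zone model; the field names mirror the hits
 * above, but this is not the driver's struct nullb_zone.
 */
struct toy_zone {
	unsigned long long start;    /* first sector of the zone */
	unsigned long long len;      /* zone size in sectors */
	unsigned long long capacity; /* writable sectors, <= len */
	unsigned long long wp;       /* write pointer (next writable sector) */
};

/* Reset: the zone becomes empty, so the write pointer returns to start. */
static void toy_reset_zone(struct toy_zone *zone)
{
	zone->wp = zone->start;
}

/* Finish: mark the zone full by parking wp at the end of the zone. */
static void toy_finish_zone(struct toy_zone *zone)
{
	zone->wp = zone->start + zone->len;
}

/* Sequential write: must start exactly at wp and stay within capacity. */
static bool toy_zone_write(struct toy_zone *zone, unsigned long long sector,
			   unsigned long long nr_sectors)
{
	if (sector != zone->wp ||
	    zone->wp + nr_sectors > zone->start + zone->capacity)
		return false;	/* unaligned write or write beyond capacity */
	zone->wp += nr_sectors;
	return true;
}

int main(void)
{
	struct toy_zone z = { .start = 0, .len = 256, .capacity = 192, .wp = 0 };

	printf("write ok: %d\n", toy_zone_write(&z, 0, 64));   /* 1 */
	printf("write ok: %d\n", toy_zone_write(&z, 64, 192)); /* 0: exceeds capacity */
	toy_finish_zone(&z);
	printf("full:  %d\n", z.wp == z.start + z.len);        /* 1 */
	toy_reset_zone(&z);
	printf("empty: %d\n", z.wp == z.start);                /* 1 */
	return 0;
}
```

Keeping capacity separate from len allows a zone whose writable area is smaller than its size, which is why the bounds check above uses capacity while "finish" parks the pointer at len; that split matches what the init and finish hits suggest.
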
/linux/fs/pstore/

zone.c
    413  zone->name, i, zone->off,  in psz_kmsg_recover_meta()
    421  zone->name, i, zone->off,  in psz_kmsg_recover_meta()
    444  zone->name, i, zone->off,  in psz_kmsg_recover_meta()
    453  zone->name, i, zone->off,  in psz_kmsg_recover_meta()
    489  if (!zone || zone->oldbuf)  in psz_recover_zone()
    516  zone->name, zone->off, zone->buffer_size);  in psz_recover_zone()
    523  zone->name, zone->off, zone->buffer_size,  in psz_recover_zone()
    529  zone->name, zone->off, zone->buffer_size,  in psz_recover_zone()
    652  if (zone && zone->buffer && buffer_datalen(zone))  in psz_ok()
    763  zone->oldbuf = zone->buffer;  in psz_kmsg_write_record()
    [all …]

/linux/mm/

page_alloc.c
    2090  struct zone *zone;  in unreserve_highatomic_pageblock()  [local]
    2412  struct zone *zone;  in drain_pages()  [local]
    2648  struct zone *zone;  in free_unref_page()  [local]
    3339  struct zone *zone;  in get_page_from_freelist()  [local]
    3807  struct zone *zone;  in should_compact_retry()  [local]
    3977  struct zone *zone;  in wake_all_kswapds()  [local]
    4098  struct zone *zone;  in should_reclaim_retry()  [local]
    4558  struct zone *zone;  in alloc_pages_bulk_noprof()  [local]
    5102  struct zone *zone;  in nr_free_zone_pages()  [local]
    5147  struct zone *zone;  in build_zonerefs_node()  [local]
    [all …]

show_mem.c
    26  static inline void show_node(struct zone *zone)  in show_node()  [argument]
    38  struct zone *zone;  in si_mem_available()  [local]
    40  for_each_zone(zone)  in si_mem_available()
    104  struct zone *zone = &pgdat->node_zones[zone_type];  in si_meminfo_node()  [local]
    106  if (is_highmem(zone)) {  in si_meminfo_node()
    190  struct zone *zone;  in show_free_areas()  [local]
    304  show_node(zone);  in show_free_areas()
    327  zone->name,  in show_free_areas()
    362  show_node(zone);  in show_free_areas()
    403  struct zone *zone;  in __show_mem()  [local]
    [all …]

vmstat.c
    54  struct zone *zone;  in zero_zones_numa_counters()  [local]
    189  struct zone *zone;  in fold_vm_numa_events()  [local]
    275  struct zone *zone;  in refresh_zone_stat_thresholds()  [local]
    320  struct zone *zone;  in set_pgdat_percpu_threshold()  [local]
    706  struct zone *zone;  in inc_zone_page_state()  [local]
    811  struct zone *zone;  in refresh_cpu_vm_stats()  [local]
    899  struct zone *zone;  in cpu_vm_stats_fold()  [local]
    1505  struct zone *zone;  in walk_zones_in_node()  [local]
    1524  struct zone *zone)  in frag_show_print()  [argument]
    1728  struct zone *zone)  in zoneinfo_show_print()  [argument]
    [all …]

compaction.c
    468  struct zone *zone = cc->zone;  in update_cached_migrate()  [local]
    491  struct zone *zone = cc->zone;  in update_pageblock_skip()  [local]
    1713  struct zone *zone = cc->zone;  in isolate_freepages()  [local]
    2455  struct zone *zone;  in compaction_zonelist_suitable()  [local]
    2753  .zone = zone,  in compact_zone_order()
    2813  struct zone *zone;  in try_to_compact_pages()  [local]
    2890  struct zone *zone;  in compact_node()  [local]
    2908  cc.zone = zone;  in compact_node()
    3026  struct zone *zone;  in kcompactd_node_suitable()  [local]
    3053  struct zone *zone;  in kcompactd_do_work()  [local]
    [all …]

memory_hotplug.c
    475  zone->spanned_pages = zone_end_pfn(zone) - pfn;  in shrink_zone_span()
    491  zone->spanned_pages = pfn - zone->zone_start_pfn + 1;  in shrink_zone_span()
    502  struct zone *zone;  in update_pgdat_span()  [local]
    527  void remove_pfn_range_from_zone(struct zone *zone,  in remove_pfn_range_from_zone()  [argument]
    677  struct zone *zone, struct memory_notify *arg)  in node_states_check_changes_online()  [argument]
    789  struct zone *zone)  in auto_movable_stats_account_zone()  [argument]
    843  struct zone *zone;  in auto_movable_can_online_movable()  [local]
    899  struct zone *zone = &pgdat->node_zones[zid];  in default_kernel_zone_for_pfn()  [local]
    1066  struct zone *zone = page_zone(page);  in adjust_present_page_count()  [local]
    1877  struct zone *zone, struct memory_notify *arg)  in node_states_check_changes_offline()  [argument]
    [all …]

mm_init.c
    52  struct zone *zone;  in mminit_verify_zonelist()  [local]
    74  pr_cont("%d:%s ", zone_to_nid(zone), zone->name);  in mminit_verify_zonelist()
    720  struct zone *zone = &pgdat->node_zones[zid];  in init_reserved_page()  [local]
    950  struct zone *zone = node->node_zones + j;  in memmap_init()  [local]
    1306  struct zone *zone = pgdat->node_zones + i;  in calculate_node_totalpages()  [local]
    1547  struct zone *zone = pgdat->node_zones + z;  in free_area_init_core_hotplug()  [local]
    1564  struct zone *zone = pgdat->node_zones + j;  in free_area_init_core()  [local]
    2067  struct zone *zone = arg;  in deferred_init_memmap_chunk()  [local]
    2096  struct zone *zone;  in deferred_init_memmap()  [local]
    2250  void set_zone_contiguous(struct zone *zone)  in set_zone_contiguous()  [argument]
    [all …]

page_isolation.c
    37  struct zone *zone = page_zone(page);  in has_unmovable_pages()  [local]
    72  if (zone_idx(zone) == ZONE_MOVABLE)  in has_unmovable_pages()
    150  struct zone *zone = page_zone(page);  in set_migratetype_isolate()  [local]
    188  zone->nr_isolate_pageblock++;  in set_migratetype_isolate()
    207  struct zone *zone;  in unset_migratetype_isolate()  [local]
    213  zone = page_zone(page);  in unset_migratetype_isolate()
    263  zone->nr_isolate_pageblock--;  in unset_migratetype_isolate()
    315  struct zone *zone;  in isolate_single_pageblock()  [local]
    333  zone->zone_start_pfn);  in isolate_single_pageblock()
    611  struct zone *zone;  in test_pages_isolated()  [local]
    [all …]

/linux/include/linux/

memory_hotplug.h
    11  struct zone;
    101  static inline unsigned zone_span_seqbegin(struct zone *zone)  in zone_span_seqbegin()  [argument]
    109  static inline void zone_span_writelock(struct zone *zone)  in zone_span_writelock()  [argument]
    113  static inline void zone_span_writeunlock(struct zone *zone)  in zone_span_writeunlock()  [argument]
    117  static inline void zone_seqlock_init(struct zone *zone)  in zone_seqlock_init()  [argument]
    126  struct zone *zone, bool mhp_off_inaccessible);
    129  struct zone *zone, struct memory_group *group);
    216  static inline void zone_seqlock_init(struct zone *zone) {}  in zone_seqlock_init()  [argument]
    283  struct zone *zone, struct memory_group *group);
    292  struct zone *zone, struct memory_group *group)  in offline_pages()  [argument]
    [all …]

mmzone.h
    1056  return zone->zone_start_pfn + zone->spanned_pages;  in zone_end_pfn()
    1069  static inline bool zone_is_empty(struct zone *zone)  in zone_is_empty()  [argument]
    1228  struct zone *zone; /* Pointer to actual zone */  [member]
    1496  #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)  [argument]
    1516  static inline bool managed_zone(struct zone *zone)  in managed_zone()  [argument]
    1528  static inline int zone_to_nid(struct zone *zone)  in zone_to_nid()  [argument]
    1538  static inline int zone_to_nid(struct zone *zone)  in zone_to_nid()  [argument]
    1565  static inline int is_highmem(struct zone *zone)  in is_highmem()  [argument]
    1596  extern struct zone *next_zone(struct zone *zone);
    1616  zone = next_zone(zone))
    [all …]

vmstat.h
    149  static inline void zone_numa_event_add(long x, struct zone *zone,  in zone_numa_event_add()  [argument]
    156  static inline unsigned long zone_numa_event_state(struct zone *zone,  in zone_numa_event_state()  [argument]
    169  static inline void zone_page_state_add(long x, struct zone *zone,  in zone_page_state_add()  [argument]
    211  static inline unsigned long zone_page_state(struct zone *zone,  in zone_page_state()  [argument]
    228  static inline unsigned long zone_page_state_snapshot(struct zone *zone,  in zone_page_state_snapshot()  [argument]
    247  __count_numa_event(struct zone *zone, enum numa_stat_item item)  in __count_numa_event()  [argument]
    311  void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);
    313  int calculate_pressure_threshold(struct zone *zone);
    314  int calculate_normal_threshold(struct zone *zone);
    323  static inline void __mod_zone_page_state(struct zone *zone,  in __mod_zone_page_state()  [argument]
    [all …]

/linux/tools/power/cpupower/lib/

powercap.c
    132  strcat(file, zone->sys_name);  in sysfs_powercap_get64_val()
    175  strcat(path, zone->sys_name);  in powercap_zone_get_enabled()
    206  strcat(file, zone->sys_name);  in powercap_read_zone()
    209  if (zone->parent)  in powercap_read_zone()
    210  zone->tree_depth = zone->parent->tree_depth + 1;  in powercap_read_zone()
    213  zone->has_energy_uj = 1;  in powercap_read_zone()
    216  zone->has_power_uw = 1;  in powercap_read_zone()
    249  child_zone->parent = zone;  in powercap_read_zone()
    295  if (!zone)  in powercap_walk_zones()
    298  ret = f(zone);  in powercap_walk_zones()
    [all …]

/linux/drivers/md/dm-vdo/indexer/

index.c
    110  unsigned int zone;  in enqueue_barrier_messages()  [local]
    112  for (zone = 0; zone < index->zone_count; zone++) {  in enqueue_barrier_messages()
    205  swap(zone->open_chapter, zone->writing_chapter);  in swap_open_chapter()
    239  if (zone->id == i)  in announce_chapter_closed()
    259  (unsigned long long) zone->newest_virtual_chapter, zone->id,  in open_next_chapter()
    261  zone->open_chapter->capacity - zone->open_chapter->size);  in open_next_chapter()
    268  uds_set_volume_index_zone_open_chapter(zone->index->volume_index, zone->id,  in open_next_chapter()
    272  finished_zones = start_closing_chapter(zone->index, zone->id,  in open_next_chapter()
    1113  if (zone == NULL)  in free_index_zone()
    1118  vdo_free(zone);  in free_index_zone()
    [all …]

/linux/drivers/md/

dm-zoned-metadata.c
    226  return zone->id - zone->dev->zone_offset;  in dmz_dev_zone_id()
    313  if (!zone)  in dmz_insert()
    327  return zone;  in dmz_insert()
    1409  zmd->sb[0].zone = zone;  in dmz_init_zone()
    1630  zone->id, zone->wp_block, wp);  in dmz_handle_seq_write_err()
    1633  dmz_invalidate_blocks(zmd, zone, zone->wp_block,  in dmz_handle_seq_write_err()
    2518  zone->id, zone->weight,  in dmz_validate_blocks()
    2598  zone->id, zone->weight, n);  in dmz_invalidate_blocks()
    2913  zone = dmz_get(zmd, zmd->sb[0].zone->id + i);  in dmz_ctr_metadata()
    2920  if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {  in dmz_ctr_metadata()
    [all …]

/linux/include/net/netfilter/

nf_conntrack_zones.h
    12  return &ct->zone;  in nf_ct_zone()
    21  zone->id = id;  in nf_ct_zone_init()
    22  zone->flags = flags;  in nf_ct_zone_init()
    23  zone->dir = dir;  in nf_ct_zone_init()
    25  return zone;  in nf_ct_zone_init()
    36  if (tmpl->zone.flags & NF_CT_FLAG_MARK)  in nf_ct_zone_tmpl()
    43  const struct nf_conntrack_zone *zone)  in nf_ct_zone_add()  [argument]
    46  ct->zone = *zone;  in nf_ct_zone_add()
    53  return zone->dir & (1 << dir);  in nf_ct_zone_matches_dir()
    60  return nf_ct_zone_matches_dir(zone, dir) ?  in nf_ct_zone_id()
    [all …]

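The nf_conntrack_zones.h hits show the zone helpers as thin accessors: nf_ct_zone_init() just records id, flags, and dir, and the per-direction check is a single bit test on zone->dir. A self-contained sketch of that pattern, with toy_* names and constants standing in for the kernel's definitions, could read:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's zone/direction definitions. */
enum toy_dir { TOY_DIR_ORIGINAL = 0, TOY_DIR_REPLY = 1 };
#define TOY_ZONE_DIR_ORIG	(1 << TOY_DIR_ORIGINAL)
#define TOY_ZONE_DIR_REPL	(1 << TOY_DIR_REPLY)
#define TOY_ZONE_DEFAULT_ID	0

struct toy_zone {
	unsigned short id;
	unsigned char flags;
	unsigned char dir;	/* bitmask of directions the zone applies to */
};

/* Mirror of the init pattern in the hits: just record id, flags and dir. */
static struct toy_zone *toy_zone_init(struct toy_zone *zone, unsigned short id,
				      unsigned char dir, unsigned char flags)
{
	zone->id = id;
	zone->flags = flags;
	zone->dir = dir;
	return zone;
}

/* A zone "matches" a direction when the corresponding bit is set. */
static bool toy_zone_matches_dir(const struct toy_zone *zone, enum toy_dir dir)
{
	return zone->dir & (1 << dir);
}

/* Report the zone id only for matching directions, the default otherwise. */
static unsigned short toy_zone_id(const struct toy_zone *zone, enum toy_dir dir)
{
	return toy_zone_matches_dir(zone, dir) ? zone->id : TOY_ZONE_DEFAULT_ID;
}

int main(void)
{
	struct toy_zone zone;

	toy_zone_init(&zone, 42, TOY_ZONE_DIR_ORIG, 0);
	printf("orig id:  %d\n", toy_zone_id(&zone, TOY_DIR_ORIGINAL)); /* 42 */
	printf("reply id: %d\n", toy_zone_id(&zone, TOY_DIR_REPLY));    /* 0  */
	return 0;
}
```
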
/linux/fs/adfs/

map.c
    179  } while (--zone > 0);  in scan_map()
    202  unsigned int zone;  in adfs_map_statfs()  [local]
    209  } while (--zone > 0);  in adfs_map_statfs()
    322  for (zone = 1; zone < nzones; zone++) {  in adfs_map_layout()
    324  dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS;  in adfs_map_layout()
    337  unsigned int zone;  in adfs_map_read()  [local]
    339  for (zone = 0; zone < nzones; zone++) {  in adfs_map_read()
    340  dm[zone].dm_bh = sb_bread(sb, map_addr + zone);  in adfs_map_read()
    341  if (!dm[zone].dm_bh)  in adfs_map_read()
    350  unsigned int zone;  in adfs_map_relse()  [local]
    [all …]

/linux/kernel/power/

snapshot.c
    630  struct zone *zone;  in create_mem_extents()  [local]
    755  zone = bm->cur.zone;  in memory_bm_find_bit()
    785  if (zone == bm->cur.zone &&  in memory_bm_find_bit()
    803  bm->cur.zone = zone;  in memory_bm_find_bit()
    1248  static void mark_free_pages(struct zone *zone)  in mark_free_pages()  [argument]
    1303  struct zone *zone;  in count_free_highmem_pages()  [local]
    1351  struct zone *zone;  in count_highmem_pages()  [local]
    1414  struct zone *zone;  in count_data_pages()  [local]
    1529  struct zone *zone;  in copy_data_pages()  [local]
    1831  struct zone *zone;  in hibernate_preallocate_memory()  [local]
    [all …]

/linux/virt/kvm/

coalesced_mmio.c
    36  if (addr < dev->zone.addr)  in coalesced_mmio_in_range()
    38  if (addr + len > dev->zone.addr + dev->zone.size)  in coalesced_mmio_in_range()
    123  struct kvm_coalesced_mmio_zone *zone)  in kvm_vm_ioctl_register_coalesced_mmio()  [argument]
    128  if (zone->pio != 1 && zone->pio != 0)  in kvm_vm_ioctl_register_coalesced_mmio()
    138  dev->zone = *zone;  in kvm_vm_ioctl_register_coalesced_mmio()
    142  zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,  in kvm_vm_ioctl_register_coalesced_mmio()
    143  zone->addr, zone->size, &dev->dev);  in kvm_vm_ioctl_register_coalesced_mmio()
    159  struct kvm_coalesced_mmio_zone *zone)  in kvm_vm_ioctl_unregister_coalesced_mmio()  [argument]
    164  if (zone->pio != 1 && zone->pio != 0)  in kvm_vm_ioctl_unregister_coalesced_mmio()
    170  if (zone->pio == dev->zone.pio &&  in kvm_vm_ioctl_unregister_coalesced_mmio()
    [all …]

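The coalesced_mmio.c hits reduce to an interval-containment test plus a pio flag sanity check: an access [addr, addr + len) is in range only if it starts at or after the zone base and ends at or before base + size. A stand-alone sketch of those two checks (hypothetical toy_* types, not the KVM structures) might be:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical zone descriptor; it mirrors the addr/size/pio fields seen
 * in the hits but is not struct kvm_coalesced_mmio_zone.
 */
struct toy_mmio_zone {
	uint64_t addr;	/* zone base (guest physical address or port) */
	uint64_t size;	/* zone length in bytes */
	uint32_t pio;	/* 1 = port I/O zone, 0 = MMIO zone */
};

/* An access is coalescable only if it lies entirely inside the zone. */
static bool toy_mmio_in_range(const struct toy_mmio_zone *zone,
			      uint64_t addr, uint64_t len)
{
	if (addr < zone->addr)
		return false;
	if (addr + len > zone->addr + zone->size)
		return false;
	return true;
}

/* Registration-time sanity check mirroring "pio != 1 && pio != 0". */
static bool toy_zone_valid(const struct toy_mmio_zone *zone)
{
	return zone->pio == 0 || zone->pio == 1;
}

int main(void)
{
	struct toy_mmio_zone zone = { .addr = 0x1000, .size = 0x100, .pio = 0 };

	printf("valid:  %d\n", toy_zone_valid(&zone));                /* 1 */
	printf("inside: %d\n", toy_mmio_in_range(&zone, 0x1010, 8));  /* 1 */
	printf("spills: %d\n", toy_mmio_in_range(&zone, 0x10fc, 8));  /* 0 */
	return 0;
}
```
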
/linux/drivers/net/ethernet/mellanox/mlx4/

alloc.c
    250  struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);  in mlx4_zone_add_one()  [local]
    252  if (NULL == zone)  in mlx4_zone_add_one()
    279  *puid = zone->uid;  in mlx4_zone_add_one()
    328  kfree(zone);  in mlx4_zone_allocator_destroy()
    349  uid = zone->uid;  in __mlx4_alloc_from_zone()
    421  mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr);  in __mlx4_free_from_zone()
    432  return zone;  in __mlx4_find_zone_by_uid()
    447  bitmap = zone == NULL ? NULL : zone->bitmap;  in mlx4_zone_get_bitmap()
    463  if (NULL == zone) {  in mlx4_zone_remove_one()
    472  kfree(zone);  in mlx4_zone_remove_one()
    [all …]

/linux/include/trace/events/

compaction.h
    194  TP_PROTO(struct zone *zone,
    223  TP_PROTO(struct zone *zone,
    232  TP_PROTO(struct zone *zone,
    241  TP_PROTO(struct zone *zone, int order),
    243  TP_ARGS(zone, order),
    274  TP_PROTO(struct zone *zone, int order),
    276  TP_ARGS(zone, order)
    281  TP_PROTO(struct zone *zone, int order),
    283  TP_ARGS(zone, order)
    288  TP_PROTO(struct zone *zone, int order),
    [all …]

/linux/drivers/thermal/tegra/

tegra-bpmp-thermal.c
    43  req.get_temp.zone = zone->idx;  in __tegra_bpmp_thermal_get_temp()
    81  req.set_trip.zone = zone->idx;  in tegra_bpmp_thermal_set_trips()
    102  struct tegra_bpmp_thermal_zone *zone;  in tz_device_update_work_fn()  [local]
    137  req.host_trip_reached.zone);  in bpmp_mrq_thermal()
    256  zone = devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);  in tegra_bpmp_thermal_probe()
    257  if (!zone)  in tegra_bpmp_thermal_probe()
    260  zone->idx = i;  in tegra_bpmp_thermal_probe()
    261  zone->tegra = tegra;  in tegra_bpmp_thermal_probe()
    270  devm_kfree(&pdev->dev, zone);  in tegra_bpmp_thermal_probe()
    279  devm_kfree(&pdev->dev, zone);  in tegra_bpmp_thermal_probe()
    [all …]