Identifier search results for new_count under drivers/: each entry lists the file, the hit's line number and code fragment, and the containing function; (local) and (argument) mark the declaration hits.

/drivers/acpi/acpica/

utdelete.c, all hits in acpi_ut_update_ref_count():
     368  u16 new_count = 0;  (local)
     390  new_count = original_count + 1;
     391  object->common.reference_count = new_count;
     406  new_count));
     415  new_count = original_count - 1;
     416  object->common.reference_count = new_count;
     431  object->common.type, new_count));
     435  if (new_count == 0) {
     453  if (new_count > ACPI_MAX_REFERENCE_COUNT) {
     456  new_count, object, object->common.type, message));
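These ten hits together outline acpi_ut_update_ref_count(): new_count is derived from original_count for an increment or a decrement, written back into the object, acted on when a decrement reaches zero, and reported when it exceeds ACPI_MAX_REFERENCE_COUNT. A minimal userspace sketch of that flow, assuming a stand-in limit and stand-in diagnostics (none of this is the ACPICA API):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_REFERENCE_COUNT 0x4000   /* stand-in, not the ACPICA value */

    struct object {
        uint16_t reference_count;
    };

    /* Mirror of the increment/decrement flow in acpi_ut_update_ref_count():
     * compute new_count, store it back, then handle the special cases. */
    static void update_ref_count(struct object *obj, int delta)
    {
        uint16_t original_count = obj->reference_count;
        uint16_t new_count;

        if (delta > 0)
            new_count = original_count + 1;
        else
            new_count = original_count - 1;
        obj->reference_count = new_count;

        if (delta < 0 && new_count == 0)
            printf("last reference dropped, object can be deleted\n");
        if (new_count > MAX_REFERENCE_COUNT)
            fprintf(stderr, "suspiciously large refcount %u\n", new_count);
    }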
|
nsrepair.c, all hits in acpi_ns_remove_null_elements():
     447  u32 new_count;  (local)
     476  new_count = count;
     485  new_count--;
     496  if (new_count < count) {
     499  info->full_pathname, (count - new_count)));
     504  obj_desc->package.count = new_count;
|
nsrepair2.c, all hits in acpi_ns_remove_element():
     922  u32 new_count;  (local)
     928  new_count = count - 1;
     950  obj_desc->package.count = new_count;
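Both nsrepair files perform the same predefined-name repair: strip NULL entries out of a package's element array and store the shrunken total back into package.count. A compact-in-place sketch with illustrative names (the real code walks ACPICA's operand-object elements):

    #include <stddef.h>

    /* Walk the element array, keep non-NULL pointers packed at the front,
     * and return the surviving count for the caller to store as
     * package.count, mirroring acpi_ns_remove_null_elements(). */
    static size_t remove_null_elements(void **elements, size_t count)
    {
        size_t new_count = count;
        size_t src, dst = 0;

        for (src = 0; src < count; src++) {
            if (!elements[src]) {
                new_count--;             /* one fewer live element */
                continue;
            }
            elements[dst++] = elements[src];
        }
        return new_count;                /* new_count == dst here */
    }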
|
/drivers/perf/

marvell_cn10k_ddr_pmu.c
  in cn10k_ddr_perf_event_update():
     595  u64 prev_count, new_count, mask;  (local)
     599  new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
     600  } while (local64_xchg(&hwc->prev_count, new_count) != prev_count);
     604  local64_add((new_count - prev_count) & mask, &event->count);
  in cn10k_ddr_pmu_overflow_handler():
     856  u64 prev_count, new_count;  (local)
     864  new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
     869  if (new_count < prev_count)
     877  new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
     882  if (new_count < prev_count)
|
marvell_pem_pmu.c, all hits in pem_perf_event_update():
     238  u64 prev_count, new_count;  (local)
     242  new_count = pem_perf_read_counter(pmu, event, hwc->idx);
     243  } while (local64_xchg(&hwc->prev_count, new_count) != prev_count);
     245  local64_add((new_count - prev_count), &event->count);
|
arm_dmc620_pmu.c, all hits in dmc620_pmu_event_update():
     327  u64 delta, prev_count, new_count;  (local)
     332  new_count = dmc620_pmu_read_counter(event);
     334  prev_count, new_count) != prev_count);
     335  delta = (new_count - prev_count) & DMC620_CNT_MAX_PERIOD;
|
arm_dsu_pmu.c, all hits in dsu_pmu_event_update():
     334  u64 delta, prev_count, new_count;  (local)
     339  new_count = dsu_pmu_read_counter(event);
     340  } while (local64_cmpxchg(&hwc->prev_count, prev_count, new_count) !=
     342  delta = (new_count - prev_count) & DSU_PMU_COUNTER_MASK(hwc->idx);
|
arm-ccn.c, all hits in arm_ccn_pmu_event_update():
     863  u64 prev_count, new_count, mask;  (local)
     867  new_count = arm_ccn_pmu_read_counter(ccn, hw->idx);
     868  } while (local64_xchg(&hw->prev_count, new_count) != prev_count);
     872  local64_add((new_count - prev_count) & mask, &event->count);
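All five PMU drivers above use one idiom for lock-free counter accumulation: snapshot prev_count, read the hardware counter into new_count, publish it with local64_xchg() (local64_cmpxchg() in the DMC-620 and DSU variants), and retry if another context got there first; the delta is masked to the hardware counter width so subtraction stays correct across wraparound. A runnable userspace C11 analogue, assuming a 48-bit counter and a fake read_hw_counter():

    #include <stdatomic.h>
    #include <stdint.h>

    #define COUNTER_MASK ((UINT64_C(1) << 48) - 1)  /* assumed hw width */

    static _Atomic uint64_t prev_count;
    static _Atomic uint64_t event_count;

    static uint64_t read_hw_counter(void)
    {
        static uint64_t fake;
        return fake += 3;                /* stand-in for the MMIO read */
    }

    /* Analogue of the *_event_update() loops above: publish the fresh
     * reading; if the exchanged-out value is not our snapshot, another
     * updater raced us and we retry so no interval is lost or counted
     * twice. */
    static void event_update(void)
    {
        uint64_t prev, new_count;

        do {
            prev = atomic_load(&prev_count);
            new_count = read_hw_counter();
        } while (atomic_exchange(&prev_count, new_count) != prev);

        /* Masked subtraction keeps the delta correct on wraparound. */
        atomic_fetch_add(&event_count, (new_count - prev) & COUNTER_MASK);
    }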
|
/drivers/net/ethernet/mellanox/mlxsw/

spectrum_acl_ctcam.c, all hits in mlxsw_sp_acl_ctcam_region_parman_resize():
     122  unsigned long new_count)  (argument)
     130  if (new_count > max_tcam_rules)
     132  return mlxsw_sp_acl_ctcam_region_resize(mlxsw_sp, region, new_count);
|
spectrum1_mr_tcam.c, all hits in mlxsw_sp1_mr_tcam_region_parman_resize():
     199  unsigned long new_count)  (argument)
     207  if (new_count > max_tcam_rules)
     210  mr_tcam_region->rtar_key_type, new_count);
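Both mlxsw hits are parman resize callbacks with the same shape: reject any new_count larger than the TCAM can hold, otherwise delegate to the real region resize. A sketch of that shape; the limit constant and do_region_resize() are placeholders, not the mlxsw API:

    #include <errno.h>

    static int do_region_resize(void *priv, unsigned long count)
    {
        (void)priv;
        (void)count;
        return 0;                        /* stand-in for the firmware call */
    }

    /* Validate against the device limit, then hand off, as in the two
     * *_parman_resize() callbacks above. */
    static int region_parman_resize(void *priv, unsigned long new_count)
    {
        const unsigned long max_tcam_rules = 4096;   /* assumed limit */

        if (new_count > max_tcam_rules)
            return -EINVAL;
        return do_region_resize(priv, new_count);
    }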
|
/drivers/vfio/pci/hisilicon/

hisi_acc_vfio_pci.c
  in hisi_acc_pci_rw_access_check():
    1255  size_t *new_count)  (argument)
    1269  *new_count = min(count, (size_t)(end - pos));
  in hisi_acc_vfio_pci_write():
    1303  size_t new_count = count;  (local)
    1306  ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
    1310  return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
  in hisi_acc_vfio_pci_read():
    1317  size_t new_count = count;  (local)
    1320  ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
    1324  return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
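Here the access check clamps new_count so a read or write can never run past the end of the protected register window; both callers start with new_count = count and pass the possibly shrunken value on to the vfio-pci core helpers. The clamp on its own, assuming pos <= end has already been validated (the real check does more than this):

    #include <stddef.h>

    /* Shrink the requested byte count to what fits before 'end', as in
     * hisi_acc_pci_rw_access_check() above. */
    static size_t clamp_rw_count(size_t count, long long pos, long long end)
    {
        size_t room = (size_t)(end - pos);

        return count < room ? count : room;
    }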
|
/drivers/gpu/drm/msm/disp/dpu1/

dpu_trace.h, all hits in one TRACE_EVENT definition:
     556  TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count,
     558  TP_ARGS(drm_id, pp, new_count, event),
     562  __field( int, new_count )
     568  __entry->new_count = new_count;
     572  __entry->pp, __entry->new_count, __entry->event)
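These five hits are the places a tracepoint touches one field: the prototype (TP_PROTO/TP_ARGS), the ring-buffer record layout (__field), the capture at trace time (TP_fast_assign), and the later formatting (TP_printk). A kernel-style skeleton showing how new_count flows through all four stages; the event name is invented and the trace-header boilerplate is omitted, so this is illustrative rather than buildable:

    TRACE_EVENT(example_pp_event,
        TP_PROTO(uint32_t drm_id, int pp, int new_count, uint32_t event),
        TP_ARGS(drm_id, pp, new_count, event),
        TP_STRUCT__entry(
            __field(uint32_t, drm_id)
            __field(int, pp)
            __field(int, new_count)
            __field(uint32_t, event)
        ),
        TP_fast_assign(
            __entry->drm_id = drm_id;
            __entry->pp = pp;
            __entry->new_count = new_count;   /* copied into the record */
            __entry->event = event;
        ),
        TP_printk("id=%u pp=%d new_count=%d event=%u",
                  __entry->drm_id, __entry->pp, __entry->new_count,
                  __entry->event)
    );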
|
/drivers/iommu/intel/

perfmon.c, all hits in iommu_pmu_event_update():
     305  u64 prev_count, new_count, delta;  (local)
     310  new_count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx));
     311  if (local64_xchg(&hwc->prev_count, new_count) != prev_count)
     318  delta = (new_count << shift) - (prev_count << shift);
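The Intel IOMMU PMU handles counters narrower than 64 bits with shifts rather than a mask: shifting both snapshots left by 64 minus the counter width pushes the unimplemented high bits out, the 64-bit subtraction then wraps correctly, and the code after the last hit shifts the delta back down. A self-contained sketch of that arithmetic:

    #include <stdint.h>

    /* Delta of a counter_width-bit counter using the shift trick from
     * iommu_pmu_event_update(): correct even when new_count has wrapped
     * past prev_count. Assumes 0 < counter_width <= 64. */
    static uint64_t counter_delta(uint64_t prev_count, uint64_t new_count,
                                  unsigned int counter_width)
    {
        unsigned int shift = 64 - counter_width;
        uint64_t delta = (new_count << shift) - (prev_count << shift);

        return delta >> shift;
    }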
|
/drivers/md/

dm-thin-metadata.c
  in __resize_space_map():
    2028  static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)  (argument)
    2037  if (new_count == old_count)
    2040  if (new_count < old_count) {
    2045  return dm_sm_extend(sm, new_count - old_count);
  in dm_pool_resize_data_dev():
    2048  int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)  (argument)
    2054  r = __resize_space_map(pmd->data_sm, new_count);
  in dm_pool_resize_metadata_dev():
    2060  int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)  (argument)
    2066  r = __resize_space_map(pmd->metadata_sm, new_count);
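__resize_space_map() treats resizing as extend-only: equal counts are a no-op, a smaller new_count is refused, and growth becomes "extend by the difference"; the two exported wrappers simply point the helper at the data or the metadata space map. A sketch of the helper, with dm_sm_extend() reduced to an assumed sm_extend() stub:

    #include <errno.h>
    #include <stdint.h>

    typedef uint64_t block_t;            /* stand-in for dm_block_t */

    static int sm_extend(void *sm, block_t extra_blocks)
    {
        (void)sm;
        (void)extra_blocks;
        return 0;                        /* stand-in for dm_sm_extend() */
    }

    /* Extend-only resize, mirroring __resize_space_map(). */
    static int resize_space_map(void *sm, block_t old_count, block_t new_count)
    {
        if (new_count == old_count)
            return 0;
        if (new_count < old_count)
            return -EINVAL;              /* shrinking is not supported */
        return sm_extend(sm, new_count - old_count);
    }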
|
/drivers/net/ethernet/microsoft/mana/

mana_ethtool.c, all hits in mana_set_channels():
     395  unsigned int new_count = channels->combined_count;  (local)
     399  err = mana_pre_alloc_rxbufs(apc, ndev->mtu, new_count);
     411  apc->num_queues = new_count;
|
/drivers/scsi/isci/

remote_node_context.c, all hits in sci_remote_node_context_tx_rx_suspended_state_enter():
     357  u32 new_count = rnc->suspend_count + 1;  (local)
     359  if (new_count == 0)
     362  rnc->suspend_count = new_count;
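The isci hit guards a u32 increment: a new_count of 0 can only mean the addition wrapped, so the wrapped value is not stored (the VDO block-map hit further down asserts the same non-zero condition after an increment). A sketch of the guard; saturating is an illustrative policy, the real state-enter handler does its own bookkeeping at that point:

    #include <stdint.h>

    /* Increment, but never store a wrapped-around zero, as in
     * sci_remote_node_context_tx_rx_suspended_state_enter(). */
    static void bump_suspend_count(uint32_t *suspend_count)
    {
        uint32_t new_count = *suspend_count + 1;

        if (new_count == 0)              /* 0xffffffff + 1 wrapped */
            return;                      /* saturate at the old value */
        *suspend_count = new_count;
    }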
|
/drivers/char/

random.c, all hits in add_interrupt_randomness():
    1120  unsigned int new_count;  (local)
    1124  new_count = ++fast_pool->count;
    1126  if (new_count & MIX_INFLIGHT)
    1129  if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
|
/drivers/net/dsa/sja1105/

sja1105_static_config.c, all hits in sja1105_table_resize():
    1931  int sja1105_table_resize(struct sja1105_table *table, size_t new_count)  (argument)
    1936  if (new_count > table->ops->max_entry_count)
    1939  new_entries = kcalloc(new_count, entry_size, GFP_KERNEL);
    1943  memcpy(new_entries, old_entries, min(new_count, table->entry_count) *
    1947  table->entry_count = new_count;
|
sja1105_static_config.h
     493  int sja1105_table_resize(struct sja1105_table *table, size_t new_count);  (prototype)
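sja1105_table_resize() is the textbook array resize: bound new_count by the table's max_entry_count, allocate a zeroed array, copy min(new_count, entry_count) entries so both growing and shrinking are safe, then swap buffers and publish the new count. A userspace analogue with calloc()/free() standing in for kcalloc()/kfree():

    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>

    struct table {
        void *entries;
        size_t entry_count;
        size_t entry_size;
        size_t max_entry_count;
    };

    /* Grow or shrink t->entries to new_count entries, preserving as many
     * existing entries as fit, in the manner of sja1105_table_resize(). */
    static int table_resize(struct table *t, size_t new_count)
    {
        size_t keep = new_count < t->entry_count ? new_count : t->entry_count;
        void *new_entries;

        if (new_count > t->max_entry_count)
            return -ERANGE;

        new_entries = calloc(new_count, t->entry_size);
        if (!new_entries && new_count)
            return -ENOMEM;

        if (keep)
            memcpy(new_entries, t->entries, keep * t->entry_size);
        free(t->entries);
        t->entries = new_entries;
        t->entry_count = new_count;
        return 0;
    }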
|
/drivers/acpi/arm64/

iort.c, all hits in iort_rmr_alloc_sids():
     932  u32 new_count)  (argument)
     935  u32 total_count = count + new_count;
     938  new_sids = krealloc_array(sids, count + new_count,
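iort_rmr_alloc_sids() grows a u32 stream-ID array by new_count elements using krealloc_array(), whose point is the overflow-checked count-times-size multiplication. A userspace analogue that writes the overflow checks out by hand:

    #include <stdlib.h>
    #include <stdint.h>

    /* Grow 'sids' from 'count' to count + new_count elements. On failure
     * returns NULL and leaves the original allocation intact, like
     * krealloc_array(). */
    static uint32_t *grow_sids(uint32_t *sids, uint32_t count,
                               uint32_t new_count)
    {
        uint32_t total_count = count + new_count;

        if (total_count < count)                     /* u32 addition wrapped */
            return NULL;
        if (total_count > SIZE_MAX / sizeof(*sids))  /* multiply would overflow */
            return NULL;

        return realloc(sids, (size_t)total_count * sizeof(*sids));
    }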
|
/drivers/net/ethernet/freescale/dpaa2/

dpaa2-eth.c
  in dpaa2_eth_seed_pool():
    1784  int new_count;  (local)
    1787  new_count = dpaa2_eth_add_bufs(priv, ch);
    1788  ch->buf_count += new_count;
    1790  if (new_count < DPAA2_ETH_BUFS_PER_CMD)
  in dpaa2_eth_refill_pool():
    1880  int new_count;  (local)
    1886  new_count = dpaa2_eth_add_bufs(priv, ch);
    1887  if (unlikely(!new_count)) {
    1891  ch->buf_count += new_count;
|
dpaa2-switch.c, all hits in dpaa2_switch_refill_bp():
    2627  int new_count;  (local)
    2632  new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
    2633  if (unlikely(!new_count)) {
    2639  *count += new_count;
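The three dpaa2 hits drive a batched buffer-release primitive in a loop: each call reports how many buffers actually reached the pool, the running count accumulates that, a zero return aborts, and a short batch means the pool is already satisfied. A sketch of the loop; the batch size, fill target, and add_bufs() stub are assumptions, not the dpaa2 constants:

    #include <stdbool.h>

    #define BUFS_PER_CMD 7               /* assumed batch size */
    #define TARGET_BUFS  1024            /* assumed fill target */

    static int add_bufs(void *pool)
    {
        (void)pool;
        return BUFS_PER_CMD;             /* stand-in for the DPIO release */
    }

    /* Seed/refill loop in the style of dpaa2_eth_seed_pool() and
     * dpaa2_switch_refill_bp(). Returns false if the pool could not be
     * filled at all. */
    static bool seed_pool(void *pool, int *buf_count)
    {
        while (*buf_count < TARGET_BUFS) {
            int new_count = add_bufs(pool);

            if (!new_count)              /* nothing released: give up */
                return false;
            *buf_count += new_count;
            if (new_count < BUFS_PER_CMD)
                break;                   /* short batch: pool is full */
        }
        return true;
    }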
|
/drivers/md/dm-vdo/

dm-vdo-target.c, all hits in grow_bit_array():
    1532  unsigned int new_count = max(instances.bit_count + BIT_COUNT_INCREMENT,  (local)
    1539  get_bit_array_size(new_count),
    1544  instances.bit_count = new_count;
|
block-map.c, all hits in set_generation():
    1473  u32 new_count;  (local)
    1482  new_count = ++zone->dirty_page_counts[new_generation];
    1483  result = VDO_ASSERT((new_count != 0), "dirty page count overflow for generation %u",
|
/drivers/net/ethernet/marvell/octeontx2/af/

rvu_nix.c, all hits in rvu_mbox_handler_nix_mcast_grp_update():
    6492  u16 prev_count, new_count;  (local)
    6543  new_count = prev_count + req->num_mce_entry;
    6547  elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
    6561  nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
    6583  new_count = prev_count - req->num_mce_entry;
    6584  elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
    6587  nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
|