| /drivers/media/i2c/ |
| A D | ccs-pll.c |
      166  lim_fr = &lim->op_fr;  in check_fr_bounds()
      169  lim_fr = &lim->vt_fr;  in check_fr_bounds()
      209  lim_bk = &lim->op_bk;  in check_bk_bounds()
      212  lim_bk = &lim->vt_bk;  in check_bk_bounds()
      489  if (lim->min_line_length_pck_bin > lim->min_line_length_pck  in ccs_pll_calculate_vt()
      530  max_vt_div = lim->vt_bk.max_sys_clk_div * lim->vt_bk.max_pix_clk_div;  in ccs_pll_calculate_vt()
      753  op_lim_fr = &lim->op_fr;  in ccs_pll_calculate()
      754  op_lim_bk = &lim->op_bk;  in ccs_pll_calculate()
      763  op_lim_fr = &lim->vt_fr;  in ccs_pll_calculate()
      764  op_lim_bk = &lim->vt_bk;  in ccs_pll_calculate()
      [all …]
|
| /drivers/md/ |
| A D | dm-zone.c |
      266  struct queue_limits *lim;  member
      319  zlim->lim->max_active_zones =  in device_get_zone_resource_limits()
      325  zlim->lim->max_open_zones =  in device_get_zone_resource_limits()
      339  struct queue_limits *lim)  in dm_set_zones_restrictions()  argument
      345  .lim = lim,  in dm_set_zones_restrictions()
      358  lim->max_hw_zone_append_sectors = lim->max_zone_append_sectors;  in dm_set_zones_restrictions()
      390  lim->max_open_zones = 0;  in dm_set_zones_restrictions()
      391  lim->max_active_zones = 0;  in dm_set_zones_restrictions()
      393  lim->max_zone_append_sectors = 0;  in dm_set_zones_restrictions()
      394  lim->zone_write_granularity = 0;  in dm_set_zones_restrictions()
      [all …]
|
| A D | md-linear.c |
      70  struct queue_limits lim;  in linear_set_limits()  local
      73  md_init_stacking_limits(&lim);  in linear_set_limits()
      74  lim.max_hw_sectors = mddev->chunk_sectors;  in linear_set_limits()
      75  lim.max_write_zeroes_sectors = mddev->chunk_sectors;  in linear_set_limits()
      76  lim.io_min = mddev->chunk_sectors << 9;  in linear_set_limits()
      77  err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);  in linear_set_limits()
      81  return queue_limits_set(mddev->gendisk->queue, &lim);  in linear_set_limits()
|
| A D | raid0.c |
      379  struct queue_limits lim;  in raid0_set_limits()  local
      382  md_init_stacking_limits(&lim);  in raid0_set_limits()
      383  lim.max_hw_sectors = mddev->chunk_sectors;  in raid0_set_limits()
      384  lim.max_write_zeroes_sectors = mddev->chunk_sectors;  in raid0_set_limits()
      385  lim.io_min = mddev->chunk_sectors << 9;  in raid0_set_limits()
      386  lim.io_opt = lim.io_min * mddev->raid_disks;  in raid0_set_limits()
      387  lim.chunk_sectors = mddev->chunk_sectors;  in raid0_set_limits()
      388  lim.features |= BLK_FEAT_ATOMIC_WRITES;  in raid0_set_limits()
      389  err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);  in raid0_set_limits()
      392  return queue_limits_set(mddev->gendisk->queue, &lim);  in raid0_set_limits()
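Read together, the md-linear.c and raid0.c hits above trace one complete limit-setup path: declare a queue_limits on the stack, initialize it for stacking, fill in the per-array values, fold in every member device's limits, and commit the result to the array's request queue. The sketch below reassembles that flow from the raid0 hits; it is a hedged reconstruction rather than a copy of the driver, the function name example_set_limits is illustrative, and it only builds inside a kernel tree.

```c
/*
 * Hedged reconstruction of the flow shown by the raid0_set_limits() hits
 * above; example_set_limits() is an illustrative name and this compiles
 * only inside a kernel tree.
 */
#include <linux/blkdev.h>
#include "md.h"

static int example_set_limits(struct mddev *mddev)
{
	struct queue_limits lim;
	int err;

	md_init_stacking_limits(&lim);
	lim.max_hw_sectors = mddev->chunk_sectors;
	lim.max_write_zeroes_sectors = mddev->chunk_sectors;
	lim.io_min = mddev->chunk_sectors << 9;		/* sectors to bytes */
	lim.io_opt = lim.io_min * mddev->raid_disks;	/* full-stripe I/O */
	lim.chunk_sectors = mddev->chunk_sectors;
	lim.features |= BLK_FEAT_ATOMIC_WRITES;

	/* Fold every member rdev's limits (and integrity profile) into lim. */
	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
	if (err)
		return err;

	/* Publish the assembled limits on the array's request queue. */
	return queue_limits_set(mddev->gendisk->queue, &lim);
}
```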
|
| /drivers/mmc/core/ |
| A D | queue.c |
      178  struct queue_limits *lim)  in mmc_queue_setup_discard()  argument
      186  lim->max_hw_discard_sectors = max_discard;  in mmc_queue_setup_discard()
      188  lim->max_secure_erase_sectors = max_discard;  in mmc_queue_setup_discard()
      194  lim->discard_granularity = SECTOR_SIZE;  in mmc_queue_setup_discard()
      350  struct queue_limits lim = {  in mmc_alloc_disk()  local
      356  mmc_queue_setup_discard(card, &lim);  in mmc_alloc_disk()
      363  lim.logical_block_size = 512;  in mmc_alloc_disk()
      366  lim.logical_block_size != 4096);  in mmc_alloc_disk()
      376  lim.max_segment_size =  in mmc_alloc_disk()
      378  lim.max_segments = host->max_segs;  in mmc_alloc_disk()
      [all …]
|
| /drivers/block/ |
| A D | virtio_blk.c |
      718  struct queue_limits *lim)  in virtblk_read_zoned_limits()  argument
      729  lim->max_open_zones = v;  in virtblk_read_zoned_limits()
      734  lim->max_active_zones = v;  in virtblk_read_zoned_limits()
      744  lim->io_min = wg;  in virtblk_read_zoned_limits()
      793  struct queue_limits *lim)  in virtblk_read_zoned_limits()  argument
      1092  struct queue_limits lim;  in cache_type_store()  local
      1239  struct queue_limits *lim)  in virtblk_read_limits()  argument
      1297  lim->alignment_offset =  in virtblk_read_limits()
      1304  lim->io_min = lim->logical_block_size * min_io_size;  in virtblk_read_limits()
      1310  lim->io_opt = lim->logical_block_size * opt_io_size;  in virtblk_read_limits()
      [all …]
|
| A D | loop.c |
      219  lim.max_write_zeroes_sectors = 0;  in loop_clear_limits()
      222  lim.max_hw_discard_sectors = 0;  in loop_clear_limits()
      223  lim.discard_granularity = 0;  in loop_clear_limits()
      955  lim->logical_block_size = bsize;  in loop_update_limits()
      957  lim->io_min = bsize;  in loop_update_limits()
      968  lim->discard_granularity = 0;  in loop_update_limits()
      976  struct queue_limits lim;  in loop_configure()  local
      1105  struct queue_limits lim;  in __loop_clr_fd()  local
      1128  lim.io_min = SECTOR_SIZE;  in __loop_clr_fd()
      1437  struct queue_limits lim;  in loop_set_block_size()  local
      [all …]
|
| A D | xen-blkfront.c |
      948  struct queue_limits *lim)  in blkif_set_queue_limits()  argument
      954  lim->max_hw_discard_sectors = UINT_MAX;  in blkif_set_queue_limits()
      963  lim->features |= BLK_FEAT_WRITE_CACHE;  in blkif_set_queue_limits()
      965  lim->features |= BLK_FEAT_FUA;  in blkif_set_queue_limits()
      974  lim->seg_boundary_mask = PAGE_SIZE - 1;  in blkif_set_queue_limits()
      975  lim->max_segment_size = PAGE_SIZE;  in blkif_set_queue_limits()
      981  lim->dma_alignment = 511;  in blkif_set_queue_limits()
      1075  struct queue_limits lim = {};  in xlvbd_alloc_gendisk()  local
      1141  blkif_set_queue_limits(info, &lim);  in xlvbd_alloc_gendisk()
      2008  struct queue_limits lim;  in blkif_recover()  local
      [all …]
|
| /drivers/usb/storage/ |
| A D | scsiglue.c |
      91  static int sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)  in sdev_configure()  argument
      106  lim->max_hw_sectors = min(lim->max_hw_sectors, max_sectors);  in sdev_configure()
      113  lim->max_hw_sectors = 0x7FFFFF;  in sdev_configure()
      119  lim->max_hw_sectors = 2048;  in sdev_configure()
      126  lim->max_hw_sectors = min_t(size_t,  in sdev_configure()
      127  lim->max_hw_sectors, dma_max_mapping_size(dev) >> SECTOR_SHIFT);  in sdev_configure()
      588  struct queue_limits lim;  in max_sectors_store()  local
      595  lim = queue_limits_start_update(sdev->request_queue);  in max_sectors_store()
      596  lim.max_hw_sectors = ms;  in max_sectors_store()
      597  ret = queue_limits_commit_update_frozen(sdev->request_queue, &lim);  in max_sectors_store()
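The scsiglue.c hits show the two sides of a limit update: sdev_configure() fills in a queue_limits handed to it by the midlayer, while max_sectors_store() changes a live queue with the queue_limits_start_update()/queue_limits_commit_update_frozen() pair. Below is a hedged sketch of the second pattern using only the block-layer helpers visible in the hits; the wrapper name and its parameter are illustrative, and it compiles only inside a kernel tree.

```c
/*
 * Sketch of the live-queue update pattern from the max_sectors_store()
 * hits above; example_set_max_sectors() and its parameter are made up,
 * the helpers are the block-layer calls shown in the hits.
 */
#include <linux/blkdev.h>

static int example_set_max_sectors(struct request_queue *q, unsigned int ms)
{
	struct queue_limits lim;

	/* Snapshot the queue's current limits for modification. */
	lim = queue_limits_start_update(q);
	lim.max_hw_sectors = ms;
	/* Validate and publish the new limits; _frozen also freezes the queue. */
	return queue_limits_commit_update_frozen(q, &lim);
}
```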
|
| /drivers/pps/generators/ |
| A D | pps_gen_parport.c |
      60  long lim, delta;  in hrtimer_event()  local
      78  lim = NSEC_PER_SEC - send_delay - dev->port_write_time;  in hrtimer_event()
      81  if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) {  in hrtimer_event()
      91  } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);  in hrtimer_event()
      98  lim = NSEC_PER_SEC - dev->port_write_time;  in hrtimer_event()
      101  } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);  in hrtimer_event()
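In the pps_gen_parport.c hits, lim is a nanosecond bound: NSEC_PER_SEC minus the programmed send delay and the measured port write time. The handler bails out if it wakes up past that bound and otherwise busy-waits until the bound is reached within the same second. The standalone illustration below uses clock_gettime() in place of the driver's timestamp source, and the delay values are invented.

```c
/*
 * Standalone illustration of the "spin until just before the second
 * boundary" pattern from the hrtimer_event() hits above; clock_gettime()
 * replaces the kernel timekeeping calls and the delay values are invented.
 */
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	const long send_delay = 30000;		/* illustrative, in ns */
	const long port_write_time = 5000;	/* illustrative, in ns */
	struct timespec ts;
	time_t second;
	long lim;

	/* Spin until fewer than send_delay + port_write_time ns remain. */
	lim = NSEC_PER_SEC - send_delay - port_write_time;

	clock_gettime(CLOCK_REALTIME, &ts);
	second = ts.tv_sec;
	do {
		clock_gettime(CLOCK_REALTIME, &ts);
	} while (ts.tv_sec == second && ts.tv_nsec < lim);

	printf("left the spin %ld ns into the second (limit %ld ns)\n",
	       ts.tv_nsec, lim);
	return 0;
}
```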
|
| /drivers/scsi/ |
| A D | sd.c |
      108  struct queue_limits *lim);
      125  struct queue_limits *lim)  in sd_set_flush_flag()  argument
      173  struct queue_limits lim;  in cache_type_store()  local
      181  &lim);  in cache_type_store()
      470  struct queue_limits lim;  in provisioning_mode_store()  local
      570  struct queue_limits lim;  in max_write_same_blocks_store()  local
      1087  struct queue_limits *lim)  in sd_config_write_same()  argument
      2578  struct queue_limits *lim)  in sd_config_protection()  argument
      3699  struct queue_limits lim;  in sd_revalidate_disk()  local
      3783  lim.io_min = 0;  in sd_revalidate_disk()
      [all …]
|
| A D | sun3x_esp.c |
      87  int lim;  in sun3x_esp_dma_drain()  local
      95  lim = 1000;  in sun3x_esp_dma_drain()
      97  if (--lim == 0) {  in sun3x_esp_dma_drain()
      109  int lim;  in sun3x_esp_dma_invalidate()  local
      111  lim = 1000;  in sun3x_esp_dma_invalidate()
      113  if (--lim == 0) {  in sun3x_esp_dma_invalidate()
|
| A D | sun_esp.c |
      225  int can_do_sbus64, lim;  in sbus_esp_reset_dma()  local
      265  lim = 1000;  in sbus_esp_reset_dma()
      267  if (--lim == 0) {  in sbus_esp_reset_dma()
      324  int lim;  in sbus_esp_dma_drain()  local
      336  lim = 1000;  in sbus_esp_dma_drain()
      338  if (--lim == 0) {  in sbus_esp_dma_drain()
      366  int lim;  in sbus_esp_dma_invalidate()  local
      368  lim = 1000;  in sbus_esp_dma_invalidate()
      370  if (--lim == 0) {  in sbus_esp_dma_invalidate()
|
| A D | sd_zbc.c |
      595  int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,  in sd_zbc_read_zones()  argument
      605  lim->features |= BLK_FEAT_ZONED;  in sd_zbc_read_zones()
      612  lim->zone_write_granularity = sdkp->physical_block_size;  in sd_zbc_read_zones()
      635  lim->max_open_zones = 0;  in sd_zbc_read_zones()
      637  lim->max_open_zones = sdkp->zones_max_open;  in sd_zbc_read_zones()
      638  lim->max_active_zones = 0;  in sd_zbc_read_zones()
      639  lim->chunk_sectors = logical_to_sectors(sdkp->device, zone_blocks);  in sd_zbc_read_zones()
|
| A D | sd_dif.c |
      27  void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim)  in sd_dif_config_host()  argument
      31  struct blk_integrity *bi = &lim->integrity;  in sd_dif_config_host()
|
| A D | am53c974.c |
      142  int lim = 1000;  in pci_esp_dma_drain()  local
      150  while (--lim > 0) {  in pci_esp_dma_drain()
      164  lim = 1000;  in pci_esp_dma_drain()
      167  if (--lim == 0)  in pci_esp_dma_drain()
      172  esp_dma_log("DMA blast done (%d tries, %d bytes left)\n", lim, resid);  in pci_esp_dma_drain()
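The three ESP entries (sun3x_esp.c, sun_esp.c, am53c974.c) all use the same bounded-poll idiom: load lim with a retry budget of 1000, poll a controller status condition, and give up with a diagnostic once --lim reaches zero. The standalone sketch below shows the idiom with a stand-in for the hardware status read; every name and value in it is illustrative.

```c
/*
 * Standalone sketch of the bounded polling countdown used by the ESP
 * DMA-drain paths above; fake_dma_busy() stands in for the real register
 * read and the retry budget of 1000 mirrors the drivers.
 */
#include <stdio.h>

static int fake_dma_busy(void)
{
	static int remaining = 5;	/* pretend the FIFO drains after 5 polls */
	return remaining-- > 0;
}

static int drain_dma(void)
{
	int lim = 1000;			/* retry budget, as in the drivers */

	while (fake_dma_busy()) {
		if (--lim == 0) {
			fprintf(stderr, "DMA drain timed out\n");
			return -1;
		}
	}
	printf("DMA drained with %d tries left\n", lim);
	return 0;
}

int main(void)
{
	return drain_dma() ? 1 : 0;
}
```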
|
| A D | sd.h |
      231  void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim);
      235  int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
      248  struct queue_limits *lim, u8 buf[SD_BUF_SIZE])  in sd_zbc_read_zones()  argument
|
| /drivers/nvme/host/ |
| A D | core.c |
      1893  lim->discard_granularity = lim->logical_block_size;  in nvme_config_discard()
      2068  struct queue_limits *lim)  in nvme_set_ctrl_limits()  argument
      2076  lim->dma_alignment = 3;  in nvme_set_ctrl_limits()
      2115  lim->io_min = phys_bs;  in nvme_update_disk_info()
      2116  lim->io_opt = io_opt;  in nvme_update_disk_info()
      2165  lim->chunk_sectors = iob;  in nvme_set_chunk_sectors()
      2171  struct queue_limits lim;  in nvme_update_ns_info_generic()  local
      2334  struct queue_limits lim;  in nvme_update_ns_info_block()  local
      2422  lim.max_hw_wzeroes_unmap_sectors = lim.max_write_zeroes_sectors;  in nvme_update_ns_info_block()
      2489  struct queue_limits lim;  in nvme_update_ns_info()  local
      [all …]
|
| A D | zns.c |
      108  void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,  in nvme_update_zone_info()  argument
      111  lim->features |= BLK_FEAT_ZONED;  in nvme_update_zone_info()
      112  lim->max_open_zones = zi->max_open_zones;  in nvme_update_zone_info()
      113  lim->max_active_zones = zi->max_active_zones;  in nvme_update_zone_info()
      114  lim->max_hw_zone_append_sectors = ns->ctrl->max_zone_append;  in nvme_update_zone_info()
      115  lim->chunk_sectors = ns->head->zsze =  in nvme_update_zone_info()
|
| /drivers/edac/ |
| A D | amd64_edac.h |
      136  #define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))
      137  #define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7))
      289  struct reg_pair lim;  member
      422  u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;  in get_dram_limit()  local
      425  return lim;  in get_dram_limit()
      427  return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim;  in get_dram_limit()
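The get_dram_limit() hits show how a DRAM limit address is rebuilt from a lo/hi register pair: bits [31:16] of the low register become address bits [39:24] with the bottom 24 bits filled with ones, and bits [7:0] of the high register supply address bits [47:40]. The standalone example below reworks that arithmetic with the same masks and shifts; the register values are invented for illustration.

```c
/*
 * Standalone rework of the get_dram_limit() arithmetic above:
 * lo[31:16] -> addr[39:24] with the bottom 24 bits set,
 * hi[7:0]   -> addr[47:40].
 * The register values in main() are invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t dram_limit(uint32_t lo, uint32_t hi)
{
	uint64_t lim = (((uint64_t)lo & 0xffff0000) << 8) | 0x00ffffff;

	return (((uint64_t)hi & 0x000000ff) << 40) | lim;
}

int main(void)
{
	uint32_t lo = 0x00ff0000;	/* illustrative register contents */
	uint32_t hi = 0x00000001;

	printf("DRAM limit = 0x%012llx\n",
	       (unsigned long long)dram_limit(lo, hi));
	return 0;
}
```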
|
| /drivers/regulator/ |
| A D | qcom-labibb-regulator.c |
      310  static int qcom_labibb_set_ocp(struct regulator_dev *rdev, int lim,  in qcom_labibb_set_ocp()  argument
      322  if (lim || severity != REGULATOR_SEVERITY_PROT || !enable)  in qcom_labibb_set_ocp()
      565  struct labibb_current_limits *lim = &vreg->uA_limits;  in qcom_labibb_set_current_limit()  local
      569  if (min_uA < lim->uA_min || max_uA < lim->uA_min)  in qcom_labibb_set_current_limit()
      573  int uA_limit = (lim->uA_step * i) + lim->uA_min;  in qcom_labibb_set_current_limit()
      587  mask = desc->csel_mask | lim->ovr_val;  in qcom_labibb_set_current_limit()
      589  val = (u32)sel | lim->ovr_val;  in qcom_labibb_set_current_limit()
      599  struct labibb_current_limits *lim = &vreg->uA_limits;  in qcom_labibb_get_current_limit()  local
      608  return (cur_step * lim->uA_step) + lim->uA_min;  in qcom_labibb_get_current_limit()
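The labibb current-limit hits encode the limit as a selector: selectable values start at uA_min and advance in uA_step increments, the set path computes each candidate as uA_step * i + uA_min, and the get path maps a selector back with the same formula. The standalone sketch below illustrates that selector arithmetic; the struct, the acceptance test, and the numeric values are all illustrative rather than the driver's register layout.

```c
/*
 * Standalone sketch of the step-based current-limit selection seen in
 * qcom_labibb_set/get_current_limit() above; the struct, the acceptance
 * test, and the numbers are invented for illustration.
 */
#include <stdio.h>

struct current_limits {
	int uA_min;	/* lowest selectable limit, in microamps */
	int uA_step;	/* distance between adjacent selections */
	int n_steps;	/* number of selectable values */
};

/* Return the first selector whose limit lies in [min_uA, max_uA], or -1. */
static int pick_current_limit(const struct current_limits *lim,
			      int min_uA, int max_uA)
{
	int i;

	if (min_uA < lim->uA_min || max_uA < lim->uA_min)
		return -1;

	for (i = 0; i < lim->n_steps; i++) {
		int uA_limit = lim->uA_step * i + lim->uA_min;

		if (uA_limit >= min_uA && uA_limit <= max_uA)
			return i;
	}
	return -1;
}

int main(void)
{
	struct current_limits lim = { .uA_min = 200000, .uA_step = 50000,
				      .n_steps = 8 };
	int sel = pick_current_limit(&lim, 300000, 400000);

	if (sel >= 0)
		printf("selector %d -> %d uA\n",
		       sel, sel * lim.uA_step + lim.uA_min);
	return 0;
}
```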
|
| A D | rt5759-regulator.c |
      147  static int rt5759_set_otp(struct regulator_dev *rdev, int lim, int severity,  in rt5759_set_otp()  argument
      161  if (lim == 0)  in rt5759_set_otp()
      162  lim = 150;  in rt5759_set_otp()
      165  if (lim <= otp_lvl[i])  in rt5759_set_otp()
|
| /drivers/media/usb/pvrusb2/ |
| A D | pvrusb2-ctrl.c |
      22  int lim;  in pvr2_ctrl_range_check()  local
      23  lim = cptr->info->def.type_int.min_value;  in pvr2_ctrl_range_check()
      25  cptr->info->get_min_value(cptr,&lim);  in pvr2_ctrl_range_check()
      27  if (val < lim) return -ERANGE;  in pvr2_ctrl_range_check()
      28  lim = cptr->info->def.type_int.max_value;  in pvr2_ctrl_range_check()
      30  cptr->info->get_max_value(cptr,&lim);  in pvr2_ctrl_range_check()
      32  if (val > lim) return -ERANGE;  in pvr2_ctrl_range_check()
|
| /drivers/block/aoe/ |
| A D | aoedev.c |
      258  size_t lim;  in user_req()  local
      263  lim = sizeof(d->gd->disk_name);  in user_req()
      264  lim -= p - d->gd->disk_name;  in user_req()
      265  if (slen < lim)  in user_req()
      266  lim = slen;  in user_req()
      268  return !strncmp(s, p, lim);  in user_req()
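The user_req() hits bound a string comparison by whatever is left in disk_name after a prefix: lim starts as the size of the buffer, shrinks by the prefix length, and is further capped by the caller's string length before strncmp(). The standalone sketch below shows that clamping; the 32-byte buffer, the "etherd/" prefix, and the names are illustrative.

```c
/*
 * Standalone sketch of the bounded name comparison in user_req() above;
 * the 32-byte buffer and the "etherd/" prefix handling are illustrative.
 */
#include <stdio.h>
#include <string.h>

static int name_matches(const char *disk_name, size_t bufsize,
			const char *p, const char *s, size_t slen)
{
	size_t lim = bufsize;

	lim -= p - disk_name;	/* space left after the prefix */
	if (slen < lim)
		lim = slen;	/* never compare past the caller's string */

	return !strncmp(s, p, lim);
}

int main(void)
{
	char disk_name[32] = "etherd/e1.0";
	const char *p = disk_name + strlen("etherd/");	/* points at "e1.0" */

	printf("match: %d\n", name_matches(disk_name, sizeof(disk_name),
					   p, "e1.0", strlen("e1.0")));
	return 0;
}
```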
|
| /drivers/firmware/broadcom/ |
| A D | bcm47xx_nvram.c |
      138  int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)  in bcm47xx_nvram_init_from_mem()  argument
      143  iobase = ioremap(base, lim);  in bcm47xx_nvram_init_from_mem()
      147  err = bcm47xx_nvram_find_and_copy(iobase, lim);  in bcm47xx_nvram_init_from_mem()
|