/block/
blk-stat.c
    22: stat->min = -1ULL;  in blk_rq_stat_init()
    33: dst->min = min(dst->min, src->min);  in blk_rq_stat_sum()
    44: stat->min = min(stat->min, value);  in blk_rq_stat_add()
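The blk-stat.c lines are the classic running-minimum pattern: the minimum is seeded with -1ULL (all bits set, i.e. the largest u64) so the first sample always replaces it, and later samples are folded in with min(). Below is a stand-alone userspace sketch of the same pattern; the struct and function names are illustrative, not the kernel's blk_rq_stat API.

#include <inttypes.h>
#include <stdio.h>

/* Illustrative stand-in for a request-latency statistics bucket. */
struct rq_stat {
	uint64_t min;	/* smallest sample seen so far */
	uint64_t max;	/* largest sample seen so far */
	uint64_t nr;	/* number of samples */
	uint64_t sum;	/* running total */
};

static void stat_init(struct rq_stat *s)
{
	s->min = -1ULL;	/* UINT64_MAX: any real sample is smaller */
	s->max = 0;
	s->nr = 0;
	s->sum = 0;
}

static void stat_add(struct rq_stat *s, uint64_t value)
{
	if (value < s->min)
		s->min = value;
	if (value > s->max)
		s->max = value;
	s->sum += value;
	s->nr++;
}

int main(void)
{
	struct rq_stat s;

	stat_init(&s);
	stat_add(&s, 120);
	stat_add(&s, 80);
	printf("min=%" PRIu64 " max=%" PRIu64 "\n", s.min, s.max);
	return 0;
}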
blk-settings.c
    207: max_sectors = min(max_sectors, limits->max_user_sectors);  in blk_queue_max_hw_sectors()
    209: max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS);  in blk_queue_max_hw_sectors()
    290: max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);  in blk_queue_max_zone_append_sectors()
    291: max_sectors = min(q->limits.chunk_sectors, max_sectors);  in blk_queue_max_zone_append_sectors()
    487: void blk_limits_io_min(struct queue_limits *limits, unsigned int min)  in blk_limits_io_min() argument
    489: limits->io_min = min;  in blk_limits_io_min()
    513: void blk_queue_io_min(struct request_queue *q, unsigned int min)  in blk_queue_io_min() argument
    515: blk_limits_io_min(&q->limits, min);  in blk_queue_io_min()
    632: t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,  in blk_stack_limits()
    634: t->max_zone_append_sectors = min(t->max_zone_append_sectors,  in blk_stack_limits()
    [all …]
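The blk_stack_limits() hits show how a stacking driver (device-mapper, MD) combines the limits of its component devices: for "how much can one command cover" limits, the stacked value is simply the smaller of the two. Below is a reduced, stand-alone sketch of that folding; the struct is a cut-down stand-in for struct queue_limits, not the real thing.

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Cut-down stand-in for struct queue_limits: only a few "take the most
 * restrictive value" fields are shown. */
struct limits {
	unsigned int max_sectors;
	unsigned int max_write_zeroes_sectors;
	unsigned int max_zone_append_sectors;
};

/* Fold a bottom device's limits into the stacked (top) device's limits:
 * the stack can never promise more than its most restrictive member. */
static void stack_limits(struct limits *t, const struct limits *b)
{
	t->max_sectors = MIN(t->max_sectors, b->max_sectors);
	t->max_write_zeroes_sectors =
		MIN(t->max_write_zeroes_sectors, b->max_write_zeroes_sectors);
	t->max_zone_append_sectors =
		MIN(t->max_zone_append_sectors, b->max_zone_append_sectors);
}

int main(void)
{
	struct limits top = { 65535, 65535, 65535 };
	struct limits disk_a = { 2048, 1024, 1024 };
	struct limits disk_b = { 4096, 1024, 512 };

	stack_limits(&top, &disk_a);
	stack_limits(&top, &disk_b);
	printf("stacked max_sectors = %u\n", top.max_sectors);	/* 2048 */
	return 0;
}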
early-lookup.c
    199: unsigned maj, min, offset;  in devt_from_devnum() local
    202: if (sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2 ||  in devt_from_devnum()
    203: sscanf(name, "%u:%u:%u:%c", &maj, &min, &offset, &dummy) == 3) {  in devt_from_devnum()
    204: *devt = MKDEV(maj, min);  in devt_from_devnum()
    205: if (maj != MAJOR(*devt) || min != MINOR(*devt))  in devt_from_devnum()
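devt_from_devnum() parses a "major:minor" string and then checks that the numbers survive the MKDEV()/MAJOR()/MINOR() round trip, so values that do not fit the dev_t encoding are rejected rather than silently truncated. Below is a userspace sketch of the same idea, with makedev()/major()/minor() from <sys/sysmacros.h> standing in for the kernel macros; the helper name is made up.

#include <stdio.h>
#include <sys/sysmacros.h>
#include <sys/types.h>

/* Parse "major:minor" into a dev_t; reject trailing junk and values that
 * do not round-trip through the dev_t encoding. */
static int devt_from_string(const char *name, dev_t *devt)
{
	unsigned int maj, min;
	char dummy;

	if (sscanf(name, "%u:%u%c", &maj, &min, &dummy) != 2)
		return -1;	/* wrong format or trailing characters */
	*devt = makedev(maj, min);
	if (maj != major(*devt) || min != minor(*devt))
		return -1;	/* number too large for the encoding */
	return 0;
}

int main(void)
{
	dev_t devt;

	if (devt_from_string("8:16", &devt) == 0)
		printf("major %u, minor %u\n", major(devt), minor(devt));
	return 0;
}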
blk-merge.c
    122: min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));  in bio_split_discard()
    177: max_sectors = min(max_sectors,  in get_max_io_size()
    208: return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;  in get_max_segment_size()
    235: unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;  in bvec_split_segs()
    236: unsigned len = min(bv->bv_len, max_len);  in bvec_split_segs()
    243: seg_size = min(seg_size, len);  in bvec_split_segs()
    472: unsigned len = min(get_max_segment_size(&q->limits,  in blk_bvec_map_sg()
    608: return min(max_sectors,  in blk_rq_get_max_sectors()
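The get_max_segment_size() hit at line 208 is a small overflow trick: the number of bytes left before the segment boundary is mask - offset + 1, but adding the 1 up front wraps to 0 when mask is ULONG_MAX ("no boundary") and offset is 0. Taking min() over the values minus one and adding 1 only after the comparison keeps every intermediate result representable. A stand-alone sketch, assuming offset has already been reduced by the boundary mask:

#include <limits.h>
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Bytes usable in one segment starting at 'offset' within a boundary
 * window described by 'mask' (boundary size minus one), capped by
 * 'max_segment_size'. Both limits may be "unlimited" (all ones), so the
 * +1 is applied only after the min() to avoid wrapping to 0. */
static unsigned long max_segment_bytes(unsigned long mask, unsigned long offset,
				       unsigned long max_segment_size)
{
	return MIN(mask - offset, max_segment_size - 1) + 1;
}

int main(void)
{
	/* 64 KiB boundary, 4 KiB already used, no size cap: 60 KiB left. */
	printf("%lu\n", max_segment_bytes(0xffff, 0x1000, ULONG_MAX));
	/* No boundary, no cap: the naive mask - offset + 1 would wrap to 0. */
	printf("%lu\n", max_segment_bytes(ULONG_MAX, 0, ULONG_MAX));
	return 0;
}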
blk-lib.c
    65: min(nr_sects, bio_discard_limit(bdev, sector));  in __blkdev_issue_discard()
    165: return min(pages, (sector_t)BIO_MAX_VECS);  in __blkdev_sectors_to_bio_pages()
    185: sz = min((sector_t) PAGE_SIZE, nr_sects << 9);  in __blkdev_issue_zero_pages()
blk-throttle.c
    172: ret = min(tg->bps[rw][LIMIT_MAX], adjusted);  in tg_bps_limit()
    1023: min_wait = min(read_wait, write_wait);  in tg_update_disptime()
    1346: this_tg->idletime_threshold = min(this_tg->idletime_threshold,  in tg_conf_updated()
    1640: tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],  in tg_set_limit()
    1642: tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],  in tg_set_limit()
    1644: tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],  in tg_set_limit()
    1646: tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],  in tg_set_limit()
    1773: return min(rtime, wtime);  in __tg_last_low_overflow_time()
ioprio.c
    207: return min(aprio, bprio);  in ioprio_best()
blk-iocost.c
    386: s64 min;  member
    759: margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;  in ioc_refresh_margins()
    1016: vrate = min(vrate, vrate_min);  in ioc_adjust_base_vrate()
    1449: iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);  in iocg_pay_debt()
    1756: time_after64(vtime, now->vnow - ioc->margins.min))  in hweight_after_donation()
    1949: iocg->hweight_donating = min(iocg->hweight_donating,  in transfer_surpluses()
    1951: iocg->hweight_after_donation = min(iocg->hweight_after_donation,  in transfer_surpluses()
    2408: ioc->busy_level = min(ioc->busy_level, 0);  in ioc_timer_fn()
blk-wbt.c
    345: if (stat[READ].min > rwb->min_lat_nsec) {  in latency_exceeded()
    346: trace_wbt_lat(bdi, stat[READ].min);  in latency_exceeded()
bsg.c
    138: return put_user(min(bd->reserved_size, queue_max_bytes(q)),  in bsg_ioctl()
blk-rq-qos.c
    140: depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));  in rq_depth_calc_max_depth()
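rq_depth_calc_max_depth() throttles by halving the allowed queue depth once per scale step: min(31, scale_step) clamps the shift count so it stays well defined for a 32-bit value, and the surrounding 1 + ((depth - 1) >> ...) keeps at least one request in flight however far the scaling goes. A minimal sketch of that arithmetic (the function name is made up, and a non-negative scale step is assumed):

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Scale an allowed queue depth down by one power of two per step.
 * The shift count is clamped to 31 and the result never drops below 1. */
static unsigned int scaled_depth(unsigned int depth, int scale_step)
{
	return 1 + ((depth - 1) >> MIN(31, scale_step));
}

int main(void)
{
	printf("%u %u %u\n",
	       scaled_depth(64, 0),	/* 64: no throttling */
	       scaled_depth(64, 3),	/* 8 */
	       scaled_depth(64, 31));	/* 1: floor of one request */
	return 0;
}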
bio.c
    990: min(bio->bi_max_vecs, queue_max_segments(q)))  in bio_add_hw_page()
    1184: size = min(size, max_sectors << SECTOR_SHIFT);  in bio_iov_bvec_set()
    1411: unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);  in bio_copy_data_iter()
blk-crypto-profile.c
    525: min(parent->max_dun_bytes_supported,  in blk_crypto_intersect_capabilities()
bio-integrity.c
    144: min(bip->bip_max_vcnt, queue_max_integrity_segments(q)))  in bio_integrity_add_page()
bsg-lib.c
    116: int len = min(hdr->max_response_len, job->reply_len);  in bsg_transport_sg_io_fn()
blk.h
    201: return min(q->limits.max_discard_sectors,  in blk_queue_get_max_sectors()
bfq-iosched.c
    2856: int proc_ref = min(bfqq_process_refs(bfqq),  in bfq_setup_stable_merge()
    4018: budget = min(budget * 2, bfqd->bfq_max_budget);  in __bfq_bfqq_recalc_budget()
    4033: budget = min(budget * 2, bfqd->bfq_max_budget);  in __bfq_bfqq_recalc_budget()
    4045: budget = min(budget * 4, bfqd->bfq_max_budget);  in __bfq_bfqq_recalc_budget()
    4099: bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);  in __bfq_bfqq_recalc_budget()
blk-sysfs.c
    243: max_sectors_kb = min(max_hw_sectors_kb,  in queue_max_sectors_store()
blk-iolatency.c
    388: old = min(old, qd);  in scale_change()
sed-opal.c
    1337: memcpy(uid, dev->prev_data, min(sizeof(uid), dev->prev_d_len));  in gen_key()
    1436: len = min(remaining_size(dev) - (2+1+CMD_FINALIZE_BYTES_NEEDED),  in generic_table_write_data()
    2406: len = min(max_read_size, (size_t)(read_size - off));  in read_table_data()
bfq-wf2q.c
    842: unsigned long bounded_time_ms = min(time_ms, timeout_ms);  in bfq_bfqq_charge_time()
blk-mq.c
    920: unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);  in blk_update_request()
    3461: to_do = min(entries_per_page, depth - i);  in blk_mq_alloc_rqs()
    4511: set->queue_depth = min(64U, set->queue_depth);  in blk_mq_alloc_tag_set()
/block/partitions/
efi.c
    748: label_max = min(ARRAY_SIZE(info->volname) - 1,  in efi_partition()
msdos.c
    678: n = min(size, max(sector_size, n));  in msdos_partition()
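The msdos_partition() hit combines min() and max() into the common clamp idiom: min(hi, max(lo, x)) bounds x to the inclusive range [lo, hi], provided lo <= hi. A one-function sketch (the helper name is made up):

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

/* Clamp x into [lo, hi]; same shape as min(size, max(sector_size, n)). */
static unsigned int clamp_to(unsigned int x, unsigned int lo, unsigned int hi)
{
	return MIN(hi, MAX(lo, x));
}

int main(void)
{
	printf("%u %u %u\n", clamp_to(3, 8, 64), clamp_to(20, 8, 64),
	       clamp_to(100, 8, 64));	/* prints: 8 20 64 */
	return 0;
}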