/drivers/md/

dm-stats.c
    192  atomic_read(&shared->in_flight[WRITE]);  in dm_stat_in_flight()
    229  atomic_read(&shared->in_flight[WRITE]));  in dm_stats_cleanup()
    318  atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);  in dm_stats_create()
    505  in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);  in dm_stat_round()
    509  p->io_ticks[WRITE] += difference;  in dm_stat_round()
    648  ((bi_rw == WRITE) ==  in dm_stats_account_io()
    649  (ACCESS_ONCE(last->last_rw) == WRITE))  in dm_stats_account_io()
    684  shared->tmp.sectors[WRITE] = 0;  in __dm_stat_init_temporary_percpu_totals()
    686  shared->tmp.ios[WRITE] = 0;  in __dm_stat_init_temporary_percpu_totals()
    688  shared->tmp.merges[WRITE] = 0;  in __dm_stat_init_temporary_percpu_totals()
    [all …]
dm-flakey.c
    103  fc->corrupt_bio_rw = WRITE;  in parse_features()
    135  if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {  in parse_features()
    273  (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,  in corrupt_bio_data()
    314  if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {  in flakey_map()
    383  (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',  in flakey_status()
faulty.c
    118  case WritePersistent*2+WRITE: return 1;  in check_sector()
    121  case ReadFixable*2+WRITE:  in check_sector()
    125  case AllPersist*2+WRITE: return 1;  in check_sector()
    178  if (bio_data_dir(bio) == WRITE) {  in faulty_make_request()
    189  bio_end_sector(bio), WRITE))  in faulty_make_request()
dm-delay.c
    90  if ((bio_data_dir(bio) == WRITE))  in flush_delayed_bios()
    250  if (bio_data_dir(bio) == WRITE)  in delay_bio()
    284  if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {  in delay_map()
dm-crypt.c
    587  if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {  in crypt_iv_lmk_gen()
    603  if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)  in crypt_iv_lmk_post()
    729  if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {  in crypt_iv_tcw_gen()
    750  if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)  in crypt_iv_tcw_post()
    874  if (bio_data_dir(ctx->bio_in) == WRITE)  in crypt_convert_block()
    1114  if (rw == WRITE)  in crypt_endio()
    1938  bio_data_dir(bio) == WRITE)  in crypt_map()
dm-raid1.c
    127  bl = (rw == WRITE) ? &ms->writes : &ms->reads;  in queue_bio()
    143  queue_bio(ms, bio, WRITE);  in dispatch_bios()
    1202  if (rw == WRITE) {  in mirror_map()
    1252  if (rw == WRITE) {  in mirror_end_io()
raid1.c
    275  (bio_data_dir(bio) == WRITE) ? "write" : "read",  in raid_end_bio_io()
    832  else if (conf->barrier && bio_data_dir(bio) == WRITE) {  in need_to_wait_for_sync()
    872  if (bio && bio_data_dir(bio) == WRITE) {  in wait_barrier()
    1064  if (bio_data_dir(bio) == WRITE &&  in raid1_make_request()
    1068  md_cluster_ops->area_resyncing(mddev, WRITE,  in raid1_make_request()
    1082  !md_cluster_ops->area_resyncing(mddev, WRITE,  in raid1_make_request()
    1762  if (rw == WRITE) {  in r1_sync_page_io()
    1869  WRITE) == 0) {  in fix_sync_read_error()
    2108  conf->tmppage, WRITE);  in fix_read_error()
dm-snap.c
    1700  bio_data_dir(bio) == WRITE)) {  in snapshot_map()
    1717  if (bio_data_dir(bio) == WRITE) {  in snapshot_map()
    1823  if (bio_data_dir(bio) == WRITE &&  in snapshot_merge_map()
    1835  if (bio_data_dir(bio) == WRITE)  in snapshot_merge_map()
    1843  if (bio_data_dir(bio) == WRITE) {  in snapshot_merge_map()
    2292  if (bio_data_dir(bio) != WRITE)  in origin_map()
dm-cache-target.c
    804  if (bio_data_dir(bio) == WRITE)  in remap_to_origin_clear_discard()
    813  if (bio_data_dir(bio) == WRITE) {  in remap_to_cache_dirty()
    1323  return (bio_data_dir(bio) == WRITE) &&  in bio_writes_complete_block()
    1682  if (bio_data_dir(cell->holder) == WRITE)  in inc_fn()
    1691  if (bio_data_dir(bio) == WRITE)  in inc_fn()
    1834  if (bio_data_dir(bio) == WRITE) {  in process_cell()
    1847  if (bio_data_dir(bio) == WRITE &&  in process_cell()
    3091  if (bio_data_dir(bio) == WRITE) {  in cache_map()
    3111  if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&  in cache_map()
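Almost every hit in the md and device-mapper targets above is the same idiom: the data direction of a struct bio is compared against WRITE to decide whether write-specific handling (corruption in dm-flakey, delay in dm-delay, IV generation in dm-crypt, exception copy-out in dm-snap) applies. A minimal sketch of that test follows; the helper name is hypothetical and not taken from any of these drivers.

#include <linux/fs.h>	/* READ/WRITE data-direction constants on this kernel series */
#include <linux/bio.h>	/* struct bio, bio_data_dir() */

/*
 * Hypothetical helper: bio_data_dir() reduces the operation bits in
 * bio->bi_opf to READ or WRITE, so a target's map path can branch on
 * the data direction with a single compare.
 */
static inline bool example_bio_is_write(struct bio *bio)
{
	return bio_data_dir(bio) == WRITE;
}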
/drivers/s390/net/

ctcm_sysfs.c
    94  priv->channel[WRITE]->prof.maxmulti);  in ctcm_print_statistics()
    96  priv->channel[WRITE]->prof.maxcqueue);  in ctcm_print_statistics()
    98  priv->channel[WRITE]->prof.doios_single);  in ctcm_print_statistics()
    100  priv->channel[WRITE]->prof.doios_multi);  in ctcm_print_statistics()
    102  priv->channel[WRITE]->prof.txlen);  in ctcm_print_statistics()
    104  jiffies_to_usecs(priv->channel[WRITE]->prof.tx_time));  in ctcm_print_statistics()
    131  memset(&priv->channel[WRITE]->prof, 0,  in stats_write()
/drivers/block/drbd/

drbd_req.c
    60  req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)  in drbd_req_new()
    837  if (bio_data_dir(req->master_bio) == WRITE)  in __req_mod()
    1251  if (rw == WRITE && req->private_bio && req->i.size  in drbd_request_prepare()
    1291  if (rw == WRITE) {  in drbd_send_and_submit()
    1318  if (rw != WRITE) {  in drbd_send_and_submit()
    1329  if (rw == WRITE)  in drbd_send_and_submit()
    1335  if (rw == WRITE) {  in drbd_send_and_submit()
    1358  &device->pending_master_completion[rw == WRITE]);  in drbd_send_and_submit()
    1363  &device->pending_completion[rw == WRITE]);  in drbd_send_and_submit()
    1407  if (rw == WRITE /* rw != WRITE should not even end up here! */  in submit_fast_path()
/drivers/nvme/target/

admin-cmd.c
    51  host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);  in nvmet_get_smart_log_nsid()
    52  data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);  in nvmet_get_smart_log_nsid()
    80  host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);  in nvmet_get_smart_log_all()
    82  part_stat_read(ns->bdev->bd_part, sectors[WRITE]);  in nvmet_get_smart_log_all()
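In the NVMe target's SMART log code WRITE is used as an array index rather than in a comparison: the per-partition accounting fields (ios[], sectors[]) are two-element arrays indexed by READ/WRITE. A hedged sketch of that lookup, with a hypothetical helper name; it assumes the hd_struct-based part_stat_read() of this kernel series.

#include <linux/fs.h>	/* READ/WRITE, struct block_device */
#include <linux/genhd.h>	/* part_stat_read(), struct hd_struct */

/*
 * Hypothetical helper mirroring the part_stat_read() calls in
 * nvmet_get_smart_log_nsid() above: read the count of completed write
 * I/Os recorded against the partition backing @bdev.
 */
static u64 example_host_writes(struct block_device *bdev)
{
	return part_stat_read(bdev->bd_part, ios[WRITE]);
}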
/drivers/s390/block/

dasd_fba.c
    95  if (rw == WRITE)  in define_extent()
    115  if (rw == WRITE)  in locate_record()
    269  } else if (rq_data_dir(req) == WRITE) {  in dasd_fba_build_cp()
    327  if (copy && rq_data_dir(req) == WRITE)  in dasd_fba_build_cp()
scm_blk_cluster.c
    97  (rq_data_dir(req) == WRITE ||  in scm_reserve_cluster()
    98  rq_data_dir(iter->request[pos]) == WRITE)) {  in scm_reserve_cluster()
/drivers/block/

swim3.c
    362  if (rq_data_dir(req) == WRITE) {  in start_request()
    461  if (rq_data_dir(req) == WRITE)  in setup_transfer()
    478  if (rq_data_dir(req) == WRITE) {  in setup_transfer()
    493  if (rq_data_dir(req) == WRITE)  in setup_transfer()
    667  (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),  in xfer_timeout()
    754  if (rq_data_dir(req) == WRITE)  in swim3_interrupt()
    787  rq_data_dir(req) == WRITE? "writ": "read",  in swim3_interrupt()
nbd.c
    284  else if (rq_data_dir(req) == WRITE)  in nbd_send_cmd()
    402  if (rq_data_dir(req) != WRITE) {  in nbd_read_stat()
    502  if (rq_data_dir(req) == WRITE &&  in nbd_handle_cmd()
    615  sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);  in __nbd_ioctl()
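For request-based drivers such as swim3 and nbd (and dasd_fba/scm_blk_cluster above) the equivalent test is rq_data_dir() on a struct request rather than bio_data_dir() on a bio. A minimal sketch; the helper name is hypothetical.

#include <linux/fs.h>	/* READ/WRITE */
#include <linux/blkdev.h>	/* struct request, rq_data_dir() */

/*
 * Hypothetical helper: rq_data_dir() yields READ or WRITE for a
 * struct request, mirroring the checks in swim3.c and nbd.c above.
 */
static inline bool example_request_is_write(struct request *req)
{
	return rq_data_dir(req) == WRITE;
}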
/drivers/scsi/device_handler/

scsi_dh_hp_sw.c
    112  req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);  in hp_sw_tur()
    242  req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC);  in hp_sw_start_stop()
/drivers/staging/lustre/lustre/llite/

rw26.c
    255  src_page = (rw == WRITE) ? pages[i] : vmpage;  in ll_direct_rw_pages()
    256  dst_page = (rw == WRITE) ? vmpage : pages[i];  in ll_direct_rw_pages()
    267  if (rw == WRITE)  in ll_direct_rw_pages()
/drivers/memstick/core/

memstick.c
    273  mrq->data_dir = WRITE;  in memstick_init_req_sg()
    303  mrq->data_dir = WRITE;  in memstick_init_req()
    308  if (mrq->data_dir == WRITE)  in memstick_init_req()
/drivers/memstick/host/

tifm_ms.c
    219  t_size = host->req->data_dir == WRITE  in tifm_ms_transfer_data()
    236  if (!length && (host->req->data_dir == WRITE)) {  in tifm_ms_transfer_data()
    299  if (host->req->data_dir == WRITE)  in tifm_ms_issue_cmd()
jmb38x_ms.c
    334  if (host->req->data_dir == WRITE)  in jmb38x_ms_transfer_data()
    355  if (!length && host->req->data_dir == WRITE) {  in jmb38x_ms_transfer_data()
    452  if (host->req->data_dir == WRITE) {  in jmb38x_ms_issue_cmd()
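The memstick core and both host controllers reuse the same READ/WRITE constants for the data_dir field of struct memstick_request: memstick_init_req()/memstick_init_req_sg() set it, and the host drivers test it on their transfer paths. A sketch of that test; the helper name is hypothetical.

#include <linux/fs.h>	/* READ/WRITE */
#include <linux/memstick.h>	/* struct memstick_request */

/*
 * Hypothetical helper: a request whose data_dir was set to WRITE by
 * memstick_init_req() carries data from the host to the card.
 */
static inline bool example_mrq_is_write(const struct memstick_request *mrq)
{
	return mrq->data_dir == WRITE;
}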
/drivers/ide/

ide-floppy.c
    209  if (cmd == WRITE)  in idefloppy_create_rw_cmd()
    223  if (rq_data_dir(rq) == WRITE)  in idefloppy_blockpc_cmd()
/drivers/staging/lustre/lnet/klnds/socklnd/

socklnd_lib.c
    93  iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC,  in ksocknal_lib_send_iov()
    141  iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC,  in ksocknal_lib_send_kiov()
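In the socklnd send paths WRITE plays yet another role: it is the iov_iter data direction (the iterator is a source the socket layer reads from), combined on this kernel series with the iterator-type flag in the second argument of iov_iter_kvec()/iov_iter_bvec(). A hedged sketch of the kvec variant shown above; the wrapper name and its parameters are hypothetical.

#include <linux/fs.h>	/* READ/WRITE */
#include <linux/socket.h>	/* struct msghdr */
#include <linux/uio.h>	/* struct kvec, ITER_KVEC, iov_iter_kvec() */

/*
 * Hypothetical wrapper mirroring ksocknal_lib_send_iov() above: on this
 * kernel series the direction argument combines WRITE (data source)
 * with the iterator type flag ITER_KVEC.
 */
static void example_prep_send_iter(struct msghdr *msg, struct kvec *iov,
				   unsigned int niov, size_t nob)
{
	iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, iov, niov, nob);
}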
/drivers/scsi/

eata_generic.h
    83  #define WRITE 1  (macro definition)
/drivers/net/ethernet/wiznet/

Kconfig
    81  In W5100 SPI mode, burst READ/WRITE processing are not provided.