Lines matching refs:acb in QEMU block/qcow2.c:

in qcow_aio_cancel():
    348  QCowAIOCB *acb = container_of(blockacb, QCowAIOCB, common);   (local)
    349  if (acb->hd_aiocb)
    350      bdrv_aio_cancel(acb->hd_aiocb);
    351  qemu_aio_release(acb);
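
The cancel path gets back from the generic BlockDriverAIOCB handle to the qcow2 request with container_of (line 348). A minimal, self-contained sketch of that recovery pattern, with simplified stand-in types rather than the real QEMU definitions:

    #include <stddef.h>
    #include <stdio.h>

    /* Same idea as QEMU's container_of: walk back from a pointer to a
     * member to the structure that embeds it. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    typedef struct CommonAIOCB { int opaque_id; } CommonAIOCB;

    typedef struct MyAIOCB {
        CommonAIOCB common;   /* generic part, handed out to callers */
        int in_flight;        /* driver-private state */
    } MyAIOCB;

    static void my_cancel(CommonAIOCB *blockacb)
    {
        /* Recover the full driver AIOCB from the embedded common part. */
        MyAIOCB *acb = container_of(blockacb, MyAIOCB, common);
        acb->in_flight = 0;
    }

    int main(void)
    {
        MyAIOCB acb = { .common = { .opaque_id = 7 }, .in_flight = 1 };
        my_cancel(&acb.common);
        printf("in_flight after cancel: %d\n", acb.in_flight);
        return 0;
    }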

in qcow_aio_read_bh():
    362  QCowAIOCB *acb = opaque;   (local)
    363  qemu_bh_delete(acb->bh);
    364  acb->bh = NULL;

in qcow_schedule_bh():
    368  static int qcow_schedule_bh(QEMUBHFunc *cb, QCowAIOCB *acb)   (argument)
    370  if (acb->bh)
    373  acb->bh = qemu_bh_new(cb, acb);
    374  if (!acb->bh)
    377  qemu_bh_schedule(acb->bh);
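
qcow_schedule_bh exists so that a request needing no further disk I/O still completes through a QEMU bottom half, i.e. the caller's callback never runs before the submitting function has returned. A rough stand-in for that deferral, using a hand-rolled pending queue in place of qemu_bh_new/qemu_bh_schedule (an assumed simplification, not the QEMU API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    typedef void DeferredFunc(void *opaque);

    typedef struct Deferred {
        DeferredFunc *cb;
        void *opaque;
        struct Deferred *next;
    } Deferred;

    static Deferred *pending;   /* queue drained by the "event loop" */

    /* Analogue of qemu_bh_new + qemu_bh_schedule: remember the callback
     * instead of invoking it immediately. */
    static int schedule_deferred(DeferredFunc *cb, void *opaque)
    {
        Deferred *d = malloc(sizeof(*d));
        if (!d)
            return -ENOMEM;
        d->cb = cb;
        d->opaque = opaque;
        d->next = pending;
        pending = d;
        return 0;
    }

    /* Analogue of the main loop running scheduled bottom halves. */
    static void run_deferred(void)
    {
        while (pending) {
            Deferred *d = pending;
            pending = d->next;
            d->cb(d->opaque);
            free(d);
        }
    }

    static void done(void *opaque)
    {
        printf("completed: %s\n", (const char *)opaque);
    }

    int main(void)
    {
        schedule_deferred(done, "request 1");
        printf("submission returned before the callback ran\n");
        run_deferred();
        return 0;
    }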

in qcow_aio_read_cb():
    384  QCowAIOCB *acb = opaque;   (local)
    385  BlockDriverState *bs = acb->common.bs;
    389  acb->hd_aiocb = NULL;
    394  if (!acb->cluster_offset) {
    396  } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
    400      qcow2_encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
    401          acb->cur_nr_sectors, 0,
    406  acb->remaining_sectors -= acb->cur_nr_sectors;
    407  acb->sector_num += acb->cur_nr_sectors;
    408  acb->buf += acb->cur_nr_sectors * 512;
    410  if (acb->remaining_sectors == 0) {
    417  acb->cur_nr_sectors = acb->remaining_sectors;
    418  ret = qcow2_get_cluster_offset(bs, acb->sector_num << 9,
    419      &acb->cur_nr_sectors, &acb->cluster_offset);
    424  index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
    426  if (!acb->cluster_offset) {
    429      n1 = qcow2_backing_read1(bs->backing_hd, acb->sector_num,
    430          acb->buf, acb->cur_nr_sectors);
    432      acb->hd_iov.iov_base = (void *)acb->buf;
    433      acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
    434      qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
    436      acb->hd_aiocb = bdrv_aio_readv(bs->backing_hd, acb->sector_num,
    437          &acb->hd_qiov, acb->cur_nr_sectors,
    438          qcow_aio_read_cb, acb);
    439      if (acb->hd_aiocb == NULL)
    442      ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
    448      memset(acb->buf, 0, 512 * acb->cur_nr_sectors);
    449      ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
    453  } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
    455      if (qcow2_decompress_cluster(bs, acb->cluster_offset) < 0)
    457      memcpy(acb->buf, s->cluster_cache + index_in_cluster * 512,
    458          512 * acb->cur_nr_sectors);
    459      ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
    463  if ((acb->cluster_offset & 511) != 0) {
    468  acb->hd_iov.iov_base = (void *)acb->buf;
    469  acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
    470  qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
    472  acb->hd_aiocb = bdrv_aio_readv(bs->file,
    473      (acb->cluster_offset >> 9) + index_in_cluster,
    474      &acb->hd_qiov, acb->cur_nr_sectors,
    475      qcow_aio_read_cb, acb);
    476  if (acb->hd_aiocb == NULL) {
    484  if (acb->qiov->niov > 1) {
    485      qemu_iovec_from_buffer(acb->qiov, acb->orig_buf, acb->qiov->size);
    486      qemu_vfree(acb->orig_buf);
    488  acb->common.cb(acb->common.opaque, ret);
    489  qemu_aio_release(acb);
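
Each re-entry into qcow_aio_read_cb retires cur_nr_sectors and advances the cursor (lines 406-408) before asking qcow2_get_cluster_offset for the next contiguous run; the mask on line 424 assumes cluster_sectors is a power of two. The bookkeeping itself is plain arithmetic, shown here as a synchronous sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Walk a request cluster by cluster, mirroring the cursor updates in
     * qcow_aio_read_cb: each step covers at most the sectors left in the
     * current cluster, then advances sector_num and remaining_sectors. */
    static void walk_clusters(int64_t sector_num, int remaining_sectors,
                              int cluster_sectors)
    {
        while (remaining_sectors > 0) {
            int index_in_cluster = sector_num & (cluster_sectors - 1);
            int cur = cluster_sectors - index_in_cluster;
            if (cur > remaining_sectors)
                cur = remaining_sectors;

            printf("sector %lld: %d sectors, offset %d in cluster\n",
                   (long long)sector_num, cur, index_in_cluster);

            remaining_sectors -= cur;
            sector_num += cur;
            /* the real code also advances: acb->buf += cur * 512; */
        }
    }

    int main(void)
    {
        /* 128 sectors per cluster (64 KiB clusters); a request that
         * straddles two cluster boundaries. */
        walk_clusters(100, 200, 128);
        return 0;
    }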

in qcow_aio_setup():
    496  QCowAIOCB *acb;   (local)
    498  acb = qemu_aio_get(&qcow_aio_pool, bs, cb, opaque);
    499  if (!acb)
    501  acb->hd_aiocb = NULL;
    502  acb->sector_num = sector_num;
    503  acb->qiov = qiov;
    505  acb->buf = acb->orig_buf = qemu_blockalign(bs, qiov->size);
    507  qemu_iovec_to_buffer(qiov, acb->buf);
    509  acb->buf = (uint8_t *)qiov->iov->iov_base;
    511  acb->remaining_sectors = nb_sectors;
    512  acb->cur_nr_sectors = 0;
    513  acb->cluster_offset = 0;
    514  acb->l2meta.nb_clusters = 0;
    515  QLIST_INIT(&acb->l2meta.dependent_requests);
    516  return acb;
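
qcow_aio_setup linearizes a multi-element iovec into one bounce buffer (lines 505-507) and otherwise points acb->buf straight at the caller's memory (line 509); on the read path the bounce buffer is copied back and freed at completion (lines 485-486). A self-contained sketch of that choice, with plain malloc standing in for qemu_blockalign:

    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>
    #include <sys/uio.h>

    /* Pick the I/O buffer for a request: a single-element iovec is used
     * directly; anything else gets a linear bounce buffer (the real code
     * uses qemu_blockalign for alignment, malloc here for brevity). */
    static uint8_t *pick_buffer(struct iovec *iov, int niov, size_t size,
                                uint8_t **orig_buf)
    {
        if (niov > 1) {
            *orig_buf = malloc(size);
            return *orig_buf;          /* copy in/out around the I/O */
        }
        *orig_buf = NULL;
        return iov[0].iov_base;        /* zero-copy fast path */
    }

    int main(void)
    {
        char a[256], b[256];
        struct iovec iov[2] = {
            { .iov_base = a, .iov_len = sizeof(a) },
            { .iov_base = b, .iov_len = sizeof(b) },
        };
        uint8_t *orig_buf;
        uint8_t *buf = pick_buffer(iov, 2, sizeof(a) + sizeof(b), &orig_buf);
        memset(buf, 0, sizeof(a) + sizeof(b));
        free(orig_buf);   /* the real read path frees only after copy-back */
        return 0;
    }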

in qcow_aio_readv():
    523  QCowAIOCB *acb;   (local)
    525  acb = qcow_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
    526  if (!acb)
    529  qcow_aio_read_cb(acb, 0);
    530  return &acb->common;

in qcow_aio_write_cb():
    556  QCowAIOCB *acb = opaque;   (local)
    557  BlockDriverState *bs = acb->common.bs;
    563  acb->hd_aiocb = NULL;
    566  ret = qcow2_alloc_cluster_link_l2(bs, &acb->l2meta);
    569  run_dependent_requests(&acb->l2meta);
    574  acb->remaining_sectors -= acb->cur_nr_sectors;
    575  acb->sector_num += acb->cur_nr_sectors;
    576  acb->buf += acb->cur_nr_sectors * 512;
    578  if (acb->remaining_sectors == 0) {
    584  index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
    585  n_end = index_in_cluster + acb->remaining_sectors;
    590  ret = qcow2_alloc_cluster_offset(bs, acb->sector_num << 9,
    591      index_in_cluster, n_end, &acb->cur_nr_sectors, &acb->l2meta);
    596  acb->cluster_offset = acb->l2meta.cluster_offset;
    599  if (acb->l2meta.nb_clusters == 0 && acb->l2meta.depends_on != NULL) {
    600      QLIST_INSERT_HEAD(&acb->l2meta.depends_on->dependent_requests,
    601          acb, next_depend);
    605  assert((acb->cluster_offset & 511) == 0);
    608  if (!acb->cluster_data) {
    609      acb->cluster_data = qemu_mallocz(QCOW_MAX_CRYPT_CLUSTERS *
    612  qcow2_encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
    613      acb->cur_nr_sectors, 1, &s->aes_encrypt_key);
    614  src_buf = acb->cluster_data;
    616  src_buf = acb->buf;
    618  acb->hd_iov.iov_base = (void *)src_buf;
    619  acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
    620  qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
    622  acb->hd_aiocb = bdrv_aio_writev(bs->file,
    623      (acb->cluster_offset >> 9) + index_in_cluster,
    624      &acb->hd_qiov, acb->cur_nr_sectors,
    625      qcow_aio_write_cb, acb);
    626  if (acb->hd_aiocb == NULL) {
    634  if (acb->l2meta.nb_clusters != 0) {
    635      QLIST_REMOVE(&acb->l2meta, next_in_flight);
    638  if (acb->qiov->niov > 1)
    639      qemu_vfree(acb->orig_buf);
    640  acb->common.cb(acb->common.opaque, ret);
    641  qemu_aio_release(acb);
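
If qcow2_alloc_cluster_offset reports that the target cluster is still being allocated on behalf of another request (l2meta.nb_clusters == 0 with depends_on set, lines 599-601), the write parks itself on that allocation's dependent_requests list and is restarted by run_dependent_requests once the L2 update is done (line 569). A stripped-down sketch of such a wait list, with a hand-rolled singly linked list standing in for QEMU's QLIST:

    #include <stdio.h>

    typedef struct Request {
        const char *name;
        struct Request *next_depend;    /* link on an allocation's wait list */
    } Request;

    typedef struct Allocation {
        Request *dependent_requests;    /* writes blocked on this allocation */
    } Allocation;

    /* Analogue of QLIST_INSERT_HEAD(&depends_on->dependent_requests, ...) */
    static void wait_on(Allocation *alloc, Request *req)
    {
        req->next_depend = alloc->dependent_requests;
        alloc->dependent_requests = req;
    }

    /* Analogue of run_dependent_requests(): restart every parked write
     * once the cluster allocation has been committed to the L2 table. */
    static void run_dependent_requests(Allocation *alloc)
    {
        Request *req = alloc->dependent_requests;
        alloc->dependent_requests = NULL;
        while (req) {
            Request *next = req->next_depend;
            printf("restarting %s\n", req->name);
            req = next;
        }
    }

    int main(void)
    {
        Allocation alloc = { 0 };
        Request a = { "write A" }, b = { "write B" };
        wait_on(&alloc, &a);
        wait_on(&alloc, &b);
        run_dependent_requests(&alloc);
        return 0;
    }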

in qcow_aio_writev():
    649  QCowAIOCB *acb;   (local)
    653  acb = qcow_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
    654  if (!acb)
    657  qcow_aio_write_cb(acb, 0);
    658  return &acb->common;