Selected excerpts from drivers/scsi/vmw_pvscsi.c (VMware's para-virtualized SCSI HBA driver), grouped by function; elided source is marked /* ... */.

/*
 * Linux driver for VMware's para-virtualized SCSI HBA.
 *
 * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 */

In struct pvscsi_ctx:
    /*
     * The index of the context in the cmd_map array serves as the context
     * ID, giving a 1-to-1 mapping of completions back to requests.
     */

MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");

MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");

MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
		 __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");

MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");

MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)");
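
Each of these descriptions pairs with a module_param_named() declaration earlier in the file. A minimal sketch of that pattern, with an assumed variable name and permission mask rather than the driver's exact ones:

    #include <linux/module.h>

    /* Hypothetical mirror of the driver's parameter plumbing. */
    static bool pvscsi_disable_msi;
    module_param_named(disable_msi, pvscsi_disable_msi, bool, 0444);
    MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");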

In pvscsi_dev():
    return &(adapter->dev->dev);

In pvscsi_find_context():
    end = &adapter->cmd_map[adapter->req_depth];
    for (ctx = adapter->cmd_map; ctx < end; ctx++)
        if (ctx->cmd == cmd)
            return ctx;

In pvscsi_acquire_context():
    if (list_empty(&adapter->cmd_pool))
        return NULL;

    ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
    ctx->cmd = cmd;
    list_del(&ctx->list);

In pvscsi_release_context():
    ctx->cmd = NULL;
    ctx->abort_cmp = NULL;
    list_add(&ctx->list, &adapter->cmd_pool);

/*
 * Map a pvscsi_ctx struct to a context ID field value: the device requires
 * the value to be a non-zero integer. ctx always points to an entry in the
 * cmd_map array, hence the one-based index below is always non-zero.
 */
In pvscsi_map_context():
    return ctx - adapter->cmd_map + 1;

In pvscsi_get_context():
    return &adapter->cmd_map[context - 1];
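
A small standalone sketch of why this one-based mapping never hands the device a zero context ID and round-trips exactly (names are illustrative, not the driver's):

    #include <assert.h>
    #include <stddef.h>

    struct ctx { int unused; };

    /* Mirror of pvscsi_map_context(): array entry -> 1-based ID. */
    static size_t map_context(const struct ctx *map, const struct ctx *c)
    {
        return (size_t)(c - map) + 1;   /* index 0 becomes ID 1; ID 0 = "no command" */
    }

    /* Mirror of pvscsi_get_context(): 1-based ID -> array entry. */
    static const struct ctx *get_context(const struct ctx *map, size_t id)
    {
        return &map[id - 1];
    }

    int main(void)
    {
        struct ctx cmd_map[4];

        assert(map_context(cmd_map, &cmd_map[0]) == 1);   /* never 0 */
        assert(get_context(cmd_map, 3) == &cmd_map[2]);   /* round-trips */
        return 0;
    }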

In pvscsi_reg_write():
    writel(val, adapter->mmioBase + offset);

In pvscsi_reg_read():
    return readl(adapter->mmioBase + offset);

In pvscsi_unmask_intr():
    if (adapter->use_msg)
        intr_bits |= PVSCSI_INTR_MSG_MASK;

In pvscsi_abort_cmd():
    cmd.target = ctx->cmd->device->id;

In pvscsi_kick_io():
    struct PVSCSIRingsState *s = adapter->rings_state;

    if (!adapter->use_req_threshold ||
        s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold)
        pvscsi_kick_rw_io(adapter);
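
The reqProdIdx - reqConsIdx comparison works on free-running indices: unsigned subtraction is modulo 2^32, so the difference equals the number of outstanding requests even after the counters wrap. A standalone check of that property:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Free-running 32-bit indices; the producer has wrapped. */
        uint32_t prod = 3;
        uint32_t cons = UINT32_MAX - 1;

        /* (3 - (2^32 - 2)) mod 2^32 == 5 requests outstanding. */
        assert(prod - cons == 5);
        return 0;
    }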

In pvscsi_create_sg():
    sge = &ctx->sgl->sge[0];

In pvscsi_map_buffers():
    e->dataLen = bufflen;
    e->dataAddr = 0;
    /* ... map the scatterlist; on scsi_dma_map() failure: */
    if (segs == -ENOMEM) {
        /* ... */
        return -ENOMEM;
    } else if (segs > 1) {
        /* ... build the adapter-format S/G list, then map it: */
        e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
        ctx->sglPA = dma_map_single(&adapter->dev->dev,
                                    ctx->sgl, SGL_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) {
            scsi_dma_unmap(cmd);
            ctx->sglPA = 0;
            return -ENOMEM;
        }
        e->dataAddr = ctx->sglPA;
    } else
        e->dataAddr = sg_dma_address(sg);

    /* With no S/G list, scsi_sglist() points directly at the buffer: */
    ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
                                 cmd->sc_data_direction);
    if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
        /* ... */
        return -ENOMEM;
    }
    e->dataAddr = ctx->dataPA;

In pvscsi_patch_sense():
    if (cmd->sense_buffer)
        cmd->sense_buffer[0] = 0;

In pvscsi_unmap_buffers():
    cmd = ctx->cmd;
    /* ... when a driver-side S/G list was used: */
    if (ctx->sglPA) {
        dma_unmap_single(&adapter->dev->dev, ctx->sglPA,
                         SGL_SIZE, DMA_TO_DEVICE);
        ctx->sglPA = 0;
    }
    /* ... otherwise the buffer was mapped directly: */
    dma_unmap_single(&adapter->dev->dev, ctx->dataPA,
                     bufflen, cmd->sc_data_direction);

    if (cmd->sense_buffer)
        dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
                         SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);

In pvscsi_allocate_rings():
    adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
                                              &adapter->ringStatePA,
                                              GFP_KERNEL);
    if (!adapter->rings_state)
        return -ENOMEM;

    adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
                             pvscsi_ring_pages);
    adapter->req_depth = adapter->req_pages
                         * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
    adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev,
                                           adapter->req_pages * PAGE_SIZE,
                                           &adapter->reqRingPA, GFP_KERNEL);
    if (!adapter->req_ring)
        return -ENOMEM;

    adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
                             pvscsi_ring_pages);
    adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev,
                                           adapter->cmp_pages * PAGE_SIZE,
                                           &adapter->cmpRingPA, GFP_KERNEL);
    if (!adapter->cmp_ring)
        return -ENOMEM;

    BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
    BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
    BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));

    if (!adapter->use_msg)
        return 0;

    adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
                             pvscsi_msg_ring_pages);
    adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev,
                                           adapter->msg_pages * PAGE_SIZE,
                                           &adapter->msgRingPA, GFP_KERNEL);
    if (!adapter->msg_ring)
        return -ENOMEM;
    BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
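
For intuition on the ring geometry: the request-ring depth is pages times entries per page, and each entry is a fixed-size descriptor. A back-of-the-envelope sketch; the 128-byte descriptor size and 8-page default used here are assumptions for illustration, not values taken from this file:

    #include <stdio.h>

    #define PAGE_BYTES     4096u
    #define REQ_DESC_BYTES 128u  /* assumed sizeof(struct PVSCSIRingReqDesc) */

    int main(void)
    {
        unsigned req_pages = 8;  /* assumed default ring size in pages */
        unsigned per_page  = PAGE_BYTES / REQ_DESC_BYTES;
        unsigned req_depth = req_pages * per_page;

        /* Prints: 32 entries/page, depth 256. */
        printf("%u entries/page, depth %u\n", per_page, req_depth);
        return 0;
    }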

In pvscsi_setup_all_rings():
    cmd.ringsStatePPN   = adapter->ringStatePA >> PAGE_SHIFT;
    cmd.reqRingNumPages = adapter->req_pages;
    cmd.cmpRingNumPages = adapter->cmp_pages;

    base = adapter->reqRingPA;
    for (i = 0; i < adapter->req_pages; i++) {
        cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
        base += PAGE_SIZE;
    }

    base = adapter->cmpRingPA;
    for (i = 0; i < adapter->cmp_pages; i++) {
        cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
        base += PAGE_SIZE;
    }

    memset(adapter->rings_state, 0, PAGE_SIZE);
    memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
    memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
    /* ... issue PVSCSI_CMD_SETUP_RINGS ... */
    if (adapter->use_msg) {
        /* ... */
        cmd_msg.numPages = adapter->msg_pages;

        base = adapter->msgRingPA;
        for (i = 0; i < adapter->msg_pages; i++) {
            cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
            base += PAGE_SIZE;
        }
        memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
        /* ... issue PVSCSI_CMD_SETUP_MSG_RING ... */
    }

In pvscsi_change_queue_depth():
    if (!sdev->tagged_supported)
        qdepth = 1;

In pvscsi_complete_request():
    u32 btstat = e->hostStatus;
    u32 sdstat = e->scsiStatus;

    ctx = pvscsi_get_context(adapter, e->context);
    cmd = ctx->cmd;
    abort_cmp = ctx->abort_cmp;
    /* ... */
    cmd->result = 0;
    /* When the host status is benign but sdstat != SAM_STAT_GOOD: */
    if (sdstat == SAM_STAT_COMMAND_TERMINATED)
        cmd->result = (DID_RESET << 16);
    else {
        cmd->result = (DID_OK << 16) | sdstat;
        if (sdstat == SAM_STAT_CHECK_CONDITION &&
            cmd->sense_buffer)
            cmd->result |= (DRIVER_SENSE << 24);
    }
    /* Otherwise, map the host (bus) status: */
    switch (btstat) {
    case BTSTAT_SUCCESS:
        /*
         * Commands like INQUIRY may transfer less data than requested;
         * set the residual count so the upper layer sees the actual
         * amount returned.  The controller sometimes returns zero
         * dataLen with non-zero data - do not set the residual count
         * in that case.
         */
        if (e->dataLen && (e->dataLen < scsi_bufflen(cmd)))
            scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
        cmd->result = (DID_OK << 16);
        break;
    case BTSTAT_DATARUN:
    case BTSTAT_DATA_UNDERRUN:
        /* Report residual data in underruns. */
        scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
        cmd->result = (DID_ERROR << 16);
        break;
    case BTSTAT_SELTIMEO:
        /* Our emulation returns this for non-connected devs. */
        cmd->result = (DID_BAD_TARGET << 16);
        break;
    case BTSTAT_LUNMISMATCH:
    case BTSTAT_TAGREJECT:
    case BTSTAT_BADMSG:
        cmd->result = (DRIVER_INVALID << 24);
        fallthrough;
    case BTSTAT_HAHARDWARE: /* and other host-side failures */
        cmd->result |= (DID_ERROR << 16);
        break;
    case BTSTAT_SENTRST:
    case BTSTAT_RECVRST:
    case BTSTAT_BUSRESET:
        cmd->result = (DID_RESET << 16);
        break;
    case BTSTAT_ABORTQUEUE:
        cmd->result = (DID_BUS_BUSY << 16);
        break;
    case BTSTAT_SCSIPARITY:
        cmd->result = (DID_PARITY << 16);
        break;
    default:
        cmd->result = (DID_ERROR << 16);
        break;
    }

    dev_dbg(&cmd->device->sdev_gendev,
            "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
            cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);

    cmd->scsi_done(cmd);

In pvscsi_process_completion_ring():
    struct PVSCSIRingsState *s = adapter->rings_state;
    struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
    u32 cmp_entries = s->cmpNumEntriesLog2;

    while (s->cmpConsIdx != s->cmpProdIdx) {
        struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
                                              MASK(cmp_entries));
        /*
         * The device emulation advances s->cmpProdIdx only after it has
         * finished writing the slot.  Since the device emulation never
         * touches a slot once it is published, this barrier() keeps the
         * compiler from reading *e before the index check above.
         */
        barrier();
        pvscsi_complete_request(adapter, e);
        /*
         * This barrier() ensures the compiler does not reorder the write
         * to s->cmpConsIdx before the read of (*e) inside
         * pvscsi_complete_request.
         */
        barrier();
        s->cmpConsIdx++;
    }
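
A stripped-down userspace version of the same single-producer/single-consumer loop, with a GCC-style compiler barrier standing in for the kernel's barrier() (illustrative only; like the excerpt above, it relies purely on compiler ordering):

    #include <stdint.h>

    #define RING_LOG2 5u
    #define RING_MASK ((1u << RING_LOG2) - 1u)

    struct slot { uint64_t payload; };

    /* Shared with the producer: free-running indices plus the slots. */
    static volatile uint32_t prod_idx, cons_idx;
    static struct slot ring[1u << RING_LOG2];

    static void compiler_barrier(void)
    {
        __asm__ __volatile__("" ::: "memory");
    }

    static void handle(const struct slot *e) { (void)e; /* complete it */ }

    void consume_all(void)
    {
        while (cons_idx != prod_idx) {
            const struct slot *e = &ring[cons_idx & RING_MASK];

            /* Don't read *e before the index check above. */
            compiler_barrier();
            handle(e);
            /* Don't publish the slot as free before handle() is done. */
            compiler_barrier();
            cons_idx++;
        }
    }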

In pvscsi_queue_ring():
    s = adapter->rings_state;
    sdev = cmd->device;
    req_entries = s->reqNumEntriesLog2;

    /*
     * If this condition holds, we might have room on the request ring, but
     * we might not have room on the completion ring for the response.
     * However, we have already ruled out this possibility - we would not
     * have successfully allocated a context if it were true, since we only
     * have one context per request entry.  Check for it anyway, since it
     * would be a serious bug.
     */
    if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
        scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
                    "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
                    s->reqProdIdx, s->cmpConsIdx);
        return -1;
    }

    e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));

    e->bus = sdev->channel;
    e->target = sdev->id;
    memset(e->lun, 0, sizeof(e->lun));
    e->lun[1] = sdev->lun;

    if (cmd->sense_buffer) {
        ctx->sensePA = dma_map_single(&adapter->dev->dev,
                                      cmd->sense_buffer,
                                      SCSI_SENSE_BUFFERSIZE,
                                      DMA_FROM_DEVICE);
        if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
            /* ... */
            ctx->sensePA = 0;
            return -ENOMEM;
        }
        e->senseAddr = ctx->sensePA;
        e->senseLen = SCSI_SENSE_BUFFERSIZE;
    } else {
        e->senseLen  = 0;
        e->senseAddr = 0;
    }
    e->cdbLen   = cmd->cmd_len;
    e->vcpuHint = smp_processor_id();
    memcpy(e->cdb, cmd->cmnd, e->cdbLen);

    e->tag = SIMPLE_QUEUE_TAG;

    if (cmd->sc_data_direction == DMA_FROM_DEVICE)
        e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
    else if (cmd->sc_data_direction == DMA_TO_DEVICE)
        e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
    else if (cmd->sc_data_direction == DMA_NONE)
        e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
    else
        e->flags = 0;

    if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
        if (cmd->sense_buffer) {
            dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
                             SCSI_SENSE_BUFFERSIZE,
                             DMA_FROM_DEVICE);
            ctx->sensePA = 0;
        }
        return -ENOMEM;
    }

    e->context = pvscsi_map_context(adapter, ctx);

    barrier();

    s->reqProdIdx++;

In pvscsi_queue_lck():
    struct Scsi_Host *host = cmd->device->host;
    /* ... */
    spin_lock_irqsave(&adapter->hw_lock, flags);
    /* ... if context acquisition or ring queuing fails: */
    spin_unlock_irqrestore(&adapter->hw_lock, flags);
    return SCSI_MLQUEUE_HOST_BUSY;
    /* ... on success: */
    cmd->scsi_done = done;
    op = cmd->cmnd[0];

    dev_dbg(&cmd->device->sdev_gendev,
            "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);

    spin_unlock_irqrestore(&adapter->hw_lock, flags);

In pvscsi_abort() (the indexer mis-attributes these lines to DEF_SCSI_QCMD):
    struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
    /* ... */
    scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
                adapter->host->host_no, cmd);

    spin_lock_irqsave(&adapter->hw_lock, flags);

    /*
     * Poll the completion ring first - we might be trying to abort
     * a command that has just completed.
     */
    /* ... publish an on-stack completion for the abort handshake: */
    ctx->abort_cmp = &abort_cmp;
    /* ... issue the abort, then drop the lock and wait: */
    spin_unlock_irqrestore(&adapter->hw_lock, flags);
    wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
    spin_lock_irqsave(&adapter->hw_lock, flags);
    /* ... if the abort timed out, unpublish the on-stack completion: */
    ctx->abort_cmp = NULL;
    /* ... otherwise fail the command back to the midlayer: */
    cmd->result = (DID_ABORT << 16);
    cmd->scsi_done(cmd);
    /* ... */
    spin_unlock_irqrestore(&adapter->hw_lock, flags);
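
The abort path above publishes an on-stack struct completion through ctx->abort_cmp, drops the lock, waits, then unpublishes it. A userspace analogue of that handshake, with pthreads standing in for the kernel's completion API (all names illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool done;
    };

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = true;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    /* Stands in for ctx->abort_cmp. */
    static struct completion *abort_cmp_slot;

    static void *completion_path(void *arg)
    {
        (void)arg;
        if (abort_cmp_slot)     /* command finished: wake the aborter */
            complete(abort_cmp_slot);
        return NULL;
    }

    int main(void)
    {
        struct completion abort_cmp = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
        };
        pthread_t t;

        abort_cmp_slot = &abort_cmp;  /* publish, like ctx->abort_cmp = &abort_cmp */
        pthread_create(&t, NULL, completion_path, NULL);
        wait_for_completion(&abort_cmp);
        pthread_join(t, NULL);
        abort_cmp_slot = NULL;        /* unpublish before the frame dies */
        return 0;
    }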

/*
 * Abort all outstanding requests.  This is only safe once the device has
 * been reset, because a reset destroys the 1-1 mapping between the context
 * field passed to the emulation and our request structures.
 */
In pvscsi_reset_all():
    for (i = 0; i < adapter->req_depth; i++) {
        struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
        struct scsi_cmnd *cmd = ctx->cmd;

        if (cmd) {
            /* ... */
            cmd->result = (DID_RESET << 16);
            cmd->scsi_done(cmd);
        }
    }

In pvscsi_host_reset():
    struct Scsi_Host *host = cmd->device->host;
    /* ... */
    spin_lock_irqsave(&adapter->hw_lock, flags);

    use_msg = adapter->use_msg;
    if (use_msg) {
        adapter->use_msg = false;
        spin_unlock_irqrestore(&adapter->hw_lock, flags);
        /*
         * Now that the ISR can no longer add work to the workqueue, we
         * can safely flush any outstanding work.
         */
        flush_workqueue(adapter->workqueue);
        spin_lock_irqsave(&adapter->hw_lock, flags);
    }
    /*
     * Process completions AFTER the adapter reset: the backend does
     * not touch the ring memory after reset, so the immediately pre-reset
     * completion-ring state is still valid.
     */
    /* ... */
    adapter->use_msg = use_msg;
    /* ... */
    spin_unlock_irqrestore(&adapter->hw_lock, flags);

In pvscsi_bus_reset():
    struct Scsi_Host *host = cmd->device->host;
    /* ... */
    spin_lock_irqsave(&adapter->hw_lock, flags);
    /* ... flush completions, issue the bus reset, fail all contexts: */
    spin_unlock_irqrestore(&adapter->hw_lock, flags);

In pvscsi_device_reset():
    struct Scsi_Host *host = cmd->device->host;
    /* ... */
    printk(KERN_INFO "vmw_pvscsi: device reset on scsi%u:%u\n",
           host->host_no, cmd->device->id);
    /* ... */
    spin_lock_irqsave(&adapter->hw_lock, flags);
    /* ... */
    ll_device_reset(adapter, cmd->device->id);
    /* ... */
    spin_unlock_irqrestore(&adapter->hw_lock, flags);
1018 "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev, in pvscsi_info()
1019 adapter->req_pages, adapter->cmp_pages, adapter->msg_pages, in pvscsi_info()
1031 .this_id = -1,

In pvscsi_process_msg():
    struct PVSCSIRingsState *s = adapter->rings_state;
    struct Scsi_Host *host = adapter->host;
    struct scsi_device *sdev;

    printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u)\n",
           e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);

    if (e->type == PVSCSI_MSG_DEV_ADDED) {
        struct PVSCSIMsgDescDevStatusChanged *desc;
        desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

        printk(KERN_INFO
               "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
               desc->bus, desc->target, desc->lun[1]);
        /* ... */
        sdev = scsi_device_lookup(host, desc->bus, desc->target,
                                  desc->lun[1]);
        if (sdev)
            scsi_device_put(sdev);   /* already known */
        else
            scsi_add_device(adapter->host, desc->bus,
                            desc->target, desc->lun[1]);
    } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
        struct PVSCSIMsgDescDevStatusChanged *desc;
        desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
        /* ... announce the removal, then: */
        sdev = scsi_device_lookup(host, desc->bus, desc->target,
                                  desc->lun[1]);
        if (sdev) {
            scsi_remove_device(sdev);
            scsi_device_put(sdev);
        } else
            printk(KERN_INFO
                   "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
                   desc->bus, desc->target, desc->lun[1]);
    }

In pvscsi_msg_pending():
    struct PVSCSIRingsState *s = adapter->rings_state;

    return s->msgProdIdx != s->msgConsIdx;

In pvscsi_process_msg_ring():
    struct PVSCSIRingsState *s = adapter->rings_state;
    struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
    u32 msg_entries = s->msgNumEntriesLog2;

    while (pvscsi_msg_pending(adapter)) {
        struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
                                              MASK(msg_entries));
        barrier();
        pvscsi_process_msg(adapter, e);
        barrier();
        s->msgConsIdx++;
    }

In pvscsi_setup_msg_workqueue():
    /* Probe whether the device supports the msg ring at all: */
    if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
        return 0;

    snprintf(name, sizeof(name),
             "vmw_pvscsi_wq_%u", adapter->host->host_no);

    adapter->workqueue = create_singlethread_workqueue(name);
    if (!adapter->workqueue) {
        /* ... */
        return 0;
    }
    INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);

In pvscsi_setup_req_threshold():
    if (val == -1) {
        /* ... the device does not support request coalescing ... */

In pvscsi_isr():
    spin_lock_irqsave(&adapter->hw_lock, flags);
    pvscsi_process_completion_ring(adapter);
    if (adapter->use_msg && pvscsi_msg_pending(adapter))
        queue_work(adapter->workqueue, &adapter->work);
    spin_unlock_irqrestore(&adapter->hw_lock, flags);

In pvscsi_free_sgls():
    struct pvscsi_ctx *ctx = adapter->cmd_map;
    unsigned i;

    for (i = 0; i < adapter->req_depth; ++i, ++ctx)
        free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));

In pvscsi_shutdown_intr():
    free_irq(pci_irq_vector(adapter->dev, 0), adapter);
    pci_free_irq_vectors(adapter->dev);

In pvscsi_release_resources():
    if (adapter->workqueue)
        destroy_workqueue(adapter->workqueue);

    if (adapter->mmioBase)
        pci_iounmap(adapter->dev, adapter->mmioBase);

    pci_release_regions(adapter->dev);

    if (adapter->cmd_map) {
        pvscsi_free_sgls(adapter);
        kfree(adapter->cmd_map);
    }

    if (adapter->rings_state)
        dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
                          adapter->rings_state, adapter->ringStatePA);

    if (adapter->req_ring)
        dma_free_coherent(&adapter->dev->dev,
                          adapter->req_pages * PAGE_SIZE,
                          adapter->req_ring, adapter->reqRingPA);

    if (adapter->cmp_ring)
        dma_free_coherent(&adapter->dev->dev,
                          adapter->cmp_pages * PAGE_SIZE,
                          adapter->cmp_ring, adapter->cmpRingPA);

    if (adapter->msg_ring)
        dma_free_coherent(&adapter->dev->dev,
                          adapter->msg_pages * PAGE_SIZE,
                          adapter->msg_ring, adapter->msgRingPA);

/*
 * Dynamic allocation can fail, and we can't go deep into the memory
 * allocator, since we're a SCSI driver, and trying too hard to allocate
 * memory might generate disk I/O.  We also don't want to fail disk I/O
 * in that case because we can't get an allocation - the I/O could be
 * trying to free memory.  So the S/G lists are allocated up front at
 * probe time instead.
 */
In pvscsi_allocate_sg():
    ctx = adapter->cmd_map;
    /* ... */
    for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
        ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
                                            get_order(SGL_SIZE));
        ctx->sglPA = 0;
        BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
        if (!ctx->sgl) {
            for (; i >= 0; --i, --ctx) {
                free_pages((unsigned long)ctx->sgl,
                           get_order(SGL_SIZE));
                ctx->sgl = NULL;
            }
            return -ENOMEM;
        }
    }
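
The same fail-early idea in plain C: grab every buffer up front and unwind in reverse if any allocation fails, so no later I/O ever depends on the allocator (malloc stands in for __get_free_pages; names illustrative):

    #include <stdlib.h>

    /* Preallocate n buffers or none. */
    static int preallocate(void **slots, int n, size_t sz)
    {
        for (int i = 0; i < n; ++i) {
            slots[i] = malloc(sz);
            if (!slots[i]) {
                while (--i >= 0) {      /* unwind in reverse */
                    free(slots[i]);
                    slots[i] = NULL;
                }
                return -1;              /* fail at init, not during I/O */
            }
        }
        return 0;
    }

    int main(void)
    {
        void *slots[64];
        int i;

        if (preallocate(slots, 64, 4096) != 0)
            return 1;
        for (i = 0; i < 64; ++i)
            free(slots[i]);
        return 0;
    }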

In pvscsi_get_max_targets():
    config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
                                     &configPagePA, GFP_KERNEL);
    /* ... pre-set the header so an unimplemented command reads as failure: */
    header->hostStatus = BTSTAT_INVPARAM;
    header->scsiStatus = SDSTAT_CHECK;
    /* ... issue the config command, then: */
    if (header->hostStatus == BTSTAT_SUCCESS &&
        header->scsiStatus == SDSTAT_GOOD) {
        /* ... */
        numPhys = config->numPhys;
    } else {
        /* ... warn, reporting header->hostStatus, header->scsiStatus ... */
    }
    dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page,
                      configPagePA);

In pvscsi_probe():
    error = -ENODEV;
    /* ... */
    if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
        /* 64-bit DMA */
    } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
        /* fall back to 32-bit DMA */
    } else {
        /* ... no usable DMA mask, bail out ... */
    }

    /* First pass uses a temporary adapter struct to size the host: */
    adapter->dev = pdev;
    adapter->rev = pdev->revision;
    /* ... find the memory-space BAR, then: */
    adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);

    if (!adapter->mmioBase) {
        /* ... */
    }
    /* ... query max targets, allocate the real Scsi_Host, then: */
    adapter->dev  = pdev;
    adapter->host = host;
    /* Copy what was discovered with the temporary adapter: */
    adapter->rev = adapter_temp.rev;
    adapter->mmioBase = adapter_temp.mmioBase;

    spin_lock_init(&adapter->hw_lock);
    host->max_channel = 0;
    host->max_lun     = 1;
    host->max_cmd_len = 16;
    host->max_id      = max_id;
    /* ... */
    adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
    /* ... allocate the rings, then the per-request context array: */
    adapter->cmd_map = kcalloc(adapter->req_depth,
                               sizeof(struct pvscsi_ctx), GFP_KERNEL);
    if (!adapter->cmd_map) {
        /* ... */
        error = -ENOMEM;
        goto out_reset_adapter;
    }

    INIT_LIST_HEAD(&adapter->cmd_pool);
    for (i = 0; i < adapter->req_depth; i++) {
        struct pvscsi_ctx *ctx = adapter->cmd_map + i;
        list_add(&ctx->list, &adapter->cmd_pool);
    }
    /* ... */
    error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
    if (error < 0)
        goto out_reset_adapter;

    adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
    printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
           adapter->use_req_threshold ? "en" : "dis");

    if (adapter->dev->msix_enabled || adapter->dev->msi_enabled) {
        printk(KERN_INFO "vmw_pvscsi: using MSI%s\n",
               adapter->dev->msix_enabled ? "-X" : "");
    }
    /* ... request the IRQ, then register with the midlayer: */
    error = scsi_add_host(host, &pdev->dev);
    if (error) {
        /* ... */
    }

    dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
             adapter->rev, host->host_no);

In __pvscsi_shutdown():
    if (adapter->workqueue)
        flush_workqueue(adapter->workqueue);
1620 pr_info("%s - version %s\n", in pvscsi_init()