Lines Matching refs:nvmeq

128 static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)  in nvme_read_completion_status()  argument
130 u64 start = (ulong)&nvmeq->cqes[index]; in nvme_read_completion_status()
135 return le16_to_cpu(readw(&(nvmeq->cqes[index].status))); in nvme_read_completion_status()
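Taken together, these matches show how the driver reads a completion entry's status word: the CQE sits in DMA memory written by the controller, so the cache line is invalidated before the 16-bit status (whose bit 0 is the phase tag) is read. A minimal sketch of the full function, with the lines not shown above (the stop address and the invalidate call) filled in as assumptions:

static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
        /* The controller writes CQEs by DMA, so drop any stale
         * cache lines before reading the entry. */
        u64 start = (ulong)&nvmeq->cqes[index];
        u64 stop = start + sizeof(struct nvme_completion);

        invalidate_dcache_range(start, stop);

        /* Little-endian status word; bit 0 is the phase tag. */
        return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
}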
144 static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) in nvme_submit_cmd() argument
146 u16 tail = nvmeq->sq_tail; in nvme_submit_cmd()
148 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); in nvme_submit_cmd()
149 flush_dcache_range((ulong)&nvmeq->sq_cmds[tail], in nvme_submit_cmd()
150 (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd)); in nvme_submit_cmd()
152 if (++tail == nvmeq->q_depth) in nvme_submit_cmd()
154 writel(tail, nvmeq->q_db); in nvme_submit_cmd()
155 nvmeq->sq_tail = tail; in nvme_submit_cmd()
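The matches above cover the whole submission path: copy the command into the SQ slot at sq_tail, flush it from the data cache so the controller sees it, wrap the tail at q_depth, and write the new tail to the SQ doorbell. A sketch assembling them; the tail = 0 wrap line is not in the match list and is assumed:

static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
        u16 tail = nvmeq->sq_tail;

        /* Place the command in the next free SQ slot and push it out
         * of the data cache so the controller reads the real bytes. */
        memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
        flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
                           (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

        /* The queue is a ring: wrap the tail index at q_depth. */
        if (++tail == nvmeq->q_depth)
                tail = 0;

        /* Writing the new tail to the SQ doorbell notifies the device. */
        writel(tail, nvmeq->q_db);
        nvmeq->sq_tail = tail;
}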
158 static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, in nvme_submit_sync_cmd() argument
162 u16 head = nvmeq->cq_head; in nvme_submit_sync_cmd()
163 u16 phase = nvmeq->cq_phase; in nvme_submit_sync_cmd()
169 nvme_submit_cmd(nvmeq, cmd); in nvme_submit_sync_cmd()
174 status = nvme_read_completion_status(nvmeq, head); in nvme_submit_sync_cmd()
187 if (++head == nvmeq->q_depth) { in nvme_submit_sync_cmd()
191 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_submit_sync_cmd()
192 nvmeq->cq_head = head; in nvme_submit_sync_cmd()
193 nvmeq->cq_phase = phase; in nvme_submit_sync_cmd()
199 *result = le32_to_cpu(readl(&(nvmeq->cqes[head].result))); in nvme_submit_sync_cmd()
201 if (++head == nvmeq->q_depth) { in nvme_submit_sync_cmd()
205 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_submit_sync_cmd()
206 nvmeq->cq_head = head; in nvme_submit_sync_cmd()
207 nvmeq->cq_phase = phase; in nvme_submit_sync_cmd()
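nvme_submit_sync_cmd() polls the completion queue instead of taking interrupts. The matches show the cq_head/cq_phase bookkeeping and the CQ doorbell write (one db_stride past the SQ doorbell at q_db). A condensed sketch of the loop, assuming the usual phase-bit convention; the timeout, command-id assignment and error reporting present in the real function are omitted:

static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
                                struct nvme_command *cmd,
                                u32 *result, unsigned timeout)
{
        u16 head = nvmeq->cq_head;
        u16 phase = nvmeq->cq_phase;
        u16 status;

        nvme_submit_cmd(nvmeq, cmd);

        /* A CQE is new when its phase bit matches the expected phase. */
        do {
                status = nvme_read_completion_status(nvmeq, head);
        } while ((status & 0x1) != phase);      /* timeout check omitted */

        if (result)
                *result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));

        /* Consume the entry: advance head, flip the expected phase on
         * wrap-around, and ring the CQ doorbell (one stride past the
         * SQ doorbell). */
        if (++head == nvmeq->q_depth) {
                head = 0;
                phase = !phase;
        }
        writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
        nvmeq->cq_head = head;
        nvmeq->cq_phase = phase;

        return status >> 1;
}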
222 struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq)); in nvme_alloc_queue() local
223 if (!nvmeq) in nvme_alloc_queue()
225 memset(nvmeq, 0, sizeof(*nvmeq)); in nvme_alloc_queue()
227 nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth)); in nvme_alloc_queue()
228 if (!nvmeq->cqes) in nvme_alloc_queue()
230 memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth)); in nvme_alloc_queue()
232 nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth)); in nvme_alloc_queue()
233 if (!nvmeq->sq_cmds) in nvme_alloc_queue()
235 memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth)); in nvme_alloc_queue()
237 nvmeq->dev = dev; in nvme_alloc_queue()
239 nvmeq->cq_head = 0; in nvme_alloc_queue()
240 nvmeq->cq_phase = 1; in nvme_alloc_queue()
241 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_alloc_queue()
242 nvmeq->q_depth = depth; in nvme_alloc_queue()
243 nvmeq->qid = qid; in nvme_alloc_queue()
245 dev->queues[qid] = nvmeq; in nvme_alloc_queue()
247 return nvmeq; in nvme_alloc_queue()
250 free((void *)nvmeq->cqes); in nvme_alloc_queue()
252 free(nvmeq); in nvme_alloc_queue()
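Queue allocation: the bookkeeping struct comes from malloc(), while the CQE and SQ command arrays come from memalign() with 4 KiB alignment and are zeroed, because the controller is handed their addresses directly; the two free() calls at lines 250 and 252 are the error path. A sketch, with the goto labels and their names assumed:

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
                                           int qid, int depth)
{
        struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));

        if (!nvmeq)
                return NULL;
        memset(nvmeq, 0, sizeof(*nvmeq));

        /* CQE array and SQ command array are given to the controller
         * by address, hence the page alignment and zero fill. */
        nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
        if (!nvmeq->cqes)
                goto free_nvmeq;
        memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

        nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
        if (!nvmeq->sq_cmds)
                goto free_cqes;
        memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

        nvmeq->dev = dev;
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;            /* first pass expects phase 1 */
        /* Each queue owns a doorbell pair: SQ at dbs[qid * 2 * stride],
         * CQ one stride after it. */
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        nvmeq->q_depth = depth;
        nvmeq->qid = qid;
        dev->queues[qid] = nvmeq;

        return nvmeq;

free_cqes:
        free((void *)nvmeq->cqes);
free_nvmeq:
        free(nvmeq);

        return NULL;
}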
296 static void nvme_free_queue(struct nvme_queue *nvmeq) in nvme_free_queue() argument
298 free((void *)nvmeq->cqes); in nvme_free_queue()
299 free(nvmeq->sq_cmds); in nvme_free_queue()
300 free(nvmeq); in nvme_free_queue()
308 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_free_queues() local
311 nvme_free_queue(nvmeq); in nvme_free_queues()
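Teardown mirrors allocation: nvme_free_queue() releases the CQE array, the SQ array and the struct itself, and nvme_free_queues() walks the dev->queues[] table. A small sketch of the walker; the loop bounds and the queue_count bookkeeping are assumptions, since only the table lookup and the nvme_free_queue() call appear in the matches:

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
        int i;

        /* Tear down queues from the highest id down to 'lowest'. */
        for (i = dev->queue_count - 1; i >= lowest; i--) {
                struct nvme_queue *nvmeq = dev->queues[i];

                dev->queue_count--;
                dev->queues[i] = NULL;
                nvme_free_queue(nvmeq);
        }
}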
315 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) in nvme_init_queue() argument
317 struct nvme_dev *dev = nvmeq->dev; in nvme_init_queue()
319 nvmeq->sq_tail = 0; in nvme_init_queue()
320 nvmeq->cq_head = 0; in nvme_init_queue()
321 nvmeq->cq_phase = 1; in nvme_init_queue()
322 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_init_queue()
323 memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth)); in nvme_init_queue()
324 flush_dcache_range((ulong)nvmeq->cqes, in nvme_init_queue()
325 (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth)); in nvme_init_queue()
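nvme_init_queue() resets a queue's ring state; all of its nvmeq accesses appear above. A sketch with comments on why the phase starts at 1 and why the zeroed CQEs are flushed:

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
        struct nvme_dev *dev = nvmeq->dev;

        /* Empty SQ, CQ consumer at slot 0, and expect phase 1 for the
         * controller's first pass around the ring. */
        nvmeq->sq_tail = 0;
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];

        /* Zero the CQEs (phase bits become 0) and flush them to memory,
         * so the first entry the controller writes with phase 1 is
         * recognised as new. */
        memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
        flush_dcache_range((ulong)nvmeq->cqes,
                           (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
}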
334 struct nvme_queue *nvmeq; in nvme_configure_admin_queue() local
356 nvmeq = dev->queues[NVME_ADMIN_Q]; in nvme_configure_admin_queue()
357 if (!nvmeq) { in nvme_configure_admin_queue()
358 nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); in nvme_configure_admin_queue()
359 if (!nvmeq) in nvme_configure_admin_queue()
363 aqa = nvmeq->q_depth - 1; in nvme_configure_admin_queue()
375 nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq); in nvme_configure_admin_queue()
376 nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq); in nvme_configure_admin_queue()
382 nvmeq->cq_vector = 0; in nvme_configure_admin_queue()
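The admin queue is not created by a command: its 0-based depth goes into the AQA register and its SQ/CQ base addresses into ASQ/ACQ before the controller is enabled. A sketch of the part of nvme_configure_admin_queue() these matches come from; the CC register setup, the error path and the nvme_enable_ctrl() helper are assumed from context:

static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
        struct nvme_queue *nvmeq;
        u32 aqa;
        int result;

        /* Reuse an existing admin queue or allocate one (qid 0). */
        nvmeq = dev->queues[NVME_ADMIN_Q];
        if (!nvmeq) {
                nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
                if (!nvmeq)
                        return -ENOMEM;
        }

        /* AQA holds the 0-based admin SQ size in the low half and the
         * CQ size in the high half; both share the same depth here. */
        aqa = nvmeq->q_depth - 1;
        aqa |= aqa << 16;
        writel(aqa, &dev->bar->aqa);

        /* The controller learns the admin rings by base address. */
        nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
        nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

        result = nvme_enable_ctrl(dev);         /* assumed helper */
        if (result)
                return result;

        nvmeq->cq_vector = 0;
        nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);

        return result;
}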
395 struct nvme_queue *nvmeq) in nvme_alloc_cq() argument
402 c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes); in nvme_alloc_cq()
404 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in nvme_alloc_cq()
406 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); in nvme_alloc_cq()
412 struct nvme_queue *nvmeq) in nvme_alloc_sq() argument
419 c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds); in nvme_alloc_sq()
421 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in nvme_alloc_sq()
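I/O queues are created through admin commands. The matches show PRP1 carrying the queue's base address, qsize carrying the 0-based depth, and (for the CQ) the interrupt vector. A sketch of the CQ variant; the opcode, flag constants and the admin-submission helper are assumptions, and nvme_alloc_sq() mirrors it with the create_sq fields plus the cqid it completes into:

static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
                         struct nvme_queue *nvmeq)
{
        struct nvme_command c;
        int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

        memset(&c, 0, sizeof(c));
        c.create_cq.opcode = nvme_admin_create_cq;
        /* PRP1 points at the physically contiguous CQE array. */
        c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
        c.create_cq.cqid = cpu_to_le16(qid);
        /* qsize is 0-based, i.e. depth - 1. */
        c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
        c.create_cq.cq_flags = cpu_to_le16(flags);
        c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

        return nvme_submit_admin_cmd(dev, &c, NULL);
}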
498 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) in nvme_create_queue() argument
500 struct nvme_dev *dev = nvmeq->dev; in nvme_create_queue()
503 nvmeq->cq_vector = qid - 1; in nvme_create_queue()
504 result = nvme_alloc_cq(dev, qid, nvmeq); in nvme_create_queue()
508 result = nvme_alloc_sq(dev, qid, nvmeq); in nvme_create_queue()
512 nvme_init_queue(nvmeq, qid); in nvme_create_queue()
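Creating an I/O queue pair is ordered: the CQ first (the SQ needs a CQ to complete into), then the SQ, then a ring-state reset via nvme_init_queue(). A sketch; the real function's error path deletes the partially created queues, which is omitted here:

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
        struct nvme_dev *dev = nvmeq->dev;
        int result;

        nvmeq->cq_vector = qid - 1;

        /* The CQ must exist before the SQ that completes into it. */
        result = nvme_alloc_cq(dev, qid, nvmeq);
        if (result < 0)
                return result;

        result = nvme_alloc_sq(dev, qid, nvmeq);
        if (result < 0)
                return result;  /* real driver also deletes the CQ here */

        nvme_init_queue(nvmeq, qid);

        return result;
}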