Lines Matching refs:nvmeq
141 static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index) in nvme_read_completion_status() argument
143 u64 start = (ulong)&nvmeq->cqes[index]; in nvme_read_completion_status()
148 return le16_to_cpu(readw(&(nvmeq->cqes[index].status))); in nvme_read_completion_status()
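The listing keeps only lines that mention nvmeq, so the cache-maintenance step between lines 143 and 148 is filtered out. A minimal sketch of the likely shape of this helper, assuming the completion entry is invalidated before the status word is read (the stop bound and the invalidate_dcache_range() call are not in the match list):

	/* Sketch: drop stale cache lines covering this CQ entry so the CPU
	 * sees the status the controller just wrote via DMA, then read it. */
	u64 start = (ulong)&nvmeq->cqes[index];
	u64 stop = start + sizeof(struct nvme_completion);

	invalidate_dcache_range(start, stop);

	return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));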
157 static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) in nvme_submit_cmd() argument
159 u16 tail = nvmeq->sq_tail; in nvme_submit_cmd()
161 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); in nvme_submit_cmd()
162 flush_dcache_range((ulong)&nvmeq->sq_cmds[tail], in nvme_submit_cmd()
163 (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd)); in nvme_submit_cmd()
165 if (++tail == nvmeq->q_depth) in nvme_submit_cmd()
167 writel(tail, nvmeq->q_db); in nvme_submit_cmd()
168 nvmeq->sq_tail = tail; in nvme_submit_cmd()
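The wrap of the tail index back to slot 0 does not reference nvmeq and is therefore missing between lines 165 and 167. A sketch of the assumed submit path:

	/* Sketch: copy the command into the next SQ slot, flush it to DRAM,
	 * advance the tail with wrap-around, then ring the SQ tail doorbell. */
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;			/* assumed wrap, filtered from the list */
	writel(tail, nvmeq->q_db);		/* SQ tail doorbell */
	nvmeq->sq_tail = tail;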
171 static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, in nvme_submit_sync_cmd() argument
175 u16 head = nvmeq->cq_head; in nvme_submit_sync_cmd()
176 u16 phase = nvmeq->cq_phase; in nvme_submit_sync_cmd()
182 nvme_submit_cmd(nvmeq, cmd); in nvme_submit_sync_cmd()
187 status = nvme_read_completion_status(nvmeq, head); in nvme_submit_sync_cmd()
200 if (++head == nvmeq->q_depth) { in nvme_submit_sync_cmd()
204 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_submit_sync_cmd()
205 nvmeq->cq_head = head; in nvme_submit_sync_cmd()
206 nvmeq->cq_phase = phase; in nvme_submit_sync_cmd()
212 *result = le32_to_cpu(readl(&(nvmeq->cqes[head].result))); in nvme_submit_sync_cmd()
214 if (++head == nvmeq->q_depth) { in nvme_submit_sync_cmd()
218 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_submit_sync_cmd()
219 nvmeq->cq_head = head; in nvme_submit_sync_cmd()
220 nvmeq->cq_phase = phase; in nvme_submit_sync_cmd()
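Between lines 187 and 200 the phase-bit test, the timeout handling, and the status decoding are filtered out because they do not mention nvmeq. A hedged sketch of how the completion is probably consumed; the 0x1 phase mask and the result read are assumptions, while lines 204 and 218 show the CQ head doorbell sitting one stride past the SQ doorbell:

	/* Sketch: poll the entry at 'head' until its phase bit matches the
	 * queue's expected phase, optionally fetch the DW0 result, then
	 * advance head (flipping the phase on wrap) and ring the CQ doorbell. */
	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x1) == phase)
			break;			/* entry now belongs to the host */
		/* assumed: bail out with an error once the timeout expires */
	}

	if (result)
		*result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;			/* assumed phase flip on wrap */
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;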
235 struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq)); in nvme_alloc_queue() local
236 if (!nvmeq) in nvme_alloc_queue()
238 memset(nvmeq, 0, sizeof(*nvmeq)); in nvme_alloc_queue()
240 nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth)); in nvme_alloc_queue()
241 if (!nvmeq->cqes) in nvme_alloc_queue()
243 memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth)); in nvme_alloc_queue()
245 nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth)); in nvme_alloc_queue()
246 if (!nvmeq->sq_cmds) in nvme_alloc_queue()
248 memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth)); in nvme_alloc_queue()
250 nvmeq->dev = dev; in nvme_alloc_queue()
252 nvmeq->cq_head = 0; in nvme_alloc_queue()
253 nvmeq->cq_phase = 1; in nvme_alloc_queue()
254 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_alloc_queue()
255 nvmeq->q_depth = depth; in nvme_alloc_queue()
256 nvmeq->qid = qid; in nvme_alloc_queue()
258 dev->queues[qid] = nvmeq; in nvme_alloc_queue()
260 return nvmeq; in nvme_alloc_queue()
263 free((void *)nvmeq->cqes); in nvme_alloc_queue()
265 free(nvmeq); in nvme_alloc_queue()
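Lines 263 and 265 are the tail of an error path, but the goto labels and the final return do not match nvmeq, so the unwind order is easy to misread. A sketch of the assumed structure, with label names invented for illustration:

	/* Sketch: two aligned allocations, unwound in reverse order on failure. */
	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
	if (!nvmeq->cqes)
		goto free_nvmeq;
	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_cqes;
	/* field initialisation as listed above (lines 250-258) */
	return nvmeq;

 free_cqes:
	free((void *)nvmeq->cqes);
 free_nvmeq:
	free(nvmeq);
	return NULL;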
309 static void nvme_free_queue(struct nvme_queue *nvmeq) in nvme_free_queue() argument
311 free((void *)nvmeq->cqes); in nvme_free_queue()
312 free(nvmeq->sq_cmds); in nvme_free_queue()
313 free(nvmeq); in nvme_free_queue()
321 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_free_queues() local
324 nvme_free_queue(nvmeq); in nvme_free_queues()
328 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) in nvme_init_queue() argument
330 struct nvme_dev *dev = nvmeq->dev; in nvme_init_queue()
332 nvmeq->sq_tail = 0; in nvme_init_queue()
333 nvmeq->cq_head = 0; in nvme_init_queue()
334 nvmeq->cq_phase = 1; in nvme_init_queue()
335 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_init_queue()
336 memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth)); in nvme_init_queue()
337 flush_dcache_range((ulong)nvmeq->cqes, in nvme_init_queue()
338 (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth)); in nvme_init_queue()
347 struct nvme_queue *nvmeq; in nvme_configure_admin_queue() local
369 nvmeq = dev->queues[NVME_ADMIN_Q]; in nvme_configure_admin_queue()
370 if (!nvmeq) { in nvme_configure_admin_queue()
371 nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); in nvme_configure_admin_queue()
372 if (!nvmeq) in nvme_configure_admin_queue()
376 aqa = nvmeq->q_depth - 1; in nvme_configure_admin_queue()
388 nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq); in nvme_configure_admin_queue()
389 nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq); in nvme_configure_admin_queue()
395 nvmeq->cq_vector = 0; in nvme_configure_admin_queue()
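Line 376 shows only half of the AQA programming: per the NVMe spec the register carries the admin SQ size in bits 11:0 and the admin CQ size in bits 27:16, both as depth - 1. A sketch of the assumed sequence around lines 376-389 (the aqa shift and the bar->aqa write do not reference nvmeq and so are not shown above):

	/* Sketch: program the admin queue sizes, then point ASQ/ACQ at the
	 * submission and completion rings of dev->queues[NVME_ADMIN_Q]. */
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;			/* assumed: same size for SQ and CQ */

	writel(aqa, &dev->bar->aqa);		/* register name assumed */
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);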
408 struct nvme_queue *nvmeq) in nvme_alloc_cq() argument
415 c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes); in nvme_alloc_cq()
417 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in nvme_alloc_cq()
419 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); in nvme_alloc_cq()
425 struct nvme_queue *nvmeq) in nvme_alloc_sq() argument
432 c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds); in nvme_alloc_sq()
434 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in nvme_alloc_sq()
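Only the nvmeq-derived fields of the create-queue commands survive the filter; the opcode, queue id, and flag assignments are missing. A sketch of how the Create I/O Completion Queue command is likely assembled; the opcode enum, the cqid/cq_flags field names, and the flag constants follow the common NVMe command layout and are assumptions here:

	/* Sketch: build an Admin Create I/O Completion Queue command for qid. */
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;		/* assumed */
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);			/* assumed */
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG |
					   NVME_CQ_IRQ_ENABLED);	/* assumed */
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);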
511 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) in nvme_create_queue() argument
513 struct nvme_dev *dev = nvmeq->dev; in nvme_create_queue()
516 nvmeq->cq_vector = qid - 1; in nvme_create_queue()
517 result = nvme_alloc_cq(dev, qid, nvmeq); in nvme_create_queue()
521 result = nvme_alloc_sq(dev, qid, nvmeq); in nvme_create_queue()
525 nvme_init_queue(nvmeq, qid); in nvme_create_queue()