/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "qmgr.h"

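/*
 * Check whether the command queue has space for a command of the given
 * size.  Head/tail pointers are read back from the falcon; if the command
 * does not fit between the current head and the end of the queue, *rewind
 * is set so the caller knows the write position must wrap to the start.
 */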
static bool
nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)
{
	u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
	u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
	u32 free;

	size = ALIGN(size, QUEUE_ALIGNMENT);

	if (head >= tail) {
		free = cmdq->offset + cmdq->size - head;
		free -= HDR_SIZE;

		if (size > free) {
			*rewind = true;
			head = cmdq->offset;
		}
	}

	if (head < tail)
		free = tail - head - 1;

	return size <= free;
}

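/*
 * Copy command data into the falcon's DMEM at the current write position,
 * then advance the position by the queue-aligned size.
 */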
static void
nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
{
	struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
	nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0);
	cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
}

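/*
 * Push a REWIND command so the falcon wraps its read pointer, then reset
 * our write position to the start of the queue.
 */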
static void
nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
{
	struct nvfw_falcon_cmd cmd;

	cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND;
	cmd.size = sizeof(cmd);
	nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size);

	cmdq->position = cmdq->offset;
}

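/*
 * Begin writing a command of the given size.  Returns with cmdq->mutex held
 * on success; the caller must finish with nvkm_falcon_cmdq_close().  Fails
 * with -EAGAIN if the queue currently lacks space.
 */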
static int
nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size)
{
	struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
	bool rewind = false;

	mutex_lock(&cmdq->mutex);

	if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) {
		FLCNQ_DBG(cmdq, "queue full");
		mutex_unlock(&cmdq->mutex);
		return -EAGAIN;
	}

	cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg);

	if (rewind)
		nvkm_falcon_cmdq_rewind(cmdq);

	return 0;
}

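/*
 * Publish the command written since nvkm_falcon_cmdq_open() by updating the
 * queue's head register, and release the queue mutex.
 */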
static void
nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq)
{
	nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position);
	mutex_unlock(&cmdq->mutex);
}

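/*
 * Write a command to the queue, retrying for up to two seconds while the
 * queue is full before giving up.
 */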
static int
nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd)
{
	static unsigned timeout = 2000;
	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
	int ret = -EAGAIN;

	while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
		ret = nvkm_falcon_cmdq_open(cmdq, cmd->size);
	if (ret) {
		FLCNQ_ERR(cmdq, "timeout waiting for queue space");
		return ret;
	}

	nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size);
	nvkm_falcon_cmdq_close(cmdq);
	return ret;
}

/* specifies that we want to know the command status in the answer message */
#define CMD_FLAGS_STATUS BIT(0)
/* specifies that we want an interrupt when the answer message is queued */
#define CMD_FLAGS_INTR BIT(1)

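/*
 * Send a command to the falcon and register a handler for its reply.
 *
 * Waits up to a second for the queue to become ready, acquires a sequence
 * ID used to match the reply to this command, stamps the command with the
 * STATUS/INTR flags, and writes it to the queue.  A zero timeout makes the
 * call asynchronous: cb runs when the reply arrives.  A non-zero timeout
 * (in jiffies) makes the call block for the reply and return its result.
 *
 * Illustrative call, with a hypothetical callback and prepared command:
 *
 *	ret = nvkm_falcon_cmdq_send(cmdq, cmd, my_cb, my_priv,
 *				    msecs_to_jiffies(1000));
 */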
int
nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd,
		      nvkm_falcon_qmgr_callback cb, void *priv,
		      unsigned long timeout)
{
	struct nvkm_falcon_qmgr_seq *seq;
	int ret;

	if (!wait_for_completion_timeout(&cmdq->ready,
					 msecs_to_jiffies(1000))) {
		FLCNQ_ERR(cmdq, "timeout waiting for queue ready");
		return -ETIMEDOUT;
	}

	seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr);
	if (IS_ERR(seq))
		return PTR_ERR(seq);

	cmd->seq_id = seq->id;
	cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;

	seq->state = SEQ_STATE_USED;
	seq->async = !timeout;
	seq->callback = cb;
	seq->priv = priv;

	ret = nvkm_falcon_cmdq_write(cmdq, cmd);
	if (ret) {
		seq->state = SEQ_STATE_PENDING;
		nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
		return ret;
	}

	if (!seq->async) {
		if (!wait_for_completion_timeout(&seq->done, timeout)) {
			FLCNQ_ERR(cmdq, "timeout waiting for reply");
			return -ETIMEDOUT;
		}
		ret = seq->result;
		nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
	}

	return ret;
}

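/*
 * Mark the queue not-ready again, so that subsequent senders block until
 * nvkm_falcon_cmdq_init() is called for it once more.
 */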
void
nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *cmdq)
{
	reinit_completion(&cmdq->ready);
}

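/*
 * Record the DMEM offset/size and head/tail register addresses for the
 * queue at the given index, and signal waiters that it is ready for use.
 */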
void
nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq,
		      u32 index, u32 offset, u32 size)
{
	const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func;

	cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride;
	cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride;
	cmdq->offset = offset;
	cmdq->size = size;
	complete_all(&cmdq->ready);

	FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x",
		  index, cmdq->offset, cmdq->size);
}

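/* Free a command queue and clear the caller's pointer to it. */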
void
nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq)
{
	struct nvkm_falcon_cmdq *cmdq = *pcmdq;
	if (cmdq) {
		kfree(*pcmdq);
		*pcmdq = NULL;
	}
}

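/*
 * Allocate a command queue for the given queue manager, initialising its
 * lock and ready-completion.  The queue is not usable until
 * nvkm_falcon_cmdq_init() is called.
 */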
int
nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
		     struct nvkm_falcon_cmdq **pcmdq)
{
	struct nvkm_falcon_cmdq *cmdq = *pcmdq;

	if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL)))
		return -ENOMEM;

	cmdq->qmgr = qmgr;
	cmdq->name = name;
	mutex_init(&cmdq->mutex);
	init_completion(&cmdq->ready);
	return 0;
}