// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_cmd.h"

/**
 * DOC: GSI Transactions
 *
 * A GSI transaction abstracts the behavior of a GSI channel by representing
 * everything about a related group of IPA commands in a single structure.
 * (A "command" in this sense is either a data transfer or an IPA immediate
 * command.)  Most details of interaction with the GSI hardware are managed
 * by the GSI transaction core, allowing users to simply describe commands
 * to be performed.  When a transaction has completed, a callback function
 * (dependent on the type of endpoint associated with the channel) allows
 * cleanup of resources associated with the transaction.
 *
 * To perform a command (or set of them), a user of the GSI transaction
 * interface allocates a transaction, indicating the number of TREs required
 * (one per command).  If sufficient TREs are available, they are reserved
 * for use in the transaction and the allocation succeeds.  This way
 * exhaustion of the available TREs in a channel ring is detected as early
 * as possible.  All resources required to complete a transaction are
 * allocated at transaction allocation time.
 *
 * Commands performed as part of a transaction are represented in an array
 * of Linux scatterlist structures.  This array is allocated with the
 * transaction, and its entries are initialized using standard scatterlist
 * functions (such as sg_set_buf() or skb_to_sgvec()).
 *
 * Once a transaction's scatterlist structures have been initialized, the
 * transaction is committed.  The caller is responsible for mapping buffers
 * for DMA if necessary, and this should be done *before* allocating
 * the transaction.  Between a successful allocation and commit of a
 * transaction no errors should occur.
 *
 * Committing transfers ownership of the entire transaction to the GSI
 * transaction core.  The GSI transaction code formats the content of
 * the scatterlist array into the channel ring buffer and informs the
 * hardware that new TREs are available to process.
 *
 * The last TRE in each transaction is marked to interrupt the AP when the
 * GSI hardware has completed it.  Because transfers described by TREs are
 * performed strictly in order, signaling the completion of just the last
 * TRE in the transaction is sufficient to indicate the full transaction
 * is complete.
 *
 * When a transaction is complete, ipa_gsi_trans_complete() is called by the
 * GSI code into the IPA layer, allowing it to perform any final cleanup
 * required before the transaction is freed.
 */

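/* For illustration only: a minimal sketch of that lifecycle, modeled on
 * how RX buffers are posted.  The channel_id, page, size, and offset
 * values here are hypothetical:
 *
 *	struct gsi_trans *trans;
 *	int ret;
 *
 *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_FROM_DEVICE);
 *	if (!trans)
 *		return -EBUSY;		// too few TREs currently available
 *
 *	ret = gsi_trans_page_add(trans, page, size, offset);
 *	if (ret) {
 *		gsi_trans_free(trans);	// releases the reserved TRE
 *		return ret;
 *	}
 *	gsi_trans_commit(trans, true);	// ownership passes to the GSI core
 */
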
/* Hardware values representing a transfer element type */
enum gsi_tre_type {
	GSI_RE_XFER	= 0x2,
	GSI_RE_IMMD_CMD	= 0x3,
};

/* An entry in a channel ring */
struct gsi_tre {
	__le64 addr;		/* DMA address */
	__le16 len_opcode;	/* length in bytes or enum IPA_CMD_* */
	__le16 reserved;
	__le32 flags;		/* TRE_FLAGS_* */
};

/* gsi_tre->flags mask values (in CPU byte order) */
#define TRE_FLAGS_CHAIN_FMASK	GENMASK(0, 0)
#define TRE_FLAGS_IEOT_FMASK	GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK	GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK	GENMASK(23, 16)

int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
			u32 max_alloc)
{
	void *virt;

#ifdef IPA_VALIDATE
	if (!size || size % 8)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;
#endif /* IPA_VALIDATE */

	/* By allocating a few extra entries in our pool (one less
	 * than the maximum number that will be requested in a
	 * single allocation), we can always satisfy requests without
	 * ever worrying about straddling the end of the pool array.
	 * If there aren't enough entries starting at the free index,
	 * we just allocate free entries from the beginning of the pool.
	 */
	virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	/* If the allocator gave us any extra memory, use it */
	pool->count = ksize(pool->base) / size;
	pool->free = 0;
	pool->max_alloc = max_alloc;
	pool->size = size;
	pool->addr = 0;		/* Only used for DMA pools */

	return 0;
}

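/* A sketch of typical (non-DMA) pool usage; "struct foo" and the counts
 * are hypothetical.  Only initialization can fail; once the pool exists,
 * allocations always succeed, which is what lets transaction allocation
 * be guaranteed after the TREs have been reserved:
 *
 *	struct gsi_trans_pool pool;
 *	struct foo *entries;
 *
 *	if (gsi_trans_pool_init(&pool, sizeof(struct foo), 64, 8))
 *		return -ENOMEM;
 *	entries = gsi_trans_pool_alloc(&pool, 8);	// 8 zeroed entries
 *	...
 *	gsi_trans_pool_exit(&pool);
 */
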
void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
{
	kfree(pool->base);
	memset(pool, 0, sizeof(*pool));
}

/* Home-grown DMA pool.  This way we can preallocate enough entries
 * (based on the channel's TRE count) to guarantee allocations will
 * succeed.  Even though we specify max_alloc (and it can be more than
 * one), we only allow allocation of a single element from a DMA pool.
 */
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
			    size_t size, u32 count, u32 max_alloc)
{
	size_t total_size;
	dma_addr_t addr;
	void *virt;

#ifdef IPA_VALIDATE
	if (!size || size % 8)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;
#endif /* IPA_VALIDATE */

	/* Don't let allocations cross a power-of-two boundary */
	size = __roundup_pow_of_two(size);
	total_size = (count + max_alloc - 1) * size;

	/* The allocator will give us a power-of-2 number of pages, so
	 * compute the total size we'll actually get and use all of it.
	 * That way we won't waste any memory the allocator provides
	 * beyond the required space.
	 *
	 * Note that gsi_trans_pool_exit_dma() assumes the total allocated
	 * size is exactly (count * size).
	 */
	total_size = PAGE_SIZE << get_order(total_size);

	virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	pool->count = total_size / size;
	pool->free = 0;
	pool->size = size;
	pool->max_alloc = max_alloc;
	pool->addr = addr;

	return 0;
}

void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
	size_t total_size = pool->count * pool->size;

	dma_free_coherent(dev, total_size, pool->base, pool->addr);
	memset(pool, 0, sizeof(*pool));
}

/* Return the byte offset of the next free entry in the pool */
static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
	u32 offset;

	/* assert(count > 0); */
	/* assert(count <= pool->max_alloc); */

	/* Allocate from beginning if wrap would occur */
	if (count > pool->count - pool->free)
		pool->free = 0;

	offset = pool->free * pool->size;
	pool->free += count;
	memset(pool->base + offset, 0, count * pool->size);

	return offset;
}

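/* A worked example of the wrap handling above, with hypothetical numbers:
 * a pool initialized with count 8 and max_alloc 3 has at least
 * 8 + 3 - 1 = 10 entries.  If free is 8 and 3 entries are requested,
 * only 10 - 8 = 2 remain at the end of the array, so free is reset and
 * the block at offset 0 is returned instead.  This is safe because the
 * pools are sized so the entries at the start have been retired by the
 * time the free index wraps back around to them.
 */
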
/* Allocate a contiguous block of zeroed entries from a pool */
void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
{
	return pool->base + gsi_trans_pool_alloc_common(pool, count);
}

/* Allocate a single zeroed entry from a DMA pool */
void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
{
	u32 offset = gsi_trans_pool_alloc_common(pool, 1);

	*addr = pool->addr + offset;

	return pool->base + offset;
}

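/* Because a DMA pool is a single coherent allocation, an entry's virtual
 * and DMA addresses always share one offset from their respective bases,
 * so for every entry returned above:
 *
 *	virt - pool->base == *addr - pool->addr
 */
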
/* Return the pool element that immediately follows the one given.
 * This only works if elements are allocated one at a time.
 */
void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
{
	void *end = pool->base + pool->count * pool->size;

	/* assert(element >= pool->base); */
	/* assert(element < end); */
	/* assert(pool->max_alloc == 1); */
	element += pool->size;

	return element < end ? element : pool->base;
}

/* Map a given ring entry index to the transaction associated with it */
static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
				  struct gsi_trans *trans)
{
	/* Note: index *must* be used modulo the ring count here */
	channel->trans_info.map[index % channel->tre_ring.count] = trans;
}

/* Return the transaction mapped to a given ring entry */
struct gsi_trans *
gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return channel->trans_info.map[index % channel->tre_ring.count];
}

/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
	return list_first_entry_or_null(&channel->trans_info.complete,
					struct gsi_trans, links);
}

/* Move a transaction from the allocated list to the pending list */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->pending);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction and all of its predecessors from the pending list
 * to the completed list.
 */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct list_head list;

	spin_lock_bh(&trans_info->spinlock);

	/* Move this transaction and all predecessors to completed list */
	list_cut_position(&list, &trans_info->pending, &trans->links);
	list_splice_tail(&list, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction from the completed list to the polled list */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->polled);

	spin_unlock_bh(&trans_info->spinlock);
}

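/* Together these helpers (and gsi_trans_free()) move a transaction
 * through the trans_info lists in one direction only:
 *
 *	allocated -> pending (committed) -> complete (hardware done)
 *		  -> polled (NAPI) -> freed (last reference dropped)
 */
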
/* Reserve some number of TREs on a channel.  Returns true if successful */
static bool
gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
{
	int avail = atomic_read(&trans_info->tre_avail);
	int new;

	do {
		new = avail - (int)tre_count;
		if (unlikely(new < 0))
			return false;
	} while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));

	return true;
}

/* Release previously-reserved TRE entries to a channel */
static void
gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
{
	atomic_add(tre_count, &trans_info->tre_avail);
}

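/* The cmpxchg loop above makes TRE reservation lock-free.  For example,
 * with tre_avail 5 and two CPUs concurrently reserving 3 and 4 TREs,
 * only one atomic_try_cmpxchg() can succeed; the loser retries against
 * the updated count (2 or 1), finds its result would go negative, and
 * fails.  The two reservations can never both succeed.
 */
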
/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
					  u32 tre_count,
					  enum dma_data_direction direction)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	struct gsi_trans *trans;

	/* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */

	trans_info = &channel->trans_info;

	/* We reserve the TREs now, but consume them at commit time.
	 * If there aren't enough available, we're done.
	 */
	if (!gsi_trans_tre_reserve(trans_info, tre_count))
		return NULL;

	/* Allocate and initialize non-zero fields in the transaction */
	trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
	trans->gsi = gsi;
	trans->channel_id = channel_id;
	trans->tre_count = tre_count;
	init_completion(&trans->completion);

	/* Allocate the scatterlist and (if requested) info entries. */
	trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
	sg_init_marker(trans->sgl, tre_count);

	trans->direction = direction;

	spin_lock_bh(&trans_info->spinlock);

	list_add_tail(&trans->links, &trans_info->alloc);

	spin_unlock_bh(&trans_info->spinlock);

	refcount_set(&trans->refcount, 1);

	return trans;
}

/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
	refcount_t *refcount = &trans->refcount;
	struct gsi_trans_info *trans_info;
	bool last;

	/* We must hold the lock to release the last reference */
	if (refcount_dec_not_one(refcount))
		return;

	trans_info = &trans->gsi->channel[trans->channel_id].trans_info;

	spin_lock_bh(&trans_info->spinlock);

	/* Reference might have been added before we got the lock */
	last = refcount_dec_and_test(refcount);
	if (last)
		list_del(&trans->links);

	spin_unlock_bh(&trans_info->spinlock);

	if (!last)
		return;

	ipa_gsi_trans_release(trans);

	/* Releasing the reserved TREs implicitly frees the sgl[] and
	 * (if present) info[] arrays, plus the transaction itself.
	 */
	gsi_trans_tre_release(trans_info, trans->tre_count);
}

/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
		       dma_addr_t addr, enum dma_data_direction direction,
		       enum ipa_cmd_opcode opcode)
{
	struct ipa_cmd_info *info;
	u32 which = trans->used++;
	struct scatterlist *sg;

	/* assert(which < trans->tre_count); */

	/* Commands are quite different from data transfer requests.
	 * Their payloads come from a pool whose memory is allocated
	 * using dma_alloc_coherent().  We therefore do *not* map them
	 * for DMA (unlike what we do for pages and skbs).
	 *
	 * When a transaction completes, the SGL is normally unmapped.
	 * A command transaction has direction DMA_NONE, which tells
	 * gsi_trans_complete() to skip the unmapping step.
	 *
	 * The only things we use directly in a command scatter/gather
	 * entry are the DMA address and length.  We still need the SG
	 * table flags to be maintained though, so assign a NULL page
	 * pointer for that purpose.
	 */
	sg = &trans->sgl[which];
	sg_assign_page(sg, NULL);
	sg_dma_address(sg) = addr;
	sg_dma_len(sg) = size;

	info = &trans->info[which];
	info->opcode = opcode;
	info->direction = direction;
}

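/* An illustrative sketch of an immediate-command user; the payload
 * union, pool name, and opcode below are hypothetical stand-ins for
 * what the IPA command code actually defines:
 *
 *	union cmd_payload *payload;
 *	dma_addr_t addr;
 *
 *	payload = gsi_trans_pool_alloc_dma(&cmd_pool, &addr);
 *	... fill in *payload ...
 *	gsi_trans_cmd_add(trans, payload, sizeof(*payload), addr,
 *			  DMA_TO_DEVICE, opcode);
 */
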
/* Add a page transfer to a transaction.  It will fill the only TRE. */
int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
		       u32 offset)
{
	struct scatterlist *sg = &trans->sgl[0];
	int ret;

	/* assert(trans->tre_count == 1); */
	/* assert(!trans->used); */

	sg_set_page(sg, page, size, offset);
	ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used++;		/* Transaction now owns the (DMA mapped) page */

	return 0;
}

/* Add an SKB transfer to a transaction.  No other TREs will be used. */
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
	struct scatterlist *sg = &trans->sgl[0];
	u32 used;
	int ret;

	/* assert(trans->tre_count == 1); */
	/* assert(!trans->used); */

	/* skb->len will not be 0 (checked early) */
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (ret < 0)
		return ret;
	used = ret;

	ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used += used;	/* Transaction now owns the (DMA mapped) skb */

	return 0;
}

/* Compute the length/opcode value to use for a TRE */
static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
{
	return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
				      : cpu_to_le16((u16)opcode);
}

/* Compute the flags value to use for a given TRE */
static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
{
	enum gsi_tre_type tre_type;
	u32 tre_flags;

	tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
	tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);

	/* Last TRE contains interrupt flags */
	if (last_tre) {
		/* All transactions end in a transfer completion interrupt */
		tre_flags |= TRE_FLAGS_IEOT_FMASK;
		/* Don't interrupt when outbound commands are acknowledged */
		if (bei)
			tre_flags |= TRE_FLAGS_BEI_FMASK;
	} else {	/* All others indicate there's more to come */
		tre_flags |= TRE_FLAGS_CHAIN_FMASK;
	}

	return cpu_to_le32(tre_flags);
}

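/* For reference, the values the function above produces for data TREs
 * (in CPU byte order, before the cpu_to_le32() conversion):
 *
 *	non-last TRE:		0x00020001	(XFER type | CHAIN)
 *	last TRE:		0x00020200	(XFER type | IEOT)
 *	last TRE with BEI:	0x00020600	(XFER type | IEOT | BEI)
 */
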
static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
			       u32 len, bool last_tre, bool bei,
			       enum ipa_cmd_opcode opcode)
{
	struct gsi_tre tre;

	tre.addr = cpu_to_le64(addr);
	tre.len_opcode = gsi_tre_len_opcode(opcode, len);
	tre.reserved = 0;
	tre.flags = gsi_tre_flags(last_tre, bei, opcode);

	/* ARM64 can write 16 bytes as a unit with a single instruction.
	 * Doing the assignment this way is an attempt to make that happen.
	 */
	*dest_tre = tre;
}

/**
 * __gsi_trans_commit() - Common GSI transaction commit code
 * @trans:	Transaction to commit
 * @ring_db:	Whether to tell the hardware about these queued transfers
 *
 * Formats channel ring TRE entries based on the content of the scatterlist.
 * Maps a transaction pointer to the last ring entry used for the transaction,
 * so it can be recovered when it completes.  Moves the transaction to the
 * pending list.  Finally, updates the channel ring pointer and optionally
 * rings the doorbell.
 */
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_ring *ring = &channel->tre_ring;
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	bool bei = channel->toward_ipa;
	struct ipa_cmd_info *info;
	struct gsi_tre *dest_tre;
	struct scatterlist *sg;
	u32 byte_count = 0;
	u32 avail;
	u32 i;

	/* assert(trans->used > 0); */

	/* Consume the entries.  If we cross the end of the ring while
	 * filling them we'll switch to the beginning to finish.
	 * If there is no info array we're doing a simple data
	 * transfer request, whose opcode is IPA_CMD_NONE.
	 */
	info = trans->info ? &trans->info[0] : NULL;
	avail = ring->count - ring->index % ring->count;
	dest_tre = gsi_ring_virt(ring, ring->index);
	for_each_sg(trans->sgl, sg, trans->used, i) {
		bool last_tre = i == trans->used - 1;
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		byte_count += len;
		if (!avail--)
			dest_tre = gsi_ring_virt(ring, 0);
		if (info)
			opcode = info++->opcode;

		gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
		dest_tre++;
	}
	ring->index += trans->used;

	if (channel->toward_ipa) {
		/* We record TX bytes when they are sent */
		trans->len = byte_count;
		trans->trans_count = channel->trans_count;
		trans->byte_count = channel->byte_count;
		channel->trans_count++;
		channel->byte_count += byte_count;
	}

	/* Associate the last TRE with the transaction */
	gsi_channel_trans_map(channel, ring->index - 1, trans);

	gsi_trans_move_pending(trans);

	/* Ring doorbell if requested, or if all TREs are allocated */
	if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
		/* Report what we're handing off to hardware for TX channels */
		if (channel->toward_ipa)
			gsi_channel_tx_queued(channel);
		gsi_channel_doorbell(channel);
	}
}

/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	if (trans->used)
		__gsi_trans_commit(trans, ring_db);
	else
		gsi_trans_free(trans);
}

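/* The ring_db argument exists so callers can batch doorbell rings.  As
 * a sketch (the real decision is made by the caller, e.g. the IPA
 * endpoint code for TX), a transmit path might defer the doorbell while
 * the network stack indicates more packets are coming:
 *
 *	gsi_trans_commit(trans, !netdev_xmit_more());
 */
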
/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
	if (!trans->used)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	wait_for_completion(&trans->completion);

out_trans_free:
	gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait for it to complete, with timeout */
int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
				  unsigned long timeout)
{
	unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
	unsigned long remaining = 1;	/* In case of empty transaction */

	if (!trans->used)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	remaining = wait_for_completion_timeout(&trans->completion,
						timeout_jiffies);
out_trans_free:
	gsi_trans_free(trans);

	return remaining ? 0 : -ETIMEDOUT;
}

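/* A hypothetical caller, giving the hardware 50 milliseconds (the
 * timeout argument is in milliseconds) to complete the transaction:
 *
 *	ret = gsi_trans_commit_wait_timeout(trans, 50);
 *	if (ret)	// -ETIMEDOUT if the completion never arrived
 *		...
 */
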
/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
	/* If the entire SGL was mapped when added, unmap it now */
	if (trans->direction != DMA_NONE)
		dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
			     trans->direction);

	ipa_gsi_trans_complete(trans);

	complete(&trans->completion);

	gsi_trans_free(trans);
}

/* Cancel a channel's pending transactions */
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;
	bool cancelled;

	/* channel->gsi->mutex is held by caller */
	spin_lock_bh(&trans_info->spinlock);

	cancelled = !list_empty(&trans_info->pending);
	list_for_each_entry(trans, &trans_info->pending, links)
		trans->cancelled = true;

	list_splice_tail_init(&trans_info->pending, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);

	/* Schedule NAPI polling to complete the cancelled transactions */
	if (cancelled)
		napi_schedule(&channel->napi);
}

/* Issue a command to read a single byte from a channel */
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_ring *ring = &channel->tre_ring;
	struct gsi_trans_info *trans_info;
	struct gsi_tre *dest_tre;

	trans_info = &channel->trans_info;

	/* First reserve the TRE, if possible */
	if (!gsi_trans_tre_reserve(trans_info, 1))
		return -EBUSY;

	/* Now fill the reserved TRE and tell the hardware */

	dest_tre = gsi_ring_virt(ring, ring->index);
	gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);

	ring->index++;
	gsi_channel_doorbell(channel);

	return 0;
}

/* Mark a gsi_trans_read_byte() request done */
void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	gsi_trans_tre_release(&channel->trans_info, 1);
}

/* Initialize a channel's GSI transaction info */
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	u32 tre_max;
	int ret;

	/* Ensure the size of a channel element is what's expected */
	BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);

	/* The map array is used to determine what transaction is associated
	 * with a TRE that the hardware reports has completed.  We need one
	 * map entry per TRE.
	 */
	trans_info = &channel->trans_info;
	trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
				  GFP_KERNEL);
	if (!trans_info->map)
		return -ENOMEM;

	/* We can't use more TREs than are available in the ring.  This
	 * limits the number of transactions that can be outstanding.
	 * Worst case is one TRE per transaction (but we actually limit
	 * it to something a little less than that).  We allocate resources
	 * for transactions (including transaction structures) based on
	 * this maximum number.
	 */
	tre_max = gsi_channel_tre_max(channel->gsi, channel_id);

	/* Transactions are allocated one at a time. */
	ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
				  tre_max, 1);
	if (ret)
		goto err_kfree;

	/* A transaction uses a scatterlist array to represent the data
	 * transfers implemented by the transaction.  Each scatterlist
	 * element is used to fill a single TRE when the transaction is
	 * committed.  So we need as many scatterlist elements as the
	 * maximum number of TREs that can be outstanding.
	 *
	 * All TREs in a transaction must fit within the channel's TLV FIFO,
	 * so the number of entries in that FIFO is the most a single
	 * transaction can allocate.
	 */
	ret = gsi_trans_pool_init(&trans_info->sg_pool,
				  sizeof(struct scatterlist),
				  tre_max, channel->tlv_count);
	if (ret)
		goto err_trans_pool_exit;

	/* Finally, the tre_avail field is what ultimately limits the number
	 * of outstanding transactions and their resources.  A transaction
	 * allocation succeeds only if the TREs available are sufficient for
	 * what the transaction might need.  Transaction resource pools are
	 * sized based on the maximum number of outstanding TREs, so there
	 * will always be resources available if there are TREs available.
	 */
	atomic_set(&trans_info->tre_avail, tre_max);

	spin_lock_init(&trans_info->spinlock);
	INIT_LIST_HEAD(&trans_info->alloc);
	INIT_LIST_HEAD(&trans_info->pending);
	INIT_LIST_HEAD(&trans_info->complete);
	INIT_LIST_HEAD(&trans_info->polled);

	return 0;

err_trans_pool_exit:
	gsi_trans_pool_exit(&trans_info->pool);
err_kfree:
	kfree(trans_info->map);

	dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
		ret, channel_id);

	return ret;
}

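/* To make the sizing concrete with hypothetical numbers: if
 * gsi_channel_tre_max() reports 256 and the channel's TLV FIFO holds 16
 * entries, the transaction pool supports 256 single-entry allocations,
 * the scatterlist pool holds 256 entries handed out at most 16 at a
 * time, and tre_avail starts at 256.  Any mix of transactions whose TRE
 * counts total at most 256 (each individually at most 16) can then be
 * outstanding without any allocation failing.
 */
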
/* Inverse of gsi_channel_trans_init() */
void gsi_channel_trans_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;

	gsi_trans_pool_exit(&trans_info->sg_pool);
	gsi_trans_pool_exit(&trans_info->pool);
	kfree(trans_info->map);
}