// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP DMA I/F functions
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ishtp-dev.h"
#include "client.h"

/**
 * ishtp_cl_alloc_dma_buf() - Allocate DMA RX and TX buffer
 * @dev: ishtp device
 *
 * Allocate RX and TX DMA buffers once during bus setup.
 * It allocates 1 MB RX and TX DMA buffers, which are divided
 * into slots.
 */
void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev)
{
	dma_addr_t h;

	if (dev->ishtp_host_dma_tx_buf)
		return;

	dev->ishtp_host_dma_tx_buf_size = 1024*1024;
	dev->ishtp_host_dma_rx_buf_size = 1024*1024;

	/* Allocate Tx buffer and init usage bitmap */
	dev->ishtp_host_dma_tx_buf = dma_alloc_coherent(dev->devc,
					dev->ishtp_host_dma_tx_buf_size,
					&h, GFP_KERNEL);
	if (dev->ishtp_host_dma_tx_buf)
		dev->ishtp_host_dma_tx_buf_phys = h;

	dev->ishtp_dma_num_slots = dev->ishtp_host_dma_tx_buf_size /
					DMA_SLOT_SIZE;

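	/*
	 * The Tx map keeps one byte per DMA_SLOT_SIZE slot: 0 means the
	 * slot is free, non-zero means it is held by an in-flight message.
	 */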
	dev->ishtp_dma_tx_map = kcalloc(dev->ishtp_dma_num_slots,
					sizeof(uint8_t),
					GFP_KERNEL);
	spin_lock_init(&dev->ishtp_dma_tx_lock);

	/* Allocate Rx buffer */
	dev->ishtp_host_dma_rx_buf = dma_alloc_coherent(dev->devc,
					dev->ishtp_host_dma_rx_buf_size,
					&h, GFP_KERNEL);

	if (dev->ishtp_host_dma_rx_buf)
		dev->ishtp_host_dma_rx_buf_phys = h;
}

/**
 * ishtp_cl_free_dma_buf() - Free DMA RX and TX buffer
 * @dev: ishtp device
 *
 * Free the DMA buffers when all clients are released. This only
 * happens in the error path of the ISH built-in driver model.
 */
void ishtp_cl_free_dma_buf(struct ishtp_device *dev)
{
	dma_addr_t h;

	if (dev->ishtp_host_dma_tx_buf) {
		h = dev->ishtp_host_dma_tx_buf_phys;
		dma_free_coherent(dev->devc, dev->ishtp_host_dma_tx_buf_size,
				  dev->ishtp_host_dma_tx_buf, h);
	}

	if (dev->ishtp_host_dma_rx_buf) {
		h = dev->ishtp_host_dma_rx_buf_phys;
		dma_free_coherent(dev->devc, dev->ishtp_host_dma_rx_buf_size,
				  dev->ishtp_host_dma_rx_buf, h);
	}

	kfree(dev->ishtp_dma_tx_map);
	dev->ishtp_host_dma_tx_buf = NULL;
	dev->ishtp_host_dma_rx_buf = NULL;
	dev->ishtp_dma_tx_map = NULL;
}

/**
 * ishtp_cl_get_dma_send_buf() - Get a DMA memory slot
 * @dev: ishtp device
 * @size: Size of memory to get
 *
 * Find and return a free address of "size" bytes in the DMA Tx buffer.
 * The function marks this address as "in use".
 *
 * Return: NULL when no free buffer is available, else an address to copy to
 */
void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
				uint32_t size)
{
	unsigned long flags;
	int i, j, free;
	/* round up: an additional slot is needed if there is a remainder */
	int required_slots = (size / DMA_SLOT_SIZE)
		+ 1 * (size % DMA_SLOT_SIZE != 0);
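	/* e.g. any message from 1 byte up to DMA_SLOT_SIZE takes one slot */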

	if (!dev->ishtp_dma_tx_map) {
		dev_err(dev->devc, "Fail to allocate Tx map\n");
		return NULL;
	}

	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
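	/*
	 * First-fit scan: look for required_slots consecutive free slots
	 * and claim them while holding the Tx lock.
	 */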
	for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
		free = 1;
		for (j = 0; j < required_slots; j++)
			if (dev->ishtp_dma_tx_map[i+j]) {
				free = 0;
				i += j;
				break;
			}
		if (free) {
			/* mark memory as "caught" */
			for (j = 0; j < required_slots; j++)
				dev->ishtp_dma_tx_map[i+j] = 1;
			spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
			return (i * DMA_SLOT_SIZE) +
				(unsigned char *)dev->ishtp_host_dma_tx_buf;
		}
	}
	spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
	dev_err(dev->devc, "No free DMA buffer to send msg\n");
	return NULL;
}
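
/*
 * Typical Tx flow (roughly, see client.c): the client layer grabs a slot
 * here, copies the outgoing message into it, points the firmware at it via
 * an HBM DMA transfer message, and returns the slot with
 * ishtp_cl_release_dma_acked_mem() once the firmware acks the transfer.
 */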

/**
 * ishtp_cl_release_dma_acked_mem() - Release DMA memory slot
 * @dev: ishtp device
 * @msg_addr: message address of slot
 * @size: Size of memory to release
 *
 * Return the acked memory (size bytes starting at msg_addr) to the
 * free list.
 */
void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
				    void *msg_addr,
				    uint8_t size)
{
	unsigned long flags;
	int acked_slots = (size / DMA_SLOT_SIZE)
		+ 1 * (size % DMA_SLOT_SIZE != 0);
	int i, j;

	if ((msg_addr - dev->ishtp_host_dma_tx_buf) % DMA_SLOT_SIZE) {
		dev_err(dev->devc, "Bad DMA Tx ack address\n");
		return;
	}

	if (!dev->ishtp_dma_tx_map) {
		dev_err(dev->devc, "Fail to allocate Tx map\n");
		return;
	}

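	/* Translate the acked address back to its first slot index */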
	i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
	for (j = 0; j < acked_slots; j++) {
		if ((i + j) >= dev->ishtp_dma_num_slots ||
				!dev->ishtp_dma_tx_map[i+j]) {
			/* no such slot, or memory is already free */
			spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
			dev_err(dev->devc, "Bad DMA Tx ack address\n");
			return;
		}
		dev->ishtp_dma_tx_map[i+j] = 0;
	}
	spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
}