/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 * Portions based on net/core/datagram.c and copyrighted by their authors.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code allows the net stack to make use of a DMA engine for
 * skb to iovec copies.
 */

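/*
 * Typical call sequence (a sketch only, not lifted from an in-tree
 * caller; the local variable names are hypothetical, the functions are
 * the ones defined below, and chan is a DMA channel obtained from the
 * dmaengine client API):
 *
 *	struct dma_pinned_list *plist;
 *	dma_cookie_t cookie;
 *
 *	plist = dma_pin_iovec_pages(iov, len);
 *	...
 *	cookie = dma_memcpy_to_iovec(chan, iov, plist, kdata, n);
 *	...
 *	dma_unpin_iovec_pages(plist);
 */
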
#include <linux/dmaengine.h>
#include <linux/pagemap.h>
#include <net/tcp.h> /* for memcpy_toiovec */
#include <asm/io.h>
#include <asm/uaccess.h>

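/*
 * Worked example (hypothetical numbers, 4 KiB pages): iov_base = 0x1003
 * with iov_len = 0x2000 touches the pages at 0x1000, 0x2000 and 0x3000:
 * PAGE_ALIGN(0x1003 + 0x2000) = 0x4000, the base rounds down to 0x1000,
 * and (0x4000 - 0x1000) >> PAGE_SHIFT == 3.
 */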
static int num_pages_spanned(struct iovec *iov)
{
	return
	((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
	((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT);
}

/*
 * Pin down all the iovec pages needed for len bytes.
 * Return a struct dma_pinned_list to keep track of pages pinned down.
 *
 * We are allocating a single chunk of memory, and then carving it up into
 * 3 sections; the latter two are sized by the number of iovecs and the
 * total number of pages, respectively.
 */
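/*
 * Layout of that single allocation (derived from the code below):
 *
 *	struct dma_pinned_list           header, local_list
 *	struct dma_page_list[nr_iovecs]  local_list->page_list[]
 *	struct page *[iovec_pages_used]  carved up among the per-iovec
 *	                                 page_list->pages arrays
 */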
struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
{
	struct dma_pinned_list *local_list;
	struct page **pages;
	int i;
	int ret;
	int nr_iovecs = 0;
	int iovec_len_used = 0;
	int iovec_pages_used = 0;

	/* don't pin down non-user-based iovecs */
	if (segment_eq(get_fs(), KERNEL_DS))
		return NULL;

	/* determine how many iovecs/pages there are, up front */
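	/* (note: this relies on the caller guaranteeing that the iovecs
	 * cover at least len bytes; even for len == 0 the first iovec
	 * is counted) */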
	do {
		iovec_len_used += iov[nr_iovecs].iov_len;
		iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
		nr_iovecs++;
	} while (iovec_len_used < len);

	/* single kmalloc for pinned list, page_list[], and the page arrays */
	local_list = kmalloc(sizeof(*local_list)
		+ (nr_iovecs * sizeof (struct dma_page_list))
		+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
	if (!local_list)
		goto out;

	/* list of pages starts right after the page list array */
	pages = (struct page **) &local_list->page_list[nr_iovecs];

	local_list->nr_iovecs = 0;

	for (i = 0; i < nr_iovecs; i++) {
		struct dma_page_list *page_list = &local_list->page_list[i];

		len -= iov[i].iov_len;

		if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
			goto unpin;

		page_list->nr_pages = num_pages_spanned(&iov[i]);
		page_list->base_address = iov[i].iov_base;

		page_list->pages = pages;
		pages += page_list->nr_pages;

		/* pin pages down */
		down_read(&current->mm->mmap_sem);
		ret = get_user_pages(
			current,
			current->mm,
			(unsigned long) iov[i].iov_base,
			page_list->nr_pages,
			1,	/* write */
			0,	/* force */
			page_list->pages,
			NULL);
		up_read(&current->mm->mmap_sem);

		if (ret != page_list->nr_pages)
			goto unpin;

		local_list->nr_iovecs = i + 1;
	}

	return local_list;

unpin:
	dma_unpin_iovec_pages(local_list);
out:
	return NULL;
}

void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
{
	int i, j;

	if (!pinned_list)
		return;

	for (i = 0; i < pinned_list->nr_iovecs; i++) {
		struct dma_page_list *page_list = &pinned_list->page_list[i];
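		/*
		 * The DMA engine wrote these pages behind the CPU's back,
		 * so mark them dirty before dropping our reference.
		 */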
		for (j = 0; j < page_list->nr_pages; j++) {
			set_page_dirty_lock(page_list->pages[j]);
			page_cache_release(page_list->pages[j]);
		}
	}

	kfree(pinned_list);
}


/*
 * We have already pinned down the pages we will be using in the iovecs.
 * Each entry in the iov array has a corresponding entry in
 * pinned_list->page_list; plain array indexing keeps iov[] and
 * page_list[] in sync.
 * Leading entries in the iov array will have iov_len == 0 if they were
 * already fully copied into by an earlier call.
 * The iovec space remaining is guaranteed to be larger than len.
 */
dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len)
{
	int iov_byte_offset;
	int copy;
	dma_cookie_t dma_cookie = 0;
	int iovec_idx;
	int page_idx;

	if (!chan)
		return memcpy_toiovec(iov, kdata, len);

	iovec_idx = 0;
	while (iovec_idx < pinned_list->nr_iovecs) {
		struct dma_page_list *page_list;

		/* skip already used-up iovecs */
		while (!iov[iovec_idx].iov_len)
			iovec_idx++;

		page_list = &pinned_list->page_list[iovec_idx];

		iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
		page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
			 - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;

		/* break up copies to not cross page boundary */
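		/*
		 * E.g. (made-up numbers): with iov_byte_offset == 0xf00 and
		 * len == 0x300, the first chunk is 0x100 bytes to finish the
		 * current page; the remaining 0x200 bytes then start at
		 * offset 0 of the next pinned page.
		 */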
		while (iov[iovec_idx].iov_len) {
			copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
			copy = min_t(int, copy, iov[iovec_idx].iov_len);

			dma_cookie = dma_async_memcpy_buf_to_pg(chan,
					page_list->pages[page_idx],
					iov_byte_offset,
					kdata,
					copy);

			len -= copy;
			iov[iovec_idx].iov_len -= copy;
			iov[iovec_idx].iov_base += copy;

			if (!len)
				return dma_cookie;

			kdata += copy;
			iov_byte_offset = 0;
			page_idx++;
		}
		iovec_idx++;
	}

	/* really bad if we ever run out of iovecs */
	BUG();
	return -EFAULT;
}
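
/*
 * Note on completion: the cookie returned above only identifies the last
 * queued transfer. A sketch of how a caller might wait on it with the
 * async memcpy API of this era (assuming chan and cookie from above):
 *
 *	dma_async_memcpy_issue_pending(chan);
 *	while (dma_async_memcpy_complete(chan, cookie, NULL, NULL) ==
 *			DMA_IN_PROGRESS)
 *		cpu_relax();
 */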

dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len)
{
	int iov_byte_offset;
	int copy;
	dma_cookie_t dma_cookie = 0;
	int iovec_idx;
	int page_idx;
	int err;

	/* this needs as-yet-unimplemented buf-to-buf support, so punt. */
	/* TODO: use dma for this */
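	/* note: kmap() may sleep, so this fallback assumes process context */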
	if (!chan || !pinned_list) {
		u8 *vaddr = kmap(page);
		err = memcpy_toiovec(iov, vaddr + offset, len);
		kunmap(page);
		return err;
	}

	iovec_idx = 0;
	while (iovec_idx < pinned_list->nr_iovecs) {
		struct dma_page_list *page_list;

		/* skip already used-up iovecs */
		while (!iov[iovec_idx].iov_len)
			iovec_idx++;

		page_list = &pinned_list->page_list[iovec_idx];

		iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
		page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
			 - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;

		/* break up copies to not cross page boundary */
		while (iov[iovec_idx].iov_len) {
			copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
			copy = min_t(int, copy, iov[iovec_idx].iov_len);

			dma_cookie = dma_async_memcpy_pg_to_pg(chan,
					page_list->pages[page_idx],
					iov_byte_offset,
					page,
					offset,
					copy);

			len -= copy;
			iov[iovec_idx].iov_len -= copy;
			iov[iovec_idx].iov_base += copy;

			if (!len)
				return dma_cookie;

			offset += copy;
			iov_byte_offset = 0;
			page_idx++;
		}
		iovec_idx++;
	}

	/* really bad if we ever run out of iovecs */
	BUG();
	return -EFAULT;
}