/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "hfi.h"

/* additive distance between non-SOP and SOP space */
#define SOP_DISTANCE (TXE_PIO_SIZE / 2)
#define PIO_BLOCK_MASK (PIO_BLOCK_SIZE - 1)
/* number of QUADWORDs in a block */
#define PIO_BLOCK_QWS (PIO_BLOCK_SIZE / sizeof(u64))

/**
 * pio_copy - copy data block to MMIO space
 * @dd: hfi1 dev data
 * @pbuf: a number of blocks allocated within a PIO send context
 * @pbc: PBC to send
 * @from: source, must be 8 byte aligned
 * @count: number of DWORD (32-bit) quantities to copy from source
 *
 * Copy data from source to PIO Send Buffer memory, 8 bytes at a time.
 * Must always write full PIO_BLOCK_SIZE-byte blocks.  The first block must
 * be written to the corresponding SOP=1 address.
 *
 * Known:
 * o pbuf->start always starts on a block boundary
 * o pbuf can wrap only at a block boundary
 */
void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
	      const void *from, size_t count)
{
	void __iomem *dest = pbuf->start + SOP_DISTANCE;
	void __iomem *send = dest + PIO_BLOCK_SIZE;
	void __iomem *dend;			/* 8-byte data end */

	/* write the PBC */
	writeq(pbc, dest);
	dest += sizeof(u64);

	/* calculate where the QWORD data ends - in SOP=1 space */
	dend = dest + ((count >> 1) * sizeof(u64));

	if (dend < send) {
		/*
		 * all QWORD data is within the SOP block, does *not*
		 * reach the end of the SOP block
		 */

		while (dest < dend) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}
		/*
		 * No boundary checks are needed here:
		 * 0. We're not on the SOP block boundary
		 * 1. The possible DWORD dangle will still be within
		 *    the SOP block
		 * 2. We cannot wrap except on a block boundary.
		 */
	} else {
		/* QWORD data extends _to_ or beyond the SOP block */

		/* write 8-byte SOP chunk data */
		while (dest < send) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}
		/* drop out of the SOP range */
		dest -= SOP_DISTANCE;
		dend -= SOP_DISTANCE;

		/*
		 * If the wrap comes before or matches the data end,
		 * copy until the wrap, then wrap.
		 *
		 * If the data ends at the end of the SOP above and
		 * the buffer wraps, then pbuf->end == dend == dest
		 * and nothing will get written, but we will wrap in
		 * case there is a dangling DWORD.
		 */
		if (pbuf->end <= dend) {
			while (dest < pbuf->end) {
				writeq(*(u64 *)from, dest);
				from += sizeof(u64);
				dest += sizeof(u64);
			}

			dest -= pbuf->sc->size;
			dend -= pbuf->sc->size;
		}

		/* write 8-byte non-SOP, non-wrap chunk data */
		while (dest < dend) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}
	}
	/* at this point we have wrapped if we are going to wrap */

	/* write dangling u32, if any */
	if (count & 1) {
		union mix val;

		val.val64 = 0;
		val.val32[0] = *(u32 *)from;
		writeq(val.val64, dest);
		dest += sizeof(u64);
	}
	/*
	 * fill in rest of block, no need to check pbuf->end
	 * as we only wrap on a block boundary
	 */
	while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) {
		writeq(0, dest);
		dest += sizeof(u64);
	}

	/* finished with this buffer */
	this_cpu_dec(*pbuf->sc->buffers_allocated);
	preempt_enable();
}

/*
 * Handle carry bytes using shifts and masks.
 *
 * NOTE: the unused portion of carry is expected to always be zero.
 */

/*
 * "zero" shift - bit shift used to zero out upper bytes.  Input is
 * the count of LSB bytes to preserve.
 */
#define zshift(x) (8 * (8 - (x)))

/*
 * "merge" shift - bit shift used to merge with carry bytes.  Input is
 * the LSB byte count to move beyond.
 */
#define mshift(x) (8 * (x))
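
/*
 * For example, with carry_bytes == 3: mshift(3) == 24 and zshift(3) == 40.
 * Shifting a new QW left by 24 bits places its low 5 bytes above the 3
 * carry bytes in the merged QW; shifting the same QW right by 40 bits
 * keeps only the 3 bytes that did not fit, which become the next carry.
 */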

/*
 * Jump copy - no-loop copy for < 8 bytes.
 */
static inline void jcopy(u8 *dest, const u8 *src, u32 n)
{
	switch (n) {
	case 7:
		*dest++ = *src++;
		fallthrough;
	case 6:
		*dest++ = *src++;
		fallthrough;
	case 5:
		*dest++ = *src++;
		fallthrough;
	case 4:
		*dest++ = *src++;
		fallthrough;
	case 3:
		*dest++ = *src++;
		fallthrough;
	case 2:
		*dest++ = *src++;
		fallthrough;
	case 1:
		*dest++ = *src++;
	}
}

/*
 * Read nbytes from "from" and place them in the low bytes
 * of pbuf->carry.  Other bytes are left as-is.  Any previous
 * value in pbuf->carry is lost.
 *
 * NOTES:
 * o do not read from "from" if nbytes is zero
 * o from may _not_ be u64 aligned.
 */
static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
				  unsigned int nbytes)
{
	pbuf->carry.val64 = 0;
	jcopy(&pbuf->carry.val8[0], from, nbytes);
	pbuf->carry_bytes = nbytes;
}

/*
 * Read nbytes bytes from "from" and put them at the end of pbuf->carry.
 * It is expected that the extra read does not overfill carry.
 *
 * NOTES:
 * o from may _not_ be u64 aligned
 * o nbytes may span a QW boundary
 */
static inline void read_extra_bytes(struct pio_buf *pbuf,
				    const void *from, unsigned int nbytes)
{
	jcopy(&pbuf->carry.val8[pbuf->carry_bytes], from, nbytes);
	pbuf->carry_bytes += nbytes;
}

/*
 * Write a quad word using parts of pbuf->carry and the next 8 bytes of src.
 * Put the unused part of the next 8 bytes of src into the LSB bytes of
 * pbuf->carry with the upper bytes zeroed.
 *
 * NOTES:
 * o result must keep unused bytes zeroed
 * o src must be u64 aligned
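 *
 * For example, with carry_bytes == 3: the 3 carry bytes supply the low 3
 * bytes of the QW written to dest, the low 5 bytes of the source QW fill
 * the rest, and the upper 3 bytes of the source QW become the new carry
 * (LSB justified, upper bytes zero).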
 */
static inline void merge_write8(
	struct pio_buf *pbuf,
	void __iomem *dest,
	const void *src)
{
	u64 new, temp;

	new = *(u64 *)src;
	temp = pbuf->carry.val64 | (new << mshift(pbuf->carry_bytes));
	writeq(temp, dest);
	pbuf->carry.val64 = new >> zshift(pbuf->carry_bytes);
}

/*
 * Write a quad word using all bytes of carry.
 */
static inline void carry8_write8(union mix carry, void __iomem *dest)
{
	writeq(carry.val64, dest);
}

/*
 * Write a quad word using all the valid bytes of carry.  If carry
 * has zero valid bytes, nothing is written.
 * Returns 0 on nothing written, non-zero on quad word written.
 */
static inline int carry_write8(struct pio_buf *pbuf, void __iomem *dest)
{
	if (pbuf->carry_bytes) {
		/* unused bytes are always kept zeroed, so just write */
		writeq(pbuf->carry.val64, dest);
		return 1;
	}

	return 0;
}

/*
 * Segmented PIO Copy - start
 *
 * Start a PIO copy.
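 *
 * Typical use, as implied by the start/mid/end routines in this file:
 * call seg_pio_copy_start() once, seg_pio_copy_mid() zero or more times,
 * then seg_pio_copy_end() to flush any carry and pad out the final block.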
 *
 * @pbuf: destination buffer
 * @pbc: the PBC for the PIO buffer
 * @from: data source, QWORD aligned
 * @nbytes: bytes to copy
 */
void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
			const void *from, size_t nbytes)
{
	void __iomem *dest = pbuf->start + SOP_DISTANCE;
	void __iomem *send = dest + PIO_BLOCK_SIZE;
	void __iomem *dend;			/* 8-byte data end */

	writeq(pbc, dest);
	dest += sizeof(u64);

	/* calculate where the QWORD data ends - in SOP=1 space */
	dend = dest + ((nbytes >> 3) * sizeof(u64));

	if (dend < send) {
		/*
		 * all QWORD data is within the SOP block, does *not*
		 * reach the end of the SOP block
		 */

		while (dest < dend) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}
		/*
		 * No boundary checks are needed here:
		 * 0. We're not on the SOP block boundary
		 * 1. The possible DWORD dangle will still be within
		 *    the SOP block
		 * 2. We cannot wrap except on a block boundary.
		 */
	} else {
		/* QWORD data extends _to_ or beyond the SOP block */

		/* write 8-byte SOP chunk data */
		while (dest < send) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}
		/* drop out of the SOP range */
		dest -= SOP_DISTANCE;
		dend -= SOP_DISTANCE;

		/*
		 * If the wrap comes before or matches the data end,
		 * copy until the wrap, then wrap.
		 *
		 * If the data ends at the end of the SOP above and
		 * the buffer wraps, then pbuf->end == dend == dest
		 * and nothing will get written, but we will wrap in
		 * case there is a dangling DWORD.
		 */
		if (pbuf->end <= dend) {
			while (dest < pbuf->end) {
				writeq(*(u64 *)from, dest);
				from += sizeof(u64);
				dest += sizeof(u64);
			}

			dest -= pbuf->sc->size;
			dend -= pbuf->sc->size;
		}

		/* write 8-byte non-SOP, non-wrap chunk data */
		while (dest < dend) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}
	}
	/* at this point we have wrapped if we are going to wrap */

	/* ...but it doesn't matter as we're done writing */

	/* save dangling bytes, if any */
	read_low_bytes(pbuf, from, nbytes & 0x7);

	pbuf->qw_written = 1 /*PBC*/ + (nbytes >> 3);
}

/*
 * Mid copy helper, "mixed case" - source is 64-bit aligned but carry
 * bytes are non-zero.
 *
 * Whole u64s must be written to the chip, so bytes must be manually merged.
 *
 * @pbuf: destination buffer
 * @from: data source, is QWORD aligned.
 * @nbytes: bytes to copy
 *
 * Must handle nbytes < 8.
 */
static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
{
	void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
	void __iomem *dend;			/* 8-byte data end */
	unsigned long qw_to_write = nbytes >> 3;
	unsigned long bytes_left = nbytes & 0x7;

	/* calculate 8-byte data end */
	dend = dest + (qw_to_write * sizeof(u64));

	if (pbuf->qw_written < PIO_BLOCK_QWS) {
		/*
		 * Still within SOP block.  We don't need to check for
		 * wrap because we are still in the first block and
		 * can only wrap on block boundaries.
		 */
		void __iomem *send;		/* SOP end */
		void __iomem *xend;

		/*
		 * calculate the end of data or end of block, whichever
		 * comes first
		 */
		send = pbuf->start + PIO_BLOCK_SIZE;
		xend = min(send, dend);

		/* shift up to SOP=1 space */
		dest += SOP_DISTANCE;
		xend += SOP_DISTANCE;

		/* write 8-byte chunk data */
		while (dest < xend) {
			merge_write8(pbuf, dest, from);
			from += sizeof(u64);
			dest += sizeof(u64);
		}

		/* shift down to SOP=0 space */
		dest -= SOP_DISTANCE;
	}
	/*
	 * At this point dest could be (either, both, or neither):
	 * - at dend
	 * - at the wrap
	 */

	/*
	 * If the wrap comes before or matches the data end,
	 * copy until the wrap, then wrap.
	 *
	 * If dest is already at the wrap, we fall into the if below,
	 * skip the copy loop, and simply wrap.
	 *
	 * If the data ends at the end of the SOP above and
	 * the buffer wraps, then pbuf->end == dend == dest
	 * and nothing will get written.
	 */
	if (pbuf->end <= dend) {
		while (dest < pbuf->end) {
			merge_write8(pbuf, dest, from);
			from += sizeof(u64);
			dest += sizeof(u64);
		}

		dest -= pbuf->sc->size;
		dend -= pbuf->sc->size;
	}

	/* write 8-byte non-SOP, non-wrap chunk data */
	while (dest < dend) {
		merge_write8(pbuf, dest, from);
		from += sizeof(u64);
		dest += sizeof(u64);
	}

	pbuf->qw_written += qw_to_write;

	/* handle carry and left-over bytes */
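	/*
	 * For example, with carry_bytes == 5 and bytes_left == 6: 3 bytes
	 * top up carry to a full QW, which is flushed below; the remaining
	 * 3 source bytes then become the new carry.
	 */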
	if (pbuf->carry_bytes + bytes_left >= 8) {
		unsigned long nread;

		/* there is enough to fill another qw - fill carry */
		nread = 8 - pbuf->carry_bytes;
		read_extra_bytes(pbuf, from, nread);

		/*
		 * One more write - but need to make sure dest is correct.
		 * Check for wrap and the possibility the write
		 * should be in SOP space.
		 *
		 * The two checks immediately below cannot both be true, hence
		 * the else. If we have wrapped, we cannot still be within the
		 * first block. Conversely, if we are still in the first block,
		 * we cannot have wrapped. We do the wrap check first as that
		 * is more likely.
		 */
		/* adjust if we have wrapped */
		if (dest >= pbuf->end)
			dest -= pbuf->sc->size;
		/* jump to the SOP range if within the first block */
		else if (pbuf->qw_written < PIO_BLOCK_QWS)
			dest += SOP_DISTANCE;

		/* flush out full carry */
		carry8_write8(pbuf->carry, dest);
		pbuf->qw_written++;

		/* now adjust and read the rest of the bytes into carry */
		bytes_left -= nread;
		from += nread; /* from is now not aligned */
		read_low_bytes(pbuf, from, bytes_left);
	} else {
		/* not enough to fill another qw, append the rest to carry */
		read_extra_bytes(pbuf, from, bytes_left);
	}
}

/*
 * Mid copy helper, "straight case" - source pointer is 64-bit aligned
 * with no carry bytes.
 *
 * @pbuf: destination buffer
 * @from: data source, is QWORD aligned
 * @nbytes: bytes to copy
 *
 * Must handle nbytes < 8.
 */
static void mid_copy_straight(struct pio_buf *pbuf,
			      const void *from, size_t nbytes)
{
	void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
	void __iomem *dend;			/* 8-byte data end */

	/* calculate 8-byte data end */
	dend = dest + ((nbytes >> 3) * sizeof(u64));

	if (pbuf->qw_written < PIO_BLOCK_QWS) {
		/*
		 * Still within SOP block.  We don't need to check for
		 * wrap because we are still in the first block and
		 * can only wrap on block boundaries.
		 */
		void __iomem *send;		/* SOP end */
		void __iomem *xend;

		/*
		 * calculate the end of data or end of block, whichever
		 * comes first
		 */
		send = pbuf->start + PIO_BLOCK_SIZE;
		xend = min(send, dend);

		/* shift up to SOP=1 space */
		dest += SOP_DISTANCE;
		xend += SOP_DISTANCE;

		/* write 8-byte chunk data */
		while (dest < xend) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}

		/* shift down to SOP=0 space */
		dest -= SOP_DISTANCE;
	}
	/*
	 * At this point dest could be (either, both, or neither):
	 * - at dend
	 * - at the wrap
	 */

	/*
	 * If the wrap comes before or matches the data end,
	 * copy until the wrap, then wrap.
	 *
	 * If dest is already at the wrap, we fall into the if below,
	 * skip the copy loop, and simply wrap.
	 *
	 * If the data ends at the end of the SOP above and
	 * the buffer wraps, then pbuf->end == dend == dest
	 * and nothing will get written.
	 */
	if (pbuf->end <= dend) {
		while (dest < pbuf->end) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}

		dest -= pbuf->sc->size;
		dend -= pbuf->sc->size;
	}

	/* write 8-byte non-SOP, non-wrap chunk data */
	while (dest < dend) {
		writeq(*(u64 *)from, dest);
		from += sizeof(u64);
		dest += sizeof(u64);
	}

	/* we know carry_bytes was zero on entry to this routine */
	read_low_bytes(pbuf, from, nbytes & 0x7);

	pbuf->qw_written += nbytes >> 3;
}

/*
 * Segmented PIO Copy - middle
 *
 * Must handle any existing tail (carry bytes) and any source alignment,
 * with any byte count.
 *
 * @pbuf: a number of blocks allocated within a PIO send context
 * @from: data source
 * @nbytes: number of bytes to copy
 */
void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
{
	unsigned long from_align = (unsigned long)from & 0x7;

	if (pbuf->carry_bytes + nbytes < 8) {
		/* not enough bytes to fill a QW */
		read_extra_bytes(pbuf, from, nbytes);
		return;
	}

	if (from_align) {
		/* misaligned source pointer - align it */
		unsigned long to_align;

		/* bytes to read to align "from" */
		to_align = 8 - from_align;

		/*
		 * In the advance-to-alignment logic below, we do not need
		 * to check if we are using more than nbytes.  This is because
		 * if we are here, we already know that carry+nbytes will
		 * fill at least one QW.
		 */
		if (pbuf->carry_bytes + to_align < 8) {
			/* not enough align bytes to fill a QW */
			read_extra_bytes(pbuf, from, to_align);
			from += to_align;
			nbytes -= to_align;
		} else {
			/* bytes to fill carry */
			unsigned long to_fill = 8 - pbuf->carry_bytes;
			/* bytes left over to be read */
			unsigned long extra = to_align - to_fill;
			void __iomem *dest;

			/* fill carry... */
			read_extra_bytes(pbuf, from, to_fill);
			from += to_fill;
			nbytes -= to_fill;
			/* may not be enough valid bytes left to align */
			if (extra > nbytes)
				extra = nbytes;

			/* ...now write carry */
			dest = pbuf->start + (pbuf->qw_written * sizeof(u64));

			/*
			 * The two checks immediately below cannot both be
			 * true, hence the else.  If we have wrapped, we
			 * cannot still be within the first block.
			 * Conversely, if we are still in the first block, we
			 * cannot have wrapped.  We do the wrap check first
			 * as that is more likely.
			 */
			/* adjust if we've wrapped */
			if (dest >= pbuf->end)
				dest -= pbuf->sc->size;
			/* jump to SOP range if within the first block */
			else if (pbuf->qw_written < PIO_BLOCK_QWS)
				dest += SOP_DISTANCE;

			carry8_write8(pbuf->carry, dest);
			pbuf->qw_written++;

			/* read any extra bytes to do final alignment */
			/* this will overwrite anything in pbuf->carry */
			read_low_bytes(pbuf, from, extra);
			from += extra;
			nbytes -= extra;
			/*
			 * If no bytes are left, return early - we are done.
			 * NOTE: This short-circuit is *required* because
			 * "extra" may have been reduced in size and "from"
			 * is not aligned, as required when leaving this
			 * if block.
			 */
			if (nbytes == 0)
				return;
		}

		/* at this point, from is QW aligned */
	}

	if (pbuf->carry_bytes)
		mid_copy_mix(pbuf, from, nbytes);
	else
		mid_copy_straight(pbuf, from, nbytes);
}

/*
 * Segmented PIO Copy - end
 *
 * Write any remainder (in pbuf->carry) and finish writing the whole block.
 *
 * @pbuf: a number of blocks allocated within a PIO send context
 */
void seg_pio_copy_end(struct pio_buf *pbuf)
{
	void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));

	/*
	 * The two checks immediately below cannot both be true, hence the
	 * else.  If we have wrapped, we cannot still be within the first
	 * block.  Conversely, if we are still in the first block, we
	 * cannot have wrapped.  We do the wrap check first as that is
	 * more likely.
	 */
	/* adjust if we have wrapped */
	if (dest >= pbuf->end)
		dest -= pbuf->sc->size;
	/* jump to the SOP range if within the first block */
	else if (pbuf->qw_written < PIO_BLOCK_QWS)
		dest += SOP_DISTANCE;

	/* write final bytes, if any */
	if (carry_write8(pbuf, dest)) {
		dest += sizeof(u64);
		/*
		 * NOTE: We do not need to recalculate whether dest needs
		 * SOP_DISTANCE or not.
		 *
		 * If we are in the first block and the dangle write
		 * keeps us in the same block, dest will need
		 * to retain SOP_DISTANCE in the loop below.
		 *
		 * If we are in the first block and the dangle write pushes
		 * us to the next block, then the loop below will not run
		 * and dest is not used.  Hence we do not need to update
		 * it.
		 *
		 * If we are past the first block, then SOP_DISTANCE
		 * was never added, so there is nothing to do.
		 */
	}

	/* fill in rest of block */
	while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) {
		writeq(0, dest);
		dest += sizeof(u64);
	}

	/* finished with this buffer */
	this_cpu_dec(*pbuf->sc->buffers_allocated);
	preempt_enable();
}