/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#ifndef __ASM_X86_PMEM_H__
#define __ASM_X86_PMEM_H__

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
/**
 * arch_memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Copy data to persistent memory media via non-temporal stores so that
 * a subsequent pmem driver flush operation will drain posted write queues.
 */
static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
{
	int rem;

	/*
	 * We are copying between two kernel buffers; if
	 * __copy_from_user_inatomic_nocache() returns an error (page
	 * fault) we would have already reported a general protection fault
	 * before the WARN+BUG.
	 */
	rem = __copy_from_user_inatomic_nocache(dst, (void __user *) src, n);
	if (WARN(rem, "%s: fault copying %p <- %p unwritten: %d\n",
				__func__, dst, src, rem))
		BUG();
}
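
/*
 * Illustrative caller (not part of the original header): persist a
 * buffer by copying it with non-temporal stores, then drain posted
 * writes.  The wmb() here stands in for the pmem driver's flush step;
 * example_persist() and its parameter names are hypothetical.
 */
static inline void example_persist(void *pmem_dst, const void *buf, size_t n)
{
	arch_memcpy_to_pmem(pmem_dst, buf, n);
	wmb();	/* order the non-temporal stores before declaring durability */
}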

static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
{
	return memcpy_mcsafe(dst, src, n);
}
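
/*
 * Hypothetical consumer of the return value: in this kernel vintage
 * memcpy_mcsafe() returns 0 on success and non-zero if a machine
 * check fired on poisoned media mid-copy.  Mapping that to -EIO
 * mirrors typical block-driver behavior, but is an assumption here.
 */
static inline int example_read_pmem(void *dst, const void *pmem_src, size_t n)
{
	if (arch_memcpy_from_pmem(dst, pmem_src, n))
		return -EIO;	/* consumed a media error */
	return 0;
}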

/**
 * arch_wb_cache_pmem - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
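
/*
 * Worked example of the rounding above, assuming 64-byte cache lines
 * (x86_clflush_size == 64): for addr == 0x1005 and size == 4,
 * clflush_mask is 0x3f, the loop starts at p == 0x1000, and a single
 * clwb covers 0x1000-0x103f, which holds all four bytes.  Purely
 * illustrative; the helper below is not part of the original header.
 */
static inline void example_wb_unaligned(char *buf)
{
	/* flushes the whole cache line containing buf[5], not one byte */
	arch_wb_cache_pmem(buf + 5, 1);
}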

/**
 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
 * @addr:	PMEM destination address
 * @bytes:	number of bytes to copy
 * @i:		iterator with source data
 *
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
 */
static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
		struct iov_iter *i)
{
	size_t len;

	/* TODO: skip the write-back by always using non-temporal stores */
	len = copy_from_iter_nocache(addr, bytes, i);

	/*
	 * In the iovec case on x86_64 copy_from_iter_nocache() uses
	 * non-temporal stores for the bulk of the transfer, but we need
	 * to manually flush if the transfer is unaligned. A cached
	 * memory copy is used when destination or size is not naturally
	 * aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 *
	 * In the non-iovec case the entire destination needs to be
	 * flushed.
	 */
	if (iter_is_iovec(i)) {
		unsigned long flushed, dest = (unsigned long) addr;

		if (bytes < 8) {
			if (!IS_ALIGNED(dest, 4) || (bytes != 4))
				arch_wb_cache_pmem(addr, bytes);
		} else {
			if (!IS_ALIGNED(dest, 8)) {
				dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
				arch_wb_cache_pmem(addr, 1);
			}

			flushed = dest - (unsigned long) addr;
			if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
				arch_wb_cache_pmem(addr + bytes - 1, 1);
		}
	} else
		arch_wb_cache_pmem(addr, bytes);

	return len;
}
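
/*
 * Hypothetical caller exercising the iovec path above; the names
 * (pmem_dst, udata) are illustrative and iov_iter_init() comes from
 * linux/uio.h.  With an 8-byte-aligned destination and a
 * multiple-of-8 length, no manual write-back occurs; otherwise the
 * head and/or tail cache lines are flushed as handled above.
 */
static inline size_t example_copy_iter(void *pmem_dst, void __user *udata,
		size_t n)
{
	struct iovec iov = { .iov_base = udata, .iov_len = n };
	struct iov_iter i;

	iov_iter_init(&i, WRITE, &iov, 1, n);
	return arch_copy_from_iter_pmem(pmem_dst, n, &i);
}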

/**
 * arch_clear_pmem - zero a PMEM memory range
 * @addr:	virtual start address
 * @size:	number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
 */
static inline void arch_clear_pmem(void *addr, size_t size)
{
	memset(addr, 0, size);
	arch_wb_cache_pmem(addr, size);
}
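
/*
 * Illustrative use (hypothetical): zero a page worth of pmem, e.g.
 * when clearing a region before reuse, with the zeros written back
 * past the CPU cache by the helper above.
 */
static inline void example_clear_page(void *pmem_page)
{
	arch_clear_pmem(pmem_page, PAGE_SIZE);
}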

static inline void arch_invalidate_pmem(void *addr, size_t size)
{
	clflush_cache_range(addr, size);
}
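
/*
 * Sketch (hedged): after the platform repairs a poisoned range on
 * media, drop any stale cached lines so subsequent reads fetch the
 * repaired contents from the device.  When to call this is driver
 * policy, not defined by this header.
 */
static inline void example_post_repair(void *pmem_addr, size_t n)
{
	arch_invalidate_pmem(pmem_addr, n);
}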
#endif /* CONFIG_ARCH_HAS_PMEM_API */
#endif /* __ASM_X86_PMEM_H__ */