/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_cpu.h
 *
 * CPU partition
 * . A CPU partition is a virtual processing unit.
 *
 * . A CPU partition can contain 1-N cores or 1-N NUMA nodes;
 *   in other words, a CPU partition is a pool of processors.
 *
 * CPU Partition Table (CPT)
 * . A set of CPU partitions.
 *
 * . There are two modes for a CPT: CFS_CPU_MODE_NUMA and CFS_CPU_MODE_SMP.
 *
 * . The user can specify the total number of CPU partitions when creating
 *   a CPT; CPU partition IDs always start from 0.
 *
 *   Example: if there are 8 cores on the system, creating a CPT with
 *   cpu_npartitions=4 gives:
 *              core[0, 1] = partition[0], core[2, 3] = partition[1]
 *              core[4, 5] = partition[2], core[6, 7] = partition[3]
 *
 *          cpu_npartitions=1:
 *              core[0, 1, ... 7] = partition[0]
 *
 * . The user can also specify CPU partitions by a string pattern.
 *
 *   Examples: cpu_partitions="0[0,1], 1[2,3]"
 *             cpu_partitions="N 0[0-3], 1[4-8]"
 *
 *   The leading character "N" means the following numbers are NUMA node IDs.
 *
 * . NUMA allocators and CPU-affinity threads are built over CPU partitions,
 *   instead of over HW CPUs or HW nodes.
 *
 * . By default, Lustre modules should refer to the global cfs_cpt_table
 *   instead of accessing HW CPUs directly, so Lustre's concurrency can be
 *   configured via cpu_npartitions of the global cfs_cpt_table.
 *
 * . If cpu_npartitions=1 (all CPUs in one pool), Lustre should work the
 *   same way as version 2.2 or earlier.
 *
 * Author: liang@whamcloud.com
 */
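
/*
 * A minimal usage sketch (illustrative only; error handling abbreviated,
 * and only functions declared in this header are used): build a table of
 * two partitions, assign CPUs 0-1 to partition 0 and CPUs 2-3 to
 * partition 1, print it, then free it.
 *
 *	struct cfs_cpt_table *cptab;
 *	char buf[128];
 *
 *	cptab = cfs_cpt_table_alloc(2);
 *	if (cptab == NULL)
 *		return -ENOMEM;
 *	cfs_cpt_set_cpu(cptab, 0, 0);
 *	cfs_cpt_set_cpu(cptab, 0, 1);
 *	cfs_cpt_set_cpu(cptab, 1, 2);
 *	cfs_cpt_set_cpu(cptab, 1, 3);
 *	cfs_cpt_table_print(cptab, buf, sizeof(buf));
 *	cfs_cpt_table_free(cptab);
 */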

#ifndef __LIBCFS_CPU_H__
#define __LIBCFS_CPU_H__

/* any CPU partition */
#define CFS_CPT_ANY		(-1)

#ifdef CONFIG_SMP
/**
 * return cpumask of CPU partition \a cpt
 */
cpumask_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
/**
 * print string information of cpt-table
 */
int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len);
#else /* !CONFIG_SMP */
struct cfs_cpt_table {
	/* # of CPU partitions */
	int			ctb_nparts;
	/* cpu mask */
	cpumask_t		ctb_mask;
	/* node mask */
	nodemask_t		ctb_nodemask;
	/* version */
	__u64			ctb_version;
};

static inline cpumask_t *
cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
{
	return NULL;
}

static inline int
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
	return 0;
}
#endif /* CONFIG_SMP */

extern struct cfs_cpt_table	*cfs_cpt_table;

/**
 * destroy a CPU partition table
 */
void cfs_cpt_table_free(struct cfs_cpt_table *cptab);
/**
 * create a cfs_cpt_table with \a ncpt number of partitions
 */
struct cfs_cpt_table *cfs_cpt_table_alloc(unsigned int ncpt);
/**
 * return total number of CPU partitions in \a cptab
 */
int cfs_cpt_number(struct cfs_cpt_table *cptab);
/**
 * return number of HW cores or hyper-threads in CPU partition \a cpt
 */
int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt);
/**
 * is there any online CPU in CPU partition \a cpt?
 */
int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt);
/**
 * return nodemask of CPU partition \a cpt
 */
nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt);
/**
 * map the current HW processor ID to a CPU-partition ID of \a cptab
 */
int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap);
/**
 * map HW processor ID \a cpu to a CPU-partition ID by \a cptab
 */
int cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu);
/**
 * bind the current thread to CPU partition \a cpt of \a cptab
 */
int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt);
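/*
 * A sketch of typical use (assumes the global cfs_cpt_table, and that
 * cfs_cpt_bind() returns 0 on success): a service thread pins itself to
 * the partition covering the CPU it is currently running on.
 *
 *	int cpt = cfs_cpt_current(cfs_cpt_table, 1);
 *
 *	if (cfs_cpt_bind(cfs_cpt_table, cpt) != 0)
 *		CWARN("cannot bind to CPU partition %d\n", cpt);
 */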
/**
 * add \a cpu to CPU partition \a cpt of \a cptab; return 1 on success,
 * otherwise return 0
 */
int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
/**
 * remove \a cpu from CPU partition \a cpt of \a cptab
 */
void cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
/**
 * add all CPUs in \a mask to CPU partition \a cpt;
 * return 1 if all CPUs were successfully set, otherwise return 0
 */
int cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab,
			int cpt, cpumask_t *mask);
/**
 * remove all CPUs in \a mask from CPU partition \a cpt
 */
void cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab,
			   int cpt, cpumask_t *mask);
/**
 * add all CPUs in NUMA node \a node to CPU partition \a cpt;
 * return 1 if all CPUs were successfully set, otherwise return 0
 */
int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node);
/**
 * remove all CPUs in NUMA node \a node from CPU partition \a cpt
 */
void cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node);

/**
 * add all CPUs in node mask \a mask to CPU partition \a cpt;
 * return 1 if all CPUs were successfully set, otherwise return 0
 */
int cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab,
			 int cpt, nodemask_t *mask);
/**
 * remove all CPUs in node mask \a mask from CPU partition \a cpt
 */
void cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab,
			    int cpt, nodemask_t *mask);
/**
 * unset all CPUs for CPU partition \a cpt
 */
void cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt);
/**
 * convert partition ID \a cpt to a NUMA node ID; if there is more than one
 * node in this partition, it may return a different node ID each time.
 */
int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
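/*
 * A sketch of NUMA-aware allocation over a partition (kmalloc_node() is
 * the standard kernel per-node allocator; "size" is hypothetical):
 *
 *	int node = cfs_cpt_spread_node(cfs_cpt_table, cpt);
 *	void *buf = kmalloc_node(size, GFP_KERNEL, node);
 */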

/**
 * return the number of hyper-thread siblings in the same core as \a cpu
 */
int cfs_cpu_ht_nsiblings(int cpu);

/**
 * iterate over all CPU partitions in \a cptab
 */
#define cfs_cpt_for_each(i, cptab) \
	for (i = 0; i < cfs_cpt_number(cptab); i++)
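/*
 * For example (a sketch; CDEBUG() and D_INFO come from libcfs debugging
 * support), logging the weight of every partition:
 *
 *	int i;
 *
 *	cfs_cpt_for_each(i, cfs_cpt_table)
 *		CDEBUG(D_INFO, "partition %d has %d CPUs\n",
 *		       i, cfs_cpt_weight(cfs_cpt_table, i));
 */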

int cfs_cpu_init(void);
void cfs_cpu_fini(void);

#endif /* __LIBCFS_CPU_H__ */