/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: liang@whamcloud.com
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "../../include/linux/libcfs/libcfs.h"

/** destroy cpu-partition lock, see libcfs_private.h for more detail */
void
cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
{
	LASSERT(pcl->pcl_locks != NULL);
	LASSERT(!pcl->pcl_locked);

	cfs_percpt_free(pcl->pcl_locks);
	LIBCFS_FREE(pcl, sizeof(*pcl));
}
EXPORT_SYMBOL(cfs_percpt_lock_free);

/**
 * create cpu-partition lock, see libcfs_private.h for more detail.
 *
 * The cpu-partition lock is designed for large-scale SMP systems, so we
 * need to reduce cacheline contention as much as possible; that is why
 * we always allocate cacheline-aligned memory blocks.
 */
struct cfs_percpt_lock *
cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
{
	struct cfs_percpt_lock	*pcl;
	spinlock_t		*lock;
	int			i;

	/* NB: cptab can be NULL; pcl will be for all HW CPUs in that case */
	LIBCFS_ALLOC(pcl, sizeof(*pcl));
	if (pcl == NULL)
		return NULL;

	pcl->pcl_cptab = cptab;
	pcl->pcl_locks = cfs_percpt_alloc(cptab, sizeof(*lock));
	if (pcl->pcl_locks == NULL) {
		LIBCFS_FREE(pcl, sizeof(*pcl));
		return NULL;
	}

	cfs_percpt_for_each(lock, i, pcl->pcl_locks)
		spin_lock_init(lock);

	return pcl;
}
EXPORT_SYMBOL(cfs_percpt_lock_alloc);
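
/*
 * Illustrative sketch (not part of the original file): a typical
 * alloc/free lifecycle for a cpu-partition lock.  "my_cptab" is a
 * hypothetical cfs_cpt_table set up elsewhere by the caller; passing
 * NULL instead would create one lock per HW CPU, as noted above.
 *
 *	struct cfs_percpt_lock *pcl;
 *
 *	pcl = cfs_percpt_lock_alloc(my_cptab);
 *	if (pcl == NULL)
 *		return -ENOMEM;
 *
 *	... use cfs_percpt_lock()/cfs_percpt_unlock() on pcl ...
 *
 *	cfs_percpt_lock_free(pcl);
 */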

/**
 * lock a CPU partition
 *
 * \a index != CFS_PERCPT_LOCK_EX
 *     hold private lock indexed by \a index
 *
 * \a index == CFS_PERCPT_LOCK_EX
 *     exclusively lock \a pcl and nobody can take a private lock
 */
void
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
{
	int	ncpt = cfs_cpt_number(pcl->pcl_cptab);
	int	i;

	LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt);

	if (ncpt == 1) {
		index = 0;
	} else { /* serialize with exclusive lock */
		while (pcl->pcl_locked)
			cpu_relax();
	}

	if (likely(index != CFS_PERCPT_LOCK_EX)) {
		spin_lock(pcl->pcl_locks[index]);
		return;
	}

	/* exclusive lock request */
	for (i = 0; i < ncpt; i++) {
		spin_lock(pcl->pcl_locks[i]);
		if (i == 0) {
			LASSERT(!pcl->pcl_locked);
			/* nobody should take a private lock after this,
			 * so an exclusive locker will not starve for long */
			pcl->pcl_locked = 1;
		}
	}
}
EXPORT_SYMBOL(cfs_percpt_lock);

/** unlock a CPU partition */
void
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
{
	int	ncpt = cfs_cpt_number(pcl->pcl_cptab);
	int	i;

	index = ncpt == 1 ? 0 : index;

	if (likely(index != CFS_PERCPT_LOCK_EX)) {
		spin_unlock(pcl->pcl_locks[index]);
		return;
	}

	for (i = ncpt - 1; i >= 0; i--) {
		if (i == 0) {
			LASSERT(pcl->pcl_locked);
			pcl->pcl_locked = 0;
		}
		spin_unlock(pcl->pcl_locks[i]);
	}
}
EXPORT_SYMBOL(cfs_percpt_unlock);
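
/*
 * Illustrative sketch (not part of the original file): taking the
 * private lock for the caller's current partition, then taking the
 * exclusive lock.  This assumes cfs_cpt_current(), the libcfs helper
 * that maps the running CPU to its partition index, and "pcl" from
 * cfs_percpt_lock_alloc() above.
 *
 *	int cpt = cfs_cpt_current(pcl->pcl_cptab, 1);
 *
 *	cfs_percpt_lock(pcl, cpt);	(private lock, one partition)
 *	... touch data owned by partition "cpt" ...
 *	cfs_percpt_unlock(pcl, cpt);
 *
 *	cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);	(all partitions)
 *	... touch data shared across partitions ...
 *	cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);
 */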

/** free cpu-partition refcount */
void
cfs_percpt_atomic_free(atomic_t **refs)
{
	cfs_percpt_free(refs);
}
EXPORT_SYMBOL(cfs_percpt_atomic_free);

/** allocate cpu-partition refcount with initial value \a init_val */
atomic_t **
cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
{
	atomic_t	**refs;
	atomic_t	*ref;
	int		i;

	refs = cfs_percpt_alloc(cptab, sizeof(*ref));
	if (refs == NULL)
		return NULL;

	cfs_percpt_for_each(ref, i, refs)
		atomic_set(ref, init_val);
	return refs;
}
EXPORT_SYMBOL(cfs_percpt_atomic_alloc);

/** return sum of cpu-partition refs */
int
cfs_percpt_atomic_summary(atomic_t **refs)
{
	atomic_t	*ref;
	int		i;
	int		val = 0;

	cfs_percpt_for_each(ref, i, refs)
		val += atomic_read(ref);

	return val;
}
EXPORT_SYMBOL(cfs_percpt_atomic_summary);
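
/*
 * Illustrative sketch (not part of the original file): per-partition
 * refcounting with the helpers above.  Each partition increments its
 * own counter, avoiding cacheline bouncing on the hot path, and the
 * sum is only computed when the total is actually needed.  "my_cptab"
 * is a hypothetical cfs_cpt_table owned by the caller.
 *
 *	atomic_t **refs;
 *
 *	refs = cfs_percpt_atomic_alloc(my_cptab, 0);
 *	if (refs == NULL)
 *		return -ENOMEM;
 *
 *	atomic_inc(refs[cfs_cpt_current(my_cptab, 1)]);
 *	...
 *	if (cfs_percpt_atomic_summary(refs) == 0)
 *		cfs_percpt_atomic_free(refs);
 */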