/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: liang@whamcloud.com
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "../../include/linux/libcfs/libcfs.h"

/** destroy cpu-partition lock, see libcfs_private.h for more detail */
void
cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
{
	LASSERT(pcl->pcl_locks != NULL);
	LASSERT(!pcl->pcl_locked);

	cfs_percpt_free(pcl->pcl_locks);
	LIBCFS_FREE(pcl, sizeof(*pcl));
}
EXPORT_SYMBOL(cfs_percpt_lock_free);

/**
 * create cpu-partition lock, see libcfs_private.h for more detail.
 *
 * cpu-partition lock is designed for large-scale SMP systems, so we need to
 * reduce cacheline conflicts as much as possible; that is why we always
 * allocate cacheline-aligned memory blocks.
 */
struct cfs_percpt_lock *
cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
{
	struct cfs_percpt_lock	*pcl;
	spinlock_t		*lock;
	int			i;

	/* NB: cptab can be NULL; pcl will cover all HW CPUs in that case */
	LIBCFS_ALLOC(pcl, sizeof(*pcl));
	if (!pcl)
		return NULL;

	pcl->pcl_cptab = cptab;
	pcl->pcl_locks = cfs_percpt_alloc(cptab, sizeof(*lock));
	if (!pcl->pcl_locks) {
		LIBCFS_FREE(pcl, sizeof(*pcl));
		return NULL;
	}

	cfs_percpt_for_each(lock, i, pcl->pcl_locks)
		spin_lock_init(lock);

	return pcl;
}
EXPORT_SYMBOL(cfs_percpt_lock_alloc);

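/*
 * Illustrative sketch of the allocation lifecycle above (editorial example,
 * not part of the original file). A caller builds a per-partition lock from
 * an existing CPT table and must release it with cfs_percpt_lock_free().
 * The function name example_percpt_lock_lifecycle() is hypothetical.
 *
 *	static int example_percpt_lock_lifecycle(struct cfs_cpt_table *cptab)
 *	{
 *		struct cfs_percpt_lock *pcl;
 *
 *		// one cacheline-aligned spinlock per CPU partition
 *		pcl = cfs_percpt_lock_alloc(cptab);
 *		if (!pcl)
 *			return -ENOMEM;
 *
 *		// ... use pcl with cfs_percpt_lock()/cfs_percpt_unlock() ...
 *
 *		cfs_percpt_lock_free(pcl);
 *		return 0;
 *	}
 */
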
/**
 * lock a CPU partition
 *
 * \a index != CFS_PERCPT_LOCK_EX
 *     hold private lock indexed by \a index
 *
 * \a index == CFS_PERCPT_LOCK_EX
 *     exclusively lock \a pcl; nobody can take a private lock
 */
void
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
{
	int	ncpt = cfs_cpt_number(pcl->pcl_cptab);
	int	i;

	LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt);

	if (ncpt == 1) {
		index = 0;
	} else { /* serialize with exclusive lock */
		while (pcl->pcl_locked)
			cpu_relax();
	}

	if (likely(index != CFS_PERCPT_LOCK_EX)) {
		spin_lock(pcl->pcl_locks[index]);
		return;
	}

	/* exclusive lock request */
	for (i = 0; i < ncpt; i++) {
		spin_lock(pcl->pcl_locks[i]);
		if (i == 0) {
			LASSERT(!pcl->pcl_locked);
			/* nobody should take a private lock after this
			 * point, so the exclusive locker won't starve
			 * for too long */
			pcl->pcl_locked = 1;
		}
	}
}
EXPORT_SYMBOL(cfs_percpt_lock);

/** unlock a CPU partition */
void
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
{
	int	ncpt = cfs_cpt_number(pcl->pcl_cptab);
	int	i;

	index = ncpt == 1 ? 0 : index;

	if (likely(index != CFS_PERCPT_LOCK_EX)) {
		spin_unlock(pcl->pcl_locks[index]);
		return;
	}

	for (i = ncpt - 1; i >= 0; i--) {
		if (i == 0) {
			LASSERT(pcl->pcl_locked);
			pcl->pcl_locked = 0;
		}
		spin_unlock(pcl->pcl_locks[i]);
	}
}
EXPORT_SYMBOL(cfs_percpt_unlock);

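/*
 * Illustrative usage of the two locking modes above (editorial sketch, not
 * part of the original file). The helper names and the imagined per-partition
 * counter array are hypothetical, for illustration only.
 *
 *	// fast path: take only the private lock of one partition, so
 *	// concurrent callers on other partitions never contend
 *	static void example_private_update(struct cfs_percpt_lock *pcl,
 *					   int cpt, long *counters)
 *	{
 *		cfs_percpt_lock(pcl, cpt);
 *		counters[cpt]++;
 *		cfs_percpt_unlock(pcl, cpt);
 *	}
 *
 *	// slow path: CFS_PERCPT_LOCK_EX takes every private lock in order,
 *	// so the caller sees a consistent view of all partitions at once
 *	static long example_exclusive_sum(struct cfs_percpt_lock *pcl,
 *					  long *counters, int ncpt)
 *	{
 *		long	sum = 0;
 *		int	i;
 *
 *		cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);
 *		for (i = 0; i < ncpt; i++)
 *			sum += counters[i];
 *		cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);
 *		return sum;
 *	}
 */
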
/** free cpu-partition refcount */
void
cfs_percpt_atomic_free(atomic_t **refs)
{
	cfs_percpt_free(refs);
}
EXPORT_SYMBOL(cfs_percpt_atomic_free);

/** allocate cpu-partition refcount with initial value \a init_val */
atomic_t **
cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
{
	atomic_t	**refs;
	atomic_t	*ref;
	int		i;

	refs = cfs_percpt_alloc(cptab, sizeof(*ref));
	if (!refs)
		return NULL;

	cfs_percpt_for_each(ref, i, refs)
		atomic_set(ref, init_val);
	return refs;
}
EXPORT_SYMBOL(cfs_percpt_atomic_alloc);

/** return sum of cpu-partition refs */
int
cfs_percpt_atomic_summary(atomic_t **refs)
{
	atomic_t	*ref;
	int		i;
	int		val = 0;

	cfs_percpt_for_each(ref, i, refs)
		val += atomic_read(ref);

	return val;
}
EXPORT_SYMBOL(cfs_percpt_atomic_summary);
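
/*
 * Illustrative sketch of the per-partition refcount helpers above (editorial
 * example, not part of the original file). The in-flight request scenario
 * and the function names are hypothetical.
 *
 *	static atomic_t **example_reqs_setup(struct cfs_cpt_table *cptab)
 *	{
 *		// one counter per CPU partition, each starting at 0
 *		return cfs_percpt_atomic_alloc(cptab, 0);
 *	}
 *
 *	static void example_reqs_teardown(atomic_t **reqs)
 *	{
 *		// callers bump their own partition's counter with
 *		// atomic_inc(reqs[cpt]); cfs_percpt_atomic_summary() folds
 *		// all partitions into one total without any locking
 *		LASSERT(cfs_percpt_atomic_summary(reqs) == 0);
 *		cfs_percpt_atomic_free(reqs);
 *	}
 */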