#ifndef __CARD_BASE_H__
#define __CARD_BASE_H__

/**
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Interfaces within the GenWQE module. Defines genwqe_card and
 * ddcb_queue as well as ddcb_requ.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/stringify.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/version.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <linux/genwqe/genwqe_card.h>
#include "genwqe_driver.h"

#define GENWQE_MSI_IRQS              4    /* Just MSI supported, no MSI-X */
#define GENWQE_FLAG_MSI_ENABLED      (1 << 0)

#define GENWQE_MAX_VFS               15   /* maximum 15 VFs are possible */
#define GENWQE_MAX_FUNCS             16   /* 1 PF and 15 VFs */
#define GENWQE_CARD_NO_MAX           (16 * GENWQE_MAX_FUNCS)

/* Compile parameters, some of them appear in debugfs for later adjustment */
#define genwqe_ddcb_max              32   /* DDCBs on the work-queue */
#define genwqe_polling_enabled       0    /* in case of irqs not working */
#define genwqe_ddcb_software_timeout 10   /* timeout per DDCB in seconds */
#define genwqe_kill_timeout          8    /* time until process gets killed */
#define genwqe_vf_jobtimeout_msec    250  /* 250 msec */
#define genwqe_pf_jobtimeout_msec    8000 /* 8 sec should be ok */
#define genwqe_health_check_interval 4    /* <= 0: disabled */

/* Sysfs attribute groups used when we create the genwqe device */
extern const struct attribute_group *genwqe_attribute_groups[];

/*
 * Config space for Genwqe5 A7:
 * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00
 * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00
 * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04]
 * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00
 */
#define PCI_DEVICE_GENWQE              0x044b /* Genwqe DeviceID */

#define PCI_SUBSYSTEM_ID_GENWQE5       0x035f /* Genwqe A5 Subsystem-ID */
#define PCI_SUBSYSTEM_ID_GENWQE5_NEW   0x044b /* Genwqe A5 Subsystem-ID */
#define PCI_CLASSCODE_GENWQE5          0x1200 /* UNKNOWN */

#define PCI_SUBVENDOR_ID_IBM_SRIOV     0x0000
#define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV 0x0000 /* Genwqe A5 Subsystem-ID */
#define PCI_CLASSCODE_GENWQE5_SRIOV    0x1200 /* UNKNOWN */

#define GENWQE_SLU_ARCH_REQ            2 /* Required SLU architecture level */

/**
 * struct genwqe_reg - Genwqe data dump functionality
 */
struct genwqe_reg {
        u32 addr;
        u32 idx;
        u64 val;
};

/*
 * enum genwqe_dbg_type - Specify chip unit to dump/debug
 */
enum genwqe_dbg_type {
        GENWQE_DBG_UNIT0 = 0,  /* captured before prev errs cleared */
        GENWQE_DBG_UNIT1 = 1,
        GENWQE_DBG_UNIT2 = 2,
        GENWQE_DBG_UNIT3 = 3,
        GENWQE_DBG_UNIT4 = 4,
        GENWQE_DBG_UNIT5 = 5,
        GENWQE_DBG_UNIT6 = 6,
        GENWQE_DBG_UNIT7 = 7,
        GENWQE_DBG_REGS = 8,
        GENWQE_DBG_DMA = 9,
        GENWQE_DBG_UNITS = 10, /* max number of possible debug units */
};

/* Software error injection to simulate card failures */
#define GENWQE_INJECT_HARDWARE_FAILURE  0x00000001 /* injects -1 reg reads */
#define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002 /* pci_bus_reset fail */
#define GENWQE_INJECT_GFIR_FATAL        0x00000004 /* GFIR = 0x0000ffff */
#define GENWQE_INJECT_GFIR_INFO         0x00000008 /* GFIR = 0xffff0000 */
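
/*
 * Usage sketch, not taken from the driver sources: the injection flags
 * above can be OR-ed together and written to the err_inject attribute
 * shown in the "Card recovery" section below, e.g. to simulate a
 * fatal GFIR:
 *
 *   echo 0x4 > /sys/class/genwqe/genwqe0_card/err_inject
 */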

/*
 * Genwqe card description and management data.
 *
 * Error-handling in case of card malfunction
 * ------------------------------------------
 *
 * If the card is detected to be defective, the outside environment
 * will cause the PCI layer to call deinit (the cleanup function for
 * probe). This has the same effect as doing an unbind/bind operation
 * on the card.
 *
 * The genwqe card driver implements a health checking thread which
 * verifies the card function. If it detects a problem, the card's
 * device is shut down and restarted, along with a reset of the card
 * and queue.
 *
 * All functions accessing the card device return either -EIO or -ENODEV
 * to indicate the malfunction to the user. The user has to close the
 * file descriptor and open a new one once the card becomes available
 * again.
 *
 * If the open file descriptor is set up to receive SIGIO, the signal is
 * generated for the application, which has to provide a handler to
 * react to it. If the application does not close the open file
 * descriptor, a SIGKILL is sent to enforce freeing the card's
 * resources.
 *
 * I did not find a different way to prevent kernel problems due to
 * reference counters for the card's character devices getting out of
 * sync. The character device deallocation does not block, even if
 * there is still an open file descriptor pending. If this pending
 * descriptor is closed, the data structures used by the character
 * device are reinstantiated, which will lead to the reference counter
 * dropping below the allowed values.
 *
 * Card recovery
 * -------------
 *
 * To test the internal driver recovery the following command can be used:
 *   sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject'
 */

/**
 * enum dma_mapping_type - Mapping type definition
 *
 * To avoid memcpying data around, we use user memory directly. To do
 * this we need to pin/swap-in the memory and request a DMA address
 * for it.
 */
enum dma_mapping_type {
        GENWQE_MAPPING_RAW = 0,    /* contiguous memory buffer */
        GENWQE_MAPPING_SGL_TEMP,   /* sglist dynamically used */
        GENWQE_MAPPING_SGL_PINNED, /* sglist used with pinning */
};

/**
 * struct dma_mapping - Information about memory mappings done by the driver
 */
struct dma_mapping {
        enum dma_mapping_type type;

        void *u_vaddr;              /* user-space vaddr/non-aligned */
        void *k_vaddr;              /* kernel-space vaddr/non-aligned */
        dma_addr_t dma_addr;        /* physical DMA address */

        struct page **page_list;    /* list of pages used by user buff */
        dma_addr_t *dma_list;       /* list of dma addresses per page */
        unsigned int nr_pages;      /* number of pages */
        unsigned int size;          /* size in bytes */

        struct list_head card_list; /* list of usr_maps for card */
        struct list_head pin_list;  /* list of pinned memory for dev */
};

static inline void genwqe_mapping_init(struct dma_mapping *m,
                                       enum dma_mapping_type type)
{
        memset(m, 0, sizeof(*m));
        m->type = type;
}
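
/*
 * Usage sketch, not taken from the driver sources: a mapping is
 * zeroed and typed here before the memory behind it is pinned via
 * genwqe_user_vmap(), declared further below; error handling omitted.
 *
 *   struct dma_mapping m;
 *
 *   genwqe_mapping_init(&m, GENWQE_MAPPING_RAW);
 *   rc = genwqe_user_vmap(cd, &m, uaddr, size, req);
 */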

/**
 * struct ddcb_queue - DDCB queue data
 * @ddcb_max: Number of DDCBs on the queue
 * @ddcb_next: Next free DDCB
 * @ddcb_act: Next DDCB supposed to finish
 * @ddcb_seq: Sequence number of last DDCB
 * @ddcbs_in_flight: Currently enqueued DDCBs
 * @ddcbs_completed: Number of already completed DDCBs
 * @ddcbs_max_in_flight: Maximum number of DDCBs in flight at a time
 * @return_on_busy: Number of -EBUSY returns on full queue
 * @wait_on_busy: Number of waits on full queue
 * @ddcb_daddr: DMA address of first DDCB in the queue
 * @ddcb_vaddr: Kernel virtual address of first DDCB in the queue
 * @ddcb_req: Associated requests (one per DDCB)
 * @ddcb_waitqs: Associated wait queues (one per DDCB)
 * @ddcb_lock: Lock to protect queuing operations
 * @busy_waitq: Wait on next DDCB finishing
 */
struct ddcb_queue {
        int ddcb_max;                   /* amount of DDCBs */
        int ddcb_next;                  /* next available DDCB num */
        int ddcb_act;                   /* DDCB to be processed */
        u16 ddcb_seq;                   /* slc seq num */
        unsigned int ddcbs_in_flight;   /* number of ddcbs in processing */
        unsigned int ddcbs_completed;
        unsigned int ddcbs_max_in_flight;
        unsigned int return_on_busy;    /* how many times -EBUSY? */
        unsigned int wait_on_busy;

        dma_addr_t ddcb_daddr;          /* DMA address */
        struct ddcb *ddcb_vaddr;        /* kernel virtual addr for DDCBs */
        struct ddcb_requ **ddcb_req;    /* ddcb processing parameter */
        wait_queue_head_t *ddcb_waitqs; /* waitqueue per ddcb */

        spinlock_t ddcb_lock;           /* exclusive access to queue */
        wait_queue_head_t busy_waitq;   /* wait for ddcb processing */

        /* registers of the respective queue to be used */
        u32 IO_QUEUE_CONFIG;
        u32 IO_QUEUE_STATUS;
        u32 IO_QUEUE_SEGMENT;
        u32 IO_QUEUE_INITSQN;
        u32 IO_QUEUE_WRAP;
        u32 IO_QUEUE_OFFSET;
        u32 IO_QUEUE_WTIME;
        u32 IO_QUEUE_ERRCNTS;
        u32 IO_QUEUE_LRW;
};

/*
 * GFIR, SLU_UNITCFG, APP_UNITCFG
 * 8 Units with FIR/FEC + 64 * 2ndary FIRS/FEC.
 */
#define GENWQE_FFDC_REGS (3 + (8 * (2 + 2 * 64)))
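/* i.e. 3 + 8 * (2 + 128) = 1043 struct genwqe_reg entries at most */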

struct genwqe_ffdc {
        unsigned int entries;
        struct genwqe_reg *regs;
};

/**
 * struct genwqe_dev - GenWQE device information
 * @card_state: Card operation state, see above
 * @ffdc: First Failure Data Capture buffers for each unit
 * @card_thread: Working thread to operate the DDCB queue
 * @queue_waitq: Wait queue used in card_thread
 * @queue: DDCB queue
 * @health_thread: Card monitoring thread (only for PFs)
 * @health_waitq: Wait queue used in health_thread
 * @pci_dev: Associated PCI device (function)
 * @mmio: Base address of 64-bit register space
 * @mmio_len: Length of register area
 * @file_lock: Lock to protect access to file_list
 * @file_list: List of all processes with open GenWQE file descriptors
 *
 * This struct contains all information needed to communicate with a
 * GenWQE card. It is initialized when a GenWQE device is found and
 * destroyed when it goes away. It holds data to maintain the queue as
 * well as data needed to feed the user interfaces.
 */
struct genwqe_dev {
        enum genwqe_card_state card_state;
        spinlock_t print_lock;

        int card_idx;                  /* card index 0..CARD_NO_MAX-1 */
        u64 flags;                     /* general flags */

        /* FFDC data gathering */
        struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS];

        /* DDCB workqueue */
        struct task_struct *card_thread;
        wait_queue_head_t queue_waitq;
        struct ddcb_queue queue;       /* genwqe DDCB queue */
        unsigned int irqs_processed;

        /* Card health checking thread */
        struct task_struct *health_thread;
        wait_queue_head_t health_waitq;

        int use_platform_recovery;     /* use platform recovery mechanisms */

        /* char device */
        dev_t devnum_genwqe;           /* major/minor num card */
        struct class *class_genwqe;    /* reference to class object */
        struct device *dev;            /* for device creation */
        struct cdev cdev_genwqe;       /* char device for card */

        struct dentry *debugfs_root;   /* debugfs card root directory */
        struct dentry *debugfs_genwqe; /* debugfs driver root directory */

        /* pci resources */
        struct pci_dev *pci_dev;       /* PCI device */
        void __iomem *mmio;            /* BAR-0 MMIO start */
        unsigned long mmio_len;
        int num_vfs;
        u32 vf_jobtimeout_msec[GENWQE_MAX_VFS];
        int is_privileged;             /* access to all regs possible */

        /* config regs which we need often */
        u64 slu_unitcfg;
        u64 app_unitcfg;
        u64 softreset;
        u64 err_inject;
        u64 last_gfir;
        char app_name[5];

        spinlock_t file_lock;          /* lock for open files */
        struct list_head file_list;    /* list of open files */

        /* debugfs parameters */
        int ddcb_software_timeout;     /* wait until DDCB times out */
        int skip_recovery;             /* circumvention if recovery fails */
        int kill_timeout;              /* wait after sending SIGKILL */
};

/**
 * enum genwqe_requ_state - State of a DDCB execution request
 */
enum genwqe_requ_state {
        GENWQE_REQU_NEW = 0,
        GENWQE_REQU_ENQUEUED = 1,
        GENWQE_REQU_TAPPED = 2,
        GENWQE_REQU_FINISHED = 3,
        GENWQE_REQU_STATE_MAX,
};

/**
 * struct genwqe_sgl - Scatter gather list describing user-space memory
 * @sgl: scatter gather list, needs to be 128 byte aligned
 * @sgl_dma_addr: dma address of sgl
 * @sgl_size: size of area used for sgl
 * @user_addr: user-space address of memory area
 * @user_size: size of user-space memory area
 * @nr_pages: number of pages backing the user buffer
 * @fpage_offs: offset of the data within the first page
 * @fpage_size: size of the data within the first page
 * @lpage_size: size of the data within the last page
 * @fpage: buffer for a partial first page, if needed
 * @fpage_dma_addr: dma address of the partial first page
 * @lpage: buffer for a partial last page, if needed
 * @lpage_dma_addr: dma address of the partial last page
 */
struct genwqe_sgl {
        dma_addr_t sgl_dma_addr;
        struct sg_entry *sgl;
        size_t sgl_size;        /* size of sgl */

        void __user *user_addr; /* user-space base-address */
        size_t user_size;       /* size of memory area */

        unsigned long nr_pages;
        unsigned long fpage_offs;
        size_t fpage_size;
        size_t lpage_size;

        void *fpage;
        dma_addr_t fpage_dma_addr;

        void *lpage;
        dma_addr_t lpage_dma_addr;
};

int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
                          void __user *user_addr, size_t user_size);

int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
                     dma_addr_t *dma_list);

int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl);
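
/*
 * Typical call sequence, sketched here for illustration rather than
 * copied from the driver sources; the dma_list is assumed to come
 * from a pinned struct dma_mapping, and error handling is omitted:
 *
 *   rc = genwqe_alloc_sync_sgl(cd, &sgl, user_addr, user_size);
 *   rc = genwqe_setup_sgl(cd, &sgl, m->dma_list);
 *   ...execute the DDCB referencing the sgl...
 *   rc = genwqe_free_sync_sgl(cd, &sgl);
 */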

/**
 * struct ddcb_requ - Kernel internal representation of the DDCB request
 * @cmd: User space representation of the DDCB execution request
 */
struct ddcb_requ {
        /* kernel specific content */
        enum genwqe_requ_state req_state; /* request status */
        int num;                          /* ddcb_no for this request */
        struct ddcb_queue *queue;         /* associated queue */

        struct dma_mapping dma_mappings[DDCB_FIXUPS];
        struct genwqe_sgl sgls[DDCB_FIXUPS];

        /* kernel/user shared content */
        struct genwqe_ddcb_cmd cmd;       /* command copied from user space */
        struct genwqe_debug_data debug_data;
};

/**
 * struct genwqe_file - Information for open GenWQE devices
 */
struct genwqe_file {
        struct genwqe_dev *cd;
        struct genwqe_driver *client;
        struct file *filp;

        struct fasync_struct *async_queue;
        struct task_struct *owner;
        struct list_head list;     /* entry in list of open files */

        spinlock_t map_lock;       /* lock for dma_mappings */
        struct list_head map_list; /* list of dma_mappings */

        spinlock_t pin_lock;       /* lock for pinned memory */
        struct list_head pin_list; /* list of pinned memory */
};

int genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */
int genwqe_finish_queue(struct genwqe_dev *cd);
int genwqe_release_service_layer(struct genwqe_dev *cd);

/**
 * genwqe_get_slu_id() - Read Service Layer Unit Id
 * Return: 0x00: Development code
 *         0x01: SLC1 (old)
 *         0x02: SLC2 (sept2012)
 *         0x03: SLC2 (feb2013, generic driver)
 */
static inline int genwqe_get_slu_id(struct genwqe_dev *cd)
{
        return (int)((cd->slu_unitcfg >> 32) & 0xff);
}

int genwqe_ddcbs_in_flight(struct genwqe_dev *cd);

u8 genwqe_card_type(struct genwqe_dev *cd);
int genwqe_card_reset(struct genwqe_dev *cd);
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count);
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd);

int genwqe_device_create(struct genwqe_dev *cd);
int genwqe_device_remove(struct genwqe_dev *cd);

/* debugfs */
int genwqe_init_debugfs(struct genwqe_dev *cd);
void genqwe_exit_debugfs(struct genwqe_dev *cd);

int genwqe_read_softreset(struct genwqe_dev *cd);

/* Hardware Circumventions */
int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd);
int genwqe_flash_readback_fails(struct genwqe_dev *cd);

/**
 * genwqe_write_vreg() - Write register in VF window
 * @cd: genwqe device
 * @reg: register address
 * @val: value to write
 * @func: 0: PF, 1: VF0, ..., 15: VF14
 */
int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func);

/**
 * genwqe_read_vreg() - Read register in VF window
 * @cd: genwqe device
 * @reg: register address
 * @func: 0: PF, 1: VF0, ..., 15: VF14
 *
 * Return: content of the register
 */
u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func);

/* FFDC Buffer Management */
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id);
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id,
                          struct genwqe_reg *regs, unsigned int max_regs);
int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
                          unsigned int max_regs, int all);
int genwqe_ffdc_dump_dma(struct genwqe_dev *cd,
                         struct genwqe_reg *regs, unsigned int max_regs);
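
/*
 * Capture sketch for one unit, for illustration only and not taken
 * from the driver sources; error handling omitted:
 *
 *   int entries = genwqe_ffdc_buff_size(cd, unit_id);
 *   struct genwqe_reg *regs = kcalloc(entries, sizeof(*regs),
 *                                     GFP_KERNEL);
 *
 *   genwqe_ffdc_buff_read(cd, unit_id, regs, entries);
 */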

int genwqe_init_debug_data(struct genwqe_dev *cd,
                           struct genwqe_debug_data *d);

void genwqe_init_crc32(void);
int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len);

/* Memory allocation/deallocation; dma address handling */
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
                     void *uaddr, unsigned long size,
                     struct ddcb_requ *req);

int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
                       struct ddcb_requ *req);

static inline bool dma_mapping_used(struct dma_mapping *m)
{
        if (!m)
                return false;
        return m->size != 0;
}

/**
 * __genwqe_execute_ddcb() - Execute DDCB request with addr translation
 *
 * This function will do the address translation changes to the DDCBs
 * according to the definitions required by the ATS field. It looks up
 * the memory allocation buffer or does vmap/vunmap for the respective
 * user-space buffers, including page pinning and scatter gather list
 * buildup and teardown.
 */
int __genwqe_execute_ddcb(struct genwqe_dev *cd,
                          struct genwqe_ddcb_cmd *cmd, unsigned int f_flags);
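
/*
 * Usage sketch, not taken from the driver sources: executing a
 * command prepared by user space, e.g. from an ioctl path; the
 * genwqe_ddcb_cmd layout comes from linux/genwqe/genwqe_card.h.
 *
 *   rc = __genwqe_execute_ddcb(cd, &cmd, filp->f_flags);
 *   if (rc < 0)
 *           return rc;
 */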

/**
 * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation
 *
 * This version will not do address translation or any modification of
 * the DDCB data. It is used e.g. for the MoveFlash DDCB which is
 * entirely prepared by the driver itself. That means the appropriate
 * DMA addresses are already in the DDCB and do not need any
 * modification.
 */
int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
                              struct genwqe_ddcb_cmd *cmd,
                              unsigned int f_flags);
int __genwqe_enqueue_ddcb(struct genwqe_dev *cd,
                          struct ddcb_requ *req,
                          unsigned int f_flags);

int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);

/* register access */
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val);
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs);
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val);
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs);
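
/*
 * Read sketch, for illustration only: a dead card answers reads with
 * all-ones, so callers compare against IO_ILLEGAL_VALUE (see the
 * genwqe_is_privileged() comment below); IO_SLU_BITSTREAM is assumed
 * to be a register offset from the genwqe headers.
 *
 *   u64 val = __genwqe_readq(cd, IO_SLU_BITSTREAM);
 *
 *   if (val == IO_ILLEGAL_VALUE)
 *           return -EIO;
 */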

void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
                                dma_addr_t *dma_handle);
void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
                              void *vaddr, dma_addr_t dma_handle);
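
/*
 * Allocation sketch, not taken from the driver sources: backing the
 * DDCB queue with a DMA-able buffer; queue_size is a hypothetical
 * byte count, and error handling is omitted.
 *
 *   queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
 *                                                 &queue->ddcb_daddr);
 *   ...
 *   __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
 *                            queue->ddcb_daddr);
 */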

/* Base clock frequency in MHz */
int genwqe_base_clock_frequency(struct genwqe_dev *cd);

/* Before FFDC is captured the traps should be stopped. */
void genwqe_stop_traps(struct genwqe_dev *cd);
void genwqe_start_traps(struct genwqe_dev *cd);

/* Hardware circumvention */
bool genwqe_need_err_masking(struct genwqe_dev *cd);

/**
 * genwqe_is_privileged() - Determine operation mode for PCI function
 *
 * On Intel with SRIOV support we see:
 *   PF: is_physfn = 1 is_virtfn = 0
 *   VF: is_physfn = 0 is_virtfn = 1
 *
 * On systems with no SRIOV support _and_ on virtualized systems we get:
 *       is_physfn = 0 is_virtfn = 0
 *
 * Other vendors have individual PCI device IDs to distinguish between
 * virtual function drivers and physical function drivers. GenWQE
 * unfortunately has just one PCI device ID for both, VFs and PF.
 *
 * The following code is used to distinguish if the card is running in
 * privileged mode, either as true PF or in a virtualized system with
 * full register access, e.g. currently on PowerPC:
 *
 *   if (pci_dev->is_virtfn)
 *           cd->is_privileged = 0;
 *   else
 *           cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
 *                                != IO_ILLEGAL_VALUE);
 */
static inline int genwqe_is_privileged(struct genwqe_dev *cd)
{
        return cd->is_privileged;
}

#endif /* __CARD_BASE_H__ */