// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>
#include <linux/log2.h>

#include "ntb_hw_intel.h"
#include "ntb_hw_gen1.h"
#include "ntb_hw_gen3.h"
#include "ntb_hw_gen4.h"

static int gen4_poll_link(struct intel_ntb_dev *ndev);
static int gen4_link_is_up(struct intel_ntb_dev *ndev);

static const struct intel_ntb_reg gen4_reg = {
	.poll_link = gen4_poll_link,
	.link_is_up = gen4_link_is_up,
	.db_ioread = gen3_db_ioread,
	.db_iowrite = gen3_db_iowrite,
	.db_size = sizeof(u32),
	.ntb_ctl = GEN4_NTBCNTL_OFFSET,
	.mw_bar = {2, 4},
};

static const struct intel_ntb_alt_reg gen4_pri_reg = {
	.db_clear = GEN4_IM_INT_STATUS_OFFSET,
	.db_mask = GEN4_IM_INT_DISABLE_OFFSET,
	.spad = GEN4_IM_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg gen4_sec_xlat = {
	.bar2_limit = GEN4_IM23XLMT_OFFSET,
	.bar2_xlat = GEN4_IM23XBASE_OFFSET,
	.bar2_idx = GEN4_IM23XBASEIDX_OFFSET,
};

static const struct intel_ntb_alt_reg gen4_b2b_reg = {
	.db_bell = GEN4_IM_DOORBELL_OFFSET,
	.spad = GEN4_EM_SPAD_OFFSET,
};

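/*
 * gen4_poll_link() - poll the hardware link status
 *
 * Clears the pending link interrupt, then compares the current link
 * status register against the cached value. Returns 1 if the link
 * state changed (the caller should re-evaluate the link), 0 otherwise.
 */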
static int gen4_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;

	/*
	 * We need to write to the DLLSCS bit in the SLOTSTS register
	 * before we can clear the hardware link interrupt on ICX NTB.
	 */
	iowrite16(GEN4_SLOTSTS_DLLSCS, ndev->self_mmio + GEN4_SLOTSTS);
	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_clear);

	reg_val = ioread16(ndev->self_mmio + GEN4_LINK_STATUS_OFFSET);
	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}

static int gen4_link_is_up(struct intel_ntb_dev *ndev)
{
	return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
}

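/*
 * gen4_init_isr() - line up MSIX vectors with status bits and init ISR
 *
 * After the INTVEC reprogramming loop below, interrupt status bit i
 * fires MSIX vector i, which is the layout ndev_init_isr() expects.
 */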
static int gen4_init_isr(struct intel_ntb_dev *ndev)
{
	int i;

	/*
	 * The MSIX vectors and the interrupt status bits are not lined up
	 * on Gen3 (Skylake) and Gen4. By default the link status bit is
	 * bit 32; however, it is MSIX vector 0 by default. We need a fixup
	 * to line them up. The vectors at reset are 1-32,0; we reprogram
	 * them to 0-32.
	 */
	for (i = 0; i < GEN4_DB_MSIX_VECTOR_COUNT; i++)
		iowrite8(i, ndev->self_mmio + GEN4_INTVEC_OFFSET + i);

	return ndev_init_isr(ndev, GEN4_DB_MSIX_VECTOR_COUNT,
			     GEN4_DB_MSIX_VECTOR_COUNT,
			     GEN4_DB_MSIX_VECTOR_SHIFT,
			     GEN4_DB_TOTAL_SHIFT);
}

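/*
 * gen4_setup_b2b_mw() - set up the incoming side of the memory windows
 *
 * Program each incoming limit register equal to its base address,
 * creating zero-length windows, and clear the translation registers.
 * Real translations are established later via intel_ntb4_mw_set_trans().
 */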
static int gen4_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	phys_addr_t bar_addr;

	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	/* setup incoming bar limits == base addrs (zero length windows) */
	bar_addr = addr->bar2_addr64;
	iowrite64(bar_addr, mmio + GEN4_IM23XLMT_OFFSET);
	bar_addr = ioread64(mmio + GEN4_IM23XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IM23XLMT %#018llx\n", bar_addr);

	bar_addr = addr->bar4_addr64;
	iowrite64(bar_addr, mmio + GEN4_IM45XLMT_OFFSET);
	bar_addr = ioread64(mmio + GEN4_IM45XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IM45XLMT %#018llx\n", bar_addr);

	/* zero incoming translation addrs */
	iowrite64(0, mmio + GEN4_IM23XBASE_OFFSET);
	iowrite64(0, mmio + GEN4_IM45XBASE_OFFSET);

	ndev->peer_mmio = ndev->self_mmio;

	return 0;
}

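/*
 * gen4_init_ntb() - initialize device counts, register maps and windows
 *
 * Fills in the memory window, scratchpad and doorbell counts, selects
 * the gen4 register maps, and sets up zero-length B2B memory windows
 * based on the USD/DSD topology.
 */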
static int gen4_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;

	ndev->mw_count = XEON_MW_COUNT;
	ndev->spad_count = GEN4_SPAD_COUNT;
	ndev->db_count = GEN4_DB_COUNT;
	ndev->db_link_mask = GEN4_DB_LINK_BIT;

	ndev->self_reg = &gen4_pri_reg;
	ndev->xlat_reg = &gen4_sec_xlat;
	ndev->peer_reg = &gen4_b2b_reg;

	if (ndev->ntb.topo == NTB_TOPO_B2B_USD)
		rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_dsd_addr,
				       &xeon_b2b_usd_addr);
	else
		rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_usd_addr,
				       &xeon_b2b_dsd_addr);
	if (rc)
		return rc;

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

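	/* Mask all doorbells; clients unmask them via ntb_db_clear_mask() */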
	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}

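/*
 * Decode the topology field of the PPD register. Only back-to-back
 * (B2B) upstream/downstream configurations are supported; anything
 * else maps to NTB_TOPO_NONE and fails probe. ICX (gen4_ppd_topo)
 * and SPR (spr_ppd_topo) use different field encodings.
 */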
static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
	switch (ppd & GEN4_PPD_TOPO_MASK) {
	case GEN4_PPD_TOPO_B2B_USD:
		return NTB_TOPO_B2B_USD;
	case GEN4_PPD_TOPO_B2B_DSD:
		return NTB_TOPO_B2B_DSD;
	}

	return NTB_TOPO_NONE;
}

static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
	switch (ppd & SPR_PPD_TOPO_MASK) {
	case SPR_PPD_TOPO_B2B_USD:
		return NTB_TOPO_B2B_USD;
	case SPR_PPD_TOPO_B2B_DSD:
		return NTB_TOPO_B2B_DSD;
	}

	return NTB_TOPO_NONE;
}

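/*
 * gen4_init_dev() - gen4 (ICX/SPR) specific device initialization
 *
 * Reads the topology from PPD1, initializes the NTB registers and
 * windows, leaves the link disabled, and registers the interrupt
 * handlers. ICX parts additionally require BAR-size-aligned inbound
 * translation addresses (NTB_HWERR_BAR_ALIGN).
 */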
int gen4_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev->ntb.pdev;
	u32 ppd1;
	u16 lnkctl;
	int rc;

	ndev->reg = &gen4_reg;

	if (pdev_is_ICX(pdev))
		ndev->hwerr_flags |= NTB_HWERR_BAR_ALIGN;

	ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
	if (pdev_is_ICX(pdev))
		ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
	else if (pdev_is_SPR(pdev))
		ndev->ntb.topo = spr_ppd_topo(ndev, ppd1);
	dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	rc = gen4_init_ntb(ndev);
	if (rc)
		return rc;

	/* Leave the link disabled until a client calls ntb_link_enable() */
	lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
	lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
	iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);

	return gen4_init_isr(ndev);
}

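/*
 * ndev_ntb4_debugfs_read() - dump gen4 device state for debugfs
 *
 * Formats topology, link state, window/doorbell/scratchpad counts,
 * incoming translation registers and error status registers into a
 * text buffer and copies it to userspace.
 */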
ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf,
			       size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA (cached) -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + GEN4_IM23XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IM23XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN4_IM45XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IM45XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN4_IM23XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IM23XLMT -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN4_IM45XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IM45XLMT -\t\t\t%#018llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Statistics:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Hardware Errors:\n");

	if (!pci_read_config_word(ndev->ntb.pdev,
				  GEN4_DEVSTS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "DEVSTS -\t\t%#06x\n", u.v16);

	u.v16 = ioread16(mmio + GEN4_LINK_STATUS_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LNKSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   GEN4_UNCERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "UNCERRSTS -\t\t%#06x\n", u.v32);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   GEN4_CORERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "CORERRSTS -\t\t%#06x\n", u.v32);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

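/*
 * intel_ntb4_mw_set_trans() - program an inbound memory window
 *
 * Each register write is read back and verified; on any mismatch the
 * window is torn down (translation zeroed, limit restored to base)
 * and -EIO is returned, so a failed setup never leaves a partially
 * programmed window.
 */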
static int intel_ntb4_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				   dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg, idx_reg;
	unsigned short base_idx, reg_val16;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

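	/* If the b2b window consumes a whole BAR (b2b_off == 0), skip its index */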
	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) {
		/* hardware requires that addr is aligned to bar size */
		if (addr & (bar_size - 1))
			return -EINVAL;
	} else {
		if (addr & (PAGE_SIZE - 1))
			return -EINVAL;
	}

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
	base = pci_resource_start(ndev->ntb.pdev, bar);

	/*
	 * Set the limit to base + size if a limit register is supported
	 * and size is less than the full window; otherwise use the full
	 * window size. base_idx holds the log2 of the window size for
	 * the BAR size index register used when BARs must be aligned.
	 */
	if (limit_reg && size != mw_size) {
		limit = base + size;
		base_idx = __ilog2_u64(size);
	} else {
		limit = base + mw_size;
		base_idx = __ilog2_u64(mw_size);
	}

	/* set and verify setting the translation address */
	iowrite64(addr, mmio + xlat_reg);
	reg_val = ioread64(mmio + xlat_reg);
	if (reg_val != addr) {
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMXBASE: %#Lx\n", bar, reg_val);

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMXLMT: %#Lx\n", bar, reg_val);

	if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) {
		idx_reg = ndev->xlat_reg->bar2_idx + (idx * 0x2);
		iowrite16(base_idx, mmio + idx_reg);
		reg_val16 = ioread16(mmio + idx_reg);
		if (reg_val16 != base_idx) {
			iowrite64(base, mmio + limit_reg);
			iowrite64(0, mmio + xlat_reg);
			iowrite16(0, mmio + idx_reg);
			return -EIO;
		}
		dev_dbg(&ntb->pdev->dev, "BAR %d IMBASEIDX: %#x\n", bar, reg_val16);
	}

	return 0;
}

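/*
 * intel_ntb4_link_enable() - bring up the NTB link
 *
 * Speed and width hints are ignored (the hardware trains these
 * automatically). Enables snooping on both BAR pairs, clears the
 * link-disable bit, then kicks off and verifies link training via
 * PPD0.
 */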
static int intel_ntb4_link_enable(struct ntb_dev *ntb,
				  enum ntb_speed max_speed,
				  enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl, ppd0;
	u16 lnkctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	dev_dbg(&ntb->pdev->dev,
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);

	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(&ntb->pdev->dev,
			"ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(&ntb->pdev->dev,
			"ignoring max_width %d\n", max_width);

	/* enable snoop in both directions for BAR 2/3 and 4/5 traffic */
	ntb_ctl = NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP;
	ntb_ctl |= NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
	lnkctl &= ~GEN4_LINK_CTRL_LINK_DISABLE;
	iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);

	/* start link training in PPD0 */
	ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
	ppd0 |= GEN4_PPD_LINKTRN;
	iowrite32(ppd0, ndev->self_mmio + GEN4_PPD0_OFFSET);

	/* make sure link training has started */
	ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
	if (!(ppd0 & GEN4_PPD_LINKTRN)) {
		dev_warn(&ntb->pdev->dev, "Link is not training\n");
		return -ENXIO;
	}

	ndev->dev_up = 1;

	return 0;
}

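/*
 * intel_ntb4_link_disable() - take down the NTB link
 *
 * Reverses intel_ntb4_link_enable(): clears the snoop bits and sets
 * the link-disable bit in the link control register.
 */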
static int intel_ntb4_link_disable(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_cntl;
	u16 lnkctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	dev_dbg(&ntb->pdev->dev, "Disabling link\n");

	/* clear the snoop bits */
	ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_cntl &= ~(NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP);
	ntb_cntl &= ~(NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP);
	iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

	lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
	lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
	iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);

	ndev->dev_up = 0;

	return 0;
}

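/*
 * intel_ntb4_mw_get_align() - report window alignment and size limits
 *
 * On parts flagged with NTB_HWERR_BAR_ALIGN (ICX), the inbound address
 * must be aligned to the full BAR size; otherwise PAGE_SIZE alignment
 * suffices. Window sizes have byte granularity up to the usable size.
 */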
static int intel_ntb4_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
				   resource_size_t *addr_align,
				   resource_size_t *size_align,
				   resource_size_t *size_max)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	resource_size_t bar_size, mw_size;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	if (addr_align) {
		if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN)
			*addr_align = pci_resource_len(ndev->ntb.pdev, bar);
		else
			*addr_align = PAGE_SIZE;
	}

	if (size_align)
		*size_align = 1;

	if (size_max)
		*size_max = mw_size;

	return 0;
}

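/*
 * Most doorbell and scratchpad operations are unchanged from earlier
 * generations, so the gen1 (intel_ntb_*) and gen3 (intel_ntb3_*)
 * helpers are reused; only the memory window and link management
 * callbacks are gen4 specific.
 */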
const struct ntb_dev_ops intel_ntb4_ops = {
	.mw_count = intel_ntb_mw_count,
	.mw_get_align = intel_ntb4_mw_get_align,
	.mw_set_trans = intel_ntb4_mw_set_trans,
	.peer_mw_count = intel_ntb_peer_mw_count,
	.peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
	.link_is_up = intel_ntb_link_is_up,
	.link_enable = intel_ntb4_link_enable,
	.link_disable = intel_ntb4_link_disable,
	.db_valid_mask = intel_ntb_db_valid_mask,
	.db_vector_count = intel_ntb_db_vector_count,
	.db_vector_mask = intel_ntb_db_vector_mask,
	.db_read = intel_ntb3_db_read,
	.db_clear = intel_ntb3_db_clear,
	.db_set_mask = intel_ntb_db_set_mask,
	.db_clear_mask = intel_ntb_db_clear_mask,
	.peer_db_addr = intel_ntb3_peer_db_addr,
	.peer_db_set = intel_ntb3_peer_db_set,
	.spad_is_unsafe = intel_ntb_spad_is_unsafe,
	.spad_count = intel_ntb_spad_count,
	.spad_read = intel_ntb_spad_read,
	.spad_write = intel_ntb_spad_write,
	.peer_spad_addr = intel_ntb_peer_spad_addr,
	.peer_spad_read = intel_ntb_peer_spad_read,
	.peer_spad_write = intel_ntb_peer_spad_write,
};