/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018-2021, Mellanox Technologies inc.  All rights reserved. */

#ifndef __LIB_MLX5_EQ_H__
#define __LIB_MLX5_EQ_H__
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cq.h>

#define MLX5_EQE_SIZE       (sizeof(struct mlx5_eqe))

struct mlx5_eq_tasklet {
	struct list_head      list;
	struct list_head      process_list;
	struct tasklet_struct task;
	spinlock_t            lock; /* lock completion tasklet list */
};

struct mlx5_cq_table {
	spinlock_t              lock;	/* protect radix tree */
	struct radix_tree_root  tree;
};

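/*
 * Core EQ state: the fragmented EQE buffer (fbc/frag_buf), the table of
 * CQs this EQ dispatches completion events to, and the doorbell used
 * together with cons_index to update the CI and re-arm the EQ.
 */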
struct mlx5_eq {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf    frag_buf;
	struct mlx5_core_dev    *dev;
	struct mlx5_cq_table    cq_table;
	__be32 __iomem	        *doorbell;
	u32                     cons_index;
	unsigned int            vecidx;
	unsigned int            irqn;
	u8                      eqn;
	struct mlx5_rsc_debug   *dbg;
	struct mlx5_irq         *irq;
};

struct mlx5_eq_async {
	struct mlx5_eq          core;
	struct notifier_block   irq_nb;
	spinlock_t              lock; /* To avoid races between the irq EQ handler and resiliency flows */
};

struct mlx5_eq_comp {
	struct mlx5_eq          core;
	struct notifier_block   irq_nb;
	struct mlx5_eq_tasklet  tasklet_ctx;
	struct list_head        list;
};

static inline u32 eq_get_size(struct mlx5_eq *eq)
{
	return eq->fbc.sz_m1 + 1;
}

static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_frag_buf_get_wqe(&eq->fbc, entry);
}

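/*
 * An EQE belongs to SW when its owner bit matches the ownership phase
 * derived from cons_index: the phase bit flips each time the consumer
 * index wraps around the queue (every 2^log_sz entries). A mismatch
 * means HW has not yet written this entry, so there is no new EQE.
 */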
static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & eq->fbc.sz_m1);

	return (eqe->owner ^ (eq->cons_index >> eq->fbc.log_sz)) & 1 ? NULL : eqe;
}

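/*
 * Doorbell layout as used below: bits 0..23 of the written value carry
 * the consumer index, bits 24..31 the EQ number. Writing at dword
 * offset 0 updates the CI and arms the EQ; writing two dwords further
 * updates the CI without re-arming.
 */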
static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

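/*
 * Illustrative sketch, not part of the original header: a typical EQ
 * polling loop built on the helpers above. example_poll_eq() and
 * handle_eqe() are hypothetical names used only for illustration.
 *
 *	static void example_poll_eq(struct mlx5_eq *eq)
 *	{
 *		struct mlx5_eqe *eqe;
 *
 *		while ((eqe = next_eqe_sw(eq))) {
 *			// Read the EQE contents only after the
 *			// ownership check in next_eqe_sw().
 *			dma_rmb();
 *			handle_eqe(eq, eqe);
 *			++eq->cons_index;
 *		}
 *		// Publish the new CI and re-arm the EQ for interrupts.
 *		eq_update_ci(eq, 1);
 *	}
 */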
int mlx5_eq_table_init(struct mlx5_core_dev *dev);
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_eq_table_create(struct mlx5_core_dev *dev);
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
void mlx5_cq_tasklet_cb(struct tasklet_struct *t);
struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);

u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev);
void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev);
void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif

int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);

#endif