/*
 * Copyright (C) 2022 HiSilicon (Shanghai) Technologies CO., LIMITED.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/platform_device.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/version.h>
#include "securec.h"
#include "hiirq.h"
#include "hi_osal.h"

#ifndef NULL
#define NULL ((void *)0)
#endif

#define SINGLE_REG_SIZE   4
#define MAX_NODE_LIMIT    64
#define HIIRQ_PAGE_SHIFT  12

static int node_cnt = 0;
static osal_spinlock g_irq_spin_lock = { 0 };
#define irq_spin_lock(flags) osal_spin_lock_irqsave(&g_irq_spin_lock, &(flags))
#define irq_spin_unlock(flags) osal_spin_unlock_irqrestore(&g_irq_spin_lock, &(flags))

#define HIIRQ_PFX "hiirq: "

#ifdef CONFIG_HI_LOG_TRACE_SUPPORT
#define hiirq_trace osal_printk
#else
#define hiirq_trace(str, fmt...)
#endif

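/*
 * Wait until func(param) becomes true. Instead of a single blocking wait,
 * the condition is re-polled with a short interruptible timeout (100,
 * presumably in milliseconds) so a wakeup that races with the check is not
 * lost. A negative return from the timed wait (e.g. a pending signal)
 * aborts the loop and is propagated to the caller.
 */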
#define osal_wait_event_interruptible(wait, func, param)                       \
    ({                                                                         \
        int __ret = 0;                                                         \
                                                                               \
        for (;;) {                                                             \
            if (func(param) != 0) {                                            \
                __ret = 0;                                                     \
                break;                                                         \
            }                                                                  \
            __ret = osal_wait_timeout_interruptible(wait, (func), param, 100); \
            if (__ret < 0) {                                                   \
                break;                                                         \
            }                                                                  \
        }                                                                      \
        __ret;                                                                 \
    })

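/*
 * Unmap a single register window. The size argument is not needed by
 * osal_iounmap() and appears to be kept only so call sites mirror
 * osal_ioremap().
 */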
static inline void osal_iounmap_ex(void *addr, unsigned long size)
{
    osal_iounmap(addr);
}

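/*
 * Kernel-virtual views of the interrupt registers for each interrupt type.
 * Every pointer below is an osal_ioremap() mapping of a single 32-bit
 * register and is released again in the matching unmap_* helper.
 */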
typedef struct {
    volatile unsigned int reg_num;
    volatile unsigned int *raw_int_reg[HI_MAX_STATE_NUM];
    volatile unsigned int *mask_int_reg[HI_MAX_STATE_NUM];
    volatile unsigned int *clr_int_reg[HI_MAX_STATE_NUM];
    unsigned int clr_or_mask[HI_MAX_STATE_NUM];
} hiirq_normal_reg_map;

typedef struct {
    volatile unsigned int *cap_int_reg;
    volatile unsigned int *pt_int_reg[VI_MAX_DEV_NUM];
    volatile unsigned int *ch_int_reg[VI_MAX_PHY_PIPE_NUM];
    volatile unsigned int *isp_fe_int_reg[VI_MAX_ISP_FE_NUM];
} hiirq_vicap_reg_map;

typedef struct {
    volatile unsigned int *aio_int_status_reg;
    volatile unsigned int *aio_int_raw_reg;
    volatile unsigned int *tx_clr_reg[AIO_TX_MAX_NUM];
    volatile unsigned int *tx_raw_reg[AIO_TX_MAX_NUM];
    volatile unsigned int *tx_stat_reg[AIO_TX_MAX_NUM];
    volatile unsigned int *rx_clr_reg[AIO_RX_MAX_NUM];
    volatile unsigned int *rx_raw_reg[AIO_RX_MAX_NUM];
    volatile unsigned int *rx_stat_reg[AIO_RX_MAX_NUM];
    volatile unsigned int *spdiftx_clr_reg[AIO_SPDIFTX_MAX_NUM];
    volatile unsigned int *spdiftx_raw_reg[AIO_SPDIFTX_MAX_NUM];
    volatile unsigned int *spdiftx_stat_reg[AIO_SPDIFTX_MAX_NUM];
} hiirq_aio_reg_map;

typedef struct {
    hi_int_type type;
    union {
        hiirq_normal_reg_map normal;
        hiirq_vicap_reg_map vicap;
        hiirq_aio_reg_map aio;
    };
} hiirq_reg_map_info;

typedef struct irq_strct {
    int                  irq_cnt;
    hiirq_irq_attr       irq_attr;
    osal_wait            irq_wait;
    hiirq_reg_map_info   map_info;
    hi_int_state_info    int_info;
    struct irq_strct     *next;
} hiirq_irq_list;

static hiirq_irq_list *head = NULL;

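/*
 * One hiirq_irq_list node is kept per registered interrupt. Nodes are
 * created on demand, linked at the head of a global singly linked list,
 * and looked up either by logical IRQ number (and device cookie) or by
 * the real Linux IRQ number delivered to the handler.
 */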
static hiirq_irq_list *add_list(hiirq_irq_attr *irqattr)
{
    hiirq_irq_list *tmp;

    tmp = (hiirq_irq_list *)osal_kmalloc(0, sizeof(hiirq_irq_list), OSAL_GFP_KERNEL);
    if (tmp == NULL) {
        return NULL;
    }
    (hi_void)memset_s(tmp, sizeof(hiirq_irq_list), 0, sizeof(hiirq_irq_list));
    (hi_void)memcpy_s(&(tmp->irq_attr), sizeof(hiirq_irq_attr), irqattr, sizeof(hiirq_irq_attr));
    tmp->irq_attr.request_flag = HI_FALSE;
    tmp->irq_cnt = 0;
    osal_wait_init(&(tmp->irq_wait));
    tmp->next = head;
    head = tmp;
    return tmp;
}

static void free_list(void)
{
    hiirq_irq_list *tmp = NULL;

    while (head != NULL) {
        if (head->irq_attr.request_flag == 1) {
            free_irq(head->irq_attr.__irq, head->irq_attr.dev);
        }

        tmp = head->next;
        osal_kfree(0, head);
        head = tmp;
    }
}

static hiirq_irq_list *get_list_node(int irq, void *dev_id)
{
    hiirq_irq_list *tmp = head;

    node_cnt = 0;
    while (tmp != NULL) {
        if (tmp->irq_attr.irq_num == irq &&
            ((tmp->irq_attr.dev == dev_id) || (tmp->irq_attr.dev == NULL) || dev_id == NULL)) {
            if (tmp->irq_attr.dev == NULL) {
                tmp->irq_attr.dev = dev_id;
            }
            return tmp;
        }
        node_cnt++;
        tmp = tmp->next;
    }
    return NULL;
}

static hiirq_irq_list *get_list_node_by_realirq(int realirq)
{
    hiirq_irq_list *tmp = head;

    while (tmp != NULL) {
        if (tmp->irq_attr.__irq == realirq) {
            return tmp;
        }
        tmp = tmp->next;
    }
    return NULL;
}

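/*
 * Tear down the ioremap()ed register windows. Each helper handles one
 * interrupt type and resets every pointer to HI_NULL after unmapping so
 * the operation is safe to repeat.
 */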
static void unmap_tx_reg_for_aio(hiirq_aio_reg_map *map)
{
    int i;
    for (i = 0; i < AIO_TX_MAX_NUM; i++) {
        if (map->tx_clr_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->tx_clr_reg[i], SINGLE_REG_SIZE);
            map->tx_clr_reg[i] = HI_NULL;
        }
        if (map->tx_raw_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->tx_raw_reg[i], SINGLE_REG_SIZE);
            map->tx_raw_reg[i] = HI_NULL;
        }
        if (map->tx_stat_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->tx_stat_reg[i], SINGLE_REG_SIZE);
            map->tx_stat_reg[i] = HI_NULL;
        }
    }
}

static void unmap_rx_reg_for_aio(hiirq_aio_reg_map *map)
{
    int i;
    for (i = 0; i < AIO_RX_MAX_NUM; i++) {
        if (map->rx_clr_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->rx_clr_reg[i], SINGLE_REG_SIZE);
            map->rx_clr_reg[i] = HI_NULL;
        }
        if (map->rx_raw_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->rx_raw_reg[i], SINGLE_REG_SIZE);
            map->rx_raw_reg[i] = HI_NULL;
        }
        if (map->rx_stat_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->rx_stat_reg[i], SINGLE_REG_SIZE);
            map->rx_stat_reg[i] = HI_NULL;
        }
    }
}

static void unmap_spdiftx_reg_for_aio(hiirq_aio_reg_map *map)
{
    int i;
    for (i = 0; i < AIO_SPDIFTX_MAX_NUM; i++) {
        if (map->spdiftx_clr_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->spdiftx_clr_reg[i], SINGLE_REG_SIZE);
            map->spdiftx_clr_reg[i] = HI_NULL;
        }
        if (map->spdiftx_raw_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->spdiftx_raw_reg[i], SINGLE_REG_SIZE);
            map->spdiftx_raw_reg[i] = HI_NULL;
        }
        if (map->spdiftx_stat_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->spdiftx_stat_reg[i], SINGLE_REG_SIZE);
            map->spdiftx_stat_reg[i] = HI_NULL;
        }
    }
}

static void unmap_int_reg_for_aio(hiirq_aio_reg_map *map)
{
    if (map->aio_int_status_reg != HI_NULL) {
        osal_iounmap_ex((void *)map->aio_int_status_reg, SINGLE_REG_SIZE);
        map->aio_int_status_reg = HI_NULL;
    }
    if (map->aio_int_raw_reg != HI_NULL) {
        osal_iounmap_ex((void *)map->aio_int_raw_reg, SINGLE_REG_SIZE);
        map->aio_int_raw_reg = HI_NULL;
    }

    unmap_tx_reg_for_aio(map);
    unmap_rx_reg_for_aio(map);
    unmap_spdiftx_reg_for_aio(map);
    return;
}

static void unmap_int_reg_for_vicap(hiirq_vicap_reg_map *map)
{
    int i;

    if (map->cap_int_reg != HI_NULL) {
        osal_iounmap_ex((void *)map->cap_int_reg, SINGLE_REG_SIZE);
        map->cap_int_reg = HI_NULL;
    }

    for (i = 0; i < VI_MAX_DEV_NUM; i++) {
        if (map->pt_int_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->pt_int_reg[i], SINGLE_REG_SIZE);
            map->pt_int_reg[i] = HI_NULL;
        }
    }

    for (i = 0; i < VI_MAX_PHY_PIPE_NUM; i++) {
        if (map->ch_int_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->ch_int_reg[i], SINGLE_REG_SIZE);
            map->ch_int_reg[i] = HI_NULL;
        }
    }

    for (i = 0; i < VI_MAX_ISP_FE_NUM; i++) {
        if (map->isp_fe_int_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->isp_fe_int_reg[i], SINGLE_REG_SIZE);
            map->isp_fe_int_reg[i] = HI_NULL;
        }
    }

    return;
}

static void unmap_int_reg_for_normal(hiirq_normal_reg_map *map)
{
    int i;

    for (i = 0; i < map->reg_num; i++) {
        if (map->raw_int_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->raw_int_reg[i], SINGLE_REG_SIZE);
            map->raw_int_reg[i] = HI_NULL;
        }
        if (map->mask_int_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->mask_int_reg[i], SINGLE_REG_SIZE);
            map->mask_int_reg[i] = HI_NULL;
        }
        if (map->clr_int_reg[i] != HI_NULL) {
            osal_iounmap_ex((void *)map->clr_int_reg[i], SINGLE_REG_SIZE);
            map->clr_int_reg[i] = HI_NULL;
        }
    }
    map->reg_num = 0;

    return;
}

static void unmap_int_reg(hiirq_reg_map_info *map_info)
{
    if (map_info->type == HI_INT_VICAP) {
        unmap_int_reg_for_vicap(&map_info->vicap);
    } else if (map_info->type == HI_INT_AIO) {
        unmap_int_reg_for_aio(&map_info->aio);
    } else {
        unmap_int_reg_for_normal(&map_info->normal);
    }
    return;
}

static void del_list(void)
{
    hiirq_irq_list *tmp = head;
    hiirq_irq_list *tmp2 = NULL;

    while (tmp != NULL) {
        tmp2 = tmp->next;
        osal_wait_destroy(&(tmp->irq_wait));
        unmap_int_reg(&(tmp->map_info));
        osal_kfree(0, tmp);
        tmp = tmp2;
    }
    head = NULL;
}

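/*
 * Read the current raw/masked interrupt status from the mapped registers
 * into an hi_int_state_info snapshot. Called from the interrupt handler
 * with the spin lock held.
 */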
static void read_int_status_for_aio(hiirq_aio_reg_map *map, hi_aio_int_state *state)
{
    int i;

    state->aio_int_status_state = *map->aio_int_status_reg;
    state->aio_int_raw_state = *map->aio_int_raw_reg;

    for (i = 0; i < AIO_TX_MAX_NUM; i++) {
        state->tx_raw_state[i] = *map->tx_raw_reg[i];
        state->tx_stat_state[i] = *map->tx_stat_reg[i];
    }
    for (i = 0; i < AIO_RX_MAX_NUM; i++) {
        state->rx_raw_state[i] = *map->rx_raw_reg[i];
        state->rx_stat_state[i] = *map->rx_stat_reg[i];
    }
    for (i = 0; i < AIO_SPDIFTX_MAX_NUM; i++) {
        state->spdiftx_raw_state[i] = *map->spdiftx_raw_reg[i];
        state->spdiftx_stat_state[i] = *map->spdiftx_stat_reg[i];
    }
    return;
}

static void read_int_status_for_vicap(hiirq_vicap_reg_map *map, hi_vicap_int_state *state)
{
    int i;

    state->cap_state = *map->cap_int_reg;
    for (i = 0; i < VI_MAX_DEV_NUM; i++) {
        state->pt_state[i] = *map->pt_int_reg[i];
    }
    for (i = 0; i < VI_MAX_PHY_PIPE_NUM; i++) {
        state->ch_state[i] = *map->ch_int_reg[i];
    }
    for (i = 0; i < VI_MAX_ISP_FE_NUM; i++) {
        state->isp_fe_state[i] = *map->isp_fe_int_reg[i];
    }
    return;
}

static void read_int_status_for_normal(hiirq_normal_reg_map *map, hi_normal_int_state *state)
{
    int i;

    state->state_num = map->reg_num;
    for (i = 0; i < map->reg_num; i++) {
        state->raw_state[i] = *(map->raw_int_reg[i]);
        state->mask_state[i] = *(map->mask_int_reg[i]);
    }

    return;
}

static void read_int_status(hiirq_irq_list *irq_node, hi_int_state_info *int_info)
{
    int_info->type = irq_node->map_info.type;
    if (irq_node->map_info.type == HI_INT_VICAP) {
        read_int_status_for_vicap(&irq_node->map_info.vicap, &int_info->vicap);
    } else if (irq_node->map_info.type == HI_INT_AIO) {
        read_int_status_for_aio(&irq_node->map_info.aio, &int_info->aio);
    } else {
        read_int_status_for_normal(&irq_node->map_info.normal, &int_info->normal);
    }
    return;
}

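/*
 * Acknowledge the interrupt in hardware. The clear sequence is
 * type-specific: most blocks write the raw status back to a clear
 * register, while SCD, VDH and NNIE use their own conventions below.
 */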
static void clear_int_for_aio(hiirq_aio_reg_map *map, hi_aio_int_state *state)
{
    int i;

    for (i = 0; i < AIO_RX_MAX_NUM; i++) {
        *map->rx_clr_reg[i] = state->rx_raw_state[i];
    }

    for (i = 0; i < AIO_TX_MAX_NUM; i++) {
        *map->tx_clr_reg[i] = state->tx_raw_state[i];
    }

    for (i = 0; i < AIO_SPDIFTX_MAX_NUM; i++) {
        *map->spdiftx_clr_reg[i] = state->spdiftx_raw_state[i];
    }

    return;
}

static void clear_int_for_vicap(hiirq_vicap_reg_map *map, hi_vicap_int_state *state)
{
    int i;

    *map->cap_int_reg = state->cap_state;
    for (i = 0; i < VI_MAX_DEV_NUM; i++) {
        *map->pt_int_reg[i] = state->pt_state[i];
    }
    for (i = 0; i < VI_MAX_PHY_PIPE_NUM; i++) {
        *map->ch_int_reg[i] = state->ch_state[i];
    }
    for (i = 0; i < VI_MAX_ISP_FE_NUM; i++) {
        *map->isp_fe_int_reg[i] = state->isp_fe_state[i];
    }
    return;
}

static void clear_int_for_scd(hiirq_normal_reg_map *map)
{
    int i;
    for (i = 0; i < map->reg_num; i++) {
        *map->clr_int_reg[i] = 1;
    }
    return;
}

static void clear_int_for_vdh(hiirq_normal_reg_map *map, hi_normal_int_state *state)
{
    int i;
    unsigned int int_flag;
    unsigned int clr_val;

    for (i = 0; i < map->reg_num; i++) {
        /* bits 17 and 19 of the raw status decide which clear bits to set */
        int_flag = ((state->raw_state[i] >> 17) & 0x1) |
            (((state->raw_state[i] >> 19) & 0x1) << 1);
        clr_val = 0;
        if (int_flag & 0x1) {
            clr_val |= 0x1;
        }
        if (int_flag & 0x2) {
            clr_val |= (0x1 << 2);
        }
        *map->clr_int_reg[i] = clr_val;
    }
    return;
}

static void clear_int_for_nnie(hiirq_normal_reg_map *map, hi_normal_int_state *state)
{
    int i;
    for (i = 0; i < map->reg_num; i++) {
        *map->clr_int_reg[i] = state->mask_state[i];
    }
    return;
}

static void clear_int_for_normal(hiirq_normal_reg_map *map, hi_normal_int_state *state)
{
    int i;
    for (i = 0; i < map->reg_num; i++) {
        *map->clr_int_reg[i] = state->raw_state[i] | map->clr_or_mask[i];
    }
    return;
}

static void clear_int(hiirq_irq_list *irq_node, hi_int_state_info *int_info)
{
    if (irq_node->map_info.type == HI_INT_VICAP) {
        clear_int_for_vicap(&irq_node->map_info.vicap, &int_info->vicap);
    } else if (irq_node->map_info.type == HI_INT_AIO) {
        clear_int_for_aio(&irq_node->map_info.aio, &int_info->aio);
    } else if (irq_node->map_info.type == HI_INT_SCD) {
        clear_int_for_scd(&irq_node->map_info.normal);
    } else if (irq_node->map_info.type == HI_INT_VDH) {
        clear_int_for_vdh(&irq_node->map_info.normal, &int_info->normal);
    } else if (irq_node->map_info.type == HI_INT_NNIE) {
        clear_int_for_nnie(&irq_node->map_info.normal, &int_info->normal);
    } else {
        clear_int_for_normal(&irq_node->map_info.normal, &int_info->normal);
    }
    return;
}

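/*
 * OR the freshly read status into the per-node accumulator so that no
 * events are lost between two waits; hiirq_wait_irq() copies the
 * accumulated state to user space and then resets it.
 */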
static void save_int_status_for_aio(hi_aio_int_state *save, hi_aio_int_state *state)
{
    int i;

    save->aio_int_status_state |= state->aio_int_status_state;
    save->aio_int_raw_state |= state->aio_int_raw_state;
    for (i = 0; i < AIO_TX_MAX_NUM; i++) {
        save->tx_raw_state[i] |= state->tx_raw_state[i];
        save->tx_stat_state[i] |= state->tx_stat_state[i];
    }
    for (i = 0; i < AIO_RX_MAX_NUM; i++) {
        save->rx_raw_state[i] |= state->rx_raw_state[i];
        save->rx_stat_state[i] |= state->rx_stat_state[i];
    }
    for (i = 0; i < AIO_SPDIFTX_MAX_NUM; i++) {
        save->spdiftx_raw_state[i] |= state->spdiftx_raw_state[i];
        save->spdiftx_stat_state[i] |= state->spdiftx_stat_state[i];
    }
    return;
}

static void save_int_status_for_vicap(hi_vicap_int_state *save, hi_vicap_int_state *state)
{
    int i;

    save->cap_state |= state->cap_state;
    for (i = 0; i < VI_MAX_DEV_NUM; i++) {
        save->pt_state[i] |= state->pt_state[i];
    }
    for (i = 0; i < VI_MAX_PHY_PIPE_NUM; i++) {
        save->ch_state[i] |= state->ch_state[i];
    }
    for (i = 0; i < VI_MAX_ISP_FE_NUM; i++) {
        save->isp_fe_state[i] |= state->isp_fe_state[i];
    }
    return;
}

static void save_int_status_for_normal(hi_normal_int_state *save, hi_normal_int_state *state)
{
    int i;

    save->state_num = state->state_num;
    for (i = 0; i < state->state_num; i++) {
        save->raw_state[i] |= state->raw_state[i];
        save->mask_state[i] |= state->mask_state[i];
    }
    return;
}

static void save_int_status(hiirq_irq_list *irq_node, hi_int_state_info *int_info)
{
    irq_node->int_info.type = int_info->type;
    if (int_info->type == HI_INT_VICAP) {
        save_int_status_for_vicap(&irq_node->int_info.vicap, &int_info->vicap);
    } else if (int_info->type == HI_INT_AIO) {
        save_int_status_for_aio(&irq_node->int_info.aio, &int_info->aio);
    } else {
        save_int_status_for_normal(&irq_node->int_info.normal, &int_info->normal);
    }
}

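/*
 * Shared interrupt handler. Looks up the node that was registered for the
 * real IRQ and, under the spin lock, snapshots the status registers,
 * clears the interrupt in hardware, accumulates the status for the
 * waiting reader and finally wakes it up.
 */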
static int hiirq_interrupt(int realirq, void *dev_id)
{
    hiirq_irq_list *irq_node = NULL;
    hi_int_state_info int_info = { 0 };
    unsigned long flags;

    irq_node = get_list_node_by_realirq(realirq);
    if (irq_node == NULL || irq_node->irq_attr.request_flag != HI_TRUE) {
        return OSAL_IRQ_NONE;
    } else {
        irq_spin_lock(flags);
        irq_node->irq_cnt++;
        read_int_status(irq_node, &int_info);
        osal_isb();
        clear_int(irq_node, &int_info);
        save_int_status(irq_node, &int_info);
        irq_spin_unlock(flags);

        osal_wait_wakeup(&(irq_node->irq_wait));
    }
    return OSAL_IRQ_HANDLED;
}

static int hiirq_wait_condition_callback(const void *param)
{
    hiirq_irq_list *irq = (hiirq_irq_list *)param;
    return (irq->irq_cnt != 0);
}

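/*
 * IRQ_REQUEST_OR_FREE_IRQ_CTRL handler: when request_flag is set, resolve
 * the platform IRQ by name and install hiirq_interrupt() as a shared
 * handler; otherwise wake any waiter, free the IRQ and mark the node
 * disabled. The attributes passed from user space are cached in the node.
 */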
hi_s32 hiirq_request_or_free_irq(unsigned int cmd, hi_void *arg, hi_void *private_data)
{
    hiirq_irq_attr *p = (hiirq_irq_attr *)arg;
    hiirq_irq_list *irq_node = NULL;
    int ret;

    if (p == NULL) {
        hiirq_trace("[%s,line:%d]hiirq_set_irq_param arg is null\n", HIIRQ_PFX, __LINE__);
        return HI_FAILURE;
    }

    irq_node = get_list_node(p->irq_num, p->dev);
    if (irq_node == NULL) {
        hiirq_trace("[%s,line:%d] not find irq_node.\n", HIIRQ_PFX, __LINE__);
        return HI_FAILURE;
    }

    if (p->request_flag == irq_node->irq_attr.request_flag) {
        return HI_FAILURE;
    }

    if (p->request_flag) { // add irq
        int __irq = hiirq_get_irq_byname(p->irq_name);
        irq_node->irq_attr.__irq = __irq;
        (hi_void)memcpy_s(irq_node->irq_attr.irq_name, MAX_IRQ_NAME_LEN, p->irq_name, MAX_IRQ_NAME_LEN);

        ret = request_threaded_irq(__irq, (irq_handler_t)hiirq_interrupt, NULL, IRQF_SHARED,
            irq_node->irq_attr.irq_name, p->dev);
        if (ret != 0) {
            hiirq_trace("[%s,line:%d]hiirq: failed to register (%s),irq_num:%d, ret %d, dev %p\n", HIIRQ_PFX, __LINE__,
                p->irq_name, p->irq_num, ret, p->dev);
            ret = HI_FAILURE;
            return ret;
        }
        irq_node->irq_cnt = 0;
        irq_node->irq_attr.enable_flag = 1;
    }
    if (!p->request_flag) { // free irq
        hiirq_trace("hiirq:free irq_num:%d\n", p->irq_num);
        irq_node->irq_cnt = -1;
        osal_wait_wakeup(&(irq_node->irq_wait));
        free_irq(irq_node->irq_attr.__irq, p->dev);
        irq_node->irq_attr.enable_flag = 0;
    }
    irq_node->irq_attr.request_flag = p->request_flag;
    irq_node->irq_attr.irq_mod     = p->irq_mod;
    irq_node->irq_attr.wait_mode   = p->wait_mode;
    irq_node->irq_attr.dev         = p->dev;
    return HI_SUCCESS;
}

hi_s32 hiirq_enable_or_disable_irq(unsigned int cmd, hi_void *arg, hi_void *private_data)
{
    hiirq_irq_attr *p = (hiirq_irq_attr *)arg;
    hiirq_irq_list *irq_node = NULL;
    int __irq;

    if (p == NULL) {
        hiirq_trace("[%s,line:%d]hiirq_set_irq_param arg is null\n", HIIRQ_PFX, __LINE__);
        return HI_FAILURE;
    }

    irq_node = get_list_node(p->irq_num, NULL);
    if (irq_node == NULL) {
        hiirq_trace("[%s,line:%d] not find irq_node.\n", HIIRQ_PFX, __LINE__);
        return HI_FAILURE;
    }

    if (p->enable_flag == irq_node->irq_attr.enable_flag) {
        return HI_FAILURE;
    }

    __irq = irq_node->irq_attr.__irq;
    if (p->enable_flag == 1) { // enable irq
        enable_irq(__irq);
        irq_node->irq_attr.enable_flag = 1;
    }

    if (p->enable_flag == 0) { // disable irq
        hiirq_trace("hiirq:disable irq_num:%d\n", p->irq_num);
        disable_irq(__irq);
        irq_node->irq_attr.enable_flag = 0;
    }
    return HI_SUCCESS;
}

hi_s32 hiirq_set_affinity(unsigned int cmd, hi_void *arg, hi_void *private_data)
{
    hiirq_irq_attr *p = (hiirq_irq_attr *)arg;
    hiirq_irq_list *irq_node = NULL;
    int __irq;
    struct cpumask cpumask_set = {0};
    int cpu_mask;

    if (p == NULL) {
        hiirq_trace("[%s,line:%d]hiirq_set_irq_param arg is null\n", HIIRQ_PFX, __LINE__);
        return HI_FAILURE;
    }

    irq_node = get_list_node(p->irq_num, NULL);
    if (irq_node == NULL) {
        hiirq_trace("[%s,line:%d] not find irq_node.\n", HIIRQ_PFX, __LINE__);
        return HI_FAILURE;
    }

    __irq = irq_node->irq_attr.__irq;
    cpu_mask = p->cpu_mask;
    cpumask_clear(&cpumask_set);
    if ((OSAL_CPU_0 & (unsigned int)cpu_mask) != 0) {
        cpumask_set_cpu(0, &cpumask_set); // cpu0
    }
    if ((OSAL_CPU_1 & (unsigned int)cpu_mask) != 0) {
        cpumask_set_cpu(1, &cpumask_set); // cpu1
    }
    if ((OSAL_CPU_2 & (unsigned int)cpu_mask) != 0) {
        cpumask_set_cpu(2, &cpumask_set); // cpu2
    }
    if ((OSAL_CPU_3 & (unsigned int)cpu_mask) != 0) {
        cpumask_set_cpu(3, &cpumask_set); // cpu3
    }
    return irq_set_affinity_hint(__irq, &cpumask_set);
}

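/*
 * IRQ_WAIT_IRQ_CTRL handler: block (interruptibly) until the handler has
 * counted at least one interrupt, then copy the accumulated status back
 * to the user buffer referenced by para->dev and reset the accumulator.
 * A negative irq_cnt means the IRQ was freed while waiting.
 */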
hi_s32 hiirq_wait_irq(unsigned int cmd, hi_void *arg, hi_void *private_data)
{
    hiirq_irq_attr *para = (hiirq_irq_attr *)arg;
    hiirq_irq_list *irq_node = NULL;
    hi_irq_arg user_dev = { 0 };
    unsigned long flags;
    int ret;

    if (para == NULL) {
        hiirq_trace("[%s,line:%d] error invalid arg\n", HIIRQ_PFX, __LINE__);
        return HI_FAILURE;
    }
    osal_copy_from_user(&user_dev, para->dev, sizeof(hi_irq_arg));
    irq_node = get_list_node(para->irq_num, para->dev);
    if (irq_node == NULL || irq_node->irq_attr.request_flag == HI_FALSE) {
        hiirq_trace("[%s,line:%d]irq_num:%d not enable\n", HIIRQ_PFX, __LINE__, para->irq_num);
        return HI_ERR_IRQ_UNEXIST;
    }
retry:
    if (irq_node->irq_cnt == 0) {
        ret = osal_wait_event_interruptible(&(irq_node->irq_wait), hiirq_wait_condition_callback, irq_node);
        if (ret != 0) {
            hiirq_trace("[%s,line:%d]osal_wait_event_interruptible return error:%d, irq: %d\n", HIIRQ_PFX, __LINE__,
                ret, para->irq_num);
            return ret;
        }
        if (irq_node->irq_cnt < 0) {
            return HI_FAILURE;
        }
        if (irq_node->irq_attr.wait_mode == IRQ_WAIT_FOREVER) { // wait forever
            goto retry;
        }
    } else {
        if (irq_node->irq_cnt < 0) {
            return HI_FAILURE;
        }
    }
    irq_spin_lock(flags);
    if (irq_node->irq_attr.irq_mod == IRQ_TRIG_ONECE) { // clear irqcnt for trigger mode 0
        irq_node->irq_cnt = 0;
    } else {
        irq_node->irq_cnt--;
    }
    (hi_void)memcpy_s(&user_dev.int_info, sizeof(hi_int_state_info), &irq_node->int_info, sizeof(hi_int_state_info));
    (hi_void)memset_s(&irq_node->int_info, sizeof(hi_int_state_info), 0, sizeof(hi_int_state_info));
    irq_spin_unlock(flags);

    osal_copy_to_user(para->dev, &user_dev, sizeof(hi_irq_arg));

    return HI_SUCCESS;
}

static struct platform_device *g_hiirq_pdev = NULL;

hi_s32 hiirq_get_irq_num(unsigned int cmd, hi_void *arg, hi_void *private_data)
{
    hiirq_irq_attr *para = (hiirq_irq_attr *)arg;

    if (para == NULL) {
        hiirq_trace("[%s,line:%d] error invalid arg\n", HIIRQ_PFX, __LINE__);
        return HI_FAILURE;
    }
    if (para->irq_name[0] != 0) {
        return platform_get_irq_byname(g_hiirq_pdev, para->irq_name);
    }
    return platform_get_irq(g_hiirq_pdev, para->irq_num);
}

hi_s32 hiirq_get_irq_byname(char *name)
{
    return platform_get_irq_byname(g_hiirq_pdev, name);
}

EXPORT_SYMBOL(hiirq_get_irq_byname);
#ifdef CFG_HI_USER_DRV
hi_s32 hi_get_irq_byname(char *name) __attribute__((weak, alias("hiirq_get_irq_byname")));
#endif
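
/*
 * Map the physical interrupt registers described by user space into the
 * kernel. One helper per interrupt type; on any osal_ioremap() failure
 * everything mapped so far is rolled back via the matching unmap helper.
 */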
static hi_s32 hiirq_map_irq_reg_for_aio(hiirq_aio_reg_map *map, hi_aio_int_reg *reg)
{
    int i;

    map->aio_int_status_reg = osal_ioremap(reg->aio_int_status_reg, SINGLE_REG_SIZE);
    map->aio_int_raw_reg = osal_ioremap(reg->aio_int_raw_reg, SINGLE_REG_SIZE);
    if ((map->aio_int_status_reg == NULL) || (map->aio_int_raw_reg == NULL)) {
        hiirq_trace("[%s,line:%d] osal_ioremap failed\n", HIIRQ_PFX, __LINE__);
        goto fail;
    }

    for (i = 0; i < AIO_TX_MAX_NUM; i++) {
        map->tx_clr_reg[i] = osal_ioremap(reg->tx_clr_reg[i], SINGLE_REG_SIZE);
        map->tx_raw_reg[i] = osal_ioremap(reg->tx_raw_reg[i], SINGLE_REG_SIZE);
        map->tx_stat_reg[i] = osal_ioremap(reg->tx_stat_reg[i], SINGLE_REG_SIZE);
        if ((map->tx_clr_reg[i] == NULL) || (map->tx_raw_reg[i] == NULL) || (map->tx_stat_reg[i] == NULL)) {
            hiirq_trace("[%s,line:%d] osal_ioremap failed\n", HIIRQ_PFX, __LINE__);
            goto fail;
        }
    }
    for (i = 0; i < AIO_RX_MAX_NUM; i++) {
        map->rx_clr_reg[i] = osal_ioremap(reg->rx_clr_reg[i], SINGLE_REG_SIZE);
        map->rx_raw_reg[i] = osal_ioremap(reg->rx_raw_reg[i], SINGLE_REG_SIZE);
        map->rx_stat_reg[i] = osal_ioremap(reg->rx_stat_reg[i], SINGLE_REG_SIZE);
        if ((map->rx_clr_reg[i] == NULL) || (map->rx_raw_reg[i] == NULL) || (map->rx_stat_reg[i] == NULL)) {
            hiirq_trace("[%s,line:%d] osal_ioremap failed\n", HIIRQ_PFX, __LINE__);
            goto fail;
        }
    }
    for (i = 0; i < AIO_SPDIFTX_MAX_NUM; i++) {
        map->spdiftx_clr_reg[i] = osal_ioremap(reg->spdiftx_clr_reg[i], SINGLE_REG_SIZE);
        map->spdiftx_raw_reg[i] = osal_ioremap(reg->spdiftx_raw_reg[i], SINGLE_REG_SIZE);
        map->spdiftx_stat_reg[i] = osal_ioremap(reg->spdiftx_stat_reg[i], SINGLE_REG_SIZE);
        if ((map->spdiftx_clr_reg[i] == NULL) || (map->spdiftx_raw_reg[i] == NULL) ||
            (map->spdiftx_stat_reg[i] == NULL)) {
            hiirq_trace("[%s,line:%d] osal_ioremap failed\n", HIIRQ_PFX, __LINE__);
            goto fail;
        }
    }
    return HI_SUCCESS;

fail:
    unmap_int_reg_for_aio(map);
    return HI_FAILURE;
}

static hi_s32 hiirq_map_irq_reg_for_vicap(hiirq_vicap_reg_map *map, hi_vicap_int_reg *reg)
{
    int i;

    map->cap_int_reg = osal_ioremap(reg->cap_reg, SINGLE_REG_SIZE);
    if (map->cap_int_reg == NULL) {
        hiirq_trace("[%s,line:%d] osal_ioremap failed\n", HIIRQ_PFX, __LINE__);
        goto fail;
    }
    for (i = 0; i < VI_MAX_DEV_NUM; i++) {
        map->pt_int_reg[i] = osal_ioremap(reg->pt_reg[i], SINGLE_REG_SIZE);
        if (map->pt_int_reg[i] == NULL) {
            hiirq_trace("[%s,line:%d] osal_ioremap failed\n", HIIRQ_PFX, __LINE__);
            goto fail;
        }
    }
    for (i = 0; i < VI_MAX_PHY_PIPE_NUM; i++) {
        map->ch_int_reg[i] = osal_ioremap(reg->ch_reg[i], SINGLE_REG_SIZE);
        if (map->ch_int_reg[i] == NULL) {
            hiirq_trace("[%s,line:%d] osal_ioremap failed\n", HIIRQ_PFX, __LINE__);
            goto fail;
        }
    }
    for (i = 0; i < VI_MAX_ISP_FE_NUM; i++) {
        map->isp_fe_int_reg[i] = osal_ioremap(reg->isp_fe_reg[i], SINGLE_REG_SIZE);
        if (map->isp_fe_int_reg[i] == NULL) {
            hiirq_trace("[%s,line:%d] osal_ioremap failed\n", HIIRQ_PFX, __LINE__);
            goto fail;
        }
    }
    return HI_SUCCESS;

fail:
    unmap_int_reg_for_vicap(map);
    return HI_FAILURE;
}

static hi_s32 hiirq_map_irq_reg_for_normal(hiirq_normal_reg_map *map, hi_normal_int_reg *reg)
{
    int i;

    if (reg->reg_num > HI_MAX_STATE_NUM) {
        hiirq_trace("[%s,line:%d] error invalid reg_num:%d\n", HIIRQ_PFX, __LINE__, reg->reg_num);
        return HI_FAILURE;
    }

    for (i = 0; i < reg->reg_num; i++) {
        map->raw_int_reg[i] = osal_ioremap(reg->raw_int_reg[i], SINGLE_REG_SIZE);
        if (map->raw_int_reg[i] == NULL) {
            hiirq_trace("[%s,line:%d] osal_ioremap failed\n", HIIRQ_PFX, __LINE__);
            goto fail;
        }

        map->mask_int_reg[i] = osal_ioremap(reg->mask_int_reg[i], SINGLE_REG_SIZE);
        if (map->mask_int_reg[i] == NULL) {
            hiirq_trace("[%s,line:%d] osal_ioremap failed\n", HIIRQ_PFX, __LINE__);
            goto fail;
        }

        map->clr_int_reg[i] = osal_ioremap(reg->clr_int_reg[i], SINGLE_REG_SIZE);
        if (map->clr_int_reg[i] == NULL) {
            hiirq_trace("[%s,line:%d] osal_ioremap failed\n", HIIRQ_PFX, __LINE__);
            goto fail;
        }

        map->clr_or_mask[i] = reg->clr_or_mask[i];
    }

    map->reg_num = reg->reg_num;
    return HI_SUCCESS;

fail:
    unmap_int_reg_for_normal(map);
    return HI_FAILURE;
}

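/*
 * IRQ_IOC_SET_IRQ_REG_CTRL handler: find or create the node for this
 * logical IRQ (the number of nodes is capped by MAX_NODE_LIMIT) and map
 * the register set that matches the requested interrupt type.
 */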
hi_s32 hiirq_set_irq_reg(unsigned int cmd, hi_void *arg, hi_void *private_data)
{
    hiirq_set_irq_reg_info *para = (hiirq_set_irq_reg_info *)arg;
    hiirq_irq_list *irq_node = NULL;
    hiirq_irq_attr irqattr = { 0 };
    int ret;

    if (para == NULL) {
        hiirq_trace("[%s,line:%d] error invalid arg\n", HIIRQ_PFX, __LINE__);
        return HI_FAILURE;
    }

    irq_node = get_list_node(para->irq_num, NULL);
    if (irq_node == NULL) {
        if (node_cnt >= MAX_NODE_LIMIT) {
            hiirq_trace("hiirq: node_cnt out of limit!\n");
            return HI_FAILURE;
        }
        irqattr.irq_num = para->irq_num;
        irq_node = add_list(&irqattr);
        if (irq_node == NULL) {
            hiirq_trace("hiirq: add irq node failed!\n");
            return HI_FAILURE;
        }
    }

    irq_node->map_info.type = para->reg_info.type;
    if (para->reg_info.type == HI_INT_VICAP) {
        ret = hiirq_map_irq_reg_for_vicap(&irq_node->map_info.vicap, &para->reg_info.vicap);
    } else if (para->reg_info.type == HI_INT_AIO) {
        ret = hiirq_map_irq_reg_for_aio(&irq_node->map_info.aio, &para->reg_info.aio);
    } else {
        ret = hiirq_map_irq_reg_for_normal(&irq_node->map_info.normal, &para->reg_info.normal);
    }

    return ret;
}

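/*
 * Despite its name, this helper applies a write-combining (rather than
 * strictly uncached) protection to the VMA before the pages are remapped.
 */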
static inline void osal_pgprot_noncached(osal_vm *vm)
{
    if (vm != NULL) {
        struct vm_area_struct *v = (struct vm_area_struct *)(vm->vm);
        v->vm_page_prot = pgprot_writecombine(v->vm_page_prot);
    }
}

/* Stub: no address validation is performed here; any range is accepted. */
static inline int cmpi_check_mmz_phy_addr(unsigned long long phy_addr, unsigned int len)
{
    (void)phy_addr;
    (void)len;
    return 0;
}

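/*
 * mmap handler. The user VMA must be exactly twice the page-aligned size
 * of the physical window: the same physical range (starting at vm_pgoff)
 * is mapped into both halves of the VMA back to back.
 */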
static hi_s32 hiirq_mmap(osal_vm *vm, hi_ulong start, hi_ulong end, hi_ulong vm_pgoff, hi_void *private_data)
{
    hi_s32 size;
    hi_u64 phy_addr;

    if (start > end) {
        return HI_FAILURE;
    }

    size = (end - start) / 2;

    /* the variable "size" must be aligned to 4K (page size). */
#define PAGE_SIZE_MASK 0xfffff000
    if ((hi_u32)size & (~PAGE_SIZE_MASK)) {
        return HI_FAILURE;
    }
#undef PAGE_SIZE_MASK

    phy_addr = (hi_u64)(vm_pgoff << HIIRQ_PAGE_SHIFT);
    if (cmpi_check_mmz_phy_addr(phy_addr, size) != HI_SUCCESS) {
        hiirq_trace("addr: %#llx, size: %d, invalid phyaddr!\n", phy_addr, size);
        return HI_FAILURE;
    }

    osal_pgprot_noncached(vm);

    if (osal_remap_pfn_range(vm, start, vm_pgoff, size, OSAL_CACHE)) {
        return HI_FAILURE;
    }
    if (osal_remap_pfn_range(vm, start + size, vm_pgoff, size, OSAL_CACHE)) {
        return HI_FAILURE;
    }

    return HI_SUCCESS;
}

static int hiirq_open(void *private_data)
{
    (void)private_data;
    hiirq_trace("Enter hiirq_open\n");
    return HI_SUCCESS;
}

static int hiirq_release(void *private_data)
{
    (void)private_data;
    hiirq_trace("Enter hiirq_release.\n");
    free_list();
    return HI_SUCCESS;
}

static osal_ioctl_cmd g_hiirq_cmd[] = {
    {IRQ_REQUEST_OR_FREE_IRQ_CTRL, hiirq_request_or_free_irq},
    {IRQ_WAIT_IRQ_CTRL, hiirq_wait_irq},
    {IRQ_IOC_GET_IRQ_NUM_CTRL, hiirq_get_irq_num},
    {IRQ_IOC_SET_IRQ_REG_CTRL, hiirq_set_irq_reg},
    {IRQ_ENABLE_OR_DISABLE_CTRL, hiirq_enable_or_disable_irq},
    {IRQ_SET_AFFINITY_CTRL, hiirq_set_affinity},
};

static osal_fileops g_hiirq_fops = {
    .open = hiirq_open,
    .release = hiirq_release,
    .mmap = hiirq_mmap,
    .cmd_list = g_hiirq_cmd,
    .cmd_cnt = sizeof(g_hiirq_cmd) / sizeof(g_hiirq_cmd[0]),
};

static osal_dev g_hiirq_dev = {
    .name = HIIRQ_DEVICE_NAME,
    .minor = 255, /* define a macro to substitute 255 */
    .fops = &g_hiirq_fops,
    .pmops = NULL,
};

int hiirq_init(struct platform_device *pdev)
{
    osal_spin_lock_init(&g_irq_spin_lock);
    if (osal_dev_register(&g_hiirq_dev) != 0) {
        hiirq_trace("[%s,line:%d]Error: can't register\n", HIIRQ_PFX, __LINE__);
        return HI_FAILURE;
    }
    g_hiirq_pdev = pdev;

    hiirq_trace("hi_irq init ok. ver=%s, %s.\n", __DATE__, __TIME__);
    return HI_SUCCESS;
}

void hiirq_exit(void)
{
    del_list();
    osal_dev_unregister(&g_hiirq_dev);
    g_hiirq_pdev = NULL;
    osal_spin_lock_destory(&g_irq_spin_lock);

    hiirq_trace("hi_irq exit ok.\n");
}