• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python
2# coding=utf-8
3##############################################
4# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9#     http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16##############################################
17
18import re
19import os
20import json
21import clang.cindex
22from clang.cindex import Config
23from clang.cindex import Index
24from clang.cindex import CursorKind
25from clang.cindex import TypeKind
26from utils.constants import StringConstant
27from utils.constants import RegularExpressions
28from typedef.parser.parser import NodeKind, DifferApiInfor, DifferApiRegular
29
30
# Maps a parse sequence number (calculation_times) to the line numbers of
# '/**' comment openers found in a header's leading section; rebuilt per file
# by get_start_comments() and consumed in api_entrance().
line_dist = {}
# Counter of files processed by get_start_comments(); used as the key above.
calculation_times = 0
33
34
def find_parent(cursor):
    """Return the kind of the cursor's semantic parent.

    VAR_DECL parents (integer-variable nodes) and STRUCT/UNION parents
    (used to classify members) are returned directly; any other parent
    is resolved through processing_root_parent().  Returns None when
    the cursor has no semantic parent.
    """
    parent = cursor.semantic_parent
    if parent is None:
        return None
    direct_kinds = (CursorKind.VAR_DECL, CursorKind.STRUCT_DECL, CursorKind.UNION_DECL)
    if parent.kind in direct_kinds:
        return parent.kind
    return processing_root_parent(parent)
48
49
def processing_root_parent(cursor_parent):
    """Resolve the kind of the grandparent cursor.

    An INVALID grandparent type marks the translation-unit root, so
    TRANSLATION_UNIT is reported; otherwise the grandparent's own kind
    is returned.  None when there is no grandparent.
    """
    grandparent = cursor_parent.semantic_parent
    if grandparent is None:
        return None
    if grandparent.type.kind == TypeKind.INVALID:
        return CursorKind.TRANSLATION_UNIT
    return grandparent.kind
59
60
def processing_no_child(cursor, data):
    """Handle leaf nodes: integer literals store their token text as "integer_value"."""
    if cursor.kind != CursorKind.INTEGER_LITERAL:
        return
    # INTEGER_LITERAL cursors have no children; the value only exists in tokens.
    for token in cursor.get_tokens():
        data["integer_value"] = token.spelling
66
67
def get_token(cursor):
    """Return the spellings of all tokens under the cursor as a list."""
    return [token.spelling for token in cursor.get_tokens()]
74
75
def judgment_extern(cursor, data):
    """Record whether the declaration is externally visible.

    Functions are treated as extern unless their tokens contain
    'static' (or 'deprecated'); variables are extern only when the
    'extern' keyword appears and additionally get an "is_const" flag.
    Any other kind defaults to extern.
    """
    tokens = get_token(cursor)
    if cursor.kind == CursorKind.FUNCTION_DECL:
        is_extern = not ('static' in tokens or 'deprecated' in tokens)
    elif cursor.kind == CursorKind.VAR_DECL:
        is_extern = 'extern' in tokens
        data["is_const"] = 'const' in tokens
    else:
        is_extern = True
    data["is_extern"] = is_extern
96
97
def binary_operator(cursor, data):
    """Record the last arithmetic/shift operator token of a binary-operator node."""
    data["name"] = ""
    known_operators = {'<<', '>>', '+', '-', '*', '/'}
    for token in cursor.get_tokens():
        spelling = token.spelling
        if spelling in known_operators:
            data["operator"] = spelling
105
106
def distinction_member(cursor, data):
    """Tag a field as a struct member or a union member based on its parent kind."""
    member_tags = {
        CursorKind.UNION_DECL: "union_member",
        CursorKind.STRUCT_DECL: "struct_member",
    }
    parent_kind = find_parent(cursor)  # look up the parent node's kind
    if parent_kind in member_tags:
        data["member"] = member_tags[parent_kind]
114
115
def processing_parm(cursor, data):
    """Handle a function-parameter node.

    Stores the parameter name (empty when anonymous) and, for function
    pointer parameters, the return type of the pointed-to prototype.
    """
    data["name"] = cursor.spelling if cursor.spelling else ""
    pointee = cursor.type.get_pointee()
    if pointee.kind == TypeKind.FUNCTIONPROTO:
        data["func_pointer_result_type"] = pointee.get_result().spelling
124
125
def processing_enum(cursor, data):
    """Store the enumerator's numeric value under "value"."""
    data["value"] = cursor.enum_value
128
129
def processing_def(cursor, data):
    """Handle a macro-definition node.

    A '(' immediately after the macro name marks a function-like macro:
    the parameter list is captured separately and appended to the
    recorded name.  The remaining replacement text is stripped and
    stored under 'text'; macros have no type, so "type" is emptied.
    """
    data['is_def_func'] = False
    data['name'] = cursor.spelling
    content = data['node_content']['content']
    name_end = len(data['name'])
    text = ''
    if name_end != len(content):
        if not content:
            print('mar_define error, its content is none')
        elif content[name_end] == '(':
            close_paren = content.index(')')
            param = content[name_end:close_paren + 1]
            text = content[close_paren + 1:]
            data['is_def_func'] = True
            data['def_func_name'] = data['name']
            data['def_func_param'] = param
            data['name'] = data['name'] + param
        else:
            text = content[name_end:]
    # strip() only when there is text, matching the original behavior
    data['text'] = text.strip() if text else text
    data["type"] = ""
154
155
def difference_api(api_data: dict):
    """Classify an API name as closed-source, open-source, or third-party.

    The name is matched case-insensitively against the closed-source
    pattern first, then the open-source one; a name matching neither is
    flagged as a third-party API.
    """
    name = api_data['name']
    if re.search(DifferApiRegular.CLOSED_SOURCE_API_REGULAR.value, name, flags=re.IGNORECASE):
        api_data['open_close_api'] = DifferApiInfor.CLOSED_SOURCE_API.value
    elif re.search(DifferApiRegular.OPEN_SOURCE_API_REGULAR.value, name, flags=re.IGNORECASE):
        api_data['open_close_api'] = DifferApiInfor.OPEN_SOURCE_API.value
    else:
        api_data['is_third_party_api'] = True
166
167
def processing_func(cursor, data):
    """Handle a function declaration: record its return type, extern flag,
    and open/closed-source classification."""
    data["return_type"] = cursor.result_type.spelling
    judgment_extern(cursor, data)
    difference_api(data)
172
173
def processing_type(cursor, data):
    """Assign a placeholder "type" to node kinds that carry no real type."""
    placeholder_types = {
        CursorKind.MACRO_INSTANTIATION: "insta_no_type",   # macro reference
        CursorKind.INCLUSION_DIRECTIVE: "inclusion_no_type",  # header inclusion
    }
    if cursor.kind in placeholder_types:
        data["type"] = placeholder_types[cursor.kind]
180
181
def processing_name(cursor, data):
    """Fill in the name for node kinds that have none.

    Parenthesised expressions additionally record their parens; both
    PAREN_EXPR and UNEXPOSED_EXPR get an empty name.
    """
    if cursor.kind == CursorKind.PAREN_EXPR:
        data["paren"] = "()"
    if cursor.kind in (CursorKind.PAREN_EXPR, CursorKind.UNEXPOSED_EXPR):
        data["name"] = ""
189
190
def processing_char(cursor, data):
    """Store a character literal's value (first token, quotes stripped) as the name."""
    first_token = list(cursor.get_tokens())[0]
    data["name"] = first_token.spelling.strip("'")
195
196
# Dispatch table mapping a cursor-kind name to the handler that enriches
# that node's data dict; looked up by processing_special_node().
special_node_process = {
    CursorKind.ENUM_CONSTANT_DECL.name: processing_enum,
    CursorKind.MACRO_DEFINITION.name: processing_def,
    CursorKind.FUNCTION_DECL.name: processing_func,
    CursorKind.VAR_DECL.name: judgment_extern,
    CursorKind.PARM_DECL.name: processing_parm,
    CursorKind.FIELD_DECL.name: distinction_member,
    CursorKind.MACRO_INSTANTIATION.name: processing_type,
    CursorKind.INCLUSION_DIRECTIVE.name: processing_type,
    CursorKind.BINARY_OPERATOR.name: binary_operator,
    CursorKind.PAREN_EXPR.name: processing_name,
    CursorKind.UNEXPOSED_EXPR.name: processing_name,
    CursorKind.CHARACTER_LITERAL.name: processing_char
}
211
212
def process_members_class_name(data: dict, parent_cursor):
    """Derive a class name for anonymous members from file and parent type.

    Applies only when the node has no name and the parent's type
    spelling does not already embed the file name.
    """
    file_name = os.path.split(data['location']['location_path'])[1]
    parent_type = parent_cursor.type.spelling
    if not data['name'] and file_name not in parent_type:
        data['class_name'] = '{}-{}'.format(file_name, parent_type)
217
218
def get_api_unique_id(cursor, loc, data):
    """Build a unique id of the form "<path>#[<parent>#]<name>".

    Macro definitions are identified by path and spelling alone.  For
    other cursors the semantic parent contributes a middle segment:
    nothing for the translation unit, the parent's type spelling for
    struct/union/enum containers (also filling in the member's class
    name via process_members_class_name), otherwise the parent's
    spelling.  Struct/union/enum cursors themselves use their type
    spelling as the name component, since their cursor spelling may be
    empty for anonymous types.  Returns '' when there is no parent.
    """
    unique_id = ''
    if cursor.kind == CursorKind.MACRO_DEFINITION:
        unique_id = '{}#{}'.format(loc["location_path"], cursor.spelling)
        return unique_id

    parent_of_cursor = cursor.semantic_parent
    struct_union_enum = [NodeKind.STRUCT_DECL.value, NodeKind.UNION_DECL.value,
                         NodeKind.ENUM_DECL.value]
    if parent_of_cursor:
        unique_name = cursor.spelling
        try:
            if parent_of_cursor.kind == CursorKind.TRANSLATION_UNIT:
                parent_name_str = ''
            elif parent_of_cursor.kind.name in struct_union_enum:
                parent_name_str = parent_of_cursor.type.spelling
                process_members_class_name(data, parent_of_cursor)
            else:
                parent_name_str = parent_of_cursor.spelling
        except ValueError:
            # NOTE(review): presumably guards against cursor kinds that the
            # cindex bindings cannot map (`.kind` raising) — confirm.
            parent_name_str = ''
        if cursor.kind.name in struct_union_enum:
            unique_name = cursor.type.spelling
        if not parent_name_str:
            unique_id = '{}#{}'.format(loc["location_path"], unique_name)
        else:
            unique_id = '{}#{}#{}'.format(loc["location_path"], parent_name_str, unique_name)
    return unique_id
247
248
def get_node_class_name(data):
    """Compute the class name for a node.

    Struct/union/enum nodes that already carry a class_name are named
    "<file>-<name>", falling back to "<file>-<type>" for anonymous
    nodes whose type spelling does not embed the file name.  All other
    nodes simply use the file name.
    """
    container_kinds = [NodeKind.STRUCT_DECL.value, NodeKind.UNION_DECL.value,
                       NodeKind.ENUM_DECL.value]
    file_name = os.path.split(data["location"]["location_path"])[1]
    if data.get('kind') not in container_kinds or 'class_name' not in data:
        return file_name
    if data["name"] or file_name in data["type"]:
        return '{}-{}'.format(file_name, data["name"])
    return '{}-{}'.format(file_name, data["type"])
261
262
def processing_special_node(cursor, data, key, directory_path):  # nodes that need special handling
    """Populate location/class-name/unique-id info and run kind handlers.

    key == 0 marks the translation-unit root: its path comes from
    cursor.spelling, its unique id is overridden with its name, and
    file-level syntax diagnostics are collected.  Finally the handler
    registered in special_node_process (if any) runs for this kind.
    """
    if key == 0:
        location_path = cursor.spelling
        kind_name = CursorKind.TRANSLATION_UNIT.name
    else:
        location_path = cursor.location.file.name
        kind_name = cursor.kind.name

    loc = {
        "location_path": '{}'.format(location_path),
        "location_line": cursor.location.line,
        "location_column": cursor.location.column
    }
    if directory_path:
        relative_path = os.path.relpath(location_path, directory_path)  # header path relative to the base directory
        loc["location_path"] = relative_path
    data["location"] = loc
    data["class_name"] = get_node_class_name(data)
    data["unique_id"] = get_api_unique_id(cursor, loc, data)
    if key == 0:
        data["unique_id"] = data["name"]
        syntax_error_message = diagnostic_callback(cursor.translation_unit.diagnostics, directory_path)
        data["syntax_error"] = syntax_error_message
    if kind_name in special_node_process.keys():
        node_process = special_node_process[kind_name]
        node_process(cursor, data)  # invoke the handler registered for this node kind
289
290
def node_extent(cursor, current_file):
    """Read the source text covered by the cursor's extent.

    Returns a dict with the extent's start/end offsets, the exact text
    between them, and the full source lines the node spans.
    """
    start_offset = cursor.extent.start.offset
    end_offset = cursor.extent.end.offset
    start_line = cursor.extent.start.line
    end_line = cursor.extent.end.line
    # Fix: encoding was misspelled 'utf=8'; it only worked because codec-name
    # normalization maps '=' to '_' ('utf_8').  Use the canonical spelling.
    with open(current_file, 'r', encoding='utf-8') as f:
        f.seek(start_offset)
        content = f.read(end_offset - start_offset)
        f.seek(0)
        # Extent lines are 1-based; slice out the node's full source lines.
        line_content = ''.join(f.readlines()[start_line - 1: end_line])
    # (The redundant f.close() after the with-block was removed.)
    return {
        "start_offset": start_offset,
        "end_offset": end_offset,
        "content": content,
        "line_content": line_content
    }
311
312
def define_comment(cursor, current_file, data):
    """Look for the comment preceding a macro definition.

    Scans the file text up to the macro's own line for the
    DEFINE_COMMENT pattern followed by the macro name and, when found,
    stores the matched text as the node's comment.
    """
    lines_above = cursor.location.line
    with open(current_file, mode='r', encoding='utf-8') as source:
        text_above = ''.join(source.readlines()[:lines_above])
    pattern = '{} {})'.format(RegularExpressions.DEFINE_COMMENT.value, cursor.spelling)
    found = re.search(pattern, text_above)
    if found:
        data['comment'] = found.group()
322
323
def get_default_node_data(cursor, directory_path):
    """Return the template dict describing one AST node, pre-filled with defaults.

    Only name/type/gn_path come from the cursor; every other field
    starts as '', 'NA', or a neutral default and is filled in later.
    """
    return dict(
        name=cursor.spelling,
        kind='',
        type=cursor.type.spelling,
        gn_path=directory_path,
        node_content={},
        comment='',
        syscap='',
        since='',
        kit_name='',
        sub_system='',
        module_name='',
        permission='',
        class_name='global',
        deprecate_since='',
        error_num='NA',
        is_system_api='NA',
        model_constraint='NA',
        cross_platform='NA',
        form='NA',
        atomic_service='NA',
        decorator='NA',
        unique_id='',
        syntax_error='NA',
        open_close_api='NA',
        is_third_party_api=False,
    )
353
354
def diagnostic_callback(diagnostic, dir_path):
    """Format clang diagnostics into one newline-joined message string.

    Each diagnostic becomes "<path>:<line>\n错误信息:<message>" with the
    path made relative to dir_path when possible.  Returns 'NA' when
    there are no diagnostics.
    """
    messages = []
    for dig in diagnostic:
        file_path = f"{dig.location.file}"
        try:
            file_path = os.path.relpath(os.path.normpath(file_path), dir_path)
        except ValueError:
            # relpath fails across drives on Windows; keep the raw path.
            pass
        messages.append(f"{file_path}:{dig.location.line}\n错误信息:{dig.spelling}")
    if not messages:
        return 'NA'
    return '\n'.join(messages)
375
376
def parser_data_assignment(cursor, current_file, directory_path, comment=None, key=0):
    """Build the data dict for one cursor (key == 0 means the file root).

    For the root node the name becomes the header path (relative to
    directory_path when given) and the supplied leading comment is
    attached; for every other node the source extent is captured and
    macro definitions get their preceding comment looked up.  The
    @syscap/@since/@kit/@permission/@addtogroup/@deprecated tags are
    then extracted, kind-specific handlers run, and missing
    kit/subsystem values are filled from the bundled JSON.
    """
    data = get_default_node_data(cursor, directory_path)  # common defaults
    get_comment(cursor, data)
    if key == 0:
        data["kind"] = CursorKind.TRANSLATION_UNIT.name
        if comment:
            data["comment"] = comment
        if directory_path:
            # Root node: record the header path relative to the base directory.
            relative_path = os.path.relpath(cursor.spelling, directory_path)
            data["name"] = relative_path
    else:
        content = node_extent(cursor, current_file)
        data["node_content"] = dict(content)
        data["kind"] = cursor.kind.name
        if cursor.kind.name == CursorKind.MACRO_DEFINITION.name:
            define_comment(cursor, current_file, data)
    get_syscap_value(data)
    get_since_value(data)
    get_kit_value(data)
    get_permission_value(data)
    get_module_name_value(data)
    get_deprecate_since_value(data)
    processing_special_node(cursor, data, key, directory_path)  # kind-specific node handling
    get_file_kit_or_system(data)

    return data
403
404
def ast_to_dict(cursor, current_file, last_data, directory_path, comment=None, key=0):  # organize the parsed data
    """Recursively convert a cursor and its children into a nested dict.

    Children are collected under "parm" (functions), "members"
    (struct/union/enum) or "children" (everything else and the root);
    TYPE_REF children are stored separately by processing_ast_node().
    module_name/kit_name/syscap are inherited from the parent's data
    when last_data is non-empty.
    """
    # Common attribute assignment for this node.
    data = parser_data_assignment(cursor, current_file, directory_path, comment, key)
    if last_data:
        # Inherit module/kit/syscap values from the parent node.
        data['module_name'] = last_data['module_name']
        data['kit_name'] = last_data['kit_name']
        data['syscap'] = last_data['syscap']
    children = list(cursor.get_children())  # when children exist, append them under a kind-dependent key
    if len(children) > 0:
        if key != 0:
            if cursor.kind == CursorKind.FUNCTION_DECL:  # function parameters
                name = "parm"
            elif (cursor.kind == CursorKind.ENUM_DECL
                  or cursor.kind == CursorKind.STRUCT_DECL
                  or cursor.kind == CursorKind.UNION_DECL):
                name = "members"
            else:
                name = "children"
        else:
            name = "children"
        data[name] = []

        for child in children:
            # Drop redundant macro expansions/inclusions, attribute nodes,
            # and anything declared outside the current file.
            if (child.location.file is not None) and (not child.kind.is_attribute()) \
                    and child.kind.name != CursorKind.MACRO_INSTANTIATION.name \
                    and child.kind.name != CursorKind.INCLUSION_DIRECTIVE.name \
                    and (child.location.file.name == current_file):
                processing_ast_node(child, current_file, data, name, directory_path)
    else:
        if cursor.kind == CursorKind.FUNCTION_DECL:  # ensure "parm" exists even when clang reports no parameters
            data["parm"] = []
        processing_no_child(cursor, data)  # leaf-node handling
    return data
439
440
def get_syscap_value(data: dict):
    """Extract the @syscap tag value from the node comment into data["syscap"]."""
    if data["comment"] == 'none_comment':
        return
    pattern = r'@([Ss]yscap).*?(?=\n)'
    found = [m.group() for m in re.finditer(pattern, data['comment'], re.DOTALL | re.MULTILINE)]
    if found:
        # The last occurrence wins when the tag appears more than once.
        data["syscap"] = re.sub('@syscap', '', found[-1], flags=re.IGNORECASE)
452
453
def get_since_value(data: dict):
    """Extract the @since tag value from the node comment into data["since"]."""
    if data["comment"] == 'none_comment':
        return
    pattern = r'@(since).*?(?=\n)'
    found = [m.group() for m in re.finditer(pattern, data['comment'], re.DOTALL | re.MULTILINE)]
    if found:
        # The last occurrence wins when the tag appears more than once.
        data["since"] = found[-1].replace('@since', '')
465
466
def get_kit_value(data: dict):
    """Extract the @kit tag value from the node comment into data["kit_name"]."""
    if data["comment"] == 'none_comment':
        return
    pattern = r'@(kit).*?(?=\n)'
    found = [m.group() for m in re.finditer(pattern, data['comment'], re.DOTALL | re.MULTILINE)]
    if found:
        # The last occurrence wins when the tag appears more than once.
        data["kit_name"] = found[-1].replace('@kit', '')
478
479
def get_module_name_value(data: dict):
    """Extract the @addtogroup tag value from the node comment into data["module_name"]."""
    if data["comment"] == 'none_comment':
        return
    pattern = r'@(addtogroup).*?(?=\n)'
    found = [m.group() for m in re.finditer(pattern, data['comment'], re.DOTALL | re.MULTILINE)]
    if found:
        # The last occurrence wins when the tag appears more than once.
        data["module_name"] = found[-1].replace('@addtogroup', '')
491
492
def get_permission_value(data: dict):
    """Extract the @permission tag value from the node comment into data["permission"]."""
    if data["comment"] == 'none_comment':
        return
    pattern = r'@(permission).*?(?=\n)'
    found = [m.group() for m in re.finditer(pattern, data['comment'], re.DOTALL | re.MULTILINE)]
    if found:
        # The last occurrence wins when the tag appears more than once.
        data["permission"] = found[-1].replace('@permission', '')
504
505
def get_deprecate_since_value(data: dict):
    """Extract the version from an "@deprecated since N" tag into data["deprecate_since"]."""
    if data["comment"] == 'none_comment':
        return
    pattern = r'@(deprecated).*?(?=\n)'
    found = [m.group() for m in re.finditer(pattern, data['comment'], re.DOTALL | re.MULTILINE)]
    if found:
        # The last occurrence wins; both the tag and the word 'since' are dropped.
        data["deprecate_since"] = found[-1].replace('@deprecated', '').replace('since', '')
519
520
def get_file_kit_or_system(node_data):
    """Fill in missing kit_name/sub_system from the bundled kit-subsystem JSON.

    Looks up the node's header path in
    kit_sub_system/c_file_kit_sub_system.json (next to this module) and
    only overwrites fields that are still empty.
    """
    here = os.path.dirname(__file__)
    kit_json_file_path = os.path.abspath(os.path.join(here,
                                                      r"kit_sub_system/c_file_kit_sub_system.json"))
    has_fields = 'kit_name' in node_data and 'sub_system' in node_data
    if not has_fields or (node_data['kit_name'] and node_data['sub_system']):
        return
    relative_path = node_data.get('location').get('location_path').replace('\\', '/')
    kit_name, sub_system = get_kit_system_data(kit_json_file_path, relative_path)
    if not node_data['kit_name']:
        node_data['kit_name'] = kit_name
    if not node_data['sub_system']:
        node_data['sub_system'] = sub_system
533
534
def get_kit_system_data(json_path, relative_path):
    """Look up the kit name and subsystem for a header path in the kit JSON.

    Returns ('', '') when no entry's filePath contains the given path.
    """
    with open(json_path, 'r', encoding='utf-8') as fs:
        entries = json.load(fs)['data']
    for entry in entries:
        if 'filePath' in entry and relative_path in entry['filePath']:
            return entry['kitName'], entry['subSystem']
    return '', ''
546
547
def get_comment(cursor, data: dict):
    """Copy the cursor's raw comment into data, or mark it as having none."""
    raw = cursor.raw_comment
    data["comment"] = raw if raw else 'none_comment'
553
554
def processing_ast_node(child, current_file, data, name, directory_path):
    """Convert one child cursor and attach it to its parent's data.

    TYPE_REF children are stored under "type_ref"; everything else is
    appended to the list whose key ("parm"/"members"/"children") the
    caller supplies.
    """
    child_data = ast_to_dict(child, current_file, data, directory_path, key=1)
    if child.kind == CursorKind.TYPE_REF:
        data["type_ref"] = child_data
        return
    data[name].append(child_data)
561
562
def preorder_travers_ast(cursor, comment, current_file, directory_path):
    """Traverse the AST rooted at `cursor` and return its attribute dict."""
    # The root has no previous data to inherit from.
    return ast_to_dict(cursor, current_file, {}, directory_path, comment)
567
568
def get_start_comments(include_path):  # collect the leading comments of each header file
    """Collect the leading comment block of one header file.

    Reads lines until the first line starting with '#' (guard/include
    section), accumulating their text and recording the line numbers of
    '/**' openers in the module-level line_dist dict (keyed by
    calculation_times, which is then incremented).  END_COMMENT matches
    found past that point each append a '/** @} */' marker to the
    result.  Returns '' when no '#' line was found before EOF.
    """
    global line_dist
    line_dist = {}
    global calculation_times
    with open(include_path, 'r', encoding='utf-8') as f:
        file_line_data = f.readlines()
        if file_line_data:
            last_line = file_line_data[-1]
        else:
            last_line = -1  # sentinel: empty file has no last line
        f.seek(0)
        content = ''
        mark = 0
        max_line = 0
        end_line_mark = r'#'
        line = f.readline()
        line_number = 1
        line_list = []
        while line:
            if line.startswith(end_line_mark):
                mark = 1
                max_line = line_number
                line_dist[calculation_times] = line_list
                calculation_times += 1
                break
            if line.startswith('/**'):
                line_list.append(line_number)
            line_number += 1
            content += line
            line = f.readline()
        # NOTE(review): when the '#' line is also the file's very last line the
        # collected header is discarded (mark reset to 0) — confirm intent.
        if line == last_line and last_line != -1:
            mark = 0
        if 0 == mark:
            content = ''
            line_dist[calculation_times] = []
            calculation_times += 1
        f.seek(0)
        content_all = f.read()
        pattern_high = RegularExpressions.END_COMMENT.value
        matches_high = re.finditer(pattern_high, content_all)
        for mat in matches_high:
            # Line number where this match starts.
            start_line = content_all.count('\n', 0, mat.start()) + 1
            # Only matches past the recorded header line are appended.
            if start_line > max_line:
                line_list.append(start_line)
                content = "{}{}".format(content, '/** @} */\n')
        f.close()
        return content
618
619
def api_entrance(share_lib, include_path, directory_path, link_path):  # collection entry point
    """Parse every header in include_path and return the per-file node data.

    share_lib: path of the libclang shared library required by
    clang.cindex; include_path: list of header files to parse;
    link_path: extra '-I' include directories; directory_path: base
    directory used for relative paths.
    """
    # clang.cindex needs the libclang shared library, so configure it once.
    if not Config.loaded:
        Config.set_library_file(share_lib)
        print("lib.dll: install path")
    # Create the AST index.
    index = Index.create()
    # DETAILED_PROCESSING_RECORD makes macro-definition data available too.
    args = ['-I{}'.format(path) for path in link_path]
    args.append('-std=c99')
    args.append('--target=aarch64-linux-musl')
    options = clang.cindex.TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD

    data_total = []  # accumulated per-file results
    for item in include_path:  # process each header file
        tu = index.parse(item, args=args, options=options)
        ast_root_node = tu.cursor  # root node of the AST
        matches = get_start_comments(item)  # leading comment text of this file
        # Pre-order traversal of the AST.
        file_result_data = preorder_travers_ast(ast_root_node, matches, item, directory_path)  # invoke the handler
        data_total.append(file_result_data)
        # Attach the '/**' line numbers recorded by get_start_comments()
        # to the corresponding file entry.
        iter_line_dist = iter(line_dist)
        first = next(iter_line_dist)
        array_index = int(first)
        if len(data_total) - 1 >= array_index and first in line_dist:
            data_dist = data_total.__getitem__(array_index)    # ==> data_total[array_index]
            data_dist['line_list'] = line_dist.get(first)

    return data_total
649
650
def get_include_file(include_file_path, link_path, directory_path):
    """Public entry: parse the given C headers and return their node data.

    include_file_path: list of .h files to parse; link_path: list of
    extra include directories; directory_path: base directory used for
    relative paths.
    """
    # Path of the libclang shared library needed by clang.cindex.
    libclang_path = StringConstant.LIB_CLG_PATH.value
    return api_entrance(libclang_path, include_file_path, directory_path, link_path)
660