# Copyright 2015 PLUMgrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
try:
    from collections.abc import MutableMapping
except ImportError:
    from collections import MutableMapping
from time import strftime
import ctypes as ct
from functools import reduce
import os
import errno
import re
import sys

from .libbcc import lib, _RAW_CB_TYPE, _LOST_CB_TYPE, _RINGBUF_CB_TYPE, bcc_perf_buffer_opts
from .utils import get_online_cpus
from .utils import get_possible_cpus

BPF_MAP_TYPE_HASH = 1
BPF_MAP_TYPE_ARRAY = 2
BPF_MAP_TYPE_PROG_ARRAY = 3
BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4
BPF_MAP_TYPE_PERCPU_HASH = 5
BPF_MAP_TYPE_PERCPU_ARRAY = 6
BPF_MAP_TYPE_STACK_TRACE = 7
BPF_MAP_TYPE_CGROUP_ARRAY = 8
BPF_MAP_TYPE_LRU_HASH = 9
BPF_MAP_TYPE_LRU_PERCPU_HASH = 10
BPF_MAP_TYPE_LPM_TRIE = 11
BPF_MAP_TYPE_ARRAY_OF_MAPS = 12
BPF_MAP_TYPE_HASH_OF_MAPS = 13
BPF_MAP_TYPE_DEVMAP = 14
BPF_MAP_TYPE_SOCKMAP = 15
BPF_MAP_TYPE_CPUMAP = 16
BPF_MAP_TYPE_XSKMAP = 17
BPF_MAP_TYPE_SOCKHASH = 18
BPF_MAP_TYPE_CGROUP_STORAGE = 19
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21
BPF_MAP_TYPE_QUEUE = 22
BPF_MAP_TYPE_STACK = 23
BPF_MAP_TYPE_SK_STORAGE = 24
BPF_MAP_TYPE_DEVMAP_HASH = 25
BPF_MAP_TYPE_STRUCT_OPS = 26
BPF_MAP_TYPE_RINGBUF = 27
BPF_MAP_TYPE_INODE_STORAGE = 28
BPF_MAP_TYPE_TASK_STORAGE = 29

map_type_name = {
    BPF_MAP_TYPE_HASH: "HASH",
    BPF_MAP_TYPE_ARRAY: "ARRAY",
    BPF_MAP_TYPE_PROG_ARRAY: "PROG_ARRAY",
    BPF_MAP_TYPE_PERF_EVENT_ARRAY: "PERF_EVENT_ARRAY",
    BPF_MAP_TYPE_PERCPU_HASH: "PERCPU_HASH",
    BPF_MAP_TYPE_PERCPU_ARRAY: "PERCPU_ARRAY",
    BPF_MAP_TYPE_STACK_TRACE: "STACK_TRACE",
    BPF_MAP_TYPE_CGROUP_ARRAY: "CGROUP_ARRAY",
    BPF_MAP_TYPE_LRU_HASH: "LRU_HASH",
    BPF_MAP_TYPE_LRU_PERCPU_HASH: "LRU_PERCPU_HASH",
    BPF_MAP_TYPE_LPM_TRIE: "LPM_TRIE",
    BPF_MAP_TYPE_ARRAY_OF_MAPS: "ARRAY_OF_MAPS",
    BPF_MAP_TYPE_HASH_OF_MAPS: "HASH_OF_MAPS",
    BPF_MAP_TYPE_DEVMAP: "DEVMAP",
    BPF_MAP_TYPE_SOCKMAP: "SOCKMAP",
    BPF_MAP_TYPE_CPUMAP: "CPUMAP",
    BPF_MAP_TYPE_XSKMAP: "XSKMAP",
    BPF_MAP_TYPE_SOCKHASH: "SOCKHASH",
    BPF_MAP_TYPE_CGROUP_STORAGE: "CGROUP_STORAGE",
    BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: "REUSEPORT_SOCKARRAY",
    BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: "PERCPU_CGROUP_STORAGE",
    BPF_MAP_TYPE_QUEUE: "QUEUE",
    BPF_MAP_TYPE_STACK: "STACK",
    BPF_MAP_TYPE_SK_STORAGE: "SK_STORAGE",
    BPF_MAP_TYPE_DEVMAP_HASH: "DEVMAP_HASH",
    BPF_MAP_TYPE_STRUCT_OPS: "STRUCT_OPS",
    BPF_MAP_TYPE_RINGBUF: "RINGBUF",
    BPF_MAP_TYPE_INODE_STORAGE: "INODE_STORAGE",
    BPF_MAP_TYPE_TASK_STORAGE: "TASK_STORAGE",
}

stars_max = 40
log2_index_max = 65
linear_index_max = 1025

# helper functions, consider moving these to a utils module
def _stars(val, val_max, width):
    i = 0
    text = ""
    while (1):
        if (i > (width * val / val_max) - 1) or (i > width - 1):
            break
        text += "*"
        i += 1
    if val > val_max:
        text = text[:-1] + "+"
    return text

def _print_json_hist(vals, val_type, section_bucket=None):
    hist_list = []
    max_nonzero_idx = 0
    for i in range(len(vals)):
        if vals[i] != 0:
            max_nonzero_idx = i
    index = 1
    prev = 0
    for i in range(len(vals)):
        if i != 0 and i <= max_nonzero_idx:
            index = index * 2

            list_obj = {}
            list_obj['interval-start'] = prev
            list_obj['interval-end'] = int(index) - 1
            list_obj['count'] = int(vals[i])

            hist_list.append(list_obj)

            prev = index
    histogram = {"ts": strftime("%Y-%m-%d %H:%M:%S"), "val_type": val_type, "data": hist_list}
    if section_bucket:
        histogram[section_bucket[0]] = section_bucket[1]
    print(histogram)

def _print_log2_hist(vals, val_type, strip_leading_zero):
    global stars_max
    log2_dist_max = 64
    idx_max = -1
    val_max = 0

    for i, v in enumerate(vals):
        if v > 0: idx_max = i
        if v > val_max: val_max = v

    if idx_max <= 32:
        header = "     %-19s : count     distribution"
        body = "%10d -> %-10d : %-8d |%-*s|"
        stars = stars_max
    else:
        header = "               %-29s : count     distribution"
        body = "%20d -> %-20d : %-8d |%-*s|"
        stars = int(stars_max / 2)

    if idx_max > 0:
        print(header % val_type)

    for i in range(1, idx_max + 1):
        low = (1 << i) >> 1
        high = (1 << i) - 1
        if (low == high):
            low -= 1
        val = vals[i]

        if strip_leading_zero:
            if val:
                print(body % (low, high, val, stars,
                              _stars(val, val_max, stars)))
                strip_leading_zero = False
        else:
            print(body % (low, high, val, stars,
                          _stars(val, val_max, stars)))

def _print_linear_hist(vals, val_type, strip_leading_zero):
    global stars_max
    log2_dist_max = 64
    idx_max = -1
    val_max = 0

    for i, v in enumerate(vals):
        if v > 0: idx_max = i
        if v > val_max: val_max = v

    header = "     %-13s : count     distribution"
    body = "        %-10d : %-8d |%-*s|"
    stars = stars_max

    if idx_max >= 0:
        print(header % val_type)
    for i in range(0, idx_max + 1):
        val = vals[i]

        if strip_leading_zero:
            if val:
                print(body % (i, val, stars,
                              _stars(val, val_max, stars)))
                strip_leading_zero = False
        else:
            print(body % (i, val, stars,
                          _stars(val, val_max, stars)))


def get_table_type_name(ttype):
    try:
        return map_type_name[ttype]
    except KeyError:
        return "<unknown>"


def _get_event_class(event_map):
    ct_mapping = {
        'char'              : ct.c_char,
        's8'                : ct.c_char,
        'unsigned char'     : ct.c_ubyte,
        'u8'                : ct.c_ubyte,
        'u8 *'              : ct.c_char_p,
        'char *'            : ct.c_char_p,
        'short'             : ct.c_short,
        's16'               : ct.c_short,
        'unsigned short'    : ct.c_ushort,
        'u16'               : ct.c_ushort,
        'int'               : ct.c_int,
        's32'               : ct.c_int,
        'enum'              : ct.c_int,
        'unsigned int'      : ct.c_uint,
        'u32'               : ct.c_uint,
        'long'              : ct.c_long,
        'unsigned long'     : ct.c_ulong,
        'long long'         : ct.c_longlong,
        's64'               : ct.c_longlong,
        'unsigned long long': ct.c_ulonglong,
        'u64'               : ct.c_ulonglong,
        '__int128'          : (ct.c_longlong * 2),
        'unsigned __int128' : (ct.c_ulonglong * 2),
        'void *'            : ct.c_void_p,
    }

    # handle array types e.g. "int [16]" or "char[16]"
    array_type = re.compile(r"([^ ]+) ?\[([0-9]+)\]$")

    fields = []
    num_fields = lib.bpf_perf_event_fields(event_map.bpf.module, event_map._name)
    i = 0
    while i < num_fields:
        field = lib.bpf_perf_event_field(event_map.bpf.module, event_map._name, i).decode()
        m = re.match(r"(.*)#(.*)", field)
        field_name = m.group(1)
        field_type = m.group(2)

        if re.match(r"enum .*", field_type):
            field_type = "enum"

        m = array_type.match(field_type)
        try:
            if m:
                fields.append((field_name, ct_mapping[m.group(1)] * int(m.group(2))))
            else:
                fields.append((field_name, ct_mapping[field_type]))
        except KeyError:
            # Using print+sys.exit instead of raising exceptions,
            # because exceptions are caught by the caller.
            print("Type: '%s' not recognized. Please define the data with ctypes manually."
                  % field_type, file=sys.stderr)
            sys.exit(1)
        i += 1
    return type('', (ct.Structure,), {'_fields_': fields})


def Table(bpf, map_id, map_fd, keytype, leaftype, name, **kwargs):
    """Table(bpf, map_id, map_fd, keytype, leaftype, name, **kwargs)

    Create a python object out of a reference to a bpf table handle"""

    ttype = lib.bpf_table_type_id(bpf.module, map_id)
    t = None
    if ttype == BPF_MAP_TYPE_HASH:
        t = HashTable(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_ARRAY:
        t = Array(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_PROG_ARRAY:
        t = ProgArray(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_PERF_EVENT_ARRAY:
        t = PerfEventArray(bpf, map_id, map_fd, keytype, leaftype, name)
    elif ttype == BPF_MAP_TYPE_PERCPU_HASH:
        t = PerCpuHash(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
    elif ttype == BPF_MAP_TYPE_PERCPU_ARRAY:
        t = PerCpuArray(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
    elif ttype == BPF_MAP_TYPE_LPM_TRIE:
        t = LpmTrie(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_STACK_TRACE:
        t = StackTrace(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_LRU_HASH:
        t = LruHash(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_LRU_PERCPU_HASH:
        t = LruPerCpuHash(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_CGROUP_ARRAY:
        t = CgroupArray(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_DEVMAP:
        t = DevMap(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_CPUMAP:
        t = CpuMap(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_XSKMAP:
        t = XskMap(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_ARRAY_OF_MAPS:
        t = MapInMapArray(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_HASH_OF_MAPS:
        t = MapInMapHash(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_QUEUE or ttype == BPF_MAP_TYPE_STACK:
        t = QueueStack(bpf, map_id, map_fd, leaftype)
    elif ttype == BPF_MAP_TYPE_RINGBUF:
        t = RingBuf(bpf, map_id, map_fd, keytype, leaftype, name)
    if t is None:
        raise Exception("Unknown table type %d" % ttype)
    return t

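# Illustrative usage sketch (not part of this module): tables are normally not
# constructed directly. They are obtained from a loaded BPF object, which
# dispatches through the Table() factory above. Map, probe and variable names
# below are examples only.
#
#     from bcc import BPF
#
#     b = BPF(text="""
#     BPF_HASH(counts, u32, u64);
#     int do_count(void *ctx) {
#         u32 pid = bpf_get_current_pid_tgid() >> 32;
#         counts.increment(pid);
#         return 0;
#     }
#     """)
#     b.attach_kprobe(event=b.get_syscall_fnname("sync"), fn_name="do_count")
#
#     counts = b["counts"]          # a HashTable instance from Table()
#     for k, v in sorted(counts.items(), key=lambda kv: kv[1].value):
#         print(k.value, v.value)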

class TableBase(MutableMapping):

    def __init__(self, bpf, map_id, map_fd, keytype, leaftype, name=None):
        self.bpf = bpf
        self.map_id = map_id
        self.map_fd = map_fd
        self.Key = keytype
        self.Leaf = leaftype
        self.ttype = lib.bpf_table_type_id(self.bpf.module, self.map_id)
        self.flags = lib.bpf_table_flags_id(self.bpf.module, self.map_id)
        self._cbs = {}
        self._name = name
        self.max_entries = int(lib.bpf_table_max_entries_id(self.bpf.module,
                self.map_id))

    def get_fd(self):
        return self.map_fd

    def key_sprintf(self, key):
        buf = ct.create_string_buffer(ct.sizeof(self.Key) * 8)
        res = lib.bpf_table_key_snprintf(self.bpf.module, self.map_id, buf,
                                         len(buf), ct.byref(key))
        if res < 0:
            raise Exception("Could not printf key")
        return buf.value

    def leaf_sprintf(self, leaf):
        buf = ct.create_string_buffer(ct.sizeof(self.Leaf) * 8)
        res = lib.bpf_table_leaf_snprintf(self.bpf.module, self.map_id, buf,
                                          len(buf), ct.byref(leaf))
        if res < 0:
            raise Exception("Could not printf leaf")
        return buf.value

    def key_scanf(self, key_str):
        key = self.Key()
        res = lib.bpf_table_key_sscanf(self.bpf.module, self.map_id, key_str,
                                       ct.byref(key))
        if res < 0:
            raise Exception("Could not scanf key")
        return key

    def leaf_scanf(self, leaf_str):
        leaf = self.Leaf()
        res = lib.bpf_table_leaf_sscanf(self.bpf.module, self.map_id, leaf_str,
                                        ct.byref(leaf))
        if res < 0:
            raise Exception("Could not scanf leaf")
        return leaf

    def __getitem__(self, key):
        leaf = self.Leaf()
        res = lib.bpf_lookup_elem(self.map_fd, ct.byref(key), ct.byref(leaf))
        if res < 0:
            raise KeyError
        return leaf

    def __setitem__(self, key, leaf):
        res = lib.bpf_update_elem(self.map_fd, ct.byref(key), ct.byref(leaf), 0)
        if res < 0:
            errstr = os.strerror(ct.get_errno())
            raise Exception("Could not update table: %s" % errstr)

    def __delitem__(self, key):
        res = lib.bpf_delete_elem(self.map_fd, ct.byref(key))
        if res < 0:
            raise KeyError

    # override the MutableMapping's implementation of these since they
    # don't handle KeyError nicely
    def itervalues(self):
        for key in self:
            # a map entry may be deleted in between discovering the key and
            # fetching the value, suppress such errors
            try:
                yield self[key]
            except KeyError:
                pass

    def iteritems(self):
        for key in self:
            try:
                yield (key, self[key])
            except KeyError:
                pass

    def items(self):
        return [item for item in self.iteritems()]

    def values(self):
        return [value for value in self.itervalues()]

    def clear(self):
        # default clear uses popitem, which can race with the bpf prog
        for k in self.keys():
            self.__delitem__(k)

    def _alloc_keys_values(self, alloc_k=False, alloc_v=False, count=None):
        """Allocate keys and/or values arrays. Useful in items_*_batch.

        Args:
            alloc_k (bool): True to allocate keys array, False otherwise.
            Default is False.
            alloc_v (bool): True to allocate values array, False otherwise.
            Default is False.
            count (int): number of elements in the array(s) to allocate. If
            count is None then it allocates the maximum number of elements,
            i.e. self.max_entries.

        Returns:
            tuple: (count, keys, values), where count is a ct.c_uint32 and
            keys and values are instances of ct.Array.
        Raises:
            ValueError: If count is less than 1 or greater than
            self.max_entries.
        """
        keys = values = None
        if not alloc_k and not alloc_v:
            return (ct.c_uint32(0), None, None)

        if not count:  # means alloc maximum size
            count = self.max_entries
        elif count < 1 or count > self.max_entries:
            raise ValueError("Wrong count")

        if alloc_k:
            keys = (self.Key * count)()
        if alloc_v:
            values = (self.Leaf * count)()

        return (ct.c_uint32(count), keys, values)

    def _sanity_check_keys_values(self, keys=None, values=None):
        """Check if the given keys or values have the right type and size.

        Args:
            keys (ct.Array): keys array to check
            values (ct.Array): values array to check
        Returns:
            ct.c_uint32 : the size of the array(s)
        Raises:
            ValueError: If the length of the arrays is less than 1 or greater
            than self.max_entries, or if the two arrays have different lengths.
            TypeError: If keys or values are not instances of ct.Array.
        """
        arr_len = 0
        for elem in [keys, values]:
            if elem:
                if not isinstance(elem, ct.Array):
                    raise TypeError

                arr_len = len(elem)
                if arr_len < 1 or arr_len > self.max_entries:
                    raise ValueError("Array's length is wrong")

        if keys and values:
            # check that both lengths are equal
            if len(keys) != len(values):
                raise ValueError("keys array length != values array length")

        return ct.c_uint32(arr_len)

    def items_lookup_batch(self):
        """Look up all the key-value pairs in the map.

        Args:
            None
        Yields:
            tuple: A (key, value) tuple for every entry that has been
            looked up.
        Notes: batch lookup on a subset of keys is not supported by the kernel.
        """
        for k, v in self._items_lookup_and_optionally_delete_batch(delete=False):
            yield (k, v)
        return

    def items_delete_batch(self, ct_keys=None):
        """Delete the key-value pairs related to the keys given as parameters.
        Note that if no keys are given, it is faster to call
        lib.bpf_lookup_and_delete_batch than to create a keys array and then
        call lib.bpf_delete_batch on those keys.

        Args:
            ct_keys (ct.Array): keys array to delete. If an array of keys is
            given, then all the related key-value pairs are deleted.
            If ct_keys is None (default), then all entries are deleted.
        Yields:
            tuple: A (key, value) tuple for every entry that has been
            deleted.
        Raises:
            Exception: If the bpf syscall return value indicates an error.
        """
        if ct_keys is not None:
            ct_cnt = self._sanity_check_keys_values(keys=ct_keys)
            res = lib.bpf_delete_batch(self.map_fd,
                                       ct.byref(ct_keys),
                                       ct.byref(ct_cnt)
                                       )
            if (res != 0):
                raise Exception("BPF_MAP_DELETE_BATCH has failed: %s"
                                % os.strerror(ct.get_errno()))

        else:
            for _ in self.items_lookup_and_delete_batch():
                return

    def items_update_batch(self, ct_keys, ct_values):
        """Update all the key-value pairs in the map provided.
        The arrays must be the same length, between 1 and the maximum number
        of entries.

        Args:
            ct_keys (ct.Array): keys array to update
            ct_values (ct.Array): values array to update
        Raises:
            Exception: If the bpf syscall return value indicates an error.
        """
        ct_cnt = self._sanity_check_keys_values(keys=ct_keys, values=ct_values)
        res = lib.bpf_update_batch(self.map_fd,
                                   ct.byref(ct_keys),
                                   ct.byref(ct_values),
                                   ct.byref(ct_cnt)
                                   )
        if (res != 0):
            raise Exception("BPF_MAP_UPDATE_BATCH has failed: %s"
                            % os.strerror(ct.get_errno()))

    def items_lookup_and_delete_batch(self):
        """Look up and delete all the key-value pairs in the map.

        Args:
            None
        Yields:
            tuple: A (key, value) tuple for every entry that has been
            looked up and deleted.
        Notes: batch lookup and delete on a subset of keys is not supported
        by the kernel.
        """
        for k, v in self._items_lookup_and_optionally_delete_batch(delete=True):
            yield (k, v)
        return

    def _items_lookup_and_optionally_delete_batch(self, delete=True):
        """Look up and optionally delete all the key-value pairs in the map.

        Args:
            delete (bool) : look up and delete the key-value pairs when True,
            else just look up.
        Yields:
            tuple: A (key, value) tuple for every entry that has been
            looked up and optionally deleted.
        Raises:
            Exception: If the bpf syscall return value indicates an error.
        Notes: batch lookup and delete on a subset of keys is not supported
        by the kernel.
        """
        if delete is True:
            bpf_batch = lib.bpf_lookup_and_delete_batch
            bpf_cmd = "BPF_MAP_LOOKUP_AND_DELETE_BATCH"
        else:
            bpf_batch = lib.bpf_lookup_batch
            bpf_cmd = "BPF_MAP_LOOKUP_BATCH"

        # alloc keys and values to the max size
        ct_buf_size, ct_keys, ct_values = self._alloc_keys_values(alloc_k=True,
                                                                  alloc_v=True)
        ct_out_batch = ct_cnt = ct.c_uint32(0)
        total = 0
        while True:
            ct_cnt.value = ct_buf_size.value - total
            res = bpf_batch(self.map_fd,
                            ct.byref(ct_out_batch) if total else None,
                            ct.byref(ct_out_batch),
                            ct.byref(ct_keys, ct.sizeof(self.Key) * total),
                            ct.byref(ct_values, ct.sizeof(self.Leaf) * total),
                            ct.byref(ct_cnt)
                            )
            errcode = ct.get_errno()
            total += ct_cnt.value
            if (res != 0 and errcode != errno.ENOENT):
                raise Exception("%s has failed: %s" % (bpf_cmd,
                                                       os.strerror(errcode)))

            if res != 0:
                break  # success

            if total == ct_buf_size.value:  # buffer full, we can't progress
                break

            if ct_cnt.value == 0:
                # no progress, probably because concurrent update
                # puts too many elements in one bucket.
                break

        for i in range(0, total):
            yield (ct_keys[i], ct_values[i])
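
    # Illustrative sketch of the batch helpers above (map and variable names
    # are examples, assuming u32 keys and u64 values; "b" is a loaded BPF
    # object):
    #
    #     counts = b["counts"]
    #
    #     # read the whole map in a few syscalls instead of one per key
    #     for k, v in counts.items_lookup_batch():
    #         print(k.value, v.value)
    #
    #     # bulk update from ctypes arrays of matching length
    #     keys = (counts.Key * 2)(1, 2)
    #     values = (counts.Leaf * 2)(100, 200)
    #     counts.items_update_batch(keys, values)
    #
    #     # drain the map, deleting entries as they are read
    #     for k, v in counts.items_lookup_and_delete_batch():
    #         print(k.value, v.value)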

    def zero(self):
        # Even though this is not very efficient, we grab the entire list of
        # keys before enumerating it. This helps avoid a potential race where
        # the leaf assignment changes a hash table bucket that is being
        # enumerated by the same loop, and may lead to a hang.
        for k in list(self.keys()):
            self[k] = self.Leaf()

    def __iter__(self):
        return TableBase.Iter(self)

    def iter(self): return self.__iter__()
    def keys(self): return self.__iter__()

    class Iter(object):
        def __init__(self, table):
            self.table = table
            self.key = None
        def __iter__(self):
            return self
        def __next__(self):
            return self.next()
        def next(self):
            self.key = self.table.next(self.key)
            return self.key

    def next(self, key):
        next_key = self.Key()

        if key is None:
            res = lib.bpf_get_first_key(self.map_fd, ct.byref(next_key),
                                        ct.sizeof(self.Key))
        else:
            res = lib.bpf_get_next_key(self.map_fd, ct.byref(key),
                                       ct.byref(next_key))

        if res < 0:
            raise StopIteration()
        return next_key

    def decode_c_struct(self, tmp, buckets, bucket_fn, bucket_sort_fn):
        f1 = self.Key._fields_[0][0]
        f2 = self.Key._fields_[1][0]
        # The above code assumes that self.Key._fields_[1][0] holds the
        # slot. But a padding member may have been inserted here, which
        # breaks the assumption and leads to chaos.
        # TODO: this is a quick fix. Fixing/working around in the BCC
        # internal library is the right thing to do.
        if f2 == '__pad_1' and len(self.Key._fields_) == 3:
            f2 = self.Key._fields_[2][0]
        for k, v in self.items():
            bucket = getattr(k, f1)
            if bucket_fn:
                bucket = bucket_fn(bucket)
            vals = tmp[bucket] = tmp.get(bucket, [0] * log2_index_max)
            slot = getattr(k, f2)
            vals[slot] = v.value
        buckets_lst = list(tmp.keys())
        if bucket_sort_fn:
            buckets_lst = bucket_sort_fn(buckets_lst)
        for bucket in buckets_lst:
            buckets.append(bucket)

    def print_json_hist(self, val_type="value", section_header="Bucket ptr",
                        section_print_fn=None, bucket_fn=None, bucket_sort_fn=None):
        """print_json_hist(val_type="value", section_header="Bucket ptr",
                           section_print_fn=None, bucket_fn=None,
                           bucket_sort_fn=None):

        Prints a table as a json histogram. The table must be stored as
        log2. The val_type argument is optional, and is a column header.
        If the histogram has a secondary key, the dictionary will be split
        by the secondary key.
        If section_print_fn is not None, it will be passed the bucket value
        to format into a string as it sees fit. If bucket_fn is not None,
        it will be used to produce a bucket value for the histogram keys.
        If bucket_sort_fn is not None, it will be used to sort the buckets
        before iterating them, and it is useful when there are multiple fields
        in the secondary key.
        The maximum index allowed is log2_index_max (65), which will
        accommodate any 64-bit integer in the histogram.
        """
        if isinstance(self.Key(), ct.Structure):
            tmp = {}
            buckets = []
            self.decode_c_struct(tmp, buckets, bucket_fn, bucket_sort_fn)
            for bucket in buckets:
                vals = tmp[bucket]
                if section_print_fn:
                    section_bucket = (section_header, section_print_fn(bucket))
                else:
                    section_bucket = (section_header, bucket)
                _print_json_hist(vals, val_type, section_bucket)

        else:
            vals = [0] * log2_index_max
            for k, v in self.items():
                vals[k.value] = v.value
            _print_json_hist(vals, val_type)

    def print_log2_hist(self, val_type="value", section_header="Bucket ptr",
            section_print_fn=None, bucket_fn=None, strip_leading_zero=None,
            bucket_sort_fn=None):
        """print_log2_hist(val_type="value", section_header="Bucket ptr",
                           section_print_fn=None, bucket_fn=None,
                           strip_leading_zero=None, bucket_sort_fn=None):

        Prints a table as a log2 histogram. The table must be stored as
        log2. The val_type argument is optional, and is a column header.
        If the histogram has a secondary key, multiple tables will print
        and section_header can be used as a header description for each.
        If section_print_fn is not None, it will be passed the bucket value
        to format into a string as it sees fit. If bucket_fn is not None,
        it will be used to produce a bucket value for the histogram keys.
        If strip_leading_zero is truthy, leading zero buckets are omitted
        from the printed histogram.
        If bucket_sort_fn is not None, it will be used to sort the buckets
        before iterating them, and it is useful when there are multiple fields
        in the secondary key.
        The maximum index allowed is log2_index_max (65), which will
        accommodate any 64-bit integer in the histogram.
        """
        if isinstance(self.Key(), ct.Structure):
            tmp = {}
            buckets = []
            self.decode_c_struct(tmp, buckets, bucket_fn, bucket_sort_fn)
            for bucket in buckets:
                vals = tmp[bucket]
                if section_print_fn:
                    print("\n%s = %s" % (section_header,
                        section_print_fn(bucket)))
                else:
                    print("\n%s = %r" % (section_header, bucket))
                _print_log2_hist(vals, val_type, strip_leading_zero)
        else:
            vals = [0] * log2_index_max
            for k, v in self.items():
                vals[k.value] = v.value
            _print_log2_hist(vals, val_type, strip_leading_zero)
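
    # Illustrative sketch of print_log2_hist() (names are examples;
    # BPF_HISTOGRAM and bpf_log2l are bcc helpers on the C side):
    #
    #     b = BPF(text="""
    #     BPF_HISTOGRAM(dist);
    #     int trace_req(struct pt_regs *ctx, u64 bytes) {
    #         dist.increment(bpf_log2l(bytes));
    #         return 0;
    #     }
    #     """)
    #     # ... attach trace_req with attach_kprobe(), let it run ...
    #     b["dist"].print_log2_hist("bytes")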

    def print_linear_hist(self, val_type="value", section_header="Bucket ptr",
            section_print_fn=None, bucket_fn=None, strip_leading_zero=None,
            bucket_sort_fn=None):
        """print_linear_hist(val_type="value", section_header="Bucket ptr",
                           section_print_fn=None, bucket_fn=None,
                           strip_leading_zero=None, bucket_sort_fn=None)

        Prints a table as a linear histogram. This is intended to span integer
        ranges, e.g., from 0 to 100. The val_type argument is optional, and is
        a column header.  If the histogram has a secondary key, multiple tables
        will print and section_header can be used as a header description for
        each.  If section_print_fn is not None, it will be passed the bucket
        value to format into a string as it sees fit. If bucket_fn is not None,
        it will be used to produce a bucket value for the histogram keys.
        If strip_leading_zero is truthy, leading zero buckets are omitted
        from the printed histogram.
        If bucket_sort_fn is not None, it will be used to sort the buckets
        before iterating them, and it is useful when there are multiple fields
        in the secondary key.
        The maximum index allowed is linear_index_max (1025), which should be
        sufficient for the integer ranges typically spanned.
        """
        if isinstance(self.Key(), ct.Structure):
            tmp = {}
            buckets = []
            self.decode_c_struct(tmp, buckets, bucket_fn, bucket_sort_fn)

            for bucket in buckets:
                vals = tmp[bucket]
                if section_print_fn:
                    print("\n%s = %s" % (section_header,
                        section_print_fn(bucket)))
                else:
                    print("\n%s = %r" % (section_header, bucket))
                _print_linear_hist(vals, val_type, strip_leading_zero)
        else:
            vals = [0] * linear_index_max
            for k, v in self.items():
                try:
                    vals[k.value] = v.value
                except IndexError:
                    # Improve error text. If the limit proves a nuisance, this
                    # function could be rewritten to avoid having one.
                    raise IndexError(("Index in print_linear_hist() of %d " +
                        "exceeds max of %d.") % (k.value, linear_index_max))
            _print_linear_hist(vals, val_type, strip_leading_zero)


class HashTable(TableBase):
    def __init__(self, *args, **kwargs):
        super(HashTable, self).__init__(*args, **kwargs)

    def __len__(self):
        i = 0
        for k in self: i += 1
        return i

class LruHash(HashTable):
    def __init__(self, *args, **kwargs):
        super(LruHash, self).__init__(*args, **kwargs)

class ArrayBase(TableBase):
    def __init__(self, *args, **kwargs):
        super(ArrayBase, self).__init__(*args, **kwargs)

    def _normalize_key(self, key):
        if isinstance(key, int):
            if key < 0:
                key = len(self) + key
            key = self.Key(key)
        if not isinstance(key, ct._SimpleCData):
            raise IndexError("Array index must be an integer type")
        if key.value >= len(self):
            raise IndexError("Array index out of range")
        return key

    def __len__(self):
        return self.max_entries

    def __getitem__(self, key):
        key = self._normalize_key(key)
        return super(ArrayBase, self).__getitem__(key)

    def __setitem__(self, key, leaf):
        key = self._normalize_key(key)
        super(ArrayBase, self).__setitem__(key, leaf)

    def __delitem__(self, key):
        key = self._normalize_key(key)
        super(ArrayBase, self).__delitem__(key)

    def clearitem(self, key):
        key = self._normalize_key(key)
        leaf = self.Leaf()
        res = lib.bpf_update_elem(self.map_fd, ct.byref(key), ct.byref(leaf), 0)
        if res < 0:
            raise Exception("Could not clear item")

    def __iter__(self):
        return ArrayBase.Iter(self, self.Key)

    class Iter(object):
        def __init__(self, table, keytype):
            self.Key = keytype
            self.table = table
            self.i = -1

        def __iter__(self):
            return self
        def __next__(self):
            return self.next()
        def next(self):
            self.i += 1
            if self.i == len(self.table):
                raise StopIteration()
            return self.Key(self.i)

class Array(ArrayBase):
    def __init__(self, *args, **kwargs):
        super(Array, self).__init__(*args, **kwargs)

    def __delitem__(self, key):
        # Delete in Array type does not have an effect, so zero out instead
        self.clearitem(key)

class ProgArray(ArrayBase):
    def __init__(self, *args, **kwargs):
        super(ProgArray, self).__init__(*args, **kwargs)

    def __setitem__(self, key, leaf):
        if isinstance(leaf, int):
            leaf = self.Leaf(leaf)
        if isinstance(leaf, self.bpf.Function):
            leaf = self.Leaf(leaf.fd)
        super(ProgArray, self).__setitem__(key, leaf)

class FileDesc:
    def __init__(self, fd):
        if (fd is None) or (fd < 0):
            raise Exception("Invalid file descriptor")
        self.fd = fd

    def clean_up(self):
        if (self.fd is not None) and (self.fd >= 0):
            os.close(self.fd)
            self.fd = None

    def __del__(self):
        self.clean_up()

    def __enter__(self, *args, **kwargs):
        return self

    def __exit__(self, *args, **kwargs):
        self.clean_up()

class CgroupArray(ArrayBase):
    def __init__(self, *args, **kwargs):
        super(CgroupArray, self).__init__(*args, **kwargs)

    def __setitem__(self, key, leaf):
        if isinstance(leaf, int):
            super(CgroupArray, self).__setitem__(key, self.Leaf(leaf))
        elif isinstance(leaf, str):
            # TODO: Add os.O_CLOEXEC once we move to Python version >3.3
            with FileDesc(os.open(leaf, os.O_RDONLY)) as f:
                super(CgroupArray, self).__setitem__(key, self.Leaf(f.fd))
        else:
            raise Exception("Cgroup array key must be either FD or cgroup path")

class PerfEventArray(ArrayBase):

    def __init__(self, *args, **kwargs):
        super(PerfEventArray, self).__init__(*args, **kwargs)
        self._open_key_fds = {}
        self._event_class = None

    def __del__(self):
        keys = list(self._open_key_fds.keys())
        for key in keys:
            del self[key]

    def __delitem__(self, key):
        if key not in self._open_key_fds:
            return
        # Delete entry from the array
        super(PerfEventArray, self).__delitem__(key)
        key_id = (id(self), key)
        if key_id in self.bpf.perf_buffers:
            # The key is opened for perf ring buffer
            lib.perf_reader_free(self.bpf.perf_buffers[key_id])
            del self.bpf.perf_buffers[key_id]
            del self._cbs[key]
        else:
            # The key is opened for perf event read
            lib.bpf_close_perf_event_fd(self._open_key_fds[key])
        del self._open_key_fds[key]

    def event(self, data):
        """event(data)

        When perf buffers are opened to receive custom perf events,
        the underlying event data struct which is defined in C in
        the BPF program can be deduced via this function. This avoids
        redundant definitions in Python.
        """
        if self._event_class is None:
            self._event_class = _get_event_class(self)
        return ct.cast(data, ct.POINTER(self._event_class)).contents

    def open_perf_buffer(self, callback, page_cnt=8, lost_cb=None, wakeup_events=1):
        """open_perf_buffer(callback, page_cnt=8, lost_cb=None, wakeup_events=1)

        Opens a set of per-cpu ring buffers to receive custom perf event
        data from the bpf program. The callback will be invoked for each
        event submitted from the kernel, up to millions per second. Use
        page_cnt to change the size of the per-cpu ring buffer. The value
        must be a power of two and defaults to 8.
        """

        if page_cnt & (page_cnt - 1) != 0:
            raise Exception("Perf buffer page_cnt must be a power of two")

        for i in get_online_cpus():
            self._open_perf_buffer(i, callback, page_cnt, lost_cb, wakeup_events)
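
    # Illustrative sketch of open_perf_buffer() (table/field names are
    # examples; the decoded struct is whatever the C side perf_submit()s,
    # here assumed to carry pid and comm; perf_buffer_poll() is a method of
    # the BPF object):
    #
    #     def handle_event(cpu, data, size):
    #         event = b["events"].event(data)   # decoded via event() above
    #         print(cpu, event.pid, event.comm)
    #
    #     b["events"].open_perf_buffer(handle_event, page_cnt=64)
    #     while True:
    #         try:
    #             b.perf_buffer_poll()
    #         except KeyboardInterrupt:
    #             break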

    def _open_perf_buffer(self, cpu, callback, page_cnt, lost_cb, wakeup_events):
        def raw_cb_(_, data, size):
            try:
                callback(cpu, data, size)
            except IOError as e:
                if e.errno == errno.EPIPE:
                    exit()
                else:
                    raise e
        def lost_cb_(_, lost):
            try:
                lost_cb(lost)
            except IOError as e:
                if e.errno == errno.EPIPE:
                    exit()
                else:
                    raise e
        fn = _RAW_CB_TYPE(raw_cb_)
        lost_fn = _LOST_CB_TYPE(lost_cb_) if lost_cb else ct.cast(None, _LOST_CB_TYPE)
        opts = bcc_perf_buffer_opts()
        opts.pid = -1
        opts.cpu = cpu
        opts.wakeup_events = wakeup_events
        reader = lib.bpf_open_perf_buffer_opts(fn, lost_fn, None, page_cnt, ct.byref(opts))
        if not reader:
            raise Exception("Could not open perf buffer")
        fd = lib.perf_reader_fd(reader)
        self[self.Key(cpu)] = self.Leaf(fd)
        self.bpf.perf_buffers[(id(self), cpu)] = reader
        # keep a refcnt
        self._cbs[cpu] = (fn, lost_fn)
        # The actual fd is held by the perf reader, add to track opened keys
        self._open_key_fds[cpu] = -1

    def _open_perf_event(self, cpu, typ, config):
        fd = lib.bpf_open_perf_event(typ, config, -1, cpu)
        if fd < 0:
            raise Exception("bpf_open_perf_event failed")
        self[self.Key(cpu)] = self.Leaf(fd)
        self._open_key_fds[cpu] = fd

    def open_perf_event(self, typ, config):
        """open_perf_event(typ, config)

        Configures the table such that calls from the bpf program to
        table.perf_read(CUR_CPU_IDENTIFIER) will return the hardware
        counter denoted by typ and config on the local cpu.
        """
        for i in get_online_cpus():
            self._open_perf_event(i, typ, config)


class PerCpuHash(HashTable):
    def __init__(self, *args, **kwargs):
        self.reducer = kwargs.pop("reducer", None)
        super(PerCpuHash, self).__init__(*args, **kwargs)
        self.sLeaf = self.Leaf
        self.total_cpu = len(get_possible_cpus())
        # This needs to be 8 as hard-coded in the Linux kernel.
        self.alignment = ct.sizeof(self.sLeaf) % 8
        if self.alignment == 0:
            self.Leaf = self.sLeaf * self.total_cpu
        else:
            # Currently float, char and unaligned structs are not supported
            if self.sLeaf == ct.c_uint:
                self.Leaf = ct.c_uint64 * self.total_cpu
            elif self.sLeaf == ct.c_int:
                self.Leaf = ct.c_int64 * self.total_cpu
            else:
                raise IndexError("Leaf must be aligned to 8 bytes")

    def getvalue(self, key):
        result = super(PerCpuHash, self).__getitem__(key)
        if self.alignment == 0:
            ret = result
        else:
            ret = (self.sLeaf * self.total_cpu)()
            for i in range(0, self.total_cpu):
                ret[i] = result[i]
        return ret

    def __getitem__(self, key):
        if self.reducer:
            return reduce(self.reducer, self.getvalue(key))
        else:
            return self.getvalue(key)

    def __setitem__(self, key, leaf):
        super(PerCpuHash, self).__setitem__(key, leaf)

    def sum(self, key):
        if isinstance(self.Leaf(), ct.Structure):
            raise IndexError("Leaf must be an integer type for default sum functions")
        return self.sLeaf(sum(self.getvalue(key)))

    def max(self, key):
        if isinstance(self.Leaf(), ct.Structure):
            raise IndexError("Leaf must be an integer type for default max functions")
        return self.sLeaf(max(self.getvalue(key)))

    def average(self, key):
        result = self.sum(key)
        return result.value / self.total_cpu
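
    # Illustrative sketch of per-cpu aggregation (map name is an example,
    # corresponding to BPF_PERCPU_HASH(stats, u32, u64) on the C side):
    #
    #     stats = b["stats"]
    #     key = stats.Key(42)
    #     per_cpu = stats[key]            # one slot per possible CPU
    #     total = stats.sum(key).value    # aggregated across CPUs
    #     avg = stats.average(key)
    #
    # A reducer callable can also be passed through the **kwargs that the
    # Table() factory forwards (e.g. reducer=lambda a, b: a + b), in which
    # case plain indexing already returns the reduced value.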

class LruPerCpuHash(PerCpuHash):
    def __init__(self, *args, **kwargs):
        super(LruPerCpuHash, self).__init__(*args, **kwargs)

class PerCpuArray(ArrayBase):
    def __init__(self, *args, **kwargs):
        self.reducer = kwargs.pop("reducer", None)
        super(PerCpuArray, self).__init__(*args, **kwargs)
        self.sLeaf = self.Leaf
        self.total_cpu = len(get_possible_cpus())
        # This needs to be 8 as hard-coded in the Linux kernel.
        self.alignment = ct.sizeof(self.sLeaf) % 8
        if self.alignment == 0:
            self.Leaf = self.sLeaf * self.total_cpu
        else:
            # Currently float, char and unaligned structs are not supported
            if self.sLeaf == ct.c_uint:
                self.Leaf = ct.c_uint64 * self.total_cpu
            elif self.sLeaf == ct.c_int:
                self.Leaf = ct.c_int64 * self.total_cpu
            else:
                raise IndexError("Leaf must be aligned to 8 bytes")

    def getvalue(self, key):
        result = super(PerCpuArray, self).__getitem__(key)
        if self.alignment == 0:
            ret = result
        else:
            ret = (self.sLeaf * self.total_cpu)()
            for i in range(0, self.total_cpu):
                ret[i] = result[i]
        return ret

    def __getitem__(self, key):
        if self.reducer:
            return reduce(self.reducer, self.getvalue(key))
        else:
            return self.getvalue(key)

    def __setitem__(self, key, leaf):
        super(PerCpuArray, self).__setitem__(key, leaf)

    def __delitem__(self, key):
        # Delete in this type does not have an effect, so zero out instead
        self.clearitem(key)

    def sum(self, key):
        if isinstance(self.Leaf(), ct.Structure):
            raise IndexError("Leaf must be an integer type for default sum functions")
        return self.sLeaf(sum(self.getvalue(key)))

    def max(self, key):
        if isinstance(self.Leaf(), ct.Structure):
            raise IndexError("Leaf must be an integer type for default max functions")
        return self.sLeaf(max(self.getvalue(key)))

    def average(self, key):
        result = self.sum(key)
        return result.value / self.total_cpu

class LpmTrie(TableBase):
    def __init__(self, *args, **kwargs):
        super(LpmTrie, self).__init__(*args, **kwargs)

    def __len__(self):
        raise NotImplementedError


class StackTrace(TableBase):
    MAX_DEPTH = 127
    BPF_F_STACK_BUILD_ID = (1<<5)
    BPF_STACK_BUILD_ID_EMPTY = 0  # can't get stacktrace
    BPF_STACK_BUILD_ID_VALID = 1  # valid build-id, ip
    BPF_STACK_BUILD_ID_IP = 2  # fallback to ip

    def __init__(self, *args, **kwargs):
        super(StackTrace, self).__init__(*args, **kwargs)

    class StackWalker(object):
        def __init__(self, stack, flags, resolve=None):
            self.stack = stack
            self.n = -1
            self.resolve = resolve
            self.flags = flags

        def __iter__(self):
            return self

        def __next__(self):
            return self.next()

        def next(self):
            self.n += 1
            if self.n == StackTrace.MAX_DEPTH:
                raise StopIteration()

            if self.flags & StackTrace.BPF_F_STACK_BUILD_ID:
                addr = self.stack.trace[self.n]
                if addr.status == StackTrace.BPF_STACK_BUILD_ID_IP or \
                   addr.status == StackTrace.BPF_STACK_BUILD_ID_EMPTY:
                    raise StopIteration()
            else:
                addr = self.stack.ip[self.n]

            if addr == 0:
                raise StopIteration()

            return self.resolve(addr) if self.resolve else addr

    def walk(self, stack_id, resolve=None):
        return StackTrace.StackWalker(self[self.Key(stack_id)], self.flags, resolve)
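
    # Illustrative sketch of walk() (names are examples; BPF_STACK_TRACE is
    # the bcc C macro, stack_id would come from bpf_get_stackid() stored in
    # another map, and b.ksym() resolves kernel addresses to symbols):
    #
    #     stacks = b["stack_traces"]
    #     for addr in stacks.walk(stack_id, resolve=b.ksym):
    #         print(addr)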

    def __len__(self):
        i = 0
        for k in self: i += 1
        return i

    def clear(self):
        pass

class DevMap(ArrayBase):
    def __init__(self, *args, **kwargs):
        super(DevMap, self).__init__(*args, **kwargs)

class CpuMap(ArrayBase):
    def __init__(self, *args, **kwargs):
        super(CpuMap, self).__init__(*args, **kwargs)

class XskMap(ArrayBase):
    def __init__(self, *args, **kwargs):
        super(XskMap, self).__init__(*args, **kwargs)

class MapInMapArray(ArrayBase):
    def __init__(self, *args, **kwargs):
        super(MapInMapArray, self).__init__(*args, **kwargs)

class MapInMapHash(HashTable):
    def __init__(self, *args, **kwargs):
        super(MapInMapHash, self).__init__(*args, **kwargs)

class RingBuf(TableBase):
    def __init__(self, *args, **kwargs):
        super(RingBuf, self).__init__(*args, **kwargs)
        self._ringbuf = None
        self._event_class = None

    def __delitem__(self, key):
        pass

    def __del__(self):
        pass

    def __len__(self):
        return 0

    def event(self, data):
        """event(data)

        When ring buffers are opened to receive custom events,
        the underlying event data struct which is defined in C in
        the BPF program can be deduced via this function. This avoids
        redundant definitions in Python.
        """
        if self._event_class is None:
            self._event_class = _get_event_class(self)
        return ct.cast(data, ct.POINTER(self._event_class)).contents

    def open_ring_buffer(self, callback, ctx=None):
        """open_ring_buffer(callback, ctx=None)

        Opens a ring buffer to receive custom event data from the bpf program.
        The callback will be invoked for each event submitted from the kernel,
        up to millions per second.
        """

        def ringbuf_cb_(ctx, data, size):
            try:
                ret = callback(ctx, data, size)
                # Callback for ringbufs should _always_ return an integer.
                # If the function the user registers does not,
                # simply fall back to returning 0.
                try:
                    ret = int(ret)
                except:
                    ret = 0
            except IOError as e:
                if e.errno == errno.EPIPE:
                    exit()
                else:
                    raise e
            return ret

        fn = _RINGBUF_CB_TYPE(ringbuf_cb_)
        self.bpf._open_ring_buffer(self.map_fd, fn, ctx)
        # keep a refcnt
        self._cbs[0] = fn
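
    # Illustrative sketch of open_ring_buffer() (names are examples; the C
    # side would declare BPF_RINGBUF_OUTPUT(events, 8) and submit records
    # with ringbuf_output() or reserve()/submit(); ring_buffer_poll() is a
    # method of the BPF object; the pid field is assumed):
    #
    #     def handle_event(ctx, data, size):
    #         event = b["events"].event(data)   # decoded via event() above
    #         print(event.pid)
    #         return 0
    #
    #     b["events"].open_ring_buffer(handle_event)
    #     while True:
    #         try:
    #             b.ring_buffer_poll()
    #         except KeyboardInterrupt:
    #             break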

class QueueStack:
    # Flag for map.push
    BPF_EXIST = 2

    def __init__(self, bpf, map_id, map_fd, leaftype):
        self.bpf = bpf
        self.map_id = map_id
        self.map_fd = map_fd
        self.Leaf = leaftype
        self.ttype = lib.bpf_table_type_id(self.bpf.module, self.map_id)
        self.flags = lib.bpf_table_flags_id(self.bpf.module, self.map_id)
        self.max_entries = int(lib.bpf_table_max_entries_id(self.bpf.module,
                self.map_id))

    def leaf_sprintf(self, leaf):
        buf = ct.create_string_buffer(ct.sizeof(self.Leaf) * 8)
        res = lib.bpf_table_leaf_snprintf(self.bpf.module, self.map_id, buf,
                                          len(buf), ct.byref(leaf))
        if res < 0:
            raise Exception("Could not printf leaf")
        return buf.value

    def leaf_scanf(self, leaf_str):
        leaf = self.Leaf()
        res = lib.bpf_table_leaf_sscanf(self.bpf.module, self.map_id, leaf_str,
                                        ct.byref(leaf))
        if res < 0:
            raise Exception("Could not scanf leaf")
        return leaf

    def push(self, leaf, flags=0):
        res = lib.bpf_update_elem(self.map_fd, None, ct.byref(leaf), flags)
        if res < 0:
            errstr = os.strerror(ct.get_errno())
            raise Exception("Could not push to table: %s" % errstr)

    def pop(self):
        leaf = self.Leaf()
        res = lib.bpf_lookup_and_delete(self.map_fd, None, ct.byref(leaf))
        if res < 0:
            raise KeyError("Could not pop from table")
        return leaf

    def peek(self):
        leaf = self.Leaf()
        res = lib.bpf_lookup_elem(self.map_fd, None, ct.byref(leaf))
        if res < 0:
            raise KeyError("Could not peek table")
        return leaf

    def itervalues(self):
        # to avoid infinite loop, set maximum pops to max_entries
        cnt = self.max_entries
        while cnt:
            try:
                yield self.pop()
                cnt -= 1
            except KeyError:
                return

    def values(self):
        return [value for value in self.itervalues()]

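# Illustrative sketch of QueueStack (map name is an example, corresponding to
# BPF_QUEUE(events, u64, 10240) or BPF_STACK(...) on the C side; the leaf is
# assumed to be a u64):
#
#     q = b["events"]                 # dispatched to QueueStack by Table()
#     q.push(q.Leaf(42))              # pass QueueStack.BPF_EXIST to overwrite
#                                     # the oldest entry when the map is full
#     head = q.peek()
#     for v in q.values():            # pops until the map is empty
#         print(v.value)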