# Copyright 2015 PLUMgrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
    from collections.abc import MutableMapping  # Python >= 3.3
except ImportError:
    from collections import MutableMapping
import ctypes as ct
from functools import reduce
import multiprocessing
import os
import errno

from .libbcc import lib, _RAW_CB_TYPE, _LOST_CB_TYPE
from .perf import Perf
from .utils import get_online_cpus
from .utils import get_possible_cpus
from subprocess import check_output

BPF_MAP_TYPE_HASH = 1
BPF_MAP_TYPE_ARRAY = 2
BPF_MAP_TYPE_PROG_ARRAY = 3
BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4
BPF_MAP_TYPE_PERCPU_HASH = 5
BPF_MAP_TYPE_PERCPU_ARRAY = 6
BPF_MAP_TYPE_STACK_TRACE = 7
BPF_MAP_TYPE_CGROUP_ARRAY = 8
BPF_MAP_TYPE_LRU_HASH = 9
BPF_MAP_TYPE_LRU_PERCPU_HASH = 10
BPF_MAP_TYPE_LPM_TRIE = 11
BPF_MAP_TYPE_ARRAY_OF_MAPS = 12
BPF_MAP_TYPE_HASH_OF_MAPS = 13
BPF_MAP_TYPE_DEVMAP = 14
BPF_MAP_TYPE_SOCKMAP = 15
BPF_MAP_TYPE_CPUMAP = 16
BPF_MAP_TYPE_XSKMAP = 17
BPF_MAP_TYPE_SOCKHASH = 18

stars_max = 40
log2_index_max = 65
linear_index_max = 1025

# helper functions, consider moving these to a utils module
def _stars(val, val_max, width):
    i = 0
    text = ""
    while (1):
        if (i > (width * val / val_max) - 1) or (i > width - 1):
            break
        text += "*"
        i += 1
    if val > val_max:
        text = text[:-1] + "+"
    return text


def _print_log2_hist(vals, val_type, strip_leading_zero):
    global stars_max
    log2_dist_max = 64
    idx_max = -1
    val_max = 0

    for i, v in enumerate(vals):
        if v > 0: idx_max = i
        if v > val_max: val_max = v

    if idx_max <= 32:
        header = "     %-19s : count     distribution"
        body = "%10d -> %-10d : %-8d |%-*s|"
        stars = stars_max
    else:
        header = "               %-29s : count     distribution"
        body = "%20d -> %-20d : %-8d |%-*s|"
        stars = int(stars_max / 2)

    if idx_max > 0:
        print(header % val_type)

    for i in range(1, idx_max + 1):
        low = (1 << i) >> 1
        high = (1 << i) - 1
        if (low == high):
            low -= 1
        val = vals[i]

        if strip_leading_zero:
            if val:
                print(body % (low, high, val, stars,
                              _stars(val, val_max, stars)))
                strip_leading_zero = False
        else:
            print(body % (low, high, val, stars,
                          _stars(val, val_max, stars)))

def _print_linear_hist(vals, val_type):
    global stars_max
    log2_dist_max = 64
    idx_max = -1
    val_max = 0

    for i, v in enumerate(vals):
        if v > 0: idx_max = i
        if v > val_max: val_max = v

    header = "     %-13s : count     distribution"
    body = "        %-10d : %-8d |%-*s|"
    stars = stars_max

    if idx_max >= 0:
        print(header % val_type)
    for i in range(0, idx_max + 1):
        val = vals[i]
        print(body % (i, val, stars,
                      _stars(val, val_max, stars)))


def Table(bpf, map_id, map_fd, keytype, leaftype, **kwargs):
    """Table(bpf, map_id, map_fd, keytype, leaftype, **kwargs)

    Create a python object out of a reference to a bpf table handle"""
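    # Hedged usage sketch: this factory is normally invoked for you by the
    # BPF object rather than called directly. For example (names are
    # illustrative, assuming a map named "counts" declared in the BPF text):
    #   b = BPF(text=prog)
    #   counts = b["counts"]   # dispatches here and returns e.g. a HashTable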

    ttype = lib.bpf_table_type_id(bpf.module, map_id)
    t = None
    if ttype == BPF_MAP_TYPE_HASH:
        t = HashTable(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_ARRAY:
        t = Array(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_PROG_ARRAY:
        t = ProgArray(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_PERF_EVENT_ARRAY:
        t = PerfEventArray(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_PERCPU_HASH:
        t = PerCpuHash(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
    elif ttype == BPF_MAP_TYPE_PERCPU_ARRAY:
        t = PerCpuArray(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
    elif ttype == BPF_MAP_TYPE_LPM_TRIE:
        t = LpmTrie(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_STACK_TRACE:
        t = StackTrace(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_LRU_HASH:
        t = LruHash(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_LRU_PERCPU_HASH:
        t = LruPerCpuHash(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_CGROUP_ARRAY:
        t = CgroupArray(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_DEVMAP:
        t = DevMap(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_CPUMAP:
        t = CpuMap(bpf, map_id, map_fd, keytype, leaftype)
    if t is None:
        raise Exception("Unknown table type %d" % ttype)
    return t


class TableBase(MutableMapping):

    def __init__(self, bpf, map_id, map_fd, keytype, leaftype):
        self.bpf = bpf
        self.map_id = map_id
        self.map_fd = map_fd
        self.Key = keytype
        self.Leaf = leaftype
        self.ttype = lib.bpf_table_type_id(self.bpf.module, self.map_id)
        self.flags = lib.bpf_table_flags_id(self.bpf.module, self.map_id)
        self._cbs = {}

    def key_sprintf(self, key):
        buf = ct.create_string_buffer(ct.sizeof(self.Key) * 8)
        res = lib.bpf_table_key_snprintf(self.bpf.module, self.map_id, buf,
                                         len(buf), ct.byref(key))
        if res < 0:
            raise Exception("Could not printf key")
        return buf.value

    def leaf_sprintf(self, leaf):
        buf = ct.create_string_buffer(ct.sizeof(self.Leaf) * 8)
        res = lib.bpf_table_leaf_snprintf(self.bpf.module, self.map_id, buf,
                                          len(buf), ct.byref(leaf))
        if res < 0:
            raise Exception("Could not printf leaf")
        return buf.value

    def key_scanf(self, key_str):
        key = self.Key()
        res = lib.bpf_table_key_sscanf(self.bpf.module, self.map_id, key_str,
                                       ct.byref(key))
        if res < 0:
            raise Exception("Could not scanf key")
        return key

    def leaf_scanf(self, leaf_str):
        leaf = self.Leaf()
        res = lib.bpf_table_leaf_sscanf(self.bpf.module, self.map_id, leaf_str,
                                        ct.byref(leaf))
        if res < 0:
            raise Exception("Could not scanf leaf")
        return leaf

    def __getitem__(self, key):
        leaf = self.Leaf()
        res = lib.bpf_lookup_elem(self.map_fd, ct.byref(key), ct.byref(leaf))
        if res < 0:
            raise KeyError
        return leaf

    def __setitem__(self, key, leaf):
        res = lib.bpf_update_elem(self.map_fd, ct.byref(key), ct.byref(leaf), 0)
        if res < 0:
            errstr = os.strerror(ct.get_errno())
            raise Exception("Could not update table: %s" % errstr)

    def __delitem__(self, key):
        res = lib.bpf_delete_elem(self.map_fd, ct.byref(key))
        if res < 0:
            raise KeyError

    # override the MutableMapping's implementation of these since they
    # don't handle KeyError nicely
    def itervalues(self):
        for key in self:
            # a map entry may be deleted in between discovering the key and
            # fetching the value, suppress such errors
            try:
                yield self[key]
            except KeyError:
                pass

    def iteritems(self):
        for key in self:
            try:
                yield (key, self[key])
            except KeyError:
                pass

    def items(self):
        return [item for item in self.iteritems()]

    def values(self):
        return [value for value in self.itervalues()]

    def clear(self):
        # default clear uses popitem, which can race with the bpf prog
        for k in self.keys():
            self.__delitem__(k)

    def zero(self):
        # Even though this is not very efficient, we grab the entire list of
        # keys before enumerating it. This helps avoid a potential race where
        # the leaf assignment changes a hash table bucket that is being
        # enumerated by the same loop, and may lead to a hang.
        for k in list(self.keys()):
            self[k] = self.Leaf()

    def __iter__(self):
        return TableBase.Iter(self)

    def iter(self): return self.__iter__()
    def keys(self): return self.__iter__()

    class Iter(object):
        def __init__(self, table):
            self.table = table
            self.key = None
        def __iter__(self):
            return self
        def __next__(self):
            return self.next()
        def next(self):
            self.key = self.table.next(self.key)
            return self.key

    def next(self, key):
        next_key = self.Key()

        if key is None:
            res = lib.bpf_get_first_key(self.map_fd, ct.byref(next_key),
                                        ct.sizeof(self.Key))
        else:
            res = lib.bpf_get_next_key(self.map_fd, ct.byref(key),
                                       ct.byref(next_key))

        if res < 0:
            raise StopIteration()
        return next_key

    def print_log2_hist(self, val_type="value", section_header="Bucket ptr",
            section_print_fn=None, bucket_fn=None, strip_leading_zero=None,
            bucket_sort_fn=None):
        """print_log2_hist(val_type="value", section_header="Bucket ptr",
                           section_print_fn=None, bucket_fn=None,
                           strip_leading_zero=None, bucket_sort_fn=None)

        Prints a table as a log2 histogram. The table must be stored as
        log2. The val_type argument is optional, and is a column header.
        If the histogram has a secondary key, multiple tables will print
        and section_header can be used as a header description for each.
        If section_print_fn is not None, it will be passed the bucket value
        to format into a string as it sees fit. If bucket_fn is not None,
        it will be used to produce a bucket value for the histogram keys.
        If strip_leading_zero is truthy, leading buckets whose values are
        zero are omitted from the printed histogram.
        If bucket_sort_fn is not None, it will be used to sort the buckets
        before iterating them, and it is useful when there are multiple fields
        in the secondary key.
        The maximum index allowed is log2_index_max (65), which will
        accommodate any 64-bit integer in the histogram.
        """
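        # A hedged usage sketch (names are illustrative): assuming the BPF
        # program declares BPF_HISTOGRAM(dist) and fills slots with
        # bpf_log2l(latency_us), the Python side only needs
        #   b["dist"].print_log2_hist("usecs")
        # With a two-field key such as struct { char op[8]; u64 slot; },
        # one histogram section is printed per distinct first field.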
        if isinstance(self.Key(), ct.Structure):
            tmp = {}
            f1 = self.Key._fields_[0][0]
            f2 = self.Key._fields_[1][0]
            for k, v in self.items():
                bucket = getattr(k, f1)
                if bucket_fn:
                    bucket = bucket_fn(bucket)
                vals = tmp[bucket] = tmp.get(bucket, [0] * log2_index_max)
                slot = getattr(k, f2)
                vals[slot] = v.value

            buckets = list(tmp.keys())
            if bucket_sort_fn:
                buckets = bucket_sort_fn(buckets)

            for bucket in buckets:
                vals = tmp[bucket]
                if section_print_fn:
                    print("\n%s = %s" % (section_header,
                        section_print_fn(bucket)))
                else:
                    print("\n%s = %r" % (section_header, bucket))
                _print_log2_hist(vals, val_type, strip_leading_zero)
        else:
            vals = [0] * log2_index_max
            for k, v in self.items():
                vals[k.value] = v.value
            _print_log2_hist(vals, val_type, strip_leading_zero)

    def print_linear_hist(self, val_type="value", section_header="Bucket ptr",
            section_print_fn=None, bucket_fn=None, bucket_sort_fn=None):
        """print_linear_hist(val_type="value", section_header="Bucket ptr",
                           section_print_fn=None, bucket_fn=None,
                           bucket_sort_fn=None)

        Prints a table as a linear histogram. This is intended to span integer
        ranges, eg, from 0 to 100. The val_type argument is optional, and is a
        column header.  If the histogram has a secondary key, multiple tables
        will print and section_header can be used as a header description for
        each.  If section_print_fn is not None, it will be passed the bucket
        value to format into a string as it sees fit. If bucket_fn is not None,
        it will be used to produce a bucket value for the histogram keys.
        If bucket_sort_fn is not None, it will be used to sort the buckets
        before iterating them, and it is useful when there are multiple fields
        in the secondary key.
        The maximum index allowed is linear_index_max (1025), which should be
        sufficient for the integer ranges this is intended to span.
        """
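        # A hedged usage sketch (names are illustrative): assuming the BPF
        # program declares BPF_HISTOGRAM(dist, int) and increments the slot
        # for a millisecond value in the 0..100 range, the Python side does
        #   b["dist"].print_linear_hist("ms")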
        if isinstance(self.Key(), ct.Structure):
            tmp = {}
            f1 = self.Key._fields_[0][0]
            f2 = self.Key._fields_[1][0]
            for k, v in self.items():
                bucket = getattr(k, f1)
                if bucket_fn:
                    bucket = bucket_fn(bucket)
                vals = tmp[bucket] = tmp.get(bucket, [0] * linear_index_max)
                slot = getattr(k, f2)
                vals[slot] = v.value

            buckets = list(tmp.keys())
            if bucket_sort_fn:
                buckets = bucket_sort_fn(buckets)

            for bucket in buckets:
                vals = tmp[bucket]
                if section_print_fn:
                    print("\n%s = %s" % (section_header,
                        section_print_fn(bucket)))
                else:
                    print("\n%s = %r" % (section_header, bucket))
                _print_linear_hist(vals, val_type)
        else:
            vals = [0] * linear_index_max
            for k, v in self.items():
                try:
                    vals[k.value] = v.value
                except IndexError:
                    # Improve error text. If the limit proves a nuisance, this
                    # function could be rewritten to avoid having one.
                    raise IndexError(("Index in print_linear_hist() of %d " +
                        "exceeds max of %d.") % (k.value, linear_index_max))
            _print_linear_hist(vals, val_type)


class HashTable(TableBase):
    def __init__(self, *args, **kwargs):
        super(HashTable, self).__init__(*args, **kwargs)

    def __len__(self):
        i = 0
        for k in self: i += 1
        return i

class LruHash(HashTable):
    def __init__(self, *args, **kwargs):
        super(LruHash, self).__init__(*args, **kwargs)

class ArrayBase(TableBase):
    def __init__(self, *args, **kwargs):
        super(ArrayBase, self).__init__(*args, **kwargs)
        self.max_entries = int(lib.bpf_table_max_entries_id(self.bpf.module,
                self.map_id))

    def _normalize_key(self, key):
        if isinstance(key, int):
            if key < 0:
                key = len(self) + key
            key = self.Key(key)
        if not isinstance(key, ct._SimpleCData):
            raise IndexError("Array index must be an integer type")
        if key.value >= len(self):
            raise IndexError("Array index out of range")
        return key

    def __len__(self):
        return self.max_entries

    def __getitem__(self, key):
        key = self._normalize_key(key)
        return super(ArrayBase, self).__getitem__(key)

    def __setitem__(self, key, leaf):
        key = self._normalize_key(key)
        super(ArrayBase, self).__setitem__(key, leaf)

    def __delitem__(self, key):
        key = self._normalize_key(key)
        super(ArrayBase, self).__delitem__(key)

    def clearitem(self, key):
        key = self._normalize_key(key)
        leaf = self.Leaf()
        res = lib.bpf_update_elem(self.map_fd, ct.byref(key), ct.byref(leaf), 0)
        if res < 0:
            raise Exception("Could not clear item")

    def __iter__(self):
        return ArrayBase.Iter(self, self.Key)

    class Iter(object):
        def __init__(self, table, keytype):
            self.Key = keytype
            self.table = table
            self.i = -1

        def __iter__(self):
            return self
        def __next__(self):
            return self.next()
        def next(self):
            self.i += 1
            if self.i == len(self.table):
                raise StopIteration()
            return self.Key(self.i)

class Array(ArrayBase):
    def __init__(self, *args, **kwargs):
        super(Array, self).__init__(*args, **kwargs)

    def __delitem__(self, key):
        # Delete in Array type does not have an effect, so zero out instead
        self.clearitem(key)

class ProgArray(ArrayBase):
    def __init__(self, *args, **kwargs):
        super(ProgArray, self).__init__(*args, **kwargs)

    def __setitem__(self, key, leaf):
        if isinstance(leaf, int):
            leaf = self.Leaf(leaf)
        if isinstance(leaf, self.bpf.Function):
            leaf = self.Leaf(leaf.fd)
        super(ProgArray, self).__setitem__(key, leaf)

class FileDesc:
    def __init__(self, fd):
        if (fd is None) or (fd < 0):
            raise Exception("Invalid file descriptor")
        self.fd = fd

    def clean_up(self):
        if (self.fd is not None) and (self.fd >= 0):
            os.close(self.fd)
            self.fd = None

    def __del__(self):
        self.clean_up()

    def __enter__(self, *args, **kwargs):
        return self

    def __exit__(self, *args, **kwargs):
        self.clean_up()

class CgroupArray(ArrayBase):
    def __init__(self, *args, **kwargs):
        super(CgroupArray, self).__init__(*args, **kwargs)

    def __setitem__(self, key, leaf):
        if isinstance(leaf, int):
            super(CgroupArray, self).__setitem__(key, self.Leaf(leaf))
        elif isinstance(leaf, str):
            # TODO: Add os.O_CLOEXEC once we move to Python version >3.3
            with FileDesc(os.open(leaf, os.O_RDONLY)) as f:
                super(CgroupArray, self).__setitem__(key, self.Leaf(f.fd))
        else:
            raise Exception("Cgroup array key must be either FD or cgroup path")

class PerfEventArray(ArrayBase):

    def __init__(self, *args, **kwargs):
        super(PerfEventArray, self).__init__(*args, **kwargs)
        self._open_key_fds = {}

    def __del__(self):
        keys = list(self._open_key_fds.keys())
        for key in keys:
            del self[key]

    def __delitem__(self, key):
        if key not in self._open_key_fds:
            return
        # Delete entry from the array
        super(PerfEventArray, self).__delitem__(key)
        key_id = (id(self), key)
        if key_id in self.bpf.perf_buffers:
            # The key is opened for perf ring buffer
            lib.perf_reader_free(self.bpf.perf_buffers[key_id])
            del self.bpf.perf_buffers[key_id]
            del self._cbs[key]
        else:
            # The key is opened for perf event read
            lib.bpf_close_perf_event_fd(self._open_key_fds[key])
        del self._open_key_fds[key]

    def open_perf_buffer(self, callback, page_cnt=8, lost_cb=None):
        """open_perf_buffer(callback, page_cnt=8, lost_cb=None)

        Opens a set of per-cpu ring buffers to receive custom perf event
        data from the bpf program. The callback will be invoked for each
        event submitted from the kernel, up to millions per second. Use
        page_cnt to change the size of the per-cpu ring buffer. The value
        must be a power of two and defaults to 8.
        """
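        # A hedged usage sketch (names are illustrative, assuming the BPF
        # program declares BPF_PERF_OUTPUT(events) and submits a struct with
        # a single u64 field):
        #   class Data(ct.Structure):
        #       _fields_ = [("ts", ct.c_ulonglong)]
        #   def handle(cpu, data, size):
        #       ev = ct.cast(data, ct.POINTER(Data)).contents
        #       print(cpu, ev.ts)
        #   b["events"].open_perf_buffer(handle, page_cnt=64)
        #   while True:
        #       b.perf_buffer_poll()   # assumes the BPF object's poll helper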

        if page_cnt & (page_cnt - 1) != 0:
            raise Exception("Perf buffer page_cnt must be a power of two")

        for i in get_online_cpus():
            self._open_perf_buffer(i, callback, page_cnt, lost_cb)

    def _open_perf_buffer(self, cpu, callback, page_cnt, lost_cb):
        def raw_cb_(_, data, size):
            try:
                callback(cpu, data, size)
            except IOError as e:
                if e.errno == errno.EPIPE:
                    exit()
                else:
                    raise e
        def lost_cb_(_, lost):
            try:
                lost_cb(lost)
            except IOError as e:
                if e.errno == errno.EPIPE:
                    exit()
                else:
                    raise e
        fn = _RAW_CB_TYPE(raw_cb_)
        lost_fn = _LOST_CB_TYPE(lost_cb_) if lost_cb else ct.cast(None, _LOST_CB_TYPE)
        reader = lib.bpf_open_perf_buffer(fn, lost_fn, None, -1, cpu, page_cnt)
        if not reader:
            raise Exception("Could not open perf buffer")
        fd = lib.perf_reader_fd(reader)
        self[self.Key(cpu)] = self.Leaf(fd)
        self.bpf.perf_buffers[(id(self), cpu)] = reader
        # keep a refcnt
        self._cbs[cpu] = (fn, lost_fn)
        # The actual fd is held by the perf reader, add to track opened keys
        self._open_key_fds[cpu] = -1

    def _open_perf_event(self, cpu, typ, config):
        fd = lib.bpf_open_perf_event(typ, config, -1, cpu)
        if fd < 0:
            raise Exception("bpf_open_perf_event failed")
        self[self.Key(cpu)] = self.Leaf(fd)
        self._open_key_fds[cpu] = fd

    def open_perf_event(self, typ, config):
        """open_perf_event(typ, config)

        Configures the table such that calls from the bpf program to
        table.perf_read(CUR_CPU_IDENTIFIER) will return the hardware
        counter denoted by typ and config on the local cpu.
        """
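        # A hedged usage sketch (names are illustrative; PerfType and
        # PerfHWConfig constants are assumed to be importable from the bcc
        # package):
        #   b["cnt"].open_perf_event(PerfType.HARDWARE, PerfHWConfig.CPU_CYCLES)
        # after which the BPF program can call cnt.perf_read(CUR_CPU_IDENTIFIER)
        # to read the cycle counter on the local cpu.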
        for i in get_online_cpus():
            self._open_perf_event(i, typ, config)


class PerCpuHash(HashTable):
    def __init__(self, *args, **kwargs):
        self.reducer = kwargs.pop("reducer", None)
        super(PerCpuHash, self).__init__(*args, **kwargs)
        self.sLeaf = self.Leaf
        self.total_cpu = len(get_possible_cpus())
        # Per-cpu leaf values must be 8-byte aligned, as hard coded in the linux kernel.
        self.alignment = ct.sizeof(self.sLeaf) % 8
        if self.alignment == 0:
            self.Leaf = self.sLeaf * self.total_cpu
        else:
            # Currently Float, Char, un-aligned structs are not supported
            if self.sLeaf == ct.c_uint:
                self.Leaf = ct.c_uint64 * self.total_cpu
            elif self.sLeaf == ct.c_int:
                self.Leaf = ct.c_int64 * self.total_cpu
            else:
                raise IndexError("Leaf must be aligned to 8 bytes")

    def getvalue(self, key):
        result = super(PerCpuHash, self).__getitem__(key)
        if self.alignment == 0:
            ret = result
        else:
            ret = (self.sLeaf * self.total_cpu)()
            for i in range(0, self.total_cpu):
                ret[i] = result[i]
        return ret

    def __getitem__(self, key):
        if self.reducer:
            return reduce(self.reducer, self.getvalue(key))
        else:
            return self.getvalue(key)

    def __setitem__(self, key, leaf):
        super(PerCpuHash, self).__setitem__(key, leaf)

    def sum(self, key):
        if isinstance(self.Leaf(), ct.Structure):
            raise IndexError("Leaf must be an integer type for default sum functions")
        return self.sLeaf(sum(self.getvalue(key)))

    def max(self, key):
        if isinstance(self.Leaf(), ct.Structure):
            raise IndexError("Leaf must be an integer type for default max functions")
        return self.sLeaf(max(self.getvalue(key)))

    def average(self, key):
        result = self.sum(key)
        return result.value / self.total_cpu
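    # A hedged usage sketch (names are illustrative): values of a per-cpu map
    # are returned with one slot per possible cpu, so they are typically
    # aggregated in Python, e.g.
    #   counts = b["counts"]              # a PERCPU_HASH declared in BPF text
    #   for k, per_cpu in counts.items():
    #       print(k.value, sum(per_cpu))
    # or collapsed at lookup time by supplying a reducer such as
    # lambda a, b: a + b when the table is constructed.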

class LruPerCpuHash(PerCpuHash):
    def __init__(self, *args, **kwargs):
        super(LruPerCpuHash, self).__init__(*args, **kwargs)

class PerCpuArray(ArrayBase):
    def __init__(self, *args, **kwargs):
        self.reducer = kwargs.pop("reducer", None)
        super(PerCpuArray, self).__init__(*args, **kwargs)
        self.sLeaf = self.Leaf
        self.total_cpu = len(get_possible_cpus())
        # Per-cpu leaf values must be 8-byte aligned, as hard coded in the linux kernel.
        self.alignment = ct.sizeof(self.sLeaf) % 8
        if self.alignment == 0:
            self.Leaf = self.sLeaf * self.total_cpu
        else:
            # Currently Float, Char, un-aligned structs are not supported
            if self.sLeaf == ct.c_uint:
                self.Leaf = ct.c_uint64 * self.total_cpu
            elif self.sLeaf == ct.c_int:
                self.Leaf = ct.c_int64 * self.total_cpu
            else:
                raise IndexError("Leaf must be aligned to 8 bytes")

    def getvalue(self, key):
        result = super(PerCpuArray, self).__getitem__(key)
        if self.alignment == 0:
            ret = result
        else:
            ret = (self.sLeaf * self.total_cpu)()
            for i in range(0, self.total_cpu):
                ret[i] = result[i]
        return ret

    def __getitem__(self, key):
        if self.reducer:
            return reduce(self.reducer, self.getvalue(key))
        else:
            return self.getvalue(key)

    def __setitem__(self, key, leaf):
        super(PerCpuArray, self).__setitem__(key, leaf)

    def __delitem__(self, key):
        # Delete in this type does not have an effect, so zero out instead
        self.clearitem(key)

    def sum(self, key):
        if isinstance(self.Leaf(), ct.Structure):
            raise IndexError("Leaf must be an integer type for default sum functions")
        return self.sLeaf(sum(self.getvalue(key)))

    def max(self, key):
        if isinstance(self.Leaf(), ct.Structure):
            raise IndexError("Leaf must be an integer type for default max functions")
        return self.sLeaf(max(self.getvalue(key)))

    def average(self, key):
        result = self.sum(key)
        return result.value / self.total_cpu

class LpmTrie(TableBase):
    def __init__(self, *args, **kwargs):
        super(LpmTrie, self).__init__(*args, **kwargs)

    def __len__(self):
        raise NotImplementedError


class StackTrace(TableBase):
    MAX_DEPTH = 127

    def __init__(self, *args, **kwargs):
        super(StackTrace, self).__init__(*args, **kwargs)

    class StackWalker(object):
        def __init__(self, stack, resolve=None):
            self.stack = stack
            self.n = -1
            self.resolve = resolve

        def __iter__(self):
            return self

        def __next__(self):
            return self.next()

        def next(self):
            self.n += 1
            if self.n == StackTrace.MAX_DEPTH:
                raise StopIteration()

            addr = self.stack.ip[self.n]
            if addr == 0:
                raise StopIteration()

            return self.resolve(addr) if self.resolve else addr

    def walk(self, stack_id, resolve=None):
        return StackTrace.StackWalker(self[self.Key(stack_id)], resolve)

    def __len__(self):
        i = 0
        for k in self: i += 1
        return i

    def clear(self):
        pass

class DevMap(ArrayBase):
    def __init__(self, *args, **kwargs):
        super(DevMap, self).__init__(*args, **kwargs)

class CpuMap(ArrayBase):
    def __init__(self, *args, **kwargs):
        super(CpuMap, self).__init__(*args, **kwargs)