# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""This module provides the utilities for avsync_probe's data processing.

We get a lot of raw data from avsync_probe.Capture(), one sample per
millisecond. AVSyncProbeDataParser helps to transform the raw data into more
readable formats. It also helps to calculate the audio/video sync timing if
the sound_interval_frames parameter is not None.

Example:
    capture_data = avsync_probe.Capture(12)
    parser = avsync_probe_utils.AVSyncProbeDataParser(self.resultsdir,
            capture_data, 30)

    # Use the following attributes to access data. They are documented in
    # the AVSyncProbeDataParser class.
    parser.video_duration_average
    parser.video_duration_std
    parser.sync_duration_average
    parser.sync_duration_std
    parser.cumulative_frame_count
    parser.dropped_frame_count
    parser.corrupted_frame_count
    parser.binarize_data
    parser.audio_events
    parser.video_events

"""

import collections
import logging
import math
import os
import sys


# Indices for binarize_data, audio_events and video_events.
TIME_INDEX = 0
VIDEO_INDEX = 1
AUDIO_INDEX = 2
# This index is used for video_events and audio_events.
# The slot contains the time difference to the previous event.
TIME_DIFF_INDEX = 3

# SyncResult namedtuple of audio and video frame.
# time_delay < 0 means that audio comes out first.
SyncResult = collections.namedtuple(
        'SyncResult', ['video_time', 'audio_time', 'time_delay'])
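# Illustrative example (hypothetical values): SyncResult(video_time=1000,
# audio_time=990, time_delay=-10) would mean the beep was detected 10 ms
# before the video frame that should carry it.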


class GrayCode(object):
    """Converts bit patterns between binary and Gray code.

    The bit patterns of Gray code values are packed into an int value.
    For example, 4 is "110" in Gray code, which reads "6" when interpreted
    as binary.
    See "https://en.wikipedia.org/wiki/Gray_code"

    """

    @staticmethod
    def binary_to_gray(binary):
        """Binary code to Gray code.

        @param binary: Binary code.
        @return: Gray code.

        """
        return binary ^ (binary >> 1)

    @staticmethod
    def gray_to_binary(gray):
        """Gray code to binary code.

        @param gray: Gray code.
        @return: Binary code.

        """
        result = gray
        result ^= (result >> 16)
        result ^= (result >> 8)
        result ^= (result >> 4)
        result ^= (result >> 2)
        result ^= (result >> 1)
        return result

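# Illustrative sketch (not part of the original module): round-tripping a
# 3-bit value through the helpers above.
#
#   >>> GrayCode.binary_to_gray(4)   # 0b100 -> 0b110
#   6
#   >>> GrayCode.gray_to_binary(6)   # 0b110 -> 0b100
#   4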

class HysteresisSwitch(object):
    """Iteratively binarizes an input sequence using a hysteresis comparator
    with a pair of fixed thresholds.

    Hysteresis means using two different thresholds for activating and
    deactivating the output. It is often used for thresholding a time-series
    signal while reducing small noise in the input.

    Note that the low threshold is exclusive but the high threshold is
    inclusive. When the same value is used for both, the object works as a
    non-hysteresis switch (i.e. equivalent to the >= operator).

    """

    def __init__(self, low_threshold, high_threshold, init_state):
        """Init HysteresisSwitch class.

        @param low_threshold: The threshold value to deactivate the output.
                The comparison is exclusive.
        @param high_threshold: The threshold value to activate the output.
                The comparison is inclusive.
        @param init_state: True or False of the switch's initial state.

        """
        if low_threshold > high_threshold:
            raise Exception('Low threshold %d exceeds the high threshold %d'
                            % (low_threshold, high_threshold))
        self._low_threshold = low_threshold
        self._high_threshold = high_threshold
        self._last_state = init_state

    def adjust_state(self, value):
        """Updates the state of the switch by the input value and returns the
        result.

        @param value: Value for updating.
        @return: The state of the switch.

        """
        if value < self._low_threshold:
            self._last_state = False

        if value >= self._high_threshold:
            self._last_state = True

        return self._last_state

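# Illustrative sketch (hypothetical thresholds): a switch built with low=3 and
# high=7 stays off until the input reaches 7, then stays on until it drops
# below 3.
#
#   >>> switch = HysteresisSwitch(3, 7, False)
#   >>> [switch.adjust_state(v) for v in [5, 7, 5, 2, 5]]
#   [False, True, True, False, False]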

class AVSyncProbeDataParser(object):
    """Digital information extraction from the raw sensor data sequence.

    This class transforms the raw data into easier-to-understand formats.

    Attributes:
        binarize_data: The raw data transformed to [Time, video code,
                is_audio]. The video code cycles through 0-7 repeatedly.
        video_events: Events of video frames.
        audio_events: Events of when audio happens.
        video_duration_average: (ms) The average duration between consecutive
                video frames.
        video_duration_std: Standard deviation of the video frame durations.
        sync_duration_average: (ms) The average duration for audio/video sync.
        sync_duration_std: Standard deviation of the audio/video sync
                durations.
        cumulative_frame_count: Number of total video frames.
        dropped_frame_count: Total dropped video frames.
        corrupted_frame_count: Total corrupted video frames.

    """
    # Thresholds for hysteresis binarization of input signals.
    # Relative to the minimum (0.0) and maximum (1.0) values of the value range
    # of each input signal.
    _NORMALIZED_LOW_THRESHOLD = 0.6
    _NORMALIZED_HIGH_THRESHOLD = 0.7

    _VIDEO_CODE_CYCLE = (1 << 3)

    def __init__(self, log_dir, capture_raw_data, video_fps,
                 sound_interval_frames=None):
        """Inits AVSyncProbeDataParser class.

        @param log_dir: Directory for dumping each event's contents.
        @param capture_raw_data: Raw data from the avsync_probe device.
                A list containing the list values of [timestamp, video0,
                                                      video1, video2, audio].
        @param video_fps: Video frames per second. Used to know if a video
                frame is dropped or just corrupted.
        @param sound_interval_frames: The period of sound (beep) in the number
                of video frames. This class will help to calculate audio/video
                sync stats if sound_interval_frames is not None.

        """
        self.video_duration_average = None
        self.video_duration_std = None
        self.sync_duration_average = None
        self.sync_duration_std = None
        self.cumulative_frame_count = None
        self.dropped_frame_count = None
        self.corrupted_frame_count = None

        self._log_dir = log_dir
        self._raw_data = capture_raw_data
        # Duration of each video frame in milliseconds.
        self._video_duration = 1000 / video_fps
        self._sound_interval_frames = sound_interval_frames
        self._log_list_data_to_file('raw.txt', capture_raw_data)

        self.binarize_data = self._binarize_raw_data()
        # We need to get audio events before removing the video preamble
        # frames, because an audio event may appear before the preamble frame;
        # if we removed the preamble frames first, we would lose that audio
        # event.
        self.audio_events = self._detect_audio_events()
        self._remove_video_preamble()
        self.video_events = self._detect_video_events()
        self._analyze_events()
        self._calculate_statistics_report()

    def _log_list_data_to_file(self, filename, data):
        """Logs the list data to a file.

        The file is written under the self._log_dir directory.

        @param filename: The file name.
        @param data: Data for logging.

        """
        filepath = os.path.join(self._log_dir, filename)
        with open(filepath, 'w') as f:
            for v in data:
                f.write('%s\n' % str(v))

    def _get_hysteresis_switch(self, index):
        """Gets a HysteresisSwitch whose thresholds are derived from the raw
        data.

        @param index: The index of the channel in self._raw_data's elements.
        @return: A HysteresisSwitch instance with thresholds scaled to the
                value range of that channel.

        """
        max_value = max(x[index] for x in self._raw_data)
        min_value = min(x[index] for x in self._raw_data)
        scale = max_value - min_value
        logging.info('index %d, max %d, min %d, scale %d', index, max_value,
                     min_value, scale)
        return HysteresisSwitch(
                min_value + scale * self._NORMALIZED_LOW_THRESHOLD,
                min_value + scale * self._NORMALIZED_HIGH_THRESHOLD,
                False)

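    # Illustrative example (hypothetical values): if a channel's readings range
    # from 100 to 600, scale is 500, so the resulting switch deactivates below
    # 100 + 500 * 0.6 = 400 and activates at or above 100 + 500 * 0.7 = 450.
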
    def _binarize_raw_data(self):
        """Conducts adaptive thresholding and decodes the embedded frame codes.

        Sensors[0] is the timestamp.
        Sensors[1-3] are photo transistors, which output lower values for
        brighter light (i.e. white pixels on screen). These are used to detect
        the black and white pattern on the screen, which is decoded as an
        integer code.

        The final channel is for audio input, which outputs higher voltage for
        larger sound volume. This is used for detecting beep sounds added to
        the video.

        @return: Decoded frame codes list for all the input frames. Each entry
                contains [Timestamp, video code, is_audio].

        """
        decoded_data = []

        hysteresis_switch = []
        for i in xrange(5):
            hysteresis_switch.append(self._get_hysteresis_switch(i))

        for data in self._raw_data:
            code = 0
            # Decode the black-and-white pattern on the video.
            # There are 3 black or white boxes sensed by the sensors.
            # Each square represents a single bit (white = 1, black = 0) coding
            # an integer in Gray code.
            for i in xrange(1, 4):
                # Lower sensor value for brighter light (square painted white).
                is_white = not hysteresis_switch[i].adjust_state(data[i])
                if is_white:
                    code |= (1 << (i - 1))
            code = GrayCode.gray_to_binary(code)
            # The final channel is the sound signal. Higher sensor value for
            # higher sound level.
            sound = hysteresis_switch[4].adjust_state(data[4])
            decoded_data.append([data[0], code, sound])

        self._log_list_data_to_file('binarize_raw.txt', decoded_data)
        return decoded_data

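    # Illustrative sketch (hypothetical sensor values): assuming the adaptive
    # thresholds classify 120 as bright and 880/900 as dark, a raw sample of
    # [12345, 900, 120, 880, 30] decodes to [12345, 3, False]; only the middle
    # square is white, giving Gray code 0b010, which is binary 3, and the audio
    # channel stays below the sound threshold.
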
    def _remove_video_preamble(self):
        """Removes preamble video frames from self.binarize_data."""
        # Find the first preamble frame (code = 0).
        index = next(i for i, v in enumerate(self.binarize_data)
                     if v[VIDEO_INDEX] == 0)
        self.binarize_data = self.binarize_data[index:]

        # Skip the preamble frames (code = 0).
        index = next(i for i, v in enumerate(self.binarize_data)
                     if v[VIDEO_INDEX] != 0)
        self.binarize_data = self.binarize_data[index:]

    def _detect_events(self, detect_condition):
        """Detects events from the binarize_data sequence using
        detect_condition.

        @param detect_condition: Callback function for checking whether an
                event happened. This API passes the index and element of
                binarize_data to the callback function.

        @return: The list of events. Each event is a binarize_data entry with
                an additional time_difference slot appended.

        """
        detected_events = []
        previous_time = self.binarize_data[0][TIME_INDEX]
        for i, v in enumerate(self.binarize_data):
            if detect_condition(i, v):
                time = v[TIME_INDEX]
                time_difference = time - previous_time
                # Copy a new instance here, because we will append the time
                # difference.
                event = list(v)
                event.append(time_difference)
                detected_events.append(event)
                previous_time = time

        return detected_events

    def _detect_audio_events(self):
        """Detects the audio start frames from the binarize_data sequence.

        @return: The list of audio events. Each event is a binarize_data entry
                with an additional time_difference slot appended.

        """
        # Only keep the event where the audio first turns on.
        detected_events = self._detect_events(
            lambda i, v: (v[AUDIO_INDEX] and not
                          self.binarize_data[i - 1][AUDIO_INDEX]))

        self._log_list_data_to_file('audio_events.txt', detected_events)
        return detected_events

    def _detect_video_events(self):
        """Detects the video frames from the binarize_data sequence.

        @return: The list of video events. Each event is a binarize_data entry
                with an additional time_difference slot appended.

        """
        # Remove duplicate frames (frames in a transition state).
        detected_events = self._detect_events(
            lambda i, v: (v[VIDEO_INDEX] !=
                          self.binarize_data[i - 1][VIDEO_INDEX]))

        self._log_list_data_to_file('video_events.txt', detected_events)
        return detected_events

    def _match_sync(self, video_time):
        """Matches the audio/video sync timing.

        This function finds the sound in audio_events that is closest to
        video_time and returns an audio/video sync tuple.

        @param video_time: The time of the video frame which should have sound.
        @return: A SyncResult namedtuple containing:
                  - timestamp of the video frame which should have audio.
                  - timestamp of the nearest audio event.
                  - time delay between the audio and video frame.

        """
        closest_difference = sys.maxint
        audio_time = 0
        for audio_event in self.audio_events:
            difference = audio_event[TIME_INDEX] - video_time
            if abs(difference) < abs(closest_difference):
                closest_difference = difference
                audio_time = audio_event[TIME_INDEX]
        return SyncResult(video_time, audio_time, closest_difference)

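    # Illustrative example (hypothetical timestamps): with audio events at
    # t=100 and t=220 ms, a video frame at t=210 ms matches the t=220 ms beep
    # and yields SyncResult(210, 220, 10), i.e. the sound was detected 10 ms
    # after the frame.
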
    def _calculate_statistics(self, data):
        """Calculates the average and standard deviation of the list data.

        @param data: The list of values to be calculated.
        @return: A tuple of (average, standard_deviation).

        """
        if not data:
            return (None, None)

        total = sum(data)
        # Use float division so integer samples do not truncate the results.
        average = float(total) / len(data)
        variance = sum((v - average)**2 for v in data) / len(data)
        standard_deviation = math.sqrt(variance)
        return (average, standard_deviation)

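    # Illustrative example: _calculate_statistics([1, 3]) returns a mean of 2.0
    # and a population standard deviation of 1.0.
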
    def _analyze_events(self):
        """Analyzes audio/video events.

        This function analyzes the video frame status and the audio/video sync
        status.

        """
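        # Illustrative walk-through (hypothetical codes): if the previous code
        # was 3 and the current code is 5, frame_diff is 2. If that event
        # arrived within two frame durations, the missing code-4 frame is
        # treated as merely corrupted and the code is corrected; otherwise one
        # frame is counted as dropped.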
        sound_interval_frames = self._sound_interval_frames
        current_code = 0
        cumulative_frame_count = 0
        dropped_frame_count = 0
        corrupted_frame_count = 0
        sync_events = []

        for v in self.video_events:
            code = v[VIDEO_INDEX]
            time = v[TIME_INDEX]
            # Get the difference of the codes. The code is between 0 and 7.
            frame_diff = code - current_code
            if frame_diff < 0:
                frame_diff += self._VIDEO_CODE_CYCLE

            if frame_diff != 1:
                # Check if we dropped a frame or just got a corrupted frame.
                # Treat the frame as corrupted if the frame duration is less
                # than 2 video frame durations.
                if v[TIME_DIFF_INDEX] < 2 * self._video_duration:
                    logging.warn('Corrupted frame near %s', str(v))
                    # Correct the code.
                    code = current_code + 1
                    corrupted_frame_count += 1
                    frame_diff = 1
                else:
                    logging.warn('Dropped frame near %s', str(v))
                    dropped_frame_count += (frame_diff - 1)

            cumulative_frame_count += frame_diff

            if sound_interval_frames is not None:
                # This frame corresponds to a sound.
                if cumulative_frame_count % sound_interval_frames == 1:
                    sync_events.append(self._match_sync(time))

            current_code = code

        self.cumulative_frame_count = cumulative_frame_count
        self.dropped_frame_count = dropped_frame_count
        self.corrupted_frame_count = corrupted_frame_count
        self._sync_events = sync_events
        self._log_list_data_to_file('sync.txt', sync_events)

    def _calculate_statistics_report(self):
        """Calculates statistics report."""
        video_duration_average, video_duration_std = self._calculate_statistics(
                [v[TIME_DIFF_INDEX] for v in self.video_events])
        sync_duration_average, sync_duration_std = self._calculate_statistics(
                [v.time_delay for v in self._sync_events])
        self.video_duration_average = video_duration_average
        self.video_duration_std = video_duration_std
        self.sync_duration_average = sync_duration_average
        self.sync_duration_std = sync_duration_std