#!/usr/bin/env python3.4
#
#   Copyright 2022 - The Android Open Source Project
#
#   Licensed under the Apache License, Version 2.0 (the 'License');
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an 'AS IS' BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

import collections
import csv
import itertools
import numpy
import json
import os
from acts import context
from acts import base_test
from acts.metrics.loggers.blackbox import BlackboxMappedMetricLogger
from acts_contrib.test_utils.cellular.performance import cellular_performance_test_utils as cputils
from acts_contrib.test_utils.wifi import wifi_performance_test_utils as wputils
from acts_contrib.test_utils.wifi.wifi_performance_test_utils.bokeh_figure import BokehFigure
from acts_contrib.test_utils.cellular.performance.CellularThroughputBaseTest import CellularThroughputBaseTest

from functools import partial


class CellularFr1SensitivityTest(CellularThroughputBaseTest):
    """Class to test single cell FR1 NSA sensitivity."""

    def __init__(self, controllers):
        base_test.BaseTestClass.__init__(self, controllers)
        self.testcase_metric_logger = (
            BlackboxMappedMetricLogger.for_test_case())
        self.testclass_metric_logger = (
            BlackboxMappedMetricLogger.for_test_class())
        self.publish_testcase_metrics = True
        self.testclass_params = self.user_params['nr_sensitivity_test_params']
        self.tests = self.generate_test_cases(
            channel_list=['LOW', 'MID', 'HIGH'],
            dl_mcs_list=list(numpy.arange(27, -1, -1)),
            nr_ul_mcs=4,
            lte_dl_mcs_table='QAM256',
            lte_dl_mcs=4,
            lte_ul_mcs_table='QAM256',
            lte_ul_mcs=4,
            transform_precoding=0,
            schedule_scenario='FULL_TPUT',
            schedule_slot_ratio=80)

    def process_testclass_results(self):
        """Plots per-band BLER and RvR curves and saves a summary CSV."""
        # Plot raw data for each test id and compile metrics
        plots = collections.OrderedDict()
        compiled_data = collections.OrderedDict()
        for testcase_name, testcase_data in self.testclass_results.items():
            nr_cell_index = testcase_data['testcase_params'][
                'endc_combo_config']['lte_cell_count']
            cell_config = testcase_data['testcase_params'][
                'endc_combo_config']['cell_list'][nr_cell_index]
            test_id = ('band', cell_config['band'])
            test_id_rvr = test_id + ('RvR', )
            if test_id not in plots:
                # Initialize test id data when not present
                compiled_data[test_id] = {
                    'mcs': [],
                    'average_throughput': [],
                    'theoretical_throughput': [],
                    'cell_power': [],
                }
                plots[test_id] = BokehFigure(
                    title='Band {} - BLER Curves'.format(cell_config['band']),
                    x_label='Cell Power (dBm)',
                    primary_y_label='BLER')
                plots[test_id_rvr] = BokehFigure(
                    title='Band {} - RvR'.format(cell_config['band']),
                    x_label='Cell Power (dBm)',
                    primary_y_label='PHY Rate (Mbps)')
            # Compile test id data and metrics
            compiled_data[test_id]['average_throughput'].append(
                testcase_data['average_throughput_list'])
            compiled_data[test_id]['cell_power'].append(
                testcase_data['cell_power_list'])
            compiled_data[test_id]['mcs'].append(
                testcase_data['testcase_params']['nr_dl_mcs'])
            # Add test case data to plots
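            # Each test case contributes one BLER curve and one dashed RvR
            # trace for its MCS; the solid 'Rate vs. Range' envelope per band
            # is overlaid after all test cases are compiled below.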
            plots[test_id].add_line(
                testcase_data['cell_power_list'],
                testcase_data['bler_list'],
                'MCS {}'.format(testcase_data['testcase_params']['nr_dl_mcs']),
                width=1)
            plots[test_id_rvr].add_line(
                testcase_data['cell_power_list'],
                testcase_data['average_throughput_list'],
                'MCS {}'.format(testcase_data['testcase_params']['nr_dl_mcs']),
                width=1,
                style='dashed')

        for test_id, test_data in compiled_data.items():
            test_id_rvr = test_id + ('RvR', )
            # Interpolate all MCS curves onto a common power grid; numpy.interp
            # expects increasing x values, hence the reversed lists.
            cell_power_interp = sorted(set(sum(test_data['cell_power'], [])))
            average_throughput_interp = []
            for mcs, cell_power, throughput in zip(
                    test_data['mcs'], test_data['cell_power'],
                    test_data['average_throughput']):
                throughput_interp = numpy.interp(cell_power_interp,
                                                 cell_power[::-1],
                                                 throughput[::-1])
                average_throughput_interp.append(throughput_interp)
            rvr = numpy.max(average_throughput_interp, axis=0)
            plots[test_id_rvr].add_line(cell_power_interp, rvr,
                                        'Rate vs. Range')

        figure_list = []
        for plot_id, plot in plots.items():
            plot.generate_figure()
            figure_list.append(plot)
        output_file_path = os.path.join(self.log_path, 'results.html')
        BokehFigure.save_figures(figure_list, output_file_path)

        # Save a CSV with all test results to enable comparison.
        results_file_path = os.path.join(
            context.get_current_context().get_full_output_path(),
            'results.csv')
        with open(results_file_path, 'w', newline='') as csvfile:
            field_names = ['Test Name', 'Sensitivity']
            writer = csv.DictWriter(csvfile, fieldnames=field_names)
            writer.writeheader()
            for testcase_name, testcase_results in self.testclass_results.items():
                row_dict = {
                    'Test Name': testcase_name,
                    'Sensitivity': testcase_results['sensitivity']
                }
                writer.writerow(row_dict)

    def process_testcase_results(self):
        """Computes sensitivity for the current test case and saves raw data."""
        if self.current_test_name not in self.testclass_results:
            return
        testcase_data = self.testclass_results[self.current_test_name]

        bler_list = []
        average_throughput_list = []
        theoretical_throughput_list = []
        nr_cell_index = testcase_data['testcase_params']['endc_combo_config'][
            'lte_cell_count']
        cell_power_list = testcase_data['testcase_params']['cell_power_sweep'][
            nr_cell_index]
        for result in testcase_data['results']:
            bler_list.append(result['throughput_measurements']
                             ['nr_bler_result']['total']['DL']['nack_ratio'])
            average_throughput_list.append(
                result['throughput_measurements']['nr_tput_result']['total']
                ['DL']['average_tput'])
            theoretical_throughput_list.append(
                result['throughput_measurements']['nr_tput_result']['total']
                ['DL']['theoretical_tput'])
        # Pad the throughput lists with zeros so they match the length of the
        # power sweep in case the sweep terminated early.
        padding_len = len(cell_power_list) - len(average_throughput_list)
        average_throughput_list.extend([0] * padding_len)
        theoretical_throughput_list.extend([0] * padding_len)

        # Sensitivity is the cell power one step before BLER exceeds the
        # threshold for the remainder of the sweep (or the last power in the
        # sweep if no such point is found).
        bler_above_threshold = [
            bler > self.testclass_params['bler_threshold']
            for bler in bler_list
        ]
        for idx in range(len(bler_above_threshold)):
            if all(bler_above_threshold[idx:]):
                sensitivity_idx = max(idx, 1) - 1
                break
        else:
            sensitivity_idx = -1
        sensitivity = cell_power_list[sensitivity_idx]
        self.log.info('NR Band {} MCS {} Sensitivity = {} dBm'.format(
            testcase_data['testcase_params']['endc_combo_config']['cell_list']
            [nr_cell_index]['band'],
            testcase_data['testcase_params']['nr_dl_mcs'], sensitivity))

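        # Attach the raw sweep data and the computed sensitivity to the test
        # record; process_testclass_results() uses these to build the per-band
        # plots and the summary CSV.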
        testcase_data['bler_list'] = bler_list
        testcase_data['average_throughput_list'] = average_throughput_list
        testcase_data[
            'theoretical_throughput_list'] = theoretical_throughput_list
        testcase_data['cell_power_list'] = cell_power_list
        testcase_data['sensitivity'] = sensitivity

        results_file_path = os.path.join(
            context.get_current_context().get_full_output_path(),
            '{}.json'.format(self.current_test_name))
        with open(results_file_path, 'w') as results_file:
            json.dump(wputils.serialize_dict(testcase_data),
                      results_file,
                      indent=4)

    def get_per_cell_power_sweeps(self, testcase_params):
        """Returns the power sweep for each cell in the ENDC combo.

        If a previous test case on the same NR band already has a sensitivity
        result, the NR sweep starts at that sensitivity plus adjacent_mcs_gap;
        otherwise it starts at nr_cell_power_start.
        """
        # Look for a reference test on the same band to seed the sweep start
        nr_cell_index = testcase_params['endc_combo_config']['lte_cell_count']
        current_band = testcase_params['endc_combo_config']['cell_list'][
            nr_cell_index]['band']
        reference_test = None
        reference_sensitivity = None
        for testcase_name, testcase_data in self.testclass_results.items():
            if testcase_data['testcase_params']['endc_combo_config'][
                    'cell_list'][nr_cell_index]['band'] == current_band:
                reference_test = testcase_name
                reference_sensitivity = testcase_data['sensitivity']
        if reference_test and reference_sensitivity and not self.retry_flag:
            start_atten = reference_sensitivity + self.testclass_params[
                'adjacent_mcs_gap']
            self.log.info(
                'Reference test {} found. Sensitivity {} dBm. Starting at {} dBm'
                .format(reference_test, reference_sensitivity, start_atten))
        else:
            start_atten = self.testclass_params['nr_cell_power_start']
            self.log.info(
                'Reference test not found. Starting at {} dBm'.format(
                    start_atten))
        # Build the NR cell power sweep and hold the LTE anchor cell at a
        # fixed power for the length of the sweep.
        nr_cell_sweep = list(
            numpy.arange(start_atten,
                         self.testclass_params['nr_cell_power_stop'],
                         self.testclass_params['nr_cell_power_step']))
        lte_sweep = [self.testclass_params['lte_cell_power']
                     ] * len(nr_cell_sweep)
        if nr_cell_index == 0:
            cell_power_sweeps = [nr_cell_sweep]
        else:
            cell_power_sweeps = [lte_sweep, nr_cell_sweep]
        return cell_power_sweeps

    def generate_test_cases(self, channel_list, dl_mcs_list, **kwargs):
        """Generates test cases from the NR single cell config CSV.

        One test case is generated per (CSV row, channel, DL MCS) combination,
        skipping rows marked with skip_test.
        """
        test_cases = []
        with open(self.testclass_params['nr_single_cell_configs'],
                  'r') as csvfile:
            test_configs = csv.DictReader(csvfile)
            for test_config, channel, nr_dl_mcs in itertools.product(
                    test_configs, channel_list, dl_mcs_list):
                if int(test_config['skip_test']):
                    continue
                endc_combo_config = cputils.generate_endc_combo_config_from_csv_row(
                    test_config)
                test_name = 'test_fr1_{}_{}_dl_mcs{}'.format(
                    test_config['nr_band'], channel.lower(), nr_dl_mcs)
                test_params = collections.OrderedDict(
                    endc_combo_config=endc_combo_config,
                    nr_dl_mcs=nr_dl_mcs,
                    **kwargs)
                setattr(self, test_name,
                        partial(self._test_throughput_bler, test_params))
                test_cases.append(test_name)
        return test_cases


class CellularFr1Sensitivity_SampleMCS_Test(CellularFr1SensitivityTest):
    """Class to test single cell FR1 NSA sensitivity on a sample of MCS values."""

    def __init__(self, controllers):
        base_test.BaseTestClass.__init__(self, controllers)
        self.testcase_metric_logger = (
            BlackboxMappedMetricLogger.for_test_case())
        self.testclass_metric_logger = (
            BlackboxMappedMetricLogger.for_test_class())
        self.publish_testcase_metrics = True
        self.testclass_params = self.user_params['nr_sensitivity_test_params']
        self.tests = self.generate_test_cases(
            channel_list=['LOW'],
            dl_mcs_list=[27, 25, 16, 9],
            nr_ul_mcs=4,
            lte_dl_mcs_table='QAM256',
            lte_dl_mcs=4,
            lte_ul_mcs_table='QAM256',
            lte_ul_mcs=4,
            transform_precoding=0,
            schedule_scenario='FULL_TPUT',
            schedule_slot_ratio=80)
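
# The snippet below is an illustrative sketch (not executed by the test) of
# the 'nr_sensitivity_test_params' block these classes read from the ACTS
# user params. The key names are taken from the code above; the values and
# the CSV path are placeholder assumptions and must be adapted to the testbed.
#
# "nr_sensitivity_test_params": {
#     "nr_single_cell_configs": "<path to NR single cell config CSV>",
#     "bler_threshold": 0.05,        # assumed example BLER limit
#     "nr_cell_power_start": -75,    # dBm, assumed sweep start
#     "nr_cell_power_stop": -120,    # dBm, assumed sweep stop
#     "nr_cell_power_step": -1,      # dB per step, assumed (negative: sweep downwards)
#     "lte_cell_power": -60,         # dBm, assumed LTE anchor power
#     "adjacent_mcs_gap": 2          # dB offset for reference-seeded sweeps, assumed
# }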