# Copyright 2015 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script validating field trial configs.

See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""

import copy
import io
import json
import re
import sys

# TODO(b/365662411): Upgrade to PRESUBMIT_VERSION 2.0.0.

from collections import OrderedDict

# Valid keys for an experiment entry. The '//0' through '//9' keys hold
# comments attached to the experiment and are kept at the top when
# pretty-printing.
VALID_EXPERIMENT_KEYS = [
    'name', 'forcing_flag', 'params', 'enable_features', 'disable_features',
    'min_os_version', 'hardware_classes', 'exclude_hardware_classes', '//0',
    '//1', '//2', '//3', '//4', '//5', '//6', '//7', '//8', '//9'
]

FIELDTRIAL_CONFIG_FILE_NAME = 'fieldtrial_testing_config.json'

BASE_FEATURE_PATTERN = r'BASE_FEATURE\((.*?),(.*?),(.*?)\);'
BASE_FEATURE_RE = re.compile(BASE_FEATURE_PATTERN,
                             flags=re.MULTILINE + re.DOTALL)
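# For reference, the pattern above is intended to match C++ feature
# declarations of the form (illustrative example, not taken from this file):
#   BASE_FEATURE(kMyFeature, "MyFeature", base::FEATURE_DISABLED_BY_DEFAULT);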


def PrettyPrint(contents):
  """Pretty prints a fieldtrial configuration.

  Args:
    contents: File contents as a string.

  Returns:
    Pretty printed file contents.
  """

  # We have a preferred ordering of the fields (e.g. platforms on top). This
  # code loads everything into OrderedDicts and then tells json to dump it out.
  # The JSON dumper will respect the dict ordering.
  #
  # The ordering is as follows:
  # {
  #     'StudyName Alphabetical': [
  #         {
  #             'platforms': [sorted platforms]
  #             'experiments': [
  #                 {
  #                     name: ...
  #                     forcing_flag: "forcing flag string"
  #                     params: {sorted dict}
  #                     enable_features: [sorted features]
  #                     disable_features: [sorted features]
  #                     min_os_version: "version string"
  #                     hardware_classes: [sorted classes]
  #                     exclude_hardware_classes: [sorted classes]
  #                     (Unexpected extra keys will be caught by the validator)
  #                 }
  #             ],
  #             ....
  #         },
  #         ...
  #     ]
  #     ...
  # }
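  #
  # As an illustrative sketch (hypothetical study and feature names, not taken
  # from the real config), a single study entry might look like:
  #
  # "MyStudy": [
  #     {
  #         "platforms": ["android", "windows"],
  #         "experiments": [
  #             {
  #                 "name": "Enabled",
  #                 "params": {"param1": "value1"},
  #                 "enable_features": ["MyFeature"]
  #             }
  #         ]
  #     }
  # ]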
  config = json.loads(contents)
  ordered_config = OrderedDict()
  for key in sorted(config.keys()):
    study = copy.deepcopy(config[key])
    ordered_study = []
    for experiment_config in study:
      ordered_experiment_config = OrderedDict([('platforms',
                                                experiment_config['platforms']),
                                               ('experiments', [])])
      for experiment in experiment_config['experiments']:
        ordered_experiment = OrderedDict()
        for index in range(0, 10):
          comment_key = '//' + str(index)
          if comment_key in experiment:
            ordered_experiment[comment_key] = experiment[comment_key]
        ordered_experiment['name'] = experiment['name']
        if 'forcing_flag' in experiment:
          ordered_experiment['forcing_flag'] = experiment['forcing_flag']
        if 'params' in experiment:
          ordered_experiment['params'] = OrderedDict(
              sorted(experiment['params'].items(), key=lambda t: t[0]))
        if 'enable_features' in experiment:
          ordered_experiment['enable_features'] = \
              sorted(experiment['enable_features'])
        if 'disable_features' in experiment:
          ordered_experiment['disable_features'] = \
              sorted(experiment['disable_features'])
        if 'min_os_version' in experiment:
          ordered_experiment['min_os_version'] = experiment['min_os_version']
        if 'hardware_classes' in experiment:
          ordered_experiment['hardware_classes'] = \
              sorted(experiment['hardware_classes'])
        if 'exclude_hardware_classes' in experiment:
          ordered_experiment['exclude_hardware_classes'] = \
              sorted(experiment['exclude_hardware_classes'])
        ordered_experiment_config['experiments'].append(ordered_experiment)
      ordered_study.append(ordered_experiment_config)
    ordered_config[key] = ordered_study
  return json.dumps(
      ordered_config, sort_keys=False, indent=4, separators=(',', ': ')) + '\n'


def ValidateData(json_data, file_path, message_type):
  """Validates the format of a fieldtrial configuration.

  Args:
    json_data: Parsed JSON object representing the fieldtrial config.
    file_path: String representing the path to the JSON file.
    message_type: Type of message from |output_api| to return in the case of
      errors/warnings.

  Returns:
    A list of |message_type| messages. In the case of all tests passing with no
    warnings/errors, this will return [].
  """

  def _CreateMessage(message_format, *args):
    return _CreateMalformedConfigMessage(message_type, file_path,
                                         message_format, *args)

  if not isinstance(json_data, dict):
    return _CreateMessage('Expecting dict')
  for (study, experiment_configs) in iter(json_data.items()):
    warnings = _ValidateEntry(study, experiment_configs, _CreateMessage)
    if warnings:
      return warnings

  return []


def _ValidateEntry(study, experiment_configs, create_message_fn):
  """Validates one entry of the field trial configuration."""
  if not isinstance(study, str):
    return create_message_fn('Expecting keys to be string, got %s', type(study))
  if not isinstance(experiment_configs, list):
    return create_message_fn('Expecting list for study %s', study)

  # Add context to other messages.
  def _CreateStudyMessage(message_format, *args):
    suffix = ' in Study[%s]' % study
    return create_message_fn(message_format + suffix, *args)

  for experiment_config in experiment_configs:
    warnings = _ValidateExperimentConfig(experiment_config, _CreateStudyMessage)
    if warnings:
      return warnings
  return []


def _ValidateExperimentConfig(experiment_config, create_message_fn):
  """Validates one config in a configuration entry."""
  if not isinstance(experiment_config, dict):
    return create_message_fn('Expecting dict for experiment config')
  if 'experiments' not in experiment_config:
    return create_message_fn('Missing valid experiments for experiment config')
  if not isinstance(experiment_config['experiments'], list):
    return create_message_fn('Expecting list for experiments')
  for experiment_group in experiment_config['experiments']:
    warnings = _ValidateExperimentGroup(experiment_group, create_message_fn)
    if warnings:
      return warnings
  if 'platforms' not in experiment_config:
    return create_message_fn('Missing valid platforms for experiment config')
  if not isinstance(experiment_config['platforms'], list):
    return create_message_fn('Expecting list for platforms')
  supported_platforms = [
      'android', 'android_weblayer', 'android_webview', 'chromeos',
      'chromeos_lacros', 'fuchsia', 'ios', 'linux', 'mac', 'windows'
  ]
  experiment_platforms = experiment_config['platforms']
  unsupported_platforms = list(
      set(experiment_platforms).difference(supported_platforms))
  if unsupported_platforms:
    return create_message_fn('Unsupported platforms %s', unsupported_platforms)
  return []


def _ValidateExperimentGroup(experiment_group, create_message_fn):
  """Validates one group of one config in a configuration entry."""
  name = experiment_group.get('name', '')
  if not name or not isinstance(name, str):
    return create_message_fn('Missing valid name for experiment')

  # Add context to other messages.
  def _CreateGroupMessage(message_format, *args):
    suffix = ' in Group[%s]' % name
    return create_message_fn(message_format + suffix, *args)

  if 'params' in experiment_group:
    params = experiment_group['params']
    if not isinstance(params, dict):
      return _CreateGroupMessage('Expected dict for params')
    for (key, value) in iter(params.items()):
      if not isinstance(key, str) or not isinstance(value, str):
        return _CreateGroupMessage('Invalid param (%s: %s)', key, value)
  for key in experiment_group.keys():
    if key not in VALID_EXPERIMENT_KEYS:
      return _CreateGroupMessage('Key[%s] is not a valid key', key)
  return []


def _CreateMalformedConfigMessage(message_type, file_path, message_format,
                                  *args):
  """Returns a list containing one |message_type| with the error message.

  Args:
    message_type: Type of message from |output_api| to return in the case of
      errors/warnings.
    file_path: The path to the config file.
    message_format: The error message format string.
    *args: The args for message_format.

  Returns:
    A list containing a message_type with a formatted error message and
    'Malformed config file [file]: ' prepended to it.
  """
  error_message_format = 'Malformed config file %s: ' + message_format
  format_args = (file_path,) + args
  return [message_type(error_message_format % format_args)]


def CheckPretty(contents, file_path, message_type):
  """Validates the pretty printing of fieldtrial configuration.

  Args:
    contents: File contents as a string.
    file_path: String representing the path to the JSON file.
    message_type: Type of message from |output_api| to return in the case of
      errors/warnings.

  Returns:
    A list of |message_type| messages. In the case of all tests passing with no
    warnings/errors, this will return [].
  """
  pretty = PrettyPrint(contents)
  if contents != pretty:
    return [
        message_type('Pretty printing error: Run '
                     'python3 testing/variations/PRESUBMIT.py %s' % file_path)
    ]
  return []


def _GetStudyConfigFeatures(study_config):
  """Gets the set of features overridden in a study config."""
  features = set()
  for experiment in study_config.get('experiments', []):
    features.update(experiment.get('enable_features', []))
    features.update(experiment.get('disable_features', []))
  return features


def _GetDuplicatedFeatures(study1, study2):
  """Gets the set of features that are overridden in two overlapping studies."""
  duplicated_features = set()
  for study_config1 in study1:
    features = _GetStudyConfigFeatures(study_config1)
    platforms = set(study_config1.get('platforms', []))
    for study_config2 in study2:
      # If the study configs do not specify any common platform, they do not
      # overlap, so we can skip them.
      if platforms.isdisjoint(set(study_config2.get('platforms', []))):
        continue

      common_features = features & _GetStudyConfigFeatures(study_config2)
      duplicated_features.update(common_features)

  return duplicated_features


def CheckDuplicatedFeatures(new_json_data, old_json_data, message_type):
  """Validates that features are not specified in multiple studies.

  Note that a feature may be specified in different studies that do not
  overlap, for example because they target different platforms; in such a case,
  this check will not give a warning/error. However, it is possible that this
  incorrectly gives an error, as studies can have more complex filters (e.g.,
  if they make use of additional filters such as form_factors,
  is_low_end_device, etc.). In those cases, the PRESUBMIT check can be
  bypassed. Since this only checks studies that were changed in this particular
  commit, bypassing the PRESUBMIT check will not block future commits.

  Args:
    new_json_data: Parsed JSON object representing the new fieldtrial config.
    old_json_data: Parsed JSON object representing the old fieldtrial config.
    message_type: Type of message from |output_api| to return in the case of
      errors/warnings.

  Returns:
    A list of |message_type| messages. In the case of all tests passing with no
    warnings/errors, this will return [].
  """
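  # Illustrative example (hypothetical study and feature names): if "StudyA"
  # and "StudyB" both override "MyFeature" and share the "android" platform,
  # "MyFeature" is reported as duplicated; if "StudyB" only targets "ios",
  # the two studies do not overlap and nothing is reported.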
  # Get the list of studies that changed.
  changed_studies = []
  for study_name in new_json_data:
    if (study_name not in old_json_data
        or new_json_data[study_name] != old_json_data[study_name]):
      changed_studies.append(study_name)

  # A map between a feature name and the names of the studies that use it, e.g.
  # duplicated_features_to_studies_map["FeatureA"] = {"StudyA", "StudyB"}.
  # Only features that are defined in multiple studies are added to this map.
  duplicated_features_to_studies_map = dict()

  # Compare the changed studies against all studies defined.
  for changed_study_name in changed_studies:
    for study_name in new_json_data:
      if changed_study_name == study_name:
        continue

      duplicated_features = _GetDuplicatedFeatures(
          new_json_data[changed_study_name], new_json_data[study_name])

      for feature in duplicated_features:
        if feature not in duplicated_features_to_studies_map:
          duplicated_features_to_studies_map[feature] = set()
        duplicated_features_to_studies_map[feature].update(
            [changed_study_name, study_name])

  if not duplicated_features_to_studies_map:
    return []

  duplicated_features_strings = [
      '%s (in studies %s)' % (feature, ', '.join(studies))
      for feature, studies in duplicated_features_to_studies_map.items()
  ]

  return [
      message_type('The following feature(s) were specified in multiple '
                   'studies: %s' % ', '.join(duplicated_features_strings))
  ]


def CheckUndeclaredFeatures(input_api, output_api, json_data, changed_lines):
  """Checks that feature names are all valid declared features.

  There has been more than one instance of developers accidentally mistyping
  a feature name in the fieldtrial_testing_config.json file, which leads
  to the config silently doing nothing.

  This check aims to catch these errors by validating that the feature name
  is defined somewhere in the Chrome source code.

  Args:
    input_api: Presubmit InputApi
    output_api: Presubmit OutputApi
    json_data: The parsed fieldtrial_testing_config.json
    changed_lines: The AffectedFile.ChangedContents() of the json file

  Returns:
    List of validation messages - empty if there are no errors.
  """
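  # As an illustration (hypothetical feature name), a changed line such as
  #   "enable_features": ["MyFeatur"]
  # that does not correspond to any BASE_FEATURE(..., "MyFeature", ...)
  # declaration in the source tree would be reported below for manual review.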

  declared_features = set()
  # I was unable to figure out how to do a proper top-level include that did
  # not depend on getting the path from input_api. I found this pattern
  # elsewhere in the code base. Please change to a top-level include if you
  # know how.
  old_sys_path = sys.path[:]
  try:
    sys.path.append(
        input_api.os_path.join(input_api.PresubmitLocalPath(), 'presubmit'))
    # pylint: disable=import-outside-toplevel
    import find_features
    # pylint: enable=import-outside-toplevel
    declared_features = find_features.FindDeclaredFeatures(input_api)
  finally:
    sys.path = old_sys_path

  if not declared_features:
    return [
        output_api.PresubmitError(
            'Presubmit unable to find any declared flags in source. Please '
            'check PRESUBMIT.py for errors.')
    ]

  messages = []
  # Join all changed lines into a single string. This will be used to check
  # if feature names are present in the changed lines by substring search.
  changed_contents = ' '.join([x[1].strip() for x in changed_lines])
  for study_name in json_data:
    study = json_data[study_name]
    for config in study:
      features = set(_GetStudyConfigFeatures(config))
      # Determine if a study has been touched by the current change by checking
      # if any of the features are part of the changed lines of the file.
      # This limits the noise from old configs that are no longer valid.
      probably_affected = False
      for feature in features:
        if feature in changed_contents:
          probably_affected = True
          break

      if probably_affected and not declared_features.issuperset(features):
        missing_features = features - declared_features
        # CrOS has external feature declarations starting with this prefix
        # (checked by build tools in base/BUILD.gn).
        # Warn, but don't break, if they are present in the CL.
        cros_late_boot_features = {
            s
            for s in missing_features if s.startswith('CrOSLateBoot')
        }
        missing_features = missing_features - cros_late_boot_features
        if cros_late_boot_features:
          msg = ('CrOSLateBoot features added to '
                 'study %s are not checked by presubmit.'
                 '\nPlease manually check that they exist in the code base.'
                 ) % study_name
          messages.append(
              output_api.PresubmitResult(msg, cros_late_boot_features))

        if missing_features:
          msg = ('Presubmit was unable to verify existence of features in '
                 'study %s.\nThis happens most commonly if the feature is '
                 'defined by code generation.\n'
                 'Please verify that the feature names have been spelled '
                 'correctly before submitting. The affected features are:'
                 ) % study_name
          messages.append(output_api.PresubmitResult(msg, missing_features))

  return messages



def CommonChecks(input_api, output_api):
  affected_files = input_api.AffectedFiles(
      include_deletes=False,
      file_filter=lambda x: x.LocalPath().endswith('.json'))
  for f in affected_files:
    if not f.LocalPath().endswith(FIELDTRIAL_CONFIG_FILE_NAME):
      return [
          output_api.PresubmitError(
              '%s is the only JSON file expected in this folder. If new JSON '
              'files are added, please update the presubmit process with '
              'proper validation.' % FIELDTRIAL_CONFIG_FILE_NAME)
      ]
    contents = input_api.ReadFile(f)
    try:
      json_data = input_api.json.loads(contents)
      result = ValidateData(json_data, f.AbsoluteLocalPath(),
                            output_api.PresubmitError)
      if result:
        return result
      result = CheckPretty(contents, f.LocalPath(), output_api.PresubmitError)
      if result:
        return result
      result = CheckDuplicatedFeatures(
          json_data, input_api.json.loads('\n'.join(f.OldContents())),
          output_api.PresubmitError)
      if result:
        return result
      if input_api.is_committing:
        result = CheckUndeclaredFeatures(input_api, output_api, json_data,
                                         f.ChangedContents())
        if result:
          return result
    except ValueError:
      return [
          output_api.PresubmitError('Malformed JSON file: %s' % f.LocalPath())
      ]
  return []


def CheckChangeOnUpload(input_api, output_api):
  return CommonChecks(input_api, output_api)


def CheckChangeOnCommit(input_api, output_api):
  return CommonChecks(input_api, output_api)


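# When run directly (rather than through the presubmit machinery), this script
# pretty-prints the given config file in place, e.g. (illustrative path):
#   python3 testing/variations/PRESUBMIT.py fieldtrial_testing_config.json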
def main(argv):
  """Pretty-prints the fieldtrial config file given as argv[1], in place."""
  with io.open(argv[1], encoding='utf-8') as f:
    content = f.read()
  pretty = PrettyPrint(content)
  with io.open(argv[1], 'wb') as f:
    f.write(pretty.encode('utf-8'))


if __name__ == '__main__':
  sys.exit(main(sys.argv))