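"""Uploads protobuf benchmark results to the BigQuery results table."""
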
from __future__ import print_function
from __future__ import absolute_import
import argparse
import os
import re
import copy
import uuid
import calendar
import time
import datetime

from util import big_query_utils
from util import result_parser

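# BigQuery destination for the uploaded benchmark results.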
_PROJECT_ID = 'grpc-testing'
_DATASET = 'protobuf_benchmark_result'
_TABLE = 'opensource_result_v2'
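# Today's date as YYYYMMDD; appended to the table name as a partition decorator.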
_NOW = "%d%02d%02d" % (datetime.datetime.now().year,
                       datetime.datetime.now().month,
                       datetime.datetime.now().day)

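# UTC timestamp taken once at startup so that all uploaded rows share it.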
_INITIAL_TIME = calendar.timegm(time.gmtime())

def get_metadata():
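  """Builds a metadata dict from the CI build's environment variables."""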
  build_number = os.getenv('BUILD_NUMBER')
  build_url = os.getenv('BUILD_URL')
  job_name = os.getenv('JOB_NAME')
  git_commit = os.getenv('GIT_COMMIT')
  # ghprbActualCommit is the actual head commit of the PR being tested.
  git_actual_commit = os.getenv('ghprbActualCommit')

  utc_timestamp = str(calendar.timegm(time.gmtime()))
  metadata = {'created': utc_timestamp}

  if build_number:
    metadata['buildNumber'] = build_number
  if build_url:
    metadata['buildUrl'] = build_url
  if job_name:
    metadata['jobName'] = job_name
  if git_commit:
    metadata['gitCommit'] = git_commit
  if git_actual_commit:
    metadata['gitActualCommit'] = git_actual_commit

  return metadata


def upload_result(result_list, metadata):
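  """Uploads each benchmark result in result_list as a row of the BigQuery table."""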
  for result in result_list:
    new_result = {}
    new_result["metric"] = "throughput"
    new_result["value"] = result["throughput"]
    new_result["unit"] = "MB/s"
    new_result["test"] = "protobuf_benchmark"
    new_result["product_name"] = "protobuf"
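    # Flatten every field of the raw result into a ",|key:value|" labels string;
    # the leading comma is stripped when the string is stored below.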
    labels_string = ""
    for key in result:
      labels_string += ",|%s:%s|" % (key, result[key])
    new_result["labels"] = labels_string[1:]
    new_result["timestamp"] = _INITIAL_TIME
    print(labels_string)

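    # Insert the row into today's partition ("$" + date decorator) of the table.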
    bq = big_query_utils.create_big_query()
    row = big_query_utils.make_row(str(uuid.uuid4()), new_result)
    if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET,
                                       _TABLE + "$" + _NOW, [row]):
      print('Error when uploading result', new_result)


if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  parser.add_argument("-cpp", "--cpp_input_file",
                      help="The C++ benchmark result file's name",
                      default="")
  parser.add_argument("-java", "--java_input_file",
                      help="The Java benchmark result file's name",
                      default="")
  parser.add_argument("-python", "--python_input_file",
                      help="The Python benchmark result file's name",
                      default="")
  parser.add_argument("-go", "--go_input_file",
                      help="The Go benchmark result file's name",
                      default="")
  parser.add_argument("-node", "--node_input_file",
                      help="The Node.js benchmark result file's name",
                      default="")
  parser.add_argument("-php", "--php_input_file",
                      help="The pure PHP benchmark result file's name",
                      default="")
  parser.add_argument("-php_c", "--php_c_input_file",
                      help="The PHP (with C extension) benchmark result file's name",
                      default="")
  args = parser.parse_args()

  metadata = get_metadata()
  print("uploading results...")
  upload_result(result_parser.get_result_from_file(
      cpp_file=args.cpp_input_file,
      java_file=args.java_input_file,
      python_file=args.python_input_file,
      go_file=args.go_input_file,
      node_file=args.node_input_file,
      php_file=args.php_input_file,
      php_c_file=args.php_c_input_file,
  ), metadata)