Python PerfherderSuite Example

Description

The Python PerfherderSuite examples on this page are implementation code extracted from highly rated open-source projects; you can use the examples below as a reference for how the class is used.

Programming language: Python

Namespace/package name: perftest

Example #1
File: talostest.py | Project: wlach/autophone

 def create_suite(self, metric, testname):
     # Build a PerfherderSuite whose top-level value is the metric's
     # 'summary' entry; every other key in the metric dict is reported
     # as a subtest.
     phsuite = PerfherderSuite(name=testname,
                               value=metric['summary'])
     for p in metric:
         if p != 'summary':
             phsuite.add_subtest(p, metric[p])
     return phsuite
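
A minimal usage sketch, assuming a Talos-style metric dict like the one create_suite consumes above (a 'summary' entry for the suite value plus one entry per subtest). The dict contents, the suite name, and the import path are illustrative assumptions, not part of the project:

    from perfherder import PerfherderSuite   # import path assumed; adjust to the project layout

    # Hypothetical metric dict; every key other than 'summary' becomes a subtest.
    metric = {'summary': 250.0, 'tcheck2': 245.3, 'tsvgx': 260.1}

    # Same construction create_suite performs, written out inline.
    suite = PerfherderSuite(name='talos-example', value=metric['summary'])
    for name, value in metric.items():
        if name != 'summary':
            suite.add_subtest(name, value)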

Example #2
File: s1s2test.py | Project: mozilla/autophone

    def run_job(self):
        is_test_completed = False

        if not self.install_local_pages():
            self.add_failure(
                self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                'Aborting test - Could not install local pages on phone.',
                TreeherderStatus.EXCEPTION)
            return is_test_completed

        if not self.create_profile():
            self.add_failure(
                self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                'Aborting test - Could not run Fennec.',
                TreeherderStatus.BUSTED)
            return is_test_completed

        perfherder_options = PerfherderOptions(self.perfherder_options,
                                               repo=self.build.tree)
        is_test_completed = True
        testcount = len(self._urls.keys())
        for testnum, (testname, url) in enumerate(self._urls.iteritems(), 1):
            self.loggerdeco = self.loggerdeco.clone(
                extradict={
                    'repo': self.build.tree,
                    'buildid': self.build.id,
                    'buildtype': self.build.type,
                    'sdk': self.phone.sdk,
                    'platform': self.build.platform,
                    'testname': testname
                },
                extraformat='S1S2TestJob %(repo)s %(buildid)s %(buildtype)s %(sdk)s %(platform)s %(testname)s %(message)s')
            self.dm._logger = self.loggerdeco
            self.loggerdeco.info('Running test (%d/%d) for %d iterations',
                                 testnum, testcount, self._iterations)

            command = None
            for attempt in range(1, self.stderrp_attempts+1):
                # dataset is a list of the measurements made for the
                # iterations for this test.
                #
                # An empty item in the dataset list represents a
                # failure to obtain any measurement for that
                # iteration.
                #
                # It is possible for an item in the dataset to have an
                # uncached value and not have a corresponding cached
                # value if the cached test failed to record the
                # values.

                iteration = 0
                dataset = []
                for iteration in range(1, self._iterations+1):
                    # Calling svc power stayon true will turn on the
                    # display for at least some devices if it has
                    # turned off.
                    self.dm.power_on()
                    command = self.worker_subprocess.process_autophone_cmd(
                        test=self, require_ip_address=url.startswith('http'))
                    if command['interrupt']:
                        self.handle_test_interrupt(command['reason'],
                                                   command['test_result'])
                        break
                    self.update_status(message='Attempt %d/%d for Test %d/%d, '
                                       'run %d, for url %s' %
                                       (attempt, self.stderrp_attempts,
                                        testnum, testcount, iteration, url))

                    if not self.create_profile():
                        self.add_failure(
                            self.name,
                            TestStatus.TEST_UNEXPECTED_FAIL,
                            'Failed to create profile',
                            TreeherderStatus.TESTFAILED)
                        continue

                    measurement = self.runtest(url)
                    if not measurement:
                        self.loggerdeco.warning(
                            '%s %s Attempt %s Failed to get uncached measurement.',
                            testname, url, attempt)
                        continue

                    self.add_pass(url, text='uncached')
                    dataset.append({'uncached': measurement})

                    measurement = self.runtest(url)
                    if not measurement:
                        self.loggerdeco.warning(
                            '%s %s Attempt %s Failed to get cached measurement.',
                            testname, url, attempt)
                        continue

                    self.add_pass(url, text='cached')
                    dataset[-1]['cached'] = measurement

                    if self.is_stderr_below_threshold(
                            ('throbberstart',
                             'throbberstop'),
                            dataset,
                            self.stderrp_accept):
                        self.loggerdeco.info(
                            'Accepted test (%d/%d) after %d of %d iterations',
                            testnum, testcount, iteration, self._iterations)
                        break

                if command and command['interrupt']:
                    break
                measurements = len(dataset)
                if measurements > 0 and self._iterations != measurements:
                    self.add_failure(
                        self.name,
                        TestStatus.TEST_UNEXPECTED_FAIL,
                        'Failed to get all measurements',
                        TreeherderStatus.TESTFAILED)
                elif measurements == 0:
                    # If we have not gotten a single measurement at this point,
                    # just bail and report the failure rather than wasting time
                    # continuing more attempts.
                    self.add_failure(
                        self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                        'No measurements detected.',
                        TreeherderStatus.BUSTED)
                    self.loggerdeco.info(
                        'Failed to get measurements for test %s after %d/%d attempt '
                        'of %d iterations', testname, attempt,
                        self.stderrp_attempts, self._iterations)
                    self.worker_subprocess.mailer.send(
                        '%s %s failed for Build %s %s on %s %s' %
                        (self.__class__.__name__, testname, self.build.tree,
                         self.build.id, utils.host(), self.phone.id),
                        'No measurements were detected for test %s.\n\n'
                        'Job        %s\n'
                        'Host       %s\n'
                        'Phone      %s\n'
                        'Repository %s\n'
                        'Build      %s\n'
                        'Revision   %s\n' %
                        (testname,
                         self.job_url,
                         utils.host(),
                         self.phone.id,
                         self.build.tree,
                         self.build.id,
                         self.build.changeset))
                    break

                if self.is_stderr_below_threshold(
                        ('throbberstart',
                         'throbberstop'),
                        dataset,
                        self.stderrp_reject):
                    rejected = False
                else:
                    rejected = True
                    self.loggerdeco.info(
                        'Rejected test (%d/%d) after %d/%d iterations',
                        testnum, testcount, iteration, self._iterations)

                self.loggerdeco.debug('publishing results')

                perfherder_values = {'geometric_mean': 0}
                metric_keys = ['throbberstart', 'throbberstop', 'throbbertime']
                cache_names = {'uncached': 'first', 'cached': 'second'}
                cache_keys = cache_names.keys()

                for metric_key in metric_keys:
                    perfherder_values[metric_key] = {'geometric_mean': 0}
                    for cache_key in cache_keys:
                        perfherder_values[metric_key][cache_key] = {'median': 0, 'values': []}

                for datapoint in dataset:
                    for cache_key in datapoint:
                        starttime = datapoint[cache_key]['starttime']
                        throbberstart = datapoint[cache_key]['throbberstart']
                        throbberstop = datapoint[cache_key]['throbberstop']
                        self.report_results(
                            starttime=starttime,
                            tstrt=throbberstart,
                            tstop=throbberstop,
                            testname=testname,
                            cache_enabled=(cache_key == 'cached'),
                            rejected=rejected)
                        perfherder_values['throbberstart'][cache_key]['values'].append(
                            throbberstart - starttime)
                        perfherder_values['throbberstop'][cache_key]['values'].append(
                            throbberstop - starttime)
                        perfherder_values['throbbertime'][cache_key]['values'].append(
                            throbberstop - throbberstart)

                test_values = []
                for metric_key in metric_keys:
                    for cache_key in cache_keys:
                        perfherder_values[metric_key][cache_key]['median'] = utils.median(
                            perfherder_values[metric_key][cache_key]['values'])
                    perfherder_values[metric_key]['geometric_mean'] = utils.geometric_mean(
                        [perfherder_values[metric_key]['uncached']['median'],
                         perfherder_values[metric_key]['cached']['median']])
                    test_values.append(perfherder_values[metric_key]['geometric_mean'])

                perfherder_suite = PerfherderSuite(name=testname,
                                                   value=utils.geometric_mean(test_values),
                                                   options=perfherder_options)
                for metric_key in metric_keys:
                    for cache_key in cache_keys:
                        cache_name = cache_names[cache_key]
                        subtest_name = "%s %s" % (metric_key, cache_name)
                        perfherder_suite.add_subtest(
                            subtest_name,
                            perfherder_values[metric_key][cache_key]['median'],
                            options=perfherder_options)

                self.perfherder_artifact = PerfherderArtifact()
                self.perfherder_artifact.add_suite(perfherder_suite)
                self.loggerdeco.debug("PerfherderArtifact: %s", self.perfherder_artifact)

                if not rejected:
                    break

            if command and command['interrupt']:
                break

        return is_test_completed
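
For reference, a standalone sketch of the aggregation run_job performs when it publishes results: each metric's per-iteration deltas are reduced to a median per cache state, the uncached and cached medians are combined with a geometric mean, and the suite value is the geometric mean of those per-metric means. The helper below and the sample numbers are assumptions standing in for autophone's utils.median and utils.geometric_mean:

    import math
    from statistics import median        # stand-in for utils.median

    def geometric_mean(values):          # stand-in for utils.geometric_mean
        values = [v for v in values if v > 0]
        if not values:
            return 0
        return math.exp(sum(math.log(v) for v in values) / len(values))

    # Per-iteration millisecond deltas, shaped like perfherder_values above.
    perfherder_values = {
        'throbberstart': {'uncached': [310, 298, 305], 'cached': [120, 118, 125]},
        'throbberstop':  {'uncached': [910, 895, 902], 'cached': [480, 470, 476]},
        'throbbertime':  {'uncached': [600, 597, 597], 'cached': [360, 352, 351]},
    }

    test_values = []
    for metric, by_cache in perfherder_values.items():
        medians = [median(by_cache['uncached']), median(by_cache['cached'])]
        test_values.append(geometric_mean(medians))

    suite_value = geometric_mean(test_values)   # value reported on the PerfherderSuite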
