X Close
Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Address feedback
  • Loading branch information
diegorusso committed Sep 24, 2024
commit 6d07201f70411b3238e31061e04227c201e51fab
22 changes: 10 additions & 12 deletions doc/usage.rst
Original file line number Diff line number Diff line change
Expand Up @@ -101,9 +101,8 @@ Usage::

pyperformance run [-h] [-r] [-f] [--debug-single-value] [-v] [-m]
[--affinity CPU_LIST] [-o FILENAME]
[--append FILENAME] [--min-time MIN_TIME]
[--same-loops SAME_LOOPS] [--timeout TIMEOUT]
[--manifest MANIFEST] [-b BM_LIST]
[--append FILENAME] [--manifest MANIFEST]
[--timeout TIMEOUT] [-b BM_LIST]
[--inherit-environ VAR_LIST] [-p PYTHON]

options::
Expand All @@ -125,17 +124,12 @@ options::
baseline_python, not changed_python.
--append FILENAME Add runs to an existing file, or create it if
it doesn't exist
--min-time MIN_TIME Minimum duration in seconds of a single value, used
to calibrate the number of loops
--same-loops SAME_LOOPS
Use the same number of loops as a previous run
(i.e., don't recalibrate). Should be a path to a
.json file from a previous run.
--timeout TIMEOUT Timeout for a benchmark run (default: disabled)
--timeout TIMEOUT Specify a timeout in seconds for a single
benchmark run (default: disabled)
--manifest MANIFEST benchmark manifest file to use
-b BM_LIST, --benchmarks BM_LIST
Comma-separated list of benchmarks or groups to run.
Can contain both positive and negative arguments:
Comma-separated list of benchmarks to run. Can
contain both positive and negative arguments:
--benchmarks=run_this,also_this,-not_this. If
there are no positive arguments, we'll run all
benchmarks except the negative arguments.
Expand All @@ -148,6 +142,10 @@ options::
-p PYTHON, --python PYTHON
Python executable (default: use running
Python)
--same-loops SAME_LOOPS
Use the same number of loops as a previous run
(i.e., don't recalibrate). Should be a path to a
.json file from a previous run.

show
----
Expand Down
10 changes: 5 additions & 5 deletions pyperformance/_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,6 @@ def python(self):
def run(self, python, runid=None, pyperf_opts=None, *,
venv=None,
verbose=False,
timeout=None,
):
if venv and python == sys.executable:
python = venv.python
Expand All @@ -194,7 +193,6 @@ def run(self, python, runid=None, pyperf_opts=None, *,
extra_opts=self.extra_opts,
pyperf_opts=pyperf_opts,
verbose=verbose,
timeout=timeout,
)

return bench
Expand All @@ -207,7 +205,6 @@ def _run_perf_script(python, runscript, runid, *,
extra_opts=None,
pyperf_opts=None,
verbose=False,
timeout=None,
):
if not runscript:
raise ValueError('missing runscript')
Expand All @@ -230,14 +227,17 @@ def _run_perf_script(python, runscript, runid, *,
argv,
env=env,
capture='stderr' if hide_stderr else None,
timeout=timeout,
)
if ec != 0:
if hide_stderr:
sys.stderr.flush()
sys.stderr.write(stderr)
sys.stderr.flush()
raise RuntimeError("Benchmark died")
# pyperf returns exit code 124 if the benchmark execution times out
if ec == 124:
raise TimeoutError("Benchmark timed out")
else:
raise RuntimeError("Benchmark died")
return pyperf.BenchmarkSuite.load(tmp)


Expand Down
9 changes: 1 addition & 8 deletions pyperformance/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ def safe_rmtree(path):
MS_WINDOWS = (sys.platform == 'win32')


def run_cmd(argv, *, env=None, capture=None, verbose=True, timeout=None):
def run_cmd(argv, *, env=None, capture=None, verbose=True):
try:
cmdstr = ' '.join(shlex.quote(a) for a in argv)
except TypeError:
Expand Down Expand Up @@ -130,20 +130,13 @@ def run_cmd(argv, *, env=None, capture=None, verbose=True, timeout=None):
if verbose:
print('#', cmdstr)

if timeout:
kw.update(timeout=timeout)

# Explicitly flush standard streams, required if streams are buffered
# (not TTY) to write lines in the expected order
sys.stdout.flush()
sys.stderr.flush()

try:
proc = subprocess.run(argv, **kw)
except subprocess.TimeoutExpired as exc:
if verbose:
print('command timed out (%s)' % exc)
raise
except OSError as exc:
if exc.errno == errno.ENOENT:
if verbose:
Expand Down
3 changes: 2 additions & 1 deletion pyperformance/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,8 @@ def parse_args():
"(i.e., don't recalibrate). Should be a path to a "
".json file from a previous run.")
cmd.add_argument("--timeout",
help="Timeout for a benchmark run (default: disabled)",
help="Specify a timeout in seconds for a single "
"benchmark run (default: disabled)",
type=check_positive)
filter_opts(cmd)

Expand Down
11 changes: 5 additions & 6 deletions pyperformance/run.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
from collections import namedtuple
import hashlib
import json
import subprocess
import sys
import time
import traceback
Expand Down Expand Up @@ -174,12 +173,10 @@ def add_bench(dest_suite, obj):
pyperf_opts,
venv=bench_venv,
verbose=options.verbose,
timeout=options.timeout,
)
except subprocess.TimeoutExpired as exc:
timeout = round(exc.timeout)
print("ERROR: Benchmark %s timed out after %s seconds" % (name, timeout))
errors.append((name, "Timed out after %s seconds" % timeout))
except TimeoutError as exc:
print("ERROR: Benchmark %s timed out" % name)
errors.append((name, exc))
except RuntimeError as exc:
print("ERROR: Benchmark %s failed: %s" % (name, exc))
traceback.print_exc()
Expand Down Expand Up @@ -243,5 +240,7 @@ def get_pyperf_opts(options):
opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ))
if options.min_time:
opts.append('--min-time=%s' % options.min_time)
if options.timeout:
opts.append('--timeout=%s' % options.timeout)

return opts
6 changes: 1 addition & 5 deletions pyperformance/tests/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
DEV_SCRIPT = os.path.join(REPO_ROOT, 'dev.py')


def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True, timeout=None):
def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True):
# XXX Optionally write the output to a file.
argv = (cmd,) + args
if not all(a and isinstance(a, str) for a in argv):
Expand All @@ -39,10 +39,6 @@ def run_cmd(cmd, *args, capture=None, onfail='exit', verbose=True, timeout=None)

if verbose:
print(f"(tests) Execute: {argv_str}", flush=True)

if timeout:
kwargs['timeout'] = 60

proc = subprocess.run(argv, **kwargs)

exitcode = proc.returncode
Expand Down
3 changes: 0 additions & 3 deletions pyperformance/tests/test_commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,14 +63,12 @@ def run_pyperformance(self, cmd, *args,
exitcode=0,
capture='both',
verbose=True,
timeout=None,
):
ec, stdout, stderr = self.run_module(
'pyperformance', cmd, *args,
capture=capture,
onfail=None,
verbose=verbose,
timeout=timeout,
)
if exitcode is True:
self.assertGreater(ec, 0, repr(stdout))
Expand Down Expand Up @@ -156,7 +154,6 @@ def test_run_and_show(self):
'--debug-single-value',
'-o', filename,
capture=None,
timeout=None,
)

# Display slowest benchmarks
Expand Down
X Close