myqueue
Advanced tools
| Metadata-Version: 2.4 | ||
| Name: myqueue | ||
| Version: 25.11.0 | ||
| Version: 26.3.0 | ||
| Summary: Frontend for SLURM/LSF/PBS | ||
@@ -5,0 +5,0 @@ Maintainer-email: Jens Jørgen Mortensen <jjmo@dtu.dk> |
@@ -1,36 +0,5 @@ | ||
| .gitignore | ||
| .gitlab-ci.yml | ||
| .mailmap | ||
| .readthedocs.yml | ||
| CHANGELOG.rst | ||
| CONTRIBUTING.rst | ||
| LICENSE | ||
| MANIFEST.in | ||
| README.rst | ||
| mypy.ini | ||
| pyproject.toml | ||
| pytest.ini | ||
| docs/Makefile | ||
| docs/api.rst | ||
| docs/cli.rst | ||
| docs/conf.py | ||
| docs/configuration.rst | ||
| docs/development.rst | ||
| docs/documentation.rst | ||
| docs/example_config.py | ||
| docs/howitworks.rst | ||
| docs/howto.rst | ||
| docs/index.rst | ||
| docs/installation.rst | ||
| docs/paper.bib | ||
| docs/paper.md | ||
| docs/quickstart.rst | ||
| docs/releasenotes.rst | ||
| docs/requirements.txt | ||
| docs/workflows.rst | ||
| docs/_static/favicon.ico | ||
| docs/_static/logo.svg | ||
| docs/prime/check.py | ||
| docs/prime/factor.py | ||
| docs/prime/workflow.py | ||
| myqueue/__init__.py | ||
@@ -69,3 +38,2 @@ myqueue/__main__.py | ||
| myqueue.egg-info/entry_points.txt | ||
| myqueue.egg-info/pbr.json | ||
| myqueue.egg-info/requires.txt | ||
@@ -72,0 +40,0 @@ myqueue.egg-info/top_level.txt |
@@ -30,3 +30,3 @@ from __future__ import annotations | ||
| y = wrap(f, name=f'fa-{x}')(x) | ||
| run(function=f, name=f'fb-{x}', args=[y + 1]) | ||
| run(function=f, name=f'fb-{x}', args=[y + 1], serialize=True) | ||
| A.append(y) | ||
@@ -33,0 +33,0 @@ |
@@ -15,3 +15,3 @@ from pathlib import Path | ||
| with run(function=print, name='T1'): | ||
| run(function=print, name='T2') | ||
| run(function=print, name='T2', serialize=True) | ||
@@ -18,0 +18,0 @@ |
+7
-3
@@ -311,2 +311,8 @@ """Useful utilities.""" | ||
| try: | ||
| suggested.parent.mkdir(exist_ok=True, parents=True) | ||
| suggested.touch() | ||
| except PermissionError: | ||
| return | ||
| cmd = completion_command() | ||
@@ -318,7 +324,5 @@ activate = Path(venv) / 'bin/activate' | ||
| print(f' {activate}\n') | ||
| print(f'\n echo "{cmd}" >> {activate}\n') | ||
| suggested.parent.mkdir(exist_ok=True, parents=True) | ||
| suggested.touch() | ||
| def convert_done_files() -> None: | ||
@@ -325,0 +329,0 @@ """Convert old done-files to new-style state files.""" |
+21
-11
@@ -348,2 +348,3 @@ from __future__ import annotations | ||
| weight: float = None, | ||
| serialize: bool = False, | ||
| folder: Path | str = '.') -> RunHandle: | ||
@@ -416,2 +417,3 @@ """Run or submit a task. | ||
| creates=creates, | ||
| serialize=serialize, | ||
| **resource_kwargs) | ||
@@ -460,2 +462,3 @@ | ||
| kwargs=kwargs, | ||
| serialize=True, | ||
| **run_kwargs) | ||
@@ -518,2 +521,3 @@ return handle.result | ||
| creates: list[str] = [], | ||
| serialize: bool = False, | ||
| **resource_kwargs: Any) -> Task: | ||
@@ -529,6 +533,11 @@ """Create a Task object.""" | ||
| name = name or get_name(function) | ||
| cached_function = json_cached_function(function, name, args, kwargs) | ||
| command = WorkflowTask(f'{workflow_script}:{name}', [], | ||
| cached_function) | ||
| creates = creates + [f'{name}.result'] | ||
| if serialize: | ||
| func = json_cached_function(function, name, args, kwargs) | ||
| creates = creates + [f'{name}.result'] | ||
| else: | ||
| def func() -> None: | ||
 | """Pass args and kwargs.""" | ||
| function(*args, **kwargs) | ||
| command = WorkflowTask(f'{workflow_script}:{name}', [], func) | ||
| elif module: | ||
@@ -565,9 +574,10 @@ assert not kwargs | ||
| if function and not any(isinstance(thing, Result) | ||
| for thing in list(args) + list(kwargs.values())): | ||
| try: | ||
| task.result = cached_function(only_read_from_cache=True) | ||
| task.state = State.done | ||
| except CacheFileNotFoundError: | ||
| pass | ||
| if function and serialize: | ||
| if not any(isinstance(thing, Result) | ||
| for thing in list(args) + list(kwargs.values())): | ||
| try: | ||
| task.result = func(only_read_from_cache=True) | ||
| task.state = State.done | ||
| except CacheFileNotFoundError: | ||
| pass | ||
@@ -574,0 +584,0 @@ return task |
+1
-1
| Metadata-Version: 2.4 | ||
| Name: myqueue | ||
| Version: 25.11.0 | ||
| Version: 26.3.0 | ||
| Summary: Frontend for SLURM/LSF/PBS | ||
@@ -5,0 +5,0 @@ Maintainer-email: Jens Jørgen Mortensen <jjmo@dtu.dk> |
+1
-1
@@ -7,3 +7,3 @@ [build-system] | ||
| name = "myqueue" | ||
| version = "25.11.0" | ||
| version = "26.3.0" | ||
| description = "Frontend for SLURM/LSF/PBS" | ||
@@ -10,0 +10,0 @@ |
-13
| *.pyc | ||
| myqueue.egg-info/ | ||
| .mypy_cache | ||
| build/ | ||
| dist/ | ||
| docs/_build/ | ||
| .coverage | ||
| htmlcov/ | ||
| venv/ | ||
| # Editor backup files | ||
| *~ | ||
| .*.sw? |
| tests: | ||
| image: python:3.13 | ||
| script: | ||
| - python -m venv venv | ||
| - . venv/bin/activate | ||
| - pip install -U pip | ||
| - pip install flake8 mypy coverage pytest interrogate numpy | ||
| - pip install . | ||
| - echo "import coverage; coverage.process_startup()" > venv/lib/python3.13/site-packages/cov.pth | ||
| - touch cov.cfg | ||
| - export COVERAGE_PROCESS_START=$PWD/cov.cfg | ||
| - coverage run -p --source=myqueue -m pytest -v --color=yes | ||
| - coverage combine -a .coverage.* | ||
| - coverage report -i --precision=2 --fail-under=87.0 --skip-covered --sort=miss | ||
| - coverage html | ||
| - mypy -p myqueue | ||
| - flake8 myqueue | ||
| - interrogate -v -m -i -f 52.7 myqueue -e myqueue/test | ||
| coverage: '/TOTAL +[0-9]+ +[0-9]+ +([0-9]+\.[0-9]+%)/' | ||
| artifacts: | ||
| paths: | ||
| - htmlcov | ||
| expire_in: 1 week | ||
| oldtests: | ||
| image: python:3.10 | ||
| script: | ||
| - pip install pytest numpy | ||
| - pip install -e . | ||
| - pytest | ||
| when: manual |
-6
| Morten Gjerding <mortengjerding@gmail.com> | ||
| Jens Jørgen Mortensen <jjmo@dtu.dk> Jens Jørgen Mortensen <jensj@fysik.dtu.dk> | ||
| Jens Jørgen Mortensen <jjmo@dtu.dk> Jens Jørgen Mortensen <jj@smoerhul.dk> | ||
| Morten Gjerding <mortengjerding@gmail.com> Morten Gjerding <mogje@fysik.dtu.dk> | ||
| Thorbjørn Skovhus <thosk@dtu.dk> Thorbjørn Skovhus <s134264@student.dtu.dk> | ||
| Thorbjørn Skovhus <thosk@dtu.dk> tskovhus <thosk@dtu.dk> |
| version: 2 | ||
| build: | ||
| os: ubuntu-24.04 | ||
| tools: | ||
| python: "3.13" | ||
| sphinx: | ||
| configuration: docs/conf.py | ||
| fail_on_warning: true | ||
| python: | ||
| install: | ||
| - requirements: docs/requirements.txt | ||
| - method: pip | ||
| path: . |
| Changelog | ||
| ========= | ||
| See what's new in MyQueue here: | ||
| https://myqueue.readthedocs.io/releasenotes.html |
| Contributing | ||
| ============ | ||
| See how to contribute here: | ||
| https://myqueue.readthedocs.io/development.html |
Sorry, the diff of this file is not supported yet
| <?xml version="1.0" encoding="UTF-8" standalone="no"?> | ||
| <!-- Created with Inkscape (http://www.inkscape.org/) --> | ||
| <svg | ||
| version="1.1" | ||
| id="svg2" | ||
| xml:space="preserve" | ||
| width="788" | ||
| height="720" | ||
| viewBox="0 0 788 720" | ||
| sodipodi:docname="logo.svg" | ||
| inkscape:version="1.1.2 (76b9e6a115, 2022-02-25)" | ||
| xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" | ||
| xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" | ||
| xmlns="http://www.w3.org/2000/svg" | ||
| xmlns:svg="http://www.w3.org/2000/svg"><defs | ||
| id="defs6"><clipPath | ||
| clipPathUnits="userSpaceOnUse" | ||
| id="clipPath18"><path | ||
| d="M 0,1.206994e-5 H 959.76 V 540.00001 H 0 Z" | ||
| id="path16" /></clipPath></defs><sodipodi:namedview | ||
| id="namedview4" | ||
| pagecolor="#ffffff" | ||
| bordercolor="#666666" | ||
| borderopacity="1.0" | ||
| inkscape:pageshadow="2" | ||
| inkscape:pageopacity="0.0" | ||
| inkscape:pagecheckerboard="0" | ||
| showgrid="false" | ||
| inkscape:zoom="1.0453125" | ||
| inkscape:cx="310.43348" | ||
| inkscape:cy="360.17937" | ||
| inkscape:window-width="2488" | ||
| inkscape:window-height="1376" | ||
| inkscape:window-x="72" | ||
| inkscape:window-y="27" | ||
| inkscape:window-maximized="1" | ||
| inkscape:current-layer="g8" | ||
| showborder="true" | ||
| width="721px" /><g | ||
| id="g8" | ||
| inkscape:groupmode="layer" | ||
| inkscape:label="logo3" | ||
| transform="matrix(1.3333333,0,0,-1.3333333,0,720)"><g | ||
| id="g10" /><path | ||
| d="m 23.85337,251.60008 c 0,8.93829 7.32729,16.18422 16.36592,16.18422 h 65.46156 c 9.03864,0 16.36593,-7.24593 16.36593,-16.18422 v -64.73493 c 0,-8.93828 -7.32729,-16.18404 -16.36593,-16.18404 H 40.21929 c -9.03863,0 -16.36592,7.24576 -16.36592,16.18404 z" | ||
| style="fill:#4472c4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path26" /><path | ||
| d="m 172.62347,251.60008 c 0,8.93829 7.32729,16.18422 16.36593,16.18422 h 65.46156 c 9.03863,0 16.36593,-7.24593 16.36593,-16.18422 v -64.73493 c 0,-8.93828 -7.3273,-16.18404 -16.36593,-16.18404 H 188.9894 c -9.03864,0 -16.36593,7.24576 -16.36593,16.18404 z" | ||
| style="fill:#ffc000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path28" /><path | ||
| d="m 321.39376,251.60008 c 0,8.93829 7.32711,16.18422 16.36575,16.18422 h 65.46173 c 9.03864,0 16.36593,-7.24593 16.36593,-16.18422 v -64.73493 c 0,-8.93828 -7.32729,-16.18404 -16.36593,-16.18404 h -65.46173 c -9.03864,0 -16.36575,7.24576 -16.36575,16.18404 z" | ||
| style="fill:#4472c4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path30" /><path | ||
| d="m 470.16386,252.39855 c 0,8.93828 7.3273,16.18422 16.36593,16.18422 h 65.46155 c 9.03864,0 16.36593,-7.24594 16.36593,-16.18422 v -64.73476 c 0,-8.93828 -7.32729,-16.18422 -16.36593,-16.18422 h -65.46155 c -9.03863,0 -16.36593,7.24594 -16.36593,16.18422 z" | ||
| style="fill:#4472c4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path32" /><path | ||
| d="m 73.51395,363.30937 c 0,6.97919 5.7214,12.63689 12.77895,12.63689 h 51.11384 c 7.05755,0 12.77877,-5.6577 12.77877,-12.63689 v -50.54651 c 0,-6.9792 -5.72122,-12.6369 -12.77877,-12.6369 H 86.2929 c -7.05755,0 -12.77895,5.6577 -12.77895,12.6369 z" | ||
| style="fill:#4472c4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path34" /><path | ||
| d="m 138.55493,445.61386 c 0,4.65279 3.81421,8.42465 8.51943,8.42465 h 34.07642 c 4.70504,0 8.51942,-3.77186 8.51942,-8.42465 v -33.69827 c 0,-4.65279 -3.81438,-8.42465 -8.51942,-8.42465 h -34.07642 c -4.70522,0 -8.51943,3.77186 -8.51943,8.42465 z" | ||
| style="fill:#4472c4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path36" /><path | ||
| d="m 182.64602,500.41163 c 0,2.88986 2.36884,5.2324 5.29096,5.2324 h 21.1635 c 2.92213,0 5.29115,-2.34254 5.29115,-5.2324 V 479.4831 c 0,-2.88969 -2.36902,-5.23222 -5.29115,-5.23222 h -21.1635 c -2.92212,0 -5.29096,2.34253 -5.29096,5.23222 z" | ||
| style="fill:#4472c4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path38" /><path | ||
| d="m 443.29151,363.30937 c 0,6.97919 5.7214,12.63689 12.77895,12.63689 h 51.11384 c 7.05755,0 12.77878,-5.6577 12.77878,-12.63689 v -50.54651 c 0,-6.9792 -5.72123,-12.6369 -12.77878,-12.6369 h -51.11384 c -7.05755,0 -12.77895,5.6577 -12.77895,12.6369 z" | ||
| style="fill:#4472c4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path40" /><path | ||
| d="m 196.71772,363.30937 c 0,6.97919 5.72122,12.63689 12.77876,12.63689 h 51.11385 c 7.05755,0 12.77895,-5.6577 12.77895,-12.63689 v -50.54651 c 0,-6.9792 -5.7214,-12.6369 -12.77895,-12.6369 h -51.11385 c -7.05754,0 -12.77876,5.6577 -12.77876,12.6369 z" | ||
| style="fill:#ffc000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path42" /><path | ||
| d="m 319.31887,363.30937 c 0,6.97919 5.72123,12.63689 12.77877,12.63689 h 51.11385 c 7.05755,0 12.77895,-5.6577 12.77895,-12.63689 v -50.54651 c 0,-6.9792 -5.7214,-12.6369 -12.77895,-12.6369 h -51.11385 c -7.05754,0 -12.77877,5.6577 -12.77877,12.6369 z" | ||
| style="fill:#4472c4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path44" /><path | ||
| d="m 402.29347,445.61386 c 0,4.65279 3.81439,8.42465 8.51942,8.42465 h 34.07644 c 4.70521,0 8.51942,-3.77186 8.51942,-8.42465 v -33.69827 c 0,-4.65279 -3.81421,-8.42465 -8.51942,-8.42465 h -34.07644 c -4.70503,0 -8.51942,3.77186 -8.51942,8.42465 z" | ||
| style="fill:#4472c4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path46" /><path | ||
| d="m 314.08637,445.61386 c 0,4.65279 3.81439,8.42465 8.51942,8.42465 h 34.07644 c 4.70521,0 8.51942,-3.77186 8.51942,-8.42465 v -33.69827 c 0,-4.65279 -3.81421,-8.42465 -8.51942,-8.42465 h -34.07644 c -4.70503,0 -8.51942,3.77186 -8.51942,8.42465 z" | ||
| style="fill:#4472c4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path48" /><path | ||
| d="m 226.94461,445.61386 c 0,4.65279 3.8142,8.42465 8.51942,8.42465 h 34.07643 c 4.70503,0 8.51942,-3.77186 8.51942,-8.42465 v -33.69827 c 0,-4.65279 -3.81439,-8.42465 -8.51942,-8.42465 h -34.07643 c -4.70522,0 -8.51942,3.77186 -8.51942,8.42465 z" | ||
| style="fill:#ffc000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path50" /><path | ||
| d="m 247.99907,500.41163 c 0,2.88986 2.36884,5.2324 5.29096,5.2324 h 21.16351 c 2.92212,0 5.29096,-2.34254 5.29096,-5.2324 V 479.4831 c 0,-2.88969 -2.36884,-5.23222 -5.29096,-5.23222 h -21.16351 c -2.92212,0 -5.29096,2.34253 -5.29096,5.23222 z" | ||
| style="fill:#ffc000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path52" /><path | ||
| d="m 313.05135,500.41163 c 0,2.88986 2.36884,5.2324 5.29096,5.2324 h 21.16352 c 2.92212,0 5.29114,-2.34254 5.29114,-5.2324 V 479.4831 c 0,-2.88969 -2.36902,-5.23222 -5.29114,-5.23222 h -21.16352 c -2.92212,0 -5.29096,2.34253 -5.29096,5.23222 z" | ||
| style="fill:#4472c4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path54" /><path | ||
| d="m 377.199,500.41163 c 0,2.88986 2.36884,5.2324 5.29114,5.2324 h 21.16351 c 2.92213,0 5.29097,-2.34254 5.29097,-5.2324 V 479.4831 c 0,-2.88969 -2.36884,-5.23222 -5.29097,-5.23222 h -21.16351 c -2.9223,0 -5.29114,2.34253 -5.29114,5.23222 z" | ||
| style="fill:#4472c4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.7835" | ||
| id="path56" /><g | ||
| id="g58" | ||
| transform="matrix(0.43043688,0,0,0.42565783,-1.7875798,-902.15576)"><text | ||
| transform="matrix(1,0,0,-1,0,2250)" | ||
| style="font-variant:normal;font-weight:500;font-size:295px;font-family:Futura;-inkscape-font-specification:Futura-Medium;writing-mode:lr-tb;fill:#4472c4;fill-opacity:1;fill-rule:nonzero;stroke:none" | ||
| id="text62"><tspan | ||
| x="0 258.16663 439.33328 643.16663 824.99994 1009.4999 1191.3333" | ||
| y="0" | ||
| sodipodi:role="line" | ||
| id="tspan60">myqueue</tspan></text></g></g></svg> |
-101
| Python API | ||
| ========== | ||
| .. module:: myqueue | ||
| Simple examples | ||
| --------------- | ||
| :: | ||
| from myqueue.task import task | ||
| task('./script.py', tmax='2d', cores=24).submit() | ||
| The helper function :func:`myqueue.task.task` makes it easy to create | ||
| :class:`myqueue.task.Task` objects. The :func:`myqueue.submit` function | ||
| can be used if you want to submit several tasks at the same time:: | ||
| from myqueue import submit | ||
| from myqueue.task import task | ||
| tasks = [task('mymodule@func', args=[arg], tmax='2d', cores=24) | ||
| for arg in [42, 117, 999]] | ||
| submit(*tasks) | ||
| Advanced example | ||
| ---------------- | ||
| :: | ||
| from myqueue.commands import PythonModule | ||
| from myqueue.resources import Resources | ||
| from myqueue.task import Task | ||
| task = Task(PythonModule('module', []), | ||
| Resources(cores=8, tmax=3600)) | ||
| task.submit() | ||
| API | ||
| --- | ||
| .. autofunction:: myqueue.submit | ||
| Tasks | ||
| ..... | ||
| .. module:: myqueue.task | ||
| .. autofunction:: myqueue.task.task | ||
| .. autoclass:: myqueue.task.Task | ||
| :members: submit | ||
| Commands | ||
| ........ | ||
| .. module:: myqueue.commands | ||
| .. autofunction:: myqueue.commands.create_command | ||
| .. autoclass:: myqueue.commands.ShellCommand | ||
| .. autoclass:: myqueue.commands.ShellScript | ||
| .. autoclass:: myqueue.commands.PythonScript | ||
| .. autoclass:: myqueue.commands.PythonModule | ||
| .. autoclass:: myqueue.commands.PythonFunction | ||
| States | ||
| ...... | ||
| .. module:: myqueue.states | ||
| .. autoclass:: myqueue.queue.State | ||
| :members: | ||
| Resources | ||
| ......... | ||
| .. module:: myqueue.resources | ||
| .. autoclass:: myqueue.resources.Resources | ||
| :members: | ||
| Queue | ||
| ..... | ||
| .. module:: myqueue.queue | ||
| .. autoclass:: myqueue.queue.Queue | ||
| :members: | ||
| Schedulers | ||
| .......... | ||
| .. module:: myqueue.schedulers.slurm | ||
| .. autoclass:: myqueue.schedulers.slurm.SLURM | ||
| :members: | ||
| .. module:: myqueue.schedulers.lsf | ||
| .. autoclass:: myqueue.schedulers.lsf.LSF | ||
| :members: | ||
| .. module:: myqueue.schedulers.pbs | ||
| .. autoclass:: myqueue.schedulers.pbs.PBS | ||
| :members: |
-532
| .. _cli: | ||
| ====================== | ||
| Command-line interface | ||
| ====================== | ||
| .. _commands: | ||
| Sub-commands | ||
| ============ | ||
| .. computer generated text: | ||
| .. list-table:: | ||
| :widths: 1 3 | ||
| * - :ref:`help <help>` | ||
| - Show how to use this tool | ||
| * - :ref:`list <list>` (ls) | ||
| - List tasks in queue | ||
| * - :ref:`submit <submit>` | ||
| - Submit task(s) to queue | ||
| * - :ref:`resubmit <resubmit>` | ||
| - Resubmit done, failed or timed-out tasks | ||
| * - :ref:`remove <remove>` (rm) | ||
| - Remove or cancel task(s) | ||
| * - :ref:`info <info>` | ||
| - Show detailed information about MyQueue or a task | ||
| * - :ref:`workflow <workflow>` | ||
| - Submit tasks from Python script or several scripts matching pattern | ||
| * - :ref:`kick <kick>` | ||
| - Restart T and M tasks (timed-out and out-of-memory) | ||
| * - :ref:`modify <modify>` | ||
| - Modify task(s) | ||
| * - :ref:`init <init>` | ||
| - Initialize new queue | ||
| * - :ref:`sync <sync>` | ||
| - Make sure SLURM/LSF/PBS and MyQueue are in sync | ||
| * - :ref:`completion <completion>` | ||
| - Set up tab-completion for Bash | ||
| * - :ref:`config <config>` | ||
| - Create config.py file | ||
| * - :ref:`daemon <daemon>` | ||
| - Interact with the background process | ||
| .. _help: | ||
| Help: Show how to use this tool | ||
| ------------------------------- | ||
| usage: mq help [-h] [cmd] | ||
| Show how to use this tool. | ||
| More help can be found here: https://myqueue.readthedocs.io/. | ||
| cmd: | ||
| Subcommand. | ||
| options: | ||
| -h, --help show this help message and exit | ||
| .. _list: | ||
| List (ls): List tasks in queue | ||
| ------------------------------ | ||
| usage: mq list [-h] [-s qhrdFCTMaA] [-i ID] [-n NAME] [-e ERROR] | ||
| [-c ifnaIrAste] [-S c] [-C] [--not-recursive] [-v] [-q] [-T] | ||
| [folder ...] | ||
| List tasks in queue. | ||
| Only tasks in the chosen folder and its subfolders are shown. | ||
| Columns:: | ||
| i: id | ||
| f: folder | ||
| n: name of task | ||
| a: arguments | ||
| I: info: "+<nargs>,*<repeats>,d<ndeps>" | ||
| r: resources | ||
| A: age | ||
| s: state | ||
| t: time | ||
| e: error message | ||
| Examples:: | ||
| $ mq list -s rq # show running and queued jobs | ||
| $ mq ls -s F abc/ # show failed jobs in abc/ folder | ||
| folder: | ||
| List tasks in this folder and its subfolders. Defaults to current folder. Use --not-recursive to exclude subfolders. | ||
| options: | ||
| -h, --help show this help message and exit | ||
| -s, --states qhrdFCTMaA | ||
| Selection of states. First letters of "queued", | ||
| "hold", "running", "done", "FAILED", "CANCELED", | ||
| "TIMEOUT", "MEMORY", "all" and "ALL". | ||
| -i, --id ID Comma-separated list of task ID's. Use "-i -" for | ||
| reading ID's from stdin (one ID per line; extra stuff | ||
| after the ID will be ignored). | ||
| -n, --name NAME Select only tasks with names matching "NAME" (* and ? | ||
| can be used). | ||
| -e, --error ERROR Select only tasks with error message matching "ERROR" | ||
| (* and ? can be used). | ||
| -c, --columns ifnaIrAste | ||
| Select columns to show. Use "-c a-" to remove the "a" | ||
| column. | ||
| -S, --sort c Sort rows using column c, where c must be one of i, f, | ||
| n, a, r, A, s, t or e. Use "-S c-" for a descending | ||
| sort. | ||
| -C, --count Just show the number of tasks. | ||
| --not-recursive Do not list subfolders. | ||
| -v, --verbose More output. | ||
| -q, --quiet Less output. | ||
| -T, --traceback Show full traceback. | ||
| .. _submit: | ||
| Submit: Submit task(s) to queue | ||
| ------------------------------- | ||
| usage: mq submit [-h] [-d DEPENDENCIES] [-n NAME] [--restart N] | ||
| [--max-tasks MAX_TASKS] [-R RESOURCES] [-w] | ||
| [-X EXTRA_SCHEDULER_ARGS] [-z] [-v] [-q] [-T] | ||
| task [folder ...] | ||
| Submit task(s) to queue. | ||
| Example:: | ||
| $ mq submit script.py -R 24:1d # 24 cores for 1 day | ||
| task: | ||
| Task to submit. | ||
| folder: | ||
| Submit tasks in this folder. Defaults to current folder. | ||
| options: | ||
| -h, --help show this help message and exit | ||
| -d, --dependencies DEPENDENCIES | ||
| Comma-separated task names. | ||
| -n, --name NAME Name used for task. | ||
| --restart N Restart N times if task times out or runs out of | ||
| memory. Time-limit will be doubled for a timed out | ||
| task and number of cores will be increased to the next | ||
| number of nodes for a task that runs out of memory. | ||
| --max-tasks MAX_TASKS | ||
| Maximum number of tasks to submit. | ||
| -R, --resources RESOURCES | ||
| With RESOURCES=[m:]c[:p][:g][:n]:t[:w] where m=use-mpi | ||
| (s: serial, p:use MPI), c=cores, p=processes, g=gpus- | ||
| per-node, n=nodename, t=tmax and w=weight. Number of | ||
| cores and tmax must always be specified. Examples: | ||
| "8:1h", 8 cores for 1 hour (use "m" for minutes, "h" | ||
| for hours and "d" for days). "16:1:30m": 16 cores, 1 | ||
| process, half an hour. "40:xeon40:5m": 40 cores on | ||
| "xeon40" for 5 minutes. "40:1:xeon40:5m": 40 cores and | ||
| 1 process on "xeon40" for 5 minutes. | ||
| "40:1:xeon40:5m:0.5": same as previous, but with a | ||
| weight of 0.5. Use "4G" for 4 GPUs per node. | ||
| "s:40:1d": 40 cores for one day, do not call mpiexec. | ||
| -w, --workflow Write <task-name>.state file when task has finished. | ||
| -X, --extra-scheduler-args EXTRA_SCHEDULER_ARGS | ||
| Extra arguments for scheduler. Example: -X bla-bla. | ||
| For arguments that start with a dash, leave out the | ||
| space: -X--gres=gpu:4 or -X=--gres=gpu:4. Can be used | ||
| multiple times. | ||
| -z, --dry-run Show what will happen without doing anything. | ||
| -v, --verbose More output. | ||
| -q, --quiet Less output. | ||
| -T, --traceback Show full traceback. | ||
| .. _resubmit: | ||
| Resubmit: Resubmit done, failed or timed-out tasks | ||
| -------------------------------------------------- | ||
| usage: mq resubmit [-h] [--keep] [-R RESOURCES] [-w] [-X EXTRA_SCHEDULER_ARGS] | ||
| [-s qhrdFCTMaA] [-i ID] [-n NAME] [-e ERROR] [-z] [-v] [-q] | ||
| [-T] [-r] | ||
| [folder ...] | ||
| Resubmit done, failed or timed-out tasks. | ||
| Example:: | ||
| $ mq resubmit -i 4321 # resubmit job with id=4321 | ||
| folder: | ||
| Task-folder. Use --recursive (or -r) to include subfolders. | ||
| options: | ||
| -h, --help show this help message and exit | ||
| --keep Do not remove old tasks. | ||
| -R, --resources RESOURCES | ||
| With RESOURCES=[m:]c[:p][:g][:n]:t[:w] where m=use-mpi | ||
| (s: serial, p:use MPI), c=cores, p=processes, g=gpus- | ||
| per-node, n=nodename, t=tmax and w=weight. Number of | ||
| cores and tmax must always be specified. Examples: | ||
| "8:1h", 8 cores for 1 hour (use "m" for minutes, "h" | ||
| for hours and "d" for days). "16:1:30m": 16 cores, 1 | ||
| process, half an hour. "40:xeon40:5m": 40 cores on | ||
| "xeon40" for 5 minutes. "40:1:xeon40:5m": 40 cores and | ||
| 1 process on "xeon40" for 5 minutes. | ||
| "40:1:xeon40:5m:0.5": same as previous, but with a | ||
| weight of 0.5. Use "4G" for 4 GPUs per node. | ||
| "s:40:1d": 40 cores for one day, do not call mpiexec. | ||
| -w, --workflow Write <task-name>.state file when task has finished. | ||
| -X, --extra-scheduler-args EXTRA_SCHEDULER_ARGS | ||
| Extra arguments for scheduler. Example: -X bla-bla. | ||
| For arguments that start with a dash, leave out the | ||
| space: -X--gres=gpu:4 or -X=--gres=gpu:4. Can be used | ||
| multiple times. | ||
| -s, --states qhrdFCTMaA | ||
| Selection of states. First letters of "queued", | ||
| "hold", "running", "done", "FAILED", "CANCELED", | ||
| "TIMEOUT", "MEMORY", "all" and "ALL". | ||
| -i, --id ID Comma-separated list of task ID's. Use "-i -" for | ||
| reading ID's from stdin (one ID per line; extra stuff | ||
| after the ID will be ignored). | ||
| -n, --name NAME Select only tasks with names matching "NAME" (* and ? | ||
| can be used). | ||
| -e, --error ERROR Select only tasks with error message matching "ERROR" | ||
| (* and ? can be used). | ||
| -z, --dry-run Show what will happen without doing anything. | ||
| -v, --verbose More output. | ||
| -q, --quiet Less output. | ||
| -T, --traceback Show full traceback. | ||
| -r, --recursive Use also subfolders. | ||
| .. _remove: | ||
| Remove (rm): Remove or cancel task(s) | ||
| ------------------------------------- | ||
| usage: mq remove [-h] [-f] [-s qhrdFCTMaA] [-i ID] [-n NAME] [-e ERROR] [-z] | ||
| [-v] [-q] [-T] [-r] | ||
| [folder ...] | ||
| Remove or cancel task(s). | ||
| Examples:: | ||
| $ mq remove -i 4321,4322 # remove jobs with ids 4321 and 4322 | ||
| $ mq rm -s d . -r # remove done jobs in this folder and its subfolders | ||
| folder: | ||
| Task-folder. Use --recursive (or -r) to include subfolders. | ||
| options: | ||
| -h, --help show this help message and exit | ||
| -f, --force Remove also workflow tasks. | ||
| -s, --states qhrdFCTMaA | ||
| Selection of states. First letters of "queued", | ||
| "hold", "running", "done", "FAILED", "CANCELED", | ||
| "TIMEOUT", "MEMORY", "all" and "ALL". | ||
| -i, --id ID Comma-separated list of task ID's. Use "-i -" for | ||
| reading ID's from stdin (one ID per line; extra stuff | ||
| after the ID will be ignored). | ||
| -n, --name NAME Select only tasks with names matching "NAME" (* and ? | ||
| can be used). | ||
| -e, --error ERROR Select only tasks with error message matching "ERROR" | ||
| (* and ? can be used). | ||
| -z, --dry-run Show what will happen without doing anything. | ||
| -v, --verbose More output. | ||
| -q, --quiet Less output. | ||
| -T, --traceback Show full traceback. | ||
| -r, --recursive Use also subfolders. | ||
| .. _info: | ||
| Info: Show detailed information about MyQueue or a task | ||
| ------------------------------------------------------- | ||
| usage: mq info [-h] [-v] [-q] [-T] [-i ID] [-A] [folder] | ||
| Show detailed information about MyQueue or a task. | ||
| Example:: | ||
| $ mq info # show information about MyQueue | ||
| $ mq info -i 12345 # show information about task with id=12345 | ||
| folder: | ||
| Show information for queues in this folder and its subfolders. Defaults to current folder. | ||
| options: | ||
| -h, --help show this help message and exit | ||
| -v, --verbose More output. | ||
| -q, --quiet Less output. | ||
| -T, --traceback Show full traceback. | ||
| -i, --id ID Show information about specific task. | ||
| -A, --all Show information about all your queues. | ||
| .. _workflow: | ||
| Workflow: Submit tasks from Python script or several scripts matching pattern | ||
| ----------------------------------------------------------------------------- | ||
| usage: mq workflow [-h] [--max-tasks MAX_TASKS] [-f] [-t TARGETS] [-p] | ||
| [-a ARGUMENTS] [-z] [-v] [-q] [-T] | ||
| script [folder ...] | ||
| Submit tasks from Python script or several scripts matching pattern. | ||
| The script(s) must define a workflow() function as shown here:: | ||
| $ cat flow.py | ||
| from myqueue.workflow import run | ||
| def workflow(): | ||
| with run(<task1>): | ||
| run(<task2>) | ||
| $ mq workflow flow.py F1/ F2/ # submit tasks in F1 and F2 folders | ||
| script: | ||
| Submit tasks from workflow script. | ||
| folder: | ||
| Submit tasks in this folder. Defaults to current folder. | ||
| options: | ||
| -h, --help show this help message and exit | ||
| --max-tasks MAX_TASKS | ||
| Maximum number of tasks to submit. | ||
| -f, --force Submit also failed tasks. | ||
| -t, --targets TARGETS | ||
| Comma-separated target names. Without any targets, all | ||
| tasks will be submitted. | ||
| -p, --pattern Use submit scripts matching "script" pattern in all | ||
| subfolders. | ||
| -a, --arguments ARGUMENTS | ||
| Pass arguments to workflow() function. Example: "-a | ||
| name=hello,n=5" will call workflow(name='hello', n=5). | ||
| -z, --dry-run Show what will happen without doing anything. | ||
| -v, --verbose More output. | ||
| -q, --quiet Less output. | ||
| -T, --traceback Show full traceback. | ||
| .. _kick: | ||
| Kick: Restart T and M tasks (timed-out and out-of-memory) | ||
| --------------------------------------------------------- | ||
| usage: mq kick [-h] [-z] [-v] [-q] [-T] [folder] | ||
| Restart T and M tasks (timed-out and out-of-memory). | ||
| The queue is kicked automatically every ten minutes - so you don't have to do | ||
| it manually. | ||
| folder: | ||
| Kick tasks in this folder and its subfolders. Defaults to current folder. | ||
| options: | ||
| -h, --help show this help message and exit | ||
| -z, --dry-run Show what will happen without doing anything. | ||
| -v, --verbose More output. | ||
| -q, --quiet Less output. | ||
| -T, --traceback Show full traceback. | ||
| .. _modify: | ||
| Modify: Modify task(s) | ||
| ---------------------- | ||
| usage: mq modify [-h] [-E STATES] [-N NEW_STATE] [-s qhrdFCTMaA] [-i ID] | ||
| [-n NAME] [-e ERROR] [-z] [-v] [-q] [-T] [-r] | ||
| [folder ...] | ||
| Modify task(s). | ||
| The following state changes are allowed: h->q or q->h. | ||
| folder: | ||
| Task-folder. Use --recursive (or -r) to include subfolders. | ||
| options: | ||
| -h, --help show this help message and exit | ||
| -E, --email STATES Send email when state changes to one of the specified | ||
| states (one or more of the letters: rdFCTMA). | ||
| -N, --new-state NEW_STATE | ||
| New state (one of the letters: qhrdFCTM). | ||
| -s, --states qhrdFCTMaA | ||
| Selection of states. First letters of "queued", | ||
| "hold", "running", "done", "FAILED", "CANCELED", | ||
| "TIMEOUT", "MEMORY", "all" and "ALL". | ||
| -i, --id ID Comma-separated list of task ID's. Use "-i -" for | ||
| reading ID's from stdin (one ID per line; extra stuff | ||
| after the ID will be ignored). | ||
| -n, --name NAME Select only tasks with names matching "NAME" (* and ? | ||
| can be used). | ||
| -e, --error ERROR Select only tasks with error message matching "ERROR" | ||
| (* and ? can be used). | ||
| -z, --dry-run Show what will happen without doing anything. | ||
| -v, --verbose More output. | ||
| -q, --quiet Less output. | ||
| -T, --traceback Show full traceback. | ||
| -r, --recursive Use also subfolders. | ||
| .. _init: | ||
| Init: Initialize new queue | ||
| -------------------------- | ||
| usage: mq init [-h] [-z] [-v] [-q] [-T] | ||
| Initialize new queue. | ||
| This will create a .myqueue/ folder in your current working directory and copy | ||
| ~/.myqueue/config.py into it. | ||
| options: | ||
| -h, --help show this help message and exit | ||
| -z, --dry-run Show what will happen without doing anything. | ||
| -v, --verbose More output. | ||
| -q, --quiet Less output. | ||
| -T, --traceback Show full traceback. | ||
| .. _sync: | ||
| Sync: Make sure SLURM/LSF/PBS and MyQueue are in sync | ||
| ----------------------------------------------------- | ||
| usage: mq sync [-h] [-z] [-v] [-q] [-T] [folder] | ||
| Make sure SLURM/LSF/PBS and MyQueue are in sync. | ||
| Remove tasks that SLURM/LSF/PBS doesn't know about. Also removes a task if | ||
| its corresponding folder no longer exists. | ||
| folder: | ||
| Sync tasks in this folder and its subfolders. Defaults to current folder. | ||
| options: | ||
| -h, --help show this help message and exit | ||
| -z, --dry-run Show what will happen without doing anything. | ||
| -v, --verbose More output. | ||
| -q, --quiet Less output. | ||
| -T, --traceback Show full traceback. | ||
| .. _completion: | ||
| Completion: Set up tab-completion for Bash | ||
| ------------------------------------------ | ||
| usage: mq completion [-h] [-v] [-q] [-T] | ||
| Set up tab-completion for Bash. | ||
| Do this:: | ||
| $ mq completion >> ~/.bashrc | ||
| options: | ||
| -h, --help show this help message and exit | ||
| -v, --verbose More output. | ||
| -q, --quiet Less output. | ||
| -T, --traceback Show full traceback. | ||
| .. _config: | ||
| Config: Create config.py file | ||
| ----------------------------- | ||
| usage: mq config [-h] [-Q QUEUE_NAME] [--in-place] [-z] [-v] [-q] [-T] | ||
| [{local,slurm,pbs,lsf}] | ||
| Create config.py file. | ||
| This tool will try to guess your configuration. Some hand editing afterwards | ||
| will most likely be needed. Read more about config.py file here:: | ||
| https://myqueue.readthedocs.io/configuration.html | ||
| Example:: | ||
| $ mq config -Q hpc lsf | ||
| {local,slurm,pbs,lsf}: | ||
| Name of scheduler. Will be guessed if not supplied. | ||
| options: | ||
| -h, --help show this help message and exit | ||
| -Q, --queue-name QUEUE_NAME | ||
| Name of queue. May be needed. | ||
| --in-place Overwrite ~/.myqueue/config.py file. | ||
| -z, --dry-run Show what will happen without doing anything. | ||
| -v, --verbose More output. | ||
| -q, --quiet Less output. | ||
| -T, --traceback Show full traceback. | ||
| .. _daemon: | ||
| Daemon: Interact with the background process | ||
| -------------------------------------------- | ||
| usage: mq daemon [-h] [-z] [-v] [-q] [-T] {start,stop,status} [folder] | ||
| Interact with the background process. | ||
| Manage daemon for sending notifications, restarting, holding and releasing | ||
| tasks. | ||
| {start,stop,status}: | ||
| Start, stop or check status. | ||
| folder: | ||
| Pick daemon process corresponding to this folder. Defaults to current folder. | ||
| options: | ||
| -h, --help show this help message and exit | ||
| -z, --dry-run Show what will happen without doing anything. | ||
| -v, --verbose More output. | ||
| -q, --quiet Less output. | ||
| -T, --traceback Show full traceback. |
-195
# -*- coding: utf-8 -*-
"""Configuration file for the Sphinx documentation builder.

Only a selection of the most common options is set here.  The full
list is documented at http://www.sphinx-doc.org/en/master/config
"""
import datetime
import importlib.metadata

# -- Project information -----------------------------------------------------
project = 'MyQueue'
copyright = f'2018-{datetime.date.today().year}, J. J. Mortensen'
author = 'J. J. Mortensen'

intersphinx_mapping = {'python': ('https://docs.python.org/3.10', None)}

# The full version, including alpha/beta/rc tags, is read from the
# installed package metadata.
version = importlib.metadata.version('myqueue')
release = version

# -- General configuration ---------------------------------------------------
# Sphinx extension modules, both bundled ('sphinx.ext.*') and custom.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.extlinks',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'sphinx.ext.intersphinx']

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# Suffix of the source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# Patterns, relative to the source directory, of files and directories
# to ignore when looking for source files.  Also affects
# html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The Pygments (syntax highlighting) style to use.
pygments_style = None

# Shorthand roles for linking to GitLab issues and merge requests.
extlinks = {'issue':
            ('https://gitlab.com/myqueue/myqueue/-/issues/%s',
             'issue: #%s'),
            'mr':
            ('https://gitlab.com/myqueue/myqueue/-/merge_requests/%s',
             'MR: !%s')}

# -- Options for HTML output -------------------------------------------------
# Theme for HTML and HTML Help pages.
html_theme = 'alabaster'
html_theme_options = {'logo': 'logo.svg'}
html_favicon = '_static/favicon.ico'

# Paths that contain custom static files (such as style sheets),
# relative to this directory.  Copied after the builtin static files,
# so a file named "default.css" overrides the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for the HTML help builder.
htmlhelp_basename = 'MyQueuedoc'

# -- Options for LaTeX output ------------------------------------------------
# All LaTeX knobs (papersize, pointsize, preamble, figure_align, ...)
# are left at their defaults.
latex_elements = {
}

# Grouping of the document tree into LaTeX files: tuples of
# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, 'MyQueue.tex', 'MyQueue Documentation',
     'Jens Jørgen Mortensen', 'manual'),
]

# -- Options for manual page output ------------------------------------------
# One entry per manual page: tuples of
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'myqueue', 'MyQueue Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------
# Grouping of the document tree into Texinfo files: tuples of
# (source start file, target name, title, author, dir menu entry,
# description, category).
texinfo_documents = [
    (master_doc, 'MyQueue', 'MyQueue Documentation',
     author, 'MyQueue', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project

# Files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# -- Extension configuration -------------------------------------------------
autodoc_typehints = 'description'
| ==================== | ||
| Configure your queue | ||
| ==================== | ||
| You need to configure your SLURM/PBS/LSF system with a ``~/.myqueue/config.py`` | ||
| file. The file describes what your system looks like: Names of the nodes, | ||
| number of cores and other things. | ||
| .. highlight:: bash | ||
| The simplest way is to copy the file from a friend who has already written a | ||
| configuration file for your supercomputer:: | ||
| $ ls ~/../*/.myqueue/config.py | ||
| /home/you/../alice/.myqueue/config.py | ||
| /home/you/../bob/.myqueue/config.py | ||
| ... | ||
| $ mkdir ~/.myqueue | ||
| $ cp ~alice/.myqueue/config.py ~/.myqueue/ | ||
| .. highlight:: python | ||
| Here is an example configuration file: | ||
| .. literalinclude:: example_config.py | ||
| The configuration file uses Python syntax to define a dictionary called | ||
| ``config``. The dictionary can have the following key-value pairs | ||
| (``scheduler`` is required, the rest are optional): | ||
| .. list-table:: | ||
| * - Key | ||
| - Description | ||
| - type | ||
| - default | ||
| * - ``scheduler`` | ||
| - :ref:`scheduler` | ||
| - ``str`` | ||
| - | ||
| * - ``nodes`` | ||
| - :ref:`nodes` | ||
| - ``list[tuple[str, dict[str, Any]]]`` | ||
| - ``[]`` | ||
| * - ``mpiexec`` | ||
| - :ref:`mpiexec` | ||
| - ``str`` | ||
| - ``'mpiexec'`` | ||
| * - ``parallel_python`` | ||
| - :ref:`parallel_python` | ||
| - ``str`` | ||
| - ``'python3'`` | ||
| * - ``use_mpi`` | ||
| - :ref:`use_mpi` | ||
| - ``bool`` | ||
| - ``True`` | ||
| * - ``serial_python`` | ||
| - :ref:`serial_python` | ||
| - ``str`` | ||
| - ``'python3'`` | ||
| * - ``extra_args`` | ||
| - :ref:`extra_args` | ||
| - ``list[str]`` | ||
| - ``[]`` | ||
| * - ``maximum_total_task_weight`` | ||
| - :ref:`task_weight` | ||
| - ``float`` | ||
| - ``inf`` | ||
| * - ``default_task_weight`` | ||
| - :ref:`task_weight` | ||
| - ``float`` | ||
| - ``0.0`` | ||
| * - ``notifications`` | ||
| - :ref:`notifications` | ||
| - ``dict[str, str]`` | ||
| - ``{}`` | ||
| See details below. | ||
| .. _autoconfig: | ||
| Guessing your configuration | ||
| =========================== | ||
| .. highlight:: bash | ||
| Try the following :ref:`command <config>`:: | ||
| $ mq config | ||
| ... | ||
| It will try to guess your configuration. It can be a good starting point | ||
| for a ``config.py`` file. You may need to help ``mq config`` a bit by | ||
| giving it the scheduler name and/or the queue name (try ``mq config -h``). | ||
| .. _scheduler: | ||
| Name of scheduler | ||
| ================= | ||
| The type of scheduler you are using must be ``'slurm'``, ``'pbs'``, ``'lsf'`` | ||
| or ``'local'``. The *local* scheduler can be used for testing on a system | ||
| without SLURM/LSF/PBS. Start the local scheduler with:: | ||
| $ python3 -m myqueue.local | ||
| .. highlight:: python | ||
| .. _nodes: | ||
| Description of node types | ||
| ========================= | ||
| This is a list of ``('<node-name>', <dictionary>)`` tuples describing the | ||
| different types of nodes:: | ||
| ('xeon24', {'cores': 24, 'memory': '255GB'}) | ||
| .. highlight:: bash | ||
| The node-name is what SLURM calls a partition-name and you would use it like | ||
| this:: | ||
| $ sbatch --partition=<node-name> ... script | ||
| or like this with a PBS system:: | ||
| $ qsub -l nodes=<node-name>:ppn=... ... script | ||
| Each dictionary *must* have the following entries: | ||
| * ``cores``: Number of cores for the node type. | ||
| * ``memory``: The memory available for the entire node. Specified as a string | ||
| such as ``'500GiB'``. MyQueue understands the following memory units: | ||
| ``MB``, ``MiB``, ``GB`` and ``GiB``. Note: The ``memory`` entry is only | ||
| used by the LSF scheduler - other schedulers can leave this out. | ||
| Other possible keys that you normally don't need are: | ||
| * ``extra_args``: See :ref:`extra_args`. | ||
| * ``mpiargs``: See the `source code`_ for how to use this. | ||
| * ``nodename``: The *real* node-name in case you have several node | ||
| specifications referring to the same *real* node-name:: | ||
| config = { | ||
| ..., | ||
| 'nodes': [ | ||
| ('xeon24', {'cores': 24}), | ||
| ('xeon24b', {'cores': 24, | ||
| 'extra_args': ['--arg=value'], | ||
| 'nodename': 'xeon24'}), | ||
| ...], | ||
| ...} | ||
| .. tip:: | ||
| Set ``nodename`` to the empty string if you don't want a | ||
| ``--partition=...`` command-line argument when submitting jobs | ||
| with SLURM. This can sometimes conflict with ``extra_args`` like | ||
| ``--qos=...``. | ||
| * ``special``: When set to ``True`` (default is ``False``) a node can only | ||
| be selected by name. Useful for *special* nodes (like nodes with a GPU) | ||
| that you don't want to submit to with a resources specification like | ||
| ``-R 24:1h``. Instead, you would have to use | ||
| ``-R 24:<name-of-special-node>:1h``. | ||
| The order of your nodes is significant. If you ask for :math:`N` cores, | ||
| MyQueue will pick the first type of node from the list that has a core count | ||
| that divides :math:`N`. Given the configuration shown above, here are some | ||
| example :ref:`resource <resources>` specifications: | ||
| ``48:12h``: 2 :math:`\times` *xeon24* | ||
| ``48:xeon8:12h``: 6 :math:`\times` *xeon8* | ||
| ``48:xeon16:12h``: 3 :math:`\times` *xeon16* | ||
| .. _source code: https://gitlab.com/myqueue/myqueue/blob/master/myqueue/schedulers/slurm.py | ||
| .. _mpiexec: | ||
| MPI-run command | ||
| =============== | ||
| .. highlight:: python | ||
| By default, parallel jobs will be started with the ``mpiexec`` command found | ||
| on your ``PATH``. You can specify a different executable with this extra line | ||
| in your ``config.py`` file:: | ||
| config = { | ||
| ..., | ||
| 'mpiexec': '/path/to/your/mpiexec/my-mpiexec', | ||
| ...} | ||
| .. _parallel_python: | ||
| Parallel Python interpreter | ||
| =========================== | ||
| If you want to use an MPI enabled Python interpreter for running your Python | ||
| scripts in parallel then you must specify which one you want to use:: | ||
| config = { | ||
| ..., | ||
| 'parallel_python': 'your-python', | ||
| ...} | ||
| Use ``'asap-python'`` for ASAP_ and ``'gpaw python'`` for GPAW_. | ||
| For MPI4PY_, you don't need an MPI-enabled interpreter. | ||
| .. _MPI4PY: https://mpi4py.readthedocs.io/en/stable/index.html | ||
| .. _ASAP: https://wiki.fysik.dtu.dk/asap/ | ||
| .. _GPAW: https://gpaw.readthedocs.io/ | ||
| .. _use_mpi: | ||
| Using MPI or not? | ||
| ================= | ||
| If you want MyQueue to *not* start your job with ``mpiexec`` | ||
| (perhaps your job does that itself) then you can use:: | ||
| config = { | ||
| ..., | ||
| 'use_mpi': False, | ||
| ...} | ||
| For individual jobs, this can be overridden by specifying it in the | ||
| :ref:`resource specification <resources>`: | ||
| * ``s:40:1h``: don't use MPI | ||
| * ``p:40:1h``: do use MPI | ||
| .. _serial_python: | ||
| Serial Python interpreter | ||
| ========================= | ||
| By default, ``python3`` is used as the Python interpreter for serial jobs. | ||
| Use the ``serial_python`` configuration variable if you want to set it to | ||
| something else. | ||
| .. _extra_args: | ||
| Extra arguments for submit command | ||
| ================================== | ||
| Add extra arguments to the ``sbatch``, ``qsub`` or ``bsub`` command. | ||
| Example:: | ||
| config = { | ||
| ..., | ||
| 'extra_args': ['arg1', 'arg2'], | ||
| 'nodes': [ | ||
| ('xeon24', {'cores': 24, 'extra_args': ['arg3', 'arg4']}), | ||
| ...], | ||
| ...} | ||
| would give ``<submit command> arg1 arg2 arg3 arg4``. | ||
| .. _task_weight: | ||
| Task weight | ||
| =========== | ||
| In order to limit the number of tasks running at the same time, you can | ||
| submit them like this: | ||
| .. highlight:: bash | ||
| :: | ||
| $ mq submit ... -R 24:2h:5 # sets weight to 5 | ||
| (see :ref:`resources`) or mark them in your workflow script like this: | ||
| .. highlight:: python | ||
| :: | ||
| run(..., weight=5) | ||
| and set a global maximum:: | ||
| config = { | ||
| ..., | ||
| 'maximum_total_task_weight': 100, | ||
| ...} | ||
| This will allow only 100 / 5 = 20 tasks in the ``running`` or ``queued`` | ||
| state. If you submit more than 20 tasks then some of them will be put in the | ||
| ``hold`` state. As tasks finish successfully (``done`` state), tasks will be | ||
| moved from ``hold`` to ``queued``. | ||
| One use case would be to limit the disk-space used by running tasks. Note that | ||
| tasks that fail will be counted as still running, so you will have to ``mq | ||
| rm`` those and also remember to remove big files left behind. | ||
| One can also change the default task weight of 0 to something else by | ||
| setting the ``default_task_weight`` configuration variable. | ||
| .. _notifications: | ||
| Notifications | ||
| ============= | ||
| :: | ||
| config = { | ||
| ..., | ||
| 'notifications': {'email': 'you@somewhere.org', | ||
| 'host': 'smtp.somewhere.org', | ||
| 'username': 'name'}, | ||
| ...} |
| Development | ||
| =========== | ||
| Git repository | ||
| -------------- | ||
| Code, merge requests and issues can be found here: | ||
| https://gitlab.com/myqueue/myqueue/ | ||
| Contributions and suggestions for improvements are welcome. | ||
| Getting help | ||
| ------------ | ||
| For announcements, discussions, questions or help, go to our `#myqueue` room on | ||
| Matrix_. | ||
| .. _Matrix: https://matrix.to/#/#myqueue:matrix.org | ||
| Testing | ||
| ------- | ||
| Run the tests like this:: | ||
| $ pytest [...] | ||
| and report any errors you get: https://gitlab.com/myqueue/myqueue/issues. | ||
| Documentation | ||
| ------------- | ||
| Whenever the output of *mq* changes, please update the examples in the | ||
| ReStructuredText documentation-files with:: | ||
| $ pytest (... with update=True in rst_test.py ...) | ||
| Whenever changes are made to the command-line tool, please update the | ||
| documentation and tab-completion script with:: | ||
| $ python -m myqueue.utils | ||
| New release | ||
| ----------- | ||
| :: | ||
| $ python -m build | ||
| $ twine upload dist/* |
| ============= | ||
| Documentation | ||
| ============= | ||
| Submitting a task with MyQueue typically works like this:: | ||
| $ mq submit <task> -R <resources> | ||
| or:: | ||
| $ mq submit "<task> <arguments>" -R <resources> | ||
| And checking the result looks like this:: | ||
| $ mq list -s <states> # or just: mq ls | ||
| Below, we describe the important concepts :ref:`tasks`, :ref:`arguments`, | ||
| :ref:`resources` and :ref:`states`. | ||
| .. _tasks: | ||
| Tasks | ||
| ===== | ||
| There are five kinds of tasks: :ref:`pymod`, :ref:`pyfunc`, :ref:`pyscript`, | ||
| :ref:`shellcmd` and :ref:`shellscript`. | ||
| .. _pymod: | ||
| Python module | ||
| ------------- | ||
| Examples: | ||
| * ``module`` | ||
| * ``module.submodule`` (a Python submodule) | ||
| These are executed as ``python3 -m module`` so Python must be able to import | ||
| the modules. | ||
| .. _pyfunc: | ||
| Function in a Python module | ||
| --------------------------- | ||
| Examples: | ||
| * ``module@function`` | ||
| * ``module.submodule@function`` | ||
| These are executed as ``python3 -c "import module; module.function(...)"`` so | ||
| Python must be able to import the function from the module. | ||
| .. _pyscript: | ||
| Python script | ||
| ------------- | ||
| Examples: | ||
| * ``script.py`` (use ``script.py`` in folders where tasks are running) | ||
| * ``./script.py`` (use ``script.py`` from folder where tasks were submitted) | ||
| * ``/path/to/script.py`` (absolute path) | ||
| Executed as ``python3 script.py``. | ||
| .. _shellcmd: | ||
| Shell command | ||
| ------------- | ||
| Example: | ||
| * ``shell:command`` | ||
| The command must be in ``$PATH``. | ||
| .. _shellscript: | ||
| Shell-script | ||
| ------------ | ||
| Example: | ||
| * ``./script`` | ||
| Executed as ``. ./script``. | ||
| .. _arguments: | ||
| Arguments | ||
| ......... | ||
| All tasks can take extra arguments by enclosing task and arguments in quotes | ||
| like this:: | ||
| "<task> <arg1> <arg2> ..." | ||
| Arguments will simply be added to the command-line that executes the task, | ||
| except for :ref:`pyfunc` tasks where the arguments are converted to Python | ||
| literals and passed to the function. Some examples:: | ||
| $ mq submit "script.py ABC 123" | ||
| would run ``python3 script.py ABC 123`` and:: | ||
| $ mq submit "mymod@func ABC 123" | ||
| would run ``python3 -c "import mymod; mymod.func('ABC', 123)"``. | ||
| .. _venv: | ||
| Using a Python virtual environment | ||
| ================================== | ||
| If a task is submitted from a virtual environment then that ``venv`` will also | ||
| be activated in the script that runs the task. MyQueue does this by looking | ||
| for an ``VIRTUAL_ENV`` environment variable. | ||
| .. _resources: | ||
| Resources | ||
| ========= | ||
| A resource specification has the form:: | ||
| [use-mpi:]cores[:processes][:gpus][:nodename]:tmax[:weight] | ||
| * ``use-mpi``: Use ``p`` (as in parallel) if you want to start your job with | ||
| ``mpiexec``; use ``s`` (as in serial) if you don't. The default is | ||
| to use ``mpiexec`` unless you have specified ``'use_mpi': False`` in | ||
| your configuration file: :ref:`use_mpi`. | ||
| * ``cores``: Number of cores to reserve. | ||
| * ``processes``: Number of MPI processes to start | ||
| (defaults to number of cores). | ||
| * ``gpus``: Number of GPUs per node to allocate. | ||
| Example: ``4G``. Default is no GPUs (``0G``). | ||
| * ``nodename``: Node-name | ||
| (defaults to best match in :ref:`the list of node-types <nodes>`). | ||
| * ``tmax``: Maximum time (use *s*, *m*, *h* and *d* for seconds, minutes, | ||
| hours and days respectively). Examples: ``1h``, ``2d``. Default | ||
| is ``10m``. | ||
| * ``weight``: weight of a task. Can be used to limit the number of | ||
| simultaneously running tasks. See :ref:`task_weight`. | ||
| Defaults to 0. | ||
| Both the :ref:`submit <submit>` and :ref:`resubmit <resubmit>` commands | ||
| as well as the :func:`myqueue.task.task` function, take | ||
| an optional *resources* argument (``-R`` or ``--resources``). | ||
| Default resources are a modest one core and 10 minutes. | ||
| Examples: | ||
| * ``1:1h`` 1 core and 1 process for 1 hour | ||
| * ``64:xeon:2d`` 64 cores and 64 processes on "xeon" nodes for 2 days | ||
| * ``24:1:30m`` 24 cores and 1 process for 30 minutes | ||
| (useful for OpenMP tasks) | ||
| * ``s:24:30m`` 24 cores and 24 processes for 30 minutes | ||
| (useful for tasks that do their own *mpiexec* call) | ||
| * ``96:4:4G:10h`` 96 cores and 4 processes and 4 GPUs per node for 10 hours | ||
| Resources can also be specified via special comments in scripts: | ||
| .. highlight:: python | ||
| :: | ||
| # MQ: resources=40:1d | ||
| from somewhere import run | ||
| run('something') | ||
| .. _preamble: | ||
| Preamble | ||
| ======== | ||
| The value of the :envvar:`MYQUEUE_PREAMBLE` environment variable | ||
| will be inserted at the beginning of the script that will be | ||
| submitted. | ||
| .. highlight:: bash | ||
| .. tip:: | ||
| To see the script that you are about to submit, use:: | ||
| $ mq submit ... -vz # --verbose --dry-run | ||
| .. _states: | ||
| States | ||
| ====== | ||
| These are the 8 possible states a task can be in: | ||
| ========== ================================================ | ||
| *queued* waiting for resources to become available | ||
| *hold* on hold | ||
| *running* actually running | ||
| *done* successfully finished | ||
| *FAILED* something bad happened | ||
| *MEMORY* ran out of memory | ||
| *TIMEOUT* ran out of time | ||
| *CANCELED* a dependency failed or ran out of memory or time | ||
| ========== ================================================ | ||
| The ``-s`` or ``--states`` options of the | ||
| :ref:`list <list>`, :ref:`resubmit <resubmit>`, :ref:`remove <remove>` and | ||
| :ref:`modify <modify>` commands use the following abbreviations: ``q``, ``h``, ``r``, | ||
| ``d``, ``F``, ``C``, ``M`` and ``T``. It's also possible to use ``a`` as a | ||
| shortcut for all the "good" states ``qhrd`` and ``A`` for the "bad" ones | ||
| ``FCMT``. | ||
| Examples | ||
| ======== | ||
| * Sleep for 2 seconds on 1 core using the :func:`time.sleep()` Python | ||
| function:: | ||
| $ mq submit "time@sleep 2" -R 1:1m | ||
| 1 ./ time@sleep 2 +1 1:1m | ||
| 1 task submitted | ||
| * Run the ``echo hello`` shell command in two folders | ||
| (using the defaults of 1 core for 10 minutes):: | ||
| $ mkdir f1 f2 | ||
| $ mq submit "shell:echo hello" f1/ f2/ | ||
| Submitting tasks: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2/2 | ||
| 2 ./f1/ shell:echo hello +1 1:10m | ||
| 3 ./f2/ shell:echo hello +1 1:10m | ||
| 2 tasks submitted | ||
| * Run ``script.py`` on 8 cores for 10 hours:: | ||
| $ echo "x = 1 / 0" > script.py | ||
| $ mq submit script.py -R 8:10h | ||
| 4 ./ script.py 8:10h | ||
| 1 task submitted | ||
| You can see the status of your jobs with:: | ||
| $ mq list | ||
| id folder name args info res. age state time error | ||
| ── ────── ────────── ───── ──── ───── ──── ────── ──── ─────────────────────────────────── | ||
| 1 ./ time@sleep 2 +1 1:1m 0:02 done 0:02 | ||
| 2 ./f1/ shell:echo hello +1 1:10m 0:00 done 0:00 | ||
| 3 ./f2/ shell:echo hello +1 1:10m 0:00 done 0:00 | ||
| 4 ./ script.py 8:10h 0:00 FAILED 0:00 ZeroDivisionError: division by zero | ||
| ── ────── ────────── ───── ──── ───── ──── ────── ──── ─────────────────────────────────── | ||
| done: 3, FAILED: 1, total: 4 | ||
| Remove the failed and done jobs from the list with | ||
| (notice the dot meaning the current folder):: | ||
| $ mq remove -s Fd -r . | ||
| 1 ./ time@sleep 2 +1 1:1m 0:02 done 0:02 | ||
| 2 ./f1/ shell:echo hello +1 1:10m 0:00 done 0:00 | ||
| 3 ./f2/ shell:echo hello +1 1:10m 0:00 done 0:00 | ||
| 4 ./ script.py 8:10h 0:00 FAILED 0:00 ZeroDivisionError: division by zero | ||
| 4 tasks removed | ||
| The output files from a task will look like this:: | ||
| $ ls -l f2 | ||
| total 4 | ||
| -rw-rw-r-- 1 jensj jensj 0 Oct 28 10:46 shell:echo.3.err | ||
| -rw-rw-r-- 1 jensj jensj 6 Oct 28 10:46 shell:echo.3.out | ||
| $ cat f2/shell:echo.3.out | ||
| hello | ||
| If a job fails or times out, then you can resubmit it with more resources:: | ||
| $ mq submit "shell:sleep 4" -R 1:2s | ||
| 5 ./ shell:sleep 4 +1 1:2s | ||
| 1 task submitted | ||
| $ mq list | ||
| id folder name args info res. age state time | ||
| ── ────── ─────────── ──── ──── ──── ──── ─────── ──── | ||
| 5 ./ shell:sleep 4 +1 1:2s 0:02 TIMEOUT 0:02 | ||
| ── ────── ─────────── ──── ──── ──── ──── ─────── ──── | ||
| TIMEOUT: 1, total: 1 | ||
| $ mq resubmit -i 5 -R 1:1m | ||
| 6 ./ shell:sleep 4 +1 1:1m | ||
| 1 task submitted |
# Example MyQueue configuration: a SLURM cluster with three node types.
# Each node entry gives the core count and total memory of that node type.
config = {
    'scheduler': 'slurm',
    'nodes': [
        ('xeon24', {'cores': 24, 'memory': '255G'}),
        ('xeon16', {'cores': 16, 'memory': '63G'}),
        ('xeon8', {'cores': 8, 'memory': '23G'})]}
| How it works | ||
| ============ | ||
| Your queue | ||
| ---------- | ||
| When you submit a task, MyQueue will submit it to your scheduler and add it to | ||
| a *queue* file (:file:`~/.myqueue/queue.sqlite3` by default). Once the tasks | ||
| starts running (let's say it has a task-id ``1234``), it will write a status | ||
| file called ``1234-0`` in your ``.myqueue/`` folder. When the tasks stops | ||
| running, it will write a file called ``1234-1`` if it finished successfully | ||
| and ``1234-2`` if it failed. MyQueue will remove the status files and update | ||
| your queue with information about timing and possible errors. | ||
| The processing of the status files happens whenever you interact with MyQueue | ||
| on the command-line or every 10 minutes when the MyQueue daemon wakes up. | ||
| .. _daemon process: | ||
| The daemon background process | ||
| ----------------------------- | ||
| The daemon process wakes up every ten minutes to check if any tasks need to be | ||
| resubmitted, held or released (see :meth:`~myqueue.queue.Queue.kick`). | ||
| Notification emails will also be sent. It will write its output to | ||
| ``.myqueue/daemon.out``. | ||
| How does the daemon get started? Whenever the time stamp of the | ||
| ``daemon-<username>.out`` file is older than 2 hours or the file is missing, | ||
| the *mq* command will start the daemon process. You can also use the | ||
| :ref:`daemon <daemon>` sub-command to explicitly *start* or *stop* the daemon | ||
| (and check *status*):: | ||
| $ mq daemon {start,stop,status} [folder] | ||
| More than one configuration file | ||
| -------------------------------- | ||
| If you have several projects and they need different scheduler configuration, | ||
| then you can use the :ref:`init <init>` command:: | ||
| $ mkdir project2 | ||
| $ cd project2 | ||
| $ mq init | ||
| $ ls .myqueue/ | ||
| config.py | ||
| You now have a ``project2/.myqueue/`` folder that contains a copy of your main | ||
| configuration file (``~/.myqueue/config.py``) that you can edit. All tasks | ||
| inside the ``project2/`` folder will now use ``project2/.myqueue/`` for | ||
| storing your queue and configuration. |
| How to ... | ||
| ========== | ||
| Delete lost tasks | ||
| ----------------- | ||
| Tasks were lost, but MyQueue still thinks they are there. This can | ||
| happen if they were deleted with ``scancel``, ``bkill`` or ``qdel`` instead | ||
| of ``mq rm``. Solution: | ||
| * Use ``mq sync`` (:ref:`sync`) | ||
| Remove many tasks | ||
| ----------------- | ||
| The ``mq rm`` :ref:`command <remove>` can read task ID's from standard input:: | ||
| $ cat ids | mq rm -i - | ||
| $ mq ls | grep <something> | mq rm -i - | ||
| Start from scratch | ||
| ------------------ | ||
| * Remove your ``.myqueue/queue.sqlite3`` file. | ||
| See what's in your ``.myqueue/queue.sqlite3`` file | ||
| -------------------------------------------------- | ||
| >>> import sqlite3 | ||
| >>> con = sqlite3.connect('path/to/.myqueue/queue.sqlite3') | ||
| >>> for row in con.execute('SELECT * FROM tasks'): | ||
| ... print(row) | ||
| Or use:: | ||
| $ python -m myqueue.queue path/to/.myqueue/queue.sqlite3 |
| ======= | ||
| MyQueue | ||
| ======= | ||
| MyQueue is a frontend for SLURM_/PBS_/LSF_ that makes handling of tasks easy. | ||
| It has a command-line interface called ``mq`` with a number of :ref:`commands` | ||
| and a Python interface for managing :ref:`workflows`. Simple to set up: no | ||
| system administrator or database required. | ||
| .. admonition:: Features | ||
| * Easy task submission: | ||
| * from the command line: ``mq submit <task> -R <cores>:<time>`` | ||
| * from Python: :func:`myqueue.submit` | ||
| * Automatic restarting of timed-out/out-of-memory tasks | ||
| with more time/cores | ||
| * Remembers your finished and failed tasks | ||
| * Powerful :ref:`list <list>` command for monitoring | ||
| * Can be used together with Python :mod:`venv`\ 's | ||
| (see :ref:`venv`) | ||
| * Folder-based :ref:`Workflows` | ||
| .. image:: https://gitlab.com/myqueue/myqueue/badges/master/coverage.svg | ||
| :target: https://gitlab.com/myqueue/myqueue/ | ||
| .. image:: https://badge.fury.io/py/myqueue.svg | ||
| :target: https://pypi.org/project/myqueue/ | ||
| .. image:: https://joss.theoj.org/papers/10.21105/joss.01844/status.svg | ||
| :target: https://doi.org/10.21105/joss.01844 | ||
| .. toctree:: | ||
| :maxdepth: 3 | ||
| :caption: Contents: | ||
| installation | ||
| configuration | ||
| quickstart | ||
| documentation | ||
| releasenotes | ||
| howitworks | ||
| howto | ||
| cli | ||
| workflows | ||
| api | ||
| development | ||
| .. _SLURM: https://slurm.schedmd.com/ | ||
| .. _PBS: https://en.m.wikipedia.org/wiki/Portable_Batch_System | ||
| .. _LSF: https://en.m.wikipedia.org/wiki/Platform_LSF | ||
| .. _Python: https://python.org/ | ||
| Indices and tables | ||
| ================== | ||
| * :ref:`genindex` | ||
| * :ref:`modindex` | ||
| * :ref:`search` |
| ============ | ||
| Installation | ||
| ============ | ||
| Install with ``pip``:: | ||
| $ python3 -m pip install myqueue | ||
| .. note:: | ||
| Python 3.9 or later is required. | ||
| Enable bash tab-completion for future terminal sessions like this:: | ||
| $ mq completion >> ~/.profile | ||
| Subscribe here_ or use this `RSS feed`_ if you want to be notified of updates | ||
| on PyPI_. | ||
| .. _RSS feed: https://pypi.org/project/myqueue/#history | ||
| .. _here: https://libraries.io/pypi/myqueue | ||
| .. _PyPI: https://pypi.org/project/myqueue/ |
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
SOURCEDIR     = .
BUILDDIR      = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
| @article{c2db, | ||
| doi = {10.1088/2053-1583/aacfc1}, | ||
| url = {https://doi.org/10.1088%2F2053-1583%2Faacfc1}, | ||
| year = 2018, | ||
| month = {sep}, | ||
| publisher = {{IOP} Publishing}, | ||
| volume = {5}, | ||
| number = {4}, | ||
| pages = {042002}, | ||
| author = {Sten Haastrup and Mikkel Strange and Mohnish Pandey and Thorsten Deilmann and Per S Schmidt and Nicki F Hinsche and Morten N Gjerding and Daniele Torelli and Peter M Larsen and Anders C Riis-Jensen and Jakob Gath and Karsten W Jacobsen and Jens J{\o}rgen Mortensen and Thomas Olsen and Kristian S Thygesen}, | ||
| title = {The Computational 2D Materials Database: high-throughput modeling and discovery of atomically thin crystals}, | ||
| journal = {2D Materials}, | ||
| abstract = {We introduce the Computational 2D Materials Database (C2DB), which organises a variety of structural, thermodynamic, elastic, electronic, magnetic, and optical properties of around 1500 two-dimensional materials distributed over more than 30 different crystal structures. Material properties are systematically calculated by state-of-the-art density functional theory and many-body perturbation theory ( and the Bethe–Salpeter equation for ∼250 materials) following a semi-automated workflow for maximal consistency and transparency. The C2DB is fully open and can be browsed online (http://c2db.fysik.dtu.dk) or downloaded in its entirety. In this paper, we describe the workflow behind the database, present an overview of the properties and materials currently available, and explore trends and correlations in the data. Moreover, we identify a large number of new potentially synthesisable 2D materials with interesting properties targeting applications within spintronics, (opto-)electronics, and plasmonics. The C2DB offers a comprehensive and easily accessible overview of the rapidly expanding family of 2D materials and forms an ideal platform for computational modeling and design of new 2D materials and van der Waals heterostructures.} | ||
| } | ||
| @INPROCEEDINGS{slurm, | ||
| author = {Morris A. Jette and Andy B. Yoo and Mark Grondona}, | ||
| title = {SLURM: Simple Linux Utility for Resource Management}, | ||
| booktitle = {In Lecture Notes in Computer Science: Proceedings of Job Scheduling Strategies for Parallel Processing ({JSSPP}) 2003}, | ||
| year = {2002}, | ||
| pages = {44--60}, | ||
| publisher = {Springer-Verlag}, | ||
| doi = {10.1007/10968987_3} | ||
| } | ||
| @article{felix, | ||
| author = "Felix Tim Bölle and Nicolai Rask Mathiesen and Alexander Juul Nielsen and Tejs Vegge and Juan Maria García Lastra and Ivano E. Castelli", | ||
| title = "{Autonomous Discovery of Materials for Intercalation Electrodes}", | ||
| year = "2019", | ||
| month = "10", | ||
| url = "https://chemrxiv.org/articles/Autonomous_Discovery_of_Materials_for_Intercalation_Electrodes/9971054", | ||
| doi = "10.26434/chemrxiv.9971054.v1" | ||
| } | ||
| @article {fireworks, | ||
| author = {Jain, Anubhav and Ong, Shyue Ping and Chen, Wei and Medasani, Bharat and Qu, Xiaohui and Kocher, Michael and Brafman, Miriam and Petretto, Guido and Rignanese, Gian-Marco and Hautier, Geoffroy and Gunter, Daniel and Persson, Kristin A.}, | ||
| title = {FireWorks: a dynamic workflow system designed for high-throughput applications}, | ||
| journal = {Concurrency and Computation: Practice and Experience}, | ||
| volume = {27}, | ||
| number = {17}, | ||
| issn = {1532-0634}, | ||
| url = {http://dx.doi.org/10.1002/cpe.3505}, | ||
| doi = {10.1002/cpe.3505}, | ||
| pages = {5037--5059}, | ||
| keywords = {scientific workflows, high-throughput computing, fault-tolerant computing}, | ||
| year = {2015}, | ||
| note = {CPE-14-0307.R2}, | ||
| } | ||
| @article{aiida, | ||
| title = "AiiDA: automated interactive infrastructure and database for computational science", | ||
| journal = "Computational Materials Science", | ||
| volume = "111", | ||
| pages = "218 - 230", | ||
| year = "2016", | ||
| issn = "0927-0256", | ||
| doi = "https://doi.org/10.1016/j.commatsci.2015.09.013", | ||
| url = "http://www.sciencedirect.com/science/article/pii/S0927025615005820", | ||
| author = "Giovanni Pizzi and Andrea Cepellotti and Riccardo Sabatini and Nicola Marzari and Boris Kozinsky", | ||
| keywords = "High-throughput, Materials database, Scientific workflow, Directed acyclic graph, Provenance, Reproducibility", | ||
| abstract = "Computational science has seen in the last decades a spectacular rise in the scope, breadth, and depth of its efforts. Notwithstanding this prevalence and impact, it is often still performed using the renaissance model of individual artisans gathered in a workshop, under the guidance of an established practitioner. Great benefits could follow instead from adopting concepts and tools coming from computer science to manage, preserve, and share these computational efforts. We illustrate here our paradigm sustaining such vision, based around the four pillars of Automation, Data, Environment, and Sharing. We then discuss its implementation in the open-source AiiDA platform (http://www.aiida.net), that has been tuned first to the demands of computational materials science. AiiDA’s design is based on directed acyclic graphs to track the provenance of data and calculations, and ensure preservation and searchability. Remote computational resources are managed transparently, and automation is coupled with data storage to ensure reproducibility. Last, complex sequences of calculations can be encoded into scientific workflows. We believe that AiiDA’s design and its sharing capabilities will encourage the creation of social ecosystems to disseminate codes, data, and scientific workflows." | ||
| } |
-105
| --- | ||
| title: 'MyQueue: Task and workflow scheduling system' | ||
| tags: | ||
| - Python | ||
| - ... | ||
| authors: | ||
| - name: Jens Jørgen Mortensen | ||
| orcid: 0000-0001-5090-6706 | ||
| affiliation: 1 | ||
| - name: Morten Gjerding | ||
| orcid: 0000-0002-5256-660X | ||
| affiliation: 1 | ||
| - name: Kristian Sommer Thygesen | ||
| orcid: 0000-0001-5197-214X | ||
| affiliation: 1 | ||
| affiliations: | ||
| - name: CAMD, Department of Physics, Technical University of Denmark, 2800 Kgs. Lyngby, Denmark | ||
| index: 1 | ||
| date: 11 November 2019 | ||
| bibliography: paper.bib | ||
| --- | ||
| # Summary | ||
| Task scheduling and workload management on high-performance computing | ||
| environments is usually done with tools such as `SLURM` [@slurm]. | ||
| [MyQueue](https://myqueue.readthedocs.io/) is a front-end for schedulers that | ||
| makes handling of tasks easy. It has a command-line interface called *mq* with | ||
| a number of sub-commands and a Python interface for managing workflows. | ||
| Currently, the following schedulers are supported: | ||
| [SLURM](https://en.m.wikipedia.org/wiki/Slurm_Workload_Manager), | ||
| [PBS](https://en.m.wikipedia.org/wiki/Portable_Batch_System), and | ||
| [LSF](https://en.m.wikipedia.org/wiki/Platform_LSF). | ||
| The idea behind `MyQueue` is to define a personal queue that the | ||
| user can interact with in an easy and efficient way while `MyQueue` handles | ||
| the interaction with the scheduler. Finished tasks will stay in the personal | ||
| queue until they are explicitly removed so they can be listed with their | ||
| status (done, failed, timed-out, or out-of-memory). This makes it easy to keep | ||
| track of your tasks: If a task is listed as "done", it reminds you that some | ||
| action should be taken, e.g., the result of the task should be checked. If a | ||
| task failed then you need to fix something and resubmit the task. In this | ||
| sense, `MyQueue` works as a to-do list. | ||
| `MyQueue` has a convenient *list* sub-command. It will by default only | ||
| show tasks belonging to the current folder and its sub-folders, making it easy | ||
| to manage several projects by putting them in separate folders. Failed tasks | ||
| will show a short error message read from the relevant line in the error file. | ||
| You can select the tasks you want to list by status, task-id, name, or error | ||
| message. A task can be marked with a *restarts* number $N$, indicating that | ||
| `MyQueue` should restart the task up to $N$ times (with increased resources) | ||
| if the task runs out of time or memory. Increased resources means longer time | ||
| or more cores for the timed-out and out-of-memory cases, respectively. | ||
| The `MyQueue` *submit* sub-command makes it easy to submit thousands | ||
| of tasks in a single command. As input *submit* takes a shell script, Python | ||
| script, or Python module and executes the script/module in a number of folders. | ||
| This makes it easy to submit a large number of tasks quickly. The *list* | ||
| sub-command can then be used to monitor the execution of the tasks. Together | ||
| with the *resubmit* sub-command it becomes easy to resubmit any tasks that | ||
| might have failed. This example shows how the sub-commands of `MyQueue` | ||
| synergize and increase the efficiency of the user. | ||
| `MyQueue` has a simple Python interface that can be used to define | ||
| workflows. A Python script defines a dependency tree of tasks that `MyQueue` | ||
| can use to submit tasks without user involvement. The dependencies take the | ||
| form "if task X is done then submit task Y". `MyQueue` works directly with | ||
| folders and files, which makes it simple to use and easy to get started. | ||
| Compared to the current state of the field `MyQueue` distinguishes | ||
| itself by focusing *not* on automatic handling of crashes but *only* | ||
| on the single problem of submitting and managing thousands of tasks. | ||
| In the scientific field of the authors, atomic-scale simulations, | ||
| commonly used workflow managers are | ||
| [AiiDA](http://www.aiida.net) [@aiida] and | ||
| [Fireworks](https://materialsproject.github.io/fireworks) [@fireworks]. | ||
| Fireworks' centralized server model is advantageous when coordinating | ||
| tasks distributed between multiple users. In contrast to Fireworks, | ||
| `MyQueue` is installed per user, is completely decentralized, and | ||
| cannot coordinate tasks between multiple users. AiiDA is a fully | ||
| automatic workflow tool designed to ensure data provenance. In contrast | ||
| to AiiDA, `MyQueue` does not handle data provenance and does not focus | ||
| on the automation of the workflow process. These design decisions | ||
| can be seen both as a drawback and an advantage depending on the use | ||
| case, but in any case makes `MyQueue` easier to learn. To summarize, | ||
| `MyQueue` is a personal, decentralized, and lightweight front-end for | ||
| schedulers with support for submitting workflows. It requires no | ||
| system administrator and no database server. | ||
| `MyQueue` is useful for high-throughput computations, which require automatic | ||
| submission of thousands of interdependent jobs. For example, `MyQueue` has | ||
| been used to drive high-throughput screening studies coordinating on the | ||
| order of 100,000 individual tasks [@c2db], [@felix]. `MyQueue` is also used | ||
| by the [Atomic Simulation Recipes](https://asr.readthedocs.io/) project, which | ||
| is a library of tasks for atomic simulations. | ||
| # Acknowledgments | ||
| K. S. T. acknowledges funding from the European Research Council (ERC) under | ||
| the European Union's Horizon 2020 research and innovation program (Grant | ||
| Agreement No. 773122, LIMA). | ||
| # References |
"""Create a ``PRIME`` marker file if the factored number was prime.

Reads ``factors.json`` (written by ``factor.py`` in the same folder) and,
when the factorization contains exactly one factor — i.e. the number is
its own only prime factor — writes an empty ``PRIME`` file as a marker.
"""
import json
from pathlib import Path

dct = json.loads(Path('factors.json').read_text())
factors = dct['factors']
if len(factors) == 1:  # a single prime factor means the number is prime
    Path('PRIME').write_text('')  # create empty file
from __future__ import annotations

import json
from math import isqrt
from pathlib import Path
def factor(x: int) -> list[int]:
    """Return the prime factorization of *x* in non-decreasing order.

    Uses simple trial division.  Any composite x has a prime factor
    <= sqrt(x), so searching up to ``isqrt(x)`` (instead of ``x // 2``)
    is sufficient and reduces the work from O(x) to O(sqrt(x)).
    For x < 2 the list ``[x]`` is returned.
    """
    for f in range(2, isqrt(x) + 1):
        if x % f == 0:
            # f is the smallest (hence prime) factor; recurse on the rest.
            return [f] + factor(x // f)
    return [x]  # no divisor found: x itself is prime (or < 2)
if __name__ == '__main__':
    # The number to factor is encoded as the name of the current folder,
    # e.g. running inside a folder called "51" factors 51.
    x = int(Path.cwd().name)  # name of current folder
    factors = factor(x)
    # Persist the result for the follow-up task (check.py) to read.
    Path('factors.json').write_text(json.dumps({'factors': factors}))
| from myqueue.workflow import run, resources | ||
@resources(tmax='2s', cores=1)  # defaults for all tasks in this workflow
def workflow():
    """Define the prime-factorization workflow.

    Submits the ``prime.factor`` task and, via the ``with`` block,
    makes ``prime.check`` depend on it (check runs only after factor
    has finished).
    """
    with run(module='prime.factor'):
        run(module='prime.check')
| ============= | ||
| A quick start | ||
| ============= | ||
| .. This file contains computer generated output. Do not touch. | ||
| .. highlight:: bash | ||
| .. mq: cd /tmp; rm -r .myqueue proj1 proj2 | ||
| Let's create a simple "Hello world" Python script:: | ||
| $ mkdir proj1 | ||
| $ cd proj1 | ||
| $ echo 'print("Hello world")' > hello.py | ||
| and :ref:`submit <submit>` it:: | ||
| $ mq submit hello.py | ||
| 1 ./ hello.py 1:10m | ||
| 1 task submitted | ||
| The :ref:`list <list>` command shows that the job is done:: | ||
| $ mq ls | ||
| id folder name res. age state time | ||
| ── ────── ──────── ───── ──── ───── ──── | ||
| 1 ./ hello.py 1:10m 0:00 done 0:00 | ||
| ── ────── ──────── ───── ──── ───── ──── | ||
| done: 1, total: 1 | ||
| The ``1:10m`` means that 1 core and 10 minutes was reserved for the task. | ||
| There is now an output file and an empty error file in the folder:: | ||
| $ ls -l | ||
| total 8 | ||
| -rw-rw-r-- 1 jensj jensj 21 Oct 28 10:46 hello.py | ||
| -rw-rw-r-- 1 jensj jensj 0 Oct 28 10:46 hello.py.1.err | ||
| -rw-rw-r-- 1 jensj jensj 12 Oct 28 10:46 hello.py.1.out | ||
| $ cat hello.py.1.out | ||
| Hello world | ||
| Now we run some calculations in another folder:: | ||
| $ cd .. | ||
| $ mkdir proj2 | ||
| $ cd proj2 | ||
| $ mq submit -R 1:10s "math@sin 3.14" | ||
| 2 ./ math@sin 3.14 +1 1:10s | ||
| 1 task submitted | ||
| This will call the :func:`~math.sin` function from the Python :mod:`math` | ||
| module with an argument of ``3.14`` and we ask for 10 seconds on 1 core. | ||
| Let's also submit a task that will fail:: | ||
| $ mq submit "math@sin hello" | ||
| 3 ./ math@sin hello +1 1:10m | ||
| 1 task submitted | ||
| The :ref:`list <list>` command shows the status of the two tasks in the | ||
| current folder:: | ||
| $ mq ls | ||
| id folder name args info res. age state time error | ||
| ── ────── ──────── ───── ──── ───── ──── ────── ──── ─────────────────────────────────────── | ||
| 2 ./ math@sin 3.14 +1 1:10s 0:00 done 0:00 | ||
| 3 ./ math@sin hello +1 1:10m 0:00 FAILED 0:00 TypeError: must be real number, not str | ||
| ── ────── ──────── ───── ──── ───── ──── ────── ──── ─────────────────────────────────────── | ||
| done: 1, FAILED: 1, total: 2 | ||
| To see the status of both the ``proj1`` and ``proj2`` folders, do this:: | ||
| $ cd .. | ||
| $ mq ls | ||
| id folder name args info res. age state time error | ||
| ── ──────── ──────── ───── ──── ───── ──── ────── ──── ─────────────────────────────────────── | ||
| 1 ./proj1/ hello.py 1:10m 0:00 done 0:00 | ||
| 2 ./proj2/ math@sin 3.14 +1 1:10s 0:00 done 0:00 | ||
| 3 ./proj2/ math@sin hello +1 1:10m 0:00 FAILED 0:00 TypeError: must be real number, not str | ||
| ── ──────── ──────── ───── ──── ───── ──── ────── ──── ─────────────────────────────────────── | ||
| done: 2, FAILED: 1, total: 3 | ||
| See status of the ``proj1`` folder only:: | ||
| $ mq ls proj1 | ||
| id folder name res. age state time | ||
| ── ──────── ──────── ───── ──── ───── ──── | ||
| 1 ./proj1/ hello.py 1:10m 0:00 done 0:00 | ||
| ── ──────── ──────── ───── ──── ───── ──── | ||
| done: 1, total: 1 | ||
| Once you have seen that your tasks have finished, you will typically remove | ||
| them so that only queued and failed tasks are left:: | ||
| $ mq rm -s d proj* | ||
| 1 ./proj1/ hello.py 1:10m 0:00 done 0:00 | ||
| 2 ./proj2/ math@sin 3.14 +1 1:10s 0:00 done 0:00 | ||
| 2 tasks removed | ||
| .. tip:: | ||
| Use ``mq ls ~`` to see all your tasks. |
| .. _releases: | ||
| ============= | ||
| Release notes | ||
| ============= | ||
| (version numbers can be interpreted as ``year.month.bug-fix-release-number``) | ||
| .. highlight:: bash | ||
| Next release | ||
| ============ | ||
| See https://gitlab.com/myqueue/myqueue/-/merge_requests | ||
| Version 25.11.0 | ||
| =============== | ||
| * TAB-completions for node names: ``mq submit ... -R 40:<TAB>``. | ||
| * Added a tool for comparing jobs in two similar folders: | ||
| ``python -m myqueue.compare folder1/ folder2/``. | ||
| Version 25.4.0 | ||
| ============== | ||
| * **IMPORTANT**: | ||
| The meaning of a resource specification like ``24:1:1h`` has changed! | ||
| With version 24.10.0 and earlier, 24 cores and processes would be allocated, | ||
| but ``mpiexec`` would not be called. The equivalent specification with new | ||
| MyQueue is ``s:24:1h``. | ||
| From now on, ``24:1:1h`` will allocate 24 cores and one process. | ||
| See :ref:`resources` and :ref:`use_mpi`. | ||
| * Resource specifications can now specify if ``mpiexec`` should be called | ||
| or not. See :ref:`resources` and :ref:`use_mpi`. | ||
| * Renamed default branch from ``master`` to ``main``. | ||
| * :ref:`mq sync <sync>` will now only remove your own tasks | ||
| (in case you are sharing folders with other users). | ||
| Version 24.10.0 | ||
| =============== | ||
| * Introduced an environment variable :envvar:`MYQUEUE_PREAMBLE`. | ||
| Its value will be inserted at the beginning of the script that is | ||
| submitted. See :ref:`preamble`. | ||
| * Number of GPUs per node can now be part of a resource specification. | ||
| See :ref:`resources` and :func:`myqueue.workflow.run`. | ||
| * Automatically adds BASH-completion line to ``$VIRTUAL_ENV/bin/activate`` | ||
| script. | ||
| Version 24.9.0 | ||
| ============== | ||
| * We now check that the task-folder is writable. | ||
| * Output from ``mq list`` is now shortened to fit the text-terminal width: | ||
| ``verylongword`` -> ``ver…ord``. | ||
| * Local scheduler can now run more than one task at a time. | ||
| * New ``nodename`` configuration variable: :ref:`nodes`. | ||
| * Setting ``nodename`` to the empty string will skip ``--partition=nodename`` | ||
| on SLURM. | ||
| Version 24.5.1 | ||
| ============== | ||
| * Fixed a problem with multinode jobs with 1 process | ||
| (:issue:`58`, :mr:`144`). | ||
| Version 24.5.0 | ||
| ============== | ||
| * SLURM jobs will now be submitted with ``--cpus-per-task`` set to the correct | ||
| value. | ||
| * The ``MYQUEUE_TASK_ID`` environment variable will now be set to | ||
| the task ID so that running tasks can inspect it. | ||
| * New configuration variable for :ref:`serial_python`. | ||
| Version 24.1.0 | ||
| ============== | ||
| * Drop support for Python 3.7. | ||
| * Move from ``setup.py`` to ``pyproject.toml``. | ||
| * The :ref:`resubmit <resubmit>` command will now remove the old task. | ||
| Use ``--keep`` to get the old behavior. | ||
| * Restarted (OOM'ed or timed out) tasks will now be cleared from the queue. | ||
| * Improved parsing or ``.err`` files. | ||
| Version 23.4.0 | ||
| ============== | ||
| * Fixed broken tab-completion for names and ids: ``mq ls -i <tab>`` | ||
| (:mr:`132`). | ||
| * Failed dependencies would block *everything* in a workflow. Should be | ||
| fixed in :mr:`135`. | ||
| Version 23.1.0 | ||
| ============== | ||
| * Fixed a problem with dependencies inside subfolders (:issue:`51`). | ||
| Version 22.12.0 | ||
| =============== | ||
| * Added ``--extra-scheduler-args`` option to :ref:`submit <submit>` | ||
| and :ref:`resubmit <resubmit>` commands. | ||
| * Added ``special`` flag to node description (see :ref:`nodes`). | ||
| * Make sure old daemons from older versions stop running (:mr:`130`). | ||
| Version 22.11.3 | ||
| =============== | ||
| * Fixed dependency bug (:mr:`128`). | ||
| Version 22.11.2 | ||
| =============== | ||
| * Fix :issue:`48` and other regressions after move to :mod:`sqlite3`. | ||
| Version 22.11.1 | ||
| =============== | ||
| * Add missing ``weight`` argument to :func:`myqueue.workflow.run`. | ||
| Version 22.11.0 | ||
| =============== | ||
| .. important:: | ||
| No more ``<task-name>.state`` files. MyQueue will only know the state | ||
| af a task if it is listed in your queue. There are two exceptions to | ||
| this rule: | ||
| 1) If a task is set to create some files like here:: | ||
| def workflow(): | ||
| run(..., creates=['file1.abc', 'file2.xyz'], ...) | ||
| then MyQueue will consider the task done if those files exist. | ||
| See :func:`myqueue.workflow.run`. | ||
| 2) If a task is a Python function like here:: | ||
| def workflow(): | ||
| run(function=func, args=[...], name='abc', ...) | ||
| then MyQueue will consider the task done if the result file exists | ||
| (in this case ``abc.result``). See | ||
| :class:`myqueue.caching.json_cached_function`. | ||
| * Your queue is no longer stored in a ``.myqueue/queue.json`` file. Instead, | ||
| it is now in a :mod:`sqlite3` file in ``.myqueue/queue.sqlite3``. | ||
| Your old JSON file will automatically be migrated to the new format. | ||
| * Removed the *mq run* command (it may return later: :issue:`44`). | ||
| * Calling a Python function from a workflow (``run(function=...)``) | ||
| will now write the return value to a file called ``<task-name>.result`` | ||
| in the JSON format. Previously the return value was written to the | ||
| ``.state`` file. | ||
| * Removing tasks part of a workflow now needs a ``--force`` | ||
| (as MyQueue will no longer know the states of such tasks). | ||
| * Most commands have been sped up by delaying import of ``rich`` | ||
| and ``networkx``. | ||
| * The :ref:`resubmit <resubmit>` command will no longer remove the old task. | ||
| Use ``--remove`` to get the old behavior. | ||
| * The :ref:`resources` of a task now includes a *task-weight*. This can be | ||
| used to limit the number of running tasks. See more here: | ||
| :ref:`task_weight`. | ||
| Version 22.9.0 | ||
| ============== | ||
| * Hitting CTRL-C in the middle of submitting jobs is now safe. | ||
| Version 22.7.1 | ||
| ============== | ||
| * Fixed: :issue:`mq list does not work with ID specifier (-i) <42>`. | ||
| Version 22.7.0 | ||
| ============== | ||
| * Tasks will no longer activate a virtual environment if a ``venv/`` folder | ||
| is found in one of the parent folders. | ||
| * Tasks submitted from an activated virtual environment will now activate that | ||
| environment when the job starts running. | ||
| * Better error message when ``sbatch``/``qsub``/``bsub`` fails. | ||
| * Improved parsing of ``stderr`` from failed jobs. | ||
| * Depth first submit ordering. A workflow with an ``A`` task and a ``B`` | ||
| task where ``B`` depends on ``A`` would previously run all the ``A`` | ||
| tasks and then all the ``B`` tasks. The order will now be ``A``, ``B`` | ||
| in the first folder, then ``A``, ``B`` in the next folder and so on. | ||
| Version 22.6.0 | ||
| ============== | ||
| * Fixed bug related to several users having write access to the same | ||
| ``.myqueue/`` folder. | ||
| Version 22.3.0 | ||
| ============== | ||
| * There is now one background daemon per user. This will allow several users | ||
| to share a ``.myqueue/`` folder. | ||
| Version 22.1.0 | ||
| ============== | ||
| * The :ref:`list` command can now list several folders instead of, | ||
| as previously, only one. | ||
| They must all belong to the same ``.myqueue/`` folder though. | ||
| Version 21.8.0 | ||
| ============== | ||
| * The simple "local" scheduler is now feature complete. | ||
| See :ref:`scheduler`. | ||
| * The `mpi_implementations` configuration option is no longer needed and has | ||
| been deprecated. | ||
| * MyQueue no longer tries to keep track of all your ``.myqueue/`` folders. | ||
| Consequently, the ``--all`` option has been removed from the :ref:`list | ||
| <list>`, :ref:`kick <kick>` and :ref:`sync <sync>` commands. | ||
| * There is a new ``mq info --all [folder]`` command that will search for | ||
| your ``.myqueue/`` folders and print a status line for each. | ||
| * There is now one background daemon per ``.myqueue/`` folder. See | ||
| :ref:`daemon process`. | ||
| Version 21.7.0 | ||
| ============== | ||
| * Email notifications: ``mq modify ... -N dA``. See :ref:`modify` and | ||
| :ref:`notifications`. | ||
| * You can now use ``mq info`` to get information about your MyQueue | ||
| installation: | ||
| * version | ||
| * location of the source code | ||
| * location of ``.myqueue/`` folder | ||
| * configuration | ||
| Version 21.4.2 | ||
| ============== | ||
| * Make things work with Python 3.7. | ||
| Version 21.4.1 | ||
| ============== | ||
| * Backwards compatibility fix. | ||
| Version 21.4.0 | ||
| ============== | ||
| * For workflow tasks, ``name.done`` and ``name.FAILED`` files have now been | ||
| replaced by a ``name.state`` file. MyQueue will still read the old files, | ||
| but no longer write them. | ||
| Version 21.2.0 | ||
| ============== | ||
| * PRELIMINARY: New way to specify workflows using :func:`myqueue.workflow.run`, | ||
| :func:`myqueue.workflow.wrap` and :func:`myqueue.workflow.resources`. | ||
| See :ref:`workflow script`. | ||
| Version 21.1.0 | ||
| ============== | ||
| * New :ref:`config command <config>` for guessing your configuration. | ||
| See :ref:`autoconfig`. | ||
| * LSF-backend fixes. | ||
| Version 20.11.3 | ||
| =============== | ||
| * Bugfix: LSF-backend fixes. | ||
| Version 20.11.2 | ||
| =============== | ||
| * Bugfix: Don't remove FAILED-files in dry-run mode. | ||
| Version 20.11.1 | ||
| =============== | ||
| * Fix "workflow target" bug and ``MQ:`` comments bug. | ||
| Version 20.11.0 | ||
| =============== | ||
| * New ``mq workflow ... --arguments "key=val,..."`` option. See | ||
| :ref:`workflow`. | ||
| * Two new columns in :ref:`list output <list>`: *arguments* and *info*. | ||
| Can be hidden with: ``mq ls -c aI-``. | ||
| * Deprecated ``venv/activate`` script. Use ``venv/bin/activate`` instead. | ||
| See :ref:`venv`. | ||
| * Resources can now be specified in the scripts as special comments:: | ||
| # MQ: resources=24:2h | ||
| Version 20.9.1 | ||
| ============== | ||
| * Fix workflow+openmpi issue. | ||
| Version 20.9.0 | ||
| ============== | ||
| * Red error messages. | ||
| * Progress-bar. | ||
| Version 20.5.0 | ||
| ============== | ||
| * Using pytest_ for testing. | ||
| * Simple *local* queue for use without a real scheduler. | ||
| * New ``extra_args`` configuration parameter (:ref:`extra_args`). | ||
| Replaces, now deprecated, ``features`` and ``reservation`` parameters. | ||
| * Use ``python3 -m myqueue.config`` to auto-configure your system. | ||
| * Memory usage is now logged. | ||
| .. _pytest: https://docs.pytest.org/en/latest/ | ||
| Version 20.1.2 | ||
| ============== | ||
| * Bug-fix release with fix for single-process tasks (see :ref:`resources`). | ||
| Version 20.1.1 | ||
| ============== | ||
| * This is the version submitted to JOSS. | ||
| Version 20.1.0 | ||
| ============== | ||
| * New shortcuts introduced for specifying :ref:`states`: ``a`` is ``qhrd`` | ||
| and ``A`` is ``FCMT``. | ||
| Version 19.11.1 | ||
| =============== | ||
| * New command: :ref:`daemon`. | ||
| Version 19.11.0 | ||
| =============== | ||
| * Small bugfixes and improvements. | ||
| Version 19.10.1 | ||
| =============== | ||
| * Added support for LSF scheduler. | ||
| * Added ``--max-tasks`` option for *submit* and *workflow* commands. | ||
| Version 19.10.0 | ||
| =============== | ||
| * Shell-style wildcard matching of task names and error messages | ||
| is now possible:: | ||
| $ mq ls -n "*abc-??.py" | ||
| $ mq resubmit -s F -e "*ZeroDivision*" | ||
| * Three new :ref:`cli` options: ``mq -V/--version``, ``mq ls --not-recursive`` | ||
| and ``mq submit/workflow -f/--force``. | ||
| * All task-events (queued, running, stopped) are now logged to | ||
| ``~/.myqueue/log.csv``. List tasks from log-file with:: | ||
| $ mq ls -L ... | ||
| Version 19.9.0 | ||
| ============== | ||
| * New ``-C`` option for the :ref:`mq ls <list>` command for showing only the | ||
| count of tasks in the queue:: | ||
| $ mq ls -C | ||
| running: 12, queued: 3, FAILED: 1, total: 16 | ||
| * A background process will now automatically :ref:`kick <kick>` | ||
| your queues every ten minutes. | ||
| * Project moved to a new *myqueue* group: https://gitlab.com/myqueue/myqueue/ | ||
| Version 19.8.0 | ||
| ============== | ||
| * The ``module:function`` syntax has been changed to ``module@function``. | ||
| * Arguments to tasks are now specified like this:: | ||
| $ mq submit [options] "<task> arg1 arg2 ..." [folder1 [folder2 ...]] | ||
| * New ``run`` command:: | ||
| $ mq run [options] "<task> arg1 arg2 ..." [folder1 [folder2 ...]] | ||
| Version 19.6.0 | ||
| ============== | ||
| * Tasks will now activate a virtual environment if a ``venv/`` folder is found | ||
| in one of the parent folders. The activation script will be ``venv/activate`` | ||
| or ``venv/bin/activate`` if ``venv/activate`` does not exist. | ||
| Version 19.5.0 | ||
| ============== | ||
| * New ``--target`` option for :ref:`workflows <workflows>`. | ||
| * New APIs for submitting jobs: :meth:`myqueue.task.Task.submit` and | ||
| :func:`myqueue.submit`. | ||
| * New ``--name`` option for the :ref:`submit <submit>` command. | ||
| * No more ``--arguments`` option. Use:: | ||
| $ mq submit [options] <task> [folder1 [folder2 ...]] -- arg1 arg2 ... | ||
| Version 19.2.0 | ||
| ============== | ||
| * Fix test-suite. | ||
| Version 19.1.0 | ||
| ============== | ||
| * Recognizes mpiexec variant automatically. | ||
| * New "detailed information" subcommand. | ||
| Version 18.12.0 | ||
| =============== | ||
| * The ``restart`` parameter is now an integer (number of restarts) that | ||
| counts down to zero. Avoids infinite loop. | ||
| Version 0.1.0 | ||
| ============= | ||
| Initial release. |
| sphinx>3.0 | ||
| sphinx-rtd-theme |
| .. _workflows: | ||
| ========= | ||
| Workflows | ||
| ========= | ||
| The :ref:`workflow <workflow>` subcommand combined with a :ref:`workflow | ||
| script` can be used to run sequences of tasks in several folders. The | ||
| script describes the tasks, their requirements and dependencies. | ||
| Example from real life: | ||
| * Workflow for testing a `GPAW exercise | ||
| <https://gpaw.readthedocs.io/summerschools/summerschool24/ | ||
| catalysis/catalysis.html>`__: | ||
| `agts.py <https://gitlab.com/gpaw/gpaw/-/blob/master/doc/summerschools/ | ||
| summerschool24/catalysis/agts.py>`__ | ||
| Simple example | ||
| ============== | ||
| We want to factor some integers into primes. We want to do two tasks: factor | ||
| the integer and check if the number was a prime number. | ||
| :download:`prime/factor.py`: | ||
| .. literalinclude:: prime/factor.py | ||
| :download:`prime/check.py`: | ||
| .. literalinclude:: prime/check.py | ||
| Our :ref:`workflow script` will create two tasks using the | ||
| :func:`myqueue.workflow.run` function and the :func:`myqueue.workflow.resources` | ||
| decorator. | ||
| :download:`prime/workflow.py`: | ||
| .. literalinclude:: prime/workflow.py | ||
| .. highlight:: bash | ||
| We put the three Python files in a ``prime/`` folder:: | ||
| $ ls -l prime/ | ||
| total 12 | ||
| -rw-rw-r-- 1 jensj jensj 190 Oct 28 10:46 check.py | ||
| -rw-rw-r-- 1 jensj jensj 398 Oct 28 10:46 factor.py | ||
| -rw-rw-r-- 1 jensj jensj 164 Oct 28 10:46 workflow.py | ||
| Make sure Python can find the files by adding this line:: | ||
| export PYTHONPATH=~/path/to/prime/:$PYTHONPATH | ||
| to your ``~/.bash_profile`` file. | ||
| Create some folders:: | ||
| $ mkdir numbers | ||
| $ cd numbers | ||
| $ mkdir 99 1001 8069 36791 98769 100007 | ||
| and start the workflow in one of the folders:: | ||
| $ mq workflow ../prime/workflow.py 1001/ --dry-run | ||
| Scanning folders: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1/1 | ||
| new : 2 | ||
| Submitting tasks: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2/2 | ||
| 1 ./1001/ prime.factor 1:2s | ||
| 1 ./1001/ prime.check d1 1:2s | ||
| 2 tasks to submit | ||
| $ mq workflow ../prime/workflow.py 1001/ | ||
| Scanning folders: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1/1 | ||
| new : 2 | ||
| Submitting tasks: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2/2 | ||
| 1 ./1001/ prime.factor 1:2s | ||
| 2 ./1001/ prime.check d1 1:2s | ||
| 2 tasks submitted | ||
| $ sleep 2 | ||
| and now in all subfolders:: | ||
| $ mq ls | ||
| id folder name info res. age state time | ||
| ── ─────── ──────────── ──── ──── ──── ───── ──── | ||
| 1 ./1001/ prime.factor 1:2s 0:02 done 0:00 | ||
| 2 ./1001/ prime.check d1 1:2s 0:02 done 0:00 | ||
| ── ─────── ──────────── ──── ──── ──── ───── ──── | ||
| done: 2, total: 2 | ||
| $ mq workflow ../prime/workflow.py */ | ||
| Scanning folders: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6/6 | ||
| new : 10 | ||
| done : 2 | ||
| Submitting tasks: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 10/10 | ||
| 3 ./100007/ prime.factor 1:2s | ||
| 4 ./100007/ prime.check d1 1:2s | ||
| 5 ./36791/ prime.factor 1:2s | ||
| 6 ./36791/ prime.check d1 1:2s | ||
| 7 ./8069/ prime.factor 1:2s | ||
| 8 ./8069/ prime.check d1 1:2s | ||
| 9 ./98769/ prime.factor 1:2s | ||
| 10 ./98769/ prime.check d1 1:2s | ||
| 11 ./99/ prime.factor 1:2s | ||
| 12 ./99/ prime.check d1 1:2s | ||
| 10 tasks submitted | ||
| :: | ||
| $ sleep 2 # wait for tasks to finish | ||
| $ mq ls | ||
| id folder name info res. age state time | ||
| ── ───────── ──────────── ──── ──── ──── ───── ──── | ||
| 1 ./1001/ prime.factor 1:2s 0:04 done 0:00 | ||
| 2 ./1001/ prime.check d1 1:2s 0:04 done 0:00 | ||
| 3 ./100007/ prime.factor 1:2s 0:02 done 0:00 | ||
| 4 ./100007/ prime.check d1 1:2s 0:02 done 0:00 | ||
| 5 ./36791/ prime.factor 1:2s 0:02 done 0:00 | ||
| 6 ./36791/ prime.check d1 1:2s 0:02 done 0:00 | ||
| 7 ./8069/ prime.factor 1:2s 0:02 done 0:00 | ||
| 8 ./8069/ prime.check d1 1:2s 0:02 done 0:00 | ||
| 9 ./98769/ prime.factor 1:2s 0:02 done 0:00 | ||
| 10 ./98769/ prime.check d1 1:2s 0:02 done 0:00 | ||
| 11 ./99/ prime.factor 1:2s 0:02 done 0:00 | ||
| 12 ./99/ prime.check d1 1:2s 0:02 done 0:00 | ||
| ── ───────── ──────────── ──── ──── ──── ───── ──── | ||
| done: 12, total: 12 | ||
| Note that ``prime.check.done`` and ``prime.factor.done`` files are created | ||
| to mark that these tasks have been completed:: | ||
| $ ls -l 1001/ | ||
| total 4 | ||
| -rw-rw-r-- 1 jensj jensj 24 Oct 28 10:46 factors.json | ||
| -rw-rw-r-- 1 jensj jensj 0 Oct 28 10:46 prime.check.2.err | ||
| -rw-rw-r-- 1 jensj jensj 0 Oct 28 10:46 prime.check.2.out | ||
| -rw-rw-r-- 1 jensj jensj 0 Oct 28 10:46 prime.factor.1.err | ||
| -rw-rw-r-- 1 jensj jensj 0 Oct 28 10:46 prime.factor.1.out | ||
| Now, add another number:: | ||
| $ mkdir 42 | ||
| $ mq workflow ../prime/workflow.py */ | ||
| Scanning folders: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 7/7 | ||
| done : 12 | ||
| new : 2 | ||
| Submitting tasks: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2/2 | ||
| 13 ./42/ prime.factor 1:2s | ||
| 14 ./42/ prime.check d1 1:2s | ||
| 2 tasks submitted | ||
| Turns out, there were two prime numbers:: | ||
| $ sleep 2 | ||
| $ grep factors */factors.json | ||
| 100007/factors.json:{"factors": [97, 1031]} | ||
| 1001/factors.json:{"factors": [7, 11, 13]} | ||
| 36791/factors.json:{"factors": [36791]} | ||
| 42/factors.json:{"factors": [2, 3, 7]} | ||
| 8069/factors.json:{"factors": [8069]} | ||
| 98769/factors.json:{"factors": [3, 11, 41, 73]} | ||
| 99/factors.json:{"factors": [3, 3, 11]} | ||
| $ ls */PRIME | ||
| 36791/PRIME | ||
| 8069/PRIME | ||
| Handling many tasks | ||
| ------------------- | ||
| In the case where you have a workflow script with many tasks combined with | ||
| many folders, it can happen that ``mq workflow ... */`` will try to submit | ||
| more tasks than allowed by the scheduler. In that case, you will have to | ||
| submit the tasks in batches. Say you have 300 tasks from 150 folders:: | ||
| $ mq workflow ../prime/workflow.py */ --max-tasks=200 | ||
| Scanning folders: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 150/150 | ||
| new : 200 | ||
| Submitting tasks: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 200/200 | ||
| ... | ||
| $ # wait ten days ... | ||
| $ mq workflow ../prime/workflow.py */ --max-tasks=200 | ||
| Scanning folders: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 150/150 | ||
| new : 100 | ||
| done : 200 | ||
| Submitting tasks: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100/100 | ||
| ... | ||
| .. _workflow script: | ||
| Workflow script | ||
| =============== | ||
| A workflow script must contain a function: | ||
| .. function:: workflow() -> None | ||
| .. highlight:: python | ||
| The :func:`workflow` function should call the :func:`myqueue.workflow.run` | ||
| function for each task in the workflow. Here is an example (``flow.py``):: | ||
| from myqueue.workflow import run | ||
| from somewhere import postprocess | ||
| def workflow(): | ||
| r1 = run(script='task1.py') | ||
| r2 = run(script='task2.py', cores=8, tmax='2h') | ||
| run(function=postprocess, deps=[r1, r2]) | ||
| where ``task1.py`` and ``task2.py`` are Python scripts and ``postprocess`` is | ||
| a Python function. Calling the :func:`workflow` function directly will run | ||
| the ``task1.py`` script, then the ``task2.py`` script and finally the | ||
| ``postprocess`` function. If instead, the :func:`workflow` function is | ||
| passed to the ``mq workflow flow.py`` command, then the :func:`run` | ||
| function will not actually run the tasks, but instead collect them with | ||
| dependencies and submit them. | ||
| Here is an alternative way to specify the dependencies of the ``postprocess`` | ||
| step (see more :ref:`below <dependencies>`):: | ||
| def workflow(): | ||
| r1 = run(script='task1.py') | ||
| r2 = run(script='task2.py', cores=8, tmax='2h') | ||
| with r1, r2: | ||
| run(function=postprocess) | ||
| .. autofunction:: myqueue.workflow.run | ||
| .. autoclass:: myqueue.workflow.RunHandle | ||
| :members: | ||
| Resources | ||
| --------- | ||
| Resources for a task are set using the keywords: | ||
| ``cores``, ``tmax``, ``processes``, ``nodename`` and ``repeats``. | ||
| .. seealso:: | ||
| :ref:`resources`. | ||
| Here are three equivalent ways to set the ``cores`` resource:: | ||
| def workflow(): | ||
| run(..., cores=24) # as an argument to run() | ||
| def workflow(): | ||
| with resources(cores=24): # via a context manager | ||
| run(...) | ||
| @resources(cores=24) # with a decorator | ||
| def workflow(): | ||
| run(...) | ||
| .. autofunction:: myqueue.workflow.resources | ||
| Functions | ||
| --------- | ||
| A task that calls a Python function will cache its | ||
| result by writing the return value as JSON to a file. MyQueue does this | ||
| using the :func:`~myqueue.caching.json_cached_function` function: | ||
| .. autofunction:: myqueue.caching.json_cached_function | ||
| Helper wrapper for working with functions: | ||
| .. autofunction:: myqueue.workflow.wrap | ||
| Return values that can be written to a JSON file include everything that | ||
| the Python standard library :mod:`json` module supports and in addition also | ||
| the following types: | ||
| * :class:`numpy.ndarray` | ||
| * :class:`datetime.datetime` | ||
| * :class:`complex` | ||
| * :class:`pathlib.Path` | ||
| .. autoclass:: myqueue.caching.Encoder | ||
| .. autofunction:: myqueue.caching.object_hook | ||
| .. autofunction:: myqueue.caching.decode | ||
| .. _dependencies: | ||
| Dependencies | ||
| ------------ | ||
| Suppose we have two tasks and we want ``<task-2>`` to start after ``<task-1>``. | ||
| We can specify the dependency explicitly like this:: | ||
| def workflow(): | ||
| run1 = run(<task-1>) | ||
| run(<task-2>, deps=[run1]) | ||
| or like this using a context manager:: | ||
| def workflow(): | ||
| with run(<task-1>): | ||
| run(<task-2>) | ||
| If our tasks are functions then MyQueue can figure out the dependencies | ||
| without specifying them explicitly or using `with` statements. | ||
| Say we have the following two functions:: | ||
| def f1(): | ||
| return 2 + 2 | ||
| def f2(x): | ||
| print(x) | ||
| and we want to call ``f2`` with the result of ``f1``. Given this | ||
| workflow script:: | ||
| def workflow(): | ||
| run1 = run(function=f1) | ||
| run(function=f2, args=[run1.result]) | ||
| MyQueue will know that the ``f2`` task depends on the ``f1`` task. | ||
| Here is a shorter version using the :func:`~myqueue.workflow.wrap` | ||
| function:: | ||
| def workflow(): | ||
| x = wrap(f1)() | ||
| wrap(f2)(x) | ||
| Workflows with if-statements | ||
| ============================ | ||
| Some workflows may take different directions depending on the result of the | ||
| first part of the workflow. Continuing with our ``f1`` and ``f2`` functions, | ||
| we may only want to call ``f2`` if the result of ``f1`` is less than five:: | ||
| def workflow(): | ||
| run1 = run(function=f1) | ||
| if run1.result < 5: | ||
| run(function=f2, args=[run1.result]) | ||
| MyQueue will know that ``run1.result < 5`` can't be decided before the first | ||
| task has been run and it will therefore only submit one task. Running ``mq | ||
| workflow ...`` a second time after the first task has finished will submit | ||
| the second task. Here is an equivalent script using functions:: | ||
| def workflow(): | ||
| x = wrap(f1)() | ||
| if x < 5: | ||
| wrap(f2)(x) | ||
| The :class:`~myqueue.workflow.RunHandle` object also has a ``done`` attribute | ||
| that can be used to break up the workflow:: | ||
| def workflow(): | ||
| run1 = run(<task-1>) | ||
| if run1.done: | ||
| something = read_result_of_task1_from_file() | ||
| if ... something ...: | ||
| run(<task-2>) | ||
| Old workflow script | ||
| =================== | ||
| .. warning:: | ||
| Please use a new-style :ref:`workflow script`! | ||
| Old-style workflow scripts contain a function: | ||
| .. function:: create_tasks() -> List[myqueue.task.Task] | ||
| .. highlight:: python | ||
| It should return a list of :class:`myqueue.task.Task` objects created with the | ||
| :func:`myqueue.task.task` helper function. Here is an example:: | ||
| from myqueue.task import task | ||
| def create_tasks(): | ||
| t1 = task('<task-1>', resources=...) | ||
| t2 = task('<task-2>', resources=...) | ||
| t3 = task('<task-3>', resources=..., | ||
| deps=['<task-1>', '<task-2>']) | ||
| return [t1, t2, t3] | ||
| where ``<task-n>`` is the name of a task. See :ref:`task examples` below. | ||
| .. _task examples: | ||
| Examples | ||
| -------- | ||
| .. seealso:: | ||
| :ref:`tasks` and :ref:`resources`. | ||
| Two equivalent ways to set the resources:: | ||
| task('prime.factor', resources='8:1h') | ||
| task('prime.factor', cores=8, tmax='1h') | ||
| Given these two tasks:: | ||
| t1 = task('mod@f1') | ||
| t2 = task('mod@f2') | ||
| here are three equivalent ways to set dependencies:: | ||
| t3 = task('mod@f3', deps=[t1, t2]) | ||
| t3 = task('mod@f3', deps=['mod@f1', 'mod@f2']) | ||
| t3 = task('mod@f3', deps='mod@f1,mod@f2') | ||
| Arguments in three equivalent ways:: | ||
| task('math@sin+3.14') | ||
| task('math@sin', args=[3.14]) | ||
| task('math@sin', args=['3.14']) | ||
| More than one argument:: | ||
| task('math@gcd+42_117') | ||
| task('math@gcd', args=[42, 117]) | ||
| same as:: | ||
| >>> import math | ||
| >>> math.gcd(42, 117) | ||
| 3 |
-7
| [mypy] | ||
| check_untyped_defs = True | ||
| disallow_untyped_defs = True | ||
| implicit_optional = True | ||
| packages = myqueue | ||
| [mypy-myqueue.test.*] | ||
| disallow_untyped_defs = False |
| {"git_version": "ceb90d6", "is_release": false} |
| [pytest] | ||
| testpaths = myqueue/ | ||
| addopts = --doctest-modules | ||
| markers = | ||
| docs: Test examples in docs |
Alert delta unavailable
Currently unable to show alert delta for PyPI packages.
354894
-22.89%75
-29.91%6073
-2.66%