Browse Source

Merge branch 'release-0.0.1'

Arkadiusz Ryś 2 years ago
parent
commit
a00ceb67ae
16 changed files with 467 additions and 197 deletions
  1. 10 0
      .dockerignore
  2. 18 0
      .editorconfig
  3. 7 0
      .gitignore
  4. 71 0
      .gitlab-ci.yml
  5. 13 0
      AUTHORS.rst
  6. 10 0
      Dockerfile
  7. 11 0
      HISTORY.rst
  8. 22 0
      LICENSE
  9. 0 92
      README.md
  10. 15 0
      README.rst
  11. 46 0
      docs/templates/pyproject.toml
  12. 49 0
      pyproject.toml
  13. 20 0
      requirements.txt
  14. 3 0
      spendpoint/__init__.py
  15. 99 105
      spendpoint/__main__.py
  16. 73 0
      tasks.py

+ 10 - 0
.dockerignore

@@ -0,0 +1,10 @@
+__pycache__/
+*$py.class
+/.git
+/.idea
+/docs
+/data
+/.dockerignore
+/.gitignore
+/.gitlab-ci.yml
+/README.md

+ 18 - 0
.editorconfig

@@ -0,0 +1,18 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.{css,html,yml,yaml,js,xml}]
+indent_size = 2
+
+[{*.log,LICENSE}]
+insert_final_newline = false
+
+[*.rst]
+indent_size = 3

+ 7 - 0
.gitignore

@@ -0,0 +1,7 @@
+__pycache__/
+venv/
+build/
+dist/
+*.egg-info
+
+*.log

+ 71 - 0
.gitlab-ci.yml

@@ -0,0 +1,71 @@
+image: docker:20.10.22
+
+variables:
+  DOCKER_DRIVER: overlay2
+  DOCKER_TLS_CERTDIR: "/certs"
+  DOCKER_HOST: tcp://docker:2376
+  # Where to publish this build's tagged working container.
+  DOCKER_SHA: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
+  DOCKER_BRANCH: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME
+  DOCKER_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
+  DOCKER_LATEST: $CI_REGISTRY_IMAGE:latest
+
+services:
+  - docker:20.10.22-dind
+
+stages:
+  - build
+  - test
+  - release
+
+before_script:
+  - docker info
+  - echo "$CI_REGISTRY_PASSWORD" | docker login -u "$CI_REGISTRY_USER" --password-stdin $CI_REGISTRY
+
+build-upload:
+  image: python:3.11
+  stage: build
+  before_script:
+    - python -V
+    - echo "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi"
+  script:
+    - pip install build twine flit
+    - FLIT_INDEX_URL=${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi flit build
+    - TWINE_PASSWORD=${CI_JOB_TOKEN} TWINE_USERNAME=gitlab-ci-token python -m twine upload --repository-url ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi dist/*
+  only:
+    - tags
+
+build-upload-pypi:
+  image: python:3.11
+  stage: build
+  before_script:
+    - python -V
+    - echo "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi"
+  script:
+    - pip install build twine flit
+    - flit build
+    - python -m twine upload dist/*
+  only:
+    - tags
+
+build_docker:
+  stage: build
+  script:
+    - docker pull $DOCKER_BRANCH || true
+    - docker build --cache-from $DOCKER_BRANCH -f ./Dockerfile --tag $DOCKER_BRANCH .
+    - docker push $DOCKER_BRANCH
+  only:
+    refs:
+      - master
+      - tags
+
+build_tagged_docker:
+  stage: build
+  script:
+    - docker pull $DOCKER_BRANCH || true
+    - docker tag $DOCKER_BRANCH $DOCKER_LATEST
+    - docker push $DOCKER_LATEST
+    - docker tag $DOCKER_BRANCH $DOCKER_TAG
+    - docker push $DOCKER_TAG
+  only:
+    - tags

+ 13 - 0
AUTHORS.rst

@@ -0,0 +1,13 @@
+=======
+Credits
+=======
+
+Development Lead
+----------------
+
+* Arkadiusz Michał Ryś <Arkadiusz.Michal.Rys@gmail.com>
+
+Contributors
+------------
+
+None yet. Why not be the first?

+ 10 - 0
Dockerfile

@@ -0,0 +1,10 @@
+ARG PYTHON_VERSION=3.11-slim-bullseye
+FROM python:${PYTHON_VERSION}
+ARG APP_HOME=/app
+ENV PYTHONUNBUFFERED=1
+ENV PYTHONDONTWRITEBYTECODE=1
+WORKDIR ${APP_HOME}
+COPY ./requirements.txt ${APP_HOME}/requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+COPY . ${APP_HOME}
+CMD ["uvicorn", "spendpoint.__main__:app", "--host", "0.0.0.0", "--port", "80", "--proxy-headers"]

+ 11 - 0
HISTORY.rst

@@ -0,0 +1,11 @@
+=======
+History
+=======
+
+0.0.1 (2023-02-28)
+------------------
+* Release example.
+
+0.0.0 (yyyy-mm-dd)
+------------------
+* No history yet.

+ 22 - 0
LICENSE

@@ -0,0 +1,22 @@
+MIT License
+
+Copyright (c) 2023, Arkadiusz Michał Ryś
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

File diff suppressed because it is too large
+ 0 - 92
README.md


+ 15 - 0
README.rst

@@ -0,0 +1,15 @@
+SpEndPoint
+##########
+
+Installation
+------------
+
+.. code-block:: shell
+
+   pip install spendpoint
+
+or
+
+.. code-block:: shell
+
+   pip install --index-url https://pip:<your-access-token>@git.rys.one/api/v4/projects/262/packages/pypi/simple --no-deps spendpoint

+ 46 - 0
docs/templates/pyproject.toml

@@ -0,0 +1,46 @@
+[build-system]
+requires = ["flit_core >=3.2,<4"]
+build-backend = "flit_core.buildapi"
+
+[project]
+name = "spendpoint"
+authors = [
+    {name = "Arkadiusz Michał Ryś", email = "Arkadiusz.Michal.Rys@gmail.com"},
+]
+readme = "README.rst"
+requires-python = ">=3.9"
+classifiers = [
+    "License :: OSI Approved :: MIT License",
+    "Programming Language :: Python :: 3",
+    "Development Status :: 2 - Pre-Alpha",
+    "Intended Audience :: Developers",
+    "Natural Language :: English",
+]
+dynamic = ["version", "description"]
+license = {file = "LICENSE"}
+keywords = ["spendpoint"]
+dependencies = [
+{%- for dependency in requirements.spendpoint %}
+    "{{ dependency }}",
+{%- endfor %}
+]
+
+[project.optional-dependencies]
+test = [
+{%- for dependency in requirements.test %}
+    "{{ dependency }}",
+{%- endfor %}
+]
+doc = [
+{%- for dependency in requirements.doc %}
+    "{{ dependency }}",
+{%- endfor %}
+]
+dev = [
+{%- for dependency in requirements.dev %}
+    "{{ dependency }}",
+{%- endfor %}
+]
+
+[project.urls]
+source = "https://git.rys.one/dtdesign/spendpoint"

+ 49 - 0
pyproject.toml

@@ -0,0 +1,49 @@
+[build-system]
+requires = ["flit_core >=3.2,<4"]
+build-backend = "flit_core.buildapi"
+
+[project]
+name = "spendpoint"
+authors = [
+    {name = "Arkadiusz Michał Ryś", email = "Arkadiusz.Michal.Rys@gmail.com"},
+]
+readme = "README.rst"
+requires-python = ">=3.9"
+classifiers = [
+    "License :: OSI Approved :: MIT License",
+    "Programming Language :: Python :: 3",
+    "Development Status :: 2 - Pre-Alpha",
+    "Intended Audience :: Developers",
+    "Natural Language :: English",
+]
+dynamic = ["version", "description"]
+license = {file = "LICENSE"}
+keywords = ["spendpoint"]
+dependencies = [
+    "fastapi~=0.92",
+    "starlette~=0.25.0",
+    "python-magic~=0.4.27",
+    "rdflib-endpoint~=0.2.7",
+    "uvicorn[standard]~=0.20.0",
+]
+
+[project.optional-dependencies]
+test = [
+    "pytest~=7.2.1",
+]
+doc = [
+    "sphinx~=6.1.3",
+]
+dev = [
+    "tox~=4.4.6",
+    "pip~=23.0.1",
+    "flit~=3.8.0",
+    "twine~=4.0.2",
+    "invoke~=2.0.0",
+    "jinja2~=3.1.2",
+    "flake8~=6.0.0",
+    "coverage~=7.2.1",
+]
+
+[project.urls]
+source = "https://git.rys.one/dtdesign/spendpoint"

+ 20 - 0
requirements.txt

@@ -0,0 +1,20 @@
+# SpEndPoint
+rdflib            ~= 6.2.0
+fastapi           ~= 0.92
+starlette         ~= 0.25.0
+python-magic      ~= 0.4.27
+rdflib-endpoint   ~= 0.2.7
+uvicorn[standard] ~= 0.20.0
+# Test
+pytest ~= 7.2.1
+# Doc
+sphinx ~= 6.1.3
+# Dev
+tox      ~= 4.4.6
+pip      ~= 23.0.1
+flit     ~= 3.8.0
+twine    ~= 4.0.2
+invoke   ~= 2.0.0
+jinja2   ~= 3.1.2
+flake8   ~= 6.0.0
+coverage ~= 7.2.1

+ 3 - 0
spendpoint/__init__.py

@@ -0,0 +1,3 @@
+"""SPARQL endpoint for ontologies."""
+__version__ = "0.0.1"
+__version_info__ = tuple((int(num) if num.isdigit() else num for num in __version__.replace("-", ".", 1).split(".")))

+ 99 - 105
spendpoint/__main__.py

@@ -1,105 +1,99 @@
-import rdflib
-from rdflib import RDF, RDFS, ConjunctiveGraph, Literal, URIRef
-from rdflib.plugins.sparql.evalutils import _eval
-
-from rdflib_endpoint import SparqlEndpoint
-
-
-def custom_concat(query_results, ctx, part, eval_part):
-    """
-    Concat 2 string and return the length as additional Length variable
-    \f
-    :param query_results:   An array with the query results objects
-    :param ctx:             <class 'rdflib.plugins.sparql.sparql.QueryContext'>
-    :param part:            Part of the query processed (e.g. Extend or BGP) <class 'rdflib.plugins.sparql.parserutils.CompValue'>
-    :param eval_part:       Part currently evaluated
-    :return:                the same query_results provided in input param, with additional results
-    """
-    argument1 = str(_eval(part.expr.expr[0], eval_part.forget(ctx, _except=part.expr._vars)))
-    argument2 = str(_eval(part.expr.expr[1], eval_part.forget(ctx, _except=part.expr._vars)))
-    evaluation = []
-    scores = []
-    concat_string = argument1 + argument2
-    reverse_string = argument2 + argument1
-    # Append the concatenated string to the results
-    evaluation.append(concat_string)
-    evaluation.append(reverse_string)
-    # Append the scores for each row of results
-    scores.append(len(concat_string))
-    scores.append(len(reverse_string))
-    # Append our results to the query_results
-    for i, result in enumerate(evaluation):
-        query_results.append(
-            eval_part.merge({part.var: Literal(result), rdflib.term.Variable(part.var + "Length"): Literal(scores[i])})
-        )
-    return query_results, ctx, part, eval_part
-
-
-def most_similar(query_results, ctx, part, eval_part):
-    """
-    Get most similar entities for a given entity
-
-    PREFIX openpredict: <https://w3id.org/um/openpredict/>
-    SELECT ?drugOrDisease ?mostSimilar ?mostSimilarScore WHERE {
-        BIND("OMIM:246300" AS ?drugOrDisease)
-        BIND(openpredict:most_similar(?drugOrDisease) AS ?mostSimilar)
-    """
-    # argumentEntity = str(_eval(part.expr.expr[0], eval_part.forget(ctx, _except=part.expr._vars)))
-    # try:
-    #     argumentLimit = str(_eval(part.expr.expr[1], eval_part.forget(ctx, _except=part.expr._vars)))
-    # except:
-    #     argumentLimit = None
-
-    # Using stub data
-    similarity_results = [{"mostSimilar": "DRUGBANK:DB00001", "score": 0.42}]
-
-    evaluation = []
-    scores = []
-    for most_similar in similarity_results:
-        evaluation.append(most_similar["mostSimilar"])
-        scores.append(most_similar["score"])
-
-    # Append our results to the query_results
-    for i, result in enumerate(evaluation):
-        query_results.append(
-            eval_part.merge({part.var: Literal(result), rdflib.term.Variable(part.var + "Score"): Literal(scores[i])})
-        )
-    return query_results, ctx, part, eval_part
-
-
-example_query = """PREFIX myfunctions: <https://w3id.org/um/sparql-functions/>
-SELECT ?concat ?concatLength WHERE {
-    BIND("First" AS ?first)
-    BIND(myfunctions:custom_concat(?first, "last") AS ?concat)
-}"""
-
-# Use ConjunctiveGraph to support nquads and graphs in SPARQL queries
-# identifier is the default graph
-g = ConjunctiveGraph(
-    # store="Oxigraph",
-    identifier=URIRef("https://w3id.org/um/sparql-functions/graph/default"),
-)
-
-# Example to add a nquad to the exposed graph
-g.add((URIRef("http://subject"), RDF.type, URIRef("http://object"), URIRef("http://graph")))
-g.add((URIRef("http://subject"), RDFS.label, Literal("foo"), URIRef("http://graph")))
-
-# Start the SPARQL endpoint based on the RDFLib Graph
-app = SparqlEndpoint(
-    graph=g,
-    functions={
-        "https://w3id.org/um/openpredict/most_similar": most_similar,
-        "https://w3id.org/um/sparql-functions/custom_concat": custom_concat,
-    },
-    title="SPARQL endpoint for RDFLib graph",
-    description="A SPARQL endpoint to serve machine learning models, or any other logic implemented in Python. \n[Source code](https://github.com/vemonet/rdflib-endpoint)",
-    version="0.1.0",
-    public_url="https://service.openpredict.137.120.31.102.nip.io/sparql",
-    cors_enabled=True,
-    example_query=example_query,
-)
-
-## Uncomment to run it directly with python app/main.py
-# if __name__ == "__main__":
-#     import uvicorn
-#     uvicorn.run(app, host="0.0.0.0", port=8000)
+import rdflib
+from rdflib import RDF, RDFS, ConjunctiveGraph, Literal, URIRef
+from rdflib.plugins.sparql.evalutils import _eval
+
+from rdflib_endpoint import SparqlEndpoint
+
+
+def custom_concat(query_results, ctx, part, eval_part):
+    """
+    Concat 2 string and return the length as additional Length variable
+    \f
+    :param query_results:   An array with the query results objects
+    :param ctx:             <class 'rdflib.plugins.sparql.sparql.QueryContext'>
+    :param part:            Part of the query processed (e.g. Extend or BGP) <class 'rdflib.plugins.sparql.parserutils.CompValue'>
+    :param eval_part:       Part currently evaluated
+    :return:                the same query_results provided in input param, with additional results
+    """
+    argument1 = str(_eval(part.expr.expr[0], eval_part.forget(ctx, _except=part.expr._vars)))
+    argument2 = str(_eval(part.expr.expr[1], eval_part.forget(ctx, _except=part.expr._vars)))
+    evaluation = []
+    scores = []
+    concat_string = argument1 + argument2
+    reverse_string = argument2 + argument1
+    # Append the concatenated string to the results
+    evaluation.append(concat_string)
+    evaluation.append(reverse_string)
+    # Append the scores for each row of results
+    scores.append(len(concat_string))
+    scores.append(len(reverse_string))
+    # Append our results to the query_results
+    for i, result in enumerate(evaluation):
+        query_results.append(
+            eval_part.merge({part.var: Literal(result), rdflib.term.Variable(part.var + "Length"): Literal(scores[i])})
+        )
+    return query_results, ctx, part, eval_part
+
+
+def most_similar(query_results, ctx, part, eval_part):
+    """
+    Get most similar entities for a given entity
+
+    PREFIX openpredict: <https://w3id.org/um/openpredict/>
+    SELECT ?drugOrDisease ?mostSimilar ?mostSimilarScore WHERE {
+        BIND("OMIM:246300" AS ?drugOrDisease)
+        BIND(openpredict:most_similar(?drugOrDisease) AS ?mostSimilar)
+    """
+    # argumentEntity = str(_eval(part.expr.expr[0], eval_part.forget(ctx, _except=part.expr._vars)))
+    # try:
+    #     argumentLimit = str(_eval(part.expr.expr[1], eval_part.forget(ctx, _except=part.expr._vars)))
+    # except:
+    #     argumentLimit = None
+
+    # Using stub data
+    similarity_results = [{"mostSimilar": "DRUGBANK:DB00001", "score": 0.42}]
+
+    evaluation = []
+    scores = []
+    for most_similar in similarity_results:
+        evaluation.append(most_similar["mostSimilar"])
+        scores.append(most_similar["score"])
+
+    # Append our results to the query_results
+    for i, result in enumerate(evaluation):
+        query_results.append(
+            eval_part.merge({part.var: Literal(result), rdflib.term.Variable(part.var + "Score"): Literal(scores[i])})
+        )
+    return query_results, ctx, part, eval_part
+
+
+example_query = """
+PREFIX myfunctions: <https://w3id.org/um/sparql-functions/>
+SELECT ?concat ?concatLength WHERE {
+    BIND("First" AS ?first)
+    BIND(myfunctions:custom_concat(?first, "last") AS ?concat)
+}
+"""
+
+g = ConjunctiveGraph(
+    identifier=URIRef("https://w3id.org/um/sparql-functions/graph/default"),
+)
+
+# Example to add a nquad to the exposed graph
+g.add((URIRef("http://subject"), RDF.type, URIRef("http://object"), URIRef("http://graph")))
+g.add((URIRef("http://subject"), RDFS.label, Literal("foo"), URIRef("http://graph")))
+
+# Start the SPARQL endpoint based on the RDFLib Graph
+app = SparqlEndpoint(
+    graph=g,
+    functions={
+        "https://w3id.org/um/openpredict/most_similar": most_similar,
+        "https://w3id.org/um/sparql-functions/custom_concat": custom_concat,
+    },
+    title="SPARQL endpoint for RDFLib graph",
+    description="A SPARQL endpoint to serve machine learning models, or any other logic implemented in Python. \n[Source code](https://github.com/vemonet/rdflib-endpoint)",
+    version="0.1.0",
+    public_url="https://service.openpredict.137.120.31.102.nip.io/sparql",
+    cors_enabled=True,
+    example_query=example_query,
+)

+ 73 - 0
tasks.py

@@ -0,0 +1,73 @@
+from pathlib import Path
+
+from invoke import task
+from jinja2 import Template
+
+
+@task(name="docs")
+def documentation(c):
+    """Build the documentation."""
+    c.run("python3 -m sphinx docs docs/build/html")
+
+
+@task
+def test(c):
+    """Run all tests under the tests directory."""
+    c.run("python3 -m unittest discover tests 'test_*' -v")
+
+
+@task(name="migrate")
+def migrate_requirements(c):
+    """Copy requirements from the requirements.txt file to pyproject.toml."""
+    lines = Path("requirements.txt").read_text().split("\n")
+    requirements = {"spendpoint": [], "test": [], "doc": [], "dev": []}
+    current = "spendpoint"
+    for line in lines:
+        if line.startswith("#"):
+            candidate = line[1:].lower().strip()
+            if candidate in requirements.keys():
+                current = candidate
+                continue
+        if line.strip() == "":
+            continue
+        requirements[current].append("".join(line.split()))
+    template = Template(Path("docs/templates/pyproject.toml").read_text())
+    Path("pyproject.toml").write_text(template.render(requirements=requirements))
+
+
+@task
+def release(c, version):
+    """"""
+    if version not in ["minor", "major", "patch"]:
+        print("Version can be either major, minor or patch.")
+        return
+
+    from spendpoint import __version_info__, __version__
+    _major, _minor, _patch = __version_info__
+
+    if version == "patch":
+        _patch = _patch + 1
+    elif version == "minor":
+        _minor = _minor + 1
+        _patch = 0
+    elif version == "major":
+        _major = _major + 1
+        _minor = 0
+        _patch = 0
+
+    c.run(f"git checkout -b release-{_major}.{_minor}.{_patch} dev")
+    c.run(f"sed -i 's/{__version__}/{_major}.{_minor}.{_patch}/g' spendpoint/__init__.py")
+    print(f"Update the readme for version {_major}.{_minor}.{_patch}.")
+    input("Press enter when ready.")
+    c.run(f"git add -u")
+    c.run(f'git commit -m "Update changelog version {_major}.{_minor}.{_patch}"')
+    c.run(f"git push --set-upstream origin release-{_major}.{_minor}.{_patch}")
+    c.run(f"git checkout main")
+    c.run(f"git merge --no-ff release-{_major}.{_minor}.{_patch}")
+    c.run(f'git tag -a {_major}.{_minor}.{_patch} -m "Release {_major}.{_minor}.{_patch}"')
+    c.run(f"git push")
+    c.run(f"git checkout dev")
+    c.run(f"git merge --no-ff release-{_major}.{_minor}.{_patch}")
+    c.run(f"git push")
+    c.run(f"git branch -d release-{_major}.{_minor}.{_patch}")
+    c.run(f"git push origin --tags")