diff --git a/.gitignore b/.gitignore index fe9b6f9c..8e332557 100644 --- a/.gitignore +++ b/.gitignore @@ -34,3 +34,5 @@ server/app/version.py # Ignore the content of the server storage server/input/ server/storage/ +test.py +/storage/ diff --git a/discovery_server/Dockerfile b/discovery_server/Dockerfile new file mode 100644 index 00000000..e0c1f1f1 --- /dev/null +++ b/discovery_server/Dockerfile @@ -0,0 +1,50 @@ +FROM python:3.11-alpine + +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 +ENV PYTHONPATH="${PYTHONPATH}:/app" + +# If we have more dependencies for the server it would make sense +# to refactor uswgi to the pyproject.toml +RUN apk update && \ + apk add --no-cache nginx supervisor gcc musl-dev linux-headers python3-dev git bash && \ + pip install uwsgi && \ + apk del git bash + + +COPY discovery_server/uwsgi.ini /etc/uwsgi/ +COPY discovery_server/supervisord.ini /etc/supervisor/conf.d/supervisord.ini +COPY discovery_server/stop-supervisor.sh /etc/supervisor/stop-supervisor.sh +RUN chmod +x /etc/supervisor/stop-supervisor.sh + +# Makes it possible to use a different configuration +ENV UWSGI_INI=/etc/uwsgi/uwsgi.ini +# object stores aren't thread-safe yet +# https://github.com/eclipse-basyx/basyx-python-sdk/issues/205 +ENV UWSGI_CHEAPER=0 +ENV UWSGI_PROCESSES=1 +ENV NGINX_MAX_UPLOAD=1M +ENV NGINX_WORKER_PROCESSES=1 +ENV LISTEN_PORT=80 +ENV CLIENT_BODY_BUFFER_SIZE=1M + +# Copy the entrypoint that will generate Nginx additional configs +COPY discovery_server/entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] + +ENV SETUPTOOLS_SCM_PRETEND_VERSION=1.0.0 + + +COPY ./discovery_server/requirements.txt . 
+RUN pip install --no-cache-dir -r requirements.txt + +COPY ./sdk /sdk +COPY ./server /app/server +COPY ./discovery_server/app /app + +WORKDIR /app +RUN pip install ../sdk + +CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.ini"] \ No newline at end of file diff --git a/discovery_server/README.md b/discovery_server/README.md new file mode 100644 index 00000000..5083f48a --- /dev/null +++ b/discovery_server/README.md @@ -0,0 +1,48 @@ +# Eclipse BaSyx Python SDK - Discovery Service + +This is a Python-based implementation of the **BaSyx Asset Administration Shell (AAS) Discovery Service**. +It provides basic discovery functionality for AAS IDs and their corresponding assets, as specified in the official [Discovery Service Specification v3.1.0_SSP-001](https://app.swaggerhub.com/apis/Plattform_i40/DiscoveryServiceSpecification/V3.1.0_SSP-001). + +## Overview + +The Discovery Service stores and retrieves relations between AAS identifiers and asset identifiers. It acts as a lookup service for resolving asset-related queries to corresponding AAS. 
+ +## Features + +| Function | Description | Example URL | +|------------------------------------------|----------------------------------------------------------|-----------------------------------------------------------------------| +| **search_all_aas_ids_by_asset_link** | Find AAS identifiers by providing asset link values | `POST http://localhost:8084/api/v3.0/lookup/shellsByAssetLink` | +| **get_all_specific_asset_ids_by_aas_id** | Return specific asset ids associated with an AAS ID | `GET http://localhost:8084/api/v3.0/lookup/shells/{aasIdentifier}` | +| **post_all_asset_links_by_id** | Register specific asset ids linked to an AAS | `POST http://localhost:8084/api/v3.0/lookup/shells/{aasIdentifier}` | +| **delete_all_asset_links_by_id** | Delete all asset links associated with a specific AAS ID | `DELETE http://localhost:8084/api/v3.0/lookup/shells/{aasIdentifier}` | +| + +## Configuration + +The service can be configured to use either: + +- **In-memory storage** (default): Temporary data storage that resets on service restart. +- **MongoDB storage**: Persistent backend storage using MongoDB. + +### Configuration via Environment Variables + +| Variable | Description | Default | +|------------------|--------------------------------------------|-----------------------------| +| `STORAGE_TYPE` | `inmemory` or `mongodb` | `inmemory` | +| `MONGODB_URI` | MongoDB connection URI | `mongodb://localhost:27017` | +| `MONGODB_DBNAME` | Name of the MongoDB database | `basyx_registry` | + +## Deployment via Docker + +A `Dockerfile` and `docker-compose.yml` are provided for simple deployment. +The container image can be built and run via: +```bash +docker compose up --build +``` +## Test + +Examples of asset links and specific asset IDs for testing purposes are provided as JSON files in the [storage](./storage) folder. + +## Acknowledgments + +This Dockerfile is inspired by the [tiangolo/uwsgi-nginx-docker](https://github.com/tiangolo/uwsgi-nginx-docker) repository. 
diff --git a/discovery_server/app/main.py b/discovery_server/app/main.py new file mode 100644 index 00000000..0092e691 --- /dev/null +++ b/discovery_server/app/main.py @@ -0,0 +1,25 @@ +import os +import sys +from server.app.interfaces.discovery import DiscoveryAPI, MongoDiscoveryStore,InMemoryDiscoveryStore + +storage_type = os.getenv("STORAGE_TYPE", "inmemory") +base_path = os.getenv("API_BASE_PATH") + +wsgi_optparams = {} + +if base_path is not None: + wsgi_optparams["base_path"] = base_path + +if storage_type == "inmemory": + application = DiscoveryAPI(InMemoryDiscoveryStore(), **wsgi_optparams) + +elif storage_type == "mongodb": + uri = os.getenv("MONGODB_URI", "mongodb://localhost:27017") + dbname = os.getenv("MONGODB_DBNAME", "basyx_registry") + + application = DiscoveryAPI(MongoDiscoveryStore(uri,dbname), **wsgi_optparams) + +else: + print(f"STORAGE_TYPE must be either inmemory or mongodb! Current value: {storage_type}", + file=sys.stderr) + diff --git a/discovery_server/compose.yml b/discovery_server/compose.yml new file mode 100644 index 00000000..56be002e --- /dev/null +++ b/discovery_server/compose.yml @@ -0,0 +1,9 @@ +services: + app: + build: + context: .. 
+ dockerfile: discovery_server/Dockerfile + ports: + - "8084:80" + environment: + - STORAGE_TYPE=inmemory diff --git a/discovery_server/entrypoint.sh b/discovery_server/entrypoint.sh new file mode 100644 index 00000000..72239440 --- /dev/null +++ b/discovery_server/entrypoint.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env sh +set -e + +# Get the maximum upload file size for Nginx, default to 0: unlimited +USE_NGINX_MAX_UPLOAD=${NGINX_MAX_UPLOAD:-0} + +# Get the number of workers for Nginx, default to 1 +USE_NGINX_WORKER_PROCESSES=${NGINX_WORKER_PROCESSES:-1} + +# Set the max number of connections per worker for Nginx, if requested +# Cannot exceed worker_rlimit_nofile, see NGINX_WORKER_OPEN_FILES below +NGINX_WORKER_CONNECTIONS=${NGINX_WORKER_CONNECTIONS:-1024} + +# Get the listen port for Nginx, default to 80 +USE_LISTEN_PORT=${LISTEN_PORT:-80} + +# Get the client_body_buffer_size for Nginx, default to 1M +USE_CLIENT_BODY_BUFFER_SIZE=${CLIENT_BODY_BUFFER_SIZE:-1M} + +# Create the conf.d directory if it doesn't exist +if [ ! 
-d /etc/nginx/conf.d ]; then + mkdir -p /etc/nginx/conf.d +fi + +if [ -f /app/nginx.conf ]; then + cp /app/nginx.conf /etc/nginx/nginx.conf +else + content='user nginx;\n' + # Set the number of worker processes in Nginx + content=$content"worker_processes ${USE_NGINX_WORKER_PROCESSES};\n" + content=$content'error_log /var/log/nginx/error.log warn;\n' + content=$content'pid /var/run/nginx.pid;\n' + content=$content'events {\n' + content=$content" worker_connections ${NGINX_WORKER_CONNECTIONS};\n" + content=$content'}\n' + content=$content'http {\n' + content=$content' include /etc/nginx/mime.types;\n' + content=$content' default_type application/octet-stream;\n' + content=$content' log_format main '"'\$remote_addr - \$remote_user [\$time_local] \"\$request\" '\n" + content=$content' '"'\$status \$body_bytes_sent \"\$http_referer\" '\n" + content=$content' '"'\"\$http_user_agent\" \"\$http_x_forwarded_for\"';\n" + content=$content' access_log /var/log/nginx/access.log main;\n' + content=$content' sendfile on;\n' + content=$content' keepalive_timeout 65;\n' + content=$content' include /etc/nginx/conf.d/*.conf;\n' + content=$content'}\n' + content=$content'daemon off;\n' + # Set the max number of open file descriptors for Nginx workers, if requested + if [ -n "${NGINX_WORKER_OPEN_FILES}" ] ; then + content=$content"worker_rlimit_nofile ${NGINX_WORKER_OPEN_FILES};\n" + fi + # Save generated /etc/nginx/nginx.conf + printf "$content" > /etc/nginx/nginx.conf + + content_server='server {\n' + content_server=$content_server" listen ${USE_LISTEN_PORT};\n" + content_server=$content_server' location / {\n' + content_server=$content_server' include uwsgi_params;\n' + content_server=$content_server' uwsgi_pass unix:///tmp/uwsgi.sock;\n' + content_server=$content_server' }\n' + content_server=$content_server'}\n' + # Save generated server /etc/nginx/conf.d/nginx.conf + printf "$content_server" > /etc/nginx/conf.d/nginx.conf + + # # Generate additional configuration + printf 
"client_max_body_size $USE_NGINX_MAX_UPLOAD;\n" > /etc/nginx/conf.d/upload.conf + printf "client_body_buffer_size $USE_CLIENT_BODY_BUFFER_SIZE;\n" > /etc/nginx/conf.d/body-buffer-size.conf + printf "add_header Access-Control-Allow-Origin *;\n" > /etc/nginx/conf.d/cors-header.conf +fi + +exec "$@" diff --git a/discovery_server/requirements.txt b/discovery_server/requirements.txt new file mode 100644 index 00000000..376baed5 --- /dev/null +++ b/discovery_server/requirements.txt @@ -0,0 +1,2 @@ +Werkzeug +pymongo diff --git a/discovery_server/stop-supervisor.sh b/discovery_server/stop-supervisor.sh new file mode 100644 index 00000000..9a953c94 --- /dev/null +++ b/discovery_server/stop-supervisor.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env sh + +printf "READY\n" + +while read line; do + echo "Processing Event: $line" >&2 + kill $PPID +done < /dev/stdin diff --git a/discovery_server/storage/AssetIdsFullExample.json b/discovery_server/storage/AssetIdsFullExample.json new file mode 100644 index 00000000..720d106f --- /dev/null +++ b/discovery_server/storage/AssetIdsFullExample.json @@ -0,0 +1,62 @@ +[ + { + "semanticId": { + "type": "ExternalReference", + "keys": [ + { + "type": "GlobalReference", + "value": "ud800;udbff3udbffUud800Bud800qudbffhudbffTd6^dnTudbff5?Aoudbff36Xud800>udbffUudbff\"Hjeud800Fudbff;udbffC?5q]udbff8aIudbffkp[?sud800kXljub;Gudbffqud8003ud8005udbff[>Z6d_udbffO=hxs R9<_pudbffo" + } + ], + "referredSemanticId": { + "type": "ExternalReference", + "keys": [ + { + "type": "GlobalReference", + "value": "ooOud800pqudbfffud800b:4udbffiudbffudbffd_ud800sJudbffOudbffiB:udbff@pEudbffM;8ud800mS;udbff3ud800q8udbff^udbffmDhFttgudbffrudbffhudbffrEud800e" + } + ] + } + } + ], + "name": "ud800Vud800?ud800tudbff1Ah_ud8003udbffZud800d5WAud800ScMIud800e>", + "value": "udbffBudbffSud800udbffn%ud800kudbffa:Tcfudbff?udbff?ud8005udbffZudbff_ud800iud800qq.@Zud800jmludbffFB<:Wfud800=audbffludbffailudbff?ud800uLudbff7ud800GJqG'ud800kudbffrudbff>>RudbffQudbff=udbffQS]UudbffOZS", + 
"externalSubjectId": { + "type": "ExternalReference", + "keys": [ + { + "type": "GlobalReference", + "value": "^7<\\agVu_%ud800:pD<-ud800j9udbffkiKCudbffVudbffjudbffDudbffiudbffZsud800WhLG:tQfLP" + } + ], + "referredSemanticId": { + "type": "ExternalReference", + "keys": [ + { + "type": "GlobalReference", + "value": "]Pud800DudbffY[0Y", + "value": "udbffBudbffSud800udbffn%ud800kudbffa:Tcfudbff?udbff?ud8005udbffZudbff_ud800iud800qq.@Zud800jmludbffFB<:Wfud800=audbffludbffailudbff?ud800uLudbff7ud800GJqG'ud800kudbffrudbff>>RudbffQudbff=udbffQS]UudbffOZS" + } +] \ No newline at end of file diff --git a/discovery_server/supervisord.ini b/discovery_server/supervisord.ini new file mode 100644 index 00000000..d73d9801 --- /dev/null +++ b/discovery_server/supervisord.ini @@ -0,0 +1,27 @@ +[supervisord] +nodaemon=true + +[program:uwsgi] +command=/usr/local/bin/uwsgi --ini /etc/uwsgi/uwsgi.ini +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +startsecs = 0 +autorestart=false +# may make sense to have autorestart enabled in production + +[program:nginx] +command=/usr/sbin/nginx +stdout_logfile=/var/log/nginx.out.log +stdout_logfile_maxbytes=0 +stderr_logfile=/var/log/nginx.err.log +stderr_logfile_maxbytes=0 +stopsignal=QUIT +startsecs = 0 +autorestart=false +# may make sense to have autorestart enabled in production + +[eventlistener:quit_on_failure] +events=PROCESS_STATE_STOPPED,PROCESS_STATE_EXITED,PROCESS_STATE_FATAL +command=/etc/supervisor/stop-supervisor.sh diff --git a/discovery_server/uwsgi.ini b/discovery_server/uwsgi.ini new file mode 100644 index 00000000..9c54ae1c --- /dev/null +++ b/discovery_server/uwsgi.ini @@ -0,0 +1,9 @@ +[uwsgi] +wsgi-file = /app/main.py +socket = /tmp/uwsgi.sock +chown-socket = nginx:nginx +chmod-socket = 664 +hook-master-start = unix_signal:15 gracefully_kill_them_all +need-app = true +die-on-term = true +show-config = false diff --git a/registry_server/Dockerfile 
b/registry_server/Dockerfile new file mode 100644 index 00000000..344ddd0e --- /dev/null +++ b/registry_server/Dockerfile @@ -0,0 +1,49 @@ +FROM python:3.11-alpine + +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 + +# If we have more dependencies for the server it would make sense +# to refactor uswgi to the pyproject.toml +RUN apk update && \ + apk add --no-cache nginx supervisor gcc musl-dev linux-headers python3-dev git bash && \ + pip install uwsgi && \ + apk del git bash + + +COPY registry_server/uwsgi.ini /etc/uwsgi/ +COPY registry_server/supervisord.ini /etc/supervisor/conf.d/supervisord.ini +COPY registry_server/stop-supervisor.sh /etc/supervisor/stop-supervisor.sh +RUN chmod +x /etc/supervisor/stop-supervisor.sh + +# Makes it possible to use a different configuration +ENV UWSGI_INI=/etc/uwsgi/uwsgi.ini +# object stores aren't thread-safe yet +# https://github.com/eclipse-basyx/basyx-python-sdk/issues/205 +ENV UWSGI_CHEAPER=0 +ENV UWSGI_PROCESSES=1 +ENV NGINX_MAX_UPLOAD=1M +ENV NGINX_WORKER_PROCESSES=1 +ENV LISTEN_PORT=80 +ENV CLIENT_BODY_BUFFER_SIZE=1M + +# Copy the entrypoint that will generate Nginx additional configs +COPY registry_server/entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] + +ENV SETUPTOOLS_SCM_PRETEND_VERSION=1.0.0 + + +COPY ./registry_server/requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +COPY ./sdk /sdk +COPY ./server /server +COPY ./registry_server/app /app + +WORKDIR /app +RUN pip install ../sdk + +CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.ini"] \ No newline at end of file diff --git a/registry_server/README.md b/registry_server/README.md new file mode 100644 index 00000000..6c62e506 --- /dev/null +++ b/registry_server/README.md @@ -0,0 +1,66 @@ +# Eclipse BaSyx Python SDK - Registry Service + +This is a Python-based implementation of the **BaSyx Asset Administration Shell (AAS) Registry Service**. 
+It provides basic registry functionality for AAS and submodels descriptors, as specified in the official [Asset Administration Shell Registry Service Specification v3.1.0_SSP-001](https://app.swaggerhub.com/apis/Plattform_i40/AssetAdministrationShellRegistryServiceSpecification/V3.1.0_SSP-001) and [Submodel Registry Service Specification v3.1.0_SSP-001](https://app.swaggerhub.com/apis/Plattform_i40/SubmodelRegistryServiceSpecification/V3.1.0_SSP-001). + +## Overview + +The Registry Service provides the endpoint for a given AAS-ID or Submodel-ID. Such an endpoint for an AAS and the related Submodel-IDs make the AAS and the submodels with their submodelElements accessible. + + + +## Features +# AAS Registry: +| Function | Description | Example URL | +|--------------------------------------------------|----------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| **GetAllAssetAdministrationShellDescriptors** | Return all AAS descriptor | `GET http://localhost:8083/api/v3.0/shell-descriptors` | +| **GetAssetAdministrationShellDescriptorById** | Return a specific AAS descriptor | `GET http://localhost:8083/api/v3.0/shell-descriptors/{aasIdentifier}` | +| **PostAssetAdministrationShellDescriptor** | Register/create a new AAS descriptor | `POST http://localhost:8083/api/v3.0/shell-descriptors` | +| **PutAssetAdministrationShellDescriptorById** | Update an existing AAS descriptor | `PUT http://localhost:8083/api/v3.0/shell-descriptors/{aasIdentifier}` | +| **DeleteAssetAdministrationShellDescriptorById** | Delete an AAS descriptor by ID | `DELETE http://localhost:8083/api/v3.0/shell-descriptors/{aasIdentifier}` | +| **GetSubmodelDescriptorsThroughSuperPath** | Return all submodel descriptors under AAS descriptor | `GET http://localhost:8083/api/v3.0/shell-descriptors/{aasIdentifier}/submodel-descriptors` | +| 
**PostSubmodelDescriptorThroughSuperPath** | Register/create a new submodel descriptor under AAS descriptor | `POST http://localhost:8083/api/v3.0/shell-descriptors/{aasIdentifier}/submodel-descriptors` | +| **GetSubmodelDescriptorThroughSuperPath** | Return a specific submodel descriptor under AAS descriptor | `GET http://localhost:8083/api/v3.0/shell-descriptors/{aasIdentifier}/submodel-descriptors/{submodelIdentifier}` | +| **PutSubmodelDescriptorThroughSuperPath** | Update a specific submodel descriptor under AAS descriptor | `PUT http://localhost:8083/api/v3.0/shell-descriptors/{aasIdentifier}/submodel-descriptors/{submodelIdentifier}` | +| **DeleteSubmodelDescriptorThroughSuperPath** | Delete a specific submodel descriptor under AAS descriptor | `DELETE http://localhost:8083/api/v3.0/shell-descriptors/{aasIdentifier}/submodel-descriptors/{submodelIdentifier}` | +| **GetDescription** | Return the self‑description of the AAS registry service | `GET http://localhost:8083/api/v3.0/description` | + +# Submodel Registry: +| Function | Description | Example URL | +|----------------------------------|--------------------------------------------------------------|-----------------------------------------------------------------------------------| +| **GetAllSubmodelDescriptors** | Return all submodel descriptors | `GET http://localhost:8083/api/v3.0/submodel-descriptors` | +| **PostSubmodelDescriptor** | Register/create a new submodel descriptor | `POST http://localhost:8083/api/v3.0/submodel-descriptors` | +| **GetSubmodelDescriptorById** | Return a specific submodel descriptor | `GET http://localhost:8083/api/v3.0/submodel-descriptors/{submodelIdentifier}` | +| **PutSubmodelDescriptorById** | Update a specific submodel descriptor | `PUT http://localhost:8083/api/v3.0/submodel-descriptors/{submodelIdentifier}` | +| **DeleteSubmodelDescriptorById** | Delete a specific submodel descriptor | `DELETE 
http://localhost:8083/api/v3.0/submodel-descriptors/{submodelIdentifier}` | +| **GetDescription** | Return the self‑description of the submodel registry service | `GET http://localhost:8083/api/v3.0/description` | + + + +## Configuration + +The container can be configured via environment variables: + +- `API_BASE_PATH` determines the base path under which all other API paths are made available. Default: `/api/v3.0` +- `STORAGE_TYPE` can be one of `LOCAL_FILE_READ_ONLY` or `LOCAL_FILE_BACKEND`: + - When set to `LOCAL_FILE_READ_ONLY` (the default), the server will read and serve JSON files from the storage directory. The files are not modified, all changes done via the API are only stored in memory. + - When instead set to `LOCAL_FILE_BACKEND`, the server makes use of the [LocalFileBackend](https://github.com/eclipse-basyx/basyx-python-sdk/tree/main/backend/basyx_backend/local_file), where AAS and Submodels descriptors are persistently stored as JSON files. +- `STORAGE_PATH` sets the directory to read the files from *within the container*. If you bind your files to a directory different from the default `/storage`, you can use this variable to adjust the server accordingly. + + +## Deployment via Docker + +A `Dockerfile` and `docker-compose.yml` are provided for simple deployment. +The container image can be built and run via: +```bash +docker compose up --build +``` + +## Test + +An example descriptor for testing purposes is provided as a JSON file in the [storage](./storage) folder. + +## Acknowledgments + +This Dockerfile is inspired by the [tiangolo/uwsgi-nginx-docker](https://github.com/tiangolo/uwsgi-nginx-docker) repository. 
+ diff --git a/registry_server/app/main.py b/registry_server/app/main.py new file mode 100644 index 00000000..b182118e --- /dev/null +++ b/registry_server/app/main.py @@ -0,0 +1,27 @@ +import sys +import os +sys.path.insert(0, "/") +from basyx.aas.backend.local_file import LocalFileObjectStore +from basyx.aas import model +from server.app.interfaces.registry import RegistryAPI + +storage_path = os.getenv("STORAGE_PATH", "/storage") +storage_type = os.getenv("STORAGE_TYPE", "LOCAL_FILE_READ_ONLY") +base_path = os.getenv("API_BASE_PATH") + +wsgi_optparams = {} + +if base_path is not None: + wsgi_optparams["base_path"] = base_path + +if storage_type == "LOCAL_FILE_BACKEND": + application = RegistryAPI(LocalFileObjectStore(storage_path), **wsgi_optparams) + +elif storage_type == "LOCAL_FILE_READ_ONLY": + object_store: model.DictObjectStore = model.DictObjectStore() + + application = RegistryAPI(object_store, **wsgi_optparams) + +else: + print(f"STORAGE_TYPE must be either LOCAL_FILE_BACKEND or LOCAL_FILE_READ_ONLY! Current value: {storage_type}", + file=sys.stderr) \ No newline at end of file diff --git a/registry_server/compose.yml b/registry_server/compose.yml new file mode 100644 index 00000000..99f3e3cc --- /dev/null +++ b/registry_server/compose.yml @@ -0,0 +1,9 @@ +services: + app: + build: + context: ..
+ dockerfile: registry_server/Dockerfile + ports: + - "8083:80" + volumes: + - ./storage:/storage diff --git a/registry_server/entrypoint.sh b/registry_server/entrypoint.sh new file mode 100644 index 00000000..522d4fca --- /dev/null +++ b/registry_server/entrypoint.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env sh +set -e + +# Get the maximum upload file size for Nginx, default to 0: unlimited +USE_NGINX_MAX_UPLOAD=${NGINX_MAX_UPLOAD:-0} + +# Get the number of workers for Nginx, default to 1 +USE_NGINX_WORKER_PROCESSES=${NGINX_WORKER_PROCESSES:-1} + +# Set the max number of connections per worker for Nginx, if requested +# Cannot exceed worker_rlimit_nofile, see NGINX_WORKER_OPEN_FILES below +NGINX_WORKER_CONNECTIONS=${NGINX_WORKER_CONNECTIONS:-1024} + +# Get the listen port for Nginx, default to 80 +USE_LISTEN_PORT=${LISTEN_PORT:-80} + +# Get the client_body_buffer_size for Nginx, default to 1M +USE_CLIENT_BODY_BUFFER_SIZE=${CLIENT_BODY_BUFFER_SIZE:-1M} + +# Create the conf.d directory if it doesn't exist +if [ ! 
-d /etc/nginx/conf.d ]; then + mkdir -p /etc/nginx/conf.d +fi + +if [ -f /app/nginx.conf ]; then + cp /app/nginx.conf /etc/nginx/nginx.conf +else + content='user nginx;\n' + # Set the number of worker processes in Nginx + content=$content"worker_processes ${USE_NGINX_WORKER_PROCESSES};\n" + content=$content'error_log /var/log/nginx/error.log warn;\n' + content=$content'pid /var/run/nginx.pid;\n' + content=$content'events {\n' + content=$content" worker_connections ${NGINX_WORKER_CONNECTIONS};\n" + content=$content'}\n' + content=$content'http {\n' + content=$content' include /etc/nginx/mime.types;\n' + content=$content' default_type application/octet-stream;\n' + content=$content' log_format main '"'\$remote_addr - \$remote_user [\$time_local] \"\$request\" '\n" + content=$content' '"'\$status \$body_bytes_sent \"\$http_referer\" '\n" + content=$content' '"'\"\$http_user_agent\" \"\$http_x_forwarded_for\"';\n" + content=$content' access_log /var/log/nginx/access.log main;\n' + content=$content' sendfile on;\n' + content=$content' keepalive_timeout 65;\n' + content=$content' include /etc/nginx/conf.d/*.conf;\n' + content=$content'}\n' + content=$content'daemon off;\n' + # Set the max number of open file descriptors for Nginx workers, if requested + if [ -n "${NGINX_WORKER_OPEN_FILES}" ] ; then + content=$content"worker_rlimit_nofile ${NGINX_WORKER_OPEN_FILES};\n" + fi + # Save generated /etc/nginx/nginx.conf + printf "$content" > /etc/nginx/nginx.conf + + content_server='server {\n' + content_server=$content_server" listen ${USE_LISTEN_PORT};\n" + content_server=$content_server' location / {\n' + content_server=$content_server' include uwsgi_params;\n' + content_server=$content_server' uwsgi_pass unix:///tmp/uwsgi.sock;\n' + content_server=$content_server' }\n' + content_server=$content_server'}\n' + # Save generated server /etc/nginx/conf.d/nginx.conf + printf "$content_server" > /etc/nginx/conf.d/nginx.conf + + # # Generate additional configuration + printf 
"client_max_body_size $USE_NGINX_MAX_UPLOAD;\n" > /etc/nginx/conf.d/upload.conf + printf "client_body_buffer_size $USE_CLIENT_BODY_BUFFER_SIZE;\n" > /etc/nginx/conf.d/body-buffer-size.conf + printf "add_header Access-Control-Allow-Origin *;\n" > /etc/nginx/conf.d/cors-header.conf +fi + +exec "$@" \ No newline at end of file diff --git a/registry_server/requirements.txt b/registry_server/requirements.txt new file mode 100644 index 00000000..9c12eecd --- /dev/null +++ b/registry_server/requirements.txt @@ -0,0 +1 @@ +Werkzeug diff --git a/registry_server/stop-supervisor.sh b/registry_server/stop-supervisor.sh new file mode 100644 index 00000000..9a953c94 --- /dev/null +++ b/registry_server/stop-supervisor.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env sh + +printf "READY\n" + +while read line; do + echo "Processing Event: $line" >&2 + kill $PPID +done < /dev/stdin diff --git a/registry_server/storage/descriptorCompleteExample.json b/registry_server/storage/descriptorCompleteExample.json new file mode 100644 index 00000000..f762aa83 --- /dev/null +++ b/registry_server/storage/descriptorCompleteExample.json @@ -0,0 +1,71 @@ +{ + "id": "https://example.org/aas/motor", + "endpoints": [ + { + "protocolInformation": { + "href": "https://localhost:1234/api/v3.0/aas", + "endpointProtocol": "HTTP", + "endpointProtocolVersion": [ + "1.1" + ] + }, + "interface": "AAS-3.0" + }, + { + "protocolInformation": { + "href": "opc.tcp://localhost:4840" + }, + "interface": "AAS-3.0" + }, + { + "protocolInformation": { + "href": "https://localhost:5678", + "endpointProtocol": "HTTP", + "endpointProtocolVersion": [ + "1.1" + ], + "subprotocol": "OPC UA Basic SOAP", + "subprotocolBody": "ns=2;s=MyAAS", + "subprotocolBodyEncoding": "application/soap+xml" + }, + "interface": "AAS-3.0" + } + ], + "submodelDescriptors":[ + { + "id": "https://admin-shell.io/zvei/nameplate/1/0/Nameplate", + "endpoints": [ + { + "href": { + "href": "https://localhost:1234/api/v3.0/submodel", + "endpointProtocol": "HTTP", + 
"endpointProtocolVersion": [ + "1.1" + ] + }, + "interface": "AAS-3.0" + }, + { + "protocolInformation": { + "href": "opc.tcp://localhost:4840" + }, + "interface": "AAS-3.0" + }, + { + "protocolInformation": { + "href": "https://localhost:5678", + "endpointProtocol": "HTTP", + "endpointProtocolVersion": [ + "1.1" + ], + "subprotocol": "OPC UA Basic SOAP", + "subprotocolBody": "ns=2;s=MyAAS", + "subprotocolBodyEncoding": "application/soap+xml" + }, + "interface": "AAS-3.0" + } + ] + } + + ] +} \ No newline at end of file diff --git a/registry_server/supervisord.ini b/registry_server/supervisord.ini new file mode 100644 index 00000000..d73d9801 --- /dev/null +++ b/registry_server/supervisord.ini @@ -0,0 +1,27 @@ +[supervisord] +nodaemon=true + +[program:uwsgi] +command=/usr/local/bin/uwsgi --ini /etc/uwsgi/uwsgi.ini +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +startsecs = 0 +autorestart=false +# may make sense to have autorestart enabled in production + +[program:nginx] +command=/usr/sbin/nginx +stdout_logfile=/var/log/nginx.out.log +stdout_logfile_maxbytes=0 +stderr_logfile=/var/log/nginx.err.log +stderr_logfile_maxbytes=0 +stopsignal=QUIT +startsecs = 0 +autorestart=false +# may make sense to have autorestart enabled in production + +[eventlistener:quit_on_failure] +events=PROCESS_STATE_STOPPED,PROCESS_STATE_EXITED,PROCESS_STATE_FATAL +command=/etc/supervisor/stop-supervisor.sh diff --git a/registry_server/uwsgi.ini b/registry_server/uwsgi.ini new file mode 100644 index 00000000..f333b229 --- /dev/null +++ b/registry_server/uwsgi.ini @@ -0,0 +1,10 @@ +[uwsgi] +wsgi-file = /app/main.py +socket = /tmp/uwsgi.sock +chown-socket = nginx:nginx +chmod-socket = 664 +hook-master-start = unix_signal:15 gracefully_kill_them_all +need-app = true +die-on-term = true +show-config = false +logto = /tmp/uwsgi.log diff --git a/server/app/adapter/__init__.py b/server/app/adapter/__init__.py new file mode 100644 index 
00000000..e69de29b diff --git a/server/app/adapter/jsonization.py b/server/app/adapter/jsonization.py new file mode 100644 index 00000000..ca5fbb9e --- /dev/null +++ b/server/app/adapter/jsonization.py @@ -0,0 +1,336 @@ +from typing import Dict, Set, Optional, Type, Callable +import logging + +from basyx.aas import model +from basyx.aas.adapter._generic import ASSET_KIND_INVERSE, PathOrIO, ASSET_KIND, JSON_AAS_TOP_LEVEL_KEYS_TO_TYPES +from basyx.aas.adapter.json import AASToJsonEncoder +from basyx.aas.adapter.json.json_deserialization import ( + _get_ts, + AASFromJsonDecoder, + read_aas_json_file_into, + ) + +import app.model as server_model + +logger = logging.getLogger(__name__) + +JSON_SERVER_AAS_TOP_LEVEL_KEYS_TO_TYPES = JSON_AAS_TOP_LEVEL_KEYS_TO_TYPES + ( + ('assetAdministrationShellDescriptors', server_model.AssetAdministrationShellDescriptor), + ('submodelDescriptors', server_model.SubmodelDescriptor) +) + + +class ServerAASFromJsonDecoder(AASFromJsonDecoder): + @classmethod + def _get_aas_class_parsers(cls) -> Dict[str, Callable[[Dict[str, object]], object]]: + aas_class_parsers = super()._get_aas_class_parsers() + aas_class_parsers.update({ + 'AssetAdministrationShellDescriptor': cls._construct_asset_administration_shell_descriptor, + 'SubmodelDescriptor': cls._construct_submodel_descriptor, + 'AssetLink': cls._construct_asset_link, + 'ProtocolInformation': cls._construct_protocol_information, + 'Endpoint': cls._construct_endpoint + }) + return aas_class_parsers + + # ################################################################################################## + # Utility Methods used in constructor methods to add general attributes (from abstract base classes) + # ################################################################################################## + + @classmethod + def _amend_abstract_attributes(cls, obj: object, dct: Dict[str, object]) -> None: + super()._amend_abstract_attributes(obj, dct) + + if isinstance(obj, 
server_model.Descriptor): + if 'description' in dct: + obj.description = cls._construct_lang_string_set(_get_ts(dct, 'description', list), + model.MultiLanguageTextType) + if 'displayName' in dct: + obj.display_name = cls._construct_lang_string_set(_get_ts(dct, 'displayName', list), + model.MultiLanguageNameType) + if 'extensions' in dct: + for extension in _get_ts(dct, 'extensions', list): + obj.extension.add(cls._construct_extension(extension)) + + @classmethod + def _construct_asset_administration_shell_descriptor( + cls, dct: Dict[str, object], + object_class=server_model.AssetAdministrationShellDescriptor) -> server_model.AssetAdministrationShellDescriptor: + ret = object_class(id_=_get_ts(dct, 'id', str)) + cls._amend_abstract_attributes(ret, dct) + if 'administration' in dct: + ret.administration = cls._construct_administrative_information(_get_ts(dct, 'administration', dict)) + if 'assetKind' in dct: + ret.asset_kind = ASSET_KIND_INVERSE[_get_ts(dct, 'assetKind', str)] + if 'assetType' in dct: + ret.asset_type = _get_ts(dct, 'assetType', str) + global_asset_id = None + if 'globalAssetId' in dct: + ret.global_asset_id = _get_ts(dct, 'globalAssetId', str) + specific_asset_id = set() + if 'specificAssetIds' in dct: + for desc_data in _get_ts(dct, "specificAssetIds", list): + specific_asset_id.add(cls._construct_specific_asset_id(desc_data, model.SpecificAssetId)) + if 'endpoints' in dct: + for endpoint_dct in _get_ts(dct, 'endpoints', list): + if 'protocolInformation' in endpoint_dct: + ret.endpoints.append( + cls._construct_endpoint(endpoint_dct, + server_model.Endpoint)) + elif 'href' in endpoint_dct: + protocol_info = server_model.ProtocolInformation( + href=_get_ts(endpoint_dct['href'], 'href', str), + endpoint_protocol=_get_ts(endpoint_dct['href'], + 'endpointProtocol', + str) if 'endpointProtocol' in + endpoint_dct[ + 'href'] else None, + endpoint_protocol_version=_get_ts( + endpoint_dct['href'], + 'endpointProtocolVersion', + list) if 
'endpointProtocolVersion' in + endpoint_dct['href'] else None + ) + ret.endpoints.append(server_model.Endpoint( + protocol_information=protocol_info, + interface=_get_ts(endpoint_dct, 'interface', + str))) + if 'idShort' in dct: + ret.id_short = _get_ts(dct, 'idShort', str) + if 'submodelDescriptors' in dct: + for sm_dct in _get_ts(dct, 'submodelDescriptors', list): + ret.submodel_descriptors.append(cls._construct_submodel_descriptor( + sm_dct, + server_model.SubmodelDescriptor + )) + return ret + + @classmethod + def _construct_protocol_information(cls, dct: Dict[str, object], + object_class=server_model.ProtocolInformation) -> server_model.ProtocolInformation: + ret = object_class( + href=_get_ts(dct, 'href', str), + endpoint_protocol=_get_ts(dct, 'endpointProtocol', + str) if 'endpointProtocol' in dct else None, + endpoint_protocol_version=_get_ts(dct, + 'endpointProtocolVersion', + list) if 'endpointProtocolVersion' in dct else None, + subprotocol=_get_ts(dct, 'subprotocol', + str) if 'subprotocol' in dct else None, + subprotocol_body=_get_ts(dct, 'subprotocolBody', + str) if 'subprotocolBody' in dct else None, + subprotocol_body_encoding=_get_ts(dct, + 'subprotocolBodyEncoding', + str) if 'subprotocolBodyEncoding' in dct else None + ) + return ret + + @classmethod + def _construct_endpoint(cls, dct: Dict[str, object], + object_class=server_model.Endpoint) -> server_model.Endpoint: + ret = object_class( + protocol_information=cls._construct_protocol_information( + _get_ts(dct, 'protocolInformation', dict), + server_model.ProtocolInformation + ), + interface=_get_ts(dct, 'interface', + str) + ) + cls._amend_abstract_attributes(ret, dct) + return ret + + @classmethod + def _construct_submodel_descriptor( + cls, dct: Dict[str, object], + object_class=server_model.SubmodelDescriptor) -> server_model.SubmodelDescriptor: + ret = object_class(id_=_get_ts(dct, 'id', str), + endpoints=[]) + cls._amend_abstract_attributes(ret, dct) + for endpoint_dct in _get_ts(dct, 
'endpoints', list): + if 'protocolInformation' in endpoint_dct: + ret.endpoints.append( + cls._construct_endpoint(endpoint_dct, + server_model.Endpoint)) + elif 'href' in endpoint_dct: + protocol_info = server_model.ProtocolInformation( + href=_get_ts(endpoint_dct['href'], 'href', str), + endpoint_protocol=_get_ts(endpoint_dct['href'], + 'endpointProtocol', + str) if 'endpointProtocol' in + endpoint_dct[ + 'href'] else None, + endpoint_protocol_version=_get_ts( + endpoint_dct['href'], + 'endpointProtocolVersion', + list) if 'endpointProtocolVersion' in + endpoint_dct['href'] else None + ) + ret.endpoints.append(server_model.Endpoint( + protocol_information=protocol_info, + interface=_get_ts(endpoint_dct, 'interface', + str))) + if 'administration' in dct: + ret.administration = cls._construct_administrative_information( + _get_ts(dct, 'administration', dict)) + if 'idShort' in dct: + ret.id_short = _get_ts(dct, 'idShort', str) + if 'semanticId' in dct: + ret.semantic_id = cls._construct_reference(_get_ts(dct, 'semanticId', dict)) + if 'supplementalSemanticIds' in dct: + for ref in _get_ts(dct, 'supplementalSemanticIds', list): + ret.supplemental_semantic_id.append(cls._construct_reference(ref)) + return ret + + @classmethod + def _construct_asset_link( + cls, dct: Dict[str, object], object_class=server_model.AssetLink) -> server_model.AssetLink: + ret = object_class(name=_get_ts(dct, 'name', str), + value=_get_ts(dct, 'value', str)) + return ret + + +class ServerStrictAASFromJsonDecoder(ServerAASFromJsonDecoder): + """ + A strict version of the AASFromJsonDecoder class for deserializing Asset Administration Shell data from the + official JSON format + + This version has set ``failsafe = False``, which will lead to Exceptions raised for every missing attribute or wrong + object type. + """ + failsafe = False + + +class ServerStrippedAASFromJsonDecoder(ServerAASFromJsonDecoder): + """ + Decoder for stripped JSON objects. Used in the HTTP adapter. 
+ """ + stripped = True + + +class ServerStrictStrippedAASFromJsonDecoder(ServerStrictAASFromJsonDecoder, ServerStrippedAASFromJsonDecoder): + """ + Non-failsafe decoder for stripped JSON objects. + """ + pass + + +def read_server_aas_json_file_into(object_store: model.AbstractObjectStore, file: PathOrIO, + replace_existing: bool = False, + ignore_existing: bool = False, failsafe: bool = True, stripped: bool = False, + decoder: Optional[Type[AASFromJsonDecoder]] = None) -> Set[model.Identifier]: + return read_aas_json_file_into(object_store=object_store, file=file, replace_existing=replace_existing, + ignore_existing=ignore_existing, failsafe=failsafe, stripped=stripped, + decoder=decoder, keys_to_types=JSON_SERVER_AAS_TOP_LEVEL_KEYS_TO_TYPES) + + +class ServerAASToJsonEncoder(AASToJsonEncoder): + + @classmethod + def _get_aas_class_serializers(cls) -> Dict[Type, Callable]: + serializers = super()._get_aas_class_serializers() + serializers.update({ + server_model.AssetAdministrationShellDescriptor: cls._asset_administration_shell_descriptor_to_json, + server_model.SubmodelDescriptor: cls._submodel_descriptor_to_json, + server_model.Endpoint: cls._endpoint_to_json, + server_model.ProtocolInformation: cls._protocol_information_to_json, + server_model.AssetLink: cls._asset_link_to_json + }) + return serializers + + @classmethod + def _abstract_classes_to_json(cls, obj: object) -> Dict[str, object]: + data: Dict[str, object] = super()._abstract_classes_to_json(obj) + if isinstance(obj, server_model.Descriptor): + if obj.description: + data['description'] = obj.description + if obj.display_name: + data['displayName'] = obj.display_name + if obj.extension: + data['extensions'] = list(obj.extension) + return data + + @classmethod + def _asset_administration_shell_descriptor_to_json(cls, obj: server_model.AssetAdministrationShellDescriptor) -> \ + Dict[str, object]: + """ + serialization of an object from class AssetAdministrationShell to json + + :param obj: object of 
class AssetAdministrationShell + :return: dict with the serialized attributes of this object + """ + data = cls._abstract_classes_to_json(obj) + data.update(cls._namespace_to_json(obj)) + data['id'] = obj.id + if obj.administration: + data['administration'] = obj.administration + if obj.asset_kind: + data['assetKind'] = ASSET_KIND[obj.asset_kind] + if obj.asset_type: + data['assetType'] = obj.asset_type + if obj.global_asset_id: + data['globalAssetId'] = obj.global_asset_id + if obj.specific_asset_id: + data['specificAssetIds'] = list(obj.specific_asset_id) + if obj.endpoints: + data['endpoints'] = list(obj.endpoints) + if obj.id_short: + data['idShort'] = obj.id_short + if obj.submodel_descriptors: + data['submodelDescriptors'] = list(obj.submodel_descriptors) + return data + + @classmethod + def _protocol_information_to_json(cls, + obj: server_model.ProtocolInformation) -> \ + Dict[str, object]: + data = cls._abstract_classes_to_json(obj) + + data['href'] = obj.href + if obj.endpoint_protocol: + data['endpointProtocol'] = obj.endpoint_protocol + if obj.endpoint_protocol_version: + data['endpointProtocolVersion'] = obj.endpoint_protocol_version + if obj.subprotocol: + data['subprotocol'] = obj.subprotocol + if obj.subprotocol_body: + data['subprotocolBody'] = obj.subprotocol_body + if obj.subprotocol_body_encoding: + data['subprotocolBodyEncoding'] = obj.subprotocol_body_encoding + return data + + @classmethod + def _endpoint_to_json(cls, obj: server_model.Endpoint) -> Dict[str, object]: + data = cls._abstract_classes_to_json(obj) + data['protocolInformation'] = cls._protocol_information_to_json( + obj.protocol_information) + data['interface'] = obj.interface + return data + + @classmethod + def _submodel_descriptor_to_json(cls, obj: server_model.SubmodelDescriptor) -> Dict[str, object]: + """ + serialization of an object from class Submodel to json + + :param obj: object of class Submodel + :return: dict with the serialized attributes of this object + """ + data 
= cls._abstract_classes_to_json(obj) + data['id'] = obj.id + data['endpoints'] = [cls._endpoint_to_json(ep) for ep in + obj.endpoints] + if obj.id_short: + data['idShort'] = obj.id_short + if obj.administration: + data['administration'] = obj.administration + if obj.semantic_id: + data['semanticId'] = obj.semantic_id + if obj.supplemental_semantic_id: + data['supplementalSemanticIds'] = list(obj.supplemental_semantic_id) + return data + + @classmethod + def _asset_link_to_json(cls, obj: server_model.AssetLink) -> Dict[str, object]: + data = cls._abstract_classes_to_json(obj) + data['name'] = obj.name + data['value'] = obj.value + return data diff --git a/server/app/interfaces/base.py b/server/app/interfaces/base.py index 8234eddc..14fb30c6 100644 --- a/server/app/interfaces/base.py +++ b/server/app/interfaces/base.py @@ -264,13 +264,13 @@ def http_exception_to_response(exception: werkzeug.exceptions.HTTPException, res class ObjectStoreWSGIApp(BaseWSGIApp): object_store: AbstractObjectStore - def _get_all_obj_of_type(self, type_: Type[model.provider._IDENTIFIABLE]) -> Iterator[model.provider._IDENTIFIABLE]: + def _get_all_obj_of_type(self, type_: Type[model.provider._IT]) -> Iterator[model.provider._IT]: for obj in self.object_store: if isinstance(obj, type_): yield obj - def _get_obj_ts(self, identifier: model.Identifier, type_: Type[model.provider._IDENTIFIABLE]) \ - -> model.provider._IDENTIFIABLE: + def _get_obj_ts(self, identifier: model.Identifier, type_: Type[model.provider._IT]) \ + -> model.provider._IT: identifiable = self.object_store.get(identifier) if not isinstance(identifiable, type_): raise NotFound(f"No {type_.__name__} with {identifier} found!") diff --git a/server/app/interfaces/descriptorStore.py b/server/app/interfaces/descriptorStore.py new file mode 100644 index 00000000..18768465 --- /dev/null +++ b/server/app/interfaces/descriptorStore.py @@ -0,0 +1,240 @@ +""" +Persistent Storage for BaSyx Registry API +Supports 
AssetAdministrationShellDescriptors and SubmodelDescriptors +""" +import json +import threading +from pathlib import Path +from typing import Dict, List, Optional +from abc import ABC, abstractmethod +import logging + +import app.model as server_model + +logger = logging.getLogger(__name__) + + +class RegistryStorageInterface(ABC): + """Abstract interface for registry storage implementations""" + + @abstractmethod + def save_aas_descriptor(self, descriptor: server_model.AssetAdministrationShellDescriptor) -> None: + """Save or update an AAS descriptor""" + pass + + @abstractmethod + def get_aas_descriptor(self, aas_id: str) -> Optional[server_model.AssetAdministrationShellDescriptor]: + """Retrieve an AAS descriptor by ID""" + pass + + @abstractmethod + def delete_aas_descriptor(self, aas_id: str) -> bool: + """Delete an AAS descriptor. Returns True if deleted, False if not found""" + pass + + @abstractmethod + def list_aas_descriptors(self) -> List[server_model.AssetAdministrationShellDescriptor]: + """List all AAS descriptors""" + pass + + @abstractmethod + def save_submodel_descriptor(self, descriptor: server_model.SubmodelDescriptor) -> None: + """Save or update a Submodel descriptor""" + pass + + @abstractmethod + def get_submodel_descriptor(self, submodel_id: str) -> Optional[server_model.SubmodelDescriptor]: + """Retrieve a Submodel descriptor by ID""" + pass + + @abstractmethod + def delete_submodel_descriptor(self, submodel_id: str) -> bool: + """Delete a Submodel descriptor. Returns True if deleted, False if not found""" + pass + + @abstractmethod + def list_submodel_descriptors(self) -> List[server_model.SubmodelDescriptor]: + """List all Submodel descriptors""" + pass + + +class JsonFileStorage(RegistryStorageInterface): + """ + File-based persistent storage using JSON. + Thread-safe with automatic saving. 
+ """ + + def __init__(self, storage_path: str = "registry_storage.json"): + self.storage_path = Path(storage_path) + self.lock = threading.RLock() + self.data = { + "aas_descriptors": {}, + "submodel_descriptors": {} + } + self._load() + + def _load(self) -> None: + """Load data from file""" + if self.storage_path.exists(): + try: + with open(self.storage_path, 'r', encoding='utf-8') as f: + loaded_data = json.load(f) + self.data = loaded_data + logger.info(f"Loaded registry data from {self.storage_path}") + except Exception as e: + logger.error(f"Failed to load registry data: {e}") + # Keep empty data structure on error + + def _save(self) -> None: + """Save data to file""" + try: + # Write to temporary file first, then rename (atomic operation) + temp_path = self.storage_path.with_suffix('.tmp') + with open(temp_path, 'w', encoding='utf-8') as f: + json.dump(self.data, f, indent=2, ensure_ascii=False) + temp_path.replace(self.storage_path) + logger.debug(f"Saved registry data to {self.storage_path}") + except Exception as e: + logger.error(f"Failed to save registry data: {e}") + + def save_aas_descriptor(self, descriptor: server_model.AssetAdministrationShellDescriptor) -> None: + with self.lock: + # Convert descriptor to dict for JSON serialization + descriptor_dict = self._serialize_descriptor(descriptor) + aas_id = self._get_identifier(descriptor) + self.data["aas_descriptors"][aas_id] = descriptor_dict + self._save() + + def get_aas_descriptor(self, aas_id: str) -> Optional[server_model.AssetAdministrationShellDescriptor]: + with self.lock: + descriptor_dict = self.data["aas_descriptors"].get(aas_id) + if descriptor_dict: + return self._deserialize_descriptor(descriptor_dict, server_model.AssetAdministrationShellDescriptor) + return None + + def delete_aas_descriptor(self, aas_id: str) -> bool: + with self.lock: + if aas_id in self.data["aas_descriptors"]: + del self.data["aas_descriptors"][aas_id] + self._save() + return True + return False + + def 
list_aas_descriptors(self) -> List[server_model.AssetAdministrationShellDescriptor]: + with self.lock: + return [ + self._deserialize_descriptor(desc_dict, server_model.AssetAdministrationShellDescriptor) + for desc_dict in self.data["aas_descriptors"].values() + ] + + def save_submodel_descriptor(self, descriptor: server_model.SubmodelDescriptor) -> None: + with self.lock: + descriptor_dict = self._serialize_descriptor(descriptor) + submodel_id = self._get_identifier(descriptor) + self.data["submodel_descriptors"][submodel_id] = descriptor_dict + self._save() + + def get_submodel_descriptor(self, submodel_id: str) -> Optional[server_model.SubmodelDescriptor]: + with self.lock: + descriptor_dict = self.data["submodel_descriptors"].get(submodel_id) + if descriptor_dict: + return self._deserialize_descriptor(descriptor_dict, server_model.SubmodelDescriptor) + return None + + def delete_submodel_descriptor(self, submodel_id: str) -> bool: + with self.lock: + if submodel_id in self.data["submodel_descriptors"]: + del self.data["submodel_descriptors"][submodel_id] + self._save() + return True + return False + + def list_submodel_descriptors(self) -> List[server_model.SubmodelDescriptor]: + with self.lock: + return [ + self._deserialize_descriptor(desc_dict, server_model.SubmodelDescriptor) + for desc_dict in self.data["submodel_descriptors"].values() + ] + + def _serialize_descriptor(self, descriptor) -> dict: + """Convert descriptor object to JSON-serializable dict""" + # Use your ServerAASToJsonEncoder here + from server.app.adapter.jsonization import ServerAASToJsonEncoder + return json.loads(json.dumps(descriptor, cls=ServerAASToJsonEncoder)) + + def _deserialize_descriptor(self, descriptor_dict: dict, descriptor_class): + """Convert dict back to descriptor object""" + # Use your JSON deserialization logic here + from server.app.adapter.jsonization import AASFromJsonDecoder + # You'll need to adapt this based on your actual deserialization approach + return 
descriptor_class(**descriptor_dict) + + def _get_identifier(self, descriptor) -> str: + """Extract identifier from descriptor""" + if hasattr(descriptor, 'id'): + return str(descriptor.id) + elif hasattr(descriptor, 'identification'): + return str(descriptor.identification.id) + else: + raise ValueError(f"Cannot extract identifier from descriptor: {descriptor}") + + +class InMemoryStorage(RegistryStorageInterface): + """ + In-memory storage (no persistence). + Useful for testing and development. + """ + + def __init__(self): + self.aas_descriptors: Dict[str, server_model.AssetAdministrationShellDescriptor] = {} + self.submodel_descriptors: Dict[str, server_model.SubmodelDescriptor] = {} + self.lock = threading.RLock() + + def save_aas_descriptor(self, descriptor: server_model.AssetAdministrationShellDescriptor) -> None: + with self.lock: + aas_id = self._get_identifier(descriptor) + self.aas_descriptors[aas_id] = descriptor + + def get_aas_descriptor(self, aas_id: str) -> Optional[server_model.AssetAdministrationShellDescriptor]: + with self.lock: + return self.aas_descriptors.get(aas_id) + + def delete_aas_descriptor(self, aas_id: str) -> bool: + with self.lock: + if aas_id in self.aas_descriptors: + del self.aas_descriptors[aas_id] + return True + return False + + def list_aas_descriptors(self) -> List[server_model.AssetAdministrationShellDescriptor]: + with self.lock: + return list(self.aas_descriptors.values()) + + def save_submodel_descriptor(self, descriptor: server_model.SubmodelDescriptor) -> None: + with self.lock: + submodel_id = self._get_identifier(descriptor) + self.submodel_descriptors[submodel_id] = descriptor + + def get_submodel_descriptor(self, submodel_id: str) -> Optional[server_model.SubmodelDescriptor]: + with self.lock: + return self.submodel_descriptors.get(submodel_id) + + def delete_submodel_descriptor(self, submodel_id: str) -> bool: + with self.lock: + if submodel_id in self.submodel_descriptors: + del 
self.submodel_descriptors[submodel_id] + return True + return False + + def list_submodel_descriptors(self) -> List[server_model.SubmodelDescriptor]: + with self.lock: + return list(self.submodel_descriptors.values()) + + def _get_identifier(self, descriptor) -> str: + """Extract identifier from descriptor""" + if hasattr(descriptor, 'id'): + return str(descriptor.id) + elif hasattr(descriptor, 'identification'): + return str(descriptor.identification.id) + else: + raise ValueError(f"Cannot extract identifier from descriptor: {descriptor}") diff --git a/server/app/interfaces/discovery.py b/server/app/interfaces/discovery.py new file mode 100644 index 00000000..68684c7f --- /dev/null +++ b/server/app/interfaces/discovery.py @@ -0,0 +1,212 @@ +""" +This module implements the Discovery interface defined in the 'Specification of the Asset Administration Shell Part 2 – Application Programming Interface'. +""" + +import abc +from typing import Dict, List, Set, Any + +import werkzeug.exceptions +from pymongo import MongoClient +from pymongo.collection import Collection +from werkzeug.routing import Rule, Submount +from werkzeug.wrappers import Request, Response + +from basyx.aas import model +from app.util.converters import IdentifierToBase64URLConverter +from app.interfaces.base import BaseWSGIApp, HTTPApiDecoder +from app import model as server_model +from app.adapter.jsonization import ServerAASToJsonEncoder + +encoder=ServerAASToJsonEncoder() + +class AbstractDiscoveryStore(metaclass=abc.ABCMeta): + aas_id_to_asset_ids: Any + asset_id_to_aas_ids: Any + + @abc.abstractmethod + def __init__(self): + pass + + @abc.abstractmethod + def get_all_specific_asset_ids_by_aas_id(self, aas_id: model.Identifier) -> List[model.SpecificAssetId]: + pass + + @abc.abstractmethod + def add_specific_asset_ids_to_aas(self, aas_id: model.Identifier, asset_ids: List[model.SpecificAssetId]) -> None: + pass + + @abc.abstractmethod + def delete_specific_asset_ids_by_aas_id(self, aas_id: 
model.Identifier) -> None: + pass + + @abc.abstractmethod + def search_aas_ids_by_asset_link(self, asset_link: server_model.AssetLink) -> List[model.Identifier]: + pass + + @abc.abstractmethod + def _add_aas_id_to_specific_asset_id(self, asset_id: model.SpecificAssetId, aas_identifier: model.Identifier) -> None: + pass + + @abc.abstractmethod + def _delete_aas_id_from_specific_asset_ids(self, asset_id: model.SpecificAssetId, aas_id: model.Identifier) -> None: + pass + + + +class InMemoryDiscoveryStore(AbstractDiscoveryStore): + def __init__(self): + self.aas_id_to_asset_ids: Dict[model.Identifier, Set[model.SpecificAssetId]] = {} + self.asset_id_to_aas_ids: Dict[model.SpecificAssetId, Set[model.Identifier]] = {} + + def get_all_specific_asset_ids_by_aas_id(self, aas_id: model.Identifier) -> List[model.SpecificAssetId]: + return list(self.aas_id_to_asset_ids.get(aas_id, set())) + + def add_specific_asset_ids_to_aas(self, aas_id: model.Identifier, + asset_ids: List[model.SpecificAssetId]) -> None: + + if aas_id not in self.aas_id_to_asset_ids: + self.aas_id_to_asset_ids[aas_id] = set() + + for asset in asset_ids: + self.aas_id_to_asset_ids[aas_id].add(asset) + + def delete_specific_asset_ids_by_aas_id(self, aas_id: model.Identifier) -> None: + key = aas_id + if key in self.aas_id_to_asset_ids: + del self.aas_id_to_asset_ids[key] + + def search_aas_ids_by_asset_link(self, asset_link: server_model.AssetLink) -> List[model.Identifier]: + result = [] + for asset_key, aas_ids in self.asset_id_to_aas_ids.items(): + expected_key = f"{asset_link.name}:{asset_link.value}" + if asset_key == expected_key: + result.extend(list(aas_ids)) + return result + + def _add_aas_id_to_specific_asset_id(self, asset_id: model.SpecificAssetId, aas_id: model.Identifier) -> None: + if asset_id in self.asset_id_to_aas_ids: + self.asset_id_to_aas_ids[asset_id].add(aas_id) + else: + self.asset_id_to_aas_ids[asset_id] = {aas_id} + + def _delete_aas_id_from_specific_asset_ids(self, asset_id: 
model.SpecificAssetId, aas_id: model.Identifier) -> None: + if asset_id in self.asset_id_to_aas_ids: + self.asset_id_to_aas_ids[asset_id].discard(aas_id) + + + + +class MongoDiscoveryStore(AbstractDiscoveryStore): + def __init__(self, + uri: str = "mongodb://localhost:27017", + db_name: str = "basyx", + coll_aas_to_assets: str = "aas_to_assets", + coll_asset_to_aas: str = "asset_to_aas"): + self.client: MongoClient = MongoClient(uri) + self.db = self.client[db_name] + self.coll_aas_to_assets: Collection = self.db[coll_aas_to_assets] + self.coll_asset_to_aas: Collection = self.db[coll_asset_to_aas] + # Create an index for fast asset reverse lookups. + self.coll_asset_to_aas.create_index("_id") + + def get_all_specific_asset_ids_by_aas_id(self, aas_id: model.Identifier) -> List[model.SpecificAssetId]: + key = aas_id + doc = self.coll_aas_to_assets.find_one({"_id": key}) + return doc["asset_ids"] if doc and "asset_ids" in doc else [] + + def add_specific_asset_ids_to_aas(self, aas_id: model.Identifier, asset_ids: List[model.SpecificAssetId]) -> None: + key = aas_id + # Convert each SpecificAssetId using the serialization helper. 
+ serializable_assets = [encoder.default(asset_id) for asset_id in asset_ids] + self.coll_aas_to_assets.update_one( + {"_id": key}, + {"$addToSet": {"asset_ids": {"$each": serializable_assets}}}, + upsert=True + ) + + def delete_specific_asset_ids_by_aas_id(self, aas_id: model.Identifier) -> None: + key = aas_id + self.coll_aas_to_assets.delete_one({"_id": key}) + + def search_aas_ids_by_asset_link(self, asset_link: server_model.AssetLink) -> List[model.Identifier]: + # Query MongoDB for specificAssetIds where 'name' and 'value' match + doc = self.coll_asset_to_aas.find_one({ + "name": asset_link.name, + "value": asset_link.value + }) + return doc["aas_ids"] if doc and "aas_ids" in doc else [] + + def _add_aas_id_to_specific_asset_id(self, asset_id: model.SpecificAssetId, aas_id: model.Identifier) -> None: + asset_key = str(encoder.default(asset_id)) + self.coll_asset_to_aas.update_one( + {"_id": asset_key}, + {"$addToSet": {"aas_ids": aas_id}}, + upsert=True + ) + + def _delete_aas_id_from_specific_asset_ids(self, asset_id: model.SpecificAssetId, aas_id: model.Identifier) -> None: + asset_key = str(encoder.default(asset_id)) + self.coll_asset_to_aas.update_one( + {"_id": asset_key}, + {"$pull": {"aas_ids": aas_id}} + ) + + +class DiscoveryAPI(BaseWSGIApp): + def __init__(self, + persistent_store: AbstractDiscoveryStore, base_path: str = "/api/v3.0"): + self.persistent_store: AbstractDiscoveryStore = persistent_store + self.url_map = werkzeug.routing.Map([ + Submount(base_path, [ + Rule("/lookup/shellsByAssetLink", methods=["POST"], + endpoint=self.search_all_aas_ids_by_asset_link), + Submount("/lookup/shells", [ + Rule("/", methods=["GET"], + endpoint=self.get_all_specific_asset_ids_by_aas_id), + Rule("/", methods=["POST"], + endpoint=self.post_all_asset_links_by_id), + Rule("/", methods=["DELETE"], + endpoint=self.delete_all_asset_links_by_id), + ]), + ]) + ], converters={ + "base64url": IdentifierToBase64URLConverter + }, strict_slashes=False) + + def 
search_all_aas_ids_by_asset_link(self, request: Request, url_args: dict, response_t: type, + **_kwargs) -> Response: + asset_links = HTTPApiDecoder.request_body_list(request, server_model.AssetLink, False) + matching_aas_keys = set() + for asset_link in asset_links: + aas_keys = self.persistent_store.search_aas_ids_by_asset_link(asset_link) + matching_aas_keys.update(aas_keys) + paginated_slice, cursor = self._get_slice(request, list(matching_aas_keys)) + return response_t(list(paginated_slice), cursor=cursor) + + def get_all_specific_asset_ids_by_aas_id(self, request: Request, url_args: dict, response_t: type, **_kwargs) -> Response: + aas_identifier = str(url_args["aas_id"]) + asset_ids = self.persistent_store.get_all_specific_asset_ids_by_aas_id(aas_identifier) + return response_t(asset_ids) + + def post_all_asset_links_by_id(self, request: Request, url_args: dict, response_t: type, **_kwargs) -> Response: + aas_identifier = str(url_args["aas_id"]) + specific_asset_ids = HTTPApiDecoder.request_body_list(request, model.SpecificAssetId, False) + self.persistent_store.add_specific_asset_ids_to_aas(aas_identifier, specific_asset_ids) + for asset_id in specific_asset_ids: + self.persistent_store._add_aas_id_to_specific_asset_id(asset_id, aas_identifier) + updated = {aas_identifier: self.persistent_store.get_all_specific_asset_ids_by_aas_id(aas_identifier)} + return response_t(updated) + + def delete_all_asset_links_by_id(self, request: Request, url_args: dict, response_t: type, **_kwargs) -> Response: + aas_identifier = str(url_args["aas_id"]) + self.persistent_store.delete_specific_asset_ids_by_aas_id(aas_identifier) + for key in list(self.persistent_store.asset_id_to_aas_ids.keys()): + self.persistent_store.asset_id_to_aas_ids[key].discard(aas_identifier) + return response_t() + + +if __name__ == "__main__": + from werkzeug.serving import run_simple + + run_simple("localhost", 8084, DiscoveryAPI(InMemoryDiscoveryStore()), + use_debugger=True, use_reloader=True) 
diff --git a/server/app/interfaces/provider.py b/server/app/interfaces/provider.py new file mode 100644 index 00000000..34f61044 --- /dev/null +++ b/server/app/interfaces/provider.py @@ -0,0 +1,176 @@ +import os +from typing import MutableSet, Type, Iterator, Iterable, Dict, Optional, Any + +from basyx.aas import model +import json + + + + +class DescriptorStore(MutableSet[object]): + """A simple in‑memory store for descriptor objects. + + The store uses a dictionary keyed by the descriptor's ``id`` attribute. + It enforces that at most one descriptor with a given identifier is + present. Membership checks can be performed either by descriptor + instance or by identifier string. Objects added to this store must + expose an ``id`` attribute. + """ + + def __init__(self, objects: Iterable[object] = ()) -> None: + self._backend: Dict[str, object] = {} + for obj in objects: + self.add(obj) + + def add(self, obj: object) -> None: + """Add a descriptor to the store. + + :param obj: The descriptor object to add. The object must have + an ``id`` attribute. If another descriptor with the same + identifier already exists, a :class:`KeyError` is raised. + """ + if not hasattr(obj, "id"): + raise TypeError("Objects stored in DescriptorStore must have an 'id' attribute") + identifier: model.Identifier = getattr(obj, "id") + if identifier in self._backend and self._backend[identifier] is not obj: + raise KeyError(f"Descriptor object with same id {identifier!r} is already stored") + self._backend[identifier] = obj + + def discard(self, obj: object) -> None: + """Remove a descriptor from the store if present. + + :param obj: The descriptor object to remove. If the object is not + present in the store, this method does nothing. 
+ """ + if not hasattr(obj, "id"): + return + identifier: model.Identifier = getattr(obj, "id") # type: ignore[assignment] + if self._backend.get(identifier) is obj: + del self._backend[identifier] + + def __contains__(self, obj: object) -> bool: # type: ignore[override] + """Check whether a descriptor or identifier is contained in the store. + + :param obj: Either a descriptor instance or an identifier string. + :return: ``True`` if the object or identifier is present in the store. + """ + # Allow lookup by id string directly + if isinstance(obj, str): + return obj in self._backend + # Otherwise, ensure it's a descriptor and compare by identity + if not hasattr(obj, "id"): + return False + identifier: model.Identifier = getattr(obj, "id") # type: ignore[assignment] + return self._backend.get(identifier) is obj + + def __len__(self) -> int: # type: ignore[override] + return len(self._backend) + + def __iter__(self) -> Iterator[object]: # type: ignore[override] + return iter(self._backend.values()) + + # Additional helper methods + def get_descriptor(self, identifier: model.Identifier) -> object: + """Retrieve a descriptor by its identifier. + + :param identifier: The descriptor's identifier. + :return: The descriptor instance stored under the given identifier. + :raises KeyError: If no descriptor with the given identifier exists. + """ + return self._backend[identifier] + + def get(self, identifier: model.Identifier, default: Optional[object] = None) -> Optional[object]: + """Retrieve a descriptor by its identifier, returning a default if not found. + + :param identifier: The descriptor's identifier. + :param default: The value to return if the identifier is not present. + :return: The descriptor instance or ``default``. + """ + return self._backend.get(identifier, default) + + +class PersistentDescriptorStore(DescriptorStore): + """A descriptor store that persists its contents to a JSON file. 
+ + The store writes out the entire collection of descriptors whenever + it is modified. At initialization time it attempts to read + existing descriptor data from the given file. The file on disk + will be created automatically if it does not exist. + """ + + def __init__(self, file_path: str, objects: Iterable[object] = ()) -> None: + #: Path to the backing JSON file. + self._file_path = file_path + # If a file exists, prepopulate the backend from it; otherwise start + # with an empty store. We intentionally bypass the DescriptorStore + # constructor here because we want to load from disk before adding + # any objects passed in via the ``objects`` iterable. + if os.path.isfile(self._file_path): + self._backend = self._load_from_file() + else: + self._backend = {} + # Add initial objects, if any; they will overwrite duplicates. + for obj in objects: + self.add(obj) + # Ensure the file exists and reflects the current state. + self._save_to_file() + + # -- Serialization helpers ------------------------------------------------ + + def _descriptor_to_dict(self, obj: object) -> Dict[str, Any]: + """Serialize a descriptor object into a JSON‑friendly dictionary. + + The default implementation uses ``vars(obj)`` (i.e., the object's + ``__dict__``) and filters out callables and private attributes. + Override this method if your descriptor classes require custom + serialization. + """ + return {k: v for k, v in vars(obj).items() if not callable(v) and not k.startswith("_")} + + def _dict_to_descriptor(self, data: Dict[str, Any]) -> object: + """Deserialize a JSON dictionary back into a descriptor object. + + This method instantiates either an + ``AssetAdministrationShellDescriptor`` or a ``SubmodelDescriptor`` + based on the presence of characteristic fields. It passes + all attributes except ``id`` to the descriptor constructor. + Override this method if your descriptor types or construction + signatures differ. 
+ """ + # Lazy import to avoid heavy dependencies at module load time + from app.model.descriptor import ( + AssetAdministrationShellDescriptor, + SubmodelDescriptor, + ) + kwargs = {k: v for k, v in data.items() if k != "id"} + if "endpoints" in data and "asset_kind" in data: + return AssetAdministrationShellDescriptor(id_=data["id"], **kwargs) + else: + return SubmodelDescriptor(id_=data["id"], **kwargs) + + def _load_from_file(self) -> Dict[str, object]: + """Read descriptor data from the backing JSON file.""" + with open(self._file_path, "r", encoding="utf-8") as f: + raw_data: Dict[str, Dict[str, Any]] = json.load(f) + return {identifier: self._dict_to_descriptor(d) for identifier, d in raw_data.items()} + + def _save_to_file(self) -> None: + """Write the current collection of descriptors to the backing file.""" + raw_data: Dict[str, Dict[str, Any]] = { + identifier: self._descriptor_to_dict(obj) for identifier, obj in self._backend.items() + } + os.makedirs(os.path.dirname(self._file_path), exist_ok=True) + with open(self._file_path, "w", encoding="utf-8") as f: + json.dump(raw_data, f, indent=2) + + # -- Overrides of DescriptorStore methods --------------------------------- + + def add(self, obj: object) -> None: + """Add a descriptor and persist the updated store.""" + super().add(obj) + self._save_to_file() + + def discard(self, obj: object) -> None: + """Remove a descriptor, if present, and persist the updated store.""" + super().discard(obj) + self._save_to_file() diff --git a/server/app/interfaces/registry.py b/server/app/interfaces/registry.py new file mode 100644 index 00000000..49874a63 --- /dev/null +++ b/server/app/interfaces/registry.py @@ -0,0 +1,290 @@ +""" +This module implements the Registry interface defined in the 'Specification of the Asset Administration Shell Part 2 – Application Programming Interface'. 
+""" + +from typing import Dict, Iterator, Type, Tuple + +import werkzeug.exceptions +import werkzeug.routing +import werkzeug.urls +import werkzeug.utils +from werkzeug.exceptions import Conflict, NotFound, BadRequest +from werkzeug.routing import MapAdapter, Rule, Submount +from werkzeug.wrappers import Request, Response + +from basyx.aas import model +import app.model as server_model +from app.util.converters import IdentifierToBase64URLConverter, base64url_decode +from app.interfaces.base import ObjectStoreWSGIApp, APIResponse, is_stripped_request, HTTPApiDecoder + + +class RegistryAPI(ObjectStoreWSGIApp): + def __init__(self, object_store: model.AbstractObjectStore, base_path: str = "/api/v3.0"): + self.object_store: model.AbstractObjectStore = object_store + self.url_map = werkzeug.routing.Map([ + Submount(base_path, [ + Rule("/description", methods=["GET"], endpoint=self.get_self_description), + Rule("/shell-descriptors", methods=["GET"], endpoint=self.get_all_aas_descriptors), + Rule("/shell-descriptors", methods=["POST"], endpoint=self.post_aas_descriptor), + Submount("/shell-descriptors", [ + Rule("/", methods=["GET"], endpoint=self.get_aas_descriptor_by_id), + Rule("/", methods=["PUT"], endpoint=self.put_aas_descriptor_by_id), + Rule("/", methods=["DELETE"], endpoint=self.delete_aas_descriptor_by_id), + Submount("/", [ + Rule("/submodel-descriptors", methods=["GET"], + endpoint=self.get_all_submodel_descriptors_through_superpath), + Rule("/submodel-descriptors", methods=["POST"], + endpoint=self.post_submodel_descriptor_through_superpath), + Submount("/submodel-descriptors", [ + Rule("/", methods=["GET"], + endpoint=self.get_submodel_descriptor_by_id_through_superpath), + Rule("/", methods=["PUT"], + endpoint=self.put_submodel_descriptor_by_id_through_superpath), + Rule("/", methods=["DELETE"], + endpoint=self.delete_submodel_descriptor_by_id_through_superpath), + ]) + ]) + ]), + Rule("/submodel-descriptors", methods=["GET"], 
endpoint=self.get_all_submodel_descriptors), + Rule("/submodel-descriptors", methods=["POST"], endpoint=self.post_submodel_descriptor), + Submount("/submodel-descriptors", [ + Rule("/", methods=["GET"], endpoint=self.get_submodel_descriptor_by_id), + Rule("/", methods=["PUT"], endpoint=self.put_submodel_descriptor_by_id), + Rule("/", methods=["DELETE"], + endpoint=self.delete_submodel_descriptor_by_id), + ]) + ]) + ], converters={ + "base64url": IdentifierToBase64URLConverter + }, strict_slashes=False) + + def _get_all_aas_descriptors(self, request: "Request") -> Tuple[ + Iterator[server_model.AssetAdministrationShellDescriptor], int]: + + descriptors: Iterator[server_model.AssetAdministrationShellDescriptor] = self._get_all_obj_of_type( + server_model.AssetAdministrationShellDescriptor + ) + + asset_kind = request.args.get("assetKind") + if asset_kind is not None: + try: + asset_kind = model.AssetKind[asset_kind] + except KeyError: + raise BadRequest(f"Invalid assetKind '{asset_kind}', must be one of {list(model.AssetKind.__members__)}") + descriptors = filter( + lambda desc: desc.asset_kind == asset_kind, + descriptors + ) + + asset_type = request.args.get("assetType") + if asset_type is not None: + asset_type = base64url_decode(asset_type) + try: + asset_type = model.Identifier(asset_type) + except Exception: + raise BadRequest(f"Invalid assetType: '{asset_type}'") + descriptors = filter( + lambda desc: desc.asset_type == asset_type, + descriptors + ) + + paginated_descriptors, end_index = self._get_slice(request, descriptors) + return paginated_descriptors, end_index + + def _get_aas_descriptor(self, url_args: Dict) -> server_model.AssetAdministrationShellDescriptor: + return self._get_obj_ts(url_args["aas_id"], server_model.AssetAdministrationShellDescriptor) + + def _get_all_submodel_descriptors(self, request: Request) -> Tuple[Iterator[server_model.SubmodelDescriptor], int]: + submodel_descriptors: Iterator[server_model.SubmodelDescriptor] = 
self._get_all_obj_of_type(server_model.SubmodelDescriptor) + paginated_submodel_descriptors, end_index = self._get_slice(request, submodel_descriptors) + return paginated_submodel_descriptors, end_index + + def _get_submodel_descriptor(self, url_args: Dict) -> server_model.SubmodelDescriptor: + return self._get_obj_ts(url_args["submodel_id"], server_model.SubmodelDescriptor) + + # ------ COMMON ROUTES ------- + def get_self_description(self, request: Request, url_args: Dict, response_t: Type[APIResponse], + **_kwargs) -> Response: + service_description = server_model.ServiceDescription(profiles=[ + server_model.ServiceSpecificationProfileEnum.AAS_REGISTRY_FULL, + server_model.ServiceSpecificationProfileEnum.AAS_REGISTRY_READ, + server_model.ServiceSpecificationProfileEnum.SUBMODEL_REGISTRY_FULL, + server_model.ServiceSpecificationProfileEnum.SUBMODEL_REGISTRY_READ + ]) + return response_t(service_description.to_dict()) + + # ------ AAS REGISTRY ROUTES ------- + def get_all_aas_descriptors(self, request: Request, url_args: Dict, response_t: Type[APIResponse], + **_kwargs) -> Response: + aas_descriptors, cursor = self._get_all_aas_descriptors(request) + return response_t(list(aas_descriptors), cursor=cursor) + + def post_aas_descriptor(self, request: Request, url_args: Dict, response_t: Type[APIResponse], + map_adapter: MapAdapter) -> Response: + descriptor = HTTPApiDecoder.request_body(request, server_model.AssetAdministrationShellDescriptor, False) + try: + self.object_store.add(descriptor) + except KeyError as e: + raise Conflict(f"AssetAdministrationShellDescriptor with Identifier {descriptor.id} already exists!") from e + descriptor.commit() + created_resource_url = map_adapter.build(self.get_aas_descriptor_by_id, { + "aas_id": descriptor.id + }, force_external=True) + return response_t(descriptor, status=201, headers={"Location": created_resource_url}) + + def get_aas_descriptor_by_id(self, request: Request, url_args: Dict, response_t: Type[APIResponse], + 
**_kwargs) -> Response: + descriptor = self._get_aas_descriptor(url_args) + return response_t(descriptor) + + def put_aas_descriptor_by_id(self, request: Request, url_args: Dict, response_t: Type[APIResponse], + **_kwargs) -> Response: + descriptor = self._get_aas_descriptor(url_args) + descriptor.update_from(HTTPApiDecoder.request_body(request, server_model.AssetAdministrationShellDescriptor, + is_stripped_request(request))) + descriptor.commit() + return response_t() + + def delete_aas_descriptor_by_id(self, request: Request, url_args: Dict, response_t: Type[APIResponse], + **_kwargs) -> Response: + descriptor = self._get_aas_descriptor(url_args) + self.object_store.remove(descriptor) + return response_t() + + def get_all_submodel_descriptors_through_superpath(self, + request: Request, + url_args: Dict, + response_t: Type[ + APIResponse], + **_kwargs) -> Response: + aas_descriptor = self._get_aas_descriptor(url_args) + submodel_descriptors, cursor = self._get_slice(request, + aas_descriptor.submodel_descriptors) + return response_t(list(submodel_descriptors), cursor=cursor) + + def get_submodel_descriptor_by_id_through_superpath(self, + request: Request, + url_args: Dict, + response_t: + Type[ + APIResponse], + **_kwargs) -> Response: + aas_descriptor = self._get_aas_descriptor(url_args) + submodel_id = url_args["submodel_id"] + submodel_descriptor = next( + (sd for sd in aas_descriptor.submodel_descriptors if + sd.id == submodel_id), None) + if submodel_descriptor is None: + raise NotFound( + f"Submodel Descriptor with Identifier {submodel_id} not found in AssetAdministrationShell!") + return response_t(submodel_descriptor) + + def post_submodel_descriptor_through_superpath(self, + request: Request, + url_args: Dict, + response_t: Type[ + APIResponse], + map_adapter: MapAdapter) -> Response: + aas_descriptor = self._get_aas_descriptor(url_args) + submodel_descriptor = HTTPApiDecoder.request_body(request, + server_model.SubmodelDescriptor, + is_stripped_request( 
+ request)) + if any(sd.id == submodel_descriptor.id for sd in + aas_descriptor.submodel_descriptors): + raise Conflict( + f"Submodel Descriptor with Identifier {submodel_descriptor.id} already exists!") + aas_descriptor.submodel_descriptors.append(submodel_descriptor) + aas_descriptor.commit() + created_resource_url = map_adapter.build( + self.get_submodel_descriptor_by_id_through_superpath, { + "aas_id": aas_descriptor.id, + "submodel_id": submodel_descriptor.id + }, force_external=True) + return response_t(submodel_descriptor, status=201, + headers={"Location": created_resource_url}) + + def put_submodel_descriptor_by_id_through_superpath(self, + request: Request, + url_args: Dict, + response_t: + Type[ + APIResponse], + **_kwargs) -> Response: + aas_descriptor = self._get_aas_descriptor(url_args) + submodel_id = url_args["submodel_id"] + submodel_descriptor = next( + (sd for sd in aas_descriptor.submodel_descriptors if + sd.id == submodel_id), None) + if submodel_descriptor is None: + raise NotFound( + f"Submodel Descriptor with Identifier {submodel_id} not found in AssetAdministrationShell!") + submodel_descriptor.update_from( + HTTPApiDecoder.request_body(request, + server_model.SubmodelDescriptor, + is_stripped_request(request))) + aas_descriptor.commit() + return response_t() + + def delete_submodel_descriptor_by_id_through_superpath(self, + request: Request, + url_args: Dict, + response_t: + Type[ + APIResponse], + **_kwargs) -> Response: + aas_descriptor = self._get_aas_descriptor(url_args) + submodel_id = url_args["submodel_id"] + submodel_descriptor = next( + (sd for sd in aas_descriptor.submodel_descriptors if sd.id == submodel_id), None) + if submodel_descriptor is None: + raise NotFound(f"Submodel Descriptor with Identifier {submodel_id} not found in AssetAdministrationShell!") + aas_descriptor.submodel_descriptors.remove(submodel_descriptor) + aas_descriptor.commit() + return response_t() + + # ------ Submodel REGISTRY ROUTES ------- + def 
get_all_submodel_descriptors(self, request: Request, url_args: Dict, response_t: Type[APIResponse], + **_kwargs) -> Response: + submodel_descriptors, cursor = self._get_all_submodel_descriptors(request) + return response_t(list(submodel_descriptors), cursor=cursor, stripped=is_stripped_request(request)) + + def get_submodel_descriptor_by_id(self, request: Request, url_args: Dict, response_t: Type[APIResponse], + **_kwargs) -> Response: + submodel_descriptor = self._get_submodel_descriptor(url_args) + return response_t(submodel_descriptor, stripped=is_stripped_request(request)) + + def post_submodel_descriptor(self, request: Request, url_args: Dict, response_t: Type[APIResponse], + map_adapter: MapAdapter) -> Response: + submodel_descriptor = HTTPApiDecoder.request_body(request, server_model.SubmodelDescriptor, + is_stripped_request(request)) + try: + self.object_store.add(submodel_descriptor) + except KeyError as e: + raise Conflict(f"Submodel Descriptor with Identifier {submodel_descriptor.id} already exists!") from e + submodel_descriptor.commit() + created_resource_url = map_adapter.build(self.get_submodel_descriptor_by_id, { + "submodel_id": submodel_descriptor.id + }, force_external=True) + return response_t(submodel_descriptor, status=201, headers={"Location": created_resource_url}) + + def put_submodel_descriptor_by_id(self, request: Request, url_args: Dict, response_t: Type[APIResponse], + **_kwargs) -> Response: + submodel_descriptor = self._get_submodel_descriptor(url_args) + submodel_descriptor.update_from( + HTTPApiDecoder.request_body(request, server_model.SubmodelDescriptor, is_stripped_request(request))) + submodel_descriptor.commit() + return response_t() + + def delete_submodel_descriptor_by_id(self, request: Request, url_args: Dict, response_t: Type[APIResponse], + **_kwargs) -> Response: + self.object_store.remove(self._get_obj_ts(url_args["submodel_id"], server_model.SubmodelDescriptor)) + return response_t() + + +if __name__ == "__main__": + 
from werkzeug.serving import run_simple + from basyx.aas.examples.data.example_aas import create_full_example + + run_simple("localhost", 8083, RegistryAPI(create_full_example()), + use_debugger=True, use_reloader=True) diff --git a/server/app/interfaces/repository.py b/server/app/interfaces/repository.py index c1ee513e..935b481a 100644 --- a/server/app/interfaces/repository.py +++ b/server/app/interfaces/repository.py @@ -167,14 +167,14 @@ def __call__(self, environ, start_response) -> Iterable[bytes]: response: Response = self.handle_request(Request(environ)) return response(environ, start_response) - def _get_obj_ts(self, identifier: model.Identifier, type_: Type[model.provider._IDENTIFIABLE]) \ - -> model.provider._IDENTIFIABLE: + def _get_obj_ts(self, identifier: model.Identifier, type_: Type[model.provider._IT]) \ + -> model.provider._IT: identifiable = self.object_store.get(identifier) if not isinstance(identifiable, type_): raise NotFound(f"No {type_.__name__} with {identifier} found!") return identifiable - def _get_all_obj_of_type(self, type_: Type[model.provider._IDENTIFIABLE]) -> Iterator[model.provider._IDENTIFIABLE]: + def _get_all_obj_of_type(self, type_: Type[model.provider._IT]) -> Iterator[model.provider._IT]: for obj in self.object_store: if isinstance(obj, type_): yield obj diff --git a/server/app/model/__init__.py b/server/app/model/__init__.py new file mode 100644 index 00000000..79b36352 --- /dev/null +++ b/server/app/model/__init__.py @@ -0,0 +1,3 @@ +from .descriptor import * +from .endpoint import * +from .service_specification import * diff --git a/server/app/model/descriptor.py b/server/app/model/descriptor.py new file mode 100644 index 00000000..0edb2448 --- /dev/null +++ b/server/app/model/descriptor.py @@ -0,0 +1,109 @@ +from __future__ import absolute_import + +import abc +from typing import Optional, Iterable, List + +from basyx.aas import model +from app.model.endpoint import Endpoint + + +class Descriptor(model.HasExtension, 
metaclass=abc.ABCMeta): + @abc.abstractmethod + def __init__(self, description: Optional[model.MultiLanguageTextType] = None, + display_name: Optional[model.MultiLanguageNameType] = None, extension: Iterable[model.Extension] = ()): + super().__init__() + self.description: Optional[model.MultiLanguageTextType] = description + self.display_name: Optional[model.MultiLanguageNameType] = display_name + self.extension = model.NamespaceSet(self, [("name", True)], extension) + + def commit(self): + pass + + def update(self): + pass + + def update_from(self, other: "Descriptor", update_source: bool = False): + """ + Updates the descriptor's attributes from another descriptor. + + :param other: The descriptor to update from. + :param update_source: Placeholder for compatibility; not used in this context. + """ + for attr in vars(other): + if attr == "id": + continue # Skip updating the unique identifier of the AAS + setattr(self, attr, getattr(other, attr)) + + +class SubmodelDescriptor(Descriptor): + + def __init__(self, id_: model.Identifier, endpoints: List[Endpoint], + administration: Optional[model.AdministrativeInformation] = None, + id_short: Optional[model.NameType] = None, semantic_id: Optional[model.Reference] = None, + supplemental_semantic_id: Iterable[model.Reference] = ()): + super().__init__() + self.id: model.Identifier = id_ + self.endpoints: List[Endpoint] = endpoints + self.administration: Optional[model.AdministrativeInformation] = administration + self.id_short: Optional[model.NameType] = id_short + self.semantic_id: Optional[model.Reference] = semantic_id + self.supplemental_semantic_id: model.ConstrainedList[model.Reference] = \ + model.ConstrainedList(supplemental_semantic_id) + + +class AssetAdministrationShellDescriptor(Descriptor): + + def __init__(self, + id_: model.Identifier, + administration: Optional[model.AdministrativeInformation] = None, + asset_kind: Optional[model.AssetKind] = None, + asset_type: Optional[model.Identifier] = None, + 
endpoints: Optional[List[Endpoint]] = None, + global_asset_id: Optional[model.Identifier] = None, + id_short: Optional[model.NameType] = None, + specific_asset_id: Iterable[model.SpecificAssetId] = (), + submodel_descriptors: Optional[List[SubmodelDescriptor]] = None, + description: Optional[model.MultiLanguageTextType] = None, + display_name: Optional[model.MultiLanguageNameType] = None, + extension: Iterable[model.Extension] = ()): + """AssetAdministrationShellDescriptor - + + Nur das 'id'-Feld (id_) ist zwingend erforderlich. Alle anderen Felder erhalten Defaultwerte. + """ + super().__init__() + self.administration: Optional[model.AdministrativeInformation] = administration + self.asset_kind: Optional[model.AssetKind] = asset_kind + self.asset_type: Optional[model.Identifier] = asset_type + self.endpoints: Optional[ + List[Endpoint]] = endpoints if endpoints is not None else [] # leere Liste, falls nicht gesetzt + self.global_asset_id: Optional[model.Identifier] = global_asset_id + self.id_short: Optional[model.NameType] = id_short + self.id: model.Identifier = id_ + self._specific_asset_id: model.ConstrainedList[model.SpecificAssetId] = model.ConstrainedList( + specific_asset_id, + item_set_hook=self._check_constraint_set_spec_asset_id, + item_del_hook=self._check_constraint_del_spec_asset_id + ) + self.submodel_descriptors = submodel_descriptors if submodel_descriptors is not None else [] + self.description: Optional[model.MultiLanguageTextType] = description + self.display_name: Optional[model.MultiLanguageNameType] = display_name + self.extension = model.NamespaceSet(self, [("name", True)], extension) + + @property + def specific_asset_id(self) -> model.ConstrainedList[model.SpecificAssetId]: + return self._specific_asset_id + + @specific_asset_id.setter + def specific_asset_id(self, specific_asset_id: Iterable[model.SpecificAssetId]) -> None: + # constraints are checked via _check_constraint_set_spec_asset_id() in this case + self._specific_asset_id[:] = 
class AssetLink:
    """A named link between an asset identifier name and its value."""

    def __init__(self, name: "base.LabelType", value: "base.Identifier"):
        """
        :param name: Non-empty name of the asset identifier (e.g. "globalAssetId").
        :param value: Non-empty identifier value.
        :raises ValueError: If ``name`` or ``value`` is empty.
        """
        if not name:
            raise ValueError("AssetLink 'name' must be a non-empty string.")
        if not value:
            raise ValueError("AssetLink 'value' must be a non-empty string.")
        self.name = name
        self.value = value


class SecurityTypeEnum(Enum):
    """Supported security attribute types."""
    NONE = "NONE"
    RFC_TLSA = "RFC_TLSA"
    W3C_DID = "W3C_DID"


class SecurityAttributeObject:
    """A single security attribute (type, key, value) of a protocol endpoint."""

    def __init__(self, type_: SecurityTypeEnum, key: str, value: str):
        """
        :param type_: One of :class:`SecurityTypeEnum`.
        :param key: Non-empty attribute key.
        :param value: Non-empty attribute value.
        :raises ValueError: On invalid type or empty key/value.
        """
        if not isinstance(type_, SecurityTypeEnum):
            raise ValueError(f"Invalid security type: {type_}. Must be one of {list(SecurityTypeEnum)}")
        if not key or not isinstance(key, str):
            raise ValueError("Key must be a non-empty string.")
        if not value or not isinstance(value, str):
            raise ValueError("Value must be a non-empty string.")
        self.type = type_
        self.key = key
        self.value = value


class ProtocolInformation:
    """Connection information for one endpoint: target href plus protocol details."""

    def __init__(
        self,
        href: str,
        endpoint_protocol: Optional[str] = None,
        endpoint_protocol_version: Optional[List[str]] = None,
        subprotocol: Optional[str] = None,
        subprotocol_body: Optional[str] = None,
        subprotocol_body_encoding: Optional[str] = None,
        security_attributes: Optional[List["SecurityAttributeObject"]] = None
    ):
        """
        :param href: Target address; must be a non-empty string.
        :raises ValueError: If ``href`` is empty or not a string.
        """
        if not href or not isinstance(href, str):
            raise ValueError("href must be a non-empty string representing a valid URL.")

        self.href = href
        self.endpoint_protocol = endpoint_protocol
        self.endpoint_protocol_version = endpoint_protocol_version or []
        self.subprotocol = subprotocol
        self.subprotocol_body = subprotocol_body
        self.subprotocol_body_encoding = subprotocol_body_encoding
        self.security_attributes = security_attributes or []


class Endpoint:
    """An interface name (e.g. "AAS-3.0") together with its protocol information."""

    #: Interface short names allowed in the interface string; note that several
    #: of them contain a hyphen themselves (e.g. "SUBMODEL-REGISTRY").
    INTERFACE_SHORTNAMES = {
        "AAS", "SUBMODEL", "SERIALIZE", "AASX-FILE", "AAS-REGISTRY",
        "SUBMODEL-REGISTRY", "AAS-REPOSITORY", "SUBMODEL-REPOSITORY",
        "CD-REPOSITORY", "AAS-DISCOVERY"
    }
    #: Dotted numeric version, e.g. "3" or "3.0".
    VERSION_PATTERN = re.compile(r"^\d+(\.\d+)*$")

    def __init__(self, interface: "base.NameType", protocol_information: "ProtocolInformation"):
        """
        :param interface: Interface string of the form "<ShortName>-<Version>".
        :param protocol_information: Connection details; must not be None.
        """
        self.interface = interface
        self.protocol_information = protocol_information

    @property
    def interface(self) -> str:
        return self._interface

    @interface.setter
    def interface(self, interface: "base.NameType"):
        if interface is None:
            raise ValueError("Invalid value for `interface`, must not be `None`")
        if not self.is_valid_interface(interface):
            # NOTE(review): message reconstructed — the angle-bracket content was lost
            # in the original source; confirm the exact wording.
            raise ValueError(f"Invalid interface format: {interface}. "
                             f"Expected format: '<ShortName>-<Version>', e.g. 'AAS-3.0'")
        self._interface = interface

    @classmethod
    def is_valid_interface(cls, interface: "base.NameType") -> bool:
        """Check whether ``interface`` is "<ShortName>-<Version>" with a known short name.

        The version is always the part after the *last* hyphen: several valid
        short names (e.g. "SUBMODEL-REGISTRY") contain hyphens themselves, so
        splitting at the first hyphen would make them impossible to validate.
        """
        parts = interface.rsplit("-", 1)
        if len(parts) != 2:
            return False
        short_name, version = parts
        if short_name in cls.INTERFACE_SHORTNAMES and cls.VERSION_PATTERN.match(version):
            return True
        else:
            return False

    @property
    def protocol_information(self) -> "ProtocolInformation":
        return self._protocol_information

    @protocol_information.setter
    def protocol_information(self, protocol_information: "ProtocolInformation"):
        if protocol_information is None:
            raise ValueError("Invalid value for `protocol_information`, must not be `None`")
        self._protocol_information = protocol_information
class ServiceSpecificationProfileEnum(str, Enum):
    """URIs identifying the service specification profiles this server can implement."""
    AAS_REGISTRY_FULL = "https://adminshell.io/aas/API/3/0/AssetAdministrationShellRegistryServiceSpecification/SSP-001"
    AAS_REGISTRY_READ = "https://adminshell.io/aas/API/3/0/AssetAdministrationShellRegistryServiceSpecification/SSP-002"
    SUBMODEL_REGISTRY_FULL = "https://adminshell.io/aas/API/3/0/SubmodelRegistryServiceSpecification/SSP-001"
    SUBMODEL_REGISTRY_READ = "https://adminshell.io/aas/API/3/0/SubmodelRegistryServiceSpecification/SSP-002"
    # TODO: add the remaining profiles defined by the specification


class ServiceDescription:
    """Self-description of the service, listing the implemented profiles."""

    def __init__(self, profiles: List[ServiceSpecificationProfileEnum]):
        """
        :param profiles: Non-empty list of implemented profiles.
        :raises ValueError: If ``profiles`` is empty.
        """
        if not profiles:
            raise ValueError("At least one profile must be specified")
        self.profiles = profiles

    def to_dict(self):
        """Return a JSON-serializable representation of this description."""
        return {"profiles": [profile.value for profile in self.profiles]}
a/server/pyproject.toml +++ b/server/pyproject.toml @@ -38,6 +38,7 @@ requires-python = ">=3.10" dependencies = [ "urllib3>=1.26,<3", "Werkzeug>=3.0.3,<4", + "pymongo>=4.16.0", ] [project.optional-dependencies]