diff --git a/.gitignore b/.gitignore index d1087db..0c963ca 100644 --- a/.gitignore +++ b/.gitignore @@ -195,12 +195,18 @@ agents/matlab/dist/ **/resources/*.yml **/matlab/*.yaml **/matlab/*.yml +**/simul8/*.yml +**/simul8/*.yaml config*.yaml simulation*.yaml *.csv interactive.py *_interactive.py **/examples/interactive-simulation/* +# SIMUL8 agent +agents/simul8/client +agents/simul8/logs + #SIMULATION BRIDGE /logs /certs diff --git a/agents/simul8/.coveragerc b/agents/simul8/.coveragerc new file mode 100644 index 0000000..f847b69 --- /dev/null +++ b/agents/simul8/.coveragerc @@ -0,0 +1,13 @@ +[run] +omit = + */tests/* + */venv/* + */.venv/* + */__init__.py + */simul8_agent/docs/* + */simul8_agent/resources/* + +[report] +exclude_lines = + if __name__ == .__main__.: + pragma: no cover \ No newline at end of file diff --git a/agents/simul8/.pep8 b/agents/simul8/.pep8 new file mode 100644 index 0000000..50f451a --- /dev/null +++ b/agents/simul8/.pep8 @@ -0,0 +1,2 @@ +[pycodestyle] +max_line_length = 80 diff --git a/agents/simul8/.pylintrc b/agents/simul8/.pylintrc new file mode 100644 index 0000000..8c8f96a --- /dev/null +++ b/agents/simul8/.pylintrc @@ -0,0 +1,588 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold to be exceeded before program exits with error. +fail-under=10.0 + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regex patterns to the ignore-list. The +# regex matches against paths and can be in Posix or Windows format. +ignore-paths=simul8_agent/.venv + +# Files or directories matching the regex patterns are skipped. The regex +# matches against base names, not paths. The default value ignores emacs file +# locks +ignore-patterns=docs|venv|.venv|resources + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +init-hook='import sys; sys.path.append("src")' + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.10 + +# Discover python modules and packages in the file system subtree. 
+recursive=no + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + missing-function-docstring, + missing-module-docstring, + missing-class-docstring + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=c-extension-no-member + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. 
+contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# class is considered mixin if its name matches the mixin-class-rgx option. +ignore-mixin-members=yes + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins ignore-mixin- +# members is set to 'yes' +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[BASIC] + +# Naming style matching correct argument names. +#argument-naming-style=camelCase + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +#attr-naming-style=camelCase + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. 
Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +#function-naming-style=camelCase + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +#method-naming-style=camelCase + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +#module-naming-style=camelCase + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +#variable-naming-style=camelCase + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. 
+max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it work, +# install the 'python-enchant' package. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear and the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +#notes-rgx= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=no + +# Signatures are removed from the similarity computation +ignore-signatures=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. 
+check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=cls + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=1 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "BaseException, Exception". 
+overgeneral-exceptions=BaseException, + Exception \ No newline at end of file diff --git a/agents/simul8/README.md new file mode 100644 index 0000000..fc4f245 --- /dev/null +++ b/agents/simul8/README.md @@ -0,0 +1,329 @@ +# Simul8 Agent + +The Simul8 Agent is a Python-based connector designed to interface with Simul8 simulations through a single method. + +- **Batch Simulation**: Executes a predefined Simul8 simulation with the specified input parameters and collects the final results upon completion. + +The Simul8 Agent is primarily built to integrate with the Simulation Bridge, but it can also be used by external systems via RabbitMQ exchanges. Communication parameters and other settings must be defined in the YAML-based configuration file. + +
+ *Simul8 Agent Structure*
+ +## Table of Contents + +- [Simul8 agent](#simul8-agent) + - [Table of Contents](#table-of-contents) + - [Demo Video](#demo-video) + - [Requirements](#requirements) + - [Installation](#installation) + - [1. Clone the Repository and Navigate to the Working Directory](#1-clone-the-repository-and-navigate-to-the-working-directory) + - [2. Install Poetry and Create Virtual Environment](#2-install-poetry-and-create-virtual-environment) + - [3. Install Project Dependencies](#3-install-project-dependencies) + - [Configuration](#configuration) + - [Usage](#usage) + - [Getting Started](#getting-started) + - [Running the Agent](#running-the-agent) + - [Distributing the Package as a PIP Package with Poetry](#distributing-the-package-as-a-pip-package-with-poetry) + - [Verifying the Package (Optional but Recommended)](#verifying-the-package-optional-but-recommended) + - [Releasing a New Version](#releasing-a-new-version) + - [Demonstration](#demonstration) + - [Quick Start: Interacting with the Simul8 Agent](#quick-start-interacting-with-the-simul8-agent) + - [Workflow](#workflow) + - [Package Development](#package-development) + - [Author](#author) + +## Demo Video + +For a comprehensive demonstration of the Simul8 Agent in action, you can: + +- [Watch the full video (MP4 format)](simul8_agent/images/demo-simul8-edited.mp4) + +Or view a quick preview below: + +

+ *Simul8 Agent Demo Preview: a video demonstration of the Simul8 Agent in action.*

+ +## Requirements + +### Installation + +#### 1. Clone the Repository and Navigate to the Working Directory + +```bash +git clone https://github.com/INTO-CPS-Association/simulation-bridge.git +cd simulation-bridge +``` + +#### 2. Install Poetry and Create Virtual Environment + +Ensure that Poetry is installed on your system. If it is not already installed, execute the following commands: + +```bash +python3 -m pip install --user pipx +python3 -m pipx ensurepath +pipx install poetry +``` + +Verify the installation by checking the Poetry version: + +```bash +poetry --version +``` + +Activate the virtual environment: + +```bash +poetry env activate +``` + +> **Important:** +> The command `poetry env activate` does not automatically activate the virtual environment; instead, it prints the command you need to run to activate it. +> You must copy and paste the displayed command, for example: + +```bash +source /path/to/virtualenv/bin/activate +``` + +Verify that the environment is active by checking the Python path: + +```bash +which python +``` + +#### 3. Install Project Dependencies + +Run the following command to install all dependencies defined in `pyproject.toml`: + +```bash +poetry install +``` + +### Configuration + +The configuration is specified in yaml format. A template file (`simul8_agent/config/config.yaml.template`) has been provided. It can be customized further. + +Explanation on different fields of the yaml template is given below. + +```yaml +agent: + agent_id: simul8 # Specifies the unique identifier for the agent. This ID is used to distinguish the agent in the system. + simulator: simul8 # Specifies the name of the simulator + +rabbitmq: + host: localhost # The hostname or IP address of the RabbitMQ server. + port: 5672 # The port number for RabbitMQ communication (default is 5672). + username: guest # The username for authenticating with RabbitMQ. + password: guest # The password for authenticating with RabbitMQ. + heartbeat: 600 # The heartbeat interval (in seconds) to keep the connection alive. + vhost: / # The virtual host to use for RabbitMQ connections. + +simulation: + path: /Users/foo/simulation-bridge/agents/simul8/simul8_agent/docs/examples # The file path to the folder containing simul8 simulation files. + +exchanges: + input: ex.bridge.output # The RabbitMQ exchange from which the agent receives commands. + output: ex.sim.result # The RabbitMQ exchange to which the agent sends simulation results. + +queue: + durable: true # Ensures that the queue persists across RabbitMQ broker restarts. + prefetch_count: 1 # Limits the number of unacknowledged messages the agent can receive at a time. + +logging: + level: INFO # Specifies the logging level. Options include DEBUG, INFO, and ERROR. + file: logs/simul8_agent.log # The file path where logs will be stored. + +tcp: + host: localhost # The hostname or IP address for TCP communication. + port: 5678 # The port number for TCP communication. + +response_templates: + success: + status: success # Indicates a successful simulation response. + simulation: + type: batch # Specifies the type of simulation (e.g., batch or streaming). + timestamp_format: "%Y-%m-%dT%H:%M:%SZ" # The timestamp format in ISO 8601 with a Z suffix for UTC. + include_metadata: true # Determines whether metadata is included in the response. + metadata_fields: # Specifies the metadata fields to include in the response. + - execution_time + - memory_usage + - simul8_version + + error: + status: error # Indicates an error response. 
+ include_stacktrace: false # For security, stack traces are excluded in production environments. + error_codes: # Maps specific error scenarios to HTTP-like status codes. + invalid_config: 400 # Error code for invalid configuration. + simul8_start_failure: 500 # Error code for simul8 startup failure. + execution_error: 500 # Error code for simulation execution errors. + timeout: 504 # Error code for simulation timeout. + missing_file: 404 # Error code for missing files. + + timestamp_format: "%Y-%m-%dT%H:%M:%SZ" # The timestamp format in ISO 8601 with a Z suffix for UTC. + + progress: + status: in_progress # Indicates that the simulation is currently in progress. + include_percentage: true # Includes the percentage of completion in progress updates. + update_interval: 5 # Specifies the interval (in seconds) for sending progress updates. + timestamp_format: "%Y-%m-%dT%H:%M:%SZ" # The timestamp format in ISO 8601 with a Z suffix for UTC. +``` + +## Usage + +The agent requires a configuration file to run. You can start by copying the provided template and customizing it as needed. + +### Getting Started + +**Generate a configuration file template:** + +```bash +poetry run simul8-agent --generate-config +``` + +This command creates a `config.yaml` file in your current directory. If the file already exists, it will not be overwritten. + +**Generate Project Files:** + +To create a complete set of template files for your Simul8 agent project: + +```bash +poetry run simul8-agent --generate-project +``` + +This command creates the following structure in your current directory (existing files won't be overwritten): + +``` +. +├── config.yaml # Agent configuration settings +├── SimulationBatch.ms8 # Template for batch simulations +``` + +Each template file contains documentation and can be customized for your specific simulation requirements. + +### Running the Agent + +To start the Simul8 Agent with the default configuration: + +```bash +poetry run simul8-agent +``` + +To use a custom configuration file: + +```bash +poetry run simul8-agent --config-file +``` + +Or use the shorthand: + +```bash +poetry run simul8-agent -c +``` + +## Distributing the Package as a PIP Package with Poetry + +To create the package, run the following command in the project's root directory (where `pyproject.toml` is located): + +```bash +poetry build +``` + +This will generate two files in the `dist/` folder: + +- A `.whl` file → (Wheel Package). +- A `.tar.gz` file → (Source Archive). + +Example output: + +```bash +dist/ +├── simul8_agent-0.2.0-py3-none-any.whl +└── simul8_agent-0.2.0.tar.gz +``` + +### Verifying the Package (Optional but Recommended) + +You can verify that the package works by installing it locally: + +```bash +pip install dist/simul8_agent-0.2.0-py3-none-any.whl +``` + +Then, run the command defined in the script: + +```bash +simul8-agent +``` + +### Releasing a New Version + +When you modify the code and want to release a new version, increment the version number in `pyproject.toml`: + +```toml +version = "0.3.0" +``` + +Then rebuild the package: + +```bash +poetry build +``` + +## Demonstration + +For instructions on running tests created with `pytest` and `unittest.mock`, please refer to the [Tests Documentation](simul8_agent/tests/README.md). 
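The agent exchanges messages over the two RabbitMQ exchanges defined in the configuration above: it consumes simulation requests from `ex.bridge.output` and publishes results to `ex.sim.result`. The generated client described in the next section is the recommended way to interact with it; purely as orientation, a minimal, hypothetical `pika` sketch of that round trip might look as follows. The exchange types, routing key, and payload fields below are illustrative assumptions only; the authoritative request schema is the one shipped in the generated `simulation.yaml` and `use_simul8_agent.py`.

```python
"""Hypothetical stand-alone client sketch (not part of this package).

Connection settings and exchange names are taken from config.yaml above;
the exchange types, routing key, and request fields are assumptions made
for illustration only.
"""
import json

import pika

params = pika.ConnectionParameters(
    host="localhost",
    port=5672,
    virtual_host="/",
    credentials=pika.PlainCredentials("guest", "guest"),
)
connection = pika.BlockingConnection(params)
channel = connection.channel()

# Assumed exchange types; if the running agent already declares them
# differently, remove these declarations and reuse its definitions.
channel.exchange_declare(exchange="ex.bridge.output", exchange_type="topic", durable=True)
channel.exchange_declare(exchange="ex.sim.result", exchange_type="topic", durable=True)

# Temporary, exclusive queue on which this client receives the result.
result_queue = channel.queue_declare(queue="", exclusive=True).method.queue
channel.queue_bind(queue=result_queue, exchange="ex.sim.result", routing_key="#")

# Illustrative request payload; the real schema is defined by simulation.yaml.
request = {
    "simulation": "SimulationBatch.ms8",
    "type": "batch",
    "inputs": {"arrival_rate": 5},
}
channel.basic_publish(
    exchange="ex.bridge.output",
    routing_key="simul8",  # assumed to match the agent_id binding
    body=json.dumps(request),
)

# Wait (up to 60 s) for the agent to publish a result message.
for _method, _properties, body in channel.consume(result_queue, auto_ack=True,
                                                  inactivity_timeout=60):
    if body is not None:
        print(json.loads(body))
    break

channel.cancel()
connection.close()
```

If the agent declares the exchanges with different types or durability, drop the `exchange_declare` calls above and rely on the declarations made by the agent.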
+ +## Quick Start: Interacting with the Simul8 Agent + +To quickly get started, generate the default project structure by running: + +```bash +poetry run simul8-agent --generate-project +``` + +This will create a `client/` directory in the root of your project containing all necessary files for interaction. + +Next, move into the client directory: + +```bash +cd client +``` + +Inside this folder, you'll find: + +- `use.yaml` — Configuration file for the communication protocol (e.g., RabbitMQ settings) +- `simulation.yaml` — The simulation request payload that will be sent to the Simul8 Agent +- `use_simul8_agent.py` — Python script to send the request and receive the results + +For detailed instructions on how to configure and use the client, refer to the [Use Simul8 Agent](./simul8_agent/resources/README.md) in the `agents/simul8/simul8_agent/resources/` folder. + +## Workflow + +1. The agent connects to RabbitMQ and sets up the required queues and exchanges. +2. It listens for incoming messages on its dedicated queue. +3. Upon receiving a message: + +- It analyzes and processes the simulation request. +- Executes the simulation. +- Sends the results to the output exchange. + +For detailed information regarding simulations and constraints, please refer to the [Simulations and Constraints Documentation](simul8_agent/docs/README.md). + +## Package Development + +The developer-specific commands are + +```bash +pytest +pylint simul8_agent +autopep8 --in-place --aggressive --recursive 'simul8_agent' +``` + +## Author + +

- **Marco Melloni**, Digital Automation Engineering Student, University of Modena and Reggio Emilia, Department of Sciences and Methods for Engineering (DISMI)
- **Prof. Marco Picone**, Associate Professor, University of Modena and Reggio Emilia, Department of Sciences and Methods for Engineering (DISMI)
- **Dr. Prasad Talasila**, Postdoctoral Researcher, Aarhus University
- **Rasmus Carlsen**, Computer Engineering Student, Aarhus University

diff --git a/agents/simul8/poetry.lock b/agents/simul8/poetry.lock new file mode 100644 index 0000000..ab8756c --- /dev/null +++ b/agents/simul8/poetry.lock @@ -0,0 +1,753 @@ +# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "astroid" +version = "3.3.10" +description = "An abstract syntax tree for Python with inference support." +optional = false +python-versions = ">=3.9.0" +groups = ["dev"] +files = [ + {file = "astroid-3.3.10-py3-none-any.whl", hash = "sha256:104fb9cb9b27ea95e847a94c003be03a9e039334a8ebca5ee27dafaf5c5711eb"}, + {file = "astroid-3.3.10.tar.gz", hash = "sha256:c332157953060c6deb9caa57303ae0d20b0fbdb2e59b4a4f2a6ba49d0a7961ce"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4", markers = "python_version < \"3.11\""} + +[[package]] +name = "autopep8" +version = "2.3.2" +description = "A tool that automatically formats Python code to conform to the PEP 8 style guide" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "autopep8-2.3.2-py2.py3-none-any.whl", hash = "sha256:ce8ad498672c845a0c3de2629c15b635ec2b05ef8177a6e7c91c74f3e9b51128"}, + {file = "autopep8-2.3.2.tar.gz", hash = "sha256:89440a4f969197b69a995e4ce0661b031f455a9f776d2c5ba3dbd83466931758"}, +] + +[package.dependencies] +pycodestyle = ">=2.12.0" +tomli = {version = "*", markers = "python_version < \"3.11\""} + +[[package]] +name = "click" +version = "8.2.0" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "click-8.2.0-py3-none-any.whl", hash = "sha256:6b303f0b2aa85f1cb4e5303078fadcbcd4e476f114fab9b5007005711839325c"}, + {file = "click-8.2.0.tar.gz", hash = "sha256:f5452aeddd9988eefa20f90f05ab66f17fce1ee2a36907fd30b05bbb5953814d"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] +markers = {main = "platform_system == \"Windows\" or sys_platform == \"win32\"", dev = "sys_platform == \"win32\""} + +[[package]] +name = "colorlog" +version = "6.9.0" +description = "Add colours to the output of Python's logging module." 
+optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "colorlog-6.9.0-py3-none-any.whl", hash = "sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff"}, + {file = "colorlog-6.9.0.tar.gz", hash = "sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} + +[package.extras] +development = ["black", "flake8", "mypy", "pytest", "types-colorama"] + +[[package]] +name = "coverage" +version = "7.8.0" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "coverage-7.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2931f66991175369859b5fd58529cd4b73582461877ecfd859b6549869287ffe"}, + {file = "coverage-7.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:52a523153c568d2c0ef8826f6cc23031dc86cffb8c6aeab92c4ff776e7951b28"}, + {file = "coverage-7.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c8a5c139aae4c35cbd7cadca1df02ea8cf28a911534fc1b0456acb0b14234f3"}, + {file = "coverage-7.8.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a26c0c795c3e0b63ec7da6efded5f0bc856d7c0b24b2ac84b4d1d7bc578d676"}, + {file = "coverage-7.8.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821f7bcbaa84318287115d54becb1915eece6918136c6f91045bb84e2f88739d"}, + {file = "coverage-7.8.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a321c61477ff8ee705b8a5fed370b5710c56b3a52d17b983d9215861e37b642a"}, + {file = "coverage-7.8.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:ed2144b8a78f9d94d9515963ed273d620e07846acd5d4b0a642d4849e8d91a0c"}, + {file = "coverage-7.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:042e7841a26498fff7a37d6fda770d17519982f5b7d8bf5278d140b67b61095f"}, + {file = "coverage-7.8.0-cp310-cp310-win32.whl", hash = "sha256:f9983d01d7705b2d1f7a95e10bbe4091fabc03a46881a256c2787637b087003f"}, + {file = "coverage-7.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:5a570cd9bd20b85d1a0d7b009aaf6c110b52b5755c17be6962f8ccd65d1dbd23"}, + {file = "coverage-7.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e7ac22a0bb2c7c49f441f7a6d46c9c80d96e56f5a8bc6972529ed43c8b694e27"}, + {file = "coverage-7.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bf13d564d310c156d1c8e53877baf2993fb3073b2fc9f69790ca6a732eb4bfea"}, + {file = "coverage-7.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5761c70c017c1b0d21b0815a920ffb94a670c8d5d409d9b38857874c21f70d7"}, + {file = "coverage-7.8.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5ff52d790c7e1628241ffbcaeb33e07d14b007b6eb00a19320c7b8a7024c040"}, + {file = "coverage-7.8.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d39fc4817fd67b3915256af5dda75fd4ee10621a3d484524487e33416c6f3543"}, + {file = "coverage-7.8.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b44674870709017e4b4036e3d0d6c17f06a0e6d4436422e0ad29b882c40697d2"}, + {file = "coverage-7.8.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8f99eb72bf27cbb167b636eb1726f590c00e1ad375002230607a844d9e9a2318"}, + {file = "coverage-7.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:b571bf5341ba8c6bc02e0baeaf3b061ab993bf372d982ae509807e7f112554e9"}, + {file = "coverage-7.8.0-cp311-cp311-win32.whl", hash = "sha256:e75a2ad7b647fd8046d58c3132d7eaf31b12d8a53c0e4b21fa9c4d23d6ee6d3c"}, + {file = "coverage-7.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:3043ba1c88b2139126fc72cb48574b90e2e0546d4c78b5299317f61b7f718b78"}, + {file = "coverage-7.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bbb5cc845a0292e0c520656d19d7ce40e18d0e19b22cb3e0409135a575bf79fc"}, + {file = "coverage-7.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4dfd9a93db9e78666d178d4f08a5408aa3f2474ad4d0e0378ed5f2ef71640cb6"}, + {file = "coverage-7.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f017a61399f13aa6d1039f75cd467be388d157cd81f1a119b9d9a68ba6f2830d"}, + {file = "coverage-7.8.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0915742f4c82208ebf47a2b154a5334155ed9ef9fe6190674b8a46c2fb89cb05"}, + {file = "coverage-7.8.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a40fcf208e021eb14b0fac6bdb045c0e0cab53105f93ba0d03fd934c956143a"}, + {file = "coverage-7.8.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a1f406a8e0995d654b2ad87c62caf6befa767885301f3b8f6f73e6f3c31ec3a6"}, + {file = "coverage-7.8.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:77af0f6447a582fdc7de5e06fa3757a3ef87769fbb0fdbdeba78c23049140a47"}, + {file = "coverage-7.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f2d32f95922927186c6dbc8bc60df0d186b6edb828d299ab10898ef3f40052fe"}, + {file = "coverage-7.8.0-cp312-cp312-win32.whl", hash = "sha256:769773614e676f9d8e8a0980dd7740f09a6ea386d0f383db6821df07d0f08545"}, + {file = "coverage-7.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:e5d2b9be5b0693cf21eb4ce0ec8d211efb43966f6657807f6859aab3814f946b"}, + {file = "coverage-7.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5ac46d0c2dd5820ce93943a501ac5f6548ea81594777ca585bf002aa8854cacd"}, + {file = "coverage-7.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:771eb7587a0563ca5bb6f622b9ed7f9d07bd08900f7589b4febff05f469bea00"}, + {file = "coverage-7.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42421e04069fb2cbcbca5a696c4050b84a43b05392679d4068acbe65449b5c64"}, + {file = "coverage-7.8.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:554fec1199d93ab30adaa751db68acec2b41c5602ac944bb19187cb9a41a8067"}, + {file = "coverage-7.8.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aaeb00761f985007b38cf463b1d160a14a22c34eb3f6a39d9ad6fc27cb73008"}, + {file = "coverage-7.8.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:581a40c7b94921fffd6457ffe532259813fc68eb2bdda60fa8cc343414ce3733"}, + {file = "coverage-7.8.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f319bae0321bc838e205bf9e5bc28f0a3165f30c203b610f17ab5552cff90323"}, + {file = "coverage-7.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:04bfec25a8ef1c5f41f5e7e5c842f6b615599ca8ba8391ec33a9290d9d2db3a3"}, + {file = "coverage-7.8.0-cp313-cp313-win32.whl", hash = "sha256:dd19608788b50eed889e13a5d71d832edc34fc9dfce606f66e8f9f917eef910d"}, + {file = "coverage-7.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:a9abbccd778d98e9c7e85038e35e91e67f5b520776781d9a1e2ee9d400869487"}, + {file = 
"coverage-7.8.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:18c5ae6d061ad5b3e7eef4363fb27a0576012a7447af48be6c75b88494c6cf25"}, + {file = "coverage-7.8.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:95aa6ae391a22bbbce1b77ddac846c98c5473de0372ba5c463480043a07bff42"}, + {file = "coverage-7.8.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e013b07ba1c748dacc2a80e69a46286ff145935f260eb8c72df7185bf048f502"}, + {file = "coverage-7.8.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d766a4f0e5aa1ba056ec3496243150698dc0481902e2b8559314368717be82b1"}, + {file = "coverage-7.8.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad80e6b4a0c3cb6f10f29ae4c60e991f424e6b14219d46f1e7d442b938ee68a4"}, + {file = "coverage-7.8.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b87eb6fc9e1bb8f98892a2458781348fa37e6925f35bb6ceb9d4afd54ba36c73"}, + {file = "coverage-7.8.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:d1ba00ae33be84066cfbe7361d4e04dec78445b2b88bdb734d0d1cbab916025a"}, + {file = "coverage-7.8.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f3c38e4e5ccbdc9198aecc766cedbb134b2d89bf64533973678dfcf07effd883"}, + {file = "coverage-7.8.0-cp313-cp313t-win32.whl", hash = "sha256:379fe315e206b14e21db5240f89dc0774bdd3e25c3c58c2c733c99eca96f1ada"}, + {file = "coverage-7.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2e4b6b87bb0c846a9315e3ab4be2d52fac905100565f4b92f02c445c8799e257"}, + {file = "coverage-7.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa260de59dfb143af06dcf30c2be0b200bed2a73737a8a59248fcb9fa601ef0f"}, + {file = "coverage-7.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:96121edfa4c2dfdda409877ea8608dd01de816a4dc4a0523356067b305e4e17a"}, + {file = "coverage-7.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b8af63b9afa1031c0ef05b217faa598f3069148eeee6bb24b79da9012423b82"}, + {file = "coverage-7.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89b1f4af0d4afe495cd4787a68e00f30f1d15939f550e869de90a86efa7e0814"}, + {file = "coverage-7.8.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94ec0be97723ae72d63d3aa41961a0b9a6f5a53ff599813c324548d18e3b9e8c"}, + {file = "coverage-7.8.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8a1d96e780bdb2d0cbb297325711701f7c0b6f89199a57f2049e90064c29f6bd"}, + {file = "coverage-7.8.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f1d8a2a57b47142b10374902777e798784abf400a004b14f1b0b9eaf1e528ba4"}, + {file = "coverage-7.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cf60dd2696b457b710dd40bf17ad269d5f5457b96442f7f85722bdb16fa6c899"}, + {file = "coverage-7.8.0-cp39-cp39-win32.whl", hash = "sha256:be945402e03de47ba1872cd5236395e0f4ad635526185a930735f66710e1bd3f"}, + {file = "coverage-7.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:90e7fbc6216ecaffa5a880cdc9c77b7418c1dcb166166b78dbc630d07f278cc3"}, + {file = "coverage-7.8.0-pp39.pp310.pp311-none-any.whl", hash = "sha256:b8194fb8e50d556d5849753de991d390c5a1edeeba50f68e3a9253fbd8bf8ccd"}, + {file = "coverage-7.8.0-py3-none-any.whl", hash = "sha256:dbf364b4c5e7bae9250528167dfe40219b62e2d573c854d74be213e1e52069f7"}, + {file = "coverage-7.8.0.tar.gz", hash = "sha256:7a3d62b3b03b4b6fd41a085f3574874cf946cb4604d2b4d3e8dca8cd570ca501"}, +] + 
+[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] + +[[package]] +name = "dill" +version = "0.4.0" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049"}, + {file = "dill-0.4.0.tar.gz", hash = "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + +[[package]] +name = "exceptiongroup" +version = "1.3.0" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, + {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""} + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "iniconfig" +version = "2.1.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, +] + +[[package]] +name = "isort" +version = "6.0.1" +description = "A Python utility / library to sort Python imports." 
+optional = false +python-versions = ">=3.9.0" +groups = ["dev"] +files = [ + {file = "isort-6.0.1-py3-none-any.whl", hash = "sha256:2dc5d7f65c9678d94c88dfc29161a320eec67328bc97aad576874cb4be1e9615"}, + {file = "isort-6.0.1.tar.gz", hash = "sha256:1cb5df28dfbc742e490c5e41bad6da41b805b0a8be7bc93cd0fb2a8a890ac450"}, +] + +[package.extras] +colors = ["colorama"] +plugins = ["setuptools"] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +groups = ["dev"] +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "packaging" +version = "25.0" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "paho-mqtt" +version = "2.1.0" +description = "MQTT version 5.0/3.1.1 client class" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "paho_mqtt-2.1.0-py3-none-any.whl", hash = "sha256:6db9ba9b34ed5bc6b6e3812718c7e06e2fd7444540df2455d2c51bd58808feee"}, + {file = "paho_mqtt-2.1.0.tar.gz", hash = "sha256:12d6e7511d4137555a3f6ea167ae846af2c7357b10bc6fa4f7c3968fc1723834"}, +] + +[package.extras] +proxy = ["pysocks"] + +[[package]] +name = "pika" +version = "1.3.2" +description = "Pika Python AMQP Client Library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "pika-1.3.2-py3-none-any.whl", hash = "sha256:0779a7c1fafd805672796085560d290213a465e4f6f76a6fb19e378d8041a14f"}, + {file = "pika-1.3.2.tar.gz", hash = "sha256:b2a327ddddf8570b4965b3576ac77091b850262d34ce8c1d8cb4e4146aa4145f"}, +] + +[package.extras] +gevent = ["gevent"] +tornado = ["tornado"] +twisted = ["twisted"] + +[[package]] +name = "platformdirs" +version = "4.3.8" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
+optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, + {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.14.1)"] + +[[package]] +name = "pluggy" +version = "1.6.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["coverage", "pytest", "pytest-benchmark"] + +[[package]] +name = "psutil" +version = "7.0.0" +description = "Cross-platform lib for process and system monitoring in Python. NOTE: the syntax of this script MUST be kept compatible with Python 2.7." +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25"}, + {file = "psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da"}, + {file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91"}, + {file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34"}, + {file = "psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993"}, + {file = "psutil-7.0.0-cp36-cp36m-win32.whl", hash = "sha256:84df4eb63e16849689f76b1ffcb36db7b8de703d1bc1fe41773db487621b6c17"}, + {file = "psutil-7.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1e744154a6580bc968a0195fd25e80432d3afec619daf145b9e5ba16cc1d688e"}, + {file = "psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99"}, + {file = "psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553"}, + {file = "psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456"}, +] + +[package.extras] +dev = ["abi3audit", "black (==24.10.0)", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest", "pytest-cov", "pytest-xdist", "requests", "rstcheck", "ruff", "setuptools", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "vulture", "wheel"] +test = ["pytest", "pytest-xdist", "setuptools"] + +[[package]] +name = "pycodestyle" +version = "2.13.0" +description = "Python style guide checker" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pycodestyle-2.13.0-py2.py3-none-any.whl", hash = 
"sha256:35863c5974a271c7a726ed228a14a4f6daf49df369d8c50cd9a6f58a5e143ba9"}, + {file = "pycodestyle-2.13.0.tar.gz", hash = "sha256:c8415bf09abe81d9c7f872502a6eee881fbe85d8763dd5b9924bb0a01d67efae"}, +] + +[[package]] +name = "pydantic" +version = "2.11.4" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"}, + {file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.33.2" +typing-extensions = ">=4.12.2" +typing-inspection = ">=0.4.0" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, + {file = 
"pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = 
"sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"}, + {file = 
"pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"}, + {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pylint" +version = "3.3.7" +description = "python code static checker" +optional = false +python-versions = ">=3.9.0" +groups = ["dev"] +files = [ + {file = "pylint-3.3.7-py3-none-any.whl", hash = "sha256:43860aafefce92fca4cf6b61fe199cdc5ae54ea28f9bf4cd49de267b5195803d"}, + {file = "pylint-3.3.7.tar.gz", hash = "sha256:2b11de8bde49f9c5059452e0c310c079c746a0a8eeaa789e5aa966ecc23e4559"}, +] + +[package.dependencies] +astroid = 
">=3.3.8,<=3.4.0.dev0" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +dill = [ + {version = ">=0.2", markers = "python_version < \"3.11\""}, + {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, + {version = ">=0.3.6", markers = "python_version == \"3.11\""}, +] +isort = ">=4.2.5,<5.13 || >5.13,<7" +mccabe = ">=0.6,<0.8" +platformdirs = ">=2.2" +tomli = {version = ">=1.1", markers = "python_version < \"3.11\""} +tomlkit = ">=0.10.1" + +[package.extras] +spelling = ["pyenchant (>=3.2,<4.0)"] +testutils = ["gitpython (>3)"] + +[[package]] +name = "pytest" +version = "8.3.5" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820"}, + {file = "pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-cov" +version = "6.1.1" +description = "Pytest plugin for measuring coverage." +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest_cov-6.1.1-py3-none-any.whl", hash = "sha256:bddf29ed2d0ab6f4df17b4c55b0a657287db8684af9c42ea546b21b1041b3dde"}, + {file = "pytest_cov-6.1.1.tar.gz", hash = "sha256:46935f7aaefba760e716c2ebfbe1c216240b9592966e7da99ea8292d4d3e2a0a"}, +] + +[package.dependencies] +coverage = {version = ">=7.5", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] + +[[package]] +name = "pywin32" +version = "310" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +groups = ["main"] +markers = "sys_platform == \"win32\"" +files = [ + {file = "pywin32-310-cp310-cp310-win32.whl", hash = "sha256:6dd97011efc8bf51d6793a82292419eba2c71cf8e7250cfac03bba284454abc1"}, + {file = "pywin32-310-cp310-cp310-win_amd64.whl", hash = "sha256:c3e78706e4229b915a0821941a84e7ef420bf2b77e08c9dae3c76fd03fd2ae3d"}, + {file = "pywin32-310-cp310-cp310-win_arm64.whl", hash = "sha256:33babed0cf0c92a6f94cc6cc13546ab24ee13e3e800e61ed87609ab91e4c8213"}, + {file = "pywin32-310-cp311-cp311-win32.whl", hash = "sha256:1e765f9564e83011a63321bb9d27ec456a0ed90d3732c4b2e312b855365ed8bd"}, + {file = "pywin32-310-cp311-cp311-win_amd64.whl", hash = "sha256:126298077a9d7c95c53823934f000599f66ec9296b09167810eb24875f32689c"}, + {file = "pywin32-310-cp311-cp311-win_arm64.whl", hash = "sha256:19ec5fc9b1d51c4350be7bb00760ffce46e6c95eaf2f0b2f1150657b1a43c582"}, + {file = "pywin32-310-cp312-cp312-win32.whl", hash = "sha256:8a75a5cc3893e83a108c05d82198880704c44bbaee4d06e442e471d3c9ea4f3d"}, + {file = "pywin32-310-cp312-cp312-win_amd64.whl", hash = "sha256:bf5c397c9a9a19a6f62f3fb821fbf36cac08f03770056711f765ec1503972060"}, + {file = "pywin32-310-cp312-cp312-win_arm64.whl", hash = "sha256:2349cc906eae872d0663d4d6290d13b90621eaf78964bb1578632ff20e152966"}, + {file = "pywin32-310-cp313-cp313-win32.whl", hash = 
"sha256:5d241a659c496ada3253cd01cfaa779b048e90ce4b2b38cd44168ad555ce74ab"}, + {file = "pywin32-310-cp313-cp313-win_amd64.whl", hash = "sha256:667827eb3a90208ddbdcc9e860c81bde63a135710e21e4cb3348968e4bd5249e"}, + {file = "pywin32-310-cp313-cp313-win_arm64.whl", hash = "sha256:e308f831de771482b7cf692a1f308f8fca701b2d8f9dde6cc440c7da17e47b33"}, + {file = "pywin32-310-cp38-cp38-win32.whl", hash = "sha256:0867beb8addefa2e3979d4084352e4ac6e991ca45373390775f7084cc0209b9c"}, + {file = "pywin32-310-cp38-cp38-win_amd64.whl", hash = "sha256:30f0a9b3138fb5e07eb4973b7077e1883f558e40c578c6925acc7a94c34eaa36"}, + {file = "pywin32-310-cp39-cp39-win32.whl", hash = "sha256:851c8d927af0d879221e616ae1f66145253537bbdd321a77e8ef701b443a9a1a"}, + {file = "pywin32-310-cp39-cp39-win_amd64.whl", hash = "sha256:96867217335559ac619f00ad70e513c0fcf84b8a3af9fc2bba3b59b97da70475"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = 
"sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + 
{file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "tomli" +version = "2.2.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, +] + +[[package]] +name = "tomlkit" +version = "0.13.2" +description = "Style preserving TOML library" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = 
"tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, + {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, +] + +[[package]] +name = "types-pyyaml" +version = "6.0.12.20250516" +description = "Typing stubs for PyYAML" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "types_pyyaml-6.0.12.20250516-py3-none-any.whl", hash = "sha256:8478208feaeb53a34cb5d970c56a7cd76b72659442e733e268a94dc72b2d0530"}, + {file = "types_pyyaml-6.0.12.20250516.tar.gz", hash = "sha256:9f21a70216fc0fa1b216a8176db5f9e0af6eb35d2f2932acb87689d03a5bf6ba"}, +] + +[[package]] +name = "typing-extensions" +version = "4.13.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, + {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, +] +markers = {dev = "python_version == \"3.10\""} + +[[package]] +name = "typing-inspection" +version = "0.4.0" +description = "Runtime typing introspection tools" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, + {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, +] + +[package.dependencies] +typing-extensions = ">=4.12.0" + +[metadata] +lock-version = "2.1" +python-versions = "^3.10" +content-hash = "41008922d606bb738ebb952bcacc5e71841a3b64c8f42a7b3306634e6dc650f6" diff --git a/agents/simul8/pyproject.toml b/agents/simul8/pyproject.toml new file mode 100644 index 0000000..ada52d9 --- /dev/null +++ b/agents/simul8/pyproject.toml @@ -0,0 +1,34 @@ +[tool.poetry] +name = "simul8-agent" +version = "0.1.0" +description = "Agent for Simul8 integration" +authors = ["Marco Melloni <291358@studenti.unimore.it>", "Rasmus Carlsen <202005860@post.au.dk"] +readme = "README.md" +packages = [{ include = "simul8_agent"}] +license = "ICAPL" +include = ["simul8/config/config.yaml.template"] + +[tool.poetry.dependencies] +python = "^3.10" +paho-mqtt = ">=2.1.0,<3.0.0" +pyyaml = ">=6.0.2,<7.0.0" +click = "^8.1.8" +types-pyyaml = "^6.0.12.20250402" +pydantic = "^2.11.4" +pika = "^1.3.2" +psutil = "^7.0.0" +colorlog = "^6.9.0" +pywin32 = { version = "310", markers = "sys_platform == 'win32'" } + +[tool.poetry.group.dev.dependencies] +autopep8 = "^2.3.2" +pylint = "^3.3.7" +pytest = "^8.3.5" +pytest-cov = "^6.1.1" + +[tool.poetry.scripts] +simul8-agent = "simul8_agent.src.main:main" + +[build-system] +requires = ["poetry-core>=2.0.0"] +build-backend = "poetry.core.masonry.api" \ No newline at end of file diff --git a/agents/simul8/pytest.ini b/agents/simul8/pytest.ini new file mode 100644 index 0000000..9df35a7 --- /dev/null +++ b/agents/simul8/pytest.ini @@ -0,0 +1,16 @@ +[pytest] +minversion = 8.3 +#pythonpath = src src/data src/methods tests +testpaths = + tests/unit + tests/integration +addopts = --show-capture=no --cov=simul8_agent/src --cov-report=term-missing --cov-report=html --ignore=simul8_agent/docs +python_files = test_*.py + +log_cli=false +log_level=DEBUG +log_format = %(asctime)s %(levelname)s 
%(message)s +log_date_format = %Y-%m-%d %H:%M:%S + +#timeout slow tests +timeout=5 diff --git a/agents/simul8/simul8_agent/__version__.py b/agents/simul8/simul8_agent/__version__.py new file mode 100644 index 0000000..d3ec452 --- /dev/null +++ b/agents/simul8/simul8_agent/__version__.py @@ -0,0 +1 @@ +__version__ = "0.2.0" diff --git a/agents/simul8/simul8_agent/api/simulation.yaml.template b/agents/simul8/simul8_agent/api/simulation.yaml.template new file mode 100644 index 0000000..29dd546 --- /dev/null +++ b/agents/simul8/simul8_agent/api/simulation.yaml.template @@ -0,0 +1,31 @@ +simulation: + request_id: abcdef12345 + # (RequestID) to identify each request. + + client_id: dt + # Unique identifier of the sender of this simulation request + + simulator: simul8 + # Specifies the target system for the simulation. + # Use 'simul8' to route the request to the Simul8 simulator. + + type: batch + # The only simulation type available + + file: simulation_batch.s8 + # The name of the Simul8 file to execute for this simulation. + + inputs: + # Input variables to be passed to the simulation. + # Customize these key-value pairs as needed for your specific simulation. + run_time: .. + columns: [.. , ..] + r1: [.. , ..] + r2: [.. , ..] + + outputs: + # Expected output variables from the simulation. + # Customize these keys based on what outputs your simulation provides. + o1: .. + o2: .. + diff --git a/agents/simul8/simul8_agent/config/config.yaml.template b/agents/simul8/simul8_agent/config/config.yaml.template new file mode 100644 index 0000000..36fa5b9 --- /dev/null +++ b/agents/simul8/simul8_agent/config/config.yaml.template @@ -0,0 +1,65 @@ +agent: + agent_id: simul8 + simulator: simul8 + +rabbitmq: + host: localhost + port: 5672 + username: guest + password: guest + heartbeat: 600 + vhost: / + tls: false + +simulation: + path: /Users/foo/simulation-bridge/agents/simul8/simul8_agent/docs/examples + +exchanges: + input: ex.bridge.output # Exchange to receive commands from + output: ex.sim.result # Exchange to send results to + +queue: + durable: true # Queue persists across broker restarts + prefetch_count: 1 # Number of messages to prefetch + +logging: + level: INFO # Log level (DEBUG, INFO, ERROR) + file: logs/simul8_agent.log + +performance: + enabled: false # Enable/disable performance monitoring + log_dir: performance_logs # Directory where performance logs will be stored + log_filename: performance_metrics.csv # Name of the CSV file for performance metrics + +tcp: + host: localhost + port: 5678 + +response_templates: + success: + status: success + simulation: + type: batch + timestamp_format: "%Y-%m-%dT%H:%M:%SZ" # ISO format with Z suffix + include_metadata: true + metadata_fields: + - execution_time + - memory_usage + - simul8_version + + error: + status: error + include_stacktrace: false # For security, don't include full stack traces in production + error_codes: + invalid_config: 400 + simul8_start_failure: 500 + execution_error: 500 + timeout: 504 + missing_file: 404 + timestamp_format: "%Y-%m-%dT%H:%M:%SZ" + + progress: + status: in_progress + include_percentage: true + update_interval: 5 # Send progress updates every 5 seconds + timestamp_format: "%Y-%m-%dT%H:%M:%SZ" diff --git a/agents/simul8/simul8_agent/docs/README.md b/agents/simul8/simul8_agent/docs/README.md new file mode 100644 index 0000000..fae0edc --- /dev/null +++ b/agents/simul8/simul8_agent/docs/README.md @@ -0,0 +1,65 @@ +# Simul8 Simulation – Guidelines and Best Practices + +## Batch Simulation + +A batch simulation is 
executed by providing a complete set of input parameters at the start. The simulation then runs internally to completion without producing intermediate outputs. Once finished, it returns a final output containing the complete results of the simulation.

This mode is suitable for scenarios where real-time observation is not required and the focus is on analyzing the final state or aggregated outcomes of the simulation.

### Batch Requirements
The inputs must be used via Visual Logic defined within the simulation.

The simulation file used must be configured for this type of invocation:
- `inputSheet` and `outputSheet` must be created manually in the simulation.
- An "On Simulation Open" Visual Logic block must be created, which contains: `File to Sheet "input.csv" , inputSheet[1,1]`
- The data that the simulation manipulates should be written into `outputSheet`.
- An "End Run Logic" Visual Logic block must be created containing the logic that produces the data you want exported. Hence it needs to contain `Sheet to File "output.csv" , outputSheet[1,1]`

The order of parameters in the YAML file must align **precisely** with the order of the function arguments. The Simulation Bridge extracts these parameters from the YAML file and passes them directly to the function without any intermediate processing. Each YAML parameter corresponds to a specific function argument, ensuring a direct and automatic binding.

#### Example
##### Input in simulation.yaml
In this example:

- Inputs:
  - columns: [co2, energy]
  - r1: [25, 100]
  - r2: [25, 200]
- Outputs:
  - total_co2: Total CO2
  - total_energy: Total Energy
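Purely as an illustration (a sketch; the exact file written by the agent may differ), the `columns` entry supplies the header row and each `r` entry supplies one data row, so the `input.csv` loaded by the "On Simulation Open" logic shown below would look roughly like this:

```csv
co2,energy
25,100
25,200
```

Assuming Simul8's `[column, row]` sheet addressing, `inputSheet[1,2]` and `inputSheet[1,3]` would then hold the co2 values and `inputSheet[2,2]` and `inputSheet[2,3]` the energy values, which is what the "End Run Logic" example further below sums.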
+ 

Below is an example of the "On Simulation Open" Visual Logic block:

```python
File to Sheet "input.csv" , inputSheet[1,1]
```
+
+ 

Below is an example of the Visual Logic in the "End Run Logic" block:
##### Visual Logic
```py
SET outputSheet[1,1] = "Total CO2"
SET outputSheet[2,1] = "Total Energy"
SET outputSheet[1,2] = inputSheet[1,2]+inputSheet[1,3]
SET outputSheet[2,2] = inputSheet[2,2]+inputSheet[2,3]
Sheet to File "output.csv" , outputSheet[1,1]
```

This structure is needed because the input and output data are exchanged with the simulation as CSV files.

#### References

For additional guidance, refer to the example files located in the `examples/` folder:

- `simulation_batch.s8`

This file provides a reference implementation that can help in structuring your simulation logic.

#### Notes

No additional constraints are imposed on the implementation. The simulation file should be designed to meet the specific requirements of the simulation scenario.

diff --git a/agents/simul8/simul8_agent/docs/examples/batch_simulation/README.md b/agents/simul8/simul8_agent/docs/examples/batch_simulation/README.md new file mode 100644 index 0000000..294bb9b --- /dev/null +++ b/agents/simul8/simul8_agent/docs/examples/batch_simulation/README.md @@ -0,0 +1,49 @@
+# Batch Simulation
+This simulation is a simple case of a batch simulation, which is the only supported type of simulation for Simul8.
+It takes 4 inputs, in the form of arrays, which are translated into a CSV file as input for the simulation.
+The inputs are:
+- run_time: 500
+- columns: [co2,energy]
+- r1: [25, 100]
+- r2: [25, 200]
+
+_The format of these inputs should be read column-wise, so "co2" values are 25 and 25, and "energy" values are 100 and 200._
+
+The simulation provides the sum of these as the outputs:
+- total_co2: Total CO2
+- total_energy: Total Energy
+
+Hence we expect `total_co2` = 50 and `total_energy` = 300.
+
+## Usage
+Before running the simulation, you need to configure the Simul8 Agent by setting the simulation folder path in the `config.yaml` file under the simulation section:
+```yaml
+simulation:
+  path:
+```
+This path should point to the directory `batch_simulation` containing the simulation files.
+
+Once configured, you can initiate the simulation using the API as described below.
+
+The simulation can be initiated via the API by submitting a YAML payload, a template of which is available in the file `api/simulation.yaml`:
+
+```yaml
+simulation:
+  request_id: simul8_1
+  client_id: dt
+  simulator: simul8
+  type: batch
+  file: mysim.s8
+  inputs:
+    run_time: 500
+    columns: [co2, energy]
+    r1: [25, 100]
+    r2: [25, 200]
+  outputs:
+    total_co2: Total CO2
+    total_energy: Total Energy
+```
+
+From the client folder, use `use_simul8_agent.py` with the CLI option `--api-payload` to specify the path to this YAML file, and start the client.
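+
+For reference, the "End Run Logic" described in the agent guidelines sums each input column, so the `output.csv` exported by `Sheet to File` should contain something like the sketch below. This is an illustration only; the exact layout depends on how `outputSheet` is populated in your model.
+
+```csv
+Total CO2,Total Energy
+50,300
+```
+
+The result reported by the agent should therefore contain `total_co2` = 50 and `total_energy` = 300, matching the expectation stated above.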
\ No newline at end of file diff --git a/agents/simul8/simul8_agent/docs/examples/batch_simulation/api/simulation_batch.yaml.example b/agents/simul8/simul8_agent/docs/examples/batch_simulation/api/simulation_batch.yaml.example new file mode 100644 index 0000000..2514a2e --- /dev/null +++ b/agents/simul8/simul8_agent/docs/examples/batch_simulation/api/simulation_batch.yaml.example @@ -0,0 +1,14 @@ +simulation: + request_id: abcd1234 + client_id: dt + simulator: simul8 + type: batch + file: simulation_batch.s8 + inputs: + run_time: 500 + columns: [co2, energy] + r1: [25, 100] + r2: [25, 200] + outputs: + total_co2: Total CO2 + total_energy: Total Energy \ No newline at end of file diff --git a/agents/simul8/simul8_agent/docs/examples/batch_simulation/simulation_batch.bk8 b/agents/simul8/simul8_agent/docs/examples/batch_simulation/simulation_batch.bk8 new file mode 100644 index 0000000..bc9119e Binary files /dev/null and b/agents/simul8/simul8_agent/docs/examples/batch_simulation/simulation_batch.bk8 differ diff --git a/agents/simul8/simul8_agent/docs/examples/batch_simulation/simulation_batch.s8 b/agents/simul8/simul8_agent/docs/examples/batch_simulation/simulation_batch.s8 new file mode 100644 index 0000000..8dbcd2e Binary files /dev/null and b/agents/simul8/simul8_agent/docs/examples/batch_simulation/simulation_batch.s8 differ diff --git a/agents/simul8/simul8_agent/images/carlsen.png b/agents/simul8/simul8_agent/images/carlsen.png new file mode 100644 index 0000000..cd454df Binary files /dev/null and b/agents/simul8/simul8_agent/images/carlsen.png differ diff --git a/agents/simul8/simul8_agent/images/demo-simul8-edited.gif b/agents/simul8/simul8_agent/images/demo-simul8-edited.gif new file mode 100644 index 0000000..5bba5ce Binary files /dev/null and b/agents/simul8/simul8_agent/images/demo-simul8-edited.gif differ diff --git a/agents/simul8/simul8_agent/images/demo-simul8-edited.mp4 b/agents/simul8/simul8_agent/images/demo-simul8-edited.mp4 new file mode 100644 index 0000000..02200ad Binary files /dev/null and b/agents/simul8/simul8_agent/images/demo-simul8-edited.mp4 differ diff --git a/agents/simul8/simul8_agent/images/image.png b/agents/simul8/simul8_agent/images/image.png new file mode 100644 index 0000000..f8de1f2 Binary files /dev/null and b/agents/simul8/simul8_agent/images/image.png differ diff --git a/agents/simul8/simul8_agent/images/melloni.jpg b/agents/simul8/simul8_agent/images/melloni.jpg new file mode 100644 index 0000000..33a821f Binary files /dev/null and b/agents/simul8/simul8_agent/images/melloni.jpg differ diff --git a/agents/simul8/simul8_agent/images/picone.jpeg b/agents/simul8/simul8_agent/images/picone.jpeg new file mode 100644 index 0000000..9ce7516 Binary files /dev/null and b/agents/simul8/simul8_agent/images/picone.jpeg differ diff --git a/agents/simul8/simul8_agent/images/structure.png b/agents/simul8/simul8_agent/images/structure.png new file mode 100644 index 0000000..e3ad3c9 Binary files /dev/null and b/agents/simul8/simul8_agent/images/structure.png differ diff --git a/agents/simul8/simul8_agent/images/talasila.jpeg b/agents/simul8/simul8_agent/images/talasila.jpeg new file mode 100644 index 0000000..705d414 Binary files /dev/null and b/agents/simul8/simul8_agent/images/talasila.jpeg differ diff --git a/agents/simul8/simul8_agent/resources/README.md b/agents/simul8/simul8_agent/resources/README.md new file mode 100644 index 0000000..160495e --- /dev/null +++ b/agents/simul8/simul8_agent/resources/README.md @@ -0,0 +1,104 @@ +# Use Simul8 Agent + +This 
Python module provides a simple RabbitMQ client to send simulation requests to a Simul8 agent and asynchronously listen for simulation results. It uses YAML configuration files for setup and supports sending payloads in YAML format over RabbitMQ messaging queues.
+
+## Table of Contents
+
+- [Use Simul8 Agent](#use-simul8-agent)
+  - [Table of Contents](#table-of-contents)
+  - [Installation](#installation)
+  - [Configuration](#configuration)
+  - [Usage](#usage)
+  - [Example](#example)
+    - [Steps to run an example](#steps-to-run-an-example)
+    - [Where to find the API payload files](#where-to-find-the-api-payload-files)
+    - [Example usage](#example-usage)
+
+## Installation
+
+Before using this agent, ensure the required Python packages are installed:
+
+```bash
+pip install pika pyyaml
+```
+
+## Configuration
+
+The agent requires a configuration file (`use.yaml`) to set up RabbitMQ connection parameters and specify the path to the simulation request payload.
+
+Example `use.yaml` content:
+
+```yaml
+rabbitmq:
+  host: localhost # RabbitMQ server hostname or IP address
+  port: 5672 # RabbitMQ server port
+  username: guest # RabbitMQ username
+  password: guest # RabbitMQ password
+  heartbeat: 600 # Heartbeat interval in seconds
+  vhost: / # RabbitMQ virtual host
+
+simulation_request: ../api/simulation.yaml # Default path to the simulation YAML payload
+```
+
+## Usage
+
+Run the module as a standalone script to send simulation requests to the Simul8 agent and listen asynchronously for the results.
+Command-line options:
+
+- `--api-payload` (optional):
+  Specify the path to the YAML file containing the simulation request payload.
+
+If this option is omitted, the script will look for a file named `simulation.yaml` in the default location as configured in `use.yaml` (by default in the same directory or as specified in the `simulation_request` field).
+
+- **Without CLI option:**
+  The script loads the simulation payload from the default path specified in `use.yaml`. This is by default a `simulation.yaml` file located in the working directory or as configured.
+
+- **With CLI option:**
+  You can override the default by specifying a custom path to the simulation payload YAML file using the `--api-payload` option.
+
+The simulation file used must be configured for this type of invocation:
+- `inputSheet` and `outputSheet` must be created manually in the simulation.
+- An "On Simulation Open" Visual Logic block must be created, which contains: `File to Sheet "input.csv" , inputSheet[1,1]`
+- The data that the simulation manipulates should be written into `outputSheet`.
+- An "End Run Logic" Visual Logic block must be created containing the logic that produces the data you want exported. Hence it needs to contain `Sheet to File "output.csv" , outputSheet[1,1]`
+
+## Example
+
+In the directory
+`/Users/foo/simulation-bridge/agents/simul8/simul8_agent/docs/examples`
+you will find a folder containing a practical example. Each example folder includes a `README.md` with detailed instructions:
+
+- [Batch Simulation](../docs/examples/batch_simulation/README.md)
+
+### Steps to run an example
+
+1. **Configure the simulation request path**
+   Edit the `config.yaml` file inside the Simul8 agent folder to set the path to the simulation request folder you want to use. This path should point to the example you want to run.
+
+2. **Run the Simul8 agent**
+   Start the Simul8 agent so it is ready to receive simulation requests.
+
+3. 
**Send a simulation request using the Python client**
+   Execute the Python client with the appropriate API payload file:
+   python use_simul8_agent.py --api-payload "path_to_api_payload"
+
+> **Note:** It is recommended to use absolute paths when specifying the `--api-payload` argument to avoid path resolution issues. It is also good practice to wrap the path in quotes.
+
+### Where to find the API payload files
+
+Each example folder contains an `api/` subfolder with example simulation payload YAML files. Use these as the `--api-payload` argument when running the Python client. For instance:
+
+- Batch Simulation:
+  `docs/examples/batch_simulation/api/simulation_batch.yaml.example`
+
+### Example usage
+
+To run the batch simulation example, specify the full absolute path to the payload file when invoking the Python client:
+
+```bash
+python use_simul8_agent.py --api-payload "/Users/foo/simulation-bridge/agents/simul8/simul8_agent/docs/examples/batch_simulation/api/simulation_batch.yaml.example"
+```
diff --git a/agents/simul8/simul8_agent/resources/simulation_batch.bk8 b/agents/simul8/simul8_agent/resources/simulation_batch.bk8 new file mode 100644 index 0000000..61c6848 Binary files /dev/null and b/agents/simul8/simul8_agent/resources/simulation_batch.bk8 differ
diff --git a/agents/simul8/simul8_agent/resources/simulation_batch.s8 b/agents/simul8/simul8_agent/resources/simulation_batch.s8 new file mode 100644 index 0000000..8dbcd2e Binary files /dev/null and b/agents/simul8/simul8_agent/resources/simulation_batch.s8 differ
diff --git a/agents/simul8/simul8_agent/resources/use.yaml.template b/agents/simul8/simul8_agent/resources/use.yaml.template new file mode 100644 index 0000000..aba7bfe --- /dev/null +++ b/agents/simul8/simul8_agent/resources/use.yaml.template @@ -0,0 +1,11 @@
+rabbitmq:
+  host: localhost # RabbitMQ server hostname or IP address
+  port: 5672 # RabbitMQ server port
+  username: guest # RabbitMQ username
+  password: guest # RabbitMQ password
+  heartbeat: 600 # Heartbeat interval in seconds
+  vhost: / # RabbitMQ virtual host
+
+# Default path to the simulation YAML payload
+# Absolute or relative path of the file, in quotes
+simulation_request: simulation.yaml
diff --git a/agents/simul8/simul8_agent/resources/use_simul8_agent.py b/agents/simul8/simul8_agent/resources/use_simul8_agent.py new file mode 100644 index 0000000..1521094 --- /dev/null +++ b/agents/simul8/simul8_agent/resources/use_simul8_agent.py @@ -0,0 +1,234 @@
+"""
+use_simul8_agent.py
+
+A simple RabbitMQ client to send simulation requests to a Simul8 agent,
+and listen asynchronously for the simulation results.
+"""
+
+import argparse
+import threading
+import uuid
+from typing import Any, Dict
+import pika
+import yaml
+
+
+class SimpleUsageSimul8Agent:
+    """
+    Simple client class to interact with Simul8 simulation agent via RabbitMQ.
+
+    It loads configuration from YAML, connects to RabbitMQ, sends simulation
+    payload requests, and listens for results asynchronously.
+    """
+
+    def __init__(
+        self,
+        agent_identifier: str = "dt",
+        destination_identifier: str = "simul8",
+        config_path: str = "use.yaml"
+    ) -> None:
+        """
+        Initialize the agent with identifiers and load configuration.
+
+        Args:
+            agent_identifier (str): Identifier for this client agent.
+            destination_identifier (str): Identifier for the target agent.
+            config_path (str): Path to the YAML config file.
+ """ + self.agent_id: str = agent_identifier + self.destination_id: str = destination_identifier + + self.config = self.load_yaml(config_path) + self.simulation_request_path = self.config.get( + "simulation_request", "simulation.yaml" + ) + rabbitmq_cfg = self.config.get("rabbitmq", {}) + + credentials = pika.PlainCredentials( + rabbitmq_cfg.get("username", "guest"), + rabbitmq_cfg.get("password", "guest"), + ) + + self.connection = pika.BlockingConnection( + pika.ConnectionParameters( + host=rabbitmq_cfg.get("host", "localhost"), + port=rabbitmq_cfg.get("port", 5672), + virtual_host=rabbitmq_cfg.get("vhost", "/"), + credentials=credentials, + heartbeat=rabbitmq_cfg.get("heartbeat", 600), + ) + ) + self.channel = self.connection.channel() + self.result_queue: str = "" + self.setup_channels() + + def setup_channels(self) -> None: + """ + Declare exchanges and queues, and bind them for message routing. + """ + self.channel.exchange_declare( + exchange="ex.bridge.output", exchange_type="topic", durable=True + ) + self.channel.exchange_declare( + exchange="ex.sim.result", exchange_type="topic", durable=True + ) + + self.result_queue = f"Q.{self.agent_id}.simul8.result" + self.channel.queue_declare(queue=self.result_queue, durable=True) + + self.channel.queue_bind( + exchange="ex.sim.result", + queue=self.result_queue, + routing_key=f"{self.destination_id}.result.{self.agent_id}", + ) + + print(f"[{self.agent_id.upper()}] Infrastructure configured successfully.") + + def send_request(self, payload_data: Dict[str, Any]) -> None: + """ + Send a simulation request message with the given payload. + + Args: + payload_data (Dict[str, Any]): Simulation payload to send. + """ + payload: Dict[str, Any] = { + **payload_data, + "request_id": str(uuid.uuid4()), + } + + payload.setdefault("simulation", {})["bridge_meta"] = { + "protocol": "rabbitmq" + } + payload_yaml: str = yaml.dump(payload, default_flow_style=False) + routing_key: str = f"{self.agent_id}.{self.destination_id}" + + self.channel.basic_publish( + exchange="ex.bridge.output", + routing_key=routing_key, + body=payload_yaml, + properties=pika.BasicProperties( + delivery_mode=2, # Make message persistent + content_type="application/x-yaml", + message_id=str(uuid.uuid4()), + ), + ) + print(f"[{self.agent_id.upper()}] Message sent to simul8: {payload}") + + def handle_result( + self, ch, method, _properties, body + ) -> None: + """ + Callback to process incoming results from Simul8. + + Args: + ch: Channel object. + method: Delivery method. + _properties: Message properties (unused). + body: Message body. + """ + try: + result: Dict[str, Any] = yaml.safe_load(body) + print(f"\n[{self.agent_id.upper()}] Result received from Simul8:") + print(f"Result: {result}") + print("-" * 40) + ch.basic_ack(method.delivery_tag) + except Exception as exc: # pylint: disable=broad-except + print(f"Error processing result: {exc}") + + def start_listening(self) -> None: + """ + Start consuming messages from the result queue indefinitely. + """ + self.channel.basic_consume( + queue=self.result_queue, on_message_callback=self.handle_result + ) + print( + f"[{self.agent_id.upper()}] Listening for results on routing key " + f"'simul8.result.{self.agent_id}'..." + ) + self.channel.start_consuming() + + def load_yaml(self, file_path: str) -> Dict[str, Any]: + """ + Load YAML file and return its content as a dictionary. + + Args: + file_path (str): Path to the YAML file. + + Returns: + Dict[str, Any]: Parsed YAML content. 
+ """ + with open(file_path, "r", encoding="utf-8") as file: + return yaml.safe_load(file) + + +def start_listener(agent_identifier: str) -> None: + """ + Initialize and start the Simul8 agent listener in a separate thread. + + Args: + agent_identifier (str): Agent identifier for the listener. + """ + simul8_agent = SimpleUsageSimul8Agent(agent_identifier) + simul8_agent.start_listening() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Simple Simul8 Agent Client" + ) + parser.add_argument( + "--api-payload", + type=str, + default=None, + help=( + "Path to the YAML file containing the simulation payload " + "(simulation.yaml). If omitted, the default path from the " + "configuration file will be used." + ), + ) + args = parser.parse_args() + + AGENT_ID = "dt" + DESTINATION = "simul8" + + # Start the listener thread to receive simulation results asynchronously. + listener_thread = threading.Thread( + target=start_listener, + args=(AGENT_ID,), + ) + listener_thread.daemon = True + listener_thread.start() + + # Instantiate the client with the default configuration file. + client = SimpleUsageSimul8Agent( + AGENT_ID, + DESTINATION, + config_path="use.yaml", + ) + + try: + # Determine the simulation payload file to load. + # Use CLI-specified payload path if provided, otherwise use default from config. + simulation_file_path = ( + args.api_payload + if args.api_payload + else client.simulation_request_path + ) + + # Load the simulation request data from the specified YAML file. + simulation_data = client.load_yaml(simulation_file_path) + + # Send the simulation request to the Simul8 agent via RabbitMQ. + client.send_request(simulation_data) + + # Keep the main thread alive to continue receiving asynchronous results. + print("\nPress Ctrl+C to terminate the program...") + while True: + pass + + except KeyboardInterrupt: + print("\nProgram terminated by user.") + + except Exception as exc: # pylint: disable=broad-except + print(f"Unexpected error: {exc}") diff --git a/agents/simul8/simul8_agent/src/__init__.py b/agents/simul8/simul8_agent/src/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/agents/simul8/simul8_agent/src/comm/__init__.py b/agents/simul8/simul8_agent/src/comm/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/agents/simul8/simul8_agent/src/comm/connect.py b/agents/simul8/simul8_agent/src/comm/connect.py new file mode 100644 index 0000000..f1209de --- /dev/null +++ b/agents/simul8/simul8_agent/src/comm/connect.py @@ -0,0 +1,189 @@ +""" +Generic communication wrapper that abstracts underlying messaging protocols. +This module provides a unified interface for communication, regardless of the +underlying technology (RabbitMQ, Kafka, etc.) being used. +""" +from typing import Any, Dict, Optional, Callable + +from ..utils.logger import get_logger +from .interfaces import IMessageBroker, IMessageHandler +from .rabbitmq.rabbitmq_manager import RabbitMQManager +from .rabbitmq.message_handler import MessageHandler + +logger = get_logger() + + +class Connect: + """ + A communication wrapper that provides a unified interface for messaging, + abstracting the underlying messaging protocol implementation. + """ + + def __init__( + self, + agent_id: str, + config: Dict[str, Any], + broker_type: str = "rabbitmq" + ) -> None: + """ + Initialize the communication wrapper. 
+ + Args: + agent_id (str): The ID of the agent + config (Dict[str, Any]): Configuration parameters + broker_type (str): The type of message broker to use (default: "rabbitmq") + """ + self.agent_id = agent_id + self.config = config + self.broker_type = broker_type + self.broker: Optional[IMessageBroker] = None + self.message_handler: Optional[IMessageHandler] = None + + # Initialize the appropriate message broker based on the type + self._initialize_broker() + + def _initialize_broker(self) -> None: + """ + Initialize the appropriate message broker based on the type. + """ + if self.broker_type.lower() == "rabbitmq": + logger.info("Initializing RabbitMQ broker") + self.broker = RabbitMQManager(self.agent_id, self.config) + self.message_handler = MessageHandler( + self.agent_id, self.broker, self.config) + else: + raise ValueError(f"Unsupported broker type: {self.broker_type}") + + def connect(self) -> None: + """ + Establish a connection to the message broker. + """ + if self.broker: + self.broker.connect() + else: + raise RuntimeError("Broker not initialized") + + def setup(self) -> None: + """ + Set up the required infrastructure for the messaging system. + """ + if self.broker: + self.broker.setup_infrastructure() + else: + raise RuntimeError("Broker not initialized") + + def register_message_handler( + self, custom_handler: Optional[Callable] = None + ) -> None: + """ + Register a function to handle incoming messages. + + Args: + custom_handler (Optional[Callable]): A custom handler function + to use instead of the default. If None, the default handler will + be used + """ + if self.broker and self.message_handler: + if custom_handler: + self.broker.register_message_handler(custom_handler) + else: + self.broker.register_message_handler( + self.message_handler.handle_message) + else: + raise RuntimeError("Broker or message handler not initialized") + + def start_consuming(self) -> None: + """ + Start consuming messages from the input channel. + """ + if not self.broker: + raise RuntimeError("Broker not initialized") + + if not self.broker.channel or not self.broker.channel.is_open: + logger.debug( + "Channel is not initialized or is closed. Attempting to reconnect...") + if not self.broker.connect(): + logger.error( + "Failed to initialize or reopen channel. Consumption aborted.") + return + + logger.debug("Channel is active. Starting consumption.") + self.broker.start_consuming() + + def send_message( + self, + destination: str, + message: Any, + **kwargs + ) -> bool: + """ + Send a message to a specified destination. + + Args: + destination (str): The destination identifier + message (Any): The message to send + **kwargs: Additional parameters specific to the implementation + + Returns: + bool: True if successful, False otherwise + """ + if self.broker: + # For RabbitMQ, we need to extract specific parameters + if self.broker_type.lower() == "rabbitmq": + exchange = kwargs.get( + "exchange", self.config.get( + "exchanges", {}).get( + "output", "ex.sim.result")) + routing_key = kwargs.get( + "routing_key", f"{self.agent_id}.{destination}") + properties = kwargs.get("properties", None) + return self.broker.send_message( + exchange, routing_key, message, properties) + # Handle other broker types here in the future + return False + raise RuntimeError("Broker not initialized") + + def send_result(self, destination: str, result: Dict[str, Any]) -> bool: + """ + Send operation results to the specified destination. 
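+
+        Delegates to the underlying broker's send_result implementation and
+        raises RuntimeError if the broker has not been initialized.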
+ + Args: + destination (str): The destination identifier + result (Dict[str, Any]): The result data to be sent + + Returns: + bool: True if successful, False otherwise + """ + if self.broker: + return self.broker.send_result(destination, result) + raise RuntimeError("Broker not initialized") + + def close(self) -> None: + """ + Close the connection to the message broker. + """ + if self.broker: + self.broker.close() + else: + logger.warning("Attempted to close a non-initialized broker") + + def get_message_handler(self) -> Optional[IMessageHandler]: + """ + Get the current message handler. + + Returns: + Optional[IMessageHandler]: The current message handler or None if not initialized + """ + return self.message_handler + + def set_simulation_handler(self, handler: Callable) -> None: + """ + Set the handler for simulation messages. + This provides a way to inject the simulation handler without creating + circular dependencies. + + Args: + handler (Callable): The function to handle simulation messages + """ + if self.message_handler: + self.message_handler.set_simulation_handler(handler) diff --git a/agents/simul8/simul8_agent/src/comm/interfaces.py b/agents/simul8/simul8_agent/src/comm/interfaces.py new file mode 100644 index 0000000..ee5b979 --- /dev/null +++ b/agents/simul8/simul8_agent/src/comm/interfaces.py @@ -0,0 +1,106 @@ +""" +Defines the generic IMessageBroker interface for communication. +This module provides the base interface that all communication implementations should follow, +enabling easy substitution of different messaging technologies. +""" +from abc import ABC, abstractmethod +from typing import Any, Dict, Callable, Optional + + +class IMessageBroker(ABC): + """ + Interface for a message broker that handles communication between components. + This abstraction allows for swapping different messaging technologies (RabbitMQ, Kafka, etc.) + without changing the core application logic. + """ + + @abstractmethod + def connect(self) -> None: + """ + Establish a connection to the message broker. + """ + + @abstractmethod + def setup_infrastructure(self) -> None: + """ + Set up required infrastructure (exchanges, queues, topics, etc.). + """ + + @abstractmethod + def register_message_handler(self, handler_func: Callable) -> None: + """ + Register a function to handle incoming messages. + + Args: + handler_func: A callback function that processes incoming messages + """ + + @abstractmethod + def start_consuming(self) -> None: + """ + Start consuming messages from the input channel. + """ + + @abstractmethod + def send_message( + self, + exchange: str, + routing_key: str, + body: Any, + properties: Optional[Any] = None) -> bool: + """ + Send a message to a specified destination. + + Args: + exchange: The exchange to publish to + routing_key: The routing key for the message + body: The message body + properties: Message properties + + Returns: + bool: True if successful, False otherwise + """ + + @abstractmethod + def send_result(self, destination: str, result: Dict[str, Any]) -> bool: + """ + Send operation results to the specified destination. + + Args: + destination: The destination identifier + result: The result data to be sent + + Returns: + bool: True if successful, False otherwise + """ + + @abstractmethod + def close(self) -> None: + """ + Close the connection to the message broker. + """ + + +class IMessageHandler(ABC): + """ + Interface for handling messages received from a message broker. 
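+
+    Implementations receive the raw callback arguments from the broker and
+    are responsible for acknowledging or rejecting each message they process.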
+ """ + + @abstractmethod + def handle_message(self, *args, **kwargs) -> None: + """ + Process incoming messages from the message broker. + + Args: + *args: Variable length argument list + **kwargs: Arbitrary keyword arguments + """ + + @abstractmethod + def get_agent_id(self) -> str: + """ + Retrieve the agent ID. + + Returns: + str: The ID of the agent + """ diff --git a/agents/simul8/simul8_agent/src/comm/rabbitmq/__init__.py b/agents/simul8/simul8_agent/src/comm/rabbitmq/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/agents/simul8/simul8_agent/src/comm/rabbitmq/interfaces.py b/agents/simul8/simul8_agent/src/comm/rabbitmq/interfaces.py new file mode 100644 index 0000000..eddeb72 --- /dev/null +++ b/agents/simul8/simul8_agent/src/comm/rabbitmq/interfaces.py @@ -0,0 +1,126 @@ +""" +Implementation of the RabbitMQ-specific interfaces. +This module defines interface classes for RabbitMQ management and message handling. +""" +from abc import ABC, abstractmethod +from typing import Dict, Any, Callable, Optional +import pika +from pika.spec import BasicProperties + +from ..interfaces import IMessageBroker, IMessageHandler + + +class IRabbitMQManager(IMessageBroker): + """ + Interface for managing RabbitMQ connections, exchanges, queues, and message handling. + Extends the generic IMessageBroker interface with RabbitMQ-specific methods. + """ + + @abstractmethod + def connect(self) -> None: + """ + Establish a connection to RabbitMQ using configuration parameters. + """ + + @abstractmethod + def setup_infrastructure(self) -> None: + """ + Set up RabbitMQ infrastructure (exchanges, queues, bindings). + """ + + @abstractmethod + def register_message_handler( + self, + handler_func: Callable[ + [pika.adapters.blocking_connection.BlockingChannel, + pika.spec.Basic.Deliver, + BasicProperties, + bytes], + None] + ) -> None: + """ + Register a function to handle incoming messages. + + Args: + handler_func (callable): Function to handle messages. + """ + + @abstractmethod + def start_consuming(self) -> None: + """ + Start consuming messages from the input queue. + """ + + @abstractmethod + def send_message( + self, + exchange: str, + routing_key: str, + body: str, + properties: Optional[BasicProperties] = None + ) -> bool: + """ + Send a message to a specified exchange with a routing key. + + Args: + exchange (str): The exchange to publish to. + routing_key (str): The routing key for the message. + body (str): The message body. + properties (pika.BasicProperties, optional): Message properties. + + Returns: + bool: True if the message was sent successfully, False otherwise. + """ + + @abstractmethod + def send_result(self, destination: str, result: Dict[str, Any]) -> bool: + """ + Send simulation results to the specified destination. + + Args: + destination (str): The destination identifier (e.g., 'dt', 'pt'). + result (dict): The result data to be sent. + + Returns: + bool: True if the result was sent successfully, False otherwise. + """ + + @abstractmethod + def close(self) -> None: + """ + Close the RabbitMQ connection. + """ + + +class IRabbitMQMessageHandler(IMessageHandler): + """ + Interface for handling incoming messages from RabbitMQ. + Extends the generic IMessageHandler interface with RabbitMQ-specific methods. + """ + + @abstractmethod + def handle_message( + self, + ch: pika.adapters.blocking_connection.BlockingChannel, + method: pika.spec.Basic.Deliver, + properties: BasicProperties, + body: bytes + ) -> None: + """ + Process incoming messages from RabbitMQ. 
+ + Args: + ch (BlockingChannel): Channel object + method (Basic.Deliver): Delivery method + properties (BasicProperties): Message properties + body (bytes): Message body + """ + + @abstractmethod + def get_agent_id(self) -> str: + """ + Retrieve the agent ID. + + Returns: + str: The ID of the agent + """ diff --git a/agents/simul8/simul8_agent/src/comm/rabbitmq/message_handler.py b/agents/simul8/simul8_agent/src/comm/rabbitmq/message_handler.py new file mode 100644 index 0000000..6278699 --- /dev/null +++ b/agents/simul8/simul8_agent/src/comm/rabbitmq/message_handler.py @@ -0,0 +1,237 @@ +""" +Message handler for processing incoming RabbitMQ messages. +""" +import uuid +from typing import Any, Optional, Dict + +import yaml +from pika.adapters.blocking_connection import BlockingChannel +from pika.spec import Basic, BasicProperties +from pydantic import BaseModel, ConfigDict, Field, field_validator + +from .interfaces import IRabbitMQMessageHandler +from ...utils.logger import get_logger +from ...utils.create_response import create_response +from ...core.batch import handle_batch_simulation + +logger = get_logger() + + +class SimulationInputs(BaseModel): + """Model for simulation inputs - dynamic fields allowed""" + model_config = ConfigDict(extra="allow") + + +class SimulationOutputs(BaseModel): + """Model for simulation outputs - dynamic fields allowed""" + model_config = ConfigDict(extra="allow") + + +class SimulationData(BaseModel): + """Model for simulation data structure""" + request_id: str + client_id: str + simulator: str + type: str = Field(default="batch") + file: str + inputs: 'SimulationInputs' + outputs: Optional['SimulationOutputs'] = None + bridge_meta: Optional[Dict[str, Any]] = None + + @field_validator('type', mode='before') + @classmethod + def validate_sim_type(cls, v): + """Validate that simulation type is either 'batch' or 'streaming'""" + if v not in ['batch', 'streaming']: + raise ValueError( + f"Invalid simulation type: {v}. Must be 'batch' or 'streaming'") + return v + + +class MessagePayload(BaseModel): + """Model for the entire message payload""" + simulation: SimulationData + request_id: str = Field(default_factory=lambda: str(uuid.uuid4())) + + +class MessageHandler(IRabbitMQMessageHandler): + """ + Handler for processing incoming messages from RabbitMQ. + Implements the IRabbitMQMessageHandler interface. + """ + + def __init__(self, agent_id: str, rabbitmq_manager: Any, + config: Optional[Dict]) -> None: + """ + Initialize the message handler. + + Args: + agent_id (str): The ID of the agent + rabbitmq_manager (RabbitMQManager): The RabbitMQ manager instance + """ + self.agent_id = agent_id + self.rabbitmq_manager = rabbitmq_manager + self.config = config + self.path_simulation = self.config.get( + 'simulation', {}).get( + 'path', None) + self.response_templates = self.config.get( + 'response_templates', {}) + + def get_agent_id(self) -> str: + """ + Retrieve the agent ID. + + Returns: + str: The ID of the agent + """ + return self.agent_id + + def handle_message( + self, + ch: BlockingChannel, + method: Basic.Deliver, + properties: BasicProperties, + body: bytes + ) -> None: + """ + Process incoming messages from RabbitMQ with Pydantic validation. 
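+
+        The body is parsed as YAML and validated against MessagePayload;
+        valid batch requests are dispatched to handle_batch_simulation and
+        acknowledged, while parse or validation failures are answered with
+        an error response to the source and rejected without requeueing.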
+ + Args: + ch (BlockingChannel): Channel object + method (Basic.Deliver): Delivery method + properties (BasicProperties): Message properties + body (bytes): Message body + """ + message_id = properties.message_id if properties.message_id else "unknown" + logger.debug("Received message %s", message_id) + logger.debug("Message routing key: %s", method.routing_key) + + # Extract the message source + source: str = method.routing_key.split('.')[0] + + try: + # Load the message body as YAML + try: + # Initialize msg_dict to avoid reference issues in case of + # parsing error + msg_dict = {} + msg_dict = yaml.safe_load(body) + logger.debug("Parsed message: %s", msg_dict) + except yaml.YAMLError as e: + logger.error("YAML parsing error: %s", e) + error_response = create_response( + template_type='error', + sim_file=msg_dict.get('simulation', {}).get( + 'file', '') if isinstance(msg_dict, dict) else '', + sim_type=msg_dict.get('simulation', {}).get( + 'type', '') if isinstance(msg_dict, dict) else '', + response_templates={}, + bridge_meta=msg_dict.get('simulation', {}).get( + 'bridge_meta', 'unknown') if isinstance(msg_dict, dict) + else 'unknown', + request_id=msg_dict.get('simulation', {}).get( + 'request_id', 'unknown') if isinstance(msg_dict, dict) + else 'unknown', + error={'message': 'YAML parsing error', + 'details': str(e), 'type': 'yaml_parse_error'} + ) + self.rabbitmq_manager.send_result(source, error_response) + ch.basic_nack(delivery_tag=method.delivery_tag, + requeue=False) # Don't requeue the message + return + # Validate the message structure using Pydantic + try: + + # Validate the message against our expected schema + payload = MessagePayload(**msg_dict) + logger.debug("Message validation successful") + # Access the validated data + simulation_data = payload.simulation + sim_type = simulation_data.type + sim_file = simulation_data.file + bridge_meta = simulation_data.bridge_meta or 'unknown' + request_id = simulation_data.request_id + except Exception as e: + logger.error("Message validation failed: %s", e) + sim_file = '' + sim_type = '' + bridge_meta = 'unknown' + request_id = 'unknown' + if isinstance(msg_dict, dict) and 'simulation' in msg_dict: + sim_data = msg_dict['simulation'] + sim_file = sim_data.get('file', '') + sim_type = sim_data.get('type', '') + bridge_meta = sim_data.get('bridge_meta', 'unknown') + request_id = sim_data.get('request_id', 'unknown') + + # Create an error response + error_response = create_response( + template_type='error', + sim_file=sim_file, + sim_type=sim_type, + response_templates={}, + bridge_meta=bridge_meta, + request_id=request_id, + error={ + 'message': 'Message validation failed', + 'details': str(e), + 'type': 'validation_error' + } + ) + # Send the error response back to the source + self.rabbitmq_manager.send_result(source, error_response) + # Acknowledge the message so it's not requeued + ch.basic_nack(delivery_tag=method.delivery_tag, requeue=False) + return + logger.info("Received simulation type: %s", sim_type) + # Process based on simulation type + if sim_type == 'batch': + handle_batch_simulation( + msg_dict, + source, + self.rabbitmq_manager, + self.path_simulation, + self.response_templates) + ch.basic_ack(delivery_tag=method.delivery_tag) + else: + # This shouldn't happen due to Pydantic validation, but just in + # case + logger.error("Unknown simulation type: %s", sim_type) + error_response = create_response( + template_type='error', + sim_file=sim_file, + sim_type=sim_type, + response_templates={}, + 
bridge_meta=bridge_meta, + request_id=request_id, + error={ + 'message': f'Unknown simulation type: {sim_type}', + 'type': 'invalid_simulation_type' + } + ) + self.rabbitmq_manager.send_result(source, error_response) + ch.basic_nack(delivery_tag=method.delivery_tag, requeue=False) + + except Exception as e: # pylint: disable=broad-except + logger.error("Error processing message %s: %s", message_id, e) + error_response = create_response( + template_type='error', + sim_file='', + sim_type='', + response_templates={}, + bridge_meta='unknown', + request_id='unknown', + error={ + 'message': 'Error processing message', + 'details': str(e), + 'type': 'execution_error' + } + ) + # Try to send the error response back + try: + self.rabbitmq_manager.send_result(source, error_response) + except Exception as send_error: # pylint: disable=broad-except + logger.error("Failed to send error response: %s", send_error) + + ch.basic_nack(delivery_tag=method.delivery_tag, requeue=False) diff --git a/agents/simul8/simul8_agent/src/comm/rabbitmq/rabbitmq_manager.py b/agents/simul8/simul8_agent/src/comm/rabbitmq/rabbitmq_manager.py new file mode 100644 index 0000000..f5b84be --- /dev/null +++ b/agents/simul8/simul8_agent/src/comm/rabbitmq/rabbitmq_manager.py @@ -0,0 +1,328 @@ +""" +RabbitMQManager class for managing RabbitMQ connections, exchanges, queues, and message handling. +This module provides functionality to establish connections with RabbitMQ, +set up exchanges and queues, and send/receive messages within a simulation agent framework. +""" +import sys +import ssl +import uuid +from typing import Dict, Any, Callable, Optional + +import yaml +import pika +import time +from pika.spec import BasicProperties + +from .interfaces import IRabbitMQManager +from ...utils.logger import get_logger + +logger = get_logger() + + +class RabbitMQManager(IRabbitMQManager): + """ + Manager for RabbitMQ connections, channels, exchanges, and queues. + Implements the IRabbitMQManager interface. + """ + + def __init__(self, agent_id: str, config: Dict[str, Any]) -> None: + """ + Initialize the RabbitMQ manager. + + Args: + agent_id (str): The ID of the agent + config (dict): Configuration parameters + """ + self.agent_id: str = agent_id + self.config: Dict[str, Any] = config + self.connection: Optional[pika.BlockingConnection] = None + self.channel: Optional[pika.adapters.blocking_connection.BlockingChannel] = None + self.input_queue_name: str = f'Q.sim.{self.agent_id}' + self.message_handler: Optional[Callable[[ + pika.adapters.blocking_connection.BlockingChannel, + pika.spec.Basic.Deliver, BasicProperties, bytes], None]] = None + + def connect(self) -> bool: + """ + Establish connection to RabbitMQ server using configuration parameters. + + Returns: + bool: True if connection and channel are open, False otherwise. 
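+
+        The connection is attempted up to five times with a short delay
+        between attempts, and TLS is used when the 'tls' flag is enabled in
+        the rabbitmq configuration section.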
+ """ + rabbitmq_config: Dict[str, Any] = self.config.get('rabbitmq', {}) + max_retries = 5 + retry_delay = 2 + + for attempt in range(1, max_retries + 1): + try: + logger.debug("Connecting to RabbitMQ (attempt %d)...", attempt) + credentials = pika.PlainCredentials( + rabbitmq_config.get('username', 'guest'), + rabbitmq_config.get('password', 'guest') + ) + vhost = rabbitmq_config.get('vhost', '/') + logger.debug(f"Using vhost: {vhost}") + use_tls = rabbitmq_config.get('tls', False) + + if use_tls: + context = ssl.create_default_context() + ssl_options = pika.SSLOptions(context, rabbitmq_config.get('host', 'localhost')) + parameters = pika.ConnectionParameters( + host=rabbitmq_config.get('host', 'localhost'), + port=rabbitmq_config.get('port', 5671), + virtual_host=vhost, + credentials=credentials, + ssl_options=ssl_options, + heartbeat=rabbitmq_config.get('heartbeat', 600) + ) + else: + parameters = pika.ConnectionParameters( + host=rabbitmq_config.get('host', 'localhost'), + port=rabbitmq_config.get('port', 5672), + virtual_host=vhost, + credentials=credentials, + heartbeat=rabbitmq_config.get('heartbeat', 600) + ) + self.connection = pika.BlockingConnection(parameters) + + if self.connection.is_open: + logger.debug( + "Connection to RabbitMQ is open. Attempting to create channel...") + self.channel = self.connection.channel() + + if self.channel and self.channel.is_open: + logger.debug( + "Successfully connected to RabbitMQ and channel is open.") + return True + else: + logger.error("Channel creation failed. Retrying...") + else: + logger.error( + "Connection opened but channel could not be created. Retrying...") + + except pika.exceptions.AMQPConnectionError as e: + logger.error( + "Connection failed (attempt %d) to %s:%s vhost=%s — %s: %r", + attempt, + rabbitmq_config.get("host"), + rabbitmq_config.get("port"), + rabbitmq_config.get("vhost", "/"), + e.__class__.__name__, + e + ) + + time.sleep(retry_delay) + + logger.error( + "Failed to connect and create channel after %d attempts", + max_retries) + return False + + def setup_infrastructure(self) -> None: + if not self.channel or not self.channel.is_open: + logger.error("Channel is not available. 
Exiting.") + sys.exit(1) + exchanges: Dict[str, str] = self.config.get('exchanges', {}) + queue_config: Dict[str, Any] = self.config.get('queue', {}) + + try: + # Input exchange to receive commands + input_exchange: str = exchanges.get('input', 'ex.bridge.output') + self.channel.exchange_declare( + exchange=input_exchange, + exchange_type='topic', + durable=True + ) + logger.debug("Declared input exchange: %s", input_exchange) + + # Output exchange to send results + output_exchange: str = exchanges.get('output', 'ex.sim.result') + self.channel.exchange_declare( + exchange=output_exchange, + exchange_type='topic', + durable=True + ) + logger.debug("Declared output exchange: %s", output_exchange) + + # Queue for receiving input messages + self.channel.queue_declare( + queue=self.input_queue_name, + durable=queue_config.get('durable', True) + ) + + # Bind queue to input exchange + self.channel.queue_bind( + exchange=input_exchange, + queue=self.input_queue_name, + routing_key=f"*.{self.agent_id}" + ) + logger.debug( + "Declared and bound input queue: %s", + self.input_queue_name) + + # Set QoS (prefetch count) + self.channel.basic_qos( + prefetch_count=queue_config.get('prefetch_count', 1) + ) + except pika.exceptions.ChannelClosedByBroker as e: + logger.error( + "Channel closed by broker while setting up infrastructure: %s", e) + sys.exit(1) + + def register_message_handler( + self, + handler_func: Callable[ + [pika.adapters.blocking_connection.BlockingChannel, + pika.spec.Basic.Deliver, + BasicProperties, + bytes], + None] + ) -> None: + """ + Register a function to handle incoming messages. + + Args: + handler_func (callable): Function to handle messages + """ + self.message_handler = handler_func + + def start_consuming(self) -> None: + """ + Start consuming messages from the input queue. + """ + if not self.message_handler: + logger.error( + "No message handler registered. Cannot start consuming.") + return + + if not self.channel or not self.channel.is_open: + logger.error( + "Channel is not initialized. Attempting to reconnect...") + self.connect() + if not self.channel: + logger.error( + "Failed to initialize channel after reconnecting.") + return + + try: + self.channel.basic_consume( + queue=self.input_queue_name, + on_message_callback=self.message_handler + ) + logger.debug( + "Started consuming messages from queue: %s", + self.input_queue_name) + self.channel.start_consuming() + except KeyboardInterrupt: + logger.info( + "Stopping message consumption due to keyboard interrupt") + if self.channel: + self.channel.stop_consuming() + except pika.exceptions.AMQPError as e: + logger.error("Error while consuming messages: %s", e) + self.close() + + def send_message( + self, + exchange: str, + routing_key: str, + body: str, + properties: Optional[BasicProperties] = None + ) -> bool: + """ + Send a message to a specified exchange with a routing key. 
+ + Args: + exchange (str): The exchange to publish to + routing_key (str): The routing key for the message + body (str): The message body + properties (pika.BasicProperties, optional): Message properties + + Returns: + bool: True if successful, False otherwise + """ + try: + self.channel.basic_publish( + exchange=exchange, + routing_key=routing_key, + body=body, + properties=properties or pika.BasicProperties( + delivery_mode=2 # Persistent message + ) + ) + logger.debug( + "Sent message to exchange %s with routing key %s", + exchange, + routing_key) + return True + except pika.exceptions.AMQPError as e: + logger.error("Failed to send message: %s", e) + return False + except Exception as e: + logger.error("Unexpected error: %s", e) + return False + + def send_result(self, destination: str, result: Dict[str, Any]) -> bool: + """ + Send simulation results to the specified destination. + + Args: + destination (str): Destination identifier (e.g., 'dt', 'pt') + result (dict): Result data to be sent + + Returns: + bool: True if successful, False otherwise + """ + exchanges: Dict[str, str] = self.config.get('exchanges', {}) + output_exchange: str = exchanges.get('output', 'ex.sim.result') + + # Prepare the payload with the destination + payload: Dict[str, Any] = { + **result, # Result data + 'source': self.agent_id, # Agent identifier + 'destinations': [destination] # Recipient + } + + # Generate message ID + message_id: str = str(uuid.uuid4()) + + # Serialize to YAML + payload_yaml: str = yaml.dump(payload, default_flow_style=False) + + # Routing key: .result. + routing_key: str = f"{self.agent_id}.result.{destination}" + + properties: BasicProperties = pika.BasicProperties( + delivery_mode=2, # Persistent message + content_type='application/x-yaml', + message_id=message_id + ) + + success: bool = self.send_message( + output_exchange, routing_key, payload_yaml, properties) + + if success: + logger.debug( + "Sent result to %s with message ID: %s and payload: %s", + destination, + message_id, + payload) + else: + logger.error("Failed to send result to %s", destination) + + return success + + def close(self) -> None: + """ + Close the RabbitMQ connection. + """ + if self.channel and self.channel.is_open: + try: + self.channel.stop_consuming() + except pika.exceptions.AMQPError: + pass + logger.debug("Stopped consuming messages") + + if self.connection and self.connection.is_open: + self.connection.close() + logger.info("Closed RabbitMQ connection") diff --git a/agents/simul8/simul8_agent/src/core/__init__.py b/agents/simul8/simul8_agent/src/core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/agents/simul8/simul8_agent/src/core/agent.py b/agents/simul8/simul8_agent/src/core/agent.py new file mode 100644 index 0000000..58390b7 --- /dev/null +++ b/agents/simul8/simul8_agent/src/core/agent.py @@ -0,0 +1,110 @@ +""" +Simul8Agent implementation - An implementation of the Simul8Agent class using the Connect +abstraction to manage communication and handle simulation processing. +""" + +from typing import Any, Dict, Optional +import sys +import uuid + +from ..interfaces.config_manager import IConfigManager +from ..utils.config_manager import ConfigManager +from ..utils.logger import get_logger +from ..utils.performance_monitor import PerformanceMonitor +from ..comm.connect import Connect + +# Configure logger +logger = get_logger() + + +class Simul8Agent: + """ + An agent that interfaces with a Simul8 simulation through a communication layer. 
+ This component handles message reception, processing, and result distribution + while remaining decoupled from the specific messaging technology. + """ + + def __init__( + self, + agent_id: str, + config_path: Optional[str] = None, + broker_type: str = "rabbitmq") -> None: + """ + Initialize the Simul8 agent. + + Args: + agent_id (str): The ID of the agent + config_path (Optional[str]): Path to the configuration file (optional) + broker_type (str): The type of message broker to use (default: "rabbitmq") + """ + self.agent_id: str = agent_id + logger.info("Initializing Simul8 agent with ID: %s", self.agent_id) + + # Load configuration + self.config_manager: IConfigManager = ConfigManager(config_path) + self.config: Dict[str, Any] = self.config_manager.get_config() + + # Initialize performance monitor + self.performance_monitor = PerformanceMonitor(config=self.config) + # Initialize the communication layer + self.comm = Connect(self.agent_id, self.config, broker_type) + # Set up the communication infrastructure + self.comm.connect() + self.comm.setup() + self.comm.register_message_handler() + logger.debug("Simul8 agent initialized successfully") + + def start(self) -> None: + """ + Start the agent and begin consuming messages. + """ + try: + logger.info("Simul8 agent running and listening for requests") + self.comm.start_consuming() + except KeyboardInterrupt: + logger.info("Stopping Simul8 agent due to keyboard interrupt") + self.stop() + except ConnectionError as e: + # Specific handling for ConnectionError + logger.error("Connection error while consuming messages: %s", e) + self.stop() + except TimeoutError as e: + # Specific handling for TimeoutError + logger.error("Timeout error while consuming messages: %s", e) + self.stop() + except Exception as e: + # For all other unexpected errors + logger.error("Unexpected error while consuming messages: %s", e) + # This will log the full stack trace + logger.exception("Stack trace:") + self.stop() + + def stop(self) -> None: + """ + Stop the agent and close all connections. + """ + logger.info("Stopping Simul8 agent") + self.comm.close() + + # Log performance summary before stopping + summary = self.performance_monitor.get_summary() + if summary: + logger.info("Performance Summary:") + for metric, value in summary.items(): + logger.info(f" {metric}: {value:.2f}") + + def send_result(self, destination: str, result: Dict[str, Any]) -> bool: + """ + Send operation results to the specified destination. + + Args: + destination (str): The destination identifier + result (Dict[str, Any]): The result data to be sent + + Returns: + bool: True if successful, False otherwise + """ + success = self.comm.send_result(destination, result) + if success: + self.performance_monitor.record_result_sent() + return success diff --git a/agents/simul8/simul8_agent/src/core/batch.py b/agents/simul8/simul8_agent/src/core/batch.py new file mode 100644 index 0000000..53866ef --- /dev/null +++ b/agents/simul8/simul8_agent/src/core/batch.py @@ -0,0 +1,237 @@ +""" +batch.py - Simul8 Simulation Batch Processor + +This module provides functionality to process Simul8 simulation requests received through +the Connect messaging abstraction layer. 
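+
+The entry point is handle_batch_simulation, which validates the request,
+runs the Simul8 model through Simul8Simulator, and reports progress,
+success, or error responses back to the requesting source via the broker.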
+""" + +import os +import sys +import time +from typing import Dict, List, Any, Tuple, Optional + +import yaml + +from ..utils.logger import get_logger +from ..utils.create_response import create_response +from ..comm.interfaces import IMessageBroker +from .simul8_simulator import Simul8Simulator, Simul8SimulationError + +# Configure logger +logger = get_logger() + + +def handle_batch_simulation( + msg_dict: Dict[str, Any], + source: str, + rabbitmq_manager: IMessageBroker, + path_simulation: str, + response_templates: Dict[str, Any] +) -> None: + """Handle a batch simulation request.""" + sim_file: Optional[str] = None # Initialize this first! + + logger.debug(f"Starting handle_batch_simulation with msg_dict keys: {list(msg_dict.keys())}") + + # Initialize performance monitor + operation_id = msg_dict.get('simulation', {}).get('request_id', 'unknown') + logger.debug(f"Operation ID: {operation_id}") + + try: + logger.debug(f"About to record simul8 start") + + + logger.debug(f"Getting simulation data from message") + data: Dict[str, Any] = msg_dict.get('simulation', {}) + logger.debug(f"Simulation data keys: {list(data.keys())}") + + bridge_meta = data.get('bridge_meta', 'unknown') + request_id = data.get('request_id', 'unknown') + sim_file = data.get('file') + + logger.debug(f"bridge_meta={bridge_meta}, request_id={request_id}, sim_file={sim_file}") + logger.debug(f"path_simulation={path_simulation}") + + if not sim_file: + raise ValueError("No simulation file specified in request") + try: + # Handle Simul8 simulation + _handle_simulation(data, source, rabbitmq_manager, path_simulation, + response_templates, sim_file=sim_file) + except Simul8SimulationError as e: + logger.error(f"DEBUG: Simul8 simulation error: {str(e)}") + raise e + logger.debug(f"validating simulation data") + function_name = _validate_simulation_data(data) + logger.debug(f"Validation complete, function_name={function_name}") + + sim_path = path_simulation + logger.debug(f"extracting I/O specs") + inputs, outputs = _extract_io_specs(data) + logger.debug(f"I/O extraction complete, inputs={inputs}, outputs={outputs}") + + logger.debug(f"Starting simulation '{sim_file}' at path '{sim_path}'") + sim = Simul8Simulator(sim_path, sim_file, function_name) + + logger.debug("Simulator created, about to record startup complete") + # Record startup complete + + except Exception as e: + logger.error(f"Exception caught in handle_batch_simulation: {type(e).__name__}: {str(e)}") + logger.error(f"sim_file value at exception: {sim_file}") + logger.error(f"Exception traceback:", exc_info=True) + + # Now call your error handler + _handle_error(e, sim_file, rabbitmq_manager, source, response_templates) + +def _handle_simulation( + data: Dict[str, Any], + source: str, + message_broker: IMessageBroker, + path_simulation: str, + response_templates: Dict[str, Any], + sim_file: str +) -> None: + sim: Optional[Simul8Simulator] = None # Initialize sim to None + + """Process a Simul8 simulation request.""" + try: + # Extract run_time from inputs if present + bridge_meta = data.get('bridge_meta', 'unknown') + request_id = data.get('request_id', 'unknown') + inputs, outputs = _extract_io_specs(data) + run_time = int(inputs.get('run_time', 500)) + + logger.info("Starting Simul8 simulation '%s'", sim_file) + sim = Simul8Simulator(run_time=run_time) + + # Set the expected outputs from YAML + sim.expected_outputs = outputs if outputs else {} + logger.debug(f"Expected outputs set to: {sim.expected_outputs}") + + _send_progress(message_broker, source, 
sim_file, 0, response_templates) + + # Create full file path + file_path = os.path.join(path_simulation, sim_file) if path_simulation else sim_file + + # Run the simulation + results = sim.run(file_path=file_path, inputs=inputs) + + # Get metadata if needed + metadata = sim.get_metadata() if response_templates.get( + 'success', {}).get('include_metadata', False) else None + success_response = create_response( + 'success', sim_file, 'batch', response_templates, + outputs=results, metadata=metadata, bridge_meta=bridge_meta, + request_id=request_id + ) + _send_response(message_broker, source, success_response) + logger.info("Simul8 simulation '%s' completed successfully", sim_file) + finally: + # Only cleanup if sim was actually created + if sim is not None: + try: + sim.cleanup() + logger.debug("Simulator cleanup completed") + except Exception as cleanup_error: + logger.error(f"Error during cleanup: {cleanup_error}") + + + +def _validate_simulation_data( + data: Dict[str, Any]) -> Tuple[str, Optional[str]]: + """Validate and extract simulation parameters.""" + sim_file = data.get('file') + if not sim_file: + raise ValueError("Missing 'file' in simulation config") + return data.get('function_name') + + +def _extract_io_specs(data: Dict[str, Any] + ) -> Tuple[Dict[str, Any], List[str]]: + """Extract input and output specifications from data.""" + inputs = data.get('inputs', {}) + # Filter out run_time and other non-CSV parameters + filtered_inputs = {k: v for k, v in inputs.items() if k not in ['run_time', 'runtime']} + outputs = data.get('outputs', []) + + if not outputs: + raise ValueError("No outputs specified in simulation config") + return filtered_inputs, outputs + + + + +def _send_progress( + broker: IMessageBroker, + source: str, + sim_file: str, + percentage: int, + response_templates: Dict, + bridge_meta: str = 'unknown', + request_id: str = 'unknown' +) -> None: + """Send progress update if configured.""" + if response_templates.get('progress', {}).get('include_percentage', False): + progress_response = create_response( + 'progress', + sim_file, + 'batch', + response_templates, + percentage=percentage, + bridge_meta=bridge_meta, + request_id=request_id) + broker.send_result(source, progress_response) + + +def _get_metadata(sim: Simul8Simulator) -> Dict[str, Any]: + """Retrieve simulation metadata.""" + return sim.get_metadata() + + +def _send_response(broker: IMessageBroker, source: str, + response: Dict[str, Any]) -> None: + """Send response through message broker.""" + logger.debug(yaml.dump(response)) + broker.send_result(source, response) + + +def _handle_error(error: Exception, + sim_file: Optional[str], + broker: IMessageBroker, + source: str, + response_templates: Dict + ) -> None: + """Handle errors and send error response.""" + error_type = _determine_error_type(error) + error_response = create_response( + 'error', + sim_file or "unknown", + 'batch', + response_templates, + error={ + 'message': str(error), + 'type': error_type, + 'traceback': sys.exc_info() if response_templates.get( + 'error', + {}).get( + 'include_stacktrace', + False) else None}, + bridge_meta=response_templates.get('bridge_meta', 'unknown'), + request_id=response_templates.get('request_id', 'unknown') + ) + _send_response(broker, source, error_response) + + +def _determine_error_type(error: Exception) -> str: + """Map Python exceptions to error types.""" + if isinstance(error, FileNotFoundError): + return 'missing_file' + if isinstance(error, Simul8SimulationError): + return 'simul8_start_failure' if 
'simul8 engine' in str( + error) else 'execution_error' + if isinstance(error, TimeoutError): + return 'timeout' + if isinstance(error, ValueError): + return 'invalid_config' + return 'execution_error' diff --git a/agents/simul8/simul8_agent/src/core/simul8_simulator.py b/agents/simul8/simul8_agent/src/core/simul8_simulator.py new file mode 100644 index 0000000..d7539ce --- /dev/null +++ b/agents/simul8/simul8_agent/src/core/simul8_simulator.py @@ -0,0 +1,478 @@ +""" +simul8_simulator.py - Simul8 COM Interface for Simulations + +This module provides a class for interfacing with Simul8 via COM to run discrete event simulations. +It handles the lifecycle of Simul8 application instances, event handling, result collection, +and proper resource management. + +Part of the simulation service infrastructure that enables distributed +Simul8 computational workloads. +""" + +import os +import time +from pathlib import Path +import pythoncom +from win32com import client +from win32com.client.gencache import EnsureDispatch +from typing import Dict, List, Optional, Any, Union, cast + +from ..utils.logger import get_logger +from ..utils.csv_parser import yaml_csv_to_file +from ..utils.config_loader import load_config + +# Configure logger +logger = get_logger() + + +class Simul8SimulationError(Exception): + """Custom exception for Simul8 simulation errors.""" + + +class Simul8Simulator: + """ + Manages the lifecycle of a Simul8 simulation with proper resource management, + event handling and result collection. + """ + + def __init__( + self, + path: str = None, + file: str = None, + run_time: int = 1000) -> None: + """ + Initialize a Simul8 simulator. + + Args: + path: Directory path containing the simulation file (optional) + file: Name of the Simul8 simulation file (.S8) + run_time: Simulation run time in minutes (default: 1000) + """ + self.sim_path = Path(path).resolve() if path else None + self.sim_file = file + self.run_time = run_time + self.s8 = None + self.events = None + self.listen_for_messages = True + self.start_time = None + self.results = {} + self.expected_outputs = {} # Change from [] to {} + + # Only validate if both path and file are provided + if self.sim_path and self.sim_file: + self._validate() + + def _validate(self) -> None: + """Validate the simulation path and file.""" + if not self.sim_path.is_dir(): + raise FileNotFoundError( + f"Simulation directory not found: {self.sim_path}") + + sim_file_path = self.sim_path / self.sim_file + if not sim_file_path.exists(): + raise FileNotFoundError( + f"Simulation file '{self.sim_file}' not found at {self.sim_path}") + + if not str(sim_file_path).lower().endswith('.s8'): + logger.warning( + "Simulation file '%s' does not have .S8 extension", self.sim_file) + + def start(self) -> None: + """Initialize COM and create Simul8 instance.""" + logger.debug("Starting Simul8 engine") + try: + self.start_time = time.time() + + # Initialize COM LibrariesF + pythoncom.CoInitialize() + + # Create Simul8 instance + self.s8 = EnsureDispatch("Simul8.S8Simulation") + + # Set up event handling + self.events = client.WithEvents(self.s8, self._create_event_handler()) + + logger.debug("Simul8 engine started successfully") + except Exception as e: + logger.error("Failed to start Simul8 engine: %s", str(e)) + self.cleanup() + raise Simul8SimulationError(f"Failed to start Simul8 engine: {str(e)}") from e + + def _create_event_handler(self): + """Create an event handler class for this simulation instance.""" + simulation = self + + class EventHandler: + def 
OnS8SimulationOpened(self): + logger.info("The Simulation has been opened.") + # simulation.s8.RunSim(simulation.run_time) + simulation.s8.RunSim(simulation.run_time) + + def OnS8SimulationEndRun(self): + logger.info("The simulation run has ended.") + + # Collect results + n = 1 + logger.debug("Total results count: %d", simulation.s8.ResultsCount) + while n <= simulation.s8.ResultsCount: + # try: + result = simulation.s8.Results(n) + # simulation.results[result.Name] = result.Value + # logger.debug("Result %d: %s = %s", n, result.Name, result.Value) + # except Exception as e: + # logger.error("Error retrieving result %d: %s", n, e) + + n += 1 + + # End the message loop + simulation.listen_for_messages = False + + return EventHandler + + def run(self, file_path: Optional[str] = None, inputs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: + if self.s8 is None: + self.start() + + + # Reset results + self.results = {} + self.listen_for_messages = True + + # Store original working directory to restore later + original_cwd = os.getcwd() + + try: + # Determine file path - store as instance attribute + self.actual_file_path = file_path + if not self.actual_file_path and self.sim_path and self.sim_file: + self.actual_file_path = str(self.sim_path / self.sim_file) + + if not self.actual_file_path: + raise Simul8SimulationError("No simulation file specified") + + # Change working directory to simulation file directory + sim_directory = os.path.dirname(self.actual_file_path) + logger.debug(f"Original working directory: {original_cwd}") + logger.debug(f"Actual file path: {self.actual_file_path}") + logger.debug(f"Sim directory: {sim_directory}") + os.chdir(sim_directory) + logger.debug(f"Changed working directory to: {os.getcwd()}") + + logger.debug("Opening simulation file: %s", self.actual_file_path) + logger.debug("inputs: %s", inputs) + + # Set input parameters if provided + self._set_simulation_inputs(inputs) + + self.s8.Open(self.actual_file_path) + + while self.listen_for_messages: + pythoncom.PumpWaitingMessages() + + self._collect_simulation_results() + return self.results + + except Exception as e: + logger.error("Simulation error: %s", str(e), exc_info=True) + raise Simul8SimulationError(f"Simulation error: {str(e)}") from e + finally: + # Restore original working directory + os.chdir(original_cwd) + + # Close the simulation + pass + + def _set_simulation_inputs(self, inputs: Dict[str, Any]) -> None: + """ + Write simulation inputs to input.csv file in the same directory as the S8 file. 
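+
+        The inputs must follow the structured CSV form
+        ({'columns': [...], 'r1': [...], ...}); they are validated, written
+        to input.csv next to the simulation model, and the generated file is
+        tracked for removal during cleanup.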
+ + Args: + inputs: Dictionary of input values with CSV structure + + Raises: + Simul8SimulationError: If inputs are invalid or file creation fails + """ + + if not inputs: + raise Simul8SimulationError( + "No inputs provided - Simul8 simulation requires input data with structure: " + "{'columns': ['col1', 'col2'], 'r1': ['val1', 'val2'], ...}" + ) + + logger.info(f"Processing {len(inputs)} input parameters") + + try: + # Validate that inputs have the correct CSV structure + from ..utils.csv_parser import validate_csv_structure + validate_csv_structure(inputs) + + # Determine where the S8 file is located + sim_directory = None + + if hasattr(self, 'actual_file_path') and self.actual_file_path: + sim_directory = os.path.dirname(self.actual_file_path) + elif self.sim_path and self.sim_file: + sim_directory = str(self.sim_path) + else: + # Load config to get simulation path + try: + config = load_config() + config_sim_path = config.get('simulation', {}).get('path') + if config_sim_path and os.path.exists(config_sim_path): + sim_directory = config_sim_path + else: + # Fallback to examples directory + current_dir = os.path.dirname(os.path.abspath(__file__)) + examples_dir = os.path.normpath(os.path.join(current_dir, "..", "..", "docs", "examples")) + + if os.path.exists(examples_dir): + sim_directory = examples_dir + else: + sim_directory = os.getcwd() + except Exception as e: + logger.warning(f"Could not load config: {e}") + # Fallback to examples directory + current_dir = os.path.dirname(os.path.abspath(__file__)) + examples_dir = os.path.normpath(os.path.join(current_dir, "..", "..", "docs", "examples")) + + if os.path.exists(examples_dir): + sim_directory = examples_dir + else: + sim_directory = os.getcwd() + + input_file_path = os.path.join(sim_directory, "input.csv") + + # Create the CSV file from the validated inputs + logger.info("Processing structured CSV data for Simul8") + + yaml_csv_to_file(inputs, file_path=input_file_path) + + logger.debug(f"Created input file at: {input_file_path}") + + # Verify the file was created + if os.path.exists(input_file_path): + with open(input_file_path, 'r') as f: + content = f.read() + logger.debug(f"File content:\n{content}") + else: + raise Simul8SimulationError(f"Failed to create input.csv at: {input_file_path}") + + # Store the path for cleanup later + if not hasattr(self, '_temp_files'): + self._temp_files = [] + self._temp_files.append(input_file_path) + + except Exception as e: + logger.error(f"Failed to create input file: {str(e)}", exc_info=True) + raise Simul8SimulationError(f"Error creating input file: {str(e)}") + def _collect_simulation_results(self) -> None: + """ + Read simulation results from OUTPUTDATA.csv and map to expected output names. + """ + from ..utils.csv_parser import read_csv_to_dict + + # Look for the output file in multiple locations + sim_directory = os.path.dirname(self.actual_file_path) + logger.debug(f"Looking for output files. 
Sim directory: {sim_directory}") + logger.debug(f"Current working directory: {os.getcwd()}") + + # Try different file names and locations + possible_files = [ + ("OUTPUTDATA.csv", sim_directory), + ("OUTPUT.csv", sim_directory), + ("OUTPUTDATA.csv", os.getcwd()), + ("OUTPUT.csv", os.getcwd()), + ("OUTPUTDATA.csv", os.path.dirname(sim_directory)), # parent dir + ("OUTPUT.csv", os.path.dirname(sim_directory)) # parent dir + ] + + output_file_path = None + + for filename, directory in possible_files: + potential_path = os.path.join(directory, filename) + logger.debug(f"Checking for output file: {potential_path}") + + if os.path.exists(potential_path): + output_file_path = potential_path + logger.debug(f"Found output file: {output_file_path}") + break + + if not output_file_path: + logger.warning("Output file not found in any location") + for filename, directory in possible_files: + logger.debug(f" - {os.path.join(directory, filename)}") + self.results = {"error": "No output file found"} + return + + try: + logger.debug(f"Reading results from: {output_file_path}") + + # Read and display the raw file content + with open(output_file_path, 'r') as f: + content = f.read() + + # Create output mapping from expected outputs (from YAML) + + # Create mapping from CSV headers to YAML output names + output_mapping = {} + if self.expected_outputs: + # First, get the CSV headers to see what we're working with + with open(output_file_path, 'r') as f: + import csv + reader = csv.reader(f) + csv_headers = next(reader, []) + csv_headers = [header.strip() for header in csv_headers if header.strip()] + + + # Get YAML output names in order + yaml_output_names = list(self.expected_outputs.keys()) + + # Map CSV headers to YAML output names in order + for i, csv_header in enumerate(csv_headers): + if i < len(yaml_output_names): + yaml_name = yaml_output_names[i] + output_mapping[csv_header] = yaml_name + else: + output_mapping[csv_header] = csv_header + + else: + output_mapping = {} + # Parse the CSV file with header-based approach + results = read_csv_to_dict(output_file_path, output_mapping=output_mapping) + + # Store the parsed results + if results: + self.results.update(results) + logger.debug(f"Collected {len(results)} results from output file") + else: + self.results = {"error": "No results parsed from output file"} + + # Add this file to temp_files for cleanup + if not hasattr(self, '_temp_files'): + self._temp_files = [] + self._temp_files.append(output_file_path) + + except Exception as e: + logger.error(f"Failed to read output file: {str(e)}") + self.results['error'] = f"Error reading results: {str(e)}" + def get_metadata(self) -> Dict[str, Any]: + """Get metadata about the simulation execution.""" + metadata = {} + if self.start_time: + metadata['execution_time'] = time.time() - self.start_time + + if self.s8: + try: + metadata['simul8_version'] = self.s8.Version + except Exception as e: + logger.warning("Error retrieving Simul8 version: %s", str(e)) + + return metadata + + def cleanup(self) -> None: + """Clean up COM resources and temporary files.""" + + # Clean up temporary files first + if hasattr(self, '_temp_files'): + for temp_file in self._temp_files: + try: + if os.path.exists(temp_file): + os.remove(temp_file) + logger.debug(f"Deleted temporary file: {temp_file}") + else: + logger.debug(f"Temporary file not found (already deleted?): {temp_file}") + except Exception as e: + logger.warning(f"Failed to delete temporary file {temp_file}: {str(e)}") + + # Clear the list + self._temp_files = [] + + # 
Clean up COM resources + if self.s8: + try: + # Try to quit the application properly + try: + # First try to close any open simulation + try: + + self.s8.Close() + logger.debug("Closed Simul8 simulation") + except Exception as close_error: + logger.debug(f"Error closing simulation: {str(close_error)}") + + time.sleep(0.5) + + except Exception as quit_error: + logger.warning("Error quitting Simul8 application: %s", str(quit_error)) + + finally: + try: + del self.s8 + logger.debug("Released Simul8 COM object reference") + except Exception as del_error: + logger.warning("Error deleting COM object: %s", str(del_error)) + + except Exception as e: + logger.warning("Error during COM cleanup: %s", str(e)) + finally: + self.s8 = None + if hasattr(self, 'events'): + self.events = None + + # Uninitialize COM (this should match the CoInitialize call) + try: + # Force garbage collection to release any remaining COM references + import gc + gc.collect() + time.sleep(0.2) + + pythoncom.CoUninitialize() + logger.debug("COM uninitialized") + except Exception as e: + logger.warning("Error uninitializing COM: %s", str(e)) + + # As a last resort, if cleanup seems to have failed, try force killing processes + # This can be enabled via configuration if needed + try: + self.force_kill_simul8_processes() + except Exception as config_error: + logger.debug(f"Could not check force cleanup config: {str(config_error)}") + # Optionally, you can uncomment the next line for development/testing: + # self.force_kill_simul8_processes() + + def force_kill_simul8_processes(self) -> None: + """Force kill any remaining Simul8 processes as a last resort.""" + try: + import subprocess + import psutil + + # Find and terminate Simul8 processes + killed_processes = [] + for proc in psutil.process_iter(['pid', 'name', 'exe']): + try: + # Check for Simul8 executable names (common variations) + proc_name = proc.info['name'].lower() + if any(s8_name in proc_name for s8_name in ['simul8', 's8.exe', 'simul8.exe']): + proc.terminate() + killed_processes.append(proc.info['pid']) + logger.warning(f"Terminated Simul8 process: {proc.info['name']} (PID: {proc.info['pid']})") + except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): + pass + + if killed_processes: + time.sleep(1) # Give processes time to terminate gracefully + + # Force kill any that didn't terminate + for proc in psutil.process_iter(['pid', 'name']): + try: + if proc.info['pid'] in killed_processes: + if proc.is_running(): + proc.kill() + logger.warning(f"Force killed Simul8 process PID: {proc.info['pid']}") + except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): + pass + + except ImportError: + logger.warning("psutil not available - cannot force kill Simul8 processes") + except Exception as e: + logger.error(f"Error force killing Simul8 processes: {str(e)}") diff --git a/agents/simul8/simul8_agent/src/interfaces/__init__.py b/agents/simul8/simul8_agent/src/interfaces/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/agents/simul8/simul8_agent/src/interfaces/agent.py b/agents/simul8/simul8_agent/src/interfaces/agent.py new file mode 100644 index 0000000..7938d61 --- /dev/null +++ b/agents/simul8/simul8_agent/src/interfaces/agent.py @@ -0,0 +1,39 @@ +""" +This module defines the `ISimul8Agent` interface. +""" +from abc import ABC, abstractmethod +from typing import Any, Dict + + +class ISimul8Agent(ABC): + """ + Interface for Simul8 Agent that handles message reception, processing, + and result distribution via RabbitMQ. 
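+
+    Implementations are expected to establish the broker connection during
+    initialization and to release it when stop() is called.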
+ """ + @abstractmethod + def __init__(self, agent_id: str, config_path: str = None) -> None: + """ + Initialize the Simul8 agent with the specified ID, and optionally a configuration file. + + Args: + agent_id (str): The ID of the agent + config_path (str, optional): The path to the configuration file + """ + @abstractmethod + def start(self) -> None: + """ + Start consuming messages from the input queue. + """ + @abstractmethod + def stop(self) -> None: + """ + Stop the agent and close connections. + """ + @abstractmethod + def get_config(self) -> Dict[str, Any]: + """ + Retrieve the agent's configuration as a dictionary. + + Returns: + Dict[str, Any]: The agent's configuration + """ diff --git a/agents/simul8/simul8_agent/src/interfaces/config_manager.py b/agents/simul8/simul8_agent/src/interfaces/config_manager.py new file mode 100644 index 0000000..ec7c425 --- /dev/null +++ b/agents/simul8/simul8_agent/src/interfaces/config_manager.py @@ -0,0 +1,26 @@ +""" +This module defines the `IConfigManager` interface. +""" +from abc import ABC, abstractmethod +from typing import Dict, Any + + +class IConfigManager(ABC): + """ + Interface for managing configuration. + """ + @abstractmethod + def get_config(self) -> Dict[str, Any]: + """ + Retrieve the loaded configuration as a dictionary. + """ + @abstractmethod + def get_default_config(self) -> Dict[str, Any]: + """ + Retrieve the default configuration as a dictionary. + """ + @abstractmethod + def _validate_config(self, config_data: Dict[str, Any]) -> Dict[str, Any]: + """ + Validate the configuration using the Pydantic model. + """ diff --git a/agents/simul8/simul8_agent/src/main.py b/agents/simul8/simul8_agent/src/main.py new file mode 100644 index 0000000..22f102c --- /dev/null +++ b/agents/simul8/simul8_agent/src/main.py @@ -0,0 +1,193 @@ +""" +Main entry point for the simul8 Agent application. +""" +from pathlib import Path +import logging +import click +from .utils.logger import setup_logger +from .interfaces.agent import ISimul8Agent +from .core.agent import Simul8Agent +from .utils.config_loader import load_config + + +@click.command() +@click.option('--config-file', '-c', type=click.Path(exists=True), + default=None, help='Path to custom configuration file') +@click.option('--generate-config', is_flag=True, + help='Generate a default configuration file in the current directory') +@click.option('--generate-project', is_flag=True, + help='Generate default project files in the current directory') +def main(config_file=None, generate_config=False, + generate_project=False) -> None: + """ + An agent service to manage simul8 simulations. + """ + if generate_config: + generate_default_config() + return + if generate_project: + generate_default_project() + return + if config_file: + run_agent(config_file) + else: + config_path = Path('config.yaml') + if not config_path.exists(): + print(""" +Error: Configuration file 'config.yaml' not found. + +To generate a default configuration file, run: +simul8-agent --generate-config + +You may customize the generated file as needed and re-run the program. 
+ +Alternatively, if you already have a custom configuration file, use the +--config-file option to specify its path: +simul8-agent --config-file /path/to/your/config.yaml + """) + return + else: + run_agent(str(config_path)) + + +def generate_default_config(): + """Copy the template configuration file to the current directory if not already present.""" + config_path = Path.cwd() / 'config.yaml' + if config_path.exists(): + print(f"File already exists at path: {config_path}") + return + try: + try: + from importlib.resources import files + template_path = files('simul8_agent.config').joinpath( + 'config.yaml.template') + with open(template_path, 'rb') as src, open(config_path, 'wb') as dst: + dst.write(src.read()) + except (ImportError, AttributeError): + import pkg_resources + template_content = pkg_resources.resource_string('simul8_agent.config', + 'config.yaml.template') + with open(config_path, 'wb') as dst: + dst.write(template_content) + print(f"Configuration template copied to: {config_path}") + except FileNotFoundError: + print("Error: Template configuration file not found.") + except Exception as e: + print(f"Error generating configuration file: {e}") + + +def generate_default_project(): + """Copy all template project files to the current directory, only if they don't already exist.""" + + existing_files = [] + created_files = [] + + # Mapping from output filename to importlib resource location + files_to_generate = { + 'config.yaml': ('simul8_agent.config', 'config.yaml.template'), + 'simulation_batch.s8': ('simul8_agent.resources', 'simulation_batch.s8'), + 'client/use_simul8_agent.py': ('simul8_agent.resources', 'use_simul8_agent.py'), + 'client/use.yaml': ('simul8_agent.resources', 'use.yaml.template'), + 'client/simulation.yaml': ('simul8_agent.api', 'simulation.yaml.template'), + 'client/README.md': ('simul8_agent.resources', 'README.md'), + } + + # Descriptions for each file + file_descriptions = { + 'config.yaml': "Configuration file for the simul8 agent", + 'simulation_batch.s8': "A simulation file for simul8", + 'client/use_simul8_agent.py': "Python script to use the simul8 agent", + 'client/use.yaml': "Client-side usage configuration (use.yaml)", + 'client/simulation.yaml': "Example API payload to communicate with the simul8 agent", + 'client/README.md': "README file for the client directory", + } + + try: + # Ensure client directory exists + Path("client").mkdir(parents=True, exist_ok=True) + + try: + from importlib.resources import files + for output_name, (package, + resource_name) in files_to_generate.items(): + output_path = Path(output_name) + if output_path.exists(): + existing_files.append(output_name) + continue + resource_path = files(package).joinpath(resource_name) + with open(resource_path, 'rb') as src, open(output_path, 'wb') as dst: + dst.write(src.read()) + created_files.append(output_name) + except (ImportError, AttributeError): + import pkg_resources + for output_name, (package, + resource_name) in files_to_generate.items(): + output_path = Path(output_name) + if output_path.exists(): + existing_files.append(output_name) + continue + template_content = pkg_resources.resource_string( + package, resource_name) + with open(output_path, 'wb') as dst: + dst.write(template_content) + created_files.append(output_name) + + # Print result summary + print("\nProject generation summary:\n") + + if created_files: + print("🆕 Files created:") + for f in created_files: + description = file_descriptions.get( + f, "No description available") + print(f" - {f:<35} : 
{description}") + + if existing_files: + print("\n📄 Files already present (skipped):") + for f in existing_files: + description = file_descriptions.get( + f, "No description available") + print(f" - {f:<35} : {description}") + + if not created_files: + print("\nAll project files already exist. Nothing was created.") + else: + print( + "\nYou can now customize these files as needed and start using the simul8 agent.") + + except FileNotFoundError: + print("❌ Error: One or more template files were not found.") + except Exception as e: + print(f"❌ Error generating project files: {e}") + + +def run_agent(config_file): + """Initializes and starts a single simul8 agent instance.""" + broker_type = "rabbitmq" + config = load_config(config_file) + logging_level = config['logging']['level'] + logging_file = config['logging']['file'] + + logger: logging.Logger = setup_logger( + level=getattr(logging, logging_level.upper(), logging.INFO), + log_file=logging_file) + + agent_id = config['agent']['agent_id'] + agent: ISimul8Agent = Simul8Agent( + agent_id, + broker_type=broker_type, + config_path=config_file) + + try: + logger.debug("Starting simul8 agent with config: %s", config) + agent.start() + except KeyboardInterrupt: + logger.info("Shutting down agent due to keyboard interrupt") + agent.stop() + except Exception as e: + logger.error("Error running agent: %s", e) + agent.stop() + + +if __name__ == "__main__": + main() diff --git a/agents/simul8/simul8_agent/src/utils/__init__.py b/agents/simul8/simul8_agent/src/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/agents/simul8/simul8_agent/src/utils/config_loader.py b/agents/simul8/simul8_agent/src/utils/config_loader.py new file mode 100644 index 0000000..95a85c2 --- /dev/null +++ b/agents/simul8/simul8_agent/src/utils/config_loader.py @@ -0,0 +1,146 @@ +""" +config_loader.py - Configuration loader utility + +This module provides functionality to load configuration from YAML files, +with support for environment variable substitution and validation. +""" + +import os +from typing import Dict, Any, Optional, Union +from pathlib import Path +from importlib import resources +import yaml +from ..utils.logger import get_logger + +# Configure logger +logger = get_logger() + +DEFAULT_CONFIG_PATH = Path(__file__).parent.parent / \ + "config" / "config.yaml.template" + + +def get_base_dir() -> Path: + """ + Find the base directory by looking for main.py file by traversing up from the current file. + + Returns: + Path object pointing to the base directory + """ + current_dir: Path = Path(__file__).resolve().parent + + while current_dir.parent != current_dir: + if (current_dir / "main.py").exists(): + return current_dir + if (current_dir / "app.py").exists() or (current_dir / "run.py").exists(): + return current_dir + current_dir = current_dir.parent + + cwd: Path = Path.cwd() + if (cwd / "main.py").exists() or (cwd / + "app.py").exists() or (cwd / "run.py").exists(): + return cwd + + test_dir: Path = Path(__file__).resolve().parent + while test_dir.parent != test_dir: + if (test_dir / "config").is_dir() and (test_dir / + "config" / "config.yaml.template").exists(): + return test_dir + test_dir = test_dir.parent + + return cwd + + +def load_config( + config_path: Optional[Union[str, Path]] = None) -> Dict[str, Any]: + """ + Load configuration from a YAML file. 
+ + Args: + config_path: Path to the configuration file (optional, defaults to 'config/config.yaml') + + Returns: + Dictionary containing the configuration + + Raises: + FileNotFoundError: If the configuration file does not exist + yaml.YAMLError: If the YAML file is invalid + """ + if config_path is None: + try: + logger.debug("Loading default configuration file") + with resources.open_text("simul8_agent.config", "config.yaml.template") as f: + config = yaml.safe_load(f) + except FileNotFoundError as exc: + raise FileNotFoundError( + "Default configuration file not found inside the package." + ) from exc + else: + logger.debug("Loading configuration file from path: %s", config_path) + config_file: Path = Path(config_path) + if not config_file.exists(): + raise FileNotFoundError( + f"Configuration file not found: {config_file}") + with open(config_file, 'r', encoding='utf-8') as f: + config = yaml.safe_load(f) + config = _substitute_env_vars(config) + + return config + + +def _substitute_env_vars( + config: Union[Dict[str, Any], list, str] +) -> Union[Dict[str, Any], list, str]: + """ + Recursively substitute environment variables in configuration values. + Environment variables should be in the format ${ENV_VAR} or ${ENV_VAR:default_value} + + Args: + config: Configuration dictionary + + Returns: + Configuration with environment variables substituted + """ + if isinstance(config, dict): + return {k: _substitute_env_vars(v) for k, v in config.items()} + if isinstance(config, list): + return [_substitute_env_vars(item) for item in config] + if isinstance(config, str) and "${" in config and "}" in config: + start_idx: int = config.find("${") + end_idx: int = config.find("}", start_idx) + if start_idx != -1 and end_idx != -1: + env_var: str = config[start_idx + 2:end_idx] + + if ":" in env_var: + env_name, default = env_var.split(":", 1) + else: + env_name, default = env_var, "" + + env_value: str = os.environ.get(env_name, default) + return config[:start_idx] + env_value + config[end_idx + 1:] + + return config + + +def get_config_value(config: Dict[str, Any], + path: str, default: Any = None) -> Any: + """ + Get a configuration value by its dotted path. + + Args: + config: Configuration dictionary + path: Dotted path to the configuration value (e.g., 'rabbitmq.host') + default: Default value to return if the path does not exist + + Returns: + Configuration value or default + """ + keys: list[str] = path.split('.') + value: Any = config + + for key in keys: + if isinstance(value, dict) and key in value: + value = value[key] + else: + return default + + return value diff --git a/agents/simul8/simul8_agent/src/utils/config_manager.py b/agents/simul8/simul8_agent/src/utils/config_manager.py new file mode 100644 index 0000000..677bf0f --- /dev/null +++ b/agents/simul8/simul8_agent/src/utils/config_manager.py @@ -0,0 +1,308 @@ +""" +Configuration manager for the Simul8 Agent using Pydantic for validation. 
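+
+Illustrative round trip between the nested YAML layout and the flat Pydantic
+model (the host value is hypothetical; omitted sections fall back to the Field
+defaults, and unknown keys are ignored via extra='ignore'):
+
+    cfg = Config.from_dict({"rabbitmq": {"host": "broker.internal"}})
+    cfg.rabbitmq_host                      # -> "broker.internal"
+    cfg.to_dict()["rabbitmq"]["port"]      # -> 5672 (default)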
+""" + +from pathlib import Path +from typing import Optional, Dict, Any, Literal +from enum import Enum +from pydantic import BaseModel, Field, ValidationError, ConfigDict + +from .logger import get_logger +from .config_loader import load_config + +logger = get_logger() + + +class LogLevel(str, Enum): + """Supported logging levels.""" + DEBUG = "DEBUG" + INFO = "INFO" + WARNING = "WARNING" + ERROR = "ERROR" + CRITICAL = "CRITICAL" + + +class Config(BaseModel): + """Main configuration model using Pydantic for validation.""" + model_config = ConfigDict(extra='ignore') + + # Agent configuration + agent_id: str = Field(default="simul8") + + # RabbitMQ configuration + rabbitmq_host: str = Field(default="localhost") + rabbitmq_port: int = Field(default=5672) + rabbitmq_username: str = Field(default="guest") + rabbitmq_password: str = Field(default="guest") + rabbitmq_heartbeat: int = Field(default=600) + rabbitmq_virtual_host: str = Field(default="/") + rabbitmq_tls: bool = Field(default=False) + + # Simulation folder path + simulation_path: str = Field(default=".") + + # Exchanges configuration + input_exchange: str = Field(default="ex.bridge.output") + output_exchange: str = Field(default="ex.sim.result") + + # Queue configuration + queue_durable: bool = Field(default=True) + queue_prefetch_count: int = Field(default=1) + + # Logging configuration + log_level: LogLevel = Field(default=LogLevel.INFO) + log_file: str = Field(default="logs/simul8_agent.log") + + # Performance configuration + performance_enabled: bool = Field(default=False) + performance_log_dir: str = Field(default="performance_logs") + performance_log_filename: str = Field(default="performance_metrics.csv") + + # Response templates + # Success template + success_status: Literal["success"] = Field(default="success") + simulation_type: Literal["batch", "streaming"] = Field(default="batch") + success_timestamp_format: str = Field(default="%Y-%m-%dT%H:%M:%SZ") + success_include_metadata: bool = Field(default=True) + success_metadata_fields: list[str] = Field( + default=["execution_time", "memory_usage", "simul8_version"] + ) + + # Error template + error_status: Literal["error"] = Field(default="error") + error_include_stacktrace: bool = Field(default=False) + error_timestamp_format: str = Field(default="%Y-%m-%dT%H:%M:%SZ") + error_codes: Dict[str, int] = Field( + default={ + "invalid_config": 400, + "simul8_start_failure": 500, + "execution_error": 500, + "timeout": 504, + "missing_file": 404 + } + ) + + # Progress template + progress_status: Literal["in_progress"] = Field(default="in_progress") + progress_include_percentage: bool = Field(default=True) + progress_update_interval: int = Field(default=5) + progress_timestamp_format: str = Field(default="%Y-%m-%dT%H:%M:%SZ") + + def to_dict(self) -> Dict[str, Any]: + """Convert the model to a dictionary with nested structure.""" + return { + "agent": { + "agent_id": self.agent_id + }, + "rabbitmq": { + "host": self.rabbitmq_host, + "port": self.rabbitmq_port, + "username": self.rabbitmq_username, + "password": self.rabbitmq_password, + "heartbeat": self.rabbitmq_heartbeat, + "vhost": self.rabbitmq_virtual_host, + "tls": self.rabbitmq_tls + }, + "simulation": { + "path": self.simulation_path + }, + "exchanges": { + "input": self.input_exchange, + "output": self.output_exchange + }, + "queue": { + "durable": self.queue_durable, + "prefetch_count": self.queue_prefetch_count + }, + "logging": { + "level": self.log_level.value, + "file": self.log_file + }, + "performance": { + "enabled": 
self.performance_enabled, + "log_dir": self.performance_log_dir, + "log_filename": self.performance_log_filename + }, + + "response_templates": { + "success": { + "status": self.success_status, + "simulation": { + "type": self.simulation_type + }, + "timestamp_format": self.success_timestamp_format, + "include_metadata": self.success_include_metadata, + "metadata_fields": self.success_metadata_fields + }, + "error": { + "status": self.error_status, + "include_stacktrace": self.error_include_stacktrace, + "error_codes": self.error_codes, + "timestamp_format": self.error_timestamp_format + }, + "progress": { + "status": self.progress_status, + "include_percentage": self.progress_include_percentage, + "update_interval": self.progress_update_interval, + "timestamp_format": self.progress_timestamp_format + } + } + } + + @classmethod + def from_dict(cls, config_dict: Dict[str, Any]) -> 'Config': + """Create a Config instance from a nested dictionary.""" + # Extract values from nested structure + flat_config = {} + + # Extract agent section if present + if agent := config_dict.get("agent", {}): + flat_config["agent_id"] = agent.get("agent_id", "simul8") + + # Extract rabbitmq section if present + if rabbitmq := config_dict.get("rabbitmq", {}): + flat_config["rabbitmq_host"] = rabbitmq.get("host", "localhost") + flat_config["rabbitmq_port"] = rabbitmq.get("port", 5672) + flat_config["rabbitmq_username"] = rabbitmq.get( + "username", "guest") + flat_config["rabbitmq_password"] = rabbitmq.get( + "password", "guest") + flat_config["rabbitmq_heartbeat"] = rabbitmq.get("heartbeat", 600) + flat_config["rabbitmq_virtual_host"] = rabbitmq.get( + "vhost", "/") + flat_config["rabbitmq_tls"] = rabbitmq.get("tls", False) + + if simulation := config_dict.get("simulation", {}): + flat_config["simulation_path"] = simulation.get( + "path", ".") + + # Extract exchanges section if present + if exchanges := config_dict.get("exchanges", {}): + flat_config["input_exchange"] = exchanges.get( + "input", "ex.bridge.output") + flat_config["output_exchange"] = exchanges.get( + "output", "ex.sim.result") + + # Extract queue section if present + if queue := config_dict.get("queue", {}): + flat_config["queue_durable"] = queue.get("durable", True) + flat_config["queue_prefetch_count"] = queue.get( + "prefetch_count", 1) + + # Extract logging section if present + if logging := config_dict.get("logging", {}): + flat_config["log_level"] = logging.get("level", LogLevel.INFO) + flat_config["log_file"] = logging.get( + "file", "logs/simul8_agent.log") + + # Extract performance section if present + if performance := config_dict.get("performance", {}): + flat_config["performance_enabled"] = performance.get( + "enabled", False) + flat_config["performance_log_dir"] = performance.get( + "log_dir", "performance_logs") + flat_config["performance_log_filename"] = performance.get( + "log_filename", "performance_metrics.csv") + + + # Extract response_templates section if present + if templates := config_dict.get("response_templates", {}): + # Success template + if success := templates.get("success", {}): + flat_config["success_status"] = success.get( + "status", "success") + if simulation := success.get("simulation", {}): + flat_config["simulation_type"] = simulation.get( + "type", "batch") + flat_config["success_timestamp_format"] = success.get( + "timestamp_format", "%Y-%m-%dT%H:%M:%SZ") + flat_config["success_include_metadata"] = success.get( + "include_metadata", True) + flat_config["success_metadata_fields"] = 
success.get("metadata_fields", + ["execution_time", + "memory_usage", + "simul8_version"]) + + # Error template + if error := templates.get("error", {}): + flat_config["error_status"] = error.get("status", "error") + flat_config["error_include_stacktrace"] = error.get( + "include_stacktrace", False) + flat_config["error_timestamp_format"] = error.get( + "timestamp_format", "%Y-%m-%dT%H:%M:%SZ") + flat_config["error_codes"] = error.get("error_codes", { + "invalid_config": 400, + "simul8_start_failure": 500, + "execution_error": 500, + "timeout": 504, + "missing_file": 404 + }) + + # Progress template + if progress := templates.get("progress", {}): + flat_config["progress_status"] = progress.get( + "status", "in_progress") + flat_config["progress_include_percentage"] = progress.get( + "include_percentage", True) + flat_config["progress_update_interval"] = progress.get( + "update_interval", 5) + flat_config["progress_timestamp_format"] = progress.get( + "timestamp_format", "%Y-%m-%dT%H:%M:%SZ") + + return cls(**flat_config) + + +class ConfigManager: + """ + Manager for loading and providing access to application configuration. + """ + + def __init__(self, config_path: Optional[str] = None) -> None: + """ + Initialize the configuration manager. + + Args: + config_path (Optional[str]): Path to the configuration file. + If None, uses the default location. + """ + self.config_path: Path = Path(config_path) if config_path else Path( + __file__).parent.parent.parent.parent / "config.yaml" + try: + raw_config = load_config(self.config_path) + self.config = self._validate_config(raw_config) + except (FileNotFoundError, ValidationError) as e: + logger.warning("Configuration error: %s, using defaults.", str(e)) + self.config = self.get_default_config() + except (IOError, PermissionError) as e: + logger.error("File access error: %s, using defaults.", str(e)) + self.config = self.get_default_config() + except Exception as e: + logger.error("Unexpected error: %s, using defaults.", str(e)) + logger.exception("Full traceback:") + self.config = self.get_default_config() + + def _validate_config(self, config_data: Dict[str, Any]) -> Dict[str, Any]: + """Validate configuration using Pydantic model.""" + try: + # Create Config instance from nested dictionary + config_instance = Config.from_dict(config_data) + # Convert back to nested dictionary format + validated_config = config_instance.to_dict() + logger.info("Configuration validated successfully.") + return validated_config + except ValidationError as e: + logger.error("Configuration validation failed: %s", str(e)) + raise + + def get_default_config(self) -> Dict[str, Any]: + """Get default configuration as dictionary.""" + return Config().to_dict() + + def get_config(self) -> Dict[str, Any]: + """ + Get the loaded configuration. + + Returns: + Dict[str, Any]: Configuration parameters + """ + return self.config diff --git a/agents/simul8/simul8_agent/src/utils/create_response.py b/agents/simul8/simul8_agent/src/utils/create_response.py new file mode 100644 index 0000000..7750336 --- /dev/null +++ b/agents/simul8/simul8_agent/src/utils/create_response.py @@ -0,0 +1,159 @@ +""" +create_response.py + +This module provides a shared utility for creating standardized response objects +based on templates defined in configuration. It supports various response types +including success, error, progress, and streaming messages. + +Used by both batch and streaming simulation processors to ensure consistent +response formatting across the simulation service. 
+""" + +from typing import Dict, Any +from datetime import datetime + + +def _handle_success_response( + response: Dict[str, Any], + sim_type: str, + kwargs: Dict[str, Any] +) -> None: + """Handle success template type specific logic.""" + # For batch, this is 'outputs', for streaming this is 'data' + if sim_type == 'batch': + response['simulation']['outputs'] = kwargs.get('outputs', {}) + else: + response['simulation']['outputs'] = kwargs.get('data', {}) + + +def _handle_error_response( + response: Dict[str, Any], + template: Dict[str, Any], + kwargs: Dict[str, Any] +) -> None: + """Handle error template type specific logic.""" + error_info: Dict[str, Any] = kwargs.get('error', {}) + response['error'] = { + 'message': error_info.get('message', 'Unknown error'), + 'code': template.get('error_codes', {}).get( + error_info.get('type', 'execution_error'), 500) + } + + # Add error type if available + if 'type' in error_info: + response['error']['type'] = error_info['type'] + + # Add details if available + if 'details' in error_info: + response['error']['details'] = error_info['details'] + + # Add stack trace if configured + if template.get('include_stacktrace', False) and 'traceback' in error_info: + response['error']['traceback'] = error_info['traceback'] + + +def _handle_progress_response( + response: Dict[str, Any], + template: Dict[str, Any], + kwargs: Dict[str, Any] +) -> None: + """Handle progress template type specific logic.""" + if template.get('include_percentage', False) and 'percentage' in kwargs: + response['progress'] = { + 'percentage': kwargs['percentage'] + } + + # Add message if available + if 'message' in kwargs: + if 'progress' not in response: + response['progress'] = {} + response['progress']['message'] = kwargs['message'] + + # Add streaming data if available (for streaming mode) + if 'data' in kwargs and kwargs['data']: + response['data'] = kwargs['data'] + + +def _handle_streaming_response( + response: Dict[str, Any], + kwargs: Dict[str, Any] +) -> None: + """Handle streaming template type specific logic.""" + # Add streaming data + if 'data' in kwargs: + response['data'] = kwargs['data'] + + +def create_response( + template_type: str, + sim_file: str, + sim_type: str, + response_templates: Dict[str, Any], + bridge_meta: str, + request_id: str, + **kwargs: Any +) -> Dict[str, Any]: + """ + Create a response based on the template defined in the configuration. 
+ + Args: + template_type: Type of template to use ('success', 'error', 'progress', 'streaming') + sim_file: Name of the simulation file + sim_type: Type of simulation ('batch' or 'streaming') + response_templates: Dictionary containing response template configurations + **kwargs: Additional fields to include in the response + + Returns: + Formatted response dictionary + """ + template: Dict[str, Any] = response_templates.get(template_type, {}) + + # Create base response structure + response: Dict[str, Any] = { + 'simulation': { + 'name': sim_file, + 'type': sim_type + }, + 'status': ( + 'completed' if template_type == 'success' + else template.get('status', template_type) + ), + 'bridge_meta': bridge_meta, + 'request_id': request_id + } + + # Add timestamp according to configured format + timestamp_format: str = template.get( + 'timestamp_format', '%Y-%m-%dT%H:%M:%SZ') + response['timestamp'] = datetime.now().strftime(timestamp_format) + + # Add sequence number if available (for streaming) + if 'sequence' in kwargs: + response['sequence'] = kwargs['sequence'] + + # Add metadata if configured + if template.get('include_metadata', False) and 'metadata' in kwargs: + response['metadata'] = kwargs.get('metadata') + + # Handle specific template types using helper functions + template_handlers = { + 'success': lambda: _handle_success_response(response, sim_type, kwargs), + 'error': lambda: _handle_error_response(response, template, kwargs), + 'progress': lambda: _handle_progress_response(response, template, kwargs), + 'streaming': lambda: _handle_streaming_response(response, kwargs) + } + + # Execute the appropriate handler if it exists + handler = template_handlers.get(template_type) + if handler: + handler() + + # Add any additional keys passed in kwargs that aren't handled by specific + # cases + excluded_keys = ['outputs', 'data', 'error', + 'metadata', 'percentage', 'sequence', 'message'] + for key, value in kwargs.items(): + if key not in excluded_keys: + response[key] = value + + return response diff --git a/agents/simul8/simul8_agent/src/utils/csv_parser.py b/agents/simul8/simul8_agent/src/utils/csv_parser.py new file mode 100644 index 0000000..79ace24 --- /dev/null +++ b/agents/simul8/simul8_agent/src/utils/csv_parser.py @@ -0,0 +1,229 @@ +import os +import csv +import tempfile +from typing import Dict, List, Any, Union, Optional + +from .logger import get_logger + +logger = get_logger() + +class CSVFormatError(Exception): + """Exception raised when there's an error in CSV formatting or processing.""" + pass + +def validate_csv_structure(csv_data: Dict[str, Any]) -> None: + """ + Validate that the input data has the correct CSV structure for Simul8. 
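+
+    A minimal structure that passes validation (column names and values are
+    illustrative):
+
+        {"columns": ["arrivals", "servers"],
+         "r1": [10, 2]}
+
+    A missing or empty 'columns' list, a missing 'rN' row, or a row whose length
+    differs from 'columns' raises CSVFormatError.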
+ + Args: + csv_data: Dictionary to validate + + Raises: + CSVFormatError: If the structure is invalid + """ + if not csv_data: + raise CSVFormatError("No CSV data provided - Simul8 simulation requires input data") + + if 'columns' not in csv_data: + raise CSVFormatError( + "CSV data must contain 'columns' key - " + "Expected format: {'columns': ['col1', 'col2'], 'r1': ['val1', 'val2'], ...}" + ) + + columns = csv_data['columns'] + if not isinstance(columns, list): + raise CSVFormatError("'columns' must be a list") + + if not columns: + raise CSVFormatError("'columns' cannot be empty") + + # Check for row data (keys starting with 'r') + row_keys = [key for key in csv_data.keys() + if key.startswith('r') and key != 'columns'] + + if not row_keys: + raise CSVFormatError( + "No row data found - Expected at least one row like 'r1': ['val1', 'val2', ...]" + ) + + # Validate each row + for row_key in row_keys: + row_data = csv_data[row_key] + if not isinstance(row_data, list): + raise CSVFormatError(f"Row '{row_key}' must be a list, got {type(row_data)}") + + if len(row_data) != len(columns): + raise CSVFormatError( + f"Row '{row_key}' has {len(row_data)} values but {len(columns)} columns expected" + ) + + logger.debug(f"CSV structure validation passed: {len(columns)} columns, {len(row_keys)} rows") + +def yaml_csv_to_file( + csv_data: Dict[str, Any], + file_path: Optional[str] = None, + delimiter: str = ',' +) -> str: + """ + Convert YAML CSV structure to a proper CSV file. + + Expected format: + { + 'columns': ['col1', 'col2', 'col3'], + 'r1': ['val1', 'val2', 'val3'], + 'r2': ['val4', 'val5', 'val6'], + ... + } + + Args: + csv_data: Dictionary containing columns and row data + file_path: Output CSV file path (creates temp file if None) + delimiter: CSV delimiter + + Returns: + Path to the created CSV file + """ + # Validate the CSV structure first + validate_csv_structure(csv_data) + + columns = csv_data['columns'] + + # Create a temporary file if no file_path is provided + if not file_path: + temp_dir = tempfile.gettempdir() + file_path = os.path.join(temp_dir, f"simul8_yaml_csv_{os.getpid()}.csv") + + logger.debug(f"Converting YAML CSV data to file: {file_path}") + + # Ensure directory exists + os.makedirs(os.path.dirname(os.path.abspath(file_path)), exist_ok=True) + + try: + with open(file_path, 'w', newline='', encoding='utf-8') as csvfile: + writer = csv.writer(csvfile, delimiter=delimiter) + + # Write header row + writer.writerow(columns) + + # Write data rows - look for keys that start with 'r' (row indicators) + row_keys = [key for key in csv_data.keys() + if key.startswith('r') and key != 'columns'] + + # Sort row keys to maintain order (r1, r2, r3, etc.) + row_keys.sort(key=lambda x: int(x[1:]) if x[1:].isdigit() else float('inf')) + + for row_key in row_keys: + row_data = csv_data[row_key] + writer.writerow(row_data) + + logger.debug(f"Successfully created CSV file from YAML data at {file_path}") + return file_path + + except Exception as e: + logger.error(f"Failed to create CSV file from YAML data: {str(e)}") + raise CSVFormatError(f"Error creating CSV file from YAML data: {str(e)}") +def read_csv_to_dict( + file_path: str, + delimiter: str = ',', + transpose: bool = False, + output_mapping: Optional[Dict[str, str]] = None +) -> Dict[str, Any]: + """ + Read a CSV file into a dictionary. 
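+
+    Illustrative sketch (file contents and mapping are hypothetical). For a CSV
+    whose first two rows are:
+
+        Result 1,Result 2
+        42,3.5
+
+    read_csv_to_dict(path, output_mapping={"Result 1": "throughput"}) returns
+    {"throughput": 42, "Result 2": 3.5}: integer- and float-looking values are
+    converted to numbers, and headers are renamed only where output_mapping has
+    an entry.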
+ + Args: + file_path: Path to the CSV file + delimiter: CSV delimiter + transpose: If True, assumes first column contains keys, second contains values + output_mapping: Dictionary mapping CSV column names to desired output names + + Returns: + Dictionary containing the CSV data + """ + if not os.path.exists(file_path): + raise FileNotFoundError(f"CSV file not found: {file_path}") + + logger.debug(f"Reading CSV file: {file_path}") + + try: + with open(file_path, 'r', newline='', encoding='utf-8') as csvfile: + # Read all content first to debug + csvfile.seek(0) + raw_content = csvfile.read().strip() + logger.debug(f"Raw CSV content: '{raw_content}'") + + if not raw_content: + logger.warning("CSV file is empty") + return {} + + # Reset file pointer + csvfile.seek(0) + reader = csv.reader(csvfile, delimiter=delimiter) + rows = list(reader) + + logger.debug(f"All rows: {rows}") + + if not rows: + logger.warning("No rows found in CSV") + return {} + + # Get the header row (first row) + header_row = rows[0] + headers = [str(col).strip() for col in header_row if str(col).strip()] + + logger.debug(f"Headers found: {headers}") + + if not headers: + logger.warning("No valid headers found") + return {} + + # Find the data row (first non-empty row after header) + data_row = None + for row in rows[1:]: + if any(str(cell).strip() for cell in row): + data_row = row + break + + if not data_row: + logger.warning("No data row found") + return {header: None for header in headers} + + logger.debug(f"Data row: {data_row}") + + # Create result dictionary + results = {} + + for i, header in enumerate(headers): + if i < len(data_row): + value_str = str(data_row[i]).strip() + + if not value_str: + value = None + else: + # Try to convert to number + try: + if '.' in value_str: + value = float(value_str) + elif value_str.isdigit(): + value = int(value_str) + else: + value = value_str + except ValueError: + value = value_str + + # Apply output mapping if provided + final_key = output_mapping.get(header, header) if output_mapping else header + results[final_key] = value + logger.debug(f"Added to results: {final_key} = {value}") + else: + # Header exists but no corresponding data + final_key = output_mapping.get(header, header) if output_mapping else header + results[final_key] = None + logger.debug(f"Added to results (no data): {final_key} = None") + + logger.debug(f"Successfully parsed CSV data: {results}") + return results + + except Exception as e: + logger.error(f"Failed to read CSV file: {str(e)}") + raise CSVFormatError(f"Error reading CSV file: {str(e)}") \ No newline at end of file diff --git a/agents/simul8/simul8_agent/src/utils/logger.py b/agents/simul8/simul8_agent/src/utils/logger.py new file mode 100644 index 0000000..2030f11 --- /dev/null +++ b/agents/simul8/simul8_agent/src/utils/logger.py @@ -0,0 +1,99 @@ +""" +This module provides utilities for configuring and managing loggers with support +for file-based logging, console output, and optional colorized log messages. +It ensures proper log file rotation and customizable logging formats. 
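+
+Typical setup (the level and file path shown are examples, not requirements):
+
+    logger = setup_logger(level=logging.DEBUG, log_file="logs/simul8-agent.log")
+    logger.info("agent starting")
+    # elsewhere in the code base, retrieve the same configured logger:
+    log = get_logger()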
+""" +import logging +import sys +from logging.handlers import RotatingFileHandler +from pathlib import Path +import colorlog + +DEFAULT_LOG_FORMAT: str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' +DEFAULT_LOG_LEVEL: int = logging.INFO +MAX_LOG_SIZE: int = 5 * 1024 * 1024 # 5 MB +BACKUP_COUNT: int = 3 + + +def setup_logger( + name: str = 'SIMUL8-AGENT', + level: int = DEFAULT_LOG_LEVEL, + log_format: str = DEFAULT_LOG_FORMAT, + log_file: str = 'logs/simul8-agent.log', + enable_console: bool = True +) -> logging.Logger: + """ + Configures a logger with handlers for file and console, with optional + colorization for console logs. + + Args: + name: Name of the logger + level: Logging level + log_format: Format of the log messages + log_file: Path to the log file + enable_console: Enables logging to the console + + Returns: + Configured logger instance + """ + logger: logging.Logger = logging.getLogger(name) + logger.setLevel(level) + + # If the logger already has handlers, return it as is + if logger.handlers: + return logger + + # Ensure the log file directory exists + log_path: Path = Path(log_file) + log_path.parent.mkdir(parents=True, exist_ok=True) + + # Configure file handler with rotation + file_handler: RotatingFileHandler = RotatingFileHandler( + filename=log_file, + maxBytes=MAX_LOG_SIZE, + backupCount=BACKUP_COUNT, + encoding='utf-8' + ) + file_handler.setLevel(logging.DEBUG) + file_formatter: logging.Formatter = logging.Formatter(log_format) + file_handler.setFormatter(file_formatter) + logger.addHandler(file_handler) + + # Configure console handler with color if enabled + if enable_console: + # Create a ColorFormatter for console logs + console_handler: logging.StreamHandler = logging.StreamHandler( + sys.stdout) + console_handler.setLevel(level) + + # Define a colorized log format for console output + console_format = '%(log_color)s%(asctime)s - %(name)s - %(levelname)s - %(message)s' + color_formatter = colorlog.ColoredFormatter( + console_format, + datefmt='%Y-%m-%d %H:%M:%S', + log_colors={ + 'DEBUG': 'cyan', + 'INFO': 'green', + 'WARNING': 'yellow', + 'ERROR': 'red', + 'CRITICAL': 'bold_red', + } + ) + + console_handler.setFormatter(color_formatter) + logger.addHandler(console_handler) + + return logger + + +def get_logger(name: str = 'SIMUL8-AGENT') -> logging.Logger: + """ + Returns an instance of the already configured logger. + + Args: + name: Name of the logger + + Returns: + Logger instance + """ + return logging.getLogger(name) diff --git a/agents/simul8/simul8_agent/src/utils/performance_monitor.py b/agents/simul8/simul8_agent/src/utils/performance_monitor.py new file mode 100644 index 0000000..ae38fa6 --- /dev/null +++ b/agents/simul8/simul8_agent/src/utils/performance_monitor.py @@ -0,0 +1,273 @@ +""" +Performance monitoring utilities for the MATLAB agent. 
+""" +import csv +import os +import time +from dataclasses import dataclass +from pathlib import Path +from typing import Dict, List, Optional, Any + +import psutil + +from ..utils.logger import get_logger + +logger = get_logger() + + +@dataclass +class PerformanceMetrics: + """Data class to store performance metrics for a single operation.""" + operation_id: str + timestamp: float + request_received_time: float + matlab_start_time: float + matlab_startup_duration: float + simulation_duration: float + matlab_stop_time: float + result_send_time: float + cpu_percent: float + memory_rss_mb: float + total_duration: float + + +class PerformanceMonitor: + """ + A class to monitor and collect performance metrics for the MATLAB agent. + """ + _instance = None + _initialized = False + + def __new__(cls, *args, **kwargs): + if cls._instance is None: + cls._instance = super(PerformanceMonitor, cls).__new__(cls) + return cls._instance + + def __init__(self, config: Optional[Dict[str, Any]] = None): + """ + Initialize the performance monitor. + + Args: + config (Optional[Dict[str, Any]]): Configuration dictionary containing performance settings + """ + if not self._initialized: + self.enabled = False + self.output_dir = Path('performance_logs') + self.current_metrics = None + self.metrics_history = [] + self.process = None + self.csv_path = None + + if config: + perf_config = config.get('performance', {}) + self.enabled = perf_config.get('enabled', False) + log_dir = perf_config.get('log_dir', 'performance_logs') + log_filename = perf_config.get( + 'log_filename', 'performance_metrics.csv') + + if os.path.isabs(log_dir): + self.output_dir = Path(log_dir) + else: + self.output_dir = Path.cwd() / log_dir + + if self.enabled: + try: + self.output_dir.mkdir(parents=True, exist_ok=True) + logger.debug( + "Created performance log directory: %s", + self.output_dir) + + self.process = psutil.Process() + self.csv_path = self.output_dir / log_filename + + if not self.csv_path.exists(): + self._write_csv_headers() + logger.debug( + "Created performance metrics file: %s", self.csv_path) + + logger.debug("Performance monitoring enabled. Logs will be saved to %s", + self.output_dir) + except Exception as e: + logger.error( + "Failed to initialize performance monitoring: %s", e) + self.enabled = False + else: + logger.debug("Performance monitoring is disabled") + + self._initialized = True + + def _write_csv_headers(self): + """Write CSV headers to the output file.""" + if not self.enabled: + return + + try: + with open(self.csv_path, 'w', newline='', encoding='utf-8') as f: + writer = csv.writer(f) + writer.writerow([ + 'Operation ID', + 'Timestamp', + 'Request Received Time', + 'MATLAB Start Time', + 'MATLAB Startup Duration (s)', + 'Simulation Duration (s)', + 'MATLAB Stop Time', + 'Result Send Time', + 'CPU Usage (%)', + 'Memory RSS (MB)', + 'Total Duration (s)' + ]) + except Exception as e: + logger.error("Failed to write CSV headers: %s", e) + self.enabled = False + + def start_operation(self, operation_id: str): + """ + Start monitoring a new operation. 
+ + Args: + operation_id (str): Unique identifier for the operation + """ + if not self.enabled: + return + + self.current_metrics = PerformanceMetrics( + operation_id=operation_id, + timestamp=time.time(), + request_received_time=time.time(), + matlab_start_time=0.0, + matlab_startup_duration=0.0, + simulation_duration=0.0, + matlab_stop_time=0.0, + result_send_time=0.0, + cpu_percent=self.process.cpu_percent(), + memory_rss_mb=self.process.memory_info().rss / (1024 * 1024), + total_duration=0.0 + ) + logger.debug("Started monitoring operation %s", operation_id) + + def record_matlab_start(self): + """Record the start of MATLAB engine initialization.""" + if not self.enabled or not self.current_metrics: + return + + self.current_metrics.matlab_start_time = time.time() + self._update_system_metrics() + + def record_matlab_startup_complete(self): + """Record the completion of MATLAB engine initialization.""" + if not self.enabled or not self.current_metrics: + return + + startup_duration = time.time() - self.current_metrics.matlab_start_time + self.current_metrics.matlab_startup_duration = startup_duration + self._update_system_metrics() + logger.debug("MATLAB startup duration: %.2fs", startup_duration) + + def record_simulation_complete(self): + """Record the completion of the simulation.""" + if not self.enabled or not self.current_metrics: + return + + self.current_metrics.simulation_duration = ( + time.time() - self.current_metrics.matlab_start_time - + self.current_metrics.matlab_startup_duration + ) + self._update_system_metrics() + + def record_matlab_stop(self): + """Record the stop of MATLAB engine.""" + if not self.enabled or not self.current_metrics: + return + + self.current_metrics.matlab_stop_time = time.time() + self._update_system_metrics() + + def record_result_sent(self): + """Record when results are sent.""" + if not self.enabled or not self.current_metrics: + return + + self.current_metrics.result_send_time = time.time() + self._update_system_metrics() + + def _update_system_metrics(self): + """Update system resource metrics.""" + if not self.enabled or not self.current_metrics: + return + + self.current_metrics.cpu_percent = self.process.cpu_percent() + self.current_metrics.memory_rss_mb = ( + self.process.memory_info().rss / (1024 * 1024) + ) + + def complete_operation(self): + """Complete the current operation and save metrics.""" + if not self.enabled or not self.current_metrics: + return + + self.current_metrics.total_duration = ( + time.time() - self.current_metrics.request_received_time + ) + self.metrics_history.append(self.current_metrics) + self._save_metrics_to_csv(self.current_metrics) + logger.debug( + "Completed operation %s in %.2fs", + self.current_metrics.operation_id, + self.current_metrics.total_duration + ) + self.current_metrics = None + + def _save_metrics_to_csv(self, metrics: PerformanceMetrics): + """ + Save metrics to CSV file. 
+ + Args: + metrics (PerformanceMetrics): The metrics to save + """ + if not self.enabled: + return + + with open(self.csv_path, 'a', newline='', encoding='utf-8') as f: + writer = csv.writer(f) + writer.writerow([ + metrics.operation_id, + metrics.timestamp, + metrics.request_received_time, + metrics.matlab_start_time, + metrics.matlab_startup_duration, + metrics.simulation_duration, + metrics.matlab_stop_time, + metrics.result_send_time, + metrics.cpu_percent, + metrics.memory_rss_mb, + metrics.total_duration + ]) + + def get_summary(self) -> Dict[str, float]: + """ + Get a summary of performance metrics across all operations. + + Returns: + Dict[str, float]: Summary statistics + """ + if not self.enabled or not self.metrics_history: + return {} + + startup_times = [ + m.matlab_startup_duration for m in self.metrics_history] + simulation_times = [m.simulation_duration for m in self.metrics_history] + total_times = [m.total_duration for m in self.metrics_history] + + return { + 'avg_startup_time': sum(startup_times) / len(startup_times), + 'min_startup_time': min(startup_times), + 'max_startup_time': max(startup_times), + 'avg_simulation_time': sum(simulation_times) / len(simulation_times), + 'min_simulation_time': min(simulation_times), + 'max_simulation_time': max(simulation_times), + 'avg_total_time': sum(total_times) / len(total_times), + 'min_total_time': min(total_times), + 'max_total_time': max(total_times), + 'total_operations': len(self.metrics_history) + } diff --git a/agents/simul8/simulation_batch.s8 b/agents/simul8/simulation_batch.s8 new file mode 100644 index 0000000..8dbcd2e Binary files /dev/null and b/agents/simul8/simulation_batch.s8 differ