From f8fad844d74e705249429bd5fd2295e7f8203cef Mon Sep 17 00:00:00 2001 From: whb0514 <112596503+whb0514@users.noreply.github.com> Date: Mon, 2 Sep 2024 13:31:06 +0800 Subject: [PATCH] PLUS4's moonraker MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 8 +- .readthedocs.yaml | 6 +- LICENSE | 2 +- README.md | 55 +- docs/api_changes.md | 47 +- docs/changelog.md | 181 + docs/configuration.md | 1637 ++++++- docs/contributing.md | 47 +- docs/doc-requirements.txt | 4 +- docs/index.md | 4 +- docs/installation.md | 501 +- docs/moonraker.conf | 3 +- docs/printer_objects.md | 19 +- docs/src/css/extras.css | 7 + docs/user_changes.md | 79 +- docs/web_api.md | 4307 ++++++++++++++--- mkdocs.yml | 118 +- moonraker/__init__.py | 5 + moonraker/__main__.py | 9 + moonraker/assets/__init__.py | 1 + moonraker/assets/default_allowed_services | 10 + moonraker/assets/welcome.html | 283 +- moonraker/common.py | 1302 +++++ moonraker/components/announcements.py | 51 +- moonraker/components/application.py | 1180 +++++ moonraker/components/authorization.py | 684 ++- moonraker/components/button.py | 41 +- moonraker/components/data_store.py | 113 +- moonraker/components/database.py | 2163 ++++++--- moonraker/components/dbus_manager.py | 32 +- moonraker/components/extensions.py | 178 +- moonraker/components/file_manager/__init__.py | 2 +- .../components/file_manager/file_manager.py | 1880 ++++--- moonraker/components/file_manager/metadata.py | 529 +- moonraker/components/gpio.py | 317 +- moonraker/components/history.py | 737 ++- moonraker/components/http_client.py | 111 +- moonraker/components/job_queue.py | 114 +- moonraker/components/job_state.py | 50 +- moonraker/components/klippy_apis.py | 226 +- moonraker/components/klippy_connection.py | 816 ++++ moonraker/components/ldap.py | 13 +- moonraker/components/machine.py | 1606 +++++- moonraker/components/mqtt.py | 321 +- moonraker/components/notifier.py | 225 +- moonraker/components/octoprint_compat.py | 79 +- moonraker/components/paneldue.py | 141 +- moonraker/components/power.py | 651 ++- moonraker/components/proc_stats.py | 130 +- moonraker/components/secrets.py | 32 +- moonraker/components/sensor.py | 346 ++ moonraker/components/shell_command.py | 248 +- moonraker/components/simplyprint.py | 1690 +++++++ moonraker/components/spoolman.py | 424 ++ moonraker/components/template.py | 45 +- .../components/update_manager/__init__.py | 2 +- .../components/update_manager/app_deploy.py | 481 +- .../components/update_manager/base_deploy.py | 39 +- moonraker/components/update_manager/common.py | 97 + .../components/update_manager/git_deploy.py | 1261 +++-- .../update_manager/system_deploy.py | 556 +++ .../update_manager/update_manager.py | 1206 ++--- .../components/update_manager/zip_deploy.py | 702 ++- moonraker/components/webcam.py | 347 +- moonraker/components/websockets.py | 498 ++ moonraker/components/wled.py | 67 +- moonraker/components/zeroconf.py | 370 +- moonraker/confighelper.py | 833 +++- moonraker/eventloop.py | 74 +- moonraker/loghelper.py | 165 + moonraker/moonraker.py | 522 +- moonraker/server.py | 712 +++ moonraker/utils/__init__.py | 281 ++ moonraker/utils/cansocket.py | 199 + moonraker/utils/filelock.py | 111 + moonraker/utils/ioctl_macros.py | 77 + moonraker/utils/json_wrapper.py | 33 + moonraker/utils/pip_utils.py | 247 + moonraker/utils/source_info.py | 88 + moonraker/utils/sysfs_devs.py | 467 ++ moonraker/utils/versions.py | 383 ++ pyproject.toml | 71 + scripts/backup-database.sh | 
50 + scripts/data-path-fix.sh | 65 + scripts/dbtool.py | 14 +- scripts/fetch-apikey.sh | 2 +- scripts/finish-upgrade.sh | 104 + scripts/install-moonraker.sh | 170 +- scripts/make_sysdeps.py | 57 + scripts/moonraker-requirements.txt | 27 +- scripts/moonraker-speedups.txt | 2 + scripts/pdm_build_dist.py | 80 + .../zeroconf-0.131.0-py3-none-any.whl | Bin 0 -> 115593 bytes scripts/restore-database.sh | 55 + scripts/set-policykit-rules.sh | 4 + scripts/system-dependencies.json | 13 + tests/conftest.py | 6 +- tests/test_config.py | 8 +- tests/test_database.py | 4 +- tests/test_klippy_connection.py | 6 +- tests/test_server.py | 12 +- 101 files changed, 27357 insertions(+), 7021 deletions(-) create mode 100644 docs/changelog.md create mode 100644 docs/src/css/extras.css create mode 100644 moonraker/__init__.py create mode 100644 moonraker/__main__.py create mode 100644 moonraker/assets/__init__.py create mode 100644 moonraker/assets/default_allowed_services create mode 100644 moonraker/common.py create mode 100644 moonraker/components/application.py create mode 100644 moonraker/components/klippy_connection.py create mode 100644 moonraker/components/sensor.py create mode 100644 moonraker/components/simplyprint.py create mode 100644 moonraker/components/spoolman.py create mode 100644 moonraker/components/update_manager/common.py create mode 100644 moonraker/components/update_manager/system_deploy.py create mode 100644 moonraker/components/websockets.py create mode 100644 moonraker/loghelper.py create mode 100644 moonraker/server.py create mode 100644 moonraker/utils/__init__.py create mode 100644 moonraker/utils/cansocket.py create mode 100644 moonraker/utils/filelock.py create mode 100644 moonraker/utils/ioctl_macros.py create mode 100644 moonraker/utils/json_wrapper.py create mode 100644 moonraker/utils/pip_utils.py create mode 100644 moonraker/utils/source_info.py create mode 100644 moonraker/utils/sysfs_devs.py create mode 100644 moonraker/utils/versions.py create mode 100644 pyproject.toml create mode 100644 scripts/backup-database.sh create mode 100644 scripts/data-path-fix.sh create mode 100644 scripts/finish-upgrade.sh create mode 100644 scripts/make_sysdeps.py create mode 100644 scripts/moonraker-speedups.txt create mode 100644 scripts/pdm_build_dist.py create mode 100644 scripts/python_wheels/zeroconf-0.131.0-py3-none-any.whl create mode 100644 scripts/restore-database.sh create mode 100644 scripts/system-dependencies.json diff --git a/.gitignore b/.gitignore index e72ad8f..fd664c3 100644 --- a/.gitignore +++ b/.gitignore @@ -3,5 +3,11 @@ __pycache__/ *.py[cod] *$py.class .devel -.venv +.venv +venv +start_moonraker +*.env +.pdm-python +build +dist diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 7e23dd5..216eb59 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -1,10 +1,14 @@ version: 2 +build: + os: ubuntu-22.04 + tools: + python: "3.11" + mkdocs: configuration: mkdocs.yml fail_on_warning: false python: - version: 3.8 install: - requirements: docs/doc-requirements.txt diff --git a/LICENSE b/LICENSE index f288702..e62ec04 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ - GNU GENERAL PUBLIC LICENSE +GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. diff --git a/README.md b/README.md index 16a58ef..384152d 100644 --- a/README.md +++ b/README.md @@ -1,40 +1,43 @@ -
[image: QIDI's logo]
-
-[badge: GPL-V3.0 License]
-# Document Instructions -The 3D printers of QIDI are based on Klipper.Based on the Klipper open source project, we have made some modifications to its source code to meet some of the user's needs.At the same time, we have also made modifications to Moonraker, so that the screens we set can correspond to the operations on the page. -Thanks to the developers and maintainers of these open source projects.Please consider using or supporting these powerful projects. -- **Klipper** -- **Moonraker** +# Moonraker - API Web Server for Klipper -1. This document provides the modified moonraker version of QIDI. -2. This document only provides methods for replacing source code for updates. -***Please note that manual updates may affect normal after-sales service.*** +Moonraker is a Python 3 based web server that exposes APIs that +client applications may use to interact with the 3D printing firmware +[Klipper](https://github.com/KevinOConnor/klipper). Communication between +the Klippy host and Moonraker is done over a Unix Domain Socket. Tornado +is used to provide Moonraker's server functionality. -## Detailed update process -1. Connect your printer device through SSH. -2. Confirm which software you need to replace.Download the corresponding file and replace the software through SSH connection.The following are the paths of each software within the system. +Documentation for users and developers can be found on +[Read the Docs](https://moonraker.readthedocs.io/en/latest/). - Software|Directory - ---|--- - klipper|/home/mks/ - moonraker|/home/mks/ +### Clients -3. If there is no need to update xindi, simply replace it. For example, if I replace the klipper folder, save it and restart it. +Note that Moonraker does not come bundled with a client; you will need to +install one. The following clients are currently available: -## Report Issues and Make Suggestions -You can contact [After-Sales Service](https://qidi3d.com/pages/warranty-policy-after-sales-support) to report issues and make suggestions. +- [Mainsail](https://github.com/mainsail-crew/mainsail) by [Mainsail-Crew](https://github.com/mainsail-crew) +- [Fluidd](https://github.com/fluidd-core/fluidd) by Cadriel +- [KlipperScreen](https://github.com/jordanruthe/KlipperScreen) by jordanruthe +- [mooncord](https://github.com/eliteSchwein/mooncord) by eliteSchwein +### Raspberry Pi Images +Moonraker is available pre-installed with the following Raspberry Pi images: +- [MainsailOS](https://github.com/mainsail-crew/MainsailOS) by [Mainsail-Crew](https://github.com/mainsail-crew) + - Includes Klipper, Moonraker, and Mainsail +- [FluiddPi](https://github.com/fluidd-core/FluiddPi) by Cadriel + - Includes Klipper, Moonraker, and Fluidd +### Docker Containers +The following projects deploy Moonraker via Docker: +- [prind](https://github.com/mkuf/prind) by mkuf + - A suite of containers which allow you to run Klipper in + Docker. Includes support for OctoPrint and Moonraker. +### Changes - - - - - - +Please refer to the [changelog](https://moonraker.readthedocs.io/en/latest/changelog) +for a list of notable changes to Moonraker. diff --git a/docs/api_changes.md b/docs/api_changes.md index c16e2b7..29d6a92 100644 --- a/docs/api_changes.md +++ b/docs/api_changes.md @@ -1,10 +1,53 @@ ## -This document keeps a record of all changes to Moonraker's web APIs. +This document keeps a record of notable changes to Moonraker's Web API. + +### July 18th 2023 +- Moonraker API Version 1.3.0 +- Added [Spoolman](web_api.md#spoolman-apis) APIs. 
+- Added [Rollback](web_api.md#rollback-to-the-previous-version) API to + the `update_manager` +- The `update_manager` status response has new fields for items of the + `git_repo` and `web` types: + - `recovery_url`: URL of the repo a "hard" recovery will fetch from + - `rollback_version`: Version the extension will revert to when a rollback + is requested + - `warnings`: An array of strings containing various warnings detected + during repo init. Some warnings may explain an invalid state while + others may alert users to potential issues, such as a `git_repo` remote + url not matching the expected (ie: configured) url. + - Additionally, the `need_channel_update` field has been removed, as the method + of changing channels is done exclusively in the configuration. + +### February 20th 2023 +- The following new endpoints are available when at least one `[sensor]` + section has been configured: + - `GET /server/sensors/list` + - `GET /server/sensors/sensor` + - `GET /server/sensors/measurements` + + See [web_api.md](web_api.md) for details on these new endpoints. +- A `sensors:sensor_update` notification has been added. When at least one + monitored sensor is reporting a changed value Moonraker will broadcast this + notification. + + See [web_api.md](web_api.md) for details on this new notification. + +### February 17th 2023 +- Moonraker API Version 1.2.1 +- An error in the return value for some file manager endpoints has + been corrected. Specifically, the returned result contains an `item` object + with a `path` field that was prefixed with the root (ie: "gcodes"). + This is inconsistent with the websocket notification and has been corrected + to remove the prefix. This affects the following endpoints: + - `POST /server/files/directory` | `server.files.post_directory` + - `DELETE /server/files/directory` | `server.files.delete_directory` + - `POST /server/files/move` | `server.files.move` + - `POST /server/files/copy` | `server.files.copy` ### March 4th 2022 - Moonraker API Version 1.0.1 - The `server.websocket.id` endpoint has been deprecated. It is - recommended to use `server.connection.idenitfy` method to identify + recommended to use the `server.connection.identify` method to identify your client. This method returns a `connection_id` which is the websocket's unique id. See [the documentation](web_api.md#identify-connection) for details. diff --git a/docs/changelog.md b/docs/changelog.md new file mode 100644 index 0000000..7fcc5c1 --- /dev/null +++ b/docs/changelog.md @@ -0,0 +1,181 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog]. + +## [Unreleased] + +### Added +- **notifier**: The `attach` option now supports Jinja2 templates. +- **notifier**: The `attach` option may now contain multiple attachments, + each separated by a newline. +- **notifier**: Added support for a configurable `body_format` +- **power**: Added support for generic `http` type switches. +- **metadata**: Added support for OrcaSlicer +- **zeroconf**: Added support for a configurable mDNS hostname. +- **zeroconf**: Added support for UPnP/SSDP Discovery. +- **spoolman**: Added integration to the + [Spoolman](https://github.com/Donkie/Spoolman) filament manager. 
+- **update_manager**: Added support for update rollbacks +- **update_manager**: Added support for stable `git_repo` updates +- **server**: Added a `--unixsocket` command line option +- **server**: Command line options may also be specified as env variables +- **server**: Added a `route_prefix` option +- **webcam**: Webcam APIs can now specify cameras by `uid` or `name` +- **deps**: Added support for optional `msgspec` and `uvloop` packages +- **extensions**: Agents may now register remote methods with Klipper +- **file_manager**: Added `check_klipper_config_path` option +- **button**: Added `debounce_period` option +- **history**: Added a check for previous jobs not finished (ie: when power is + lost during a print). These jobs will report their status as `interrupted`. +- **build**: Added support for optional speedup dependencies `uvloop` and `msgspec` +- **update_manager**: Added support for "zipped" application updates +- **file_manager**: Added `enable_config_write_access` option +- **machine**: Added support for system peripheral queries +- **mqtt**: Added the `status_interval` option to support rate limiting +- **mqtt**: Added the `enable_tls` option to support ssl/tls connections +- **history**: Added `user` field to job history data +- **history**: Added support for auxiliary history fields +- **spoolman**: Report spool ids set during a print in history auxiliary data +- **sensor**: Added support for history fields reported in auxiliary data +- **power**: Added support for `uhubctl` devices +- **update_manager**: Added support for pinned git commits + +### Fixed + +- **simplyprint**: Fixed import error preventing the component from loading. +- **update_manager**: Moonraker will now restart the correct "moonraker" and + "klipper" services if they are not the default values. +- **job_queue**: Fixed transition when auto is disabled +- **history**: Added modification time to file existence checks. +- **dbus_manager**: Fixed PolKit warning when PolKit features are not used. +- **job_queue**: Fixed a bug where the `job_transition_gcode` runs when the + queue is started. It will now only run between jobs during automatic + transition. +- **klippy_connection**: Fixed a race condition that can result in + skipped subscription updates. +- **confighelper**: Fixed inline comment parsing. +- **authorization**: Fixed blocking call to `socket.getfqdn()` +- **power**: Fixed "on_when_job_queued" behavior when the internal device + state is stale. + +### Changed + +- **build**: Bumped apprise to version `1.8.0`. +- **build**: Bumped lmdb to version `1.4.1` +- **build**: Bumped tornado to version `6.4.0` +- **build**: Bumped jinja2 to version `3.1.4` +- **build**: Bumped zeroconf to version `0.131.0` +- **build**: Bumped libnacl to version `2.1.0` +- **build**: Bumped distro to version `1.9.0` +- **build**: Bumped pillow to version `10.3.0` +- **build**: Bumped streaming-form-data to version `1.15.0` +- **machine**: Added `ratos-configurator` to the list of default allowed services +- **update_manager**: It is now required that an application be "allowed" + for Moonraker to restart it after an update. +- **update_manager**: Git repo validation no longer requires a match for the + remote URL and/or branch. +- **update_manager**: Fixed potential security vulnerabilities in `web` type updates. + This change adds a validation step to the install; front-end developers may refer to + the [configuration documentation](./configuration.md#web-type-front-end-configuration) + for details. 
+- **update_manager**: The `env` option for the `git_repo` type has been deprecated; new + configurations should use the `virtualenv` option. +- **update_manager**: The `install_script` option for the `git_repo` has been + deprecated; new configurations should use the `system_dependencies` option. +- **update_manager**: APIs that return status report additional fields. + See the [API Documentation](./web_api.md#get-update-status) for details. +- **proc_stats**: Improved performance of Raspberry Pi CPU throttle detection. +- **power**: Bound services are now processed during initialization when + `initial_state` is configured. +- **gpio**: Migrated from libgpiod to python-periphery +- **authorization**: The authorization module is now loaded as part of Moonraker's + core. +- **database**: Migrated the underlying database from LMDB to Sqlite. +- **history**: Use dedicated SQL tables to store job history and job totals. +- **authorization**: Use a dedicated SQL table to store user data. + +## [0.8.0] - 2023-02-23 + +!!! Note + This is the first tagged release since a changelog was introduced. The list + below contains notable changes introduced beginning in February 2023. Prior + notable changes were kept in [user_changes.md] and [api_changes.md]. + +### Added + +- Added this changelog! +- Added pyproject.toml with support for builds through [pdm](https://pdm.fming.dev/latest/). +- **sensor**: New component for generic sensor configuration. + - [Configuration Docs](configuration.md#sensor) + - [API Docs](web_api.md#sensor-apis) + - [Websocket Notification Docs](web_api.md#sensor-events) +- **file_manager**: Added new [scan metadata](web_api.md#scan-gcode-metadata) endpoint. +- **file_manager**: Added new [thumbnails](web_api.md#get-gcode-thumbnails) endpoint. +- **file_manager**: Added [file_system_observer](configuration.md#file_manager) + configuration option. +- **file_manager**: Added [enable_observer_warnings](configuration.md#file_manager) + configuration option. +- **file_manager**: Added ability to upload to symbolic links. +- **metadata**: Added support for Simplify3D V5 metadata parsing +- **machine**: Added [shutdown_action](configuration.md#machine) configuration + option. +- **machine**: Added service detection to the `supervisord_cli` provider. +- **machine**: Added `octoeverywhere` to the list of default allowed services. +- **power**: Added support for "Hue" device groups. +- **websockets**: Added support for [direct bridge](web_api.md#bridge-websocket) + connections. +- **update_manager**: Added new [refresh](web_api.md#refresh-update-status) endpoint. +- **update_manager**: Added support for pinned pip upgrades. +- **websockets**: Added support for post connection authentication over the websocket. +- **scripts**: Added database backup and restore scripts. + +### Changed + +- Converted Moonraker source into a Python package. +- The source from `moonraker.py` has been moved to `server.py`. The remaining code in + `moonraker.py` serves as a legacy entry point for launching Moonraker. +- **file_manager**: Improved inotify synchronization with API requests. +- **file_manager**: Endpoint return values are now consistent with their + respective websocket notifications. +- **machine**: The [provider](configuration.md#machine) configuration option + now expects `supervisord_cli` instead of `supervisord`. +- **update_manager**: Relaxed requirement for git repo tag detection. Now only two + parts are required (ie: v1.5 and v1.5.0 are acceptable). 
### Deprecated + +- **file_manager**: The `enable_inotify_warnings` configuration option has been + deprecated in favor of `enable_observer_warnings`. + +### Fixed + +- **file_manager**: Fixed an edge condition where `create_file` notifications + may be sent before a `create_dir` notification. +- **power**: Fixed URL encoding issues for http devices. +- **template**: A ConfigError is now raised when a template fails to + render during configuration. +- **machine**: Fixed support for Supervisord Version 4 and above. +- **update_manager**: Added package resolution step to the APT backend. +- **update_manager**: Fixed PackageKit resolution step for 64-bit systems. +- **update_manager**: Fixed Python requirements file parsing. Comments are now ignored. + +### Removed + +- Pycurl dependency. Moonraker no longer uses Tornado's curl based http client. + +## [0.7.1] - 2021-07-08 + +- Experimental pre-release + + +[keep a changelog]: https://keepachangelog.com/en/1.0.0/ +[semantic versioning]: https://semver.org/spec/v2.0.0.html +[user_changes.md]: user_changes.md +[api_changes.md]: api_changes.md + + +[unreleased]: https://github.com/Arksine/moonraker/compare/v0.8.0...HEAD +[0.8.0]: https://github.com/Arksine/moonraker/compare/v0.7.1...v0.8.0 +[0.7.1]: https://github.com/Arksine/moonraker/releases/tag/v0.7.1 \ No newline at end of file diff --git a/docs/configuration.md b/docs/configuration.md index 06448ec..22a8c6a 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1,14 +1,58 @@ # -This document describes Moonraker's full configuration. By default Mooonraker +This document describes Moonraker's full configuration. By default Moonraker loads the configuration file from `~/moonraker.conf`, however prebuilt images such as MainsailOS and FluiddPi configure Moonraker to load the -configuration from `~/klipper_config/moonraker.conf`. +configuration from `~/printer_data/config/moonraker.conf`. As this document references configuration for both Klipper (`printer.cfg`) and Moonraker (`moonraker.conf`), each example contains a comment indicating which configuration file is being referenced. A basic [sample configuration](./moonraker.conf) is available in the `docs` directory. +Moonraker uses an ini style configuration very close to that of Klipper. +Comments are supported and may be specified by either a `#` or `;` character. +Inline comments are also supported and are evaluated according to the following +rules: + +- At least one whitespace character must separate the configuration data and the + comment specifier. +- Specifiers that are not preceded by whitespace will be considered part of + the configuration. +- If it is necessary for a value to include whitespace followed by one + of the comment specifiers, the specifier may be escaped using a backslash, + ie: ` \#`. +- Only specifiers preceded by whitespace may be escaped. + +For example: + +```ini +# This is a comment +[section_name] # This is a comment +opt_one: http://this.is/#not-a-comment +opt_two: This is also \# not a comment +opt_three: This is the value # this is a comment +opt_four: Once again\# not a comment +``` + +- Option `opt_one` resolves to a value of `http://this.is/#not-a-comment`. + The `#` is not preceded by whitespace and not evaluated as an inline comment. +- Option `opt_two` resolves to a value of `This is also # not a comment`. The + ` \#` is evaluated as a valid escape sequence. The backslash is removed and the + resulting `#` is stored in the value. +- Option `opt_three` resolves to a value of `This is the value`. 
The comment + specifier is preceded by whitespace, thus the remainder of the line is + evaluated as a comment and removed from the option. +- Option `opt_four` resolves to a value of `Once again\# not a comment`. + The `\` character is not preceded by whitespace and not evaluated as + an escape sequence, thus the escape character is not removed from the value. + +Moonraker uses strict parsing rules. A configuration file may not +contain multiple sections of the same name. A section may not contain +multiple options of the same name. However, configuration files included +using [include directives](#include-directives) may contain sections +specified in other files, and those sections may contain options +specified in other files. + ## Core Components Moonraker's core components are always loaded regardless of configuration. @@ -25,29 +69,51 @@ This section is required. [server] host: 0.0.0.0 # The host address in which to bind the HTTP server. Default is to bind -# to all interfaces +# to all ipv4 interfaces. If set to "all" the server will bind to all +# ipv4 and ipv6 interfaces. port: 7125 # The port the HTTP server will listen on. Default is 7125 ssl_port: 7130 # The port to listen on for SSL (HTTPS) connections. Note that the HTTPS # server will only be started if the certificate and key options outlined # below are provided. The default is 7130. -ssl_certificate_path: -# The path to a self signed ssl certificate. The default is no path, which -# disables HTTPS. -ssl_key_path: -# The path to the private key used to signed the certificate. The default -# is no path, which disables HTTPS. klippy_uds_address: /tmp/klippy_uds -# The address of Unix Domain Socket used to communicate with Klippy. Default -# is /tmp/klippy_uds +# The address of Unix Domain Socket used to communicate with Klippy. This +# option accepts Jinja2 Templates, where the configured data path is +# passed to the template context, for example: +# klippy_uds_address: {data_path}/comms/klippy.sock +# +# Default is /tmp/klippy_uds. +route_prefix: +# A prefix prepended to the path for each HTTP endpoint. For example +# if the route_prefix is set to moonraker/printer1, then the server info +# endpoint is available at: +# http://myprinter.local/moonraker/printer1/server/info +# +# This is primarily useful for installations that feature multiple instances +# of Moonraker, as it allows a reverse proxy to identify the correct instance based +# on the path and redirect requests without a rewrite. Note that frontends must feature +# support for HTTP endpoints with a route prefix to communicate with Moonraker when +# this option is set. The default is no route prefix. max_upload_size: 1024 # The maximum size allowed for a file upload (in MiB). Default is 1024 MiB. +max_websocket_connections: +# The maximum number of concurrently open websocket connections. +# The default is 50. enable_debug_logging: False -# When set to True Moonraker will log in verbose mode. During this stage -# of development the default is False. +# ***DEPRECATED*** +# Verbose logging is enabled by the '-v' command line option. ``` +!!! Note + Previously the `[server]` section contained `ssl_certificate_path` and + `ssl_key_path` options. These options are now deprecated, as both locations + are determined by the `data path` and `alias` configured on the command + line, ie: `<data_path>/certs/<alias>.cert`. By default the certificate + path resolves to `$HOME/moonraker_data/certs/moonraker.cert` and the key + path resolves to `$HOME/moonraker_data/certs/moonraker.key`. Both files + may be symbolic links.
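For illustration, a minimal sketch of a multi-instance setup might combine `route_prefix` with a templated `klippy_uds_address`; the instance name below is a hypothetical placeholder:

```ini
# moonraker.conf

[server]
host: 0.0.0.0
port: 7125
# Serve this instance's endpoints under /moonraker/printer1
route_prefix: moonraker/printer1
# Resolve the Klippy socket relative to the configured data path
klippy_uds_address: {data_path}/comms/klippy.sock
```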
### `[file_manager]` The `file_manager` section provides configuration for Moonraker's file management functionality. If omitted defaults will be used. ```ini # moonraker.conf - -config_path: -# The path to a directory where configuration files are located. This -# directory may contain Klipper config files (printer.cfg) or Moonraker -# config files (moonraker.conf). Clients may also write their own config -# files to this directory. Note that this may not be the system root -# (ie: "/") and moonraker must have read and write access permissions -# for this directory. -log_path: -# An optional path to a directory where log files are located. Users may -# configure various applications to store logs here and Moonraker will serve -# them at "/server/files/logs/*". The default is no log paths. queue_gcode_uploads: False # When set to True the file manager will add uploads to the job_queue when # the `start_print` flag has been set. The default is False. +check_klipper_config_path: True +# By default Moonraker will validate that Klipper's configuration file exists +# within the data path's "config" folder, as this is a requirement for +# Moonraker to write to the configuration. If this validation check fails +# Moonraker will warn the user. Installations that do not wish to use Moonraker +# to manage Klipper's configuration may set this option to False to bypass the +# location check. The default is True. enable_object_processing: False # When set to True gcode files will be run through a "preprocessor" # during metadata extraction if object tags are detected. This preprocessor # is required to support the "cancel object" functionality. Note that this process is file I/O intensive, # it is not recommended for usage on low resource SBCs such as a Pi Zero. # The default is False. +file_system_observer: inotify +# The observer used to monitor file system changes. May be inotify or none. +# When set to none file system observation is disabled. The default is +# inotify. +enable_observer_warnings: True +# When set to True Moonraker will generate warnings when an observer +# encounters an error. This may be useful to determine if the observer +# is malfunctioning. The default is True. +enable_inotify_warnings: True +# *** DEPRECATED - SEE "enable_observer_warnings" *** +# When set to True Moonraker will generate warnings when inotify attempts +# to add a duplicate watch or when inotify encounters an error. On some +# file systems inotify may not work as expected, this gives users the +# option to suppress warnings when necessary. The default is True. +enable_config_write_access: True +# When enabled the configuration folder is writable over the API. Some +# installations, such as those in public areas, may wish to lock out +# configuration changes. The default is True. ``` -!!! Warning - Moonraker currently supports two paths with read/write access, the - `config_path` configured in the `file_manager` and the `virtual_sdcard` path - configured through Klipper in `printer.cfg`. These paths are monitored for - changes, thus they must not overlap. Likewise, these paths may not be a - parent or child of folders containing sensitive files such as the `database`, - Moonraker's source, or Klipper's source. If either of the above conditions - are present Moonraker will generate a warning and revoke access to the - offending path. +!!! Note + Previously the `[file_manager]` section contained `config_path` and + `log_path` options. These options are now deprecated, as both locations + are determined by the `data path` configured on the command line. !!! Tip It is also possible to enable object processing directly in the slicer.
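As a brief sketch, a `[file_manager]` section enabling the optional behaviors described above might look like the following; the values shown are examples only:

```ini
# moonraker.conf

[file_manager]
# Queue uploads that request an immediate print
queue_gcode_uploads: True
# Preprocess gcode to support "cancel object"
enable_object_processing: True
# Monitor file system changes with inotify
file_system_observer: inotify
```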
### `[machine]` The `machine` component provides access to operating system information @@ -106,8 +180,37 @@ with system services such as systemd. provider: systemd_dbus # The provider implementation used to collect system service information # and run service actions (ie: start, restart, stop). This can be "none", -# "systemd_dbus", or "systemd_cli". If the provider is set to "none" service -# action APIs will be disabled. The default is systemd_dbus. +# "supervisord_cli", "systemd_dbus", or "systemd_cli". If the provider is +# set to "none" service action APIs will be disabled. +# The default is systemd_dbus. +shutdown_action: poweroff +# Determines the action Moonraker will take when a shutdown is requested. +# This option may be set to "halt" or "poweroff". Not all linux distributions +# support poweroff; in such scenarios it is necessary to specify 'halt'. +# The default is "poweroff". +sudo_password: +# The password for the linux user. When set Moonraker can run linux commands +# that require elevated permissions. This option accepts Jinja2 Templates, +# see the [secrets] section for details. It is strongly recommended to only +# set this option when required and to use the aforementioned secrets module +# when doing so. The default is no sudo password is set. +validate_service: +# Enables validation of Moonraker's systemd service unit. If Moonraker +# detects that a change is necessary it will attempt to do so. Custom +# installations and installations that do not use systemd should set this to False. +# The default is True. +validate_config: +# Enables validation of Moonraker's configuration. If Moonraker detects +# deprecated options it will attempt to correct them. The default is True. +force_validation: +# By default Moonraker will not attempt to revalidate if a previous attempt +# at validation successfully completed. Setting this value to True will force +# Moonraker to perform validation. The default is False. +supervisord_config_path: +# Path to the supervisord config file. This is required when multiple +# supervisord instances are running on a single machine and the default +# '/var/run/supervisord.sock' is occupied by other services. +# The default is no path. ``` !!! Note @@ -119,10 +222,34 @@ provider: systemd_dbus service. This service is necessary for the DBus provider to issue `reboot` and `shutdown` commands. In this scenario, Moonraker will fall back to CLI based `reboot` and `shutdown` commands. These commands require - that Moonraker be able to run `sudo` commands without a password. + that Moonraker be able to run `sudo` commands without a password or that the + `sudo_password` option is set. + Alternatively it may be possible to enable the `systemd-logind` service; consult your distribution's documentation. +#### Allowed Services + +The `machine` component uses the configured provider to manage services +on the system (ie: restart a service). Moonraker is authorized to manage +the `moonraker` and `klipper` services, including those that match common +multi-instance patterns, such as `moonraker-1`, `klipper_2`, and `moonraker1`. + +Moonraker may be authorized to manage additional services by modifying +`<data_path>/moonraker.asvc`. By default this file includes the +following services: + +- `klipper_mcu` +- `webcamd` +- `MoonCord` +- `KlipperScreen` +- `moonraker-telegram-bot` +- `sonar` +- `crowsnest` + +Note that systemd units are case sensitive, so the case must match +when adding a value to `moonraker.asvc`. +
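For example, assuming the file lists one systemd unit name per line, authorizing one additional (hypothetical) service might look like this; `my_custom_daemon` is a placeholder:

```
klipper_mcu
webcamd
my_custom_daemon
```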
#### Reboot / Shutdown from Klipper It is possible to call the `shutdown_machine` and `reboot_machine` @@ -142,24 +269,11 @@ gcode: ### `[database]` -The `database` section provides configuration for Moonraker's lmdb database. -If omitted defaults will be used. - -```ini -moonraker.conf - -database_path: ~/.moonraker_database -# The path to the folder that stores Moonraker's lmdb database files. -# It is NOT recommended to place this file in a location that is served by -# Moonraker (such as the "config_path" or the location where gcode -# files are stored). If the folder does not exist an attempt will be made -# to create it. The default is ~/.moonraker_database. -``` - !!! Note - Previously the `enable_database_debug` option was available for internal - development to test changes to write protected namespaces. This option - been deprecated and disabled. + This section no longer has configuration options. Previously the + `database_path` option was used to determine the location of + the database folder; it is now determined by the `data path` + configured on the command line. ### `[data_store]` @@ -239,7 +353,7 @@ may be configured directly through front-ends and added to the database, however it is also possible for users to configure one or more webcams in `moonraker.conf`. If a webcam is configured in `moonraker.conf` it takes precedence over a webcam in the database by the same name. The options -available may not apply to all front-ends, refer to your front-end's +available may not apply to all front ends; refer to your front end's documentation for details. ```ini @@ -247,12 +361,24 @@ location: printer # A description of the webcam location, ie: what the webcam is observing. # The default is "printer". +icon: +# A name of the icon to use for the camera. See the tip following this +# example for known values. The default is mdiWebcam. +enabled: True +# An optional boolean value to indicate if this webcam should be enabled. +# Default is True. service: mjpegstreamer # The name of the application or service hosting the webcam stream. Front- -# ends may use this configuration to determine how to launch or start the -# program. The default is "mjpegstreamer". +# ends may use this configuration to determine how to connect to the service +# and interpret its stream. See the tip following this example for +# currently known values. The default is "mjpegstreamer". target_fps: 15 # An integer value specifying the target framerate. The default is 15 fps. +target_fps_idle: 5 +# An integer value specifying the target framerate when the printer is idle. +# The default is 5 fps. stream_url: # The url for the camera stream request. This may be a full url or a # relative path (ie: /webcam?action=stream) if the stream is served on the @@ -260,7 +386,7 @@ stream_url: snapshot_url: # The url for the camera snapshot request. This may be a full url or a # relative path (ie: /webcam?action=stream) if the stream is served on the -# same host as Moonraker at port 80. This parameter must be provided. +# same host as Moonraker at port 80. 
The default is an empty url. flip_horizontal: False # A boolean value indicating whether the stream should be flipped # horizontally. The default is false. flip_vertical: False # A boolean value indicating whether the stream should be flipped # vertically. The default is false. rotation: 0 # An integer value indicating the amount of clockwise rotation to apply # to the stream. May be 0, 90, 180, or 270. The default is 0. +aspect_ratio: 4:3 +# The aspect ratio to display for the camera. Note that this option +# is specific to certain services, otherwise it is ignored. +# The default is 4:3. ``` -## Optional Components - -Optional Components are only loaded if present in `moonraker.conf`. This -includes components that may not have any configuration. +!!! Tip + The following are known `icon` values: + + | Icon Description | [webcam] icon value | Supported Frontends | + | ---------------- | -------------------- | ------------------- | + | Printer | `mdiPrinter3d` | Mainsail | + | Nozzle | `mdiPrinter3dNozzle` | Mainsail | + | Bed | `mdiRadiatorDisabled` | Mainsail | + | Webcam | `mdiWebcam` | Mainsail | + | Filament | `mdiAlbum` | Mainsail | + | Door | `mdiDoor` | Mainsail | + | MCU | `mdiRaspberryPi` | Mainsail | + | Hot | `mdiCampfire` | Mainsail | + + The documentation for + [Mainsail](https://docs.mainsail.xyz/overview/settings/webcams#service) + and [Fluidd](https://docs.fluidd.xyz/features/cameras) + contains descriptions for their respective streaming service options. + Below is a table of values mapping currently known service types to + the values accepted by the webcam's `service` option: + + | Service Type | [webcam] service value | Supported Frontends | + | ------------- | ---------------------- | ------------------- | + | MJPEG-Streamer | `mjpegstreamer` | Mainsail, Fluidd | + | Adaptive MJPEG-Streamer | `mjpegstreamer-adaptive` | Mainsail, Fluidd | + | UV4L-MJPEG | `uv4l-mjpeg` | Mainsail | + | IP-Camera | `ipstream` | Mainsail, Fluidd | + | WebRTC (camera-streamer) | `webrtc-camerastreamer` | Mainsail, Fluidd | + | WebRTC (go2rtc) | `webrtc-go2rtc` | Mainsail, Fluidd | + | WebRTC (MediaMTX) | `webrtc-mediamtx` | Mainsail | + | WebRTC (Janus) | `webrtc-janus` | Mainsail | + | HLS Streamer | `hlsstream` | Mainsail, Fluidd | + | jMuxer | `jmuxer-stream` | Mainsail | + | HTTP Page | `iframe` | Fluidd |
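As an illustrative sketch, a webcam served by MJPEG-Streamer on the same host might be configured as follows; the section name and URLs are placeholders:

```ini
# moonraker.conf

[webcam printer_cam]
location: printer
service: mjpegstreamer
# Relative paths are resolved against the Moonraker host on port 80
stream_url: /webcam?action=stream
snapshot_url: /webcam?action=snapshot
target_fps: 15
```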
cors_domains: http://klipper-printer.local http://second-printer.local:7125 @@ -330,6 +497,19 @@ default_source: moonraker # "moonraker" The default is "moonraker". ``` +!!! Tip + When configuring the `trusted_clients` option it is generally recommended + to stick with IP ranges and avoid including domain names. When attempting to + authenticate a request against a domain name Moonraker must perform a DNS + lookup. If the DNS service is not available then authentication will fail + and an error will be returned. In addition, DNS lookups will introduce delay + in the response. + +## Optional Components + +Optional Components are only loaded if present in `moonraker.conf`. This +includes components that may not have any configuration. + ### `[ldap]` The `ldap` module may be used by `[authorization]` to perform user @@ -364,7 +544,16 @@ group_dn: CN=moonraker,OU=Groups,DC=ldap,DC=local # authentication. This option accepts Jinja2 Templates, see the [secrets] # section for details. The default is no group requirement. is_active_directory: True -# Enables support for Microsoft Active Directory. The default is False. +# Enables support for Microsoft Active Directory. This option changes the +# field used to lookup a user by username to sAMAccountName. +# The default is False. +user_filter: (&(objectClass=user)(cn=USERNAME)) +# Allows filter of users by custom LDAP query. Must contain the USERNAME +# token, it will be replaced by the user's username during lookup. Will +# override the change done by is_active_directory. This option accepts +# Jinja2 Templates, see the [secrets] section for details. +# The default is empty, which will change the lookup query depending on +# is_active_directory. ``` ### `[octoprint_compat]` @@ -486,8 +675,11 @@ The following configuration options are available for all power device types: type: # The type of device. Can be either gpio, klipper_device, rf, # tplink_smartplug, tasmota, shelly, homeseer, homeassistant, loxonev1, -# smartthings, mqtt or hue. +# smartthings, mqtt, hue, http or uhubctl. # This parameter must be provided. +initial_state: off +# The state the power device should be initialized to. May be on or +# off. When this option is not specified no initial state will be set. off_when_shutdown: False # If set to True the device will be powered off when Klipper enters # the "shutdown" state. This option applies to all device types. @@ -516,22 +708,23 @@ restart_klipper_when_powered: False restart_delay: 1. # If "restart_klipper_when_powered" is set, this option specifies the amount # of time (in seconds) to delay the restart. Default is 1 second. -bound_service: -# Can be set to any service Moonraker is authorized to manage with the -# exception of the moonraker service itself. See the tip below this section -# for details on what services are authorized. When a bound service has -# been set the service will be started when the device powers on and stopped -# when the device powers off. The default is no service is bound to the -# device. +bound_services: +# A newline separated list of services that are "bound" to the state of this +# device. When the device is powered on all bound services will be started. +# When the device is powered off all bound services are stopped. +# +# The items in this list are limited to those specified in the allow list, +# see the [machine] configuration documentation for details. Additionally, +# the Moonraker service can not be bound to a power device. Note that +# service names are case sensitive. 
-!!! Tip - Moonraker is authorized to manage the `klipper`, `klipper_mcu`, - `webcamd`, `MoonCord`, `KlipperScreen`, and `moonraker-telegram-bot` - services. It can also manage multiple instances of a service, ie: - `klipper_1`, `klipper_2`. Keep in mind that service names are case - sensitive. !!! Note If a device has been bound to the `klipper` service and the `restart_klipper_when_powered` option is set to `True`, the restart #### GPIO Device Configuration The following options are available for `gpio` device types: ```ini # moonraker.conf pin: gpiochip0/gpio26 # The pin to use for GPIO and RF devices. The chip is optional, if left out # then the module will default to gpiochip0. If one wishes to invert # the signal, a "!" may be prefixed to the pin. Valid examples: # gpiochip0/gpio26 # !gpiochip0/gpio26 # !gpio26 # This parameter must be provided for "gpio" type devices -initial_state: off -# The initial state for GPIO type devices. May be on or -# off. When moonraker starts the device will be set to this -# state. Default is off. timer: # A time (in seconds) after which the device will power off after being # switched on. This effectively turns the device into a momentary switch. @@ -645,10 +834,11 @@ pin: PA13 # The variable below should be initialized to the startup value. If your # device is configured to be on at startup use "variable_value: 1" variable_value: 0 +gcode: {% if 'VALUE' not in params %} {action_raise_error("Parameter 'VALUE' missing from 'SET_FLARE'")} {% endif %} - {% set state = params.VALUE %} + {% set state = params.VALUE|int %} {% if state %} # turn the neopixel on SET_LED LED=extruder_flare RED=0.75 BLUE=0.2 GREEN=0.2 SYNC=0 @@ -707,10 +897,6 @@ pin: gpiochip0/gpio26 # !gpiochip0/gpio26 # !gpio26 # This parameter must be provided for "gpio" type devices -initial_state: off -# The initial state for GPIO type devices. May be on or -# off. When moonraker starts the device will be set to this -# state. Default is off. timer: # A time (in seconds) after which the device will power off after being # switched on. This effectively turns the device into a momentary switch. @@ -804,6 +990,9 @@ password: mypassword The following options are available for `shelly` device types: +!!! Note + Currently only Gen 1 Shelly devices support authentication. ```ini # moonraker.conf @@ -883,7 +1072,7 @@ protocol: port: # The port the Home Assistant server is listening on. Default is 8123. device: -# The device ID of the switch to control. This parameter must be provided. +# The entity ID of the switch to control. This parameter must be provided. token: # A token used for request authorization. This option accepts # Jinja2 Templates, see the [secrets] section for details. This parameter @@ -904,6 +1093,7 @@ Example: [power homeassistant_switch] type: homeassistant +protocol: http address: 192.168.1.126 port: 8123 device: switch.1234567890abcdefghij @@ -1071,6 +1261,58 @@ token: smartthings-bearer-token device: smartthings-device-id +#### Domoticz (HTTP) + +Here is an example for a Domoticz Light/Switch device with idx 1234. +https://www.domoticz.com/wiki/Domoticz_API/JSON_URL%27s#Turn_a_light.2Fswitch_on.2Foff + +Authentication uses a basic auth header stored in Moonraker.secrets (see the [secrets] +documentation for details). +You have to convert your "username:password" pair to base64 and put it in the +Moonraker.secrets file. + +!!! Note + If unsecured http is required, configure Domoticz to allow basic auth on http. 
+ https://www.domoticz.com/wiki/Security#API_Protection + +```ini +# moonraker.conf + +[power printer_domoticz] +type: http +on_url: https://domoticz-ip<:port>/json.htm?type=command&param=switchlight&switchcmd=On&idx=1234 +off_url: https://domoticz-ip<:port>/json.htm?type=command&param=switchlight&switchcmd=Off&idx=1234 +status_url: https://domoticz-ip<:port>/json.htm?type=command&param=getdevices&rid=1234 +request_template: + {% do http_request.add_header("Authorization", "Basic %s" % secrets.domoticz_credentials.base64userpass) %} + {% do http_request.send() %} +response_template: + # Domoticz does not return device state in the response to on and off + # commands making it necessary to request device status. + {% if command in ["on", "off"] %} + # Some delay is necessary to ensure that Domoticz has finished processing + # the command. This example sleeps for 1 second, more or less may be required + # depending on the type of switch, speed of the Domoticz host, etc. + {% do async_sleep(1.0) %} + # Set the request method, clear the body, set the url + {% do http_request.set_method("GET") %} + {% do http_request.set_body(None) %} + {% do http_request.set_url(urls.status) %} + # Note: The Authorization header was set in the "request_template". Since the + # http request object is shared between both templates it is not necessary to + # add it again unless we perform a "reset()" on the request. + {% set response = http_request.send() %} + # Raise an exception if we don't get a successful response. This is handled + # for us after executing the response template, however sending a request here + # requires that we check the response ourselves. + {% do response.raise_for_status() %} + {% endif %} + # We use the `last_response` method to fetch the result and decode the + # json response. + {% set resp = http_request.last_response().json() %} + # The expression below will render "on" or "off". + {resp.result[0].Status.lower()} +``` + #### Hue Device Configuration The following options are available for `hue` device types: ```ini # moonraker.conf address: # A valid ip address or hostname of the Philips Hue Bridge. This # parameter must be provided. +port: +# A port number if an alternative Zigbee bridge is used on an HTTP port +# different from the default 80/443. user: # The api key used for request authorization. This option accepts # Jinja2 Templates, see the [secrets] section for details. @@ -1090,9 +1335,295 @@ device_id: # The device id of the light/socket you want to control. # An explanation on how you could get the device id, can be found here: # https://developers.meethue.com/develop/get-started-2/#turning-a-light-on-and-off +device_type: light +# Set to light to control a single hue light, or group to control a hue light group. +# If device_type is set to light, the device_id should be the light id, +# and if the device_type is group, the device_id should be the group id. +# The default is "light". ``` +#### USB (uhubctl) devices + +Support for toggling USB powered devices via [uhubctl](https://github.com/mvp/uhubctl). + +!!! Note + The host machine must have `uhubctl` installed as a prerequisite. In addition, + the required [udev rules](https://github.com/mvp/uhubctl#linux-usb-permissions) + must be installed on the host to give Moonraker permission to toggle hub + power without sudo. + +```ini +location: +# Device location of the USB Hub connected to the device to control. The +# location corresponds to the "-l" option of "uhubctl". This parameter +# must be provided. 
+port: +# Port of the USB device to control. The port corresponds to the "-p" +# option of "uhubctl". This parameter must be provided. + +``` + +!!! Tip + The `uhubctl` software can be used to list all compatible hubs on the + system by simply executing `uhubctl` with no arguments. The following + is example output from a Raspberry Pi 3B+: + + ``` + Current status for hub 1-1.1 [0424:2514, USB 2.00, 3 ports, ppps] + Port 1: 0503 power highspeed enable connect [0424:7800] + Port 2: 0100 power + Port 3: 0100 power + Current status for hub 1-1 [0424:2514, USB 2.00, 4 ports, ppps] + Port 1: 0503 power highspeed enable connect [0424:2514, USB 2.00, 3 ports, ppps] + Port 2: 0100 power + Port 3: 0103 power enable connect [1d50:614e Klipper rp2040 45503571290B1068] + Port 4: 0100 power + Current status for hub 1 [1d6b:0002 Linux 6.6.28+rpt-rpi-v7 dwc_otg_hcd DWC OTG Controller 3f980000.usb, USB 2.00, 1 ports, ppps] + Port 1: 0503 power highspeed enable connect [0424:2514, USB 2.00, 4 ports, ppps] + ``` + +##### Example + +```ini +# moonraker.conf + +# Example for controlling a device connected to a Raspberry Pi 3B+. +# Location 1-1 Port 2 controls power for all 4 exposed ports. +[power my_usb_dev] +type: uhubctl +location: 1-1 +port: 2 +``` + +#### Generic HTTP Devices + +Support for configurable HTTP switches. This device type may be used when +no specific implementation is available for a switch. + +```ini +on_url: +off_url: +status_url: +# The urls used to control a device and report its status. These options +# accept Jinja2 templates with access to "secrets"; see the [secrets] +# documentation for details. It is required that any special characters +# be escaped per RFC 3986 section 2. These options must be provided. +request_template: +# An optional Jinja2 template used to customize the http request. This +# template can set the request method, additional headers, and the body. +# When this option is not specified all commands will use a "GET" method +# with no body and no additional headers. +response_template: +# A Jinja2 template used to process the http response for each command. This +# template should always render to "on" or "off" based on the response. See +# the following section for details on the fields provided to the Jinja2 +# context. This parameter must be provided. + +``` + +###### The template context + +The `request_template` and `response_template` options are each provided +a Jinja2 context with the following fields (a brief sketch follows the list): + +- `command`: The command associated with this call. Will be one of "on", + "off", or "status". +- `async_sleep`: An alias for the `asyncio.sleep` method. This may be used + to add delays if necessary. +- `log_debug`: An alias for `logging.debug`. This can be used to log messages + and data to `moonraker.log` to aid in debugging an implementation. Note that + verbose logging must be + [enabled](installation.md#debug-options-for-developers) for these messages + to appear in the log. +- `http_request`: A request object used to build and send http requests. + This object exposes several methods detailed in the following section. + When a `request_template` is configured it will share the same http + request object with the `response_template`. +- `urls`: A `Dict` object containing the configured urls for each command. + Specifically this object contains "on", "off", and "status" fields, where + each field points to the url specified in the configuration.
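As a minimal sketch of these fields in use, suppose a hypothetical device answers its status endpoint with the plain text `on` or `off`; the device name and URLs below are placeholders:

```ini
# moonraker.conf

[power sketch_switch]
type: http
on_url: http://switch.lan/on
off_url: http://switch.lan/off
status_url: http://switch.lan/status
response_template:
  # Log the command being processed (visible with verbose logging)
  {% do log_debug("Power command: %s" % command) %}
  # The assumed device replies with the text "on" or "off", so the
  # response body can be rendered directly after normalizing it.
  {http_request.last_response().text.strip().lower()}
```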
+ +###### The HTTP Request object + +The HTTP Request Object is a wrapper around Moonraker's internal HTTP Client +that facilitates building HTTP requests. By default the request object will be +initialized as a "GET" request with the URL configured for the specified command +(ie: if the command is `on` then the request is initialized with the `on_url`). +The request provides the following methods that may be called from a Jinja2 +script: + +__`http_request.set_method(method)`__ + +> Sets the request method (ie: `GET`, `POST`, `PUT`). + + +__`http_request.set_url(url)`__ + +> Sets the request URL. Reserved characters in the url must be encoded +per [RFC3986](https://www.rfc-editor.org/rfc/rfc3986#section-2). + +__`http_request.set_body(body)`__ + + +> Sets the request body. This may be a `string`, `List`, or `Dict` object. +`List` and `Dict` objects will be encoded to json and the `Content-Type` +header will be set to `application/json`. + +__`http_request.add_header(name, value)`__ + +> Adds a request header. + +__`http_request.set_headers(headers)`__ + +> Sets the request headers to supplied `Dict` object. This will overwrite any +headers previously added or set. + +__`http_request.reset()`__ + +> Resets the request object to the default values. The request method will be +set to `GET`, the body will be empty, and the headers will be cleared. The +url will be reset to the configured URL for the current command. + +__`http_request.last_response()`__ + +> Returns the most recent [HTTP response](#the-http-response-object). If no +request has been sent this will return `None`. + +__`http_request.send(**kwargs)`__ + +> Sends the request and returns an [HTTP response](#the-http-response-object). + + +###### The HTTP Response object + +A response object provides access to http response data. The methods and +properties available will look familiar to those who have experience with +the Python `requests` module. + +__`http_response.json()`__ + +> Decodes the body and returns a resulting `Dict`. + +__`http_response.has_error()`__ + +> Returns whether the response is an error. This is typically true if +the response returns a status code outside of the 200-299 range. + +__`http_response.raise_for_status(message=None)`__ + +> Raises an exception if the response is an error. The optional "message" +may be specified to replace the error message received from the response. + +__`http_response.text`__ + +> A property that returns the body as a UTF-8 encoded string. + +__`http_response.content`__ + +> A property that returns the body as a python `bytes` object. + +__`http_response.url`__ + +> A property that returns the url of the request associated with this response. + +__`http_response.final_url`__ + +> A property that returns the "effective" url of the request after all redirects. + +__`http_response.headers`__ + +> A property that returns the response headers as a python `Dict`. + +__`http_response.status_code`__ + +> A property that returns the HTTP status code received with the response. + +###### Examples + +The following examples re-implement some of the `[power]` module's existing +types using generic http. The first example shows how a [tasmota](#tasmota-configuration) +switch may be implemented. Tasmota depends on `GET` http requests for all actions, +making it the simplest type of generic implementation: + +```ini +# moonraker.conf + +[power generic_tasmota] +type: http +on_url: + # Build the query string so we can encode it. This example assumes a password is + # supplied in a "secrets" file. 
+  # be omitted or set to an empty string.
+  {% set qs = {"user": "admin", "password": secrets.tasmota.password, "cmnd": "Power1 on"} %}
+  http://tasmota-switch.lan/cm?{qs|urlencode}
+off_url:
+  {% set qs = {"user": "admin", "password": secrets.tasmota.password, "cmnd": "Power1 off"} %}
+  http://tasmota-switch.lan/cm?{qs|urlencode}
+status_url:
+  {% set qs = {"user": "admin", "password": secrets.tasmota.password, "cmnd": "Power1"} %}
+  http://tasmota-switch.lan/cm?{qs|urlencode}
+response_template:
+  # The module will perform the "GET" request using the appropriate url.
+  # We use the `last_response` method to fetch the result and decode the
+  # json response. Tasmota devices return a similar response for all
+  # commands, so the response does not require special processing.
+  {% set resp = http_request.last_response().json() %}
+  # The expression below will render "on" or "off".
+  {resp["POWER1"].lower()}
+```
+
+The next example implements a [Home Assistant](#home-assistant-configuration-http)
+device. Home Assistant requires `POST` requests for the on and off commands,
+and a `GET` request for the status command. The Home Assistant API uses Token
+based authentication, requiring that the request add an `Authorization` header.
+Finally, the on and off HTTP requests do not consistently return device state,
+making it necessary to send a status request after an on or off request.
+
+```ini
+# moonraker.conf
+
+[power generic_homeassistant]
+type: http
+on_url: http://homeassistant.lan:8123/api/services/switch/turn_on
+off_url: http://homeassistant.lan:8123/api/services/switch/turn_off
+status_url: http://homeassistant.lan:8123/api/states/switch.test_switch
+request_template:
+  # Home Assistant uses token authorization, add the correct authorization header
+  {% do http_request.add_header("Authorization", "Bearer %s" % secrets.homeassistant.token) %}
+  {% if command in ["on", "off"] %}
+  # On and Off commands are POST requests. Additionally they require that we add
+  # a json body. The content type header will be automatically set for us in this
+  # instance.
+  {% do http_request.set_method("POST") %}
+  {% do http_request.set_body({"entity_id": "switch.test_switch"}) %}
+  {% endif %}
+  {% do http_request.send() %}
+response_template:
+  # Home Assistant does not return device state in the response to on and off
+  # commands, making it necessary to request device status.
+  {% if command in ["on", "off"] %}
+  # Some delay is necessary to ensure that Home Assistant has finished processing
+  # the command. This example sleeps for 1 second, more or less may be required
+  # depending on the type of switch, speed of the Home Assistant host, etc.
+  {% do async_sleep(1.0) %}
+  # Set the request method, clear the body, set the url
+  {% do http_request.set_method("GET") %}
+  {% do http_request.set_body(None) %}
+  {% do http_request.set_url(urls.status) %}
+  # Note: The Authorization header was set in the "request_template". Since the
+  # http request object is shared between both templates it is not necessary to
+  # add it again unless we perform a "reset()" on the request.
+  {% set response = http_request.send() %}
+  # Raise an exception if we don't get a successful response. This is handled
+  # for us after executing the response template, however sending a request here
+  # requires that we check the result ourselves.
+  {% do response.raise_for_status() %}
+  {% endif %}
+  {% set resp = http_request.last_response().json() %}
+  {resp["state"]}
+```
+
#### Toggling device state from Klipper

It is possible to toggle device power from the Klippy host, this can be done
@@ -1102,13 +1633,20 @@ with a gcode_macro, such as:

[gcode_macro POWER_OFF_PRINTER]
gcode:
-  {action_call_remote_method("set_device_power",
-                             device="printer",
-                             state="off")}
+  {action_call_remote_method(
+    "set_device_power", device="printer", state="off"
+  )}
```
+
+The `device` parameter must be the name of a configured power device.
+The `state` parameter must be `on`, `off`, or `toggle`. In the example above
+a device configured as `[power printer]` will be powered off.
+
+
The `POWER_OFF_PRINTER` gcode can be run to turn off the "printer" device.
This could be used in conjunction with Klipper's idle timeout to turn the
printer off when idle with a configuration similar to that of below:
+
```ini
# printer.cfg

@@ -1126,6 +1664,85 @@ gcode:
    UPDATE_DELAYED_GCODE ID=delayed_printer_off DURATION=60
```

+##### Power on a device when a print starts
+
+Some users have their logic wired to a separate power supply from heaters,
+fans, etc. This keeps Klipper in the "ready" state when power is removed
+from such devices. It is possible to configure Klipper to power up such
+devices just before a print is started by overriding the `SDCARD_PRINT_FILE`
+gcode command.
+
+The following example presumes that the user has a `[power heaters]`
+device configured in `moonraker.conf`:
+
+```ini
+# printer.cfg
+
+# Create a Macro to Power on the Heaters. This is necessary to be
+# sure that the template evaluates the call in the correct order.
+[gcode_macro POWER_ON_HEATERS]
+gcode:
+  {action_call_remote_method(
+    "set_device_power", device="heaters", state="on"
+  )}
+
+# Override SDCARD_PRINT_FILE
+[gcode_macro SDCARD_PRINT_FILE]
+rename_existing: SDCPF
+gcode:
+  # Step 1: Call the remote method to turn on the power device
+  POWER_ON_HEATERS
+  # Step 2: Pause while the device powers up. The following example
+  # pauses for 4 seconds. It may be necessary to tweak this value.
+  G4 P4000
+  # Step 3: Call the renamed command to start the print
+  SDCPF {rawparams}
+
+```
+
+!!! Warning
+    The `SDCARD_PRINT_FILE` G-Code command will be executed when Moonraker
+    forwards a request to start a print. Do not put this command in a G-Code
+    file or in a macro that is run from a G-Code file. This will result in an
+    `SD Busy` error and abort the print.
+
+
+##### Force a power device to change state during a print
+
+Another exotic use case is the addition of a "conditional" peripheral,
+such as an MMU device. The user may not wish to power on this device
+for every print, and instead power it on conditionally from within the
+"Start G-Code". Additionally, we do not want this device to be turned on/off
+unintentionally during a print. The `set_device_power` remote method takes
+an optional `force` argument that can be used to accommodate this scenario.
+
+The following example presumes that the user has a `[power mmu]` device
+configured in `moonraker.conf` with the `locked_when_printing` option
+set to `True`. The slicer would be configured to set `USE_MMU=1` for
+the print start macro when the MMU is in use.
+
+```ini
+# printer.cfg
+
+[gcode_macro POWER_ON_MMU]
+gcode:
+  {action_call_remote_method(
+    "set_device_power", device="mmu", state="on", force=True
+  )}
+
+[gcode_macro PRINT_START]
+gcode:
+  {% set use_mmu = params.USE_MMU|default(0)|int %}
+  {% if use_mmu %}
+  # Turn on the MMU's power supply
+  POWER_ON_MMU
+  # Add a bit of delay to give the switch time
+  G4 P2000
+  {% endif %}
+  # Add the rest of your "Start G-Code"...
+```
+
+
#### Power on G-Code Uploads

To power on a device after an upload, `queue_gcode_uploads: True` must
@@ -1133,7 +1750,6 @@ be set in the `[file_manager]`, `load_on_startup: True` must be set in
`[job_queue]` and `on_when_job_queued: True` must be set in `[power dev_name]`,
where "dev_name" is the name of your power device. For example:
-
```ini
# moonraker.conf

@@ -1141,9 +1757,7 @@ where "dev_name" is the name of your power device. For example:
# is set and Klipper cannot immediately start the print.
[file_manager]
queue_gcode_uploads: True
-# Set the config_path and log_path options to the correct locations
-#config_path:
-#log_path:
+

# Configure the Job Queue to start a queued print when Klipper reports as
# ready.
@@ -1183,18 +1797,16 @@ disk or cloned from unofficial sources are not supported.
# moonraker.conf

[update_manager]
-enable_repo_debug: False
-# When set to True moonraker will bypass repo validation and allow
-# updates from unofficial remotes and/or branches. Updates on
-# detached repos are also allowed. This option is intended for
-# developers and should not be used on production machines. The
-# default is False.
enable_auto_refresh: False
-# When set to True Moonraker will attempt to fetch status about
-# available updates roughly every 24 hours, between 12am-4am.
+# When set to True, Moonraker will check roughly every hour (only within
+# the update window) whether it's time to fetch status about available updates.
# When set to False Moonraker will only fetch update state on startup
# and clients will need to request that Moonraker updates state. The
# default is False.
+refresh_window: 0-5
+# The hours between which the periodic update check will be done.
+# Default is 0-5, meaning the refresh can only occur from midnight until 5am.
+# It can go over midnight, e.g. 22-6.
refresh_interval: 672
# The interval (in hours) after which the update manager will check
# for new updates. This interval applies to updates for Moonraker,
@@ -1214,15 +1826,27 @@ enable_packagekit: True
# updates will be processed via PackageKit over D-Bus. When set to False
# the "apt cli" fallback will be used. The default is True.
channel: dev
-# The update channel applied to Klipper and Moonraker. May dev or
-# beta. The dev channel will update to the latest commit pushed
+# The update channel applied to Klipper and Moonraker. May be dev, beta, or
+# stable. The dev channel will update to the latest commit pushed
# to the repo, whereas the beta channel will update to the latest
-# commit tagged by Moonraker. The beta channel will see less frequent
-# updates and should be more stable. Users on the beta channel will have
-# more opportunity to review breaking changes before choosing to update.
+# commit tagged by Moonraker. The beta and stable channels will see less
+# frequent updates and should be more stable. Users on the beta channel will
+# have more opportunity to review breaking changes before choosing to update.
# The default is dev.
```

+!!! Note
+    Configuration is automatically detected for Moonraker and Klipper, however
+    it is possible to override the `channel` and `pinned_commit` options on
+    a per application basis for each. This can be done by specifying the
+    configuration in `moonraker.conf`. For example:
+
+    ```ini
+    [update_manager klipper]
+    channel: dev
+    pinned_commit: 79930ed99a1fc284f41af5755908aa1fab948ce1
+    ```
+
#### Extension Configuration
The update manager may be configured to manage additional software, henceforth
referred to as "extensions". In general terms, an extension may be defined
@@ -1243,13 +1867,33 @@ down into 3 basic types:
    To benefit the community Moonraker facilitates updates for 3rd party
    "Klippy Extras" and "Moonraker Components". While many of these
    extensions are well developed and tested, users should always be
    careful when using such extensions. Moonraker and Klipper provide
    no official support for such extensions, thus users experiencing an
    issue should not create bug reports on the Klipper or Moonraker issue
-    trackers without first reproducing the issue with all unofficial
-    extensions disabled.
+    trackers without first reproducing the issue using pristine versions
+    of Moonraker and/or Klipper.

-##### Web type (front-end) configuration
+#### Web type (front-end) configuration
+
+!!! Note
+    Front-end developers that wish to deploy updates via Moonraker
+    should host releases on their GitHub repo. In the root of each
+    release a `release_info.json` file should be present. This
+    file must contain a JSON object with the following fields:
+
+    - `project_name`: The name of the GitHub project
+    - `project_owner`: The User or Organization that owns the project
+    - `version`: The current release version
+
+    For example, a `release_info.json` for Mainsail might contain the
+    following:
+
+    ```json
+    {
+        "project_name": "mainsail",
+        "project_owner": "mainsail-crew",
+        "version": "v2.5.1"
+    }
+    ```

```ini
# moonraker.conf

@@ -1266,7 +1910,11 @@ repo:
# For example, this could be set to fluidd-core/fluidd to update Fluidd or
# mainsail-crew/mainsail to update Mainsail. This parameter must be provided.
path:
-# The path to the front-end's files on disk. This parameter must be provided.
+# The path to the front-end's files on disk. This folder must contain a
+# previously installed client. The folder must not be located within a
+# git repo and it must not be located within a path that Moonraker has
+# reserved, ie: it cannot share a path with another extension. This parameter
+# must be provided.
persistent_files:
# A list of newline separated file names that should persist between
# updates. This is useful for static configuration files, or perhaps
@@ -1275,18 +1923,18 @@ refresh_interval:
# This overrides the refresh_interval set in the primary [update_manager]
# section.
info_tags:
-# Optional information tags about this extensions that are reported via
+# Optional information tags about this extension that are reported via
# Moonraker's API as a list of strings. Each tag should be separated by
# a new line. For example:
#   info_tags:
#     desc=My Client App
#     action=webcam_restart
-# Front-ends may use these tags to perform additional actions or display
+# Frontends may use these tags to perform additional actions or display
# information, see your extension documentation for details on configuration.
# The default is an empty list.
```

-##### All other extensions
+#### Git Repo Configuration

!!! Note
    Git repos must have at least one tag for Moonraker to identify its
@@ -1294,6 +1942,9 @@ info_tags:
    semantic version format, `vX.Y.Z`, where X, Y, and Z are all unsigned
    integer values. For example, a repo's first tag might be `v0.0.1`.

+    Moonraker can update repos without tags, however front ends may disable
+    update controls when version information is not reported by Moonraker.
+
```ini
# moonraker.conf

@@ -1301,17 +1952,15 @@ info_tags:
# When defining a service, the "extension_name" must be the name of the
# systemd service
[update_manager extension_name]
type: git_repo
-# Can be git_repo or zip. This value is set depending on how an extension
-# chooses to deploy updates, see its documentation for details This
-# parameter must be provided.
+# Currently must be git_repo. This value is set depending on how an
+# extension chooses to deploy updates, see its documentation for details.
+# This parameter must be provided.
channel: dev
# The update channel. The available value differs depending on the
# "type" option.
#   type: git_repo - May be dev or beta. The dev channel will update to
#                    the latest pushed commit, whereas the beta channel
#                    will update to the latest tagged commit.
-#   type: zip      - May be be stable or beta. When beta is specified
-#                    "pre-release" updates are available.
# The default is dev.
path:
# The absolute path to the client's files on disk. This parameter must be
@@ -1327,33 +1976,50 @@ primary_branch:
# The name of the primary branch used for release code on this repo. This
# option allows clients to specify 'main', or their own unique name, as
# the branch used for repo validity checks. The default is master.
+virtualenv:
+# An optional path to the virtualenv folder for Python Applications. For
+# example, Moonraker's default virtualenv is located at ~/moonraker-env.
+# When a virtualenv is specified Moonraker can update its Python
+# dependencies when it detects a change to the requirements file. The
+# default is no virtualenv.
env:
+# *** DEPRECATED FOR NEW CONFIGURATIONS - USE the 'virtualenv' OPTION ***
+#
# The path to the extension's virtual environment executable on disk. For
# example, Moonraker's venv is located at ~/moonraker-env/bin/python.
# The default is no env, which disables updating python packages.
requirements:
# This is the location in the repository to the extension's python
# requirements file. This location is relative to the root of the repository.
-# This parameter must be provided if the "env" option is set, otherwise it
-# should be omitted.
+# This parameter must be provided if the 'virtualenv' or 'env' option is set,
+# otherwise it must be omitted.
+system_dependencies:
+# A path, relative to the repository, to a json file containing operating
+# system package dependencies. Application developers should refer to the
+# "System Dependencies File Format" section of this document for details on how
+# this file should be formatted. The default is no system dependencies.
install_script:
-# The file location, relative to the repository, for the installation script.
-# The update manager parses this file for "system" packages that need updating.
-# The default is no install script, which disables system package updates
+# *** DEPRECATED FOR NEW CONFIGURATIONS - USE the 'system_dependencies' OPTION ***
+#
+# The file location, relative to the repository, for the installation script
+# associated with this application. Moonraker will not run this script, instead
+# it will parse the script searching for new "system" package dependencies that
+# require installation. Packages in the script must be defined as follows
+# for Moonraker to successfully parse them:
+#   PKGLIST="packagename1 packagename2 packagename3"
+#   PKGLIST="${PKGLIST} packagename4 packagename5"
+#
+# Note that the "packagenameX" items in the example above should be the names
+# of valid system packages. The second line in the example is optional and
+# additional lines in the same format may be added.
+#
+# The default is no install script.
enable_node_updates:
# When set to True, Moonraker will assume that this repo relies upon node
# and will attempt to execute "npm ci --only=prod" when it detects a change
# to package-lock.json. Note that if your project does not have a
# package-lock.json in its root directory then the plugin will fail to load.
# Default is False.
-host_repo:
-# The GitHub repo in which zipped releases are hosted. Note that this does
-# not need to match the repository in the "origin" option, as it is possible
-# to use a central GitHub repository to host multiple extension builds. As
-# an example, Moonraker's repo hosts builds for both Moonraker and Klipper.
-# This option defaults to the repo extracted from the "origin" option,
-# however if the origin is not hosted on GitHub then this parameter must
-# be provided.
is_system_service: True
# This should be set to False for repos that are not installed as a service
# or do not need to restart a service after updates. This option sets the
@@ -1372,25 +2038,134 @@ managed_services:
#   - The name configured in the extension's section header.
#     If the section header is [update_manager KlipperScreen]
#     then KlipperScreen would be a valid value.
-#   klipper - The klipper service will be restarted after an update
-#   moonraker - The moonraker service will be restarted after an update
+#   klipper - The Klipper service associated with this instance of
+#             Moonraker will be restarted after an update.
+#   moonraker - The Moonraker service will be restarted after an update.
+#
+# NOTE: Moonraker will resolve the service names for the "klipper" and
+# "moonraker" services if they are not the default values. Specific names
+# such as "klipper-1" or "moonraker_2" should not be entered in this option.
+#
# When this option is specified it overrides the "is_system_service" option.
# Thus it is not required to specify both, only one or the other. The
-# default depends on "is_system_service" as explained above.
+# default is no managed services if "is_system_service" is set to False,
+# otherwise the default is the service named in the section header.
refresh_interval:
# This overrides the refresh_interval set in the primary [update_manager]
# section.
info_tags:
-# Optional information tags about this application that will be reported
-# front-ends as a list of strings. Each tag should be separated by a new line.
+# Optional information tags about this application that will be reported to
+# frontends as a list of strings. Each tag should be separated by a new line.
# For example:
#   info_tags:
#     desc=Special Application
-# Front-ends my use these tags to perform additional actions or display
+# Frontends may use these tags to perform additional actions or display
# information, see your extension documentation for details on configuration.
-# The default is an empty list.
+# The default is an empty list (no info tags).
+pinned_commit:
+# A git commit hash to "pin" updates to. When specified Moonraker will not
+# update the repo beyond the pinned commit. If the repo is already beyond
+# the specified commit, or if the commit is not in the repo, further updates
+# are disabled until the pinned_commit is changed. It is recommended to
+# specify the complete hash, however abbreviated hashes with a minimum of
+# 8 characters are accepted. The "pinned_commit" overrides the update
+# behavior set by the "channel" option. The default is no pinned commit.
```

+!!! Note
+    If this application requires a restart after an update it may be necessary
+    to grant Moonraker permission to manage its service. See the
+    [allowed services](#allowed-services) section for details on which
+    services Moonraker is allowed to manage and how to add additional services.
+
+    Also note that systemd services are case sensitive. The `extension_name`
+    in the section header and the value provided in the `managed_services`
+    option must match the case of the systemd unit file.
+
+#### Zip Application Configuration
+
+The `zip` type can be used to deploy zipped application updates through GitHub
+releases. They can be thought of as a combination of the `web` and `git_repo`
+types. Like `web` types, zipped applications must include a `release_info.json`
+file (see the [web type](#web-type-front-end-configuration) note for details).
+In addition, `zip` types can be configured to update dependencies and manage
+services.
+
+The `zip` type is ideal for applications that need to be built before
+deployment. Keep in mind that any application updated through Moonraker must
+either be cross-platform, or it must deploy binaries for multiple platforms
+and be able to choose the correct one based on the system.
+
+```ini
+channel: stable
+# May be stable or beta. When beta is specified "pre-release"
+# updates are available. The default is stable.
+repo:
+# This is the GitHub repo of the application, in the format of owner/repo_name.
+# For example, this could be set to Donkie/Spoolman to update Spoolman.
+# This parameter must be provided.
+path:
+# The path to the Application files on disk. This folder must contain a
+# previously installed application and a valid release_info.json file.
+# The folder must not be located within a git repo and it must not be located
+# within a path that Moonraker has reserved, ie: it cannot share a path with
+# another extension. This parameter must be provided.
+refresh_interval:
+# This overrides the refresh_interval set in the primary [update_manager]
+# section.
+persistent_files:
+# A list of newline separated file names that should persist between
+# updates. This is useful for virtualenvs and other files/folders that
+# should not be deleted when Moonraker overwrites the folder. The default
+# is no persistent files.
+virtualenv:
+requirements:
+system_dependencies:
+enable_node_updates:
+is_system_service: True
+managed_services:
+info_tags:
+# See the git_repo type documentation for detailed descriptions of the above
+# options.
+```
+
+#### The System Dependencies File Format
+
+When an application depends on OS packages it is possible to specify them
+in a file that Moonraker can refer to. During an update Moonraker will
+use this file to install new dependencies if they are detected.
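+
+For example, a hypothetical extension named `my_extension` (the section name,
+paths, and repo below are placeholders) might reference its dependency file
+as follows:
+
+```ini
+# moonraker.conf
+
+[update_manager my_extension]
+type: git_repo
+path: ~/my_extension
+origin: https://github.com/example/my_extension.git
+# Path to the dependency file, relative to the root of the repository
+system_dependencies: scripts/system-dependencies.json
+```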
+
+Below is an example of Moonraker's own system dependencies file, located
+in the repository at
+[scripts/system-dependencies.json](https://github.com/Arksine/moonraker/blob/master/scripts/system-dependencies.json):
+
+```json
+{
+    "debian": [
+        "python3-virtualenv",
+        "python3-dev",
+        "python3-libgpiod",
+        "liblmdb-dev",
+        "libopenjp2-7",
+        "libsodium-dev",
+        "zlib1g-dev",
+        "libjpeg-dev",
+        "packagekit",
+        "wireless-tools",
+        "curl"
+    ]
+}
+```
+
+The general format is an object, where each key is the name of a linux
+distribution, and the value is an array of strings each naming a dependency.
+Moonraker uses Python's [distro](https://distro.readthedocs.io/en/latest/)
+package to match the detected operating system against keys in the system
+dependencies file. It will first attempt to match against the return value
+of `distro.id()`, then fall back on the values reported by `distro.like()`.
+Following this logic, the `debian` key will be applied to Debian, Raspberry
+Pi OS, Ubuntu, and likely other Debian derived distributions.
+
### `[mqtt]`

Enables an MQTT Client. When configured most of Moonraker's APIs are available
@@ -1411,6 +2186,11 @@ address:
# parameter must be provided.
port:
# Port the Broker is listening on. Default is 1883.
+enable_tls: False
+# Enables SSL/TLS connections when set to true. Note that if a user intends
+# to connect to a local MQTT service using a self signed certificate then
+# it will be necessary to install the root CA certificate on the machine
+# hosting Moonraker. Default is False.
username:
# An optional username used to log in to the Broker. This option accepts
# Jinja2 Templates, see the [secrets] section for details. The default is
@@ -1451,8 +2231,10 @@ instance_name:
# The default is the machine's hostname.
status_objects:
# A newline separated list of Klipper objects whose state will be
-# published in the payload of the following topic:
-#    {instance_name}/klipper/status
+# published. There are two different ways to publish the states - you
+# can use either or both depending on your need. See the
+# "publish_split_status" option for details.
+#
# For example, this option could be set as follows:
#
#   status_objects:
@@ -1473,6 +2255,27 @@ status_objects:
#
# If not configured then no objects will be tracked and published to
# the klipper/status topic.
+status_interval:
+# The interval (in seconds) between published status updates. This value
+# can be used to limit the rate of updates published. By default Moonraker
+# will publish Klipper status updates as it receives them.
+publish_split_status: False
+# Configures how to publish status updates to MQTT.
+#
+# When set to False (default), all Klipper object state updates will be
+# published to a single mqtt topic:
+#   {instance_name}/klipper/status
+#
+# When set to True, all Klipper object state updates will be published to
+# separate mqtt topics derived from the object and item in the following
+# format:
+#   {instance_name}/klipper/state/{objectname}/{statename}
+#
+# The actual value of the state is published as "value" to the topic above.
+# For example, if the heater_bed temperature was 24.0, this is the payload:
+#   {"eventtime": {timestamp}, "value": 24.0}
+# It would be published to this topic:
+#   {instance_name}/klipper/state/heater_bed/temperature
default_qos: 0
# The default QOS level used when publishing or subscribing to topics.
# Must be an integer value from 0 to 2. The default is 0.
@@ -1634,13 +2437,20 @@ gcode:
```

### `[zeroconf]`
-Enable Zeroconf service registration allowing external services to more
-easily detect and use Moonraker instances.
+
+Enables support for Zeroconf (Apple Bonjour) discovery, allowing external
+services to detect and use Moonraker instances.

```ini
# moonraker.conf

[zeroconf]
+mdns_hostname:
+# The hostname used when registering the multicast DNS service.
+# The instance will be available at:
+#   http://{mdns_hostname}.local:{port}/
+# The default is the operating system's configured hostname.
+enable_ssdp:
+# Enables discovery over UPnP/SSDP. The default is False.
```

### `[button]`

@@ -1672,10 +2482,13 @@ pin: gpiochip0/gpio26
#   ^!gpiochip0/gpio26
#   ~!gpiochip0/gpio26
# This parameter must be provided
-min_event_time: .05
-# The minimum time (in seconds) between events to trigger a response. This is
-# is used to debounce buttons. This value must be at least .01 seconds.
-# The default is .05 seconds (50 milliseconds).
+debounce_period: .05
+# The time (in seconds) an event is delayed to debounce the response.
+# The minimum debounce period is .01 seconds. The default is .05 seconds.
+minimum_event_time: 0
+# The minimum event duration (in seconds) required to trigger a response.
+# This can be used as a secondary debounce procedure. The default is 0
+# seconds (no minimum duration).
on_press:
on_release:
# Jinja2 templates to be executed when a button event is detected. At least one
@@ -1777,26 +2590,18 @@ separate from `moonraker.conf`. This allows users to
safely distribute their
configuration and log files without revealing credentials and other
sensitive information.

-```ini
-# moonraker.conf
+!!! Note
+    This section no longer has configuration options. Previously the
+    `secrets_path` option was used to specify the location of the file.
+    The secrets file name and location are now determined by the `data path`
+    and `alias` command line options, ie: `<data_path>/moonraker.secrets`.
+    For a typical single instance installation this resolves to
+    `$HOME/printer_data/moonraker.secrets`. This may be a symbolic link.

-[secrets]
-secrets_path:
-# A valid path to the "secrets" file. A secrets file should either be
-# in "ini" format (ie: the same format as moonraker.conf) or "json"
-# format. If the file is a "json" file, the top level item must be
-# an Object. When this parameter is not specified no file will be
-# loaded.
-```
-
-!!! Warning
-    For maximum security the secrets file should be located in a folder
-    not served by Moonraker.
-
-Example ini file:
+Example ini secrets file:

```ini
-# moonraker_secrets.ini
+# /home/pi/printer_data/moonraker.secrets

[mqtt_credentials]
username: mqtt_user
@@ -1807,7 +2612,7 @@ token: long_token_string

```

-Example json file:
+Example json secrets file:

```json
{
@@ -1872,18 +2677,24 @@ domain: switch

### `[notifier]`

-Enables the notification service. Multiple "notifiers" may be configured,
-each with their own section, ie: `[notifier my_discord_server]`, `[notifier my_phone]`.
-All notifiers require an url for a service to be set up. Moonraker uses [Apprise](https://github.com/caronc/apprise) internally.
-You can find the available services and their corresponding urls here: [https://github.com/caronc/apprise/wiki](https://github.com/caronc/apprise/wiki).
+Enables the notification service. Multiple "notifiers" may be configured,
+each with their own section, ie: `[notifier my_discord_server]`,
+`[notifier my_phone]`.
+
+All notifiers require a url for a service to be set up.
+Moonraker depends on [Apprise](https://github.com/caronc/apprise) to emit
+notifications. Available services and their corresponding urls may be found
+on the [Apprise Wiki](https://github.com/caronc/apprise/wiki).

```ini
# moonraker.conf

[notifier telegram]
url: tgram://{bottoken}/{ChatID}
-# The url for your notifier. This URL accepts Jinja2 templates, so you can use [secrets] if you want.
+# The url for your notifier. This URL accepts Jinja2 templates,
+# so you can use [secrets] if you want. This parameter must be
+# provided.
events: *
# The events this notifier should trigger to. '*' means all events.
# You can use multiple events, comma separated.
@@ -1894,18 +2705,56 @@ events: *
#   cancelled
#   paused
#   resumed
+# This parameter must be provided.
body: "Your printer status has changed to {event_name}"
-# The body of the notification. This option accepts Jinja2 templates.
-# You can use {event_name} to print the current event trigger name. And {event_args} for
-# the arguments that came with it.
+# The body of the notification. This option accepts Jinja2 templates, where
+# the template is passed a context containing the following fields:
+#   event_name: The name of the event that triggered the notification
+#               (ie: started, complete, error, etc)
+#   event_args: A list containing the arguments passed to the event.
+#               See the "Tip" below for additional details on this field.
+#   event_message: An additional message passed to the notification when
+#                  triggered. This is commonly used when the notification
+#                  is received from Klippy using a gcode_macro.
+# The default is a body containing the "name" of the notification as entered
+# in the section header.
+body_format:
+# The formatting to use for the body, can be `text`, `html`, or `markdown`.
+# The default is `text`.
title:
-# The optional title of the notification. Just as the body, this option accepts Jinja2 templates.
+# The optional title of the notification. This option accepts Jinja2 templates,
+# the template will receive a context with the same fields as the body. The
+# default is an empty string as the title.
attach:
-# An optional attachment. Can be an url of a webcam for example. Note: this isn't available for all
-# notification services. You can check if it's supported on the Apprise Wiki. Be aware that links in
-# your internal network can only be viewed within your network.
+# One or more items to attach to the notification. This may be a path to a
+# local file or a url (such as a webcam snapshot). Multiple attachments must be
+# separated by a newline. This option accepts Jinja2 templates, the template
+# will receive the same context as the "body" and "title" options. The default
+# is no attachment will be sent with the notification.
+#
+# Note: Attachments are not available for all notification services, you can
+# check if it's supported on the Apprise Wiki. Be aware that links to items
+# hosted on your local network can only be viewed within that network.
```

+!!! Tip
+    The `event_args` field of the Jinja2 context passed to templates in
+    this section receives a list of "arguments" passed to the event. For
+    those familiar with Python this list is known as "variable arguments".
+    Currently the notifier only supports two kinds of events: those
+    triggered by a change in the job state and those triggered from a remote
+    method call from a `gcode_macro`.
+
+    For `remote method` events the `event_args` field will always be
+    an empty list. For `job state` events the `event_args` field will
+    contain two items. The first item (`event_args[0]`) contains the
+    job state recorded prior to the event, the second item (`event_args[1]`)
+    contains the current job state. In most cases users will be interested
+    in the current job state (`event_args[1]`).
+
+    The `job state` is a dict that contains the values reported by
+    Klipper's [print_stats](printer_objects.md#print_stats) object.
+
#### An example:
```ini
# moonraker.conf

@@ -1926,15 +2775,415 @@ url: tgram://{bottoken}/{ChatID}
events: error
body: {event_args[1].message}
attach: http://192.168.1.100/webcam/?action=snapshot
+
+[notifier my_telegram_notifier]
+url: tgram://{bottoken}/{ChatID}
+events: gcode
+body: {event_message}
+attach: http://192.168.1.100/webcam/?action=snapshot
```

+#### Notifying from Klipper
+It is possible to invoke your notifiers from the Klippy host, this can be done
+with a gcode_macro, such as:
+```ini
+# printer.cfg
+
+[gcode_macro NOTIFY_FILAMENT_CHANGE]
+gcode:
+  {action_call_remote_method("notify",
+                             name="my_telegram_notifier",
+                             message="Filament change needed!")}
+```
+
+### `[simplyprint]`
+
+Enables support for print monitoring through
+[SimplyPrint](https://simplyprint.io), whose Moonraker integration publicly
+launched on Nov 21st, 2022.
+
+```ini
+# moonraker.conf
+[simplyprint]
+webcam_name:
+# Optional name of a configured webcam for use by the SimplyPrint service.
+# This can either be a webcam configured through the `[webcam]` module or
+# a webcam added via a front-end like Mainsail. The default is to attempt
+# to autodetect a webcam.
+power_device:
+# The name of a configured [power] device to toggle using the SimplyPrint
+# service. For example, a device specified as [power printer] may be
+# configured as:
+#   power_device: printer
+# By default no power device is configured.
+filament_sensor:
+# The name of a configured filament sensor to be monitored by SimplyPrint.
+# The filament sensor must be configured in Klipper and the full name,
+# including the prefix, must be specified. For example, a sensor
+# specified as [filament_switch_sensor fsensor] may be configured as:
+#   filament_sensor: filament_switch_sensor fsensor
+# By default no filament sensor is monitored.
+ambient_sensor:
+# The name of a configured temperature sensor used to report the ambient
+# temperature. The sensor must be configured in Klipper and the full name,
+# including the prefix, must be specified. For example, an ambient sensor
+# specified in Klipper as [temperature_sensor chamber] may be configured as:
+#   ambient_sensor: temperature_sensor chamber
+# If no ambient_sensor is configured then SimplyPrint will use the extruder
+# to estimate ambient temperature when the heater is idle and cool.
+```
+
+!!! Note
+    This module collects and uploads the following data to SimplyPrint:
+
+    - Klipper's version, connection state, and date pulled
+    - Moonraker's version
+    - Currently connected front-end and version
+    - Current python version
+    - Linux distribution and version
+    - Network connection type (wifi or ethernet)
+    - wifi SSID (if connected)
+    - LAN IP address
+    - LAN hostname
+    - CPU model
+    - CPU core count
+    - Total system memory
+    - CPU usage
+    - Memory usage
+    - Current extruder selected
+    - Extruder and bed temperatures
+    - Mesh data (if Klipper has `bed_mesh` configured)
+    - Current print state
+    - Loaded file metadata, including estimated filament usage and print time
+    - Current print filament usage
+    - Current print time elapsed
+    - Estimated ambient temperature
+    - Webcam configuration (if available)
+    - Webcam images
+    - Power device state (if configured)
+    - Filament sensor state (if configured)
+
+More detail on how your data is used may be found in the
+[SimplyPrint privacy policy](https://simplyprint.io/legal/privacy).
+
+### `[sensor]`
+
+Enables data collection from additional sensor sources. Multiple "sensor"
+sources may be configured, each with their own section, ie: `[sensor current]`,
+`[sensor voltage]`.
+
+#### Options common to all sensor devices
+
+The following configuration options are available for all sensor types:
+
+```ini
+# moonraker.conf
+
+[sensor my_sensor]
+type:
+# The type of device. Supported types: mqtt
+# This parameter must be provided.
+name:
+# The friendly display name of the sensor.
+# The default is the sensor source name.
+parameter_{parameter_name}:
+# Optional parameter descriptions. Each sensor can report one or
+# more parameters. Frontends can use this data to accurately
+# present sensor details to the user. The {parameter_name} must
+# be a valid measurement reported by the sensor. The value should be
+# a newline separated list of key-value pairs describing the
+# measurement. Currently the only key used is "units". For
+# example, the configuration for a parameter may look like the following:
+#
+#   parameter_energy:
+#     units=kWh
+#
+history_field_{field_name}:
+# Optional history field description. When provided the named
+# field will be tracked in Moonraker's Job History component.
+# The "field_name" portion of the option is the identifier used
+# when reported in the history. Multiple history fields may be
+# added and tracked for a sensor. See the "History Fields" note
+# for a detailed explanation of this option.
+```
+
+!!! note "History Fields"
+    A `history_field_{name}` option must contain a series of key-value pairs.
+    The key and value must be separated by an equal sign (=), and each
+    pair must be separated by a newline. The following keys are
+    available:
+
+    - `parameter`: The name of the sensor parameter which is used to
+      provide values for this field. This name must match a field name
+      set in the specific sensor implementation (ie: see the
+      "state_response_template" option for the MQTT type.) This must
+      be provided.
+    - `desc`: A brief description of the field.
+    - `strategy`: The tracking strategy used to calculate the value
+      stored in the history. See below for available strategies.
+      The default is "basic".
+    - `units`: An optional unit specifier for the value
+    - `init_tracker`: When set to true the tracked value will be initialized
+      to the last sensor measurement when a job starts. The "delta"
The "delta" + strategy will initialize its "last value", setting this measurement + as the reference rather than the first received after the print starts. + Default is false. + - `exclude_paused`: When set to true the values received when + a job is paused will be ignored. Default is false. + - `report_total`: When set to true the value reported for all + jobs will be accumulated and reported in the history totals. + Default is false. + - `report_maximum`: When set to true maximum value for all jobs + will be reported in the history totals. Default is false. + - `precision`: An integer value indicating the precision to use when + reporting decimal values. This precision applies to both job history + AND job totals. The default is no precision, ie: no rounding will + occur. + + Note that job totals for history fields only persist for a currently + configured sensor and history field name. If the name of the sensor + changes, the name of the field changes, or if either are removed + from the configuration, then their totals will be discarded. This + prevents the accumulation of stale totals. + + Moonraker provides several history tracking strategies that can be used + accommodate how values should be tracked and stored in the job history: + + - `basic`: This strategy should be used if the value should be stored + in history directly as it is received. Simply put, the last value + received before a job completes wiill the the value stored in the job + history. + - `accumulate`: When a job starts, the tracked value initialized to 0 or + the last received measurement. New measurements will be added to the + tracked value as they are received. The total cumulative value will be + reported when the job ends. + - `delta`: When a job starts the tracked value is 0. The total value + will be the delta between the final measurement received before the job + ends and the first measurement received when after job begins. Note that + if `exclude_paused` is set then the tracker will accumulate deltas + between pauses. If the measurement does not update frequently this could + significantly alter the final result. + - `average`: Reports an average of all measurements received during the job. + - `maximum`: Reports the maximum value of all measurements received during + the job. + - `minimum`: Reports the minimum value of all measurements received during + the job. + - `collect`: Measurements are stored in a list as they are received. + Duplicate measurements are discarded. A maximum of 100 entries may + be stored, the oldest measurements will be discarded when this limit + is exceeded. This strategy is useful for a sensor that reports some + data infrequently and its desirable to include all measurements in the + job history. For example, the `spoolman` component uses this strategy + to report all spool IDs set during a job. When this strategy is enabled + the `track_total` and `track_maximum` options are ignored, as it is not + possible to report totals for a collection. + + Example: + + ``` + history_field_total_energy: + parameter=energy + desc=Printer power consumption + strategy=delta + units=kWh + init_tracker=false + exclude_paused=false + report_total=true + report_maximum=true + precision=6 + ``` + + +#### MQTT Sensor Configuration + +The following options are available for `mqtt` sensor types: + +```ini +# moonraker.conf + +qos: +# The MQTT QOS level to use when publishing and subscribing to topics. +# The default is to use the setting supplied in the [mqtt] section. 
+state_topic:
+# The mqtt topic to subscribe to for sensor state updates. This parameter
+# must be provided.
+state_response_template:
+# A template used to parse the payload received with the state topic. A
+# "payload" variable is provided to the template's context. This template must
+# call the provided set_result() method to pass sensor values to Moonraker.
+# `set_result()` expects two parameters, the name of the measurement (as
+# string) and the value of the measurement (either integer or float number).
+#
+# This allows for sensors that can return multiple readings (e.g. temperature/
+# humidity sensors or powermeters).
+# For example:
+#   {% set notification = payload|fromjson %}
+#   {set_result("temperature", notification["temperature"]|float)}
+#   {set_result("humidity", notification["humidity"]|float)}
+#   {set_result("pressure", notification["pressure"]|float)}
+#
+# The above example assumes a json response with multiple fields in a struct
+# is received. Individual measurements are extracted from that struct, coerced
+# to a numeric format and passed to Moonraker. This parameter must be provided.
```

+!!! Note
+    Moonraker's MQTT client must be properly configured to add an MQTT sensor.
+    See the [mqtt](#mqtt) section for details.
+
+!!! Tip
+    MQTT is the most robust way of collecting sensor data from networked
+    devices through Moonraker. A well implemented MQTT sensor will publish all
+    changes in state to the `state_topic`. Moonraker receives these changes,
+    updates its internal state, and notifies connected clients.
+
+Example:
+
+```ini
+# moonraker.conf
+
+# Example configuration for a Shelly Pro 1PM (Gen2) switch with
+# integrated power meter running the Shelly firmware over MQTT.
+[sensor mqtt_powermeter]
+type: mqtt
+# Use a different display name
+name: Powermeter
+parameter_power:
+  units=W
+parameter_voltage:
+  units=V
+parameter_current:
+  units=mA
+parameter_energy:
+  units=kWh
+state_topic: shellypro1pm-8cb113caba09/status/switch:0
+# The response is a JSON object with multiple fields that we convert to
+# float values before passing them to Moonraker.
+state_response_template:
+  {% set notification = payload|fromjson %}
+  {set_result("power", notification["apower"]|float)}
+  {set_result("voltage", notification["voltage"]|float)}
+  {set_result("current", notification["current"]|float)}
+  {set_result("energy", notification["aenergy"]["by_minute"][0]|float * 0.000001)}
```
+
+Tasmota Example:
+
+!!! Note
+    It may be necessary to set Tasmota's Telemetry Period to a low value
+    to achieve a decent response. This can be done with the
+    `TelePeriod` command via the console. For example, the command
+    to set the telemetry period to 10 seconds is:
+
+    `cmnd/%device_name%/TelePeriod` with a payload of `10`.
+
+```ini
+[sensor tasmota_power]
+type: mqtt
+state_topic: tele/tasmota_switch/SENSOR
+state_response_template:
+  {% set resp = payload|fromjson %}
+  {% set edata = resp["ENERGY"] %}
+  {set_result("energy", edata["Total"])}
+  {set_result("voltage", edata["Voltage"])}
+  {set_result("power", edata["Power"])}
+  {set_result("current", edata["Current"])}
+parameter_power:
+  units=W
+parameter_voltage:
+  units=V
+parameter_current:
+  units=mA
+parameter_energy:
+  units=kWh
+history_field_energy_consumption:
+  parameter=energy
+  desc=Printer energy consumption
+  strategy=delta
+  units=kWh
+  init_tracker=true
+  precision=6
+  exclude_paused=false
+  report_total=true
+  report_maximum=true
+history_field_average_current:
+  parameter=current
+  desc=Average current draw
+  strategy=average
+  units=A
+  report_total=false
+  report_maximum=true
+# Multiple history fields may track the same sensor parameter:
+history_field_max_current:
+  parameter=current
+  desc=Maximum current draw
+  strategy=maximum
+  units=A
+  init_tracker=true
+  report_total=false
+  report_maximum=false
+```
+
+### `[spoolman]`
+
+Enables integration with the [Spoolman](https://github.com/Donkie/Spoolman)
+filament manager. Moonraker will automatically send filament usage updates to
+the Spoolman database.
+
+Front ends can also utilize this config to provide a built-in management tool.
+
+```ini
+# moonraker.conf
+
+[spoolman]
+server: http://192.168.0.123:7912
+# URL to the Spoolman instance. This parameter must be provided.
+sync_rate: 5
+# The interval, in seconds, between sync requests with the
+# Spoolman server. The default is 5.
+```
+
+#### Setting the active spool from Klipper
+
+The `spoolman` module registers the `spoolman_set_active_spool` remote method
+with Klipper. This method may be used to set the active spool ID, or clear it,
+using gcode macros. For example, the following could be added to Klipper's
+`printer.cfg`:
+
+```ini
+# printer.cfg
+
+[gcode_macro SET_ACTIVE_SPOOL]
+gcode:
+  {% if params.ID %}
+    {% set id = params.ID|int %}
+    {action_call_remote_method(
+      "spoolman_set_active_spool",
+      spool_id=id
+    )}
+  {% else %}
+    {action_respond_info("Parameter 'ID' is required")}
+  {% endif %}
+
+[gcode_macro CLEAR_ACTIVE_SPOOL]
+gcode:
+  {action_call_remote_method(
+    "spoolman_set_active_spool",
+    spool_id=None
+  )}
+```
+
+With the above configuration it is possible to run the `SET_ACTIVE_SPOOL ID=1`
+command to set the currently tracked spool ID to `1`, and the
+`CLEAR_ACTIVE_SPOOL` command to clear spool tracking (useful when unloading
+filament, for example).
+
## Include directives

It is possible to include configuration from other files via include
directives. Include directives in Moonraker are specified identically
to those in Klipper, ie: `[include relative_path]`. The `relative_path`
-is a path relative to the configuration file's parent, and may include
-wildcards. For example:
+is a path relative to the configuration file's parent folder, and may
+include wildcards. For example:

```ini
# moonraker.conf

@@ -1946,11 +3195,10 @@ wildcards. For example:
```

If a section is duplicated in an included file the options from both
-sections will be merged, with the latest file parsed taking precedence.
-When wildcards are specified all matches are parsed in alphabetical
-order. If includes are nested (ie: an included file specifies an
-`[include]` directive), those includes will be parsed after all matches
-of the previous include.
+sections will be merged, with the latest section parsed taking precedence,
+as illustrated by the sketch below.
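+
+As a minimal sketch (the file names and option values below are
+hypothetical), consider the following pair of files:
+
+```ini
+# moonraker.conf
+
+[update_manager]
+channel: dev
+
+[include extras.conf]
+```
+
+```ini
+# extras.conf
+
+[update_manager]
+channel: beta
+refresh_interval: 336
+```
+
+The merged `[update_manager]` section contains both `refresh_interval: 336`
+and `channel: beta`; since `extras.conf` is parsed after the include
+directive, its `channel` value overrides the one in `moonraker.conf`.
+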
+The order in which a section is parsed depends on the location of the +include directive. When wildcards are specified all matches are parsed in +alphabetical order. ## Jinja2 Templates @@ -2006,12 +3254,10 @@ for core component configuration if no section was present. On April 6th 2022 the fallback was deprecated. Moonraker will still function normally if `core components` are configured in the `[server]` section, -however Moonraker now generates warnings when it detected this condition, +however Moonraker now generates warnings when it detects this condition, such as: ``` -[server]: Option 'config_path' has been moved to section [file_manager]. Please correct your configuration, see https://moonraker.readthedocs.io/en/latest/configuration for detailed documentation. -[server]: Option 'log_path' has been moved to section [file_manager]. Please correct your configuration, see https://moonraker.readthedocs.io/en/latest/configuration for detailed documentation. [server]: Option 'temperature_store_size' has been moved to section [data_store]. Please correct your configuration, see https://moonraker.readthedocs.io/en/latest/configuration for detailed documentation. [server]: Option 'gcode_store_size' has been moved to section [data_store]. Please correct your configuration, see https://moonraker.readthedocs.io/en/latest/configuration for detailed documentation ``` @@ -2027,8 +3273,6 @@ host: 0.0.0.0 port: 7125 temperature_store_size: 600 gcode_store_size: 1000 -config_path: ~/klipper_config -log_path: ~/klipper_logs ``` @@ -2041,10 +3285,6 @@ You will need to change it to the following; host: 0.0.0.0 port: 7125 -[file_manager] -config_path: ~/klipper_config -log_path: ~/klipper_logs - [data_store] temperature_store_size: 600 gcode_store_size: 1000 @@ -2060,8 +3300,3 @@ make the changes. Once the changes are complete you may use the UI to restart Moonraker and the warnings should clear. - -!!! Note - Some users have asked why Moonraker does not automate these changes. - Currently Moonraker has no mechanism to modify the configuration directly, - however this functionality will be added in the future. diff --git a/docs/contributing.md b/docs/contributing.md index 13f29aa..8bece89 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -1,10 +1,28 @@ # Contributing to Moonraker -While Moonraker exists as a service independently from Klipper, it relies -on Klipper to be useful. Thus, the tentative plan is to eventually merge -the Moonraker application into the Klipper repo after Moonraker matures, -at which point this repo will be archived. As such, contibuting guidelines -are near those of Klipper: +Prior to submitting a pull request prospective contributors must read this +entire document. Care should be taken to [format git commits](#git-commit-format) +correctly. This eases the review process and provides the reviewer with +confidence that the submission will be of sufficient quality. + +Prospective contributors should consider the following: + +- Does the contribution have significant impact? Bug fixes to existing + functionality and new features requested by 100+ users qualify as + items of significant impact. +- Has the submission been well tested? Submissions with substantial code + change must include details about the testing procedure and results. +- Does the submission include blocking code? Moonraker is an asynchronous + application, thus blocking code must be avoided. +- If any dependencies are included, are they pure python? 
Many low-powered SBCs + running Armbian do not have prebuilt wheels and are not capable of building wheels + themselves, thus breaking updates on these systems. +- Does the submission change the API? If so, could the change potentially break + frontends using the API? +- Does the submission include updates to the documentation? + +When performing reviews these are the questions that will be asked during the +initial stages. #### New Module Contributions @@ -105,24 +123,23 @@ By making a contribution to this project, I certify that: ``` #### Code Style Python methods should be fully annotated. Variables should be annotated where -the type cannot be inferred. Moonraker uses the `mypy` static type checker for -code validation with the following options: +the type cannot be inferred. Moonraker uses `mypy` version 1.5.1 for static +type checking with the following options: - `--ignore-missing-imports` - `--follow-imports=silent` -No line in the source code should exceed 80 characters. Be sure there is no +No line in the source code should exceed 88 characters. Be sure there is no trailing whitespace. To validate code before submission one may use -`pycodestyle` with the following options: +`flake8` version 6.1.0 with the following options: - `--ignore=E226,E301,E302,E303,W503,W504` - - `--max-line-length=80` - - `--max-doc-length=80` + - `--max-line-length=88` + - `--max-doc-length=88` Generally speaking, each line in submitted documentation should also be no -longer than 80 characters, however there are situations where this isn't -possible, such as long hyperlinks or example return values. Documentation -isn't linted, so it +longer than 88 characters, however there are situations where this isn't +possible, such as long hyperlinks or example return values. -Don't peek into the member variables of another class. Use getters or +Avoid peeking into the member variables of another class. Use getters or properties to access object state. diff --git a/docs/doc-requirements.txt b/docs/doc-requirements.txt index 27b47d6..eb2937f 100644 --- a/docs/doc-requirements.txt +++ b/docs/doc-requirements.txt @@ -1,2 +1,2 @@ -mkdocs==1.3.0 -pymdown-extensions==9.1 +mkdocs-material==9.5.4 +compact_tables@git+https://github.com/Arksine/markdown-compact-tables@v1.0.0 diff --git a/docs/index.md b/docs/index.md index 77de29b..b797f16 100644 --- a/docs/index.md +++ b/docs/index.md @@ -2,7 +2,7 @@ Moonraker is a Python 3 based web server that exposes APIs with which client applications may use to interact with the 3D printing firmware -[Klipper](https://github.com/KevinOConnor/klipper). Communcation between +[Klipper](https://github.com/Klipper3d/klipper). Communication between the Klippy host and Moonraker is done over a Unix Domain Socket. Tornado is used to provide Moonraker's server functionality. @@ -14,7 +14,7 @@ Client developers may refer to the [Client API](web_api.md) documentation. Backend developers should refer to the -[contibuting](contributing.md) section for basic contribution +[contributing](contributing.md) section for basic contribution guidelines prior to creating a pull request. 
The [components](components.md) document provides a brief overview of how to create a component and interact with Moonraker's diff --git a/docs/installation.md b/docs/installation.md index 396b574..a47f13b 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -31,10 +31,10 @@ missing one or both, you can simply add the bare sections to `printer.cfg`: [display_status] [virtual_sdcard] -path: ~/gcode_files +path: ~/printer_data/gcodes ``` -### Enabling the Unix Socket +### Enabling Klipper's Unix Domain Socket Server After Klipper is installed it may be necessary to modify its `defaults` file in order to enable the Unix Domain Socket. Begin by opening the file in your @@ -69,12 +69,9 @@ KLIPPY_ARGS="/home/pi/klipper/klippy/klippy.py /home/pi/printer.cfg -l /tmp/klip the default LSB script. In this case, you need to modify the klipper.service file. -You may also want to take this opportunity to change the location of -printer.cfg to match Moonraker's `config_path` option (see the -[configuration document](configuration.md#primary-configuration) -for more information on the config_path). For example, if the `config_path` -is set to `~/printer_config`, your klipper defaults file might look -like the following: +You may also want to take this opportunity to configure `printer.cfg` and +`klippy.log` so they are located in Moonraker's `data_path`, for example: + ``` # Configuration for /etc/init.d/klipper @@ -82,14 +79,17 @@ KLIPPY_USER=pi KLIPPY_EXEC=/home/pi/klippy-env/bin/python -KLIPPY_ARGS="/home/pi/klipper/klippy/klippy.py /home/pi/printer_config/printer.cfg -l /tmp/klippy.log -a /tmp/klippy_uds" +KLIPPY_ARGS="/home/pi/klipper/klippy/klippy.py /home/pi/printer_data/config/printer.cfg -l /home/pi/printer_data/logs/klippy.log -a /tmp/klippy_uds" ``` -If necessary, create the config directory and move printer.cfg to it: +Moonraker's install script will create the data folder, however you +may wish to create it now and move `printer.cfg` to the correct +location, ie: ``` -cd ~ -mkdir printer_config -mv printer.cfg printer_config +mkdir ~/printer_data +mkdir ~/printer_data/logs +mkdir ~/printer_data/config +mv printer.cfg ~/printer_data/config ``` ### Installing Moonraker @@ -101,10 +101,15 @@ cd ~ git clone https://github.com/Arksine/moonraker.git ``` -Now is a good time to create [moonraker.conf](configuration.md). If you are -using the `config_path`, create it in the specified directory otherwise create -it in the HOME directory. The [sample moonraker.conf](./moonraker.conf) in -the `docs` directory may be used as a starting point. +The install script will attempt to create a basic configuration if +`moonraker.conf` does not exist at the expected location, however if you +prefer to have Moonraker start with a robust configuration you may create +it now. By default the configuration file should be located at +`$HOME/printer_data/config/moonraker.conf`, however the location of the +data path may be configured using the script's command line options. +The [sample moonraker.conf](./moonraker.conf) may be used as a starting +point, full details can be found in the +[confguration documentation](./configuration.md). 
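+
+As a point of reference, a bare minimum `moonraker.conf` might look like the
+following sketch. The values shown are illustrative and should be adjusted
+for your own network:
+
+```
+[server]
+host: 0.0.0.0
+port: 7125
+
+[authorization]
+trusted_clients:
+    192.168.1.0/24
+cors_domains:
+    *.home
+    http://my.mainsail.xyz
+```
+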
For a default installation run the following commands: ``` @@ -112,29 +117,40 @@ cd ~/moonraker/scripts ./install-moonraker.sh ``` -Or to install with `moonraker.conf` in the `config_path`: -``` -cd ~/moonraker/scripts -./install-moonraker.sh -f -c /home/pi/printer_config/moonraker.conf -``` - The install script has a few command line options that may be useful, particularly for those upgrading: -- `-r`: - Rebuilds the virtual environment for existing installations. - Sometimes this is necessary when a dependency has been added. - `-f`: Force an overwrite of Moonraker's systemd script. By default the the systemd script will not be modified if it exists. -- `-c /home/pi/moonraker.conf`: - Specifies the path to Moonraker's config file. The default location - is `/home//moonraker.conf`. When using this option to modify - an existing installation it is necessary to add `-f` as well. +- `-a `: + The installer uses this option to determine the name of the service + to install. If `-d` is not provided then this options will also be + used to determine the name of the data path folder. If omitted this + defaults to `moonraker`. +- `-d `: + Specifies the path to Moonraker's data folder. This folder organizes + files and directories used by moonraker. See the `Data Folder Structure` + section for details. If omitted this defaults to `$HOME/printer_data`. +- `-c ` + Specifies the path to Moonraker's configuation file. By default the + configuration is expected at `/config/moonraker.conf`. ie: + `/home/pi/printer_data/config/moonraker.conf`. +- `-l ` + Specifies the path to Moonraker's log file. By default Moonraker logs + to `/logs/moonraker.log`. ie: + `/home/pi/printer_data/logs/moonraker.log`. - `-z`: Disables `systemctl` commands during install (ie: daemon-reload, restart). This is useful for installations that occur outside of a standard environment where systemd is not running. +- `-x`: + Skips installation of [polkit rules](#policykit-permissions). This may be + necessary to install Moonraker on systems that do not have policykit + installed. +- `-s`: + Installs Moonraker's [speedup](#optional-speedups) Python packages in the + Python environment. Additionally, installation may be customized with the following environment variables: @@ -143,17 +159,20 @@ variables: - `MOONRAKER_REBUILD_ENV` - `MOONRAKER_FORCE_DEFAULTS` - `MOONRAKER_DISABLE_SYSTEMCTL` +- `MOONRAKER_SKIP_POLKIT` - `MOONRAKER_CONFIG_PATH` -- `MOONRAKER_LOG_PATH` +- `MOONAKER_LOG_PATH` +- `MOONRAKER_DATA_PATH` +- `MOONRAKER_SPEEDUPS` When the script completes it should start both Moonraker and Klipper. In -`/tmp/klippy.log` you should find the following entry: +`klippy.log` you should find the following entry: `webhooks client : Client info {'program': 'Moonraker', 'version': ''}` Now you may install a client, such as [Mainsail](https://github.com/mainsail-crew/mainsail) or -[Fluidd](https://github.com/cadriel/fluidd). +[Fluidd](https://github.com/fluidd-core/fluidd). !!! Note Moonraker's install script no longer includes the nginx dependency. @@ -162,42 +181,267 @@ Now you may install a client, such as debian/ubuntu distros). +### Data Folder Structure + +As mentioned previously, files and folders used by Moonraker are organized +in a primary data folder. The example below illustrates the folder +structure using the default data path of `$HOME/printer_data`. 
+
+```
+/home/pi/printer_data
+├── backup
+│   └── 20220822T202419Z
+│       ├── config
+│       │   └── moonraker.conf
+│       └── service
+│           └── moonraker.service
+├── certs
+│   ├── moonraker.cert (optional)
+│   └── moonraker.key (optional)
+├── config
+│   ├── moonraker.conf
+│   └── printer.cfg
+├── database
+│   └── moonraker-sql.db
+├── gcodes
+│   ├── test_gcode_one.gcode
+│   └── test_gcode_two.gcode
+├── logs
+│   ├── klippy.log
+│   └── moonraker.log
+├── systemd
+│   └── moonraker.env
+├── moonraker.secrets (optional)
+└── moonraker.asvc
+```
+
+If it is not desirable for the files and folders to exist in these specific
+locations it is acceptable to use symbolic links. For example, it is common
+for the gcode folder to be located at `$HOME/gcode_files`. Rather than
+reconfigure Klipper's `virtual_sdcard` it may be desirable to create a
+`gcodes` symbolic link in the data path pointing to this location.
+
+!!! Note
+    It is still possible to directly configure the paths to the configuration
+    and log files if you do not wish to use the default file names of
+    `moonraker.conf` and `moonraker.log`.
+
+When Moonraker attempts to update legacy installations, symbolic links
+are used to avoid an unrecoverable error. Additionally a `backup`
+folder is created which contains the prior configuration and/or
+systemd service unit, ie:
+
+```
+/home/pi/printer_data
+├── backup
+│   └── 20220822T202419Z
+│       ├── config
+│       │   ├── include
+│       │   │   ├── extras.conf
+│       │   │   ├── power.conf
+│       │   │   └── updates.conf
+│       │   └── moonraker.conf
+│       └── service
+│           └── moonraker.service
+├── certs
+│   ├── moonraker.cert -> /home/pi/certs/certificate.pem
+│   └── moonraker.key -> /home/pi/certs/key.pem
+├── config -> /home/pi/klipper_config
+├── database -> /home/pi/.moonraker_database
+├── gcodes -> /home/pi/gcode_files
+├── logs -> /home/pi/logs
+├── systemd
+│   └── moonraker.env
+└── moonraker.secrets -> /home/pi/moonraker_secrets.ini
+```
+
+!!! Warning
+    The gcode and config paths should not contain symbolic links
+    that result in an "overlap" of one another. Moonraker uses
+    inotify to watch files in each of these folders and takes action
+    when a file change is detected. The action taken depends on the
+    "root" folder, thus it is important that they be distinct.
+
+### The systemd service file
+
+The default installation will create `/etc/systemd/system/moonraker.service`.
+Below is a common example of a service file, installed on a Raspberry Pi:
+
+```ini
+# systemd service file for moonraker
+[Unit]
+Description=API Server for Klipper SV1
+Requires=network-online.target
+After=network-online.target
+
+[Install]
+WantedBy=multi-user.target
+
+[Service]
+Type=simple
+User=pi
+SupplementaryGroups=moonraker-admin
+RemainAfterExit=yes
+WorkingDirectory=/home/pi/moonraker
+EnvironmentFile=/home/pi/printer_data/systemd/moonraker.env
+ExecStart=/home/pi/moonraker-env/bin/python $MOONRAKER_ARGS
+Restart=always
+RestartSec=10
+```
+
+Following are some items to take note of:
+
+- The `Description` contains a string that Moonraker uses to validate
+  the version of the service file (notice `SV1` at the end, ie: Service
+  Version 1).
+- The `moonraker-admin` supplementary group is used to grant policykit
+  permissions.
+- The `EnvironmentFile` field contains Moonraker's arguments. See the
+  [environment file section](#the-environment-file) for details.
+- The `ExecStart` field begins with the python executable, followed by
+  the environment variable `MOONRAKER_ARGS`. This variable is set in
+  the environment file.
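+
+If further customization of the service is required, a systemd drop-in
+override avoids editing the installed unit directly. The following is a
+sketch using standard systemd tooling; the setting shown is only an
+example:
+
+```
+sudo systemctl edit moonraker
+```
+
+```ini
+# /etc/systemd/system/moonraker.service.d/override.conf
+[Service]
+# Example: increase the delay between automatic restart attempts
+RestartSec=20
+```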
+
+
### Command line usage

This section is intended for users that need to write their own installation
script. Detailed are the command line arguments available to Moonraker:

```
-usage: moonraker.py [-h] [-c <configfile>] [-l <logfile>] [-n]
+usage: moonraker.py [-h] [-d <data path>] [-c <configfile>] [-l <logfile>] [-u <unixsocket>] [-n] [-v] [-g] [-o]

Moonraker - Klipper API Server

-optional arguments:
+options:
  -h, --help            show this help message and exit
+  -d <data path>, --datapath <data path>
+                        Location of Moonraker Data File Path
  -c <configfile>, --configfile <configfile>
-                        Location of moonraker configuration file
+                        Path to Moonraker's configuration file
  -l <logfile>, --logfile <logfile>
-                        log file name and location
+                        Path to Moonraker's log file
+  -u <unixsocket>, --unixsocket <unixsocket>
+                        Path to Moonraker's unix domain socket
  -n, --nologfile       disable logging to a file
+  -v, --verbose         Enable verbose logging
+  -g, --debug           Enable Moonraker debug features
+  -o, --asyncio-debug   Enable asyncio debug flag
```

The default configuration is:

-- config file path - `~/moonraker.conf`
-- log file path - `/tmp/moonraker.log`
-- logging to a file is enabled
-If one needs to start moonraker without generating a log file, the
+- `data path`: `$HOME/printer_data`
+- `config file`: `$HOME/printer_data/config/moonraker.conf`
+- `log file`: `$HOME/printer_data/logs/moonraker.log`
+- `unix socket`: `$HOME/printer_data/comms/moonraker.sock`
+- logging to a file is enabled
+- Verbose logging is disabled
+- Moonraker's debug features are disabled
+- The asyncio debug flag is set to false
+
+!!! Tip
+    While the `data path` option may be omitted it is recommended that it
+    always be included for new installations. This allows Moonraker
+    to differentiate between new and legacy installations.
+
+!!! Warning
+    Moonraker's `--unixsocket` option should not be confused with Klipper's
+    `--api-server` option. The `unixsocket` option for Moonraker specifies
+    the path where Moonraker will create a unix domain socket that serves its
+    JSON-RPC API.
+
+If it is necessary to run Moonraker without logging to a file the
`-n` option may be used, for example:
```
-~/moonraker-env/bin/python ~/moonraker/moonraker/moonraker.py -n -c /path/to/moonraker.conf
+~/moonraker-env/bin/python ~/moonraker/moonraker/moonraker.py -d ~/printer_data -n
```
-In general it is not recommended to install moonraker with this option.
-While moonraker will still log to stdout, all requests for support must
-be accompanied by moonraker.log.
-These options may be changed by editing
-`/etc/systemd/system/moonraker.service`. The `install-moonraker.sh` script
-may also be used to modify the config file location.
+!!! Tip
+    It is not recommended to install Moonraker with file logging disabled.
+    While moonraker will still log to stdout, all requests for support
+    must be accompanied by `moonraker.log`.
+
+Each command line argument has an associated environment variable that may
+be used to specify options in place of the command line.
+
+- `MOONRAKER_DATA_PATH="<data path>"`: equivalent to `-d <data path>`
+- `MOONRAKER_CONFIG_PATH="<config file>"`: equivalent to `-c <config file>`
+- `MOONRAKER_LOG_PATH="<log file>"`: equivalent to `-l <log file>`
+- `MOONRAKER_UDS_PATH="<unix socket>"`: equivalent to `-u <unix socket>`
+- `MOONRAKER_DISABLE_FILE_LOG="y"`: equivalent to `-n`
+- `MOONRAKER_VERBOSE_LOGGING="y"`: equivalent to `-v`
+- `MOONRAKER_ENABLE_DEBUG="y"`: equivalent to `-g`
+- `MOONRAKER_ASYNCIO_DEBUG="y"`: equivalent to `-o`
+
+!!! Note
+    Command line arguments take priority over environment variables when
+    both are specified.
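+
+For example, the following two invocations are equivalent (the paths shown
+assume a default installation):
+
+```
+# Using command line arguments
+~/moonraker-env/bin/python ~/moonraker/moonraker/moonraker.py -d ~/printer_data -v
+
+# Using the equivalent environment variables
+MOONRAKER_DATA_PATH="/home/pi/printer_data" MOONRAKER_VERBOSE_LOGGING="y" \
+    ~/moonraker-env/bin/python ~/moonraker/moonraker/moonraker.py
+```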
+
+[The environment file](#the-environment-file) may be used to set Moonraker's
+command line arguments and/or environment variables.
+
+### The environment file
+
+The environment file, `moonraker.env`, is created in the data path during
+installation. A default installation's environment file will contain the path
+to `moonraker.py` and the data path option, ie:
+
+```
+MOONRAKER_DATA_PATH="/home/pi/printer_data"
+MOONRAKER_ARGS="-m moonraker"
+PYTHONPATH="/home/pi/moonraker"
+```
+
+A legacy installation converted to the updated flexible service unit
+might contain the following. Note that this example uses command line
+arguments instead of environment variables; either would be acceptable:
+
+```
+MOONRAKER_ARGS="/home/pi/moonraker/moonraker/moonraker.py -d /home/pi/printer_data -c /home/pi/klipper_config/moonraker.conf -l /home/pi/klipper_logs/moonraker.log"
+```
+
+Post installation it is simple to customize
+[arguments and/or environment variables](#command-line-usage)
+supplied to Moonraker by editing this file and restarting the service.
+The following example sets a custom config file path, log file path,
+enables verbose logging, and enables debug features:
+
+```
+MOONRAKER_DATA_PATH="/home/pi/printer_data"
+MOONRAKER_CONFIG_PATH="/home/pi/printer_data/config/moonraker-1.conf"
+MOONRAKER_LOG_PATH="/home/pi/printer_data/logs/moonraker-1.log"
+MOONRAKER_VERBOSE_LOGGING="y"
+MOONRAKER_ENABLE_DEBUG="y"
+MOONRAKER_ARGS="-m moonraker"
+PYTHONPATH="/home/pi/moonraker"
+```
+
+### Optional Speedups
+
+Moonraker supports two optional Python packages that can be used to reduce
+its CPU load:
+
+- [msgspec](https://github.com/jcrist/msgspec): Replaces the builtin `json`
+  encoder/decoder. Requires Python >= 3.8.
+- [uvloop](https://github.com/MagicStack/uvloop/): Replaces the default asyncio
+  eventloop implementation.
+
+If these packages are installed in Moonraker's python environment Moonraker
+will load them. For existing installations this can be done manually with a
+command like:
+
+```
+~/moonraker-env/bin/pip install -r ~/moonraker/scripts/moonraker-speedups.txt
+```
+
+After installing the speedup packages it is possible to revert to the
+default implementations by specifying one or both of the following
+environment variables in [moonraker.env](#the-environment-file):
+
+- `MOONRAKER_ENABLE_MSGSPEC="n"`
+- `MOONRAKER_ENABLE_UVLOOP="n"`
+

### PolicyKit Permissions

@@ -267,6 +511,37 @@ enable_system_updates: False
Previously installed PolicyKit rules can be removed by running
`set-policykit-rules.sh -c`

+### Completing Privileged Upgrades
+
+At times an update to Moonraker may require a change to the systemd service
+file, which requires sudo permission to complete. Moonraker will present
+an announcement when it needs the user's password, and the process can
+be completed by entering the password through Moonraker's landing page.
+
+Some users prefer not to provide these credentials via the web browser and
+instead would like to do so over ssh. These users may run
+`scripts/finish-upgrade.sh` to provide Moonraker the necessary credentials:
+
+```
+Utility to complete privileged upgrades for Moonraker
+
+usage: finish-upgrade.sh [-h] [-a <address>] [-p <port>] [-k <api key>]
+
+optional arguments:
+  -h              show this message
+  -a <address>    address for Moonraker instance
+  -p <port>       port for Moonraker instance
+  -k <api key>    API Key for authorization
+```
+
+By default the script will connect to a Moonraker instance on the local
+machine at port 7125. If the instance is not bound to localhost or is
+bound to another port the user may specify a custom address and port.
+
+The API Key (`-k`) option is only necessary if the localhost is not authorized
+to access Moonraker's API.
+

### Retrieving the API Key

Some clients may require an API Key to connect to Moonraker. After the
@@ -290,6 +565,86 @@ Retrieve the API Key via the browser from a trusted client:

{"result": "8ce6ae5d354a4365812b83140ed62e4b"}

+### Database Backup and Restore
+
+Moonraker stores persistent data using an SQLite database. By default
+the database file is located at `<data_path>/database/moonraker-sql.db`.
+API Endpoints are available to back up and restore the database. All
+backups are stored at `<data_path>/backup/database/` and
+restored from the same location. Database files may contain sensitive
+information, therefore they are not served by Moonraker. Another protocol
+such as SCP, SMB, etc. is required to transfer a backup off of the host.
+
+Alternatively it is possible to perform a manual backup by copying the
+existing database file when the Moonraker service has been stopped.
+Restoration can be performed by stopping the Moonraker service and
+overwriting the existing database with the backup.
+
+#### LMDB Database (deprecated)
+
+Previous versions of Moonraker used an [LMDB database](http://www.lmdb.tech/doc/)
+for persistent storage of procedurally generated data. LMDB database files are
+platform dependent, and thus cannot be easily transferred between different
+machines. A file generated on a Raspberry Pi cannot be directly transferred
+to an x86 machine. Likewise, a file generated on a 32-bit version of Linux
+cannot be transferred to a 64-bit machine.
+
+Moonraker includes two scripts, `backup-database.sh` and `restore-database.sh`,
+to help facilitate database backups and transfers.
+
+```shell
+~/moonraker/scripts/backup-database.sh -h
+Moonraker Database Backup Utility
+
+usage: backup-database.sh [-h] [-e <python env path>] [-d <database path>] [-o <output file>]
+
+optional arguments:
+  -h                    show this message
+  -e <python env path>  Moonraker Python Environment
+  -d <database path>    Moonraker LMDB database to backup
+  -o <output file>      backup file to save to
+```
+
+```shell
+~/moonraker/scripts/restore-database.sh -h
+Moonraker Database Restore Utility
+
+usage: restore-database.sh [-h] [-e <python env path>] [-d <database path>] [-i <input file>]
+
+optional arguments:
+  -h                    show this message
+  -e <python env path>  Moonraker Python Environment
+  -d <database path>    Moonraker LMDB database path to restore to
+  -i <input file>       backup file to restore from
+```
+
+Both scripts include default values for the Moonraker Environment and Database
+Path. These are `$HOME/moonraker-env` and `$HOME/printer_data/database`
+respectively. The `backup` script defaults the output value to
+`$HOME/database.backup`. The `restore` script requires that the user specify
+the input file using the `-i` option.
+
+To back up a database for a default Moonraker installation the user may ssh
+into the machine and run the following command:
+
+```shell
+~/moonraker/scripts/backup-database.sh -o ~/moonraker-database.backup
+```
+
+And to restore the database:
+```shell
+sudo service moonraker stop
+~/moonraker/scripts/restore-database.sh -i ~/moonraker-database.backup
+sudo service moonraker start
+```
+
+The backup file contains [cdb like](https://manpages.org/cdb/5) entries
+for each key/value pair in the database.
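+
+Each entry in the backup takes the general cdb style form sketched below
+(a schematic, not literal file contents; see the linked manpage for the
+full format definition):
+
+```
++<key length>,<value length>:<base64 key>-><base64 value>
+```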
All keys and values are base64 +encoded, however the data is not encrypted. Moonraker's database may +contain credentials and other sensitive information, so users should treat +this file accordingly. It is not recommended to keep backups in any folder +served by Moonraker. + ### Recovering a broken repo Currently Moonraker is deployed using `git`. Without going into the gritty @@ -327,16 +682,44 @@ git clone https://github.com/Klipper3d/klipper.git sudo systemctl restart klipper ``` -### Additional Notes +### Debug options for developers -- Make sure that Moonraker and Klipper both have read and write access to the - directory set in the `path` option for the `[virtual_sdcard]` in - `printer.cfg`. -- Upon first starting Moonraker is not aware of the gcode file path, thus - it cannot serve gcode files, add directories, etc. After Klippy enters - the "ready" state it sends Moonraker the gcode file path. - Once Moonraker receives the path it will retain it regardless of Klippy's - state, and update it if the path is changed in printer.cfg. +Moonraker accepts several command line arguments that can be used to +assist both front end developers and developers interested in extending +Moonraker. -Please see [configuration.md](configuration.md) for details on how to -configure moonraker.conf. +- The `-v` (`--verbose`) argument enables verbose logging. This includes + logging that reports information on all requests received and responses. +- The `-g` (`--debug`) argument enables Moonraker's debug features, + including: + - Debug endpoints + - The `update_manager` will bypass strict git repo validation, allowing + updates from unofficial remotes and repos in a `detached HEAD` state. +- The `-o` (`--asyncio-debug`) argument enables the asyncio debug flag. This + will substantially increase logging and is intended for low level debugging + of the asyncio event loop. + +!!! Warning + The debug option should not be enabled in production environments. The + database debug endpoints grant read/write access to all namespaces, + including those typically exclusive to Moonraker. Items such as user + credentials are exposed. + +Installations using systemd can enable debug options by editing `moonraker.env` +via ssh: + +``` +nano ~/printer_data/systemd/moonraker.env +``` + +Once the file is open, append the debug option(s) (`-v` and `-g` in this example) to the +value of `MOONRAKER_ARGS`: +``` +MOONRAKER_ARGS="/home/pi/moonraker/moonraker/moonraker.py -d /home/pi/printer_data -c /home/pi/klipper_config/moonraker.conf -l /home/pi/klipper_logs/moonraker.log -v -g" +``` + +Save the file, exit the text editor, and restart the Moonraker service: + +``` +sudo systemctl restart moonraker +``` diff --git a/docs/moonraker.conf b/docs/moonraker.conf index c2d7822..9951da2 100644 --- a/docs/moonraker.conf +++ b/docs/moonraker.conf @@ -6,8 +6,7 @@ [server] # Bind server defaults of 0.0.0.0, port 7125 -enable_debug_logging: True -config_path: ~/printer_config +enable_debug_logging: False [authorization] enabled: True diff --git a/docs/printer_objects.md b/docs/printer_objects.md index 5b00338..645594f 100644 --- a/docs/printer_objects.md +++ b/docs/printer_objects.md @@ -47,7 +47,7 @@ The `gcode_move` object reports the current gcode state: - `speed_factor`: AKA "feedrate", this is the current speed multiplier - `speed`: The current gcode speed in mm/s. - `extrude_factor`: AKA "extrusion multiplier". 
-- `absolute_coorinates`: true if the machine axes are moved using +- `absolute_coordinates`: true if the machine axes are moved using absolute coordinates, false if using relative coordinates. - `absolute_extrude`: true if the extruder is moved using absolute coordinates, false if using relative coordinates. @@ -236,7 +236,11 @@ The `virtual_sdcard` object reports the state of the virtual sdcard: "print_duration": 0.0, "filament_used": 0.0, "state": "standby", - "message": "" + "message": "", + "info": { + "total_layer": null, + "current_layer": null + } } ``` The `print_stats` object reports `virtual_sdcard` print state: @@ -260,6 +264,17 @@ The `print_stats` object reports `virtual_sdcard` print state: - `"error"` - Note that if an error is detected the print will abort - `message`: If an error is detected, this field contains the error message generated. Otherwise it will be a null string. +- `info`: This is a dict containing information about the print provided by the + slicer. Currently this is limited to the `total_layer` and `current_layer` values. + Note that these values are set by the + [SET_PRINT_STATS_INFO](https://www.klipper3d.org/G-Codes.html#set_print_stats_info) + gcode command. It is necessary to configure the slicer to include this command + in the print. `SET_PRINT_STATS_INFO TOTAL_LAYER=total_layer_count` should + be called in the slicer's "start gcode" to initalize the total layer count. + `SET_PRINT_STATS_INFO CURRENT_LAYER=current_layer` should be called in the + slicer's "on layer change" gcode. The user must substitute the + `total_layer_count` and `current_layer` with the appropriate + "placeholder syntax" for the slicer. !!! Note After a print has started all of the values above will persist until diff --git a/docs/src/css/extras.css b/docs/src/css/extras.css new file mode 100644 index 0000000..1dbffb7 --- /dev/null +++ b/docs/src/css/extras.css @@ -0,0 +1,7 @@ +[data-md-color-scheme="slate"] { + --md-table-color: rgb(20, 20, 20); +} + +thead th { + background-color: var(--md-table-color) +} diff --git a/docs/user_changes.md b/docs/user_changes.md index 3a1c66c..058fe9d 100644 --- a/docs/user_changes.md +++ b/docs/user_changes.md @@ -1,6 +1,81 @@ ## -This file will track changes that require user intervention, -such as a configuration change or a reinstallation. +This file tracks configuration changes and deprecations. Additionally +changest to Moonraker that require user intervention will be tracked +here. + +### December 24th 2023 +- The `gpio` component no longer depends on `libgpiod`. Instead, + Moonraker now uses the [python-periphery](https://github.com/vsergeev/python-periphery) + library to manage GPIOs. This comes with several benefits: + - Distributions that do no ship with `libgpiod` will not fail during + installation if the `python3-libgpiod` package isn't present. + - Distributions with a Kernel Version of 5.5 or higher support bias + flags (ie: pull up or pull down). Previously this functionality + was tied to the `libgpiod` version. Specifically, Debian Buster + ships with a Kernel that supports bias, however the `libgpiod` + version does not. + - Version 2.0+ of `libgpiod` includes dramatic API changes that are + wholly incompatible with prior versions. Therefore maintaining + future versions would effectively require supporting two APIs. +- The `[button]` component now includes a `debounce_period` option. + This addition is the result of a behavior change in how gpio state + changes are debounced. 
+  Debouncing will now delay the event by the
+  time specified in the `debounce_period`. Additional state changes
+  received during this delay will not trigger a button event. The
+  `[button]` module retains the `minimum_event_time` option which will
+  ignore events shorter than the specified time.
+
+### July 18th 2023
+- The following changes have been made to `[update_manager <name>]`
+  extensions of the `git_repo` type:
+    - The `env` option has been deprecated. New configurations should
+      use the `virtualenv` option in its place.
+    - The `install_script` option has been deprecated. New configurations
+      should use the `system_dependencies` option to specify system package
+      dependencies.
+- Configuration options for `[spoolman]` have been added
+- Configuration options for `[sensor]` have been added
+
+### February 8th 2023
+- The `provider` option in the `[machine]` section no longer accepts
+  `supervisord` as an option. It has been renamed to `supervisord_cli`.
+
+### January 2nd 2023
+- The `bound_service` option for `[power]` devices has been deprecated in
+  favor of `bound_services`. Currently this change does not generate a
+  warning as it can be reliably resolved internally.
+
+### October 14th 2022
+- The systemd service file is now versioned. Moonraker can now detect when
+  the file is out of date and automate corrections as necessary.
+- Moonraker's command line options are now specified in an environment file,
+  making it possible to change these options without modifying the service file
+  and reloading the systemd daemon. The default location of the environment
+  file is `~/printer_data/systemd/moonraker.env`.
+- Moonraker now manages files and folders in a primary data folder supplied
+  by the `-d` (`--data-path`) command line option. As a result, the following
+  options have been deprecated:
+    - `ssl_certificate_path` in `[server]`
+    - `ssl_key_path` in `[server]`
+    - `database_path` in `[database]`
+    - `config_path` in `[file_manager]`
+    - `log_path` in `[file_manager]`
+    - `secrets_path` in `[secrets]`
+- Debugging options are now supplied to Moonraker via the command line.
+  The `-v` (`--verbose`) option enables verbose logging, while the `-g`
+  (`--debug`) option enables debug features, including access to debug
+  endpoints and the repo debug feature in `update_manager`. As a result,
+  the following options are deprecated:
+    - `enable_debug_logging` in `[server]`
+    - `enable_repo_debug` in `[update_manager]`
+
+### July 27th 2022
+- The behavior of `[include]` directives has changed. Included files
+  are now parsed as they are encountered. If sections are duplicated
+  options in the last section parsed take precedence. If you are
+  using include directives to override configuration in `moonraker.conf`
+  the directives should be moved to the bottom of the file.
+- Configuration files now support inline comments.

### April 6th 2022
- The ability to configure core components in the `[server]`section
diff --git a/docs/web_api.md b/docs/web_api.md
index 9f7c085..fff5b98 100644
--- a/docs/web_api.md
+++ b/docs/web_api.md
@@ -1,8 +1,8 @@
# 

Most API methods are supported over the Websocket, HTTP, and MQTT
-(if configured) transports. File Transfer and `/access` requests are only
-available over HTTP. The Websocket is required to receive server generated
+(if configured) transports. File Transfer requests (upload and download)
+are exclusive to HTTP. The Websocket is required to receive server generated
events such as gcode responses. 
For information on how to set up the Websocket, please see the Appendix at the end of this document. @@ -74,7 +74,7 @@ of a request. ### JSON-RPC API Overview The Websocket and MQTT transports use the [JSON-RPC 2.0](https://jsonrpc.org) -protocol. The Websocket transmits objects in a text frame, whereas MQTT +protocol. The Websocket transmits JSON-RPC objects in a text frame, whereas MQTT transmits them in the payload of a topic. When MQTT is configured Moonraker subscribes to an api request topic. After an api request is processed Moonraker publishes the return value to a response topic. By default these topics are @@ -83,6 +83,9 @@ publishes the return value to a response topic. By default these topics are unique identifier for each instance of Moonraker and defaults to the machine's host name. +In addition, most JSON-RPC methods are available via the +[JSONRPC HTTP request](#json-rpc-over-http). + An encoded request should look something like: ```json { @@ -135,6 +138,78 @@ test interface with example usage for most of the requests below. It also includes a basic JSON-RPC implementation that uses promises to return responses and errors (see json-rpc.js). +### Websocket Connections + +#### Primary websocket + +The primary websocket supports Moonraker's JSON-RPC API. Most applications that +desire a websocket connection will make use of the primary websocket. + +The primary websocket is available at: +``` + ws://host_or_ip:port/websocket` +``` + +The primary websocket will remain connected until the application disconnects +or Moonraker is shutdown. + +#### Bridge websocket + +The "bridge" websocket provides a near direct passthrough to Klipper's API +Server. Klipper uses its own RPC protocol, which is effectively a simplified +version of the JSON-RPC specification. Developers should refer to +[Klipper's API documentation](https://www.klipper3d.org/API_Server.html) +for details on the protocol and available APIs. + +!!! Note + The bridge websocket is described as "near direct passthrough" because + Moonraker handles the ETX (`0x03`) terminator internally. Applications + can expect to receive complete JSON encoded messages in a text frame + without the ETX terminator. Likewise applications should send JSON encoded + messages without the ETX terminator. Messages may be sent using either + text frames or binary frames. + +The bridge websocket provides access to diagnostic APIs that are not generally +suitable for Moonraker's primary connection. These requests stream a +substantial amount of data; bridge connections allow Moonraker to avoid +decoding and re-encoding this data, reducing CPU load on the host. The "dump" +requests, such as `motion_report/dump_stepper` and `adxl345/dump_adxl345`, are +examples of APIs that should make use of the bridge websocket. + +The bridge websocket is available at: +``` +ws://host_or_ip:port/klippysocket +``` + +The availability of bridge connections depends on Klippy's availablility. +If Klippy is not running or its API server is not enabled then a bridge +websocket connection cannot be established. Established bridge connections +will close when Klippy is shutdown or restarted. Such connections will also +be closed if Moonraker is restarted or shutdown. + +!!! Note + If JWT or API Key authentication is required the application must use a + [oneshot token](#generate-a-oneshot-token) when connecting to a bridge + socket. Since Moonraker does not decode bridge requests it is not possible + to authenticate post connection. 
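+
+As a rough sketch, a bridge connection from Python might look like the
+following. This example uses the third party `websockets` library and
+assumes Klipper's API server is enabled with no authentication required;
+the request format follows Klipper's API server documentation:
+
+```python
+import asyncio
+import json
+
+import websockets  # third party: pip install websockets
+
+
+async def main() -> None:
+    # Connect to Moonraker's bridge socket (host and port are examples)
+    async with websockets.connect("ws://localhost:7125/klippysocket") as ws:
+        # A Klipper style request; no ETX terminator is needed over the bridge
+        await ws.send(json.dumps({"id": 100, "method": "info", "params": {}}))
+        response = json.loads(await ws.recv())
+        print(response)
+
+
+asyncio.run(main())
+```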
+ +### Unix Socket Connection + +All JSON-RPC APIs available over the Websocket transport are also available +over the Unix Domain Socket connection. Moonraker creates the socket file at +`/comms/moonraker.sock` (ie: `~/printer_data/comms/moonraker.sock`). +The Unix Socket expects UTF-8 encoded JSON-RPC byte strings. Each JSON-RPC +request must be terminated with an ETX character (`0x03`). + +The Unix Socket is desirable for front ends and extensions running on the +local machine as authentication is not necessary. There should be a small +performance improvement due to the simplified transport protocol, however +the impact of this is likely negligible. + +The `moontest` repo contains a +[python script](https://github.com/Arksine/moontest/blob/master/scripts/unix_socket_test.py) +to test comms over the unix socket. + ### Jinja2 Template API Calls Some template options in Moonraker's configuration, such as those in the @@ -167,19 +242,453 @@ on_release: payload="Button Released") %} ``` -### Printer Administration +### Server Administration + +#### Query Server Info +HTTP request: +```http +GET /server/info +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.info", + "id": 9546 +} +``` +Returns: + +An object containing various fields that report server state. + +```json +{ + "klippy_connected": true, + "klippy_state": "ready", + "components": [ + "database", + "file_manager", + "klippy_apis", + "machine", + "data_store", + "shell_command", + "proc_stats", + "history", + "octoprint_compat", + "update_manager", + "power" + ], + "failed_components": [], + "registered_directories": ["config", "gcodes", "config_examples", "docs"], + "warnings": [ + "Invalid config option 'api_key_path' detected in section [authorization]. Remove the option to resolve this issue. In the future this will result in a startup error.", + "Unparsed config section [fake_section] detected. This may be the result of a component that failed to load. In the future this will result in a startup error." + ], + "websocket_count": 2, + "moonraker_version": "v0.7.1-105-ge4f103c", + "api_version": [1, 0, 0], + "api_version_string": "1.0.0" +} +``` +!!! warning + This object also includes `plugins` and `failed_plugins` fields that + are now deprecated. They duplicate the information in + `components` and `failed_components`, and will be removed in the future. + +Note that `klippy_state` will match the `state` value received from +`/printer/info`. The `klippy_connected` item tracks the state of the +unix domain socket connect to Klippy. The `components` key will return a list +of enabled components. This can be used by clients to check if an optional +component is available. Optional components that do not load correctly will +not prevent the server from starting, thus any components that failed to load +will be reported in the `failed_components` field. + +The `websocket_count` field reports the total number of connected websockets. + +#### Get Server Configuration +HTTP request: +```http +GET /server/config +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.config", + "id": 5616 +} +``` +Returns: + +An object containing the server's full configuration. Note that +this includes auxiliary configuration sections not part of `moonraker.conf`, +for example the `update_manager static debian moonraker` section. +Options not specified in `moonraker.conf` with default values are also +included. 
+ +```json +{ + "config": { + "server": { + "host": "0.0.0.0", + "port": 7125, + "ssl_port": 7130, + "enable_debug_logging": true, + "enable_asyncio_debug": false, + "klippy_uds_address": "/tmp/klippy_uds", + "max_upload_size": 210, + "ssl_certificate_path": null, + "ssl_key_path": null + }, + "dbus_manager": {}, + "database": { + "database_path": "~/.moonraker_database", + "enable_database_debug": false + }, + "file_manager": { + "enable_object_processing": true, + "queue_gcode_uploads": true, + "config_path": "~/printer_config", + "log_path": "~/logs" + }, + "klippy_apis": {}, + "machine": { + "provider": "systemd_dbus" + }, + "shell_command": {}, + "data_store": { + "temperature_store_size": 1200, + "gcode_store_size": 1000 + }, + "proc_stats": {}, + "job_state": {}, + "job_queue": { + "load_on_startup": true, + "automatic_transition": false, + "job_transition_delay": 2, + "job_transition_gcode": "\nM118 Transitioning to next job..." + }, + "http_client": {}, + "announcements": { + "dev_mode": false, + "subscriptions": [] + }, + "authorization": { + "login_timeout": 90, + "force_logins": false, + "cors_domains": [ + "*.home", + "http://my.mainsail.xyz", + "http://app.fluidd.xyz", + "*://localhost:*" + ], + "trusted_clients": [ + "192.168.1.0/24" + ] + }, + "zeroconf": {}, + "octoprint_compat": { + "enable_ufp": true, + "flip_h": false, + "flip_v": false, + "rotate_90": false, + "stream_url": "/webcam/?action=stream", + "webcam_enabled": true + }, + "history": {}, + "secrets": { + "secrets_path": "~/moonraker_secrets.ini" + }, + "mqtt": { + "address": "eric-work.home", + "port": 1883, + "username": "{secrets.mqtt_credentials.username}", + "password_file": null, + "password": "{secrets.mqtt_credentials.password}", + "mqtt_protocol": "v3.1.1", + "instance_name": "pi-debugger", + "default_qos": 0, + "status_objects": { + "webhooks": null, + "toolhead": "position,print_time", + "idle_timeout": "state", + "gcode_macro M118": null + }, + "api_qos": 0, + "enable_moonraker_api": true + }, + "template": {} + }, + "orig": { + "DEFAULT": {}, + "server": { + "enable_debug_logging": "True", + "max_upload_size": "210" + }, + "file_manager": { + "config_path": "~/printer_config", + "log_path": "~/logs", + "queue_gcode_uploads": "True", + "enable_object_processing": "True" + }, + "machine": { + "provider": "systemd_dbus" + }, + "announcements": {}, + "job_queue": { + "job_transition_delay": "2.", + "job_transition_gcode": "\nM118 Transitioning to next job...", + "load_on_startup": "True" + }, + "authorization": { + "trusted_clients": "\n192.168.1.0/24", + "cors_domains": "\n*.home\nhttp://my.mainsail.xyz\nhttp://app.fluidd.xyz\n*://localhost:*" + }, + "zeroconf": {}, + "octoprint_compat": {}, + "history": {}, + "secrets": { + "secrets_path": "~/moonraker_secrets.ini" + }, + "mqtt": { + "address": "eric-work.home", + "port": "1883", + "username": "{secrets.mqtt_credentials.username}", + "password": "{secrets.mqtt_credentials.password}", + "enable_moonraker_api": "True", + "status_objects": "\nwebhooks\ntoolhead=position,print_time\nidle_timeout=state\ngcode_macro M118" + } + }, + "files": [ + { + "filename": "moonraker.conf", + "sections": [ + "server", + "file_manager", + "machine", + "announcements", + "job_queue", + "authorization", + "zeroconf", + "octoprint_compat", + "history", + "secrets" + ] + }, + { + "filename": "include/extras.conf", + "sections": [ + "mqtt" + ] + } + ] +} +``` + +#### Request Cached Temperature Data +HTTP request: +```http +GET /server/temperature_store?include_monitors=false 
+``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.temperature_store", + "params": { + "include_monitors": false + }, + "id": 2313 +} +``` + +Parameters: + +- `include_monitors`: _Optional, defaults to `false`._ When set to `true` + the response will include sensors reported as `temperature monitors` by + Klipper. A temperature monitor may report `null` values in the `temperatures` + field, applications should be sure that they are modified to handle this + condition before setting `inlcude_monitors` to `true`. + +Returns: + +An object where the keys are the available temperature sensor names, and with +the value being an array of stored temperatures. The array is updated every +1 second by default, containing a total of 1200 values (20 minutes). The +array is organized from oldest temperature to most recent (left to right). +Note that when the host starts each array is initialized to 0s. +```json +{ + "extruder": { + "temperatures": [21.05, 21.12, 21.1, 21.1, 21.1], + "targets": [0, 0, 0, 0, 0], + "powers": [0, 0, 0, 0, 0] + }, + "temperature_fan my_fan": { + "temperatures": [21.05, 21.12, 21.1, 21.1, 21.1], + "targets": [0, 0, 0, 0, 0], + "speeds": [0, 0, 0, 0, 0] + }, + "temperature_sensor my_sensor": { + "temperatures": [21.05, 21.12, 21.1, 21.1, 21.1] + } +} +``` + +#### Request Cached GCode Responses +HTTP request: +```http +GET /server/gcode_store?count=100 +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.gcode_store", + "params": { + "count": 100 + }, + "id": 7643 +} +``` + +The `count` argument is optional, limiting number of returned items +in the response to the value specified. If omitted, the entire gcode +store will be returned (up to 1000 responses). + +Returns: + +An object with the field `gcode_store` that contains an array +of objects. Each object will contain `message`, `time`, and +`type` fields. The `time` field is reported in Unix Time. +The `type` field will either be `command` or `response`. +```json +{ + "gcode_store": [ + { + "message": "FIRMWARE_RESTART", + "time": 1615832299.1167388, + "type": "command" + }, + { + "message": "// Klipper state: Ready", + "time": 1615832309.9977088, + "type": "response" + }, + { + "message": "M117 This is a test", + "time": 1615834094.8662775, + "type": "command" + }, + { + "message": "G4 P1000", + "time": 1615834098.761729, + "type": "command" + }, + { + "message": "STATUS", + "time": 1615834104.2860553, + "type": "command" + }, + { + "message": "// Klipper state: Ready", + "time": 1615834104.3299904, + "type": "response" + } + ] +} +``` + +#### Rollover Logs + +Requests a manual rollover for log files registered with Moonraker's +log management facility. Currently these are limited to `moonraker.log` +and `klippy.log`. + +HTTP request: +```http +POST /server/logs/rollover +Content-Type: application/json + +{ + "application": "moonraker" +} +``` + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.logs.rollover", + "params": { + "application": "moonraker" + }, + "id": 4656 +} +``` + +Parameters: + +- `application` - (Optional) The name of the application to rollover. + Currently can be `moonraker` or `klipper`. The default is to rollover + all logs. + +!!! Note + Moonraker must be able to manage Klipper's systemd service to + perform a manual rollover. 
The rollover will fail under the following + conditions: + + - Moonraker cannot detect Klipper's systemd unit + - Moonraker cannot detect the location of Klipper's files + - A print is in progress + +Returns: An object in the following format: + +```json +{ + "rolled_over": [ + "moonraker", + "klipper" + ], + "failed": {} +} +``` + +- `rolled_over` - An array of application names successfully rolled over. +- `failed` - An object containing information about failed applications. The + key will match an application name and its value will be an error message. + +#### Restart Server +HTTP request: +```http +POST /server/restart +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.restart", + "id": 4656 +} +``` +Returns: + +`ok` upon receipt of the restart request. After the request +is returns, the server will restart. Any existing connection +will be disconnected. A restart will result in the creation +of a new server instance where the configuration is reloaded. #### Identify Connection This method provides a way for persistent clients to identify themselves to Moonraker. This information may be used by Moonraker perform an action or present information based on if a specific client is connected. Currently this method is only available -to websocket connections. This endpoint should only be called -once per session, repeated calls will result in an error. +to websocket and unix socket connections. Once this endpoint returns +success it cannot be called again, repeated calls will result in an error. HTTP request: `Not Available` -JSON-RPC request (Websocket Only): +JSON-RPC request (Websocket/Unix Socket Only): ```json { "jsonrpc": "2.0", @@ -188,25 +697,35 @@ JSON-RPC request (Websocket Only): "client_name": "moontest", "version": "0.0.1", "type": "web", - "url": "http://github.com/arksine/moontest" + "url": "http://github.com/arksine/moontest", + "access_token": "", + "api_key": "" }, "id": 4656 } ``` -All parameters are required. Below is an explanation of each parameter. +Parameters: -- `client_name`: The name of your client, such as `Mainsail`, `Fluidd`, - `KlipperScreen`, `MoonCord`, etc. -- `version`: The current version of the connected client -- `type`: Application type. May be one of `web`, `mobile`, `desktop`, - `display`, `bot`, `agent` or `other`. These should be self explanatory, - use `other` if your client does not fit any of the prescribed options. -- `url`: The url for your client's homepage +- `client_name`: (required) The name of your client, such as `Mainsail`, + `Fluidd`, `KlipperScreen`, `MoonCord`, etc. +- `version`: (required) The current version of the connected client +- `type`: (required) Application type. May be one of `web`, `mobile`, + `desktop`, `display`, `bot`, `agent` or `other`. These should be self + explanatory, use `other` if your client does not fit any of the prescribed + options. +- `url`: (required) The url for your client's homepage +- `access_token`: (optional) A JSON Web Token that may be used to assign a + logged in user to the connection. See the [authorization](#authorization) + section for APIs used to create and refresh the access token. +- `api_key`:. (optional) The system API Key. This key may be used to grant + access to clients that do not wish to implement user authentication. Note + that if the `access_token` is also supplied then this parameter will be + ignored. !!! Note When identifying as an `agent`, only one instance should be connected - to moonraker at a time. 
If multiple agents of the same `client_name` + to Moonraker at a time. If multiple agents of the same `client_name` attempt to identify themselves this endpoint will return an error. See the [extension APIs](#extension-apis) for more information about `agents`. @@ -229,7 +748,7 @@ The connection's unique identifier. HTTP request: `Not Available` -JSON-RPC request (Websocket Only): +JSON-RPC request (Websocket/Unix Socket Only): ```json { "jsonrpc": "2.0", @@ -246,6 +765,55 @@ The connected websocket's unique identifier. } ``` +#### JSON-RPC over HTTP + +Exposes the JSON-RPC interface over HTTP. All JSON-RPC methods with +corresponding HTTP APIs are available. Methods exclusive to other +transports, such as [Identify Connection](#identify-connection), are +not available. + +HTTP request: +```http +POST /server/jsonrpc +Content-Type: application/json +{ + "jsonrpc": "2.0", + "method": "printer.info", + "id": 5153 +} +``` +!!! Note + If authentication is required it must be part of the HTTP request, + either using the API Key Header (`X-Api-Key`) or JWT Bearer Token. + +Returns: + +The full JSON-RPC response. + +```json +{ + "jsonrpc": "2.0", + "id": 5153, + "result": { + "state": "ready", + "state_message": "Printer is ready", + "hostname": "my-pi-hostname", + "software_version": "v0.9.1-302-g900c7396", + "cpu_info": "4 core ARMv7 Processor rev 4 (v7l)", + "klipper_path": "/home/pi/klipper", + "python_path": "/home/pi/klippy-env/bin/python", + "log_file": "/tmp/klippy.log", + "config_file": "/home/pi/printer.cfg" + } +} +``` + +!!! Note + This request will never return an HTTP error. When an error is + encountered a JSON-RPC error response will be returned. + +### Printer Administration + #### Get Klippy host information HTTP Request: @@ -407,7 +975,7 @@ An object where the top level items are "eventtime" and "status". The "homing_origin": [0, 0, 0, 0], "position": [0, 0, 0, 0], "speed": 1500, - "speed_factor": 1, + "speed_factor": 1 }, "toolhead": { "position": [0, 0, 0, 0], @@ -416,8 +984,8 @@ An object where the top level items are "eventtime" and "status". The } } ``` -See [printer_objects.md](printer_objects.md) for details on the printer objects -available for query. +See [Klipper's status reference](https://www.klipper3d.org/Status_Reference.html) for +details on the printer objects available for query. #### Subscribe to printer object status HTTP request: @@ -480,8 +1048,8 @@ the `/printer/objects/query`: } ``` -See [printer_objects.md](printer_objects.md) for details on the printer objects -available for subscription. +See [Klipper's status reference](https://www.klipper3d.org/Status_Reference.html) for +details on the printer objects available for subscription. Status updates for subscribed objects are sent asynchronously over the websocket. See the [notify_status_update](#subscriptions) @@ -512,369 +1080,6 @@ endstop identifier, with a string value of "open" or "TRIGGERED". } ``` -#### Query Server Info -HTTP request: -```http -GET /server/info -``` -JSON-RPC request: -```json -{ - "jsonrpc": "2.0", - "method": "server.info", - "id": 9546 -} -``` -Returns: - -An object containing various fields that report server state. 
- -```json - { - "klippy_connected": true, - "klippy_state": "ready", - "components": [ - "database", - "file_manager", - "klippy_apis", - "machine", - "data_store", - "shell_command", - "proc_stats", - "history", - "octoprint_compat", - "update_manager", - "power" - ], - "failed_components": [], - "registered_directories": ["config", "gcodes", "config_examples", "docs"], - "warnings": [ - "Invalid config option 'api_key_path' detected in section [authorization]. Remove the option to resolve this issue. In the future this will result in a startup error.", - "Unparsed config section [fake_section] detected. This may be the result of a component that failed to load. In the future this will result in a startup error." - ], - "websocket_count": 2, - "moonraker_version": "v0.7.1-105-ge4f103c", - "api_version": [1, 0, 0], - "api_version_string": "1.0.0" - } -``` -!!! warning - This object also includes `plugins` and `failed_plugins` fields that - are now deprecated. They duplicate the information in - `components` and `failed_components`, and will be removed in the future. - -Note that `klippy_state` will match the `state` value received from -`/printer/info`. The `klippy_connected` item tracks the state of the -unix domain socket connect to Klippy. The `components` key will return a list -of enabled components. This can be used by clients to check if an optional -component is available. Optional components that do not load correctly will -not prevent the server from starting, thus any components that failed to load -will be reported in the `failed_components` field. - -The `websocket_count` field reports the total number of connected websockets. - -#### Get Server Configuration -HTTP request: -```http -GET /server/config -``` -JSON-RPC request: -```json -{ - "jsonrpc": "2.0", - "method": "server.config", - "id": 5616, -} -``` -Returns: - -An object containing the server's full configuration. Note that -this includes auxiliary configuration sections not part of `moonraker.conf`, -for example the `update_manager static debian moonraker` section. -Options not specified in `moonraker.conf` with default values are also -included. - -```json -{ - { - "config": { - "server": { - "host": "0.0.0.0", - "port": 7125, - "ssl_port": 7130, - "enable_debug_logging": true, - "enable_asyncio_debug": false, - "klippy_uds_address": "/tmp/klippy_uds", - "max_upload_size": 210, - "ssl_certificate_path": null, - "ssl_key_path": null - }, - "dbus_manager": {}, - "database": { - "database_path": "~/.moonraker_database", - "enable_database_debug": false - }, - "file_manager": { - "enable_object_processing": true, - "queue_gcode_uploads": true, - "config_path": "~/printer_config", - "log_path": "~/logs" - }, - "klippy_apis": {}, - "machine": { - "provider": "systemd_dbus" - }, - "shell_command": {}, - "data_store": { - "temperature_store_size": 1200, - "gcode_store_size": 1000 - }, - "proc_stats": {}, - "job_state": {}, - "job_queue": { - "load_on_startup": true, - "automatic_transition": false, - "job_transition_delay": 2, - "job_transition_gcode": "\nM118 Transitioning to next job..." 
- }, - "http_client": {}, - "announcements": { - "dev_mode": false, - "subscriptions": [] - }, - "authorization": { - "login_timeout": 90, - "force_logins": false, - "cors_domains": [ - "*.home", - "http://my.mainsail.xyz", - "http://app.fluidd.xyz", - "*://localhost:*" - ], - "trusted_clients": [ - "192.168.1.0/24" - ] - }, - "zeroconf": {}, - "octoprint_compat": { - "enable_ufp": true, - "flip_h": false, - "flip_v": false, - "rotate_90": false, - "stream_url": "/webcam/?action=stream", - "webcam_enabled": true - }, - "history": {}, - "secrets": { - "secrets_path": "~/moonraker_secrets.ini" - }, - "mqtt": { - "address": "eric-work.home", - "port": 1883, - "username": "{secrets.mqtt_credentials.username}", - "password_file": null, - "password": "{secrets.mqtt_credentials.password}", - "mqtt_protocol": "v3.1.1", - "instance_name": "pi-debugger", - "default_qos": 0, - "status_objects": { - "webhooks": null, - "toolhead": "position,print_time", - "idle_timeout": "state", - "gcode_macro M118": null - }, - "api_qos": 0, - "enable_moonraker_api": true - }, - "template": {} - }, - "orig": { - "DEFAULT": {}, - "server": { - "enable_debug_logging": "True", - "max_upload_size": "210" - }, - "file_manager": { - "config_path": "~/printer_config", - "log_path": "~/logs", - "queue_gcode_uploads": "True", - "enable_object_processing": "True" - }, - "machine": { - "provider": "systemd_dbus" - }, - "announcements": {}, - "job_queue": { - "job_transition_delay": "2.", - "job_transition_gcode": "\nM118 Transitioning to next job...", - "load_on_startup": "True" - }, - "authorization": { - "trusted_clients": "\n192.168.1.0/24", - "cors_domains": "\n*.home\nhttp://my.mainsail.xyz\nhttp://app.fluidd.xyz\n*://localhost:*" - }, - "zeroconf": {}, - "octoprint_compat": {}, - "history": {}, - "secrets": { - "secrets_path": "~/moonraker_secrets.ini" - }, - "mqtt": { - "address": "eric-work.home", - "port": "1883", - "username": "{secrets.mqtt_credentials.username}", - "password": "{secrets.mqtt_credentials.password}", - "enable_moonraker_api": "True", - "status_objects": "\nwebhooks\ntoolhead=position,print_time\nidle_timeout=state\ngcode_macro M118" - } - }, - "files": [ - { - "filename": "moonraker.conf", - "sections": [ - "server", - "file_manager", - "machine", - "announcements", - "job_queue", - "authorization", - "zeroconf", - "octoprint_compat", - "history", - "secrets" - ] - }, - { - "filename": "include/extras.conf", - "sections": [ - "mqtt" - ] - } - ] - } -} -``` -#### Request Cached Temperature Data -HTTP request: -```http -GET /server/temperature_store -``` -JSON-RPC request: -```json -{ - "jsonrpc": "2.0", - "method": "server.temperature_store", - "id": 2313 -} -``` -Returns: - -An object where the keys are the available temperature sensor names, and with -the value being an array of stored temperatures. The array is updated every -1 second by default, containing a total of 1200 values (20 minutes). The -array is organized from oldest temperature to most recent (left to right). -Note that when the host starts each array is initialized to 0s. 
-```json -{ - "extruder": { - "temperatures": [21.05, 21.12, 21.1, 21.1, 21.1], - "targets": [0, 0, 0, 0, 0], - "powers": [0, 0, 0, 0, 0] - }, - "temperature_fan my_fan": { - "temperatures": [21.05, 21.12, 21.1, 21.1, 21.1], - "targets": [0, 0, 0, 0, 0], - "speeds": [0, 0, 0, 0, 0], - }, - "temperature_sensor my_sensor": { - "temperatures": [21.05, 21.12, 21.1, 21.1, 21.1] - } -} -``` - -#### Request Cached GCode Responses -HTTP request: -```http -GET /server/gcode_store?count=100 -``` -JSON-RPC request: -```json -{ - "jsonrpc": "2.0", - "method": "server.gcode_store", - "params": { - "count": 100 - }, - "id": 7643} -``` - -The `count` argument is optional, limiting number of returned items -in the response to the value specified. If omitted, the entire gcode -store will be returned (up to 1000 responses). - -Returns: - -An object with the field `gcode_store` that contains an array -of objects. Each object will contain `message`, `time`, and -`type` fields. The `time` field is reported in Unix Time. -The `type` field will either be `command` or `response`. -```json -{ - "gcode_store": [ - { - "message": "FIRMWARE_RESTART", - "time": 1615832299.1167388, - "type": "command" - }, - { - "message": "// Klipper state: Ready", - "time": 1615832309.9977088, - "type": "response" - }, - { - "message": "M117 This is a test", - "time": 1615834094.8662775, - "type": "command" - }, - { - "message": "G4 P1000", - "time": 1615834098.761729, - "type": "command" - }, - { - "message": "STATUS", - "time": 1615834104.2860553, - "type": "command" - }, - { - "message": "// Klipper state: Ready", - "time": 1615834104.3299904, - "type": "response" - } - ] -} -``` - -#### Restart Server -HTTP request: -```http -POST /server/restart -``` -JSON-RPC request: -```json -{ - "jsonrpc": "2.0", - "method": "server.restart", - "id": 4656 -} -``` -Returns: - -`ok` upon receipt of the restart request. After the request -is returns, the server will restart. Any existing connection -will be disconnected. A restart will result in the creation -of a new server instance where the configuration is reloaded. - ### GCode APIs #### Run a gcode: @@ -1013,7 +1218,7 @@ Returns: `ok` -### Machine Commands +### Machine Requests #### Get System Info HTTP request: @@ -1071,6 +1276,10 @@ Returns: Information about the host system in the following format: "klipper_mcu", "moonraker" ], + "instance_ids": { + "moonraker": "moonraker", + "klipper": "klipper" + }, "service_state": { "klipper": { "active_state": "active", @@ -1120,6 +1329,18 @@ Returns: Information about the host system in the following format: } ] } + }, + "canbus": { + "can0": { + "tx_queue_len": 128, + "bitrate": 500000, + "driver": "mcp251x" + }, + "can1": { + "tx_queue_len": 128, + "bitrate": 500000, + "driver": "gs_usb" + } } } } @@ -1165,9 +1386,18 @@ This request will not return. The machine will reboot and the socket connection will drop. #### Restart a system service -Restarts a system service via `sudo systemctl restart {name}`. Currently -the `moonraker`, `klipper`, `MoonCord`, `KlipperScreen` and `webcamd` -services are supported. +Uses: `sudo systemctl restart {name}` + +Services allowed: + +* `crowsnest` +* `MoonCord` +* `moonraker` +* `moonraker-telegram-bot` +* `klipper` +* `KlipperScreen` +* `sonar` +* `webcamd` HTTP request: ```http @@ -1186,8 +1416,10 @@ JSON-RPC request: Returns: -`ok` when complete. Note that if `moonraker` is chosen, the return -value will be sent prior to the service restart. +`ok` when complete. +!!! 
note + If `moonraker` is chosen, the return + value will be sent prior to the service restart. #### Stop a system service Stops a system service via `sudo systemctl stop `. Currently @@ -1356,6 +1588,732 @@ object reports total cpu usage, while each `cpuX` field is usage per core. The `websocket_connections` field reports the number of active websockets currently connected to moonraker. +#### Get Sudo Info +Retrieve sudo information status. Optionally checks if Moonraker has +permission to run commands as root. + +HTTP request: +```http +GET /machine/sudo/info?check_access=false +``` + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "machine.sudo.info", + "params": { + "check_access": false + }, + "id": 7896 +} +``` + +Parameters: + +- `check_access`: A boolean value, when set to `true` Moonraker will + attempt to run a command with elevated permissions. The result will + be returned in the `sudo_access` field of the response. Defaults to + `false`. + +Returns: + +An object in the following format: +```json +{ + "sudo_access": null, + "linux_user": "pi", + "sudo_requested": false, + "request_messages": [] +} +``` + +- `sudo_access`: The result of a request to check access. Returns + `true` if Moonraker has sudo permission, `false` if it does not, + and `null` if `check_access` is `false`. +- `linux_user`: The current linux user running Moonraker. +- `sudo_requested`: Returns true if Moonraker is currently requesting + sudo access. +- `request_messages`: An array of strings, each string describing + a pending sudo request. The array will be empty if no sudo + requests are pending. + +#### Set sudo password +Sets/updates the sudo password currently used by Moonraker. When +the password is set using this endpoint the change is not persistent +across restarts. If Moonraker has one or more pending sudo requests +they will be processed. + +HTTP request: +```http +POST /machine/sudo/password +Content-Type: application/json + +{ + "password": "linux_user_password" +} +``` + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "machine.sudo.password", + "params": { + "password": "linux_user_password" + }, + "id": 7896 +} +``` + +Parameters: + +- `password`: The linux user password used to grant elevated + permission. This parameter must be provided. + +Returns: + +An object in the following format: +```json +{ + "sudo_responses": [ + "Sudo password successfully set." + ], + "is_restarting": false +} +``` + +- `sudo_responses`: An array of one or more sudo responses. + If there are pending sudo requests each request will provide + a response. +- `is_restarting`: A boolean value indicating that a sudo request + prompted Moonraker to restart its service. + +This request will return an error if the supplied password is +incorrect or if any pending sudo requests fail. + +#### List USB Devices + +Returns a list of all USB devices currently detected on the system. 
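+
+As a quick client-side orientation (the formal request and response
+schemas for this endpoint follow below), a minimal sketch using Python's
+`requests` package might look like this.  The host name `printer.local`
+is an assumption made for the example:
+
+```python
+# Hypothetical sketch: list USB devices over Moonraker's HTTP API.
+# Assumes Moonraker is reachable at printer.local on its default port.
+import requests
+
+resp = requests.get("http://printer.local:7125/machine/peripherals/usb")
+resp.raise_for_status()
+# Moonraker wraps HTTP response payloads in a "result" object
+for dev in resp.json()["result"]["usb_devices"]:
+    print(f"{dev['usb_location']}: {dev.get('product') or dev['description']}")
+```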
+ +```http title="HTTP Request" +GET /machine/peripherals/usb +``` + +```json title="JSON-RPC Request" +{ + "jsonrpc": "2.0", + "method": "machine.peripherals.usb", + "id": 7896 +} +``` + +/// api-example-response +```json +{ + "usb_devices": [ + { + "device_num": 1, + "bus_num": 1, + "vendor_id": "1d6b", + "product_id": "0002", + "usb_location": "1:1", + "manufacturer": "Linux 6.1.0-rpi7-rpi-v8 dwc_otg_hcd", + "product": "DWC OTG Controller", + "serial": "3f980000.usb", + "class": "Hub", + "subclass": null, + "protocol": "Single TT", + "description": "Linux Foundation 2.0 root hub" + }, + { + "device_num": 3, + "bus_num": 1, + "vendor_id": "046d", + "product_id": "0825", + "usb_location": "1:3", + "manufacturer": "Logitech, Inc.", + "product": "Webcam C270", + "serial": "", + "class": "Miscellaneous Device", + "subclass": null, + "protocol": "Interface Association", + "description": "Logitech, Inc. Webcam C270" + }, + { + "device_num": 2, + "bus_num": 1, + "vendor_id": "1a40", + "product_id": "0101", + "usb_location": "1:2", + "manufacturer": "Terminus Technology Inc.", + "product": "USB 2.0 Hub", + "serial": null, + "class": "Hub", + "subclass": null, + "protocol": "Single TT", + "description": "Terminus Technology Inc. Hub" + }, + { + "device_num": 5, + "bus_num": 1, + "vendor_id": "0403", + "product_id": "6001", + "usb_location": "1:5", + "manufacturer": "FTDI", + "product": "FT232R USB UART", + "serial": "", + "class": null, + "subclass": null, + "protocol": null, + "description": "Future Technology Devices International, Ltd FT232 Serial (UART) IC" + }, + { + "device_num": 4, + "bus_num": 1, + "vendor_id": "1d50", + "product_id": "614e", + "usb_location": "1:4", + "manufacturer": "Klipper", + "product": "stm32f407xx", + "serial": "", + "class": "Communications", + "subclass": null, + "protocol": null, + "description": "OpenMoko, Inc. Klipper 3d-Printer Firmware" + } + ] +} +``` +/// + +/// api-response-schema + open: True +Response + +| Field | Type | Description | +| ------------- | :---: | ------------------------------------------------------ | +| `usb_devices` | array | An array of objects containing USB device information. | + + + USB Device + +| Field | Type | Description | +| -------------- | :-----: | --------------------------------------------------- | +| `bus_num` | int | The USB bus number as reported by the host. | +| `device_num` | int | The USB device number as reported by the host. | +| `usb_location` | string | A combination of the bus number and device number, | +| | | yielding a unique location ID on the host system. |^ +| `vendor_id` | string | The vendor ID as reported by the driver. | +| `product_id` | string | The product ID as reported by the driver. | +| `manufacturer` | string? | The manufacturer name as reported by the driver. | +| | | This will be `null` if no manufacturer is found. |^ +| `product` | string? | The product description as reported by the driver. | +| | | This will be `null` if no description is found. |^ +| `class` | string? | The class description as reported by the driver. | +| | | This will be `null` if no description is found. |^ +| `subclass` | string? | The subclass description as reported by the driver. | +| | | This will be `null` if no description is found. |^ +| `protocol` | string? | The protocol description as reported by the driver. | +| | | This will be `null` if no description is found. |^ +| `description` | string? | The full device description string as reported by | +| | | the usb.ids file. 
This will be `null` if no            |^
+|                |         | description is found.                                |^
+///
+
+#### List Serial Devices
+
+Returns a list of all serial devices detected on the system.  These may be USB
+CDC-ACM devices or hardware UARTs.
+
+```http title="HTTP Request"
+GET /machine/peripherals/serial
+```
+
+```json title="JSON-RPC Request"
+{
+    "jsonrpc": "2.0",
+    "method": "machine.peripherals.serial",
+    "id": 7896
+}
+```
+
+/// api-example-response
+```json
+{
+    "serial_devices": [
+        {
+            "device_type": "hardware_uart",
+            "device_path": "/dev/ttyS0",
+            "device_name": "ttyS0",
+            "driver_name": "serial8250",
+            "path_by_hardware": null,
+            "path_by_id": null,
+            "usb_location": null
+        },
+        {
+            "device_type": "usb",
+            "device_path": "/dev/ttyACM0",
+            "device_name": "ttyACM0",
+            "driver_name": "cdc_acm",
+            "path_by_hardware": "/dev/serial/by-path/platform-3f980000.usb-usb-0:1.2:1.0",
+            "path_by_id": "/dev/serial/by-id/usb-Klipper_stm32f407xx_unique_serial-if00",
+            "usb_location": "1:4"
+        },
+        {
+            "device_type": "usb",
+            "device_path": "/dev/ttyUSB0",
+            "device_name": "ttyUSB0",
+            "driver_name": "ftdi_sio",
+            "path_by_hardware": "/dev/serial/by-path/platform-3f980000.usb-usb-0:1.4:1.0-port0",
+            "path_by_id": "/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_unique_serial-if00-port0",
+            "usb_location": "1:5"
+        },
+        {
+            "device_type": "hardware_uart",
+            "device_path": "/dev/ttyAMA0",
+            "device_name": "ttyAMA0",
+            "driver_name": "uart-pl011",
+            "path_by_hardware": null,
+            "path_by_id": null,
+            "usb_location": null
+        }
+    ]
+}
+```
+///
+
+/// api-response-schema
+    open: True
+Response
+
+| Field            | Type  | Description                                                |
+| ---------------- | ----- | ---------------------------------------------------------- |
+| `serial_devices` | array | An array of objects containing serial device information.  |
+
+
+Serial Device
+
+| Field              |  Type   | Description                                                  |
+| ------------------ | :-----: | ------------------------------------------------------------ |
+| `device_type`      | string  | The type of serial device.  Can be `hardware_uart` or `usb`. |
+| `device_path`      | string  | The absolute file path to the device.                        |
+| `device_name`      | string  | The device file name as reported by sysfs.                   |
+| `driver_name`      | string  | The name of the device driver.                               |
+| `path_by_hardware` | string? | A symbolic link to the device based on its physical          |
+|                    |         | connection, ie: usb port.  Will be `null` if no              |^
+|                    |         | matching link exists.                                        |^
+| `path_by_id`       | string? | A symbolic link to the device based on its reported IDs.     |
+|                    |         | Will be `null` if no matching link exists.                   |^
+| `usb_location`     | string? | An identifier derived from the reported usb bus and          |
+|                    |         | device numbers.  Can be used to match results from           |^
+|                    |         | `/machine/peripherals/usb`.  Will be `null` for non-usb      |^
+|                    |         | devices.                                                     |^
+///
+
+#### List Video Capture Devices
+
+Retrieves a list of V4L2 video capture devices on the system.  If
+the python3-libcamera system package is installed this request will
+also return libcamera devices.
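+
+As with the USB endpoint above, a hedged sketch of a hypothetical
+client follows; the formal request and response schemas are documented
+below, and `printer.local` is again an assumed host name:
+
+```python
+# Hypothetical sketch: summarize V4L2 capture devices and their modes.
+import requests
+
+resp = requests.get("http://printer.local:7125/machine/peripherals/video")
+resp.raise_for_status()
+result = resp.json()["result"]
+for dev in result["v4l2_devices"]:
+    # Collect the unique resolutions across all of the device's modes
+    resolutions = {res for mode in dev["modes"] for res in mode["resolutions"]}
+    print(f"{dev['device_path']} ({dev['camera_name']}): "
+          f"{len(resolutions)} unique resolutions")
+```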
+ +```http title="HTTP Request" +GET /machine/peripherals/video +``` + +```json title="JSON-RPC Request" +{ + "jsonrpc": "2.0", + "method": "machine.peripherals.video", + "id": 7896 +} +``` + +/// api-example-response +```json +{ + "v4l2_devices": [ + { + "device_name": "video0", + "device_path": "/dev/video0", + "camera_name": "unicam", + "driver_name": "unicam", + "hardware_bus": "platform:3f801000.csi", + "modes": [], + "capabilities": [ + "VIDEO_CAPTURE", + "EXT_PIX_FORMAT", + "READWRITE", + "STREAMING", + "IO_MC" + ], + "version": "6.1.63", + "path_by_hardware": "/dev/v4l/by-path/platform-3f801000.csi-video-index0", + "path_by_id": null, + "alt_name": "unicam-image", + "usb_location": null + }, + { + "device_name": "video1", + "device_path": "/dev/video1", + "camera_name": "UVC Camera (046d:0825)", + "driver_name": "uvcvideo", + "hardware_bus": "usb-3f980000.usb-1.1", + "modes": [ + { + "format": "YUYV", + "description": "YUYV 4:2:2", + "flags": [], + "resolutions": [ + "640x480", + "160x120", + "176x144", + "320x176", + "320x240", + "352x288", + "432x240", + "544x288", + "640x360", + "752x416", + "800x448", + "800x600", + "864x480", + "960x544", + "960x720", + "1024x576", + "1184x656", + "1280x720", + "1280x960" + ] + }, + { + "format": "MJPG", + "description": "Motion-JPEG", + "flags": [ + "COMPRESSED" + ], + "resolutions": [ + "640x480", + "160x120", + "176x144", + "320x176", + "320x240", + "352x288", + "432x240", + "544x288", + "640x360", + "752x416", + "800x448", + "800x600", + "864x480", + "960x544", + "960x720", + "1024x576", + "1184x656", + "1280x720", + "1280x960" + ] + } + ], + "capabilities": [ + "VIDEO_CAPTURE", + "EXT_PIX_FORMAT", + "STREAMING" + ], + "version": "6.1.63", + "path_by_hardware": "/dev/v4l/by-path/platform-3f980000.usb-usb-0:1.1:1.0-video-index0", + "path_by_id": "/dev/v4l/by-id/usb-046d_0825_66EF0390-video-index0", + "alt_name": "UVC Camera (046d:0825)", + "usb_location": "1:3" + }, + { + "device_name": "video14", + "device_path": "/dev/video14", + "camera_name": "bcm2835-isp", + "driver_name": "bcm2835-isp", + "hardware_bus": "platform:bcm2835-isp", + "modes": [], + "capabilities": [ + "VIDEO_CAPTURE", + "EXT_PIX_FORMAT", + "STREAMING" + ], + "version": "6.1.63", + "path_by_hardware": null, + "path_by_id": null, + "alt_name": "bcm2835-isp-capture0", + "usb_location": null + }, + { + "device_name": "video15", + "device_path": "/dev/video15", + "camera_name": "bcm2835-isp", + "driver_name": "bcm2835-isp", + "hardware_bus": "platform:bcm2835-isp", + "modes": [], + "capabilities": [ + "VIDEO_CAPTURE", + "EXT_PIX_FORMAT", + "STREAMING" + ], + "version": "6.1.63", + "path_by_hardware": null, + "path_by_id": null, + "alt_name": "bcm2835-isp-capture1", + "usb_location": null + }, + { + "device_name": "video21", + "device_path": "/dev/video21", + "camera_name": "bcm2835-isp", + "driver_name": "bcm2835-isp", + "hardware_bus": "platform:bcm2835-isp", + "modes": [], + "capabilities": [ + "VIDEO_CAPTURE", + "EXT_PIX_FORMAT", + "STREAMING" + ], + "version": "6.1.63", + "path_by_hardware": "/dev/v4l/by-path/platform-bcm2835-isp-video-index1", + "path_by_id": null, + "alt_name": "bcm2835-isp-capture0", + "usb_location": null + }, + { + "device_name": "video22", + "device_path": "/dev/video22", + "camera_name": "bcm2835-isp", + "driver_name": "bcm2835-isp", + "hardware_bus": "platform:bcm2835-isp", + "modes": [], + "capabilities": [ + "VIDEO_CAPTURE", + "EXT_PIX_FORMAT", + "STREAMING" + ], + "version": "6.1.63", + "path_by_hardware": 
"/dev/v4l/by-path/platform-bcm2835-isp-video-index2", + "path_by_id": null, + "alt_name": "bcm2835-isp-capture1", + "usb_location": null + } + ], + "libcamera_devices": [ + { + "libcamera_id": "/base/soc/i2c0mux/i2c@1/ov5647@36", + "model": "ov5647", + "modes": [ + { + "format": "SGBRG10_CSI2P", + "resolutions": [ + "640x480", + "1296x972", + "1920x1080", + "2592x1944" + ] + } + ] + }, + { + "libcamera_id": "/base/soc/usb@7e980000/usb-port@1/usb-port@1-1.1:1.0-046d:0825", + "model": "UVC Camera (046d:0825)", + "modes": [ + { + "format": "MJPEG", + "resolutions": [ + "160x120", + "176x144", + "320x176", + "320x240", + "352x288", + "432x240", + "544x288", + "640x360", + "640x480", + "752x416", + "800x448", + "864x480", + "800x600", + "960x544", + "1024x576", + "960x720", + "1184x656", + "1280x720", + "1280x960" + ] + }, + { + "format": "YUYV", + "resolutions": [ + "160x120", + "176x144", + "320x176", + "320x240", + "352x288", + "432x240", + "544x288", + "640x360", + "640x480", + "752x416", + "800x448", + "864x480", + "800x600", + "960x544", + "1024x576", + "960x720", + "1184x656", + "1280x720", + "1280x960" + ] + } + ] + } + ] +} +``` +/// + +/// api-response-schema + open: True +Response + +| Field | Type | Description | +| ------------------- | :---: | ------------------------------------- | +| `v4l2_devices` | array | An array of V4L2 Device objects. | +| `libcamera_devices` | array | An array of Libcamera Device objects. | + +V4L2 Device + +| Field | Type | Description | +| ------------------ | :-----: | -------------------------------------------------------- | +| `device_name` | string | The V4L2 name assigned to the device. This is typically | +| | | the name of the file associated with the device. |^ +| `device_path` | string | The absolute system path to the device file. | +| `camera_name` | string | The camera name reported by the device driver. | +| `driver_name` | string | The name of the driver loaded for the device. | +| `alt_name` | string? | An alternative device name optionally reported by | +| | | sysfs. Will be `null` if the name file does not exist. |^ +| `hardware_bus` | string | A description of the hardware location of the device | +| `modes` | array | An array of V4L2 mode objects. | +| `capabilities` | array | An array of strings indicating the capabilities the | +| | | device supports as reported by V4L2. |^ +| `version` | string | The device version as reported by V4L2. | +| `path_by_hardware` | string? | A symbolic link to the device based on its physical | +| | | connection, ie: usb port.. Will be `null` if no |^ +| | | matching link exists. |^ +| `path_by_id` | string? | A symbolic link the the device based on its reported | +| | | ID. Will be `null` if no matching link exists. |^ +| `usb_location` | string? | An identifier derived from the reported usb bus and | +| | | device numbers. Will be `null` for non-usb devices. |^ + +V4L2 Mode + +| Field | Type | Description | +| ------------- | :----: | ------------------------------------------------------------ | +| `description` | string | The description of the mode reported by the V4L2 driver. | +| `flags` | array | An array of strings describing flags reported by the driver. | +| `format` | string | The pixel format of the mode. | +| `resolutions` | array | An array of strings describing the resolutions supported by | +| | | the mode. 
Each entry is reported as `x` |^ + +Libcamera Device + +| Field | Type | Description | +| -------------- | :----: | ------------------------------------------------------- | +| `libcamera_id` | string | The ID of the device as reported by libcamera. | +| `model` | string | The model name of the device. | +| `modes` | array | An array of `Libcamera Mode` objects, each describing a | +| | | mode supported by the device. |^ + +Libcamera Mode + +| Field | Type | Description | +| ------------- | :----: | ----------------------------------------------------------- | +| `format` | string | The pixel format of the mode. | +| `resolutions` | array | An array of strings describing the resolutions supported by | +| | | the mode. Each entry is reported as `x` |^ +/// + +#### Query Unassigned Canbus UUIDs + +Queries the provided canbus interface for unassigned Klipper or Katapult +node IDs. + +!!! Warning + It is recommended that frontends provide users with an explanation + of how UUID queries work and the potential pitfalls when querying + a bus with multiple unassigned nodes. An "unassigned" node is a + CAN node that has not been activated by Katapult or Klipper. If + either Klipper or Katapult has connected to the node, it will be + assigned a Node ID and therefore will no longer respond to queries. + A device reset is required to remove the assignment. + + When multiple unassigned nodes are on the network, each responds to + the query at roughly the same time. This results in arbitration + errors. Nodes will retry the send until the response reports success. + However, nodes track the count of arbitration errors, and once a + specific threshold is reached they will go into a "bus off" state. A + device reset is required to reset the counter and recover from "bus off". + + For this reason, it is recommended that users only issue a query when + a single unassigned node is on the network. If a user does wish to + query multiple unassigned nodes it is vital that they reset all nodes + on the network before running Klipper. + +```http title="HTTP Request" +GET /machine/peripherals/canbus?interface=can0 +``` + +```json title="JSON-RPC Request" +{ + "jsonrpc": "2.0", + "method": "machine.peripherals.canbus", + "params": { + "interface": "can0" + }, + "id": 7896 +} +``` + +/// api-parameters + open: True +| Name | Type | Description | +| ----------- | :----: | ----------------------------------------------------- | +| `interface` | string | The cansocket interface to query. Default is `can0`. | +/// + +/// api-example-response +```json +{ + "can_uuids": [ + { + "uuid": "11AABBCCDD", + "application": "Klipper" + } + ] +} +``` +/// + +/// api-response-schema + open: True +Response + +| Field | Type | Description | +| ----------- | :---: | ---------------------------------------------------------------- | +| `can_uuids` | array | An array of discovered CAN UUID objects, or an empty array if no | +| | | unassigned CAN nodes are found. |^ + +Can UUID + +| Field | Type | Description | +| ------------- | :----: | ----------------------------------------------------------- | +| `uuid` | string | The UUID of the unassigned node. | +| `application` | string | The name of the application running on the unassigned Node. | +| | | Should be "Klipper" or "Katapult". |^ +/// + ### File Operations Most file operations are available over both APIs, however file upload and @@ -1436,23 +2394,70 @@ A list of objects, where each object contains file data. 
"modified": 1615768477.5133543, "size": 189713016, "permissions": "rw" - }, + } ] ``` -#### Get gcode metadata -Get metadata for a specified gcode file. If the file is located in -a subdirectory, then the file name should include the path relative to -the "gcodes" root. For example, if the file is located at: +#### List registered roots +Reports all "root" directories registered with Moonraker. Information +such as location on disk and permissions are included. + +HTTP request: +```http +GET /server/files/roots ``` -http://host.local/server/files/gcodes/my_sub_dir/my_print.gcode + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.files.roots", + "id": 4644 +} ``` -Then the `{filename}` argument should be `my_sub_dir/my_print.gcode`. + +Returns: +A list of objects, where each object contains file data: + +```json +[ + { + "name": "config", + "path": "/home/pi/printer_data/config", + "permissions": "rw" + }, + { + "name": "logs", + "path": "/home/pi/printer_data/logs", + "permissions": "r" + }, + { + "name": "gcodes", + "path": "/home/pi/printer_data/gcodes", + "permissions": "rw" + }, + { + "name": "config_examples", + "path": "/home/pi/klipper/config", + "permissions": "r" + }, + { + "name": "docs", + "path": "/home/pi/klipper/docs", + "permissions": "r" + } +] +``` + +#### Get GCode Metadata + +Get metadata for a specified gcode file. HTTP request: ```http GET /server/files/metadata?filename={filename} ``` + JSON-RPC request: ```json { @@ -1465,6 +2470,13 @@ JSON-RPC request: } ``` +Parameters: + +- `filename`: Path to the gcode file, relative to the `gcodes` root. + For example, if the file is located at + `http://host/server/files/gcodes/tools/drill_head.gcode`, + the `filename` should be specified as `tools/drill_head.gcode` + Returns: Metadata for the requested file if it exists. If any fields failed @@ -1505,11 +2517,104 @@ modified time, and size. "filename": "3DBenchy_0.15mm_PLA_MK3S_2h6m.gcode" } ``` -!!! note +!!! Note The `print_start_time` and `job_id` fields are initialized to `null`. They will be updated for each print job if the user has the `[history]` component configured +#### Scan GCode Metadata + +Initiate a metadata scan for a selected file. If the file has already +been scanned the endpoint will force a rescan + +HTTP request: +```http +GET /server/files/metascan?filename={filename} +``` + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.files.metascan", + "params": { + "filename": "{filename}" + }, + "id": 3545 +} +``` + +Parameters: + +- `filename`: Path to the gcode file, relative to the `gcodes` root. + For example, if the file is located at + `http://host/server/files/gcodes/tools/drill_head.gcode`, + the `filename` should be specified as `tools/drill_head.gcode` + +Returns: + +- An object containing the metadata resulting from the scan, matching + the return value of the [Get Metdata Endpoint](#get-gcode-metadata). + +#### Get GCode Thumbnails + +Returns thumbnail information for a supplied gcode file. If no thumbnail +information is available + +HTTP request: +```http +GET /server/files/thumbnails?filename={filename} +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.files.thumbnails", + "params": { + "filename": "{filename}" + }, + "id": 3545 +} +``` + +Parameters: + +- `filename`: Path to the gcode file, relative to the `gcodes` root. 
+ For example, if the file is located at + `http://host/server/files/gcodes/tools/drill_head.gcode`, + the `filename` should be specified as `tools/drill_head.gcode` + +Returns: + +An array of objects containing thumbnail information. If no +thumbnail information exists for the specified file then the +returned array wil be empty. + +```json +[ + { + "width": 32, + "height": 32, + "size": 1551, + "thumbnail_path": "test/.thumbs/CE2_FanCover-120mm-Mesh-32x32.png" + }, + { + "width": 300, + "height": 300, + "size": 31819, + "thumbnail_path": "test/.thumbs/CE2_FanCover-120mm-Mesh.png" + } +] +``` + +!!! Note + This information is the same as reported in the `thumbnails` field + of a [metadata](#get-gcode-metadata) object, with one exception. + The `thumbnail_path` field in the result above contains a + path relative to the `gcodes` root, whereas the `relative_path` + field reported in the metadata is relative to the gcode file's + parent folder. + #### Get directory information Returns a list of files and subdirectories given a supplied path. Unlike `/server/files/list`, this command does not walk through @@ -1586,7 +2691,7 @@ following format: "size": 2388774, "permissions": "rw", "filename": "CE2_calicat.gcode" - }, + } ], "disk_usage": { "total": 7522213888, @@ -1623,8 +2728,11 @@ Returns: Information about the created directory ```json { "item": { - "path": "gcodes/testdir", - "root": "gcodes" + "path": "my_new_dir", + "root": "gcodes", + "modified": 1676983427.3732708, + "size": 4096, + "permissions": "rw" }, "action": "create_dir" } @@ -1643,7 +2751,7 @@ JSON-RPC request: "jsonrpc": "2.0", "method": "server.files.delete_directory", "params": { - "path": "gcodes/my_new_dir", + "path": "gcodes/my_subdir", "force": false }, "id": 6545 @@ -1657,8 +2765,12 @@ Returns: Information about the deleted directory ```json { "item": { - "path": "gcodes/testdir", - "root": "gcodes" + "path": "my_subdir", + "root": "gcodes", + "modified": 0, + "size": 0, + "permissions": "" + }, "action": "delete_dir" } @@ -1680,11 +2792,13 @@ and `config`". This API may also be used to rename a file or directory. Be aware that an attempt to rename a directory to a directory that already exists will result -in *moving* the source directory into the destination directory. +in *moving* the source directory into the destination directory. Also be aware +that renaming a file to a file that already exists will result in overwriting +the existing file. HTTP request: ```http -POST /server/files/move?source=gcodes/my_file.gcode&dest=gcodes/subdir/my_file.gcode +POST /server/files/move?source=gcodes/testdir/my_file.gcode&dest=gcodes/subdir/my_file.gcode ``` JSON-RPC request: ```json @@ -1692,7 +2806,7 @@ JSON-RPC request: "jsonrpc": "2.0", "method": "server.files.move", "params": { - "source": "gcodes/my_file.gcode", + "source": "gcodes/testdir/my_file.gcode", "dest": "gcodes/subdir/my_file.gcode" }, "id": 5664 @@ -1702,20 +2816,27 @@ JSON-RPC request: Returns: Information about the moved file or directory ```json { - "result": { - "item": { - "root": "gcodes", - "path": "test4/test3" - }, - "source_item": { - "path": "gcodes/test4/test3", - "root": "gcodes" - }, - "action": "move_dir" - } + "item": { + "root": "gcodes", + "path": "subdir/my_file.gcode", + "modified": 1676940082.8595376, + "size": 384096, + "permissions": "rw" + }, + "source_item": { + "path": "testdir/my_file.gcode", + "root": "gcodes" + }, + "action": "move_file" } ``` +!!! Note + The `item` field contains file info for the destination. 
The `source_item` + contains the `path` and `root` the item was moved from. The `action` field + will be `move_file` if the source is a file or `move_dir` if the source is + a directory. + #### Copy a file or directory Copies a file or directory from one location to another. A successful copy has the prerequisites as a move with one exception, a copy may complete if the @@ -1744,12 +2865,94 @@ Returns: Information about the copied file or directory { "item": { "root": "gcodes", - "path": "test4/Voron_v2_350_afterburner_Filament Cover_0.2mm_ABS.gcode" + "path": "subdir/my_file.gcode", + "modified": 1676940082.8595376, + "size": 384096, + "permissions": "rw" }, "action": "create_file" } ``` +!!! Note + The `item` field contains file info for the destination. The `action` field + will be `create_file` if a new file was created, `modify_file` if an exiting + file was overwitten, or `create_dir` if an entire directory was copied. + +#### Create a ZIP archive + +Creates a `zip` file consisting of one or more files. + +HTTP request: +```http +POST /server/files/zip +Content-Type: application/json + +{ + "dest": "config/errorlogs.zip", + "items": [ + "config/printer.cfg", + "logs", + "gcodes/subfolder" + ], + "store_only": false +} +``` + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.files.zip", + "params": { + "dest": "config/errorlogs.zip", + "items": [ + "config/printer.cfg", + "logs", + "gcodes/subfolder" + ], + "store_only": false + }, + "id": 5623 +} +``` + +Parameters: + +- `dest` - (Optional) - Relative path to the destination zip. The first element + of the path must be valid `root` with write access. If the path contains subfolders + the parent folder must exist. The default is `config/collection-{timestamp}.zip`, + where `{timestamp}` is generated based on the localtime. +- `items` - (Required) - An array of relative paths containing files and or folders + to include in the archive. Each item must meet the following requirements: + - The first element of the item must be a registered `root` with read access. + - Each item must point to a valid file or folder. + - Moonraker must have permission to read the specified files and/or directories. + - If the path is to a directory then all files with read permissions are included. + Subfolders are not included recursively. +- `store_only` - (Optional) - If set to `true` then the archive will not compress its + contents. Otherwise the traditional `deflation` algorithm is used to compress the + archives contents. The default is `false`. + +Returns: An object in the following format: + +```json +{ + "destination": { + "root": "config", + "path": "errorlogs.zip", + "modified": 1676984423.8892415, + "size": 420, + "permissions": "rw" + }, + "action": "zip_files" +} +``` + +- `destination` - an object containing the destination `root` and a path to the file + relative to the root. +- `action` - The file action, will be `zip_files` + #### File download Retrieves file `filename` at root `root`. The `filename` must include the relative path if it is not in the root folder. @@ -1813,9 +3016,13 @@ is only included when the supplied root is set to `gcodes`. 
{
     "item": {
         "path": "Lock Body Shim 1mm_0.2mm_FLEX_MK3S_2h30m.gcode",
-        "root": "gcodes"
+        "root": "gcodes",
+        "modified": 1676984527.636818,
+        "size": 71973,
+        "permissions": "rw"
     },
     "print_started": false,
+    "print_queued": false,
     "action": "create_file"
 }
 ```
@@ -1844,13 +3051,22 @@ Returns: Information about the deleted file
 {
     "item": {
         "path": "Lock Body Shim 1mm_0.2mm_FLEX_MK3S_2h30m.gcode",
-        "root": "gcodes"
+        "root": "gcodes",
+        "size": 0,
+        "modified": 0,
+        "permissions": ""
     },
     "action": "delete_file"
 }
 ```
 
 #### Download klippy.log
+!!! Note
+    Logs are now available in the `logs` root.  Front ends should consider
+    presenting all available logs using a "file manager" type of UI.  That said,
+    if Klipper has not been configured to write logs in the `logs` root then
+    this endpoint is available as a fallback.
+
 HTTP request:
 ```http
 GET /server/files/klippy.log
@@ -1862,6 +3078,12 @@ Returns: The requested file
 
 #### Download moonraker.log
+!!! Note
+    Logs are now available in the `logs` root.  Front ends should consider
+    presenting all available logs using a "file manager" type of UI.  That said,
+    if Moonraker has not been configured to write logs in the `logs` root then
+    this endpoint is available as a fallback.
+
 HTTP request:
 ```http
 GET /server/files/moonraker.log
 ```
 
 Returns: The requested file
 
@@ -1882,11 +3104,22 @@ Moonraker's HTTP APIs.  JWTs should be included in the
 `Authorization` header as a `Bearer` type for each HTTP request.
 If using an API Key it should be included in the `X-Api-Key` header
 for each HTTP Request.
 
+Websocket authentication can be achieved via the request itself or
+post connection.  Unlike HTTP requests it is not necessary to pass a
+token and/or API Key to each request.  The
+[identify connection](#identify-connection) endpoint takes optional
+`access_token` and `api_key` parameters that may be used to authenticate
+a user already logged in, otherwise the `login` API may be used for
+authentication.  Websocket connections will stay authenticated until
+the connection is closed or the user logs out.
+
 !!! note
-    For requests in which clients cannot modify headers it is acceptable
-    to pass the JWT via the query string's `access_token` argument.
-    Alternatively client developers may request a `oneshot_token` and
-    send the result via the `token` query string argument.
+    ECMAScript imposes limitations on certain requests that prohibit the
+    developer from modifying the HTTP headers (ie: the request to open a
+    websocket, "download" requests that open a dialog).  In these cases
+    it is recommended for the developer to request a `oneshot_token`, then
+    send the result via the `token` query string argument in the desired
+    request.
 
 !!! warning
     It is strongly recommended that arguments for the below APIs are
@@ -1904,7 +3137,20 @@ Content-Type: application/json
     "source": "moonraker"
 }
 ```
-JSON-RPC request: Not Available
+
+JSON-RPC request:
+```json
+{
+    "jsonrpc": "2.0",
+    "method": "access.login",
+    "params": {
+        "username": "my_user",
+        "password": "my_password",
+        "source": "moonraker"
+    },
+    "id": 1323
+}
+```
 
 Arguments:
 
 - `username`: The user login name.  This argument is required.
@@ -1939,7 +3185,15 @@ HTTP Request:
 ```http
 POST /access/logout
 ```
-JSON-RPC request: Not Available
+
+JSON-RPC request:
+```json
+{
+    "jsonrpc": "2.0",
+    "method": "access.logout",
+    "id": 1323
+}
+```
 
 Returns: An object containing the logged out username and action summary.
```json @@ -1955,7 +3209,15 @@ HTTP Request: ```http GET /access/user ``` -JSON-RPC request: Not Available + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "access.get_user", + "id": 1323 +} +``` Returns: An object containing the currently logged in user name, the source and the date on which the user was created (in unix time). @@ -1978,7 +3240,19 @@ Content-Type: application/json "password": "my_password" } ``` -JSON-RPC request: Not Available + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "access.post_user", + "params": { + "username": "my_user", + "password": "my_password" + }, + "id": 1323 +} +``` Returns: An object containing the created user name, an auth token, a refresh token, the source, and an action summary. Creating a user also @@ -2015,7 +3289,18 @@ Content-Type: application/json "username": "my_username" } ``` -JSON-RPC request: Not Available + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "access.delete_user", + "params": { + "username": "my_username" + }, + "id": 1323 +} +``` Returns: The username of the deleted user and an action summary. This effectively logs the user out, as all outstanding tokens will be invalid. @@ -2031,7 +3316,15 @@ HTTP Request: ```http GET /access/users/list ``` -JSON-RPC request: Not Available + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "access.users.list", + "id": 1323 +} +``` Returns: A list of created users on the system ```json @@ -2062,7 +3355,19 @@ Content-Type: application/json "new_password": "my_new_pass" } ``` -JSON-RPC request: Not Available + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "access.user.password", + "params": { + "password": "my_current_password", + "new_password": "my_new_pass" + }, + "id": 1323 +} +``` Returns: The username and action summary. ```json @@ -2087,7 +3392,17 @@ Content-Type: application/json } ``` -JSON-RPC request: Not Available +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "access.refresh_jwt", + "params": { + "refresh_token": "eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAiTW9vbnJha2VyIiwgImlhdCI6IDE2MTg4Nzc0ODUuNzcyMjg5OCwgImV4cCI6IDE2MjY2NTM0ODUuNzcyMjg5OCwgInVzZXJuYW1lIjogInRlc3R1c2VyIiwgInRva2VuX3R5cGUiOiAicmVmcmVzaCJ9.Y5YxGuYSzwJN2WlunxlR7XNa2Y3GWK-2kt-MzHvLbP8" + }, + "id": 1323 +} +``` Returns: The username, new auth token, the source and action summary. ```json @@ -2115,7 +3430,15 @@ HTTP request: ```http GET /access/oneshot_token ``` -JSON-RPC request: Not Available + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "access.oneshot_token", + "id": 1323 +} +``` Returns: @@ -2130,7 +3453,15 @@ HTTP Request: ```http GET /access/info ``` -JSON-RPC request: Not Available + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "access.info", + "id": 1323 +} +``` Returns: An object containing information about authorization endpoints, such as default_source and available_sources. @@ -2149,7 +3480,15 @@ HTTP request: ```http GET /access/api_key ``` -JSON-RPC request: Not Available + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "access.get_api_key", + "id": 1323 +} +``` Returns: @@ -2160,13 +3499,22 @@ HTTP request: ```http POST /access/api_key ``` -JSON-RPC request: Not Available + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "access.post_api_key", + "id": 1323 +} +``` Returns: The newly generated API key. This overwrites the previous key. 
Note that the API key change is applied immediately; all subsequent HTTP requests
-from untrusted clients must use the new key.
+from untrusted clients must use the new key.  Changing the API Key will
+not affect open websockets authenticated using the previous API Key.
 
 ### Database APIs
 The following endpoints provide access to Moonraker's lmdb database.  The
@@ -2207,13 +3555,16 @@ the request.  The entire settings object could be accessed by providing
 may be read by omitting the `key` argument, however as explained below
 it is not possible to modify a namespace without specifying a key.
 
-#### List namespaces
-Lists all available namespaces.
+#### List Database Info
+
+Lists all namespaces with read and/or write access.  Also lists database
+backup files.
 
 HTTP request:
 ```http
 GET /server/database/list
 ```
+
 JSON-RPC request:
 ```json
 {
@@ -2225,14 +3576,21 @@ JSON-RPC request:
 }
 ```
 
 Returns:
 
-An object containing an array of namespaces in the following format:
+An object containing an array of namespaces and an array of backup files.
 ```json
 {
     "namespaces": [
         "gcode_metadata",
-        "history",
-        "moonraker",
-        "test_namespace"
+        "webcams",
+        "update_manager",
+        "announcements",
+        "database",
+        "moonraker"
+    ],
+    "backups": [
+        "sqldb-backup-20240513-134542.db",
+        "testbackup.db",
+        "testbackup2.db"
     ]
 }
 ```
@@ -2352,6 +3710,180 @@ deleted item.
 }
 ```
 
+#### Compact Database
+
+Compacts and defragments the sqlite database using the `VACUUM` command.
+This API cannot be requested when Klipper is printing.
+
+HTTP request:
+```http
+POST /server/database/compact
+```
+
+JSON-RPC request:
+```json
+{
+    "jsonrpc": "2.0",
+    "method": "server.database.compact",
+    "id": 4654
+}
+```
+Returns:
+An object containing the size of the database on disk before and after
+the database is compacted.
+```json
+{
+    "previous_size": 139264,
+    "new_size": 122880
+}
+```
+
+#### Backup Database
+
+Creates a backup of the current database.  The backup will be
+created in the `<data_path>/backup/database/` folder.
+
+This API cannot be requested when Klipper is printing.
+
+HTTP request:
+```http
+POST /server/database/backup
+Content-Type: application/json
+
+{
+    "filename": "sql-db-backup.db"
+}
+```
+
+JSON-RPC request:
+```json
+{
+    "jsonrpc": "2.0",
+    "method": "server.database.post_backup",
+    "params": {
+        "filename": "sql-db-backup.db"
+    },
+    "id": 4654
+}
+```
+
+Parameters:
+
+- `filename`: An optional file name for the backup file.  The default
+  is `sqldb-backup-<date>-<time>.db`.
+
+
+Returns:
+An object containing the path on disk to the backup.
+```json
+{
+    "backup_path": "/home/test/printer_data/backup/database/sql-db-backup.db"
+}
+```
+
+#### Delete a backup
+
+Deletes a previously backed up database.
+
+HTTP request:
+```http
+DELETE /server/database/backup
+Content-Type: application/json
+
+{
+    "filename": "sql-db-backup.db"
+}
+```
+
+JSON-RPC request:
+```json
+{
+    "jsonrpc": "2.0",
+    "method": "server.database.delete_backup",
+    "params": {
+        "filename": "sql-db-backup.db"
+    },
+    "id": 4654
+}
+```
+
+Parameters:
+
+- `filename`:  The name of the backup file to delete.  Must be a valid
+  filename reported by the [database list](#list-database-info) API.
+  This parameter must be provided.
+
+Returns:
+An object containing the path on disk to the backup file that was removed.
+```json
+{
+    "backup_path": "/home/test/printer_data/backup/database/sql-db-backup.db"
+}
+```
+
+#### Restore Database
+
+Restores a previously backed up sqlite database file.  The backup
+must be located at `<data_path>/backup/database/`. 
The +`` must be a valid filename reported in by the +[database list](#list-database-info) API. + +This API cannot be requested when Klipper is printing. + +!!! Note + Moonraker will restart immediately after this request is processed. + +HTTP request: +```http +POST /server/database/restore +Content-Type: application/json + +{ + "filename": "sql-db-backup.db" +} +``` + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.database.restore", + "params": { + "filename": "sql-db-backup.db" + }, + "id": 4654 +} +``` + +Parameters: + +- `filename`: The name of the database file to restore. Must be a valid + filename reported in by the [database list](#list-database-info) API. + This parameter must be provided. + +Returns: +An object containing a list of restored namespaces and restored tables. +```json +{ + "restored_tables": [ + "table_registry", + "namespace_store", + "authorized_users", + "job_history", + "job_totals" + ], + "restored_namespaces": [ + "database", + "fluidd", + "gcode_metadata", + "mainsail", + "moonraker", + "update_manager", + "webcams" + ] +} +``` + ### Job Queue APIs The following endpoints may be used to manage Moonraker's job queue. @@ -2464,7 +3996,8 @@ Content-Type: application/json "job1.gcode", "job2.gcode", "subdir/job3.gcode" - ] + ], + "reset": false } ``` @@ -2478,12 +4011,18 @@ JSON-RPC request: "job1.gcode", "job2.gcode", "subdir/job3.gcode" - ] + ], + "reset": false }, "id": 4654 } ``` +Parameters: + +- `reset`: A boolean value indicating whether Moonraker should clear the + existing queued jobs before adding the new jobs. Defaults to `false`. + Returns: The current state of the job queue: @@ -2535,7 +4074,7 @@ JSON-RPC request: "method": "server.job_queue.delete_job", "params": { "job_ids": [ - "0000000066D991F0". + "0000000066D991F0", "0000000066D99D80" ] }, @@ -2665,6 +4204,56 @@ The current state of the job queue: } ``` +#### Perform a Queue Jump + +Jumps a job to the front of the queue. + +HTTP request: +```http +POST /server/job_queue/jump?job_id=0000000066D991F0 +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.job_queue.jump", + "params": { + "job_id": "0000000066D991F0" + }, + "id": 4654 +} +``` + +Returns: + +The current state of the job queue: + +```json +{ + "queued_jobs": [ + { + "filename": "job2.gcode", + "job_id": "0000000066D991F0", + "time_added": 1636151050.7766452, + "time_in_queue": 21.88680004119873 + }, + { + "filename": "job1.gcode", + "job_id": "0000000066D99C90", + "time_added": 1636151050.7666452, + "time_in_queue": 21.89680004119873 + }, + { + "filename": "subdir/job3.gcode", + "job_id": "0000000066D99D80", + "time_added": 1636151050.7866452, + "time_in_queue": 21.90680004119873 + } + ], + "queue_state": "loading" +} +``` + ### Announcement APIs The following endpoints are available to manage announcements. See [the appendix](#announcements) for details on how @@ -2698,7 +4287,6 @@ sorted by `date` and a list of feeds Moonraker is currently subscribed to: ```json { - { "entries": [ { "entry_id": "arksine/moonlight/issue/3", @@ -2757,7 +4345,6 @@ sorted by `date` and a list of feeds Moonraker is currently subscribed to: "moonlight" ] } -} ``` #### Update announcements @@ -2982,6 +4569,7 @@ The name of the new feed and the action taken. 
The `action` will be ``` ### Webcam APIs + The following APIs are available to manage webcam configuration: #### List Webcams @@ -3010,37 +4598,55 @@ A list of configured webcams: "name": "testcam3", "location": "door", "service": "mjpegstreamer", + "enabled": true, + "icon": "mdiWebcam", "target_fps": 20, + "target_fps_idle": 5, "stream_url": "http://camera.lan/webcam?action=stream", "snapshot_url": "http://camera.lan/webcam?action=snapshot", "flip_horizontal": false, "flip_vertical": true, "rotation": 90, - "source": "config" + "aspect_ratio": "4:3", + "extra_data": {}, + "source": "config", + "uid": "55d3801e-fdc1-438d-8728-2fff8b83b909" }, { "name": "tc2", "location": "printer", "service": "mjpegstreamer", + "enabled": true, + "icon": "mdiWebcam", "target_fps": 15, + "target_fps_idle": 5, "stream_url": "http://printer.lan/webcam?action=stream", "snapshot_url": "http://printer.lan/webcam?action=snapshot", "flip_horizontal": false, "flip_vertical": false, "rotation": 0, - "source": "database" + "aspect_ratio": "4:3", + "extra_data": {}, + "source": "database", + "uid": "65e51c8a-6763-41d4-8e76-345bb6e8e7c3" }, { "name": "TestCam", "location": "printer", "service": "mjpegstreamer", + "enabled": true, + "icon": "mdiWebcam", "target_fps": 15, + "target_fps_idle": 5, "stream_url": "/webcam/?action=stream", "snapshot_url": "/webcam/?action=snapshot", "flip_horizontal": false, "flip_vertical": false, "rotation": 0, - "source": "database" + "aspect_ratio": "4:3", + "extra_data": {}, + "source": "database", + "uid": "341778f9-387f-455b-8b69-ff68442d41d9" } ] } @@ -3050,15 +4656,15 @@ A list of configured webcams: HTTP request: ```http -GET /server/webcams/item?name=cam_name +GET /server/webcams/item?uid=341778f9-387f-455b-8b69-ff68442d41d9 ``` JSON-RPC request: ```json { "jsonrpc": "2.0", "method": "server.webcams.get_item", - "parmams": { - "name": "cam_name" + "params": { + "uid": "341778f9-387f-455b-8b69-ff68442d41d9" }, "id": 4654 } @@ -3066,9 +4672,11 @@ JSON-RPC request: Parameters: -- `name`: The name of the camera to request information for. If the named - camera is not available the request will return with an error. This - parameter must be provided. +- `uid`: The webcam's assigned unique ID. This parameter is optional, when + not specified the request will fallback to the `name` parameter. +- `name`: The name of the webcam to request information for. If the named + webcam is not available the request will return with an error. This + parameter must be provided when the `uid` is omitted. Returns: @@ -3080,18 +4688,27 @@ The full configuration for the requested webcam: "name": "TestCam", "location": "printer", "service": "mjpegstreamer", + "enabled": true, + "icon": "mdiWebcam", "target_fps": 15, + "target_fps_idle": 5, "stream_url": "/webcam/?action=stream", "snapshot_url": "/webcam/?action=snapshot", "flip_horizontal": false, "flip_vertical": false, "rotation": 0, - "source": "database" + "aspect_ratio": "4:3", + "extra_data": {}, + "source": "database", + "uid": "341778f9-387f-455b-8b69-ff68442d41d9" } } ``` #### Add or update a webcam +Adds a new webcam entry or updates an existing entry. When updating +an entry only the fields provided will be modified. + !!! Note A webcam configured via `moonraker.conf` cannot be updated or overwritten using this API. 
@@ -3113,7 +4730,7 @@ JSON-RPC request: { "jsonrpc": "2.0", "method": "server.webcams.post_item", - "parmams": { + "params": { "name": "cam_name", "snapshot_url": "/webcam?action=snapshot", "stream_url": "/webcam?action=stream" @@ -3124,31 +4741,51 @@ JSON-RPC request: Parameters: +- `uid`: The unique ID of the webcam. This parameter may be specified to + modify an existing webcam. New entries must omit the `uid`. - `name`: The name of the camera to add or update. This parameter must - be provided. + be provided for new entries. - `location`: A description of the webcam location, ie: what the webcam is - observing. The default is "printer". + observing. The default is `printer` for new entries. +- `icon`: The name of the icon to use for the camera. The default is `mdiWebcam` + for new entries. +- `enabled`: A boolean value to indicate if this webcam should be enabled. + Default is True for new entries. - `service`: The name of the webcam application streaming service. The default - is "mjpegstreamer". -- `target_fps`: The target framerate. The default is 15 + is "mjpegstreamer" for new entries. +- `target_fps`: The target framerate. The default is 15 for new entries. +- `target_fps_idle`: The target framerate when the printer is idle. + The default is 5 for new entries. - `stream_url`: The url for the camera stream request. This may be a full url or a url relative to Moonraker's host machine. If the url is relative it is assumed that the stream is available over http on port 80. This parameter - must be provided. + must be provided for new entries. - `snapshot_url`: The url for the camera snapshot request. This may be a full url or a url relative to Moonraker's host machine. If the url is relative - it is assumed that the snapshot is available over http on port 80. This - parameter must be provided. + it is assumed that the snapshot is available over http on port 80. The + default is an empty string for new entries. - `flip_horizontal`: A boolean value indicating whether the stream should be - flipped horizontally. The default is false. + flipped horizontally. The default is false for new entries. - `flip_vertical`: A boolean value indicating whether the stream should be - flipped vertically. The default is false. + flipped vertically. The default is false for new entries. - `rotation`: An integer value indicating the amount of clockwise rotation to - apply to the stream. May be 0, 90, 180, or 270. The default is 0. + apply to the stream. May be 0, 90, 180, or 270. The default is 0 for new entries. +- `aspect_ratio`: The aspect ratio to display for the camera. Note that this option + is specific to certain services, otherwise it is ignored. The default is `4:3` + for new entries. +- `extra_data`: Additional webcam data set by the front end in the form of a json + object. This may be used to store any additional webcam options and/or data. The + default is an empty object for new entries. + +!!! Tip + When modifying existing entries it is possible to rename an existing item by + specifying its current `uid` and a new value for `name`. Keep in mind that + names must be unique, an attempt to rename an existing webcam to another name + that is reserved will result in an error. 
Returns: -The full configuration for the added webcam: +The full configuration for the added/updated webcam: ```json { @@ -3156,13 +4793,19 @@ The full configuration for the added webcam: "name": "TestCam", "location": "printer", "service": "mjpegstreamer", + "enabled": true, + "icon": "mdiWebcam", "target_fps": 15, + "target_fps_idle": 5, "stream_url": "/webcam/?action=stream", "snapshot_url": "/webcam/?action=snapshot", "flip_horizontal": false, "flip_vertical": false, "rotation": 0, - "source": "database" + "aspect_ratio": "4:3", + "extra_data": {}, + "source": "database", + "uid": "341778f9-387f-455b-8b69-ff68442d41d9" } } ``` @@ -3175,15 +4818,15 @@ The full configuration for the added webcam: HTTP request: ```http -DELETE /server/webcams/item?name=cam_name +DELETE /server/webcams/item?uid=341778f9-387f-455b-8b69-ff68442d41d9 ``` JSON-RPC request: ```json { "jsonrpc": "2.0", "method": "server.webcams.delete_item", - "parmams": { - "name": "cam_name" + "params": { + "uid": "341778f9-387f-455b-8b69-ff68442d41d9" }, "id": 4654 } @@ -3191,9 +4834,11 @@ JSON-RPC request: Parameters: -- `name`: The name of the camera to delete. If the named camera is not +- `uid`: The webcam's assigned unique ID. This parameter is optional, when + not specified the request will fallback to the `name` parameter. +- `name`: The name of the webcam to delete. If the named webcam is not available the request will return with an error. This parameter must - be provided. + be provided when the `uid` is omitted. Returns: @@ -3211,7 +4856,8 @@ The full configuration of the deleted webcam: "flip_horizontal": false, "flip_vertical": false, "rotation": 0, - "source": "database" + "source": "database", + "uid": "341778f9-387f-455b-8b69-ff68442d41d9" } } ``` @@ -3224,15 +4870,15 @@ reachable. HTTP request: ```http -POST /server/webcams/test?name=cam_name +POST /server/webcams/test?uid=341778f9-387f-455b-8b69-ff68442d41d9 ``` JSON-RPC request: ```json { "jsonrpc": "2.0", "method": "server.webcams.test", - "parmams": { - "name": "cam_name" + "params": { + "uid": "341778f9-387f-455b-8b69-ff68442d41d9" }, "id": 4654 } @@ -3240,9 +4886,11 @@ JSON-RPC request: Parameters: -- `name`: The name of the camera to test. If the named camera is not +- `uid`: The webcam's assigned unique ID. This parameter is optional, when + not specified the request will fallback to the `name` parameter. +- `name`: The name of the webcam to test. If the named webcam is not available the request will return with an error. This parameter must - be provided. + be provided when the `uid` is omitted. Returns: Test results in the following format @@ -3255,25 +4903,80 @@ Returns: Test results in the following format } ``` +### Notifier APIs +The following APIs are available to view and tests notifiers. 
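+
+For example, a hypothetical client might enumerate the configured
+notifiers using the list endpoint documented next.  The host name
+`printer.local` is an assumption made for the sketch:
+
+```python
+# Hypothetical sketch: print each configured notifier and its events.
+import requests
+
+resp = requests.get("http://printer.local:7125/server/notifiers/list")
+resp.raise_for_status()
+for notifier in resp.json()["result"]["notifiers"]:
+    print(f"{notifier['name']}: fires on {', '.join(notifier['events'])}")
+```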
+
+#### List Notifiers
+
+HTTP request:
+```http
+GET /server/notifiers/list
+```
+JSON-RPC request:
+```json
+{
+    "jsonrpc": "2.0",
+    "method": "server.notifiers.list",
+    "id": 4654
+}
+```
+
+Returns:
+
+A list of configured notifiers:
+
+```json
+{
+    "notifiers": [
+        {
+            "name": "print_start",
+            "url": "tgram://{bottoken}/{ChatID}",
+            "events": [
+                "started"
+            ],
+            "body": "Your printer started printing '{event_args[1].filename}'",
+            "title": null,
+            "attach": null
+        },
+        {
+            "name": "print_complete",
+            "url": "tgram://{bottoken}/{ChatID}",
+            "events": [
+                "complete"
+            ],
+            "body": "Your printer completed printing '{event_args[1].filename}'",
+            "title": null,
+            "attach": "http://192.168.1.100/webcam/?action=snapshot"
+        },
+        {
+            "name": "print_error",
+            "url": "tgram://{bottoken}/{ChatID}",
+            "events": [
+                "error"
+            ],
+            "body": "{event_args[1].message}",
+            "title": null,
+            "attach": "http://192.168.1.100/webcam/?action=snapshot"
+        }
+    ]
+}
+```
+
 ### Update Manager APIs
 The following endpoints are available when the `[update_manager]`
 component has been configured:
 
 #### Get update status
-Retrieves the current state of each "package" available for update.  Typically
-this will consist of information regarding `moonraker`, `klipper`, `system`
-packages, along with configured clients.  If moonraker has not yet received
-information from Klipper then its status will be omitted.  One may request that
-the update info be refreshed by setting the `refresh` argument to `true`.  Note
-that the `refresh` argument is ignored if an update is in progress or if a print
-is in progress.  In these cases the current status will be returned immediately
-and no refresh will take place.  If the `refresh` argument is omitted its value
-defaults to `false`.
+Retrieves the current state of each item available for update.  Items may
+include the linux package manager (`system`), applications such as `moonraker` and
+`klipper`, web clients such as `mainsail` and `fluidd`, and other configured
+applications/extensions.
 
 HTTP request:
 ```http
 GET /machine/update/status?refresh=false
 ```
+
 JSON-RPC request:
 ```json
 {
@@ -3285,6 +4988,21 @@ JSON-RPC request:
     "id": 4644
 }
 ```
+
+Parameters:
+
+- `refresh`: (Optional) When set to `true` the state of all updaters will be
+  refreshed.  The default is `false`.  A request to refresh is aborted under
+  the following conditions:
+    - An update is in progress
+    - A print is in progress
+    - The update manager hasn't completed initialization
+    - A previous refresh has occurred within the last 60 seconds
+
+!!! Note
+    The `refresh` parameter is deprecated.  Client developers should use the
+    [refresh endpoint](#refresh-application-state) to request a refresh.
+
 Returns:
 
 Status information for each update package.
Note that `mainsail` @@ -3308,9 +5026,9 @@ and `fluidd` are present as clients configured in `moonraker.conf` "moonraker": { "channel": "dev", "debug_enabled": true, - "need_channel_update": false, "is_valid": true, "configured_type": "git_repo", + "corrupt": false, "info_tags": [], "detected_type": "git_repo", "remote_alias": "arksine", @@ -3319,6 +5037,7 @@ and `fluidd` are present as clients configured in `moonraker.conf` "repo_name": "moonraker", "version": "v0.7.1-364", "remote_version": "v0.7.1-364", + "rollback_version": "v0.7.1-360", "current_hash": "ecfad5cff15fff1d82cb9bdc64d6b548ed53dfaf", "remote_hash": "ecfad5cff15fff1d82cb9bdc64d6b548ed53dfaf", "is_dirty": false, @@ -3326,35 +5045,51 @@ and `fluidd` are present as clients configured in `moonraker.conf` "commits_behind": [], "git_messages": [], "full_version_string": "v0.7.1-364-gecfad5c", - "pristine": true + "pristine": true, + "recovery_url": "https://github.com/Arksine/moonraker.git", + "remote_url": "https://github.com/Arksine/moonraker.git", + "warnings": [], + "anomalies": [ + "Unofficial remote url: https://github.com/Arksine/moonraker-fork.git", + "Repo not on offical remote/branch, expected: origin/master, detected: altremote/altbranch", + "Detached HEAD detected" + ] }, "mainsail": { "name": "mainsail", "owner": "mainsail-crew", "version": "v2.1.1", "remote_version": "v2.1.1", + "rollback_version": "v2.0.0", "configured_type": "web", "channel": "stable", "info_tags": [ "desc=Mainsail Web Client", "action=some_action" - ] + ], + "warnings": [], + "anomalies": [], + "is_valid": true }, "fluidd": { "name": "fluidd", - "owner": "cadriel", - "version": "?", + "owner": "fluidd-core", + "version": "v1.16.2", "remote_version": "v1.16.2", - "configured_type": "web_beta", + "rollback_version": "v1.15.0", + "configured_type": "web", "channel": "beta", - "info_tags": [] + "info_tags": [], + "warnings": [], + "anomalies": [], + "is_valid": true }, "klipper": { "channel": "dev", "debug_enabled": true, - "need_channel_update": false, "is_valid": true, "configured_type": "git_repo", + "corrupt": false, "info_tags": [], "detected_type": "git_repo", "remote_alias": "origin", @@ -3363,6 +5098,7 @@ and `fluidd` are present as clients configured in `moonraker.conf` "repo_name": "klipper", "version": "v0.10.0-1", "remote_version": "v0.10.0-41", + "rollback_version": "v0.9.1-340", "current_hash": "4c8d24ae03eadf3fc5a28efb1209ce810251d02d", "remote_hash": "e3cbe7ea3663a8cd10207a9aecc4e5458aeb1f1f", "is_dirty": false, @@ -3383,11 +5119,15 @@ and `fluidd` are present as clients configured in `moonraker.conf` "subject": "stm32: Wait for transmission to complete before returning from spi_transfer()", "message": "It's possible for the SCLK pin to still be updating even after the\nlast byte of data has been read from the receive pin. (In particular\nin spi mode 0 and 1.) 
Exiting early from spi_transfer() in this case\ncould result in the CS pin being raised before the final updates to\nSCLK pin.\n\nAdd an additional wait at the end of spi_transfer() to avoid this\nissue.\n\nSigned-off-by: Kevin O'Connor ", "tag": null - }, + } ], "git_messages": [], "full_version_string": "v0.10.0-1-g4c8d24ae-shallow", - "pristine": true + "pristine": true, + "recovery_url": "https://github.com/Klipper3d/klipper.git", + "remote_url": "https://github.com/Klipper3d/klipper.git", + "warnings": [], + "anomalies": [] } } } @@ -3404,23 +5144,18 @@ Below is an explanation for each field: - `github_limit_reset_time`: the time when the rate limit will reset, reported as seconds since the epoch (aka Unix Time). -The `moonraker`, `klipper` packages, along with and clients configured -as applications have the following fields: +Extensions configured with the `git_repo` type will contain the following +fields: - `configured_type`: the application type configured by the user - `detected_type`: the application type as detected by Moonraker. - `channel`: the currently configured update channel. For Moonraker and Klipper this is set in the `[update_manager]` configuration. For clients the channel is determined by the configured type -- `need_channel_update`: This will be set to `true` if Moonraker has - detected that a channel swap is necessary (ie: the configured type does - not match the detected type). The channel swap will be performed on the - next update. -- `pristine`: For `zip` and `zip_beta` types this is set to `true` if an - applications source checksum matches the one generated when the app was - built. This value will be set to the opposite of "dirty" for git repos. - Note that a zip application can still be updated if the repo is not - pristine. +- `pristine`: Indicates that there are no modified files or untracked + source files in a `git_repo`. A repo with untracked files can still + be updated, however a repo with modified files (ie: `dirty`) cannot + be updated. - `owner`: the owner of the repo / application - `branch`: the name of the current git branch. This should typically be "master". @@ -3428,21 +5163,22 @@ as applications have the following fields: "origin". - `version`: abbreviated version of the current repo on disk - `remote_version`: abbreviated version of the latest available update +- `rollback_version`: version the repo will revert to when a rollback is + requested - `full_version_string`: The complete version string of the current repo. - `current_hash`: hash of the most recent commit on disk - `remote_hash`: hash of the most recent commit pushed to the remote -- `is_valid`: true if installation is a valid git repo on the master branch - and an "origin" set to the official remote. For `zip` and `zip_beta` - types this will report false if Moonraker is unable to fetch the - current repo state from GitHub. -- `is_dirty`: true if the repo has been modified. This will always be false - for `zip` and `zip_beta` types. -- `detached`: true if the repo is currently in a detached state. For `zip` - and `zip_beta` types it is considered detached if the local release info - does not match what is present on the remote. -- `debug_enabled`: True when `enable_repo_debug` has been configured. This - will bypass repo validation allowing detached updates, and updates from - a remote/branch other than than the primary (typically origin/master). +- `is_valid`: true if the `git_repo` is valid and can be updated. +- `corrupt`: Indicates that the git repo has been corrupted. 
When a repo + is in this state it a hard recovery (ie: re-cloning the repo) is necessary. + Note that the most common cause of repo corruption is removing power from + the host machine without safely shutting down. Damaged storage can also + lead to repo corruption. +- `is_dirty`: true if a `git_repo` has modified files. A dirty repo cannot + be updated. +- `detached`: true if the `git_repo` is currently in a detached state. +- `debug_enabled`: True when debug flag has been set via the command line. + When debug is enabled Moonraker will allow detached updates. - `commits_behind`: A list of commits behind. Up to 30 "untagged" commits will be reported. Moonraker checks the last 100 commits for tags, any commits beyond the last 30 with a tag will also be reported. @@ -3456,27 +5192,100 @@ as applications have the following fields: configuration for each client. Client developers my define what tags, if any, users will configure. They can choose to use those tags to display information or perform an additional action after an update if necessary. +- `recovery_url`: The url Moonraker will use to re-clone the repo when a + hard recovery is requested. If this reports a "?" then a hard recovery is + not possible. +- `remote_url`: The url for the currently configured remote. +- `warnings`: An array of strings that describe warnings detected during + repo init. These warnings provide additional context when the `is_valid` + field reports `true`. +- `anomalies`: An array of strings that describe anomalies found during + initialization. An anomaly can be defined as an unexpected condition, they + will not result in an invalid state, nor will they prevent an update. For + example, when the detected remote url does not match the configured/expected + url Moonraker will fall back to the detected url and report this condition + as an anomaly. -Web clients have the following fields: +Extensions configured with the `web` type will contain the following fields: - `channel`: channel to fetch updates from -- `configured_type`: will be `web` or `web_beta` +- `configured_type`: will be `web` - `name`: name of the configured client - `owner`: the owner of the client - `version`: version of the installed client. - `remote_version`: version of the latest release published to GitHub +- `rollback_version`: version the client will revert to when a rollback is + requested - `info_tags`: These are tags defined in the `[update_manager client_name]` configuration for each client. Client developers my define what tags, if any, users will configure. They can choose to use those tags to display information or perform an additional action after an update if necessary. +- `is_valid`: A boolean that reports true if an update is possible, false + if an update cannot be performed. +- `warnings`: An array of strings that describe warnings detected during + updater init. These warnings add context when the `is_valid` field reports + `true`. +- `anomalies`: An array of strings that describe anomalies found during + initialization. An anomaly can be defined as an unexpected condition, they + will not result in an invalid state, nor will they prevent an update. + For example, when the configured repo to check for updates does not match + the detected repo Moonraker will fall back to the detected repo and report + this condition as an anomaly. 
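+For illustration, a frontend might combine these fields to decide whether an
+item can be updated. A minimal sketch in Python (the payload shape follows
+the fields documented above; the helpers themselves are hypothetical):
+
+```python
+def update_available(item: dict) -> bool:
+    """Decide whether an update manager item can and should be updated."""
+    if not item.get("is_valid", False):
+        return False  # invalid items cannot be updated
+    if item.get("is_dirty", False):
+        return False  # repos with modified files cannot be updated
+    # an update is pending when the local and remote versions differ
+    return item.get("version") != item.get("remote_version")
+
+def collect_notes(item: dict) -> list:
+    """Gather warnings and anomalies for display to the user."""
+    return list(item.get("warnings", [])) + list(item.get("anomalies", []))
+```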
-The `system` package has the following fields: + +The `system` object contains the following fields: - `package_count`: the number of system packages available for update - `package_list`: an array containing the names of packages available for update -### Perform a full update +#### Refresh update status + +Refreshes the internal update state for the requested item(s). + +HTTP request: +```http +POST /machine/update/refresh?name=klipper +``` + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "machine.update.refresh", + "params": { + "name": "klipper" + }, + "id": 4644 +} +``` + +Parameters: + +- `name`: (Optional) The name of the specified application. If omitted + all registered applications will be refreshed. + +Returns: + +An object containing full update status matching the response in the +[status endpoint](#get-update-status). + +!!! Note + This endpoint will raise 503 error under the following conditions: + + - An update is in progress + - A print is in progress + - The update manager hasn't completed initialization + +!!! Warning + Applications should use care when calling this method as a refresh + is CPU intensive and may be time consuming. Moonraker can be + configured to refresh state periodically, thus it is recommended + that applications avoid their own procedural implementations. + Instead it is best to call this API only when a user requests a + refresh. + +#### Perform a full update Attempts to update all configured items in Moonraker. Updates are performed in the following order: @@ -3631,6 +5440,31 @@ Returns: `ok` when complete +#### Rollback to the previous version + +HTTP request: + +```http +POST /machine/update/rollback?name=moonraker +``` + +JSON-RPC request: + +```json +{ + "jsonrpc": "2.0", + "method": "machine.update.rollback", + "params": { + "name": "moonraker" + }, + "id": 4564 +} +``` + +Returns: + +`ok` when complete + ### Power APIs The APIs below are available when the `[power]` component has been configured. @@ -3643,7 +5477,7 @@ JSON-RPC request: ```json { "jsonrpc": "2.0", - "method":"machine.device_power.devices", + "method": "machine.device_power.devices", "id": 5646 } ``` @@ -3826,7 +5660,7 @@ JSON-RPC request: ```json { "jsonrpc": "2.0", - "method":"machine.wled.strips", + "method": "machine.wled.strips", "id": 7123 } ``` @@ -3835,56 +5669,7 @@ Returns: Strip information for all wled strips. ```json { - "result": { - "strips": { - "lights": { - "strip": "lights", - "status": "on", - "chain_count": 79, - "preset": -1, - "brightness": 255, - "intensity": -1, - "speed": -1, - "error": null - }, - "desk": { - "strip": "desk", - "status": "on", - "chain_count": 60, - "preset": 8, - "brightness": -1, - "intensity": -1, - "speed": -1, - "error": null - } - } - } -} -``` - -#### Get strip status -HTTP request: -```http -GET /machine/wled/status?strip1&strip2 -``` -JSON-RPC request: -```json -{ - "jsonrpc": "2.0", - "method":"machine.wled.status", - "params": { - "lights": null, - "desk": null - }, - "id": 7124 -} -``` -Returns: - -Strip information for requested strips. -```json -{ - "result": { + "strips": { "lights": { "strip": "lights", "status": "on", @@ -3909,6 +5694,51 @@ Strip information for requested strips. } ``` +#### Get strip status +HTTP request: +```http +GET /machine/wled/status?strip1&strip2 +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "machine.wled.status", + "params": { + "lights": null, + "desk": null + }, + "id": 7124 +} +``` +Returns: + +Strip information for requested strips. 
+```json +{ + "lights": { + "strip": "lights", + "status": "on", + "chain_count": 79, + "preset": -1, + "brightness": 255, + "intensity": -1, + "speed": -1, + "error": null + }, + "desk": { + "strip": "desk", + "status": "on", + "chain_count": 60, + "preset": 8, + "brightness": -1, + "intensity": -1, + "speed": -1, + "error": null + } +} +``` + #### Turn strip on Turns the specified strips on to the initial colors or intial preset. @@ -3920,7 +5750,7 @@ JSON-RPC request: ```json { "jsonrpc": "2.0", - "method":"machine.wled.on", + "method": "machine.wled.on", "params": { "lights": null, "desk": null @@ -3933,27 +5763,25 @@ Returns: Strip information for requested strips. ```json { - "result": { - "lights": { - "strip": "lights", - "status": "on", - "chain_count": 79, - "preset": -1, - "brightness": 255, - "intensity": -1, - "speed": -1, - "error": null - }, - "desk": { - "strip": "desk", - "status": "on", - "chain_count": 60, - "preset": 8, - "brightness": -1, - "intensity": -1, - "speed": -1, - "error": null - } + "lights": { + "strip": "lights", + "status": "on", + "chain_count": 79, + "preset": -1, + "brightness": 255, + "intensity": -1, + "speed": -1, + "error": null + }, + "desk": { + "strip": "desk", + "status": "on", + "chain_count": 60, + "preset": 8, + "brightness": -1, + "intensity": -1, + "speed": -1, + "error": null } } ``` @@ -3969,7 +5797,7 @@ JSON-RPC request: ```json { "jsonrpc": "2.0", - "method":"machine.wled.off", + "method": "machine.wled.off", "params": { "lights": null, "desk": null @@ -3982,27 +5810,25 @@ Returns: The new state of the specified strips. ```json { - "result": { - "lights": { - "strip": "lights", - "status": "off", - "chain_count": 79, - "preset": -1, - "brightness": 255, - "intensity": -1, - "speed": -1, - "error": null - }, - "desk": { - "strip": "desk", - "status": "off", - "chain_count": 60, - "preset": 8, - "brightness": -1, - "intensity": -1, - "speed": -1, - "error": null - } + "lights": { + "strip": "lights", + "status": "off", + "chain_count": 79, + "preset": -1, + "brightness": 255, + "intensity": -1, + "speed": -1, + "error": null + }, + "desk": { + "strip": "desk", + "status": "off", + "chain_count": 60, + "preset": 8, + "brightness": -1, + "intensity": -1, + "speed": -1, + "error": null } } ``` @@ -4012,13 +5838,13 @@ Turns each strip off if it is on and on if it is off. HTTP request: ```http -POST /machine/wled/off?strip1&strip2 +POST /machine/wled/toggle?strip1&strip2 ``` JSON-RPC request: ```json { "jsonrpc": "2.0", - "method":"machine.wled.toggle", + "method": "machine.wled.toggle", "params": { "lights": null, "desk": null @@ -4031,27 +5857,25 @@ Returns: The new state of the specified strips. ```json { - "result": { - "lights": { - "strip": "lights", - "status": "on", - "chain_count": 79, - "preset": -1, - "brightness": 255, - "intensity": -1, - "speed": -1, - "error": null - }, - "desk": { - "strip": "desk", - "status": "off", - "chain_count": 60, - "preset": 8, - "brightness": -1, - "intensity": -1, - "speed": -1, - "error": null - } + "lights": { + "strip": "lights", + "status": "on", + "chain_count": 79, + "preset": -1, + "brightness": 255, + "intensity": -1, + "speed": -1, + "error": null + }, + "desk": { + "strip": "desk", + "status": "off", + "chain_count": 60, + "preset": 8, + "brightness": -1, + "intensity": -1, + "speed": -1, + "error": null } } ``` @@ -4095,9 +5919,9 @@ Returns information for the specified strip. 
```json { "jsonrpc": "2.0", - "method":"machine.wled.get_strip", + "method": "machine.wled.get_strip", "params": { - "strip": "lights", + "strip": "lights" }, "id": 7128 } @@ -4107,7 +5931,7 @@ Calls the action with the arguments for the specified strip. ```json { "jsonrpc": "2.0", - "method":"machine.wled.post_strip", + "method": "machine.wled.post_strip", "params": { "strip": "lights", "action": "on", @@ -4134,21 +5958,497 @@ Returns: State of the strip. ```json { - "result": { - "lights": { - "strip": "lights", - "status": "on", - "chain_count": 79, - "preset": 1, - "brightness": 50, - "intensity": 255, - "speed": 255, - "error": null + "lights": { + "strip": "lights", + "status": "on", + "chain_count": 79, + "preset": 1, + "brightness": 50, + "intensity": 255, + "speed": 255, + "error": null + } +} +``` + +### Sensor APIs +The APIs below are available when the `[sensor]` component has been configured. + +#### Get Sensor List +HTTP request: +```http +GET /server/sensors/list?extended=False +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.sensors.list", + "params": { + "extended": false + } + "id": 5646 +} +``` + +Parameters: + +- `extended`: When set to `true` then each sensor will also include + parameter info and history field configuration. The default is false. + + +Returns: + +An array of objects containing info for each configured sensor. The +`parameter_info` and `history_fields` items will only be present when +the `extended` parameter is set to true. + +```json +{ + "sensors": { + "sensor1": { + "id": "sensor1", + "friendly_name": "Sensor 1", + "type": "mqtt", + "values": { + "value1": 0, + "value2": 119.8 + }, + "parameter_info": [ + { + "units": "kWh", + "name": "value1" + }, + { + "units": "V", + "name": "value2" + } + ], + "history_fields": [ + { + "field": "power_consumption", + "provider": "sensor sensor1", + "description": "Printer Power Consumption", + "strategy": "delta", + "units": "kWh", + "init_tracker": true, + "exclude_paused": false, + "report_total": true, + "report_maximum": true, + "precision": 6, + "parameter": "value1" + }, + { + "field": "max_voltage", + "provider": "sensor sensor1", + "description": "Maximum voltage", + "strategy": "maximum", + "units": "V", + "init_tracker": true, + "exclude_paused": false, + "report_total": false, + "report_maximum": false, + "precision": 6, + "parameter": "value2" + } + ] } } } ``` +#### Get Sensor Information +Returns the status for a single configured sensor. + +HTTP request: +```http +GET /server/sensors/info?sensor=sensor1&extended=false +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.sensors.info", + "params": { + "sensor": "sensor1", + "extended": false + }, + "id": 4564 +} +``` + +Parameters: + +- `extended`: When set to `true` then the response will also include + parameter info and history field configuration. The default is false. + + +Returns: + +An object containing sensor information for the requested sensor. The +`parameter_info` and `history_fields` items will only be present when +the `extended` parameter is set to true. 
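+As a quick sketch, a client might flatten each sensor's `values` map for
+display (the helper below is hypothetical; the full extended response is
+shown after it):
+
+```python
+def format_sensor(sensor: dict) -> str:
+    """Render a sensor's latest values as a one line summary."""
+    name = sensor.get("friendly_name", sensor.get("id", "?"))
+    vals = ", ".join(f"{k}={v}" for k, v in sensor.get("values", {}).items())
+    return f"{name}: {vals}"
+```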
+ +```json +{ + "id": "sensor1", + "friendly_name": "Sensor 1", + "type": "mqtt", + "values": { + "value1": 0.0, + "value2": 120.0 + }, + "parameter_info": [ + { + "units": "kWh", + "name": "value1" + }, + { + "units": "V", + "name": "value2" + } + ], + "history_fields": [ + { + "field": "power_consumption", + "provider": "sensor sensor1", + "description": "Printer Power Consumption", + "strategy": "delta", + "units": "kWh", + "init_tracker": true, + "exclude_paused": false, + "report_total": true, + "report_maximum": true, + "precision": 6, + "parameter": "value1" + }, + { + "field": "max_voltage", + "provider": "sensor sensor1", + "description": "Maximum voltage", + "strategy": "maximum", + "units": "V", + "init_tracker": true, + "exclude_paused": false, + "report_total": false, + "report_maximum": false, + "precision": 6, + "parameter": "value2" + } + ] +} +``` + +#### Get Sensor Measurements +Returns all recorded measurements for a configured sensor. + +HTTP request: +```http +GET /server/sensors/measurements?sensor=sensor1 +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.sensors.measurements", + "params": { + "sensor": "sensor1" + }, + "id": 4564 +} +``` +Returns: + +An object containing all recorded measurements for the requested sensor: +```json +{ + "sensor1": { + "value1": [ + 3.1, + 3.2, + 3.0 + ], + "value2": [ + 120.0, + 120.0, + 119.9 + ] + } +} +``` + +#### Get Batch Sensor Measurements +Returns recorded measurements for all sensors. + +HTTP request: +```http +GET /server/sensors/measurements +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.sensors.measurements", + "id": 4564 +} +``` +Returns: + +An object containing all measurements for every configured sensor: +```json +{ + "sensor1": { + "value1": [ + 3.1, + 3.2, + 3.0 + ], + "value2": [ + 120.0, + 120.0, + 119.9 + ] + }, + "sensor2": { + "value_a": [ + 1, + 1, + 0 + ] + } +} +``` + +### Spoolman APIs +The following APIs are available to interact with the Spoolman integration: + +#### Get Spoolman Status +Returns the current status of the spoolman module. + +HTTP request: +```http +GET /server/spoolman/status +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.spoolman.status", + "id": 4654 +} +``` + +Returns: + +An object containing details about the current status: + +```json +{ + "spoolman_connected": false, + "pending_reports": [ + { + "spool_id": 1, + "filament_used": 10 + } + ], + "spool_id": 2 +} +``` + +- `spoolman_connected`: A boolean indicating if Moonraker is connected to + Spoolman. When `false` Spoolman is unavailable. +- `pending_reports`: A list of objects containing spool data that has + yet to be reported to Spoolman. +- `spool_id`: The current Spool ID. Can be an integer value or `null`. + +#### Set active spool +Set the ID of the spool that Moonraker should report usage to Spoolman of. + +HTTP request: +```http +POST /server/spoolman/spool_id +Content-Type: application/json + +{ + "spool_id": 1 +} +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.spoolman.post_spool_id", + "params": { + "spool_id": 1 + }, + "id": 4654 +} +``` + +Returns: + +The id of the now active spool: + +```json +{ + "spool_id": 1 +} +``` + +!!! note + Send an empty object, `{}`, to un-set the spool ID and stop any reporting. + The response `spool_id` will then be set to *null* + +#### Get active spool +Retrieve the ID of the spool to which Moonraker reports usage for Spoolman. 
+ +HTTP request: +```http +GET /server/spoolman/spool_id +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.spoolman.get_spool_id", + "id": 4654 +} +``` + +Returns: + +The id of the active spool: + +```json +{ + "spool_id": 1 +} +``` + +!!! note + The `spool_id` can be *null* if there is no active spool. + +#### Proxy + +Moonraker supplies a proxy endpoint where you have full access to the Spoolman +API without having to configure the endpoint yourself. + +See Spoolman's [OpenAPI Description](https://donkie.github.io/Spoolman/) for +detailed information about it's API. + +HTTP request: +```http +POST /server/spoolman/proxy +Content-Type: application/json + +{ + "request_method": "POST", + "path": "/v1/spool", + "query": "a=1&b=4", + "body": { + "filament_id": 1 + } +} +``` + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "server.spoolman.proxy", + "params": { + "use_v2_response": true, + "request_method": "POST", + "path": "/v1/spool", + "query": "a=1&b=4", + "body": { + "filament_id": 1 + } + }, + "id": 4654 +} +``` + +The following parameters are available. `request_method` and `path` are required, the rest are optional. + +- `request_method`: The HTTP request method, e.g. `GET`, `POST`, `DELETE`, etc.. +- `path`: The endpoint, including API version, e.g. `/v1/filament`. +- `query`: The query part of the URL, e.g. `filament_material=PLA&vendor_name=Prima`. +- `body`: The request body for the request. +- `use_v2_response`: Returns the spoolman response in version 2 format. + Default is false. + +!!! Note + The version 2 response has been added to eliminate ambiguity between + Spoolman errors and Moonraker errors. With version 1 a frontend + is not able to reliably to determine if the error is sourced from + Spoolman or Moonraker. Version 2 responses will return success + unless Moonraker is the source of the error. + + The version 2 response is currently opt-in to avoid breaking + existing implementations, however in the future it will be + required, at which point the version 1 response will be removed. + The version 1 response is now deprecated. + +Returns: + +- Version 1 + +> The json response from the Spoolman server. Errors are proxied directly. +For example, if a request returns 404, Moonraker will return a 404 error +or the JSON-RPC equivalent of -32601, Method Not Found. + +- Version 2 + +> Returns the spoolman response wrapped in an object. The object contains +two fields, `error` and `response`. A successful request will place the +returned value in the `response` field and `error` will be `null.` When +Spoolman returns an error the `response` field will be `null` and the +`error` field will contain details about the error. +```json +{ + "response": { + "id": 2, + "registered": "2023-11-23T12:18:31Z", + "first_used": "2023-11-22T12:17:56.123000Z", + "last_used": "2023-11-23T10:17:59.900000Z", + "filament": { + "id": 2, + "registered": "2023-11-23T12:17:44Z", + "name": "Reactor Red", + "vendor": { + "id": 2, + "registered": "2023-06-26T21:00:42Z", + "name": "Fusion" + }, + "material": "PLA", + "price": 25, + "density": 1.24, + "diameter": 1.75, + "weight": 1000, + "color_hex": "BD0B0B" + }, + "remaining_weight": 950, + "used_weight": 50, + "remaining_length": 318519.4384459262, + "used_length": 16764.18097083822, + "archived": false + }, + "error": null +} +``` +> On Spoolman error: +```json +{ + "response": null, + "error": { + "status_code": 404, + "message": "No spool with ID 3 found." 
+ } +} +``` + + ### OctoPrint API emulation Partial support of OctoPrint API is implemented with the purpose of allowing uploading of sliced prints to a moonraker instance. @@ -4209,7 +6509,7 @@ An object containing stubbed OctoPrint login/user verification "admin": true, "apikey": null, "permissions": [], - "groups": ["admins", "users"], + "groups": ["admins", "users"] } ``` @@ -4390,7 +6690,7 @@ JSON-RPC request: ```json { "jsonrpc": "2.0", - "method":"server.history.list", + "method": "server.history.list", "params":{ "limit": 50, "start": 10, @@ -4429,8 +6729,62 @@ An array of requested historical jobs: "print_duration": 18.37201827496756, "status": "completed", "start_time": 1615764496.622146, - "total_duration": 18.37201827496756 - }, + "total_duration": 18.37201827496756, + "user": "testuser", + "auxiliary_data": [ + { + "provider": "sensor hist_test", + "name": "power_consumption", + "value": 4.119977, + "description": "Printer Power Consumption", + "units": "kWh" + }, + { + "provider": "sensor hist_test", + "name": "max_current", + "value": 2.768851, + "description": "Maximum current draw", + "units": "A" + }, + { + "provider": "sensor hist_test", + "name": "min_current", + "value": 0.426725, + "description": "Minmum current draw", + "units": "A" + }, + { + "provider": "sensor hist_test", + "name": "avg_current", + "value": 1.706872, + "description": "Average current draw", + "units": "A" + }, + { + "provider": "sensor hist_test", + "name": "status", + "value": 2, + "description": "Power Switch Status", + "units": null + }, + { + "provider": "sensor hist_test", + "name": "filament", + "value": 19.08058495194607, + "description": "filament usage tracker", + "units": "mm" + }, + { + "provider": "spoolman", + "name": "spool_ids", + "value": [ + 1 + ], + "description": "Spool IDs used", + "units": null + } + ] + } ] } ``` @@ -4444,7 +6798,7 @@ JSON-RPC request: ```json { "jsonrpc": "2.0", - "method":"server.history.totals", + "method": "server.history.totals", "id": 5656 } ``` @@ -4461,7 +6815,27 @@ An object containing the following total job statistics: "total_filament_used": 11615.718840001999, "longest_job": 11665.191012736992, "longest_print": 11348.794790096988 - } + }, + "auxiliary_totals": [ + { + "provider": "sensor hist_test", + "field": "power_consumption", + "maximum": 4.119977, + "total": 4.119977 + }, + { + "provider": "sensor hist_test", + "field": "avg_current", + "maximum": 1.706872, + "total": null + }, + { + "provider": "sensor hist_test", + "field": "filament", + "maximum": 19.08058495194607, + "total": 19.08058495194607 + } + ] } ``` @@ -4479,6 +6853,7 @@ JSON-RPC request: "method": "server.history.reset_totals", "id": 5534 } +``` Returns: @@ -4493,7 +6868,27 @@ The totals prior to the reset: "total_filament_used": 11615.718840001999, "longest_job": 11665.191012736992, "longest_print": 11348.794790096988 - } + }, + "last_auxiliary_totals": [ + { + "provider": "sensor hist_test", + "field": "power_consumption", + "maximum": 4.119977, + "total": 4.119977 + }, + { + "provider": "sensor hist_test", + "field": "avg_current", + "maximum": 1.706872, + "total": null + }, + { + "provider": "sensor hist_test", + "field": "filament", + "maximum": 19.08058495194607, + "total": 19.08058495194607 + } + ] } ``` @@ -4506,7 +6901,7 @@ JSON-RPC request: ```json { "jsonrpc": "2.0", - "method":"server.history.get_job", + "method": "server.history.get_job", "params":{"uid": "{uid}"}, "id": 4564 } @@ -4591,7 +6986,7 @@ JSON-RPC request: ```json { "jsonrpc": "2.0", - 
"method":"server.mqtt.publish", + "method": "server.mqtt.publish", "params":{ "topic": "home/test/pub", "payload": "hello", @@ -4653,7 +7048,7 @@ JSON-RPC request: ```json { "jsonrpc": "2.0", - "method":"server.mqtt.subscribe", + "method": "server.mqtt.subscribe", "params":{ "topic": "home/test/sub", "qos": 0, @@ -4711,7 +7106,7 @@ JSON-RPC request: ```json { "jsonrpc": "2.0", - "method":"server.extensions.list", + "method": "server.extensions.list", "id": 4564 } ``` @@ -4755,7 +7150,7 @@ JSON-RPC request: ```json { "jsonrpc": "2.0", - "method":"server.extensions.request", + "method": "server.extensions.request", "params":{ "agent": "moonagent", "method": "moontest.hello_world", @@ -4791,7 +7186,7 @@ JSON-RPC request: ```json { "jsonrpc": "2.0", - "method":"connection.send_event", + "method": "connection.send_event", "params":{ "event": "my_event", "data": {"my_arg": "optional data"} @@ -4817,6 +7212,377 @@ Returns: returned. Once received, Moonraker will broadcast this event via the [agent event notification](#agent-events) to all other connections. +#### Register a method with Klipper + +Allows agents to register remote methods with Klipper. These methods +may be called in `gcode_macros`. + +!!! Note + This API is only available to websocket connections that have + identified themselves as an `agent` type. + +HTTP Request: Not Available + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "connection.register_remote_method", + "params": { + "method_name": "firemon_alert_heated" + } +} +``` + +Parameters: + +- `method_name`: The name of the desired method. Agents should make sure that + the name is unique. One recommendation is to prefix the agent's name + to each method it registers. + +Returns: + +`ok` if registration is successful. An error is returned if the method name +is already registered. + +!!! Note + Methods registered by agents will persist until the agent disconnects. + Upon connection, it is only necessary that they register their desired + methods once. + +Example: + +Presume an application named `firemon` has connected to Moonraker's websocket +and identified itself as an `agent`. After identification it registers a +remote method named `firemon_alert_heated`. + +In addition, the user the following `gcode_macro` configured in `printer.cfg`: + +```ini +# printer.cfg + +[gcode_macro ALERT_HEATED] +gcode: + {% if not params %} + {action_call_remote_method("firemon_alert_heated")} + {% else %} + {% set htr = params.HEATER|default("unknown") %} + {% set tmp = params.TEMP|default(0)|float %} + {action_call_remote_method( + "firemon_alert_heated", heater=htr, temp=tmp)} + {% endif %} + + +``` + +When the `ALERT_HEATED HEATER=extruder TEMP=200` gcode is executed by Klipper, +the agent will receive the following: + +```json +{ + "jsonrpc": "2.0", + "method": "firemon_alert_heated", + "params": { + "heater": "extruder", + "temp": 200 + } +} +``` + +When the `ALERT_HEATED` gcode is executed with no parameters, the agent will +receive the following: + +```json +{ + "jsonrpc": "2.0", + "method": "monitor_alert_heated" +} +``` + +!!! Note + Methods called from Klipper never contain the "id" field, as Klipper + does not accept return values to remote methods. + +### Debug APIs + +The APIs in this section are available when Moonraker the debug argument +(`-g`) has been supplied via the command line. Some APIs may also depend +on Moonraker's configuration, ie: an optional component may choose to +register a debug API. + +!!! 
Warning + Debug APIs may expose security vulnerabilities. They should only be + enabled by developers on secured machines. + +#### List Database Info (debug) + +Debug version of [List Database Info](#list-database-info). Returns +all namespaces, including those exlusively reserved for Moonraker. +In addition, registered SQL tables are reported. + + +HTTP request: +```http +GET /debug/database/list +``` + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "debug.database.list", + "id": 8694 +} +``` + +Returns: + +An object containing an array of namespaces, an array of tables, and +an array of backup files. +```json +{ + "namespaces": [ + "gcode_metadata", + "webcams", + "update_manager", + "announcements", + "database", + "moonraker" + ], + "backups": [ + "sqldb-backup-20240513-134542.db", + "testbackup.db", + "testbackup2.db" + ], + "tables": [ + "job_history", + "job_totals", + "namespace_store", + "table_registry", + "authorized_users" + ] +} +``` + +#### Get Database Item (debug) + +Debug version of [Get Database Item](#get-database-item). Keys within +protected and forbidden namespaces are accessible. + +!!! Warning + Moonraker's forbidden namespaces may include items such as user credentials. + This endpoint should NOT be implemented in front ends directly. + +HTTP request: +```http +GET /debug/database/item?namespace={namespace}&key={key} +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "debug.database.get_item", + "params": { + "namespace": "{namespace}", + "key": "{key}" + }, + "id": 5644 +} +``` + +#### Add Database Item (debug) + +Debug version of [Add Database Item](#add-database-item). Keys within +protected and forbidden namespaces may be added. + +!!! Warning + This endpoint should be used for testing/debugging purposes only. + Modifying protected namespaces outside of Moonraker can result in + broken functionality and is not supported for production environments. + Issues opened with reports/queries related to this endpoint will be + redirected to this documentation and closed. + +```http +POST /debug/database/item +Content-Type: application/json + +{ + "namespace": "my_client", + "key": "settings.some_count", + "value": 100 +} +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "debug.database.post_item", + "params": { + "namespace": "{namespace}", + "key": "{key}", + "value": 100 + }, + "id": 4654 +} +``` + +#### Delete Database Item (debug) + +Debug version of [Delete Database Item](#delete-database-item). Keys within +protected and forbidden namespaces may be removed. + +!!! Warning + This endpoint should be used for testing/debugging purposes only. + Modifying protected namespaces outside of Moonraker can result in + broken functionality and is not supported for production environments. + Issues opened with reports/queries related to this endpoint will be + redirected to this documentation and closed. + +HTTP request: +```http +DELETE /debug/database/item?namespace={namespace}&key={key} +``` + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "debug.database.delete_item", + "params": { + "namespace": "{namespace}", + "key": "{key}" + }, + "id": 4654 +} +``` + +#### Get Database Table + +Requests all the contents of a specified table. 
+ +HTTP request: +```http +GET /debug/database/table?table=job_history +``` + +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "debug.database.table", + "params": { + "table": "job_history" + }, + "id": 4654 +} +``` + +Parameters: + +- `table`: The name of the table to request. This parameter must + be provided. + +Returns: + +An object with the table's name and a list of all rows contained +within the table. The `rowid` will always be included for each +row, however it may be represented by an alias. In the example +below the alias for `rowid` is `job_id`. + +```json +{ + "table_name": "job_history", + "rows": [ + { + "job_id": 1, + "user": "No User", + "filename": "active_test.gcode", + "status": "completed", + "start_time": 1690749153.2661753, + "end_time": 1690749173.076986, + "print_duration": 0.0, + "total_duration": 19.975574419135228, + "filament_used": 0.0, + "metadata": { + "size": 211, + "modified": 1635771217.0, + "uuid": "627371e0-faa5-4ced-8bb4-7017d29226fa", + "slicer": "Unknown", + "gcode_start_byte": 8, + "gcode_end_byte": 211 + }, + "auxiliary_data": [], + "instance_id": "default" + }, + { + "job_id": 2, + "user": "No User", + "filename": "active_test.gcode", + "status": "completed", + "start_time": 1701262034.9242446, + "end_time": 1701262054.7332363, + "print_duration": 0.0, + "total_duration": 19.990913168992847, + "filament_used": 0.0, + "metadata": { + "size": 211, + "modified": 1635771217.0, + "uuid": "627371e0-faa5-4ced-8bb4-7017d29226fa", + "slicer": "Unknown", + "gcode_start_byte": 8, + "gcode_end_byte": 211 + }, + "auxiliary_data": { + "spool_ids": [ + 2 + ] + }, + "instance_id": "default" + } + ] +} +``` + +#### Test a notifier (debug) + +You can trigger a notifier manually using this endpoint. + +HTTP request: +```http +POST /debug/notifiers/test?name=notifier_name +``` +JSON-RPC request: +```json +{ + "jsonrpc": "2.0", + "method": "debug.notifiers.test", + "params": { + "name": "notifier_name" + }, + "id": 4654 +} +``` + +Parameters: + +- `name`: The name of the notifier to test. + +Returns: Test results in the following format + +```json +{ + "status": "success", + "stats": { + "print_duration": 0.0, + "total_duration": 0.0, + "filament_used": 0.0, + "filename": "notifier_test.gcode", + "state": "standby", + "message": "" + } +} +``` + ### Websocket notifications Printer generated events are sent over the websocket as JSON-RPC 2.0 notifications. These notifications are sent to all connected clients @@ -4908,22 +7674,26 @@ to alert all connected clients of the change: { "action": "{action}", "item": { - "path": "{file or directory path}", + "path": "{file or directory path relative to root}", "root": "{root}", "size": 46458, - "modified": 545465 + "modified": 545465, + "permissions": "rw" }, "source_item": { - "path": "{file or directory path}", + "path": "{file or directory path relative to root}", "root": "{root_name}" } } ] } ``` -The `source_item` field is only present for `move_item` and -`copy_item` actions. The `action` field will be set -to one of the following values: + +!!! Note + The `source_item` field is only present for `move_file` and + `move_dir` actions. + +The `action` field will be set to one of the following values: - `create_file` - `create_dir` @@ -4935,9 +7705,16 @@ to one of the following values: - `root_update` Most of the above actions are self explanatory. The `root_update` -notification is sent when a `root` folder has changed its location, -for example when a user configures a different gcode file path -in Klipper. 
+notification is sent when a `root` folder has changed its location. +This should be a rare event as folders are now managed in using the +data folder structure. + +Notifications are bundled where applicable. For example, when a +directory containing children is deleted a single `delete_dir` notification +is pushed. Likewise, when a directory is moved or copied, a single +`move_dir` or `create_dir` notification is pushed. Children that are +moved, copied, or deleted as a result of a parent's action will +not receive individual notifications. #### Update Manager Response The update manager will send asynchronous messages to the client during an @@ -5092,6 +7869,21 @@ sent when an existing user is deleted. } ``` +#### Authorized User Logged Out +If the `[authorization]` module is enabled the following notification is +sent when an existing user is logged out. +```json +{ + "jsonrpc": "2.0", + "method": "notify_user_logged_out", + "params": [ + { + "username": "" + } + ] +} +``` + #### Service State Changed Moonraker monitors the state of systemd services it is authorized to track. When the state of a service changes the following notification is sent: @@ -5285,6 +8077,124 @@ a specified `wake_time` for a dismissed announcement has expired. The `params` array will contain an object with the `entry_id` of the announcement that is no longer dismissed. +#### Sudo alert event +Moonraker will emit the `notify_sudo_alert` notification when +a component has requested sudo access. The event is also emitted +when a sudo request has been granted. + +```json +{ + "jsonrpc": "2.0", + "method": "notify_sudo_alert", + "params": [ + { + "sudo_requested": true, + "sudo_messages": [ + "Sudo password required to update Moonraker's systemd service." + ] + } + ] +} +``` + +The `params` array contains an object with the following fields: + +- `sudo_requested`: Returns true if Moonraker is currently requesting + sudo access. +- `request_messages`: An array of strings, each string describing + a pending sudo request. The array will be empty if no sudo + requests are pending. + +#### Webcams changed event + +Moonraker will emit the `notify_webcams_changed` event when a configured +webcam is added, removed, or updated. + +```json +{ + "jsonrpc": "2.0", + "method": "notify_webcams_changed", + "params": [ + { + "webcams": [ + { + "name": "tc2", + "location": "printer", + "service": "mjpegstreamer", + "enabled": true, + "icon": "mdiWebcam", + "target_fps": 15, + "target_fps_idle": 5, + "stream_url": "http://printer.lan/webcam?action=stream", + "snapshot_url": "http://printer.lan/webcam?action=snapshot", + "flip_horizontal": false, + "flip_vertical": false, + "rotation": 0, + "aspect_ratio": "4:3", + "extra_data": {}, + "source": "database" + }, + { + "name": "TestCam", + "location": "printer", + "service": "mjpegstreamer", + "enabled": true, + "icon": "mdiWebcam", + "target_fps": 15, + "target_fps_idle": 5, + "stream_url": "/webcam/?action=stream", + "snapshot_url": "/webcam/?action=snapshot", + "flip_horizontal": false, + "flip_vertical": false, + "rotation": 0, + "aspect_ratio": "4:3", + "extra_data": {}, + "source": "database" + } + ] + } + ] +} +``` + +The `webcams` field contans an array of objects like those returned by the +[list webcams](#list-webcams) API. + +#### Spoolman active spool ID changed + +Moonraker will emit the `notify_active_spool_set` event when the active spool +ID for the Spoolman integration has been changed. + +See the [Spoolman API](#spoolman-apis) for more information. 
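+As a sketch of how a websocket client might consume this event (the handler
+and state names are hypothetical; the notification payload it reacts to is
+shown below the snippet):
+
+```python
+def on_notification(msg: dict, state: dict) -> None:
+    """Dispatch a JSON-RPC notification received from Moonraker."""
+    if msg.get("method") == "notify_active_spool_set":
+        # params is a single element array containing the event data
+        state["spool_id"] = msg["params"][0]["spool_id"]
+```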
+ +```json +{ + "jsonrpc": "2.0", + "method": "notify_active_spool_set", + "params": [ + { + "spool_id": 1 + } + ] +} +``` + +#### Spoolman Status Changed + +Moonraker will emit the `notify_spoolman_status_changed` event when the +connection state to the Spoolman service has changed: + +```json +{ + "jsonrpc": "2.0", + "method": "notify_spoolman_status_changed", + "params": [ + { + "spoolman_connected": false + } + ] +} +``` #### Agent Events Moonraker will emit the `notify_agent_event` notification when it @@ -5315,6 +8225,31 @@ disconnects clients will receive a `disconnected` event with the data field omitted. All other events are determined by the agent, where each event may or may not include optional `data`. +#### Sensor Events + +Moonraker will emit a `sensors:sensor_update` notification when a measurement +from at least one monitored sensor changes. + +```json +{ + "jsonrpc": "2.0", + "method": "sensors:sensor_update", + "params": [ + { + "sensor1": { + "humidity": 28.9, + "temperature": 22.4 + } + } + ] +} +``` + +When a sensor reading changes, all connections will receive a +`sensors:sensor_update` event where the params contains a data struct +with the sensor id as the key and the sensors letest measurements as value +struct. + ### Appendix #### Websocket setup @@ -5741,7 +8676,7 @@ each entry is an object containing the following fields: the announcement. When a client first connects to Moonraker it is recommended that the -[list announcements](#list-announcements) API is called to retreive +[list announcements](#list-announcements) API is called to retrieve the current list of entries. A client may then watch for the [announcement update](#announcement-update-event) and [announcement dismissed](#announcement-dismissed-event) notifications diff --git a/mkdocs.yml b/mkdocs.yml index 309db2e..b3646e3 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -2,24 +2,116 @@ site_name: Moonraker site_url: https://moonraker.readthedocs.io repo_url: https://github.com/Arksine/moonraker nav: - - 'User Documentation': - - Installation: installation.md - - Configuration : configuration.md - - User Changes: user_changes.md - - 'Client Developers': - - Client API: web_api.md + - Installation: installation.md + - Configuration : configuration.md + - 'Developer Documentation': + - Remote API: web_api.md - Printer Objects: printer_objects.md - - API Changes: api_changes.md - - 'Backend Developers': - - Contributing: contributing.md - Components: components.md + - Contribution Guidelines: contributing.md + - Changelog: changelog.md theme: - name: readthedocs + name: material + palette: + - scheme: default + primary: blue grey + accent: light blue + toggle: + icon: material/weather-sunny + name: Switch to Dark Mode + - scheme: slate + primary: black + accent: light blue + toggle: + icon: material/weather-night + name: Switch to Light Mode + font: + text: Roboto + code: Roboto Mono + features: + - navigation.top + - navigation.instant + - navigation.indexes + - navigation.expand + - toc.follow + - content.tabs.link + - search.share + - search.highlight + - search.suggest + - content.code.copy + - content.code.annotations plugins: - search markdown_extensions: + - abbr - admonition - - pymdownx.superfences - - pymdownx.highlight: - use_pygments: false + - attr_list + - def_list + - footnotes + - md_in_html + - toc: + permalink: true + - pymdownx.arithmatex: + generic: true + - pymdownx.betterem: + smart_enable: all + - pymdownx.caret + - pymdownx.details + - pymdownx.emoji: + emoji_index: 
!!python/name:materialx.emoji.twemoji + emoji_generator: !!python/name:materialx.emoji.to_svg + - pymdownx.highlight - pymdownx.inlinehilite + - pymdownx.keys + - pymdownx.mark + - pymdownx.smartsymbols + - pymdownx.superfences + - pymdownx.tabbed: + alternate_style: true + - pymdownx.tasklist: + custom_checkbox: true + - pymdownx.tilde + - pymdownx.blocks.details: + types: + - name: details-new + class: new + - name: details-settings + class: settings + - name: details-note + class: note + - name: details-abstract + class: abstract + - name: details-info + class: info + - name: details-tip + class: tip + - name: details-success + class: success + - name: details-question + class: question + - name: details-warning + class: warning + - name: details-failure + class: failure + - name: details-danger + class: danger + - name: details-bug + class: bug + - name: details-example + class: example + - name: details-quote + class: quote + - name: api-example-response + class: example + title: "Example Response" + - name: api-response-schema + class: info + title: "Response Schema" + - name: api-parameters + class: info + title: "Parameters" + - tables + - compact_tables: + auto_insert_break: true +extra_css: + - src/css/extras.css \ No newline at end of file diff --git a/moonraker/__init__.py b/moonraker/__init__.py new file mode 100644 index 0000000..c055a26 --- /dev/null +++ b/moonraker/__init__.py @@ -0,0 +1,5 @@ +# Top level package definition for Moonraker +# +# Copyright (C) 2022 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license diff --git a/moonraker/__main__.py b/moonraker/__main__.py new file mode 100644 index 0000000..25e6321 --- /dev/null +++ b/moonraker/__main__.py @@ -0,0 +1,9 @@ +# Package entry point for Moonraker +# +# Copyright (C) 2022 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license + +from .server import main + +main() diff --git a/moonraker/assets/__init__.py b/moonraker/assets/__init__.py new file mode 100644 index 0000000..d2c26f8 --- /dev/null +++ b/moonraker/assets/__init__.py @@ -0,0 +1 @@ +# Assets Package Definition diff --git a/moonraker/assets/default_allowed_services b/moonraker/assets/default_allowed_services new file mode 100644 index 0000000..c8a1af5 --- /dev/null +++ b/moonraker/assets/default_allowed_services @@ -0,0 +1,10 @@ +klipper_mcu +webcamd +MoonCord +KlipperScreen +moonraker-telegram-bot +moonraker-obico +sonar +crowsnest +octoeverywhere +ratos-configurator diff --git a/moonraker/assets/welcome.html b/moonraker/assets/welcome.html index 102afc9..407d13b 100644 --- a/moonraker/assets/welcome.html +++ b/moonraker/assets/welcome.html @@ -123,6 +123,137 @@ background-color: rgb(160, 64, 8); } } + + .modal { + display: none; + position: fixed; + z-index: 1; + left: 0; + top: 0; + width: 100%; + height: 100%; + overflow: auto; + background-color: rgb(0,0,0); + background-color: rgba(0,0,0,0.4); + } + + .modal-card { + background: none; + position: relative; + border: 0px; + border-radius: 1rem; + background-color: #1a1a1a; + margin: 20% auto 2rem auto; + padding: 0rem; + border: 0px; + width: 50%; + animation-name: fadein; + animation-duration: .5s; + } + + .modal-card h1 { + background-color: #006f7e; + text-align: center; + line-height: 3rem; + font-size: 1.1rem; + height: 3rem; + margin: 0; + border-top-left-radius: 1rem; + border-top-right-radius: 1rem; + } + + .modal-content { + background-color: #3e3e3e; + padding: 1rem; + margin: 0; + height: auto + } + + .modal-content 
.entry { + display: inline-block; + width: 100%; + } + .modal-content .entry:not(:last-child) { + margin-bottom: .5rem; + } + .modal-content .value { + float: right; + display: inline; + } + .modal-content input { + width: 100%; + padding: 8px; + border-radius: 4px; + -moz-border-radius: 4px; + -webkit-border-radius: 4px; + font-size: 1rem; color: #222; + background: #F7F7F7; + + } + + .modal-footer { + display: inline-block; + background-color: #3e3e3e; + margin: 0; + height: auto; + width: 100%; + border-bottom-left-radius: 1rem; + border-bottom-right-radius: 1rem; + } + + .modal-button { + float: right; + background: #cecece; + border: none; + width: auto; + overflow: visible; + font-size: 1rem; + font-weight: bold; + color: rgb(0, 0, 0); + padding: .4rem .5rem; + margin: 0rem .5rem .5rem 0rem; + border-radius: .5rem; + -webkit-border-radius: .5rem; + -moz-border-radius: .5rem; + } + + .modal-button:hover { + color: rgb(8, 154, 45); + text-decoration: none; + cursor: pointer; + } + + .modal-status { + display: none; + position: relative; + border: 0; + border-radius: 1rem; + background-color: #3e3e3e; + margin: auto; + padding: 0rem; + width: 50%; + animation-name: fadebottom; + animation-duration: .5s; + } + + .modal-status:hover { + cursor: pointer; + } + + .modal-status .content { + display: inline-block; + margin: 1rem; + } + + @keyframes fadebottom { + from {top: 10em; opacity: 0} + to {top: 0em; opacity: 1} + } + + @keyframes fadein { + from {opacity: 0} + to {opacity: 1} + } - {% end %} + {% endfor %} - {% end %} + {% endif %} {% if warnings %}

Warnings

{% for warn in warnings %}
{{ warn }}
- {% end %} + {% endfor %}
- {% end %} + {% endif %} + + diff --git a/moonraker/common.py b/moonraker/common.py new file mode 100644 index 0000000..b467b98 --- /dev/null +++ b/moonraker/common.py @@ -0,0 +1,1302 @@ +# Common classes used throughout Moonraker +# +# Copyright (C) 2023 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license + +from __future__ import annotations +import sys +import logging +import copy +import re +import inspect +import dataclasses +import time +from enum import Enum, Flag, auto +from abc import ABCMeta, abstractmethod +from .utils import ServerError, Sentinel +from .utils import json_wrapper as jsonw + +# Annotation imports +from typing import ( + TYPE_CHECKING, + Any, + Optional, + Callable, + Coroutine, + Type, + TypeVar, + Union, + Dict, + List, + Awaitable, + ClassVar, + Tuple, + Generic +) + +if TYPE_CHECKING: + from .server import Server + from .components.websockets import WebsocketManager + from .components.authorization import Authorization + from .components.history import History + from .components.database import DBProviderWrapper + from .utils import IPAddress + from asyncio import Future + _C = TypeVar("_C", str, bool, float, int) + _F = TypeVar("_F", bound="ExtendedFlag") + ConvType = Union[str, bool, float, int] + ArgVal = Union[None, int, float, bool, str] + RPCCallback = Callable[..., Coroutine] + AuthComp = Optional[Authorization] + +_T = TypeVar("_T") +ENDPOINT_PREFIXES = ["printer", "server", "machine", "access", "api", "debug"] + +class ExtendedFlag(Flag): + @classmethod + def from_string(cls: Type[_F], flag_name: str) -> _F: + str_name = flag_name.upper() + for name, member in cls.__members__.items(): + if name == str_name: + return cls(member.value) + raise ValueError(f"No flag member named {flag_name}") + + @classmethod + def from_string_list(cls: Type[_F], flag_list: List[str]) -> _F: + ret = cls(0) + for flag in flag_list: + flag = flag.upper() + ret |= cls.from_string(flag) + return ret + + @classmethod + def all(cls: Type[_F]) -> _F: + return ~cls(0) + + if sys.version_info < (3, 11): + def __len__(self) -> int: + return bin(self._value_).count("1") + + def __iter__(self): + for i in range(self._value_.bit_length()): + val = 1 << i + if val & self._value_ == val: + yield self.__class__(val) + +class RequestType(ExtendedFlag): + """ + The Request Type is also known as the "Request Method" for + HTTP/REST APIs. The use of "Request Method" nomenclature + is discouraged in Moonraker as it could be confused with + the JSON-RPC "method" field. 
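+    For example, an endpoint accepting both GET and POST HTTP requests is
+    described by the combined flag ``RequestType.GET | RequestType.POST``.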
+ """ + GET = auto() + POST = auto() + DELETE = auto() + +class TransportType(ExtendedFlag): + HTTP = auto() + WEBSOCKET = auto() + MQTT = auto() + INTERNAL = auto() + +class ExtendedEnum(Enum): + @classmethod + def from_string(cls, enum_name: str): + str_name = enum_name.upper() + for name, member in cls.__members__.items(): + if name == str_name: + return cls(member.value) + raise ValueError(f"No enum member named {enum_name}") + + def __str__(self) -> str: + return self._name_.lower() # type: ignore + +class JobEvent(ExtendedEnum): + STANDBY = 1 + STARTED = 2 + PAUSED = 3 + RESUMED = 4 + COMPLETE = 5 + ERROR = 6 + CANCELLED = 7 + + @property + def finished(self) -> bool: + return self.value >= 5 + + @property + def aborted(self) -> bool: + return self.value >= 6 + + @property + def is_printing(self) -> bool: + return self.value in [2, 4] + +class KlippyState(ExtendedEnum): + DISCONNECTED = 1 + STARTUP = 2 + READY = 3 + ERROR = 4 + SHUTDOWN = 5 + + @classmethod + def from_string(cls, enum_name: str, msg: str = ""): + str_name = enum_name.upper() + for name, member in cls.__members__.items(): + if name == str_name: + instance = cls(member.value) + if msg: + instance.set_message(msg) + return instance + raise ValueError(f"No enum member named {enum_name}") + + + def set_message(self, msg: str) -> None: + self._state_message: str = msg + + @property + def message(self) -> str: + if hasattr(self, "_state_message"): + return self._state_message + return "" + + def startup_complete(self) -> bool: + return self.value > 2 + +class RenderableTemplate(metaclass=ABCMeta): + @abstractmethod + def __str__(self) -> str: + ... + + @abstractmethod + def render(self, context: Dict[str, Any] = {}) -> str: + ... + + @abstractmethod + async def render_async(self, context: Dict[str, Any] = {}) -> str: + ... 
+ +@dataclasses.dataclass +class UserInfo: + username: str + password: str + created_on: float = dataclasses.field(default_factory=time.time) + salt: str = "" + source: str = "moonraker" + jwt_secret: Optional[str] = None + jwk_id: Optional[str] = None + groups: List[str] = dataclasses.field(default_factory=lambda: ["admin"]) + + def as_tuple(self) -> Tuple[Any, ...]: + return dataclasses.astuple(self) + + def as_dict(self) -> Dict[str, Any]: + return dataclasses.asdict(self) + +@dataclasses.dataclass(frozen=True) +class APIDefinition: + endpoint: str + http_path: str + rpc_methods: List[str] + request_types: RequestType + transports: TransportType + callback: Callable[[WebRequest], Coroutine] + auth_required: bool + _cache: ClassVar[Dict[str, APIDefinition]] = {} + + def __str__(self) -> str: + tprt_str = "|".join([tprt.name for tprt in self.transports if tprt.name]) + val: str = f"(Transports: {tprt_str})" + if TransportType.HTTP in self.transports: + req_types = "|".join([rt.name for rt in self.request_types if rt.name]) + val += f" (HTTP Request: {req_types} {self.http_path})" + if self.rpc_methods: + methods = " ".join(self.rpc_methods) + val += f" (RPC Methods: {methods})" + val += f" (Auth Required: {self.auth_required})" + return val + + def request( + self, + args: Dict[str, Any], + request_type: RequestType, + transport: Optional[APITransport] = None, + ip_addr: Optional[IPAddress] = None, + user: Optional[UserInfo] = None + ) -> Coroutine: + return self.callback( + WebRequest(self.endpoint, args, request_type, transport, ip_addr, user) + ) + + @property + def need_object_parser(self) -> bool: + return self.endpoint.startswith("objects/") + + def rpc_items(self) -> zip[Tuple[RequestType, str]]: + return zip(self.request_types, self.rpc_methods) + + @classmethod + def create( + cls, + endpoint: str, + request_types: Union[List[str], RequestType], + callback: Callable[[WebRequest], Coroutine], + transports: Union[List[str], TransportType] = TransportType.all(), + auth_required: bool = True, + is_remote: bool = False + ) -> APIDefinition: + if isinstance(request_types, list): + request_types = RequestType.from_string_list(request_types) + if isinstance(transports, list): + transports = TransportType.from_string_list(transports) + if endpoint in cls._cache: + return cls._cache[endpoint] + http_path = f"/printer/{endpoint.strip('/')}" if is_remote else endpoint + prf_match = re.match(r"/([^/]+)", http_path) + if TransportType.HTTP in transports: + # Validate the first path segment for definitions that support the + # HTTP transport. We want to restrict components from registering + # using unknown paths. + if prf_match is None or prf_match.group(1) not in ENDPOINT_PREFIXES: + prefixes = [f"/{prefix} " for prefix in ENDPOINT_PREFIXES] + raise ServerError( + f"Invalid endpoint name '{endpoint}', must start with one of " + f"the following: {prefixes}" + ) + rpc_methods: List[str] = [] + if is_remote: + # Request Types have no meaning for remote requests. Therefore + # both GET and POST http requests are accepted. JRPC requests do + # not need an associated RequestType, so the unknown value is used. 
+ request_types = RequestType.GET | RequestType.POST + rpc_methods.append(http_path[1:].replace('/', '.')) + elif transports != TransportType.HTTP: + name_parts = http_path[1:].split('/') + if len(request_types) > 1: + for rtype in request_types: + if rtype.name is None: + continue + func_name = rtype.name.lower() + "_" + name_parts[-1] + rpc_methods.append(".".join(name_parts[:-1] + [func_name])) + else: + rpc_methods.append(".".join(name_parts)) + if len(request_types) != len(rpc_methods): + raise ServerError( + "Invalid API definition. Number of websocket methods must " + "match the number of request methods" + ) + + api_def = cls( + endpoint, http_path, rpc_methods, request_types, + transports, callback, auth_required + ) + cls._cache[endpoint] = api_def + return api_def + + @classmethod + def pop_cached_def(cls, endpoint: str) -> Optional[APIDefinition]: + return cls._cache.pop(endpoint, None) + + @classmethod + def get_cache(cls) -> Dict[str, APIDefinition]: + return cls._cache + + @classmethod + def reset_cache(cls) -> None: + cls._cache.clear() + +class APITransport: + @property + def transport_type(self) -> TransportType: + return TransportType.INTERNAL + + @property + def user_info(self) -> Optional[UserInfo]: + return None + + @property + def ip_addr(self) -> Optional[IPAddress]: + return None + + def screen_rpc_request( + self, api_def: APIDefinition, req_type: RequestType, args: Dict[str, Any] + ) -> None: + return None + + def send_status( + self, status: Dict[str, Any], eventtime: float + ) -> None: + raise NotImplementedError + +class BaseRemoteConnection(APITransport): + def on_create(self, server: Server) -> None: + self.server = server + self.eventloop = server.get_event_loop() + self.wsm: WebsocketManager = self.server.lookup_component("websockets") + self.rpc: JsonRPC = self.server.lookup_component("jsonrpc") + self._uid = id(self) + self.is_closed: bool = False + self.queue_busy: bool = False + self.pending_responses: Dict[int, Future] = {} + self.message_buf: List[Union[bytes, str]] = [] + self._connected_time: float = 0. 
+ self._identified: bool = False + self._client_data: Dict[str, str] = { + "name": "unknown", + "version": "", + "type": "", + "url": "" + } + self._need_auth: bool = False + self._user_info: Optional[UserInfo] = None + + @property + def user_info(self) -> Optional[UserInfo]: + return self._user_info + + @user_info.setter + def user_info(self, uinfo: UserInfo) -> None: + self._user_info = uinfo + self._need_auth = False + + @property + def need_auth(self) -> bool: + return self._need_auth + + @property + def uid(self) -> int: + return self._uid + + @property + def hostname(self) -> str: + return "" + + @property + def start_time(self) -> float: + return self._connected_time + + @property + def identified(self) -> bool: + return self._identified + + @property + def client_data(self) -> Dict[str, str]: + return self._client_data + + @client_data.setter + def client_data(self, data: Dict[str, str]) -> None: + self._client_data = data + self._identified = True + + @property + def transport_type(self) -> TransportType: + return TransportType.WEBSOCKET + + def screen_rpc_request( + self, api_def: APIDefinition, req_type: RequestType, args: Dict[str, Any] + ) -> None: + self.check_authenticated(api_def) + + async def _process_message(self, message: str) -> None: + try: + response = await self.rpc.dispatch(message, self) + if response is not None: + self.queue_message(response) + except Exception: + logging.exception("Websocket Command Error") + + def queue_message(self, message: Union[bytes, str, Dict[str, Any]]): + self.message_buf.append( + jsonw.dumps(message) if isinstance(message, dict) else message + ) + if self.queue_busy: + return + self.queue_busy = True + self.eventloop.register_callback(self._write_messages) + + def authenticate( + self, + token: Optional[str] = None, + api_key: Optional[str] = None + ) -> None: + auth: AuthComp = self.server.lookup_component("authorization", None) + if auth is None: + return + if token is not None: + self.user_info = auth.validate_jwt(token) + elif api_key is not None and self.user_info is None: + self.user_info = auth.validate_api_key(api_key) + elif self._need_auth: + raise self.server.error("Unauthorized", 401) + + def check_authenticated(self, api_def: APIDefinition) -> None: + if not self._need_auth: + return + auth: AuthComp = self.server.lookup_component("authorization", None) + if auth is None: + return + if api_def.auth_required: + raise self.server.error("Unauthorized", 401) + + def on_user_logout(self, user: str) -> bool: + if self._user_info is None: + return False + if user == self._user_info.username: + self._user_info = None + return True + return False + + async def _write_messages(self): + if self.is_closed: + self.message_buf = [] + self.queue_busy = False + return + while self.message_buf: + msg = self.message_buf.pop(0) + await self.write_to_socket(msg) + self.queue_busy = False + + async def write_to_socket(self, message: Union[bytes, str]) -> None: + raise NotImplementedError("Children must implement write_to_socket") + + def send_status(self, + status: Dict[str, Any], + eventtime: float + ) -> None: + if not status: + return + self.queue_message({ + 'jsonrpc': "2.0", + 'method': "notify_status_update", + 'params': [status, eventtime]}) + + def call_method_with_response( + self, + method: str, + params: Optional[Union[List, Dict[str, Any]]] = None, + ) -> Awaitable: + fut = self.eventloop.create_future() + msg: Dict[str, Any] = { + 'jsonrpc': "2.0", + 'method': method, + 'id': id(fut) + } + if params: + msg["params"] = params + 
self.pending_responses[id(fut)] = fut
+        self.queue_message(msg)
+        return fut
+
+    def call_method(
+        self,
+        method: str,
+        params: Optional[Union[List, Dict[str, Any]]] = None
+    ) -> None:
+        msg: Dict[str, Any] = {
+            "jsonrpc": "2.0",
+            "method": method
+        }
+        if params:
+            msg["params"] = params
+        self.queue_message(msg)
+
+    def send_notification(self, name: str, data: List) -> None:
+        self.wsm.notify_clients(name, data, [self._uid])
+
+    def resolve_pending_response(
+        self, response_id: int, result: Any
+    ) -> bool:
+        fut = self.pending_responses.pop(response_id, None)
+        if fut is None:
+            return False
+        if isinstance(result, ServerError):
+            fut.set_exception(result)
+        else:
+            fut.set_result(result)
+        return True
+
+    def close_socket(self, code: int, reason: str) -> None:
+        raise NotImplementedError("Children must implement close_socket()")
+
+
+class WebRequest:
+    def __init__(
+        self,
+        endpoint: str,
+        args: Dict[str, Any],
+        request_type: RequestType = RequestType(0),
+        transport: Optional[APITransport] = None,
+        ip_addr: Optional[IPAddress] = None,
+        user: Optional[UserInfo] = None
+    ) -> None:
+        self.endpoint = endpoint
+        self.args = args
+        self.transport = transport
+        self.request_type = request_type
+        self.ip_addr: Optional[IPAddress] = ip_addr
+        self.current_user = user
+
+    def get_endpoint(self) -> str:
+        return self.endpoint
+
+    def get_request_type(self) -> RequestType:
+        return self.request_type
+
+    def get_action(self) -> str:
+        return self.request_type.name or ""
+
+    def get_args(self) -> Dict[str, Any]:
+        return self.args
+
+    def get_subscribable(self) -> Optional[APITransport]:
+        return self.transport
+
+    def get_client_connection(self) -> Optional[BaseRemoteConnection]:
+        if isinstance(self.transport, BaseRemoteConnection):
+            return self.transport
+        return None
+
+    def get_ip_address(self) -> Optional[IPAddress]:
+        return self.ip_addr
+
+    def get_current_user(self) -> Optional[UserInfo]:
+        return self.current_user
+
+    def _get_converted_arg(self,
+                           key: str,
+                           default: Union[Sentinel, _T],
+                           dtype: Type[_C]
+                           ) -> Union[_C, _T]:
+        if key not in self.args:
+            if default is Sentinel.MISSING:
+                raise ServerError(f"No data for argument: {key}")
+            return default
+        val = self.args[key]
+        try:
+            if dtype is not bool:
+                return dtype(val)
+            else:
+                if isinstance(val, str):
+                    val = val.lower()
+                    if val in ["true", "false"]:
+                        return True if val == "true" else False  # type: ignore
+                elif isinstance(val, bool):
+                    return val  # type: ignore
+                raise TypeError
+        except Exception:
+            raise ServerError(
+                f"Unable to convert argument [{key}] to {dtype}: "
+                f"value received: {val}")
+
+    def get(self,
+            key: str,
+            default: Union[Sentinel, _T] = Sentinel.MISSING
+            ) -> Union[_T, Any]:
+        val = self.args.get(key, default)
+        if val is Sentinel.MISSING:
+            raise ServerError(f"No data for argument: {key}")
+        return val
+
+    def get_str(self,
+                key: str,
+                default: Union[Sentinel, _T] = Sentinel.MISSING
+                ) -> Union[str, _T]:
+        return self._get_converted_arg(key, default, str)
+
+    def get_int(self,
+                key: str,
+                default: Union[Sentinel, _T] = Sentinel.MISSING
+                ) -> Union[int, _T]:
+        return self._get_converted_arg(key, default, int)
+
+    def get_float(self,
+                  key: str,
+                  default: Union[Sentinel, _T] = Sentinel.MISSING
+                  ) -> Union[float, _T]:
+        return self._get_converted_arg(key, default, float)
+
+    def get_boolean(self,
+                    key: str,
+                    default: Union[Sentinel, _T] = Sentinel.MISSING
+                    ) -> Union[bool, _T]:
+        return self._get_converted_arg(key, default, bool)
+
+    def _parse_list(
+        self,
+        key: str,
+        sep: str,
+        ltype: Type[_C],
+        count: Optional[int],
+        default: Union[Sentinel, _T]
+    ) -> Union[List[_C], _T]:
+        if key not in self.args:
+            if default is Sentinel.MISSING:
+                raise ServerError(f"No data for argument: {key}")
+            return default
+        value = self.args[key]
+        if isinstance(value, str):
+            try:
+                ret = [ltype(val.strip()) for val in value.split(sep) if val.strip()]
+            except Exception as e:
+                raise ServerError(
+                    f"Invalid list format received for argument '{key}', "
+                    "parsing failed."
+                ) from e
+        elif isinstance(value, list):
+            for val in value:
+                if not isinstance(val, ltype):
+                    raise ServerError(
+                        f"Invalid list format for argument '{key}', expected all "
+                        f"values to be of type {ltype.__name__}."
+                    )
+            # List already parsed
+            ret = value
+        else:
+            raise ServerError(
+                f"Invalid value received for argument '{key}'. Expected List type, "
+                f"received {type(value).__name__}"
+            )
+        if count is not None and len(ret) != count:
+            raise ServerError(
+                f"Invalid list received for argument '{key}', count mismatch. "
+                f"Expected {count} items, got {len(ret)}."
+            )
+        return ret
+
+    def get_list(
+        self,
+        key: str,
+        default: Union[Sentinel, _T] = Sentinel.MISSING,
+        sep: str = ",",
+        count: Optional[int] = None
+    ) -> Union[_T, List[str]]:
+        return self._parse_list(key, sep, str, count, default)
+
+
+class JsonRPC:
+    def __init__(self, server: Server) -> None:
+        self.methods: Dict[str, Tuple[RequestType, APIDefinition]] = {}
+        self.sanitize_response = False
+        self.verbose = server.is_verbose_enabled()
+
+    def _log_request(self, rpc_obj: Dict[str, Any], trtype: TransportType) -> None:
+        if not self.verbose:
+            return
+        self.sanitize_response = False
+        output = rpc_obj
+        method: Optional[str] = rpc_obj.get("method")
+        params: Dict[str, Any] = rpc_obj.get("params", {})
+        if isinstance(method, str):
+            if (
+                method.startswith("access.") or
+                method == "machine.sudo.password"
+            ):
+                self.sanitize_response = True
+                if params and isinstance(params, dict):
+                    output = copy.deepcopy(rpc_obj)
+                    output["params"] = {key: "<sanitized>" for key in params}
+            elif method == "server.connection.identify":
+                output = copy.deepcopy(rpc_obj)
+                for field in ["access_token", "api_key"]:
+                    if field in params:
+                        output["params"][field] = "<sanitized>"
+        logging.debug(f"{trtype} Received::{jsonw.dumps(output).decode()}")
+
+    def _log_response(
+        self, resp_obj: Optional[Dict[str, Any]], trtype: TransportType
+    ) -> None:
+        if not self.verbose:
+            return
+        if resp_obj is None:
+            return
+        output = resp_obj
+        if self.sanitize_response and "result" in resp_obj:
+            output = copy.deepcopy(resp_obj)
+            output["result"] = "<sanitized>"
+        self.sanitize_response = False
+        logging.debug(f"{trtype} Response::{jsonw.dumps(output).decode()}")
+
+    def register_method(
+        self,
+        name: str,
+        request_type: RequestType,
+        api_definition: APIDefinition
+    ) -> None:
+        self.methods[name] = (request_type, api_definition)
+
+    def get_method(self, name: str) -> Optional[Tuple[RequestType, APIDefinition]]:
+        return self.methods.get(name, None)
+
+    def remove_method(self, name: str) -> None:
+        self.methods.pop(name, None)
+
+    async def dispatch(
+        self,
+        data: Union[str, bytes],
+        transport: APITransport
+    ) -> Optional[bytes]:
+        transport_type = transport.transport_type
+        try:
+            obj: Union[Dict[str, Any], List[dict]] = jsonw.loads(data)
+        except Exception:
+            if isinstance(data, bytes):
+                data = data.decode()
+            msg = f"{transport_type} data not valid json: {data}"
+            logging.exception(msg)
+            err = self.build_error(-32700, "Parse error")
+            return jsonw.dumps(err)
+        if
isinstance(obj, list): + responses: List[Dict[str, Any]] = [] + for item in obj: + self._log_request(item, transport_type) + resp = await self.process_object(item, transport) + if resp is not None: + self._log_response(resp, transport_type) + responses.append(resp) + if responses: + return jsonw.dumps(responses) + else: + self._log_request(obj, transport_type) + response = await self.process_object(obj, transport) + if response is not None: + self._log_response(response, transport_type) + return jsonw.dumps(response) + return None + + async def process_object( + self, + obj: Dict[str, Any], + transport: APITransport + ) -> Optional[Dict[str, Any]]: + req_id: Optional[int] = obj.get('id', None) + rpc_version: str = obj.get('jsonrpc', "") + if rpc_version != "2.0": + return self.build_error(-32600, "Invalid Request", req_id) + method_name = obj.get('method', Sentinel.MISSING) + if method_name is Sentinel.MISSING: + self.process_response(obj, transport) + return None + if not isinstance(method_name, str): + return self.build_error( + -32600, "Invalid Request", req_id, method_name=str(method_name) + ) + method_info = self.methods.get(method_name, None) + if method_info is None: + return self.build_error( + -32601, "Method not found", req_id, method_name=method_name + ) + request_type, api_definition = method_info + transport_type = transport.transport_type + if transport_type not in api_definition.transports: + return self.build_error( + -32601, f"Method not found for transport {transport_type.name}", + req_id, method_name=method_name + ) + params: Dict[str, Any] = {} + if 'params' in obj: + params = obj['params'] + if not isinstance(params, dict): + return self.build_error( + -32602, "Invalid params:", req_id, method_name=method_name + ) + return await self.execute_method( + method_name, request_type, api_definition, req_id, transport, params + ) + + def process_response( + self, obj: Dict[str, Any], conn: APITransport + ) -> None: + if not isinstance(conn, BaseRemoteConnection): + logging.debug(f"RPC Response to non-socket request: {obj}") + return + response_id = obj.get("id") + if response_id is None: + logging.debug(f"RPC Response with null ID: {obj}") + return + result = obj.get("result") + if result is None: + name = conn.client_data["name"] + error = obj.get("error") + msg = f"Invalid Response: {obj}" + code = -32600 + if isinstance(error, dict): + msg = error.get("message", msg) + code = error.get("code", code) + msg = f"{name} rpc error: {code} {msg}" + ret = ServerError(msg, 418) + else: + ret = result + conn.resolve_pending_response(response_id, ret) + + async def execute_method( + self, + method_name: str, + request_type: RequestType, + api_definition: APIDefinition, + req_id: Optional[int], + transport: APITransport, + params: Dict[str, Any] + ) -> Optional[Dict[str, Any]]: + try: + transport.screen_rpc_request(api_definition, request_type, params) + result = await api_definition.request( + params, request_type, transport, transport.ip_addr, transport.user_info + ) + except TypeError as e: + return self.build_error( + -32602, f"Invalid params:\n{e}", req_id, True, method_name + ) + except ServerError as e: + code = e.status_code + if code == 404: + code = -32601 + elif code == 401: + code = -32602 + return self.build_error(code, str(e), req_id, True, method_name) + except Exception as e: + return self.build_error(-31000, str(e), req_id, True, method_name) + + if req_id is None: + return None + else: + return self.build_result(result, req_id) + + def build_result(self, result: 
Any, req_id: int) -> Dict[str, Any]:
+        return {
+            'jsonrpc': "2.0",
+            'result': result,
+            'id': req_id
+        }
+
+    def build_error(
+        self,
+        code: int,
+        msg: str,
+        req_id: Optional[int] = None,
+        is_exc: bool = False,
+        method_name: str = ""
+    ) -> Dict[str, Any]:
+        if method_name:
+            method_name = f"Requested Method: {method_name}, "
+        log_msg = f"JSON-RPC Request Error - {method_name}Code: {code}, Message: {msg}"
+        if is_exc and self.verbose:
+            logging.exception(log_msg)
+        else:
+            logging.info(log_msg)
+        return {
+            'jsonrpc': "2.0",
+            'error': {'code': code, 'message': msg},
+            'id': req_id
+        }
+
+
+# *** Job History Common Classes ***
+
+class FieldTracker(Generic[_T]):
+    history: History = None  # type: ignore
+    def __init__(
+        self,
+        value: _T = None,  # type: ignore
+        reset_callback: Optional[Callable[[], _T]] = None,
+        exclude_paused: bool = False,
+    ) -> None:
+        self.tracked_value = value
+        self.exclude_paused = exclude_paused
+        self.reset_callback: Optional[Callable[[], _T]] = reset_callback
+
+    def set_reset_callback(self, cb: Optional[Callable[[], _T]]) -> None:
+        self.reset_callback = cb
+
+    def set_exclude_paused(self, exclude: bool) -> None:
+        self.exclude_paused = exclude
+
+    def reset(self) -> None:
+        raise NotImplementedError()
+
+    def update(self, value: _T) -> None:
+        raise NotImplementedError()
+
+    def get_tracked_value(self) -> _T:
+        return self.tracked_value
+
+    def has_totals(self) -> bool:
+        return False
+
+    @classmethod
+    def class_init(cls, history: History) -> None:
+        cls.history = history
+
+
+class BasicTracker(FieldTracker[Any]):
+    def __init__(
+        self,
+        value: Any = None,
+        reset_callback: Optional[Callable[[], Any]] = None,
+        exclude_paused: bool = False
+    ) -> None:
+        super().__init__(value, reset_callback, exclude_paused)
+
+    def reset(self) -> None:
+        if self.reset_callback is not None:
+            self.tracked_value = self.reset_callback()
+
+    def update(self, value: Any) -> None:
+        if self.history.tracking_enabled(self.exclude_paused):
+            self.tracked_value = value
+
+    def has_totals(self) -> bool:
+        return isinstance(self.tracked_value, (int, float))
+
+
+class DeltaTracker(FieldTracker[Union[int, float]]):
+    def __init__(
+        self,
+        value: Union[int, float] = 0,
+        reset_callback: Optional[Callable[[], Union[float, int]]] = None,
+        exclude_paused: bool = False
+    ) -> None:
+        super().__init__(value, reset_callback, exclude_paused)
+        self.last_value: Union[float, int, None] = None
+
+    def reset(self) -> None:
+        self.tracked_value = 0
+        self.last_value = None
+        if self.reset_callback is not None:
+            self.last_value = self.reset_callback()
+            if not isinstance(self.last_value, (float, int)):
+                logging.info("DeltaTracker reset to invalid type")
+                self.last_value = None
+
+    def update(self, value: Union[int, float]) -> None:
+        if not isinstance(value, (int, float)):
+            return
+        if self.history.tracking_enabled(self.exclude_paused):
+            if self.last_value is not None:
+                self.tracked_value += value - self.last_value
+            self.last_value = value
+
+    def has_totals(self) -> bool:
+        return True
+
+
+class CumulativeTracker(FieldTracker[Union[int, float]]):
+    def __init__(
+        self,
+        value: Union[int, float] = 0,
+        reset_callback: Optional[Callable[[], Union[float, int]]] = None,
+        exclude_paused: bool = False
+    ) -> None:
+        super().__init__(value, reset_callback, exclude_paused)
+
+    def reset(self) -> None:
+        if self.reset_callback is not None:
+            self.tracked_value = self.reset_callback()
+            if not isinstance(self.tracked_value, (float, int)):
+                logging.info(f"{self.__class__.__name__}
reset to invalid type") + self.tracked_value = 0 + else: + self.tracked_value = 0 + + def update(self, value: Union[int, float]) -> None: + if not isinstance(value, (int, float)): + return + if self.history.tracking_enabled(self.exclude_paused): + self.tracked_value += value + + def has_totals(self) -> bool: + return True + +class AveragingTracker(CumulativeTracker): + def __init__( + self, + value: Union[int, float] = 0, + reset_callback: Optional[Callable[[], Union[float, int]]] = None, + exclude_paused: bool = False + ) -> None: + super().__init__(value, reset_callback, exclude_paused) + self.count = 0 + + def reset(self) -> None: + super().reset() + self.count = 0 + + def update(self, value: Union[int, float]) -> None: + if not isinstance(value, (int, float)): + return + if self.history.tracking_enabled(self.exclude_paused): + lv = self.tracked_value + self.count += 1 + self.tracked_value = (lv * (self.count - 1) + value) / self.count + + +class MaximumTracker(CumulativeTracker): + def __init__( + self, + value: Union[int, float] = 0, + reset_callback: Optional[Callable[[], Union[float, int]]] = None, + exclude_paused: bool = False + ) -> None: + super().__init__(value, reset_callback, exclude_paused) + self.initialized = False + + def reset(self) -> None: + self.initialized = False + if self.reset_callback is not None: + self.tracked_value = self.reset_callback() + if not isinstance(self.tracked_value, (int, float)): + self.tracked_value = 0 + logging.info("MaximumTracker reset to invalid type") + else: + self.initialized = True + else: + self.tracked_value = 0 + + def update(self, value: Union[float, int]) -> None: + if not isinstance(value, (int, float)): + return + if self.history.tracking_enabled(self.exclude_paused): + if not self.initialized: + self.tracked_value = value + self.initialized = True + else: + self.tracked_value = max(self.tracked_value, value) + +class MinimumTracker(CumulativeTracker): + def __init__( + self, + value: Union[int, float] = 0, + reset_callback: Optional[Callable[[], Union[float, int]]] = None, + exclude_paused: bool = False + ) -> None: + super().__init__(value, reset_callback, exclude_paused) + self.initialized = False + + def reset(self) -> None: + self.initialized = False + if self.reset_callback is not None: + self.tracked_value = self.reset_callback() + if not isinstance(self.tracked_value, (int, float)): + self.tracked_value = 0 + logging.info("MinimumTracker reset to invalid type") + else: + self.initialized = True + else: + self.tracked_value = 0 + + def update(self, value: Union[float, int]) -> None: + if not isinstance(value, (int, float)): + return + if self.history.tracking_enabled(self.exclude_paused): + if not self.initialized: + self.tracked_value = value + self.initialized = True + else: + self.tracked_value = min(self.tracked_value, value) + +class CollectionTracker(FieldTracker[List[Any]]): + MAX_SIZE = 100 + def __init__( + self, + value: List[Any] = [], + reset_callback: Optional[Callable[[], List[Any]]] = None, + exclude_paused: bool = False + ) -> None: + super().__init__(list(value), reset_callback, exclude_paused) + + def reset(self) -> None: + if self.reset_callback is not None: + self.tracked_value = self.reset_callback() + if not isinstance(self.tracked_value, list): + logging.info("CollectionTracker reset to invalid type") + self.tracked_value = [] + else: + self.tracked_value.clear() + + def update(self, value: Any) -> None: + if value in self.tracked_value: + return + if 
self.history.tracking_enabled(self.exclude_paused): + self.tracked_value.append(value) + if len(self.tracked_value) > self.MAX_SIZE: + self.tracked_value.pop(0) + + def has_totals(self) -> bool: + return False + + +class TrackingStrategy(ExtendedEnum): + BASIC = 1 + DELTA = 2 + ACCUMULATE = 3 + AVERAGE = 4 + MAXIMUM = 5 + MINIMUM = 6 + COLLECT = 7 + + def get_tracker(self, **kwargs) -> FieldTracker: + trackers: Dict[TrackingStrategy, Type[FieldTracker]] = { + TrackingStrategy.BASIC: BasicTracker, + TrackingStrategy.DELTA: DeltaTracker, + TrackingStrategy.ACCUMULATE: CumulativeTracker, + TrackingStrategy.AVERAGE: AveragingTracker, + TrackingStrategy.MAXIMUM: MaximumTracker, + TrackingStrategy.MINIMUM: MinimumTracker, + TrackingStrategy.COLLECT: CollectionTracker + } + return trackers[self](**kwargs) + + +class HistoryFieldData: + def __init__( + self, + field_name: str, + provider: str, + desc: str, + strategy: str, + units: Optional[str] = None, + reset_callback: Optional[Callable[[], _T]] = None, + exclude_paused: bool = False, + report_total: bool = False, + report_maximum: bool = False, + precision: Optional[int] = None + ) -> None: + self._name = field_name + self._provider = provider + self._desc = desc + self._strategy = TrackingStrategy.from_string(strategy) + self._units = units + self._tracker = self._strategy.get_tracker( + reset_callback=reset_callback, + exclude_paused=exclude_paused + ) + self._report_total = report_total + self._report_maximum = report_maximum + self._precision = precision + + @property + def name(self) -> str: + return self._name + + @property + def provider(self) -> str: + return self._provider + + @property + def tracker(self) -> FieldTracker: + return self._tracker + + def __eq__(self, value: object) -> bool: + if isinstance(value, HistoryFieldData): + return value._provider == self._provider and value._name == self._name + raise ValueError("Invalid type for comparison") + + def get_configuration(self) -> Dict[str, Any]: + return { + "field": self._name, + "provider": self._provider, + "description": self._desc, + "strategy": self._strategy.name.lower(), + "units": self._units, + "init_tracker": self._tracker.reset_callback is not None, + "exclude_paused": self._tracker.exclude_paused, + "report_total": self._report_total, + "report_maximum": self._report_maximum, + "precision": self._precision + } + + def as_dict(self) -> Dict[str, Any]: + val = self._tracker.get_tracked_value() + if self._precision is not None and isinstance(val, float): + val = round(val, self._precision) + return { + "provider": self._provider, + "name": self.name, + "value": val, + "description": self._desc, + "units": self._units + } + + def has_totals(self) -> bool: + return ( + self._tracker.has_totals() and + (self._report_total or self._report_maximum) + ) + + def get_totals( + self, last_totals: List[Dict[str, Any]], reset: bool = False + ) -> Dict[str, Any]: + if not self.has_totals(): + return {} + if reset: + maximum: Optional[float] = 0 if self._report_maximum else None + total: Optional[float] = 0 if self._report_total else None + else: + cur_val: Union[float, int] = self._tracker.get_tracked_value() + maximum = cur_val if self._report_maximum else None + total = cur_val if self._report_total else None + for obj in last_totals: + if obj["provider"] == self._provider and obj["field"] == self._name: + if maximum is not None: + maximum = max(cur_val, obj["maximum"] or 0) + if total is not None: + total = cur_val + (obj["total"] or 0) + break + if self._precision is not None: 
+ if maximum is not None: + maximum = round(maximum, self._precision) + if total is not None: + total = round(total, self._precision) + return { + "provider": self._provider, + "field": self._name, + "maximum": maximum, + "total": total + } + +class SqlTableDefType(type): + def __new__( + metacls, + clsname: str, + bases: Tuple[type, ...], + cls_attrs: Dict[str, Any] + ): + if clsname != "SqlTableDefinition": + for item in ("name", "prototype"): + if not cls_attrs[item]: + raise ValueError( + f"Class attribute `{item}` must be set for class {clsname}" + ) + if cls_attrs["version"] < 1: + raise ValueError( + f"The 'version' attribute of {clsname} must be greater than 0" + ) + cls_attrs["prototype"] = inspect.cleandoc(cls_attrs["prototype"].strip()) + prototype = cls_attrs["prototype"] + proto_match = re.match( + r"([a-zA-Z][0-9a-zA-Z_-]+)\s*\((.+)\)\s*;?$", prototype, re.DOTALL + ) + if proto_match is None: + raise ValueError(f"Invalid SQL Table prototype:\n{prototype}") + table_name = cls_attrs["name"] + parsed_name = proto_match.group(1) + if table_name != parsed_name: + raise ValueError( + f"Table name '{table_name}' does not match parsed name from " + f"table prototype '{parsed_name}'" + ) + return super().__new__(metacls, clsname, bases, cls_attrs) + +class SqlTableDefinition(metaclass=SqlTableDefType): + name: str = "" + version: int = 0 + prototype: str = "" + def __init__(self) -> None: + if self.__class__ == SqlTableDefinition: + raise ServerError("Cannot directly instantiate SqlTableDefinition") + + def migrate( + self, last_version: int, db_provider: DBProviderWrapper + ) -> None: + raise NotImplementedError("Children must implement migrate") diff --git a/moonraker/components/announcements.py b/moonraker/components/announcements.py index 4bd4730..55c223e 100644 --- a/moonraker/components/announcements.py +++ b/moonraker/components/announcements.py @@ -11,20 +11,20 @@ import asyncio import logging import email.utils import xml.etree.ElementTree as etree +from ..common import RequestType from typing import ( TYPE_CHECKING, Awaitable, List, Dict, Any, - Optional, - Union + Optional ) if TYPE_CHECKING: - from confighelper import ConfigHelper - from websockets import WebRequest - from http_client import HttpClient - from components.database import MoonrakerDatabase + from ..confighelper import ConfigHelper + from ..common import WebRequest + from .http_client import HttpClient + from .database import MoonrakerDatabase MOONLIGHT_URL = "https://arksine.github.io/moonlight" @@ -58,23 +58,23 @@ class Announcements: ) self.server.register_endpoint( - "/server/announcements/list", ["GET"], + "/server/announcements/list", RequestType.GET, self._list_announcements ) self.server.register_endpoint( - "/server/announcements/dismiss", ["POST"], + "/server/announcements/dismiss", RequestType.POST, self._handle_dismiss_request ) self.server.register_endpoint( - "/server/announcements/update", ["POST"], + "/server/announcements/update", RequestType.POST, self._handle_update_request ) self.server.register_endpoint( - "/server/announcements/feed", ["POST", "DELETE"], + "/server/announcements/feed", RequestType.POST | RequestType.DELETE, self._handle_feed_request ) self.server.register_endpoint( - "/server/announcements/feeds", ["GET"], + "/server/announcements/feeds", RequestType.GET, self._handle_list_feeds ) self.server.register_notification( @@ -143,12 +143,7 @@ class Announcements: async def _handle_update_request( self, web_request: WebRequest ) -> Dict[str, Any]: - subs: Optional[Union[str, 
List[str]]] - subs = web_request.get("subscriptions", None) - if isinstance(subs, str): - subs = [sub.strip() for sub in subs.split(",") if sub.strip()] - elif subs is None: - subs = list(self.subscriptions.keys()) + subs = web_request.get_list("subscriptions", list(self.subscriptions.keys())) for sub in subs: if sub not in self.subscriptions: raise self.server.error(f"No subscription for {sub}") @@ -176,13 +171,13 @@ class Announcements: async def _handle_feed_request( self, web_request: WebRequest ) -> Dict[str, Any]: - action = web_request.get_action() + req_type = web_request.get_request_type() name: str = web_request.get("name") name = name.lower() changed: bool = False db: MoonrakerDatabase = self.server.lookup_component("database") result = "skipped" - if action == "POST": + if req_type == RequestType.POST: if name not in self.subscriptions: feed = RssFeed(name, self.entry_mgr, self.dev_mode) self.subscriptions[name] = feed @@ -193,7 +188,7 @@ class Announcements: "moonraker", "announcements.stored_feeds", self.stored_feeds ) result = "added" - elif action == "DELETE": + elif req_type == RequestType.DELETE: if name not in self.stored_feeds: raise self.server.error(f"Feed '{name}' not stored") if name in self.configured_feeds: @@ -241,8 +236,15 @@ class Announcements: "feed": feed } self.entry_mgr.add_entry(entry) + self.eventloop.create_task(self._notify_internal()) return entry + async def _notify_internal(self) -> None: + entries = await self.entry_mgr.list_entries() + self.server.send_event( + "announcements:entries_updated", {"entries": entries} + ) + async def remove_announcement(self, entry_id: str) -> None: ret = await self.entry_mgr.remove_entry(entry_id) if ret is not None: @@ -260,6 +262,15 @@ class Announcements: ) -> List[Dict[str, Any]]: return await self.entry_mgr.list_entries(include_dismissed) + def register_feed(self, name: str) -> None: + name = name.lower() + if name in self.subscriptions: + logging.info(f"Feed {name} already configured") + return + logging.info(f"Registering feed {name}") + self.configured_feeds.append(name) + self.subscriptions[name] = RssFeed(name, self.entry_mgr, self.dev_mode) + def close(self): self.entry_mgr.close() diff --git a/moonraker/components/application.py b/moonraker/components/application.py new file mode 100644 index 0000000..8099b30 --- /dev/null +++ b/moonraker/components/application.py @@ -0,0 +1,1180 @@ +# Klipper Web Server Rest API +# +# Copyright (C) 2020 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license + +from __future__ import annotations +import os +import mimetypes +import logging +import traceback +import ssl +import pathlib +import urllib.parse +import tornado +import tornado.iostream +import tornado.httputil +import tornado.web +from asyncio import Lock +from inspect import isclass +from tornado.escape import url_unescape, url_escape +from tornado.routing import Rule, PathMatches, RuleRouter +from tornado.http1connection import HTTP1Connection +from tornado.httpserver import HTTPServer +from tornado.log import access_log +from ..utils import ServerError, source_info, parse_ip_address +from ..common import ( + JsonRPC, + WebRequest, + APIDefinition, + APITransport, + TransportType, + RequestType, + KlippyState +) +from ..utils import json_wrapper as jsonw +from streaming_form_data import StreamingFormDataParser, ParseFailedException +from streaming_form_data.targets import FileTarget, ValueTarget, SHA256Target + +# Annotation imports +from typing import ( + TYPE_CHECKING, + 
Any, + Optional, + Callable, + Coroutine, + Union, + Dict, + List, + AsyncGenerator, + Type +) +if TYPE_CHECKING: + from tornado.websocket import WebSocketHandler + from tornado.httputil import HTTPMessageDelegate, HTTPServerRequest + from ..server import Server + from ..eventloop import EventLoop + from ..confighelper import ConfigHelper + from ..common import UserInfo + from .klippy_connection import KlippyConnection as Klippy + from ..utils import IPAddress + from .websockets import WebsocketManager, WebSocket + from .file_manager.file_manager import FileManager + from .announcements import Announcements + from .machine import Machine + from io import BufferedReader + from .authorization import Authorization + from .template import TemplateFactory, JinjaTemplate + MessageDelgate = Optional[HTTPMessageDelegate] + AuthComp = Optional[Authorization] + APICallback = Callable[[WebRequest], Coroutine] + +# mypy: disable-error-code="attr-defined,name-defined" + +# 50 MiB Max Standard Body Size +MAX_BODY_SIZE = 50 * 1024 * 1024 +MAX_WS_CONNS_DEFAULT = 50 +EXCLUDED_ARGS = ["_", "token", "access_token", "connection_id"] +AUTHORIZED_EXTS = [".png", ".jpg"] +DEFAULT_KLIPPY_LOG_PATH = "/tmp/klippy.log" + +class MutableRouter(RuleRouter): + def __init__(self, application: tornado.web.Application) -> None: + self.application = application + self.pattern_to_rule: Dict[str, Rule] = {} + super(MutableRouter, self).__init__(None) + + def get_target_delegate(self, + target: Any, + request: tornado.httputil.HTTPServerRequest, + **target_params + ) -> MessageDelgate: + if isclass(target) and issubclass(target, tornado.web.RequestHandler): + return self.application.get_handler_delegate( + request, target, **target_params + ) + return super(MutableRouter, self).get_target_delegate( + target, request, **target_params) + + def has_rule(self, pattern: str) -> bool: + return pattern in self.pattern_to_rule + + def add_handler(self, + pattern: str, + target: Any, + target_params: Optional[Dict[str, Any]] = None + ) -> None: + if pattern in self.pattern_to_rule: + self.remove_handler(pattern) + new_rule = Rule(PathMatches(pattern), target, target_params) + self.pattern_to_rule[pattern] = new_rule + self.rules.append(new_rule) + + def remove_handler(self, pattern: str) -> None: + rule = self.pattern_to_rule.pop(pattern, None) + if rule is not None: + try: + self.rules.remove(rule) + except Exception: + logging.exception(f"Unable to remove rule: {pattern}") + +class PrimaryRouter(MutableRouter): + def __init__(self, config: ConfigHelper) -> None: + server = config.get_server() + max_ws_conns = config.getint('max_websocket_connections', MAX_WS_CONNS_DEFAULT) + self.verbose_logging = server.is_verbose_enabled() + app_args: Dict[str, Any] = { + 'serve_traceback': self.verbose_logging, + 'websocket_ping_interval': 10, + 'websocket_ping_timeout': 30, + 'server': server, + 'max_websocket_connections': max_ws_conns, + 'log_function': self.log_request + } + super().__init__(tornado.web.Application(**app_args)) + + @property + def tornado_app(self) -> tornado.web.Application: + return self.application + + def find_handler( + self, request: HTTPServerRequest, **kwargs: Any + ) -> Optional[HTTPMessageDelegate]: + hdlr = super().find_handler(request, **kwargs) + if hdlr is not None: + return hdlr + return self.application.get_handler_delegate(request, AuthorizedErrorHandler) + + def log_request(self, handler: tornado.web.RequestHandler) -> None: + status_code = handler.get_status() + if ( + not self.verbose_logging and + 
status_code in [200, 204, 206, 304] + ): + # don't log successful requests in release mode + return + if status_code < 400: + log_method = access_log.info + elif status_code < 500: + log_method = access_log.warning + else: + log_method = access_log.error + request_time = 1000.0 * handler.request.request_time() + user: Optional[UserInfo] = handler.current_user + username = "No User" + if user is not None: + username = user.username + log_method( + f"{status_code} {handler._request_summary()} " + f"[{username}] {request_time:.2f}ms" + ) + +class InternalTransport(APITransport): + def __init__(self, server: Server) -> None: + self.server = server + + async def call_method(self, + method_name: str, + request_arguments: Dict[str, Any] = {}, + **kwargs + ) -> Any: + rpc: JsonRPC = self.server.lookup_component("jsonrpc") + method_info = rpc.get_method(method_name) + if method_info is None: + raise self.server.error(f"No method {method_name} available") + req_type, api_definition = method_info + if TransportType.INTERNAL not in api_definition.transports: + raise self.server.error(f"No method {method_name} available") + args = request_arguments or kwargs + return await api_definition.request(args, req_type, self) + +class MoonrakerApp: + def __init__(self, config: ConfigHelper) -> None: + self.server = config.get_server() + self.json_rpc = JsonRPC(self.server) + self.http_server: Optional[HTTPServer] = None + self.secure_server: Optional[HTTPServer] = None + self.template_cache: Dict[str, JinjaTemplate] = {} + self.registered_base_handlers: List[str] = [ + "/server/redirect", + "/server/jsonrpc" + ] + self.max_upload_size = config.getint('max_upload_size', 1024) + self.max_upload_size *= 1024 * 1024 + + # SSL config + self.cert_path: pathlib.Path = self._get_path_option( + config, 'ssl_certificate_path') + self.key_path: pathlib.Path = self._get_path_option( + config, 'ssl_key_path') + + # Route Prefix + home_pattern = "/" + self._route_prefix: str = "" + route_prefix = config.get("route_prefix", None) + if route_prefix is not None: + rparts = route_prefix.strip("/").split("/") + rp = "/".join( + [url_escape(part, plus=False) for part in rparts if part] + ) + if not rp: + raise config.error( + f"Invalid value for option 'route_prefix': {route_prefix}" + ) + self._route_prefix = f"/{rp}" + home_pattern = f"{self._route_prefix}/?" + self.internal_transport = InternalTransport(self.server) + + mimetypes.add_type('text/plain', '.log') + mimetypes.add_type('text/plain', '.gcode') + mimetypes.add_type('text/plain', '.cfg') + + # Set up HTTP routing. 
Our "mutable_router" wraps a Tornado Application + self.mutable_router = PrimaryRouter(config) + for (ptrn, hdlr) in ( + (home_pattern, WelcomeHandler), + (f"{self._route_prefix}/server/redirect", RedirectHandler), + (f"{self._route_prefix}/server/jsonrpc", RPCHandler) + ): + self.mutable_router.add_handler(ptrn, hdlr, None) + + # Register handlers + logfile = self.server.get_app_args().get('log_file') + if logfile: + self.register_static_file_handler( + "moonraker.log", logfile, force=True) + self.register_static_file_handler( + "klippy.log", DEFAULT_KLIPPY_LOG_PATH, force=True) + self.register_upload_handler("/server/files/upload") + + # Register Server Components + self.server.register_component("jsonrpc", self.json_rpc) + self.server.register_component("internal_transport", self.internal_transport) + + def _get_path_option( + self, config: ConfigHelper, option: str + ) -> pathlib.Path: + path: Optional[str] = config.get(option, None, deprecate=True) + app_args = self.server.get_app_args() + data_path = app_args["data_path"] + certs_path = pathlib.Path(data_path).joinpath("certs") + if not certs_path.exists(): + try: + certs_path.mkdir() + except Exception: + pass + ext = "key" if "key" in option else "cert" + item = certs_path.joinpath(f"moonraker.{ext}") + if item.exists() or path is None: + return item + item = pathlib.Path(path).expanduser().resolve() + if not item.exists(): + raise self.server.error( + f"Invalid path for option '{option}', " + f"{path} does not exist" + ) + return item + + @property + def route_prefix(self): + return self._route_prefix + + def parse_endpoint(self, http_path: str) -> str: + if not self._route_prefix or not http_path.startswith(self._route_prefix): + return http_path + return http_path[len(self._route_prefix):] + + def listen(self, host: str, port: int, ssl_port: int) -> None: + if host.lower() == "all": + host = "" + self.http_server = self._create_http_server(port, host) + if self.https_enabled(): + if port == ssl_port: + self.server.add_warning( + "Failed to start HTTPS server. Server options 'port' and " + f"'ssl_port' match, both set to {port}. Modify the " + "configuration to use different ports." + ) + return + logging.info(f"Starting secure server on port {ssl_port}") + ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) + ssl_ctx.load_cert_chain(self.cert_path, self.key_path) + self.secure_server = self._create_http_server( + ssl_port, host, ssl_options=ssl_ctx + ) + else: + logging.info( + "SSL Certificate/Key not configured, aborting HTTPS Server startup" + ) + + def _create_http_server( + self, port: int, address: str, **kwargs + ) -> Optional[HTTPServer]: + args: Dict[str, Any] = dict(max_body_size=MAX_BODY_SIZE, xheaders=True) + args.update(kwargs) + svr = HTTPServer(self.mutable_router, **args) + try: + svr.listen(port, address) + except Exception as e: + svr_type = "HTTPS" if "ssl_options" in args else "HTTP" + logging.exception(f"{svr_type} Server Start Failed") + self.server.add_warning( + f"Failed to start {svr_type} server: {e}. See moonraker.log " + "for more details." 
+ ) + return None + return svr + + def get_server(self) -> Server: + return self.server + + def https_enabled(self) -> bool: + return self.cert_path.exists() and self.key_path.exists() + + async def close(self) -> None: + if self.http_server is not None: + self.http_server.stop() + await self.http_server.close_all_connections() + if self.secure_server is not None: + self.secure_server.stop() + await self.secure_server.close_all_connections() + APIDefinition.reset_cache() + + def register_endpoint( + self, + endpoint: str, + request_types: Union[List[str], RequestType], + callback: APICallback, + transports: Union[List[str], TransportType] = TransportType.all(), + wrap_result: bool = True, + content_type: Optional[str] = None, + auth_required: bool = True, + is_remote: bool = False + ) -> None: + if isinstance(request_types, list): + request_types = RequestType.from_string_list(request_types) + if isinstance(transports, list): + transports = TransportType.from_string_list(transports) + api_def = APIDefinition.create( + endpoint, request_types, callback, transports, auth_required, is_remote + ) + http_path = api_def.http_path + if http_path in self.registered_base_handlers: + if not is_remote: + raise self.server.error( + f"Local endpoint '{endpoint}' already registered" + ) + return + logging.debug(f"Registering API: {api_def}") + if TransportType.HTTP in transports: + params: dict[str, Any] = {} + params["api_definition"] = api_def + params["wrap_result"] = wrap_result + params["content_type"] = content_type + self.mutable_router.add_handler( + f"{self._route_prefix}{http_path}", DynamicRequestHandler, params + ) + self.registered_base_handlers.append(http_path) + for request_type, method_name in api_def.rpc_items(): + self.json_rpc.register_method(method_name, request_type, api_def) + + def register_static_file_handler( + self, pattern: str, file_path: str, force: bool = False + ) -> None: + if pattern[0] != "/": + pattern = "/server/files/" + pattern + if os.path.isfile(file_path) or force: + pattern += '()' + elif os.path.isdir(file_path): + if pattern[-1] != "/": + pattern += "/" + pattern += "(.*)" + else: + logging.info(f"Invalid file path: {file_path}") + return + logging.debug(f"Registering static file: ({pattern}) {file_path}") + params = {'path': file_path} + self.mutable_router.add_handler( + f"{self._route_prefix}{pattern}", FileRequestHandler, params + ) + + def register_upload_handler( + self, pattern: str, location_prefix: str = "server/files" + ) -> None: + params: Dict[str, Any] = {'max_upload_size': self.max_upload_size} + location_prefix = location_prefix.strip("/") + if self._route_prefix: + location_prefix = f"{self._route_prefix.strip('/')}/{location_prefix}" + params['location_prefix'] = location_prefix + self.mutable_router.add_handler( + f"{self._route_prefix}{pattern}", FileUploadHandler, params + ) + + def register_websocket_handler( + self, pattern: str, handler: Type[WebSocketHandler] + ) -> None: + self.mutable_router.add_handler( + f"{self._route_prefix}{pattern}", handler, None + ) + + def register_debug_endpoint( + self, + endpoint: str, + request_types: Union[List[str], RequestType], + callback: APICallback, + transports: Union[List[str], TransportType] = TransportType.all(), + wrap_result: bool = True + ) -> None: + if not self.server.is_debug_enabled(): + return + if not endpoint.startswith("/debug"): + raise self.server.error( + "Debug Endpoints must be registered in the '/debug' path" + ) + self.register_endpoint( + endpoint, request_types, callback, 
transports, wrap_result + ) + + def remove_endpoint(self, endpoint: str) -> None: + api_def = APIDefinition.pop_cached_def(endpoint) + if api_def is not None: + logging.debug(f"Removing Endpoint: {endpoint}") + if api_def.http_path in self.registered_base_handlers: + self.registered_base_handlers.remove(api_def.http_path) + self.mutable_router.remove_handler(api_def.http_path) + for method_name in api_def.rpc_methods: + self.json_rpc.remove_method(method_name) + + async def load_template(self, asset_name: str) -> JinjaTemplate: + if asset_name in self.template_cache: + return self.template_cache[asset_name] + eventloop = self.server.get_event_loop() + asset = await eventloop.run_in_thread( + source_info.read_asset, asset_name + ) + if asset is None: + raise tornado.web.HTTPError(404, "Asset Not Found") + template: TemplateFactory = self.server.lookup_component("template") + asset_tmpl = template.create_ui_template(asset) + self.template_cache[asset_name] = asset_tmpl + return asset_tmpl + +def _set_cors_headers(req_hdlr: tornado.web.RequestHandler) -> None: + request = req_hdlr.request + origin: Optional[str] = request.headers.get("Origin") + if origin is None: + return + req_hdlr.set_header("Access-Control-Allow-Origin", origin) + if req_hdlr.request.method == "OPTIONS": + req_hdlr.set_header( + "Access-Control-Allow-Methods", + "GET, POST, PUT, DELETE, OPTIONS" + ) + req_hdlr.set_header( + "Access-Control-Allow-Headers", + "Origin, Accept, Content-Type, X-Requested-With, " + "X-CRSF-Token, Authorization, X-Access-Token, " + "X-Api-Key" + ) + req_pvt_header = req_hdlr.request.headers.get( + "Access-Control-Request-Private-Network", None + ) + if req_pvt_header == "true": + req_hdlr.set_header("Access-Control-Allow-Private-Network", "true") + + +class AuthorizedRequestHandler(tornado.web.RequestHandler): + def initialize(self) -> None: + self.server: Server = self.settings['server'] + self.auth_required: bool = True + self.cors_enabled = False + + def set_default_headers(self) -> None: + if getattr(self, "cors_enabled", False): + _set_cors_headers(self) + + async def prepare(self) -> None: + auth: AuthComp = self.server.lookup_component('authorization', None) + if auth is not None: + origin: Optional[str] = self.request.headers.get("Origin") + self.cors_enabled = await auth.check_cors(origin) + if self.cors_enabled: + _set_cors_headers(self) + self.current_user = await auth.authenticate_request( + self.request, self.auth_required + ) + + def options(self, *args, **kwargs) -> None: + # Enable CORS if configured + if self.cors_enabled: + self.set_status(204) + self.finish() + else: + super(AuthorizedRequestHandler, self).options() + + def get_associated_websocket(self) -> Optional[WebSocket]: + # Return associated websocket connection if an id + # was provided by the request + conn = None + conn_id: Any = self.get_argument('connection_id', None) + if conn_id is not None: + try: + conn_id = int(conn_id) + except Exception: + pass + else: + wsm: WebsocketManager = self.server.lookup_component("websockets") + conn = wsm.get_client_ws(conn_id) + return conn + + def write_error(self, status_code: int, **kwargs) -> None: + err = {'code': status_code, 'message': self._reason} + if 'exc_info' in kwargs: + err['traceback'] = "\n".join( + traceback.format_exception(*kwargs['exc_info'])) + self.set_header("Content-Type", "application/json; charset=UTF-8") + self.finish(jsonw.dumps({'error': err})) + +# Due to the way Python treats multiple inheritance its best +# to create a separate authorized handler 
for serving files +class AuthorizedFileHandler(tornado.web.StaticFileHandler): + def initialize(self, + path: str, + default_filename: Optional[str] = None + ) -> None: + super(AuthorizedFileHandler, self).initialize(path, default_filename) + self.server: Server = self.settings['server'] + self.cors_enabled = False + + def set_default_headers(self) -> None: + if getattr(self, "cors_enabled", False): + _set_cors_headers(self) + + async def prepare(self) -> None: + auth: AuthComp = self.server.lookup_component('authorization', None) + if auth is not None: + origin: Optional[str] = self.request.headers.get("Origin") + self.cors_enabled = await auth.check_cors(origin) + if self.cors_enabled: + _set_cors_headers(self) + self.current_user = await auth.authenticate_request( + self.request, self._check_need_auth() + ) + + def options(self, *args, **kwargs) -> None: + # Enable CORS if configured + if self.cors_enabled: + self.set_status(204) + self.finish() + else: + super(AuthorizedFileHandler, self).options() + + def write_error(self, status_code: int, **kwargs) -> None: + err = {'code': status_code, 'message': self._reason} + if 'exc_info' in kwargs: + err['traceback'] = "\n".join( + traceback.format_exception(*kwargs['exc_info'])) + self.set_header("Content-Type", "application/json; charset=UTF-8") + self.finish(jsonw.dumps({'error': err})) + + def _check_need_auth(self) -> bool: + if self.request.method != "GET": + return True + ext = os.path.splitext(self.request.path)[-1].lower() + if ext in AUTHORIZED_EXTS: + return False + return True + +class DynamicRequestHandler(AuthorizedRequestHandler): + def initialize( + self, + api_definition: Optional[APIDefinition] = None, + wrap_result: bool = True, + content_type: Optional[str] = None + ) -> None: + super(DynamicRequestHandler, self).initialize() + assert api_definition is not None + self.api_defintion = api_definition + self.wrap_result = wrap_result + self.content_type = content_type + self.auth_required = api_definition.auth_required + + # Converts query string values with type hints + def _convert_type(self, value: str, hint: str) -> Any: + type_funcs: Dict[str, Callable] = { + "int": int, "float": float, + "bool": lambda x: x.lower() == "true", + "json": jsonw.loads} + if hint not in type_funcs: + logging.info(f"No conversion method for type hint {hint}") + return value + func = type_funcs[hint] + try: + converted = func(value) + except Exception: + logging.exception("Argument conversion error: Hint: " + f"{hint}, Arg: {value}") + return value + return converted + + def _default_parser(self) -> Dict[str, Any]: + args = {} + for key in self.request.arguments.keys(): + if key in EXCLUDED_ARGS: + continue + key_parts = key.rsplit(":", 1) + val = self.get_argument(key) + if len(key_parts) == 1: + args[key] = val + else: + args[key_parts[0]] = self._convert_type(val, key_parts[1]) + return args + + def _object_parser(self) -> Dict[str, Dict[str, Any]]: + args: Dict[str, Any] = {} + for key in self.request.arguments.keys(): + if key in EXCLUDED_ARGS: + continue + val = self.get_argument(key) + if not val: + args[key] = None + else: + args[key] = val.split(',') + logging.debug(f"Parsed Arguments: {args}") + return {'objects': args} + + def parse_args(self) -> Dict[str, Any]: + try: + if self.api_defintion.need_object_parser: + args: Dict[str, Any] = self._object_parser() + else: + args = self._default_parser() + except Exception: + raise ServerError( + "Error Parsing Request Arguments. 
" + "Is the Content-Type correct?") + content_type = self.request.headers.get('Content-Type', "").strip() + if content_type.startswith("application/json"): + try: + args.update(jsonw.loads(self.request.body)) + except jsonw.JSONDecodeError: + pass + for key, value in self.path_kwargs.items(): + if value is not None: + args[key] = value + return args + + def _log_debug(self, header: str, args: Any) -> None: + if self.server.is_verbose_enabled(): + resp = args + endpoint = self.api_defintion.endpoint + if isinstance(args, dict): + if ( + endpoint.startswith("/access") or + endpoint.startswith("/machine/sudo/password") + ): + resp = {key: "" for key in args} + elif isinstance(args, str): + if args.startswith(""): + resp = "" + logging.debug(f"{header}::{resp}") + + async def get(self, *args, **kwargs) -> None: + await self._process_http_request(RequestType.GET) + + async def post(self, *args, **kwargs) -> None: + await self._process_http_request(RequestType.POST) + + async def delete(self, *args, **kwargs) -> None: + await self._process_http_request(RequestType.DELETE) + + async def _process_http_request(self, req_type: RequestType) -> None: + if req_type not in self.api_defintion.request_types: + raise tornado.web.HTTPError(405) + args = self.parse_args() + transport = self.get_associated_websocket() + req = f"{self.request.method} {self.request.path}" + self._log_debug(f"HTTP Request::{req}", args) + try: + ip = parse_ip_address(self.request.remote_ip or "") + result = await self.api_defintion.request( + args, req_type, transport, ip, self.current_user + ) + except ServerError as e: + raise tornado.web.HTTPError( + e.status_code, reason=str(e)) from e + if self.wrap_result: + result = {'result': result} + self._log_debug(f"HTTP Response::{req}", result) + if result is None: + self.set_status(204) + elif isinstance(result, dict): + self.set_header("Content-Type", "application/json; charset=UTF-8") + result = jsonw.dumps(result) + elif self.content_type is not None: + self.set_header("Content-Type", self.content_type) + self.finish(result) + +class RPCHandler(AuthorizedRequestHandler, APITransport): + def initialize(self) -> None: + super(RPCHandler, self).initialize() + self.auth_required = False + + @property + def transport_type(self) -> TransportType: + return TransportType.HTTP + + @property + def user_info(self) -> Optional[UserInfo]: + return self.current_user + + @property + def ip_addr(self) -> Optional[IPAddress]: + return parse_ip_address(self.request.remote_ip or "") + + def screen_rpc_request( + self, api_def: APIDefinition, req_type: RequestType, args: Dict[str, Any] + ) -> None: + if self.current_user is None and api_def.auth_required: + raise self.server.error("Unauthorized", 401) + if api_def.endpoint == "objects/subscribe": + raise self.server.error( + "Subscriptions not available for HTTP transport", 404 + ) + + def send_status(self, status: Dict[str, Any], eventtime: float) -> None: + # Can't handle status updates. 
This should not be called, but
+ # we don't want to raise an exception if it is
+ pass
+
+ async def post(self, *args, **kwargs) -> None:
+ content_type = self.request.headers.get('Content-Type', "").strip()
+ if not content_type.startswith("application/json"):
+ raise tornado.web.HTTPError(
+ 400, "Invalid content type, application/json required"
+ )
+ rpc: JsonRPC = self.server.lookup_component("jsonrpc")
+ result = await rpc.dispatch(self.request.body, self)
+ if result is not None:
+ self.set_header("Content-Type", "application/json; charset=UTF-8")
+ self.finish(result)
+
+class FileRequestHandler(AuthorizedFileHandler):
+ def set_extra_headers(self, path: str) -> None:
+ # The call below should never return an empty string,
+ # as the path should have already been validated to be
+ # a file
+ assert isinstance(self.absolute_path, str)
+ basename = os.path.basename(self.absolute_path)
+ ascii_basename = self._escape_filename_to_ascii(basename)
+ utf8_basename = self._escape_filename_to_utf8(basename)
+ self.set_header(
+ "Content-Disposition",
+ f"attachment; filename=\"{ascii_basename}\"; "
+ f"filename*=UTF-8\'\'{utf8_basename}")
+
+ async def delete(self, path: str) -> None:
+ app: MoonrakerApp = self.server.lookup_component("application")
+ endpoint = app.parse_endpoint(self.request.path or "")
+ path = endpoint.lstrip("/").split("/", 2)[-1]
+ path = url_unescape(path, plus=False)
+ file_manager: FileManager
+ file_manager = self.server.lookup_component('file_manager')
+ try:
+ filename = await file_manager.delete_file(path)
+ except self.server.error as e:
+ raise tornado.web.HTTPError(e.status_code, str(e))
+ self.set_header("Content-Type", "application/json; charset=UTF-8")
+ self.finish(jsonw.dumps({'result': filename}))
+
+ async def get(self, path: str, include_body: bool = True) -> None:
+ # Set up our path instance variables.
+ self.path = self.parse_url_path(path)
+ del path # make sure we don't refer to path instead of self.path again
+ absolute_path = self.get_absolute_path(self.root, self.path)
+ self.absolute_path = self.validate_absolute_path(
+ self.root, absolute_path)
+ if self.absolute_path is None:
+ return
+ file_manager: FileManager
+ file_manager = self.server.lookup_component('file_manager')
+ try:
+ file_manager.check_reserved_path(self.absolute_path, False)
+ except self.server.error as e:
+ raise tornado.web.HTTPError(e.status_code, str(e))
+
+ self.modified = self.get_modified_time()
+ self.set_headers()
+
+ self.request.headers.pop("If-None-Match", None)
+ if self.should_return_304():
+ self.set_status(304)
+ return
+
+ request_range = None
+ range_header = self.request.headers.get("Range")
+ if range_header:
+ # As per RFC 2616 14.16, if an invalid Range header is specified,
+ # the request will be treated as if the header didn't exist.
+ request_range = tornado.httputil._parse_request_range(range_header)
+
+ size = self.get_content_size()
+ if request_range:
+ start, end = request_range
+ if start is not None and start < 0:
+ start += size
+ if start < 0:
+ start = 0
+ if (
+ start is not None
+ and (start >= size or (end is not None and start >= end))
+ ) or end == 0:
+ # As per RFC 2616 14.35.1, a range is not satisfiable only: if
+ # the first requested byte is equal to or greater than the
+ # content, or when a suffix with length 0 is specified.
+ # https://tools.ietf.org/html/rfc7233#section-2.1
+ # A byte-range-spec is invalid if the last-byte-pos value is
+ # present and less than the first-byte-pos.
+ self.set_status(416) # Range Not Satisfiable + self.set_header("Content-Type", "text/plain") + self.set_header("Content-Range", "bytes */%s" % (size,)) + return + if end is not None and end > size: + # Clients sometimes blindly use a large range to limit their + # download size; cap the endpoint at the actual file size. + end = size + # Note: only return HTTP 206 if less than the entire range has been + # requested. Not only is this semantically correct, but Chrome + # refuses to play audio if it gets an HTTP 206 in response to + # ``Range: bytes=0-``. + if size != (end or size) - (start or 0): + self.set_status(206) # Partial Content + self.set_header( + "Content-Range", tornado.httputil._get_content_range( + start, end, size) + ) + else: + start = end = None + + if start is not None and end is not None: + content_length = end - start + elif end is not None: + content_length = end + elif start is not None: + end = size + content_length = size - start + else: + end = size + content_length = size + self.set_header("Content-Length", content_length) + + if include_body: + evt_loop = self.server.get_event_loop() + content = self.get_content_nonblock( + evt_loop, self.absolute_path, start, end) + async for chunk in content: + try: + self.write(chunk) + await self.flush() + except tornado.iostream.StreamClosedError: + return + else: + assert self.request.method == "HEAD" + + def _escape_filename_to_ascii(self, basename: str) -> str: + ret = basename.encode("ascii", "replace").decode() + return ret.replace('"', '\\"') + + def _escape_filename_to_utf8(self, basename: str) -> str: + return urllib.parse.quote(basename, encoding="utf-8") + + @classmethod + async def get_content_nonblock( + cls, + evt_loop: EventLoop, + abspath: str, + start: Optional[int] = None, + end: Optional[int] = None + ) -> AsyncGenerator[bytes, None]: + file: BufferedReader = await evt_loop.run_in_thread(open, abspath, "rb") + try: + if start is not None: + file.seek(start) + if end is not None: + remaining = end - (start or 0) # type: Optional[int] + else: + remaining = None + while True: + chunk_size = 64 * 1024 + if remaining is not None and remaining < chunk_size: + chunk_size = remaining + chunk = await evt_loop.run_in_thread(file.read, chunk_size) + if chunk: + if remaining is not None: + remaining -= len(chunk) + yield chunk + else: + if remaining is not None: + assert remaining == 0 + return + finally: + await evt_loop.run_in_thread(file.close) + + @classmethod + def _get_cached_version(cls, abs_path: str) -> Optional[str]: + return None + +@tornado.web.stream_request_body +class FileUploadHandler(AuthorizedRequestHandler): + def initialize(self, + location_prefix: str = "server/files", + max_upload_size: int = MAX_BODY_SIZE + ) -> None: + self.location_prefix = location_prefix + super(FileUploadHandler, self).initialize() + self.file_manager: FileManager = self.server.lookup_component( + 'file_manager') + self.max_upload_size = max_upload_size + self.parse_lock = Lock() + self.parse_failed: bool = False + + async def prepare(self) -> None: + ret = super(FileUploadHandler, self).prepare() + if ret is not None: + await ret + content_type: str = self.request.headers.get("Content-Type", "") + logging.info( + f"Upload Request Received from {self.request.remote_ip}\n" + f"Content-Type: {content_type}" + ) + fm: FileManager = self.server.lookup_component("file_manager") + fm.check_write_enabled() + if self.request.method == "POST": + assert isinstance(self.request.connection, HTTP1Connection) + 
self.request.connection.set_max_body_size(self.max_upload_size) + tmpname = self.file_manager.gen_temp_upload_path() + self._targets = { + 'root': ValueTarget(), + 'print': ValueTarget(), + 'path': ValueTarget(), + 'checksum': ValueTarget(), + } + self._file = FileTarget(tmpname) + self._sha256_target = SHA256Target() + self._parser = StreamingFormDataParser(self.request.headers) + self._parser.register('file', self._file) + self._parser.register('file', self._sha256_target) + for name, target in self._targets.items(): + self._parser.register(name, target) + + async def data_received(self, chunk: bytes) -> None: + if self.request.method == "POST" and not self.parse_failed: + async with self.parse_lock: + evt_loop = self.server.get_event_loop() + try: + await evt_loop.run_in_thread(self._parser.data_received, chunk) + except ParseFailedException: + logging.exception("Chunk Parsing Error") + self.parse_failed = True + + async def post(self) -> None: + if self.parse_failed: + self._file.on_finish() + try: + os.remove(self._file.filename) + except Exception: + pass + raise tornado.web.HTTPError(500, "File Upload Parsing Failed") + form_args = {} + chk_target = self._targets.pop('checksum') + calc_chksum = self._sha256_target.value.lower() + if chk_target.value: + # Validate checksum + recd_cksum = chk_target.value.decode().lower() + if calc_chksum != recd_cksum: + # remove temporary file + try: + os.remove(self._file.filename) + except Exception: + pass + raise tornado.web.HTTPError( + 422, + f"File checksum mismatch: expected {recd_cksum}, " + f"calculated {calc_chksum}" + ) + for name, target in self._targets.items(): + if target.value: + form_args[name] = target.value.decode() + form_args['filename'] = self._file.multipart_filename + form_args['tmp_file_path'] = self._file.filename + debug_msg = "\nFile Upload Arguments:" + for name, value in form_args.items(): + debug_msg += f"\n{name}: {value}" + debug_msg += f"\nChecksum: {calc_chksum}" + form_args["current_user"] = self.current_user + logging.debug(debug_msg) + logging.info(f"Processing Uploaded File: {self._file.multipart_filename}") + try: + result = await self.file_manager.finalize_upload(form_args) + except ServerError as e: + raise tornado.web.HTTPError( + e.status_code, str(e)) + # Return 201 and add the Location Header + item: Dict[str, Any] = result.get('item', {}) + root: Optional[str] = item.get('root', None) + fpath: Optional[str] = item.get('path', None) + if root is not None and fpath is not None: + path_parts = fpath.split("/") + fpath = "/".join([url_escape(p, plus=False) for p in path_parts]) + proto = self.request.protocol + if not isinstance(proto, str): + proto = "http" + host = self.request.host + if not isinstance(host, str): + si = self.server.get_host_info() + port = si['port'] if proto == "http" else si['ssl_port'] + host = f"{si['address']}:{port}" + location = f"{proto}://{host}/{self.location_prefix}/{root}/{fpath}" + self.set_header("Location", location) + logging.debug(f"Upload Location header set: {location}") + self.set_status(201) + self.set_header("Content-Type", "application/json; charset=UTF-8") + self.finish(jsonw.dumps(result)) + +# Default Handler for unregistered endpoints +class AuthorizedErrorHandler(AuthorizedRequestHandler): + async def prepare(self) -> None: + ret = super(AuthorizedErrorHandler, self).prepare() + if ret is not None: + await ret + self.set_status(404) + raise tornado.web.HTTPError(404) + + def check_xsrf_cookie(self) -> None: + pass + + def write_error(self, status_code: int, 
**kwargs) -> None:
+ err = {'code': status_code, 'message': self._reason}
+ if 'exc_info' in kwargs:
+ err['traceback'] = "\n".join(
+ traceback.format_exception(*kwargs['exc_info']))
+ self.set_header("Content-Type", "application/json; charset=UTF-8")
+ self.finish(jsonw.dumps({'error': err}))
+
+class RedirectHandler(AuthorizedRequestHandler):
+ def initialize(self) -> None:
+ super().initialize()
+ self.auth_required = False
+
+ async def get(self, *args, **kwargs) -> None:
+ url: Optional[str] = self.get_argument('url', None)
+ if url is None:
+ try:
+ body_args: Dict[str, Any] = jsonw.loads(self.request.body)
+ except jsonw.JSONDecodeError:
+ body_args = {}
+ if 'url' not in body_args:
+ raise tornado.web.HTTPError(
+ 400, "No url argument provided")
+ url = body_args['url']
+ assert url is not None
+ # validate the url origin
+ auth: AuthComp = self.server.lookup_component('authorization', None)
+ if auth is None or not await auth.check_cors(url.rstrip("/")):
+ raise tornado.web.HTTPError(
+ 400, f"Unauthorized URL redirect: {url}")
+ self.redirect(url)
+
+class WelcomeHandler(tornado.web.RequestHandler):
+ def initialize(self) -> None:
+ self.server: Server = self.settings['server']
+
+ async def get(self) -> None:
+ summary: List[str] = []
+ auth: AuthComp = self.server.lookup_component("authorization", None)
+ if auth is not None:
+ try:
+ await auth.authenticate_request(self.request)
+ except tornado.web.HTTPError:
+ authorized = False
+ else:
+ authorized = True
+ if authorized:
+ summary.append(
+ "Your device is authorized to access Moonraker's API."
+ )
+ else:
+ summary.append(
+ "Your device is not authorized to access Moonraker's API. "
+ "This is normal if you intend to use API Key "
+ "authentication or log in as an authenticated user. "
+ "Otherwise you need to add your IP address to the "
+ "'trusted_clients' option in the [authorization] section "
+ "of moonraker.conf."
+ )
+ cors_enabled = auth.cors_enabled()
+ if cors_enabled:
+ summary.append(
+ "CORS is enabled. Cross origin requests will be allowed "
+ "for origins that match one of the patterns specified in "
+ "the 'cors_domains' option of the [authorization] section."
+ )
+ else:
+ summary.append(
+ "All cross origin requests will be blocked by the browser. "
+ "The 'cors_domains' option in [authorization] must be "
+ "configured to enable CORS."
+ )
+ else:
+ authorized = True
+ cors_enabled = False
+ summary.append(
+ "The [authorization] component is not enabled in "
+ "moonraker.conf. All connections will be considered trusted."
+ )
+ summary.append(
+ "All cross origin requests will be blocked by the browser. "
+ "The [authorization] section in moonraker.conf must be "
+ "configured to enable CORS."
+ )
+ kconn: Klippy = self.server.lookup_component("klippy_connection")
+ kstate = kconn.state
+ if kstate != KlippyState.DISCONNECTED:
+ summary.append(f"Klipper reports {kstate.message.lower()}")
+ else:
+ summary.append(
+ "Moonraker is not currently connected to Klipper. Make sure "
+ "that the klipper service has successfully started and that "
+ "its unix socket is enabled."
+ )
+ ancomp: Announcements
+ ancomp = self.server.lookup_component("announcements")
+ wsm: WebsocketManager = self.server.lookup_component("websockets")
+ machine: Machine = self.server.lookup_component("machine")
+ svc_info = machine.get_moonraker_service_info()
+ sudo_req_msg = "<br/>".join(machine.sudo_request_messages)
+ context: Dict[str, Any] = {
+ "remote_ip": self.request.remote_ip,
+ "authorized": authorized,
+ "cors_enabled": cors_enabled,
+ "version": self.server.get_app_args()["software_version"],
+ "ws_count": wsm.get_count(),
+ "klippy_state": kstate,
+ "warnings": self.server.get_warnings(),
+ "summary": summary,
+ "announcements": await ancomp.get_announcements(),
+ "sudo_requested": machine.sudo_requested,
+ "sudo_request_message": sudo_req_msg,
+ "linux_user": machine.linux_user,
+ "local_ip": machine.public_ip or "unknown",
+ "service_name": svc_info.get("unit_name", "unknown"),
+ "hostname": self.server.get_host_info()["hostname"],
+ }
+ app: MoonrakerApp = self.server.lookup_component("application")
+ welcome_template = await app.load_template("welcome.html")
+ ret = await welcome_template.render_async(context)
+ self.finish(ret)
+
+def load_component(config: ConfigHelper) -> MoonrakerApp:
+ return MoonrakerApp(config)
diff --git a/moonraker/components/authorization.py b/moonraker/components/authorization.py
index d020598..d9ffda0 100644
--- a/moonraker/components/authorization.py
+++ b/moonraker/components/authorization.py
@@ -17,16 +17,16 @@ import ipaddress
import re
import socket
import logging
-import json
from tornado.web import HTTPError
from libnacl.sign import Signer, Verifier
+from ..utils import json_wrapper as jsonw
+from ..common import RequestType, TransportType, SqlTableDefinition, UserInfo
# Annotation imports
from typing import (
TYPE_CHECKING,
Any,
Tuple,
- Set,
Optional,
Union,
Dict,
@@ -34,15 +34,16 @@ from typing import (
)
if TYPE_CHECKING:
- from confighelper import ConfigHelper
- from websockets import WebRequest
+ from ..confighelper import ConfigHelper
+ from ..common import WebRequest
+ from .websockets import WebsocketManager
from tornado.httputil import HTTPServerRequest
- from tornado.web import RequestHandler
from .database import MoonrakerDatabase as DBComp
+ from .database import DBProviderWrapper
from .ldap import MoonrakerLDAP
IPAddr = Union[ipaddress.IPv4Address, ipaddress.IPv6Address]
IPNetwork = Union[ipaddress.IPv4Network, ipaddress.IPv6Network]
- OneshotToken = Tuple[IPAddr, Optional[Dict[str, Any]], asyncio.Handle]
+ OneshotToken = Tuple[IPAddr, Optional[UserInfo], asyncio.Handle]

# Helpers for base64url encoding and decoding
def base64url_encode(data: bytes) -> bytes:
@@ -57,8 +58,10 @@ def base64url_decode(data: str) -> bytes:

ONESHOT_TIMEOUT = 5
TRUSTED_CONNECTION_TIMEOUT = 3600
+FQDN_CACHE_TIMEOUT = 84000
PRUNE_CHECK_TIME = 300.
+USER_TABLE = "authorized_users" AUTH_SOURCES = ["moonraker", "ldap"] HASH_ITER = 100000 API_USER = "_API_KEY_USER_" @@ -70,17 +73,71 @@ JWT_HEADER = { 'typ': "JWT" } +class UserSqlDefinition(SqlTableDefinition): + name = USER_TABLE + prototype = ( + f""" + {USER_TABLE} ( + username TEXT PRIMARY KEY NOT NULL, + password TEXT NOT NULL, + created_on REAL NOT NULL, + salt TEXT NOT NULL, + source TEXT NOT NULL, + jwt_secret TEXT, + jwk_id TEXT, + groups pyjson + ) + """ + ) + version = 1 + + def migrate(self, last_version: int, db_provider: DBProviderWrapper) -> None: + if last_version == 0: + users: Dict[str, Dict[str, Any]] + users = db_provider.get_namespace("authorized_users") + api_user = users.pop(API_USER, {}) + if not isinstance(api_user, dict): + api_user = {} + user_vals: List[Tuple[Any, ...]] = [ + UserInfo( + username=API_USER, + password=api_user.get("api_key", uuid.uuid4().hex), + created_on=api_user.get("created_on", time.time()) + ).as_tuple() + ] + for key, user in users.items(): + if not isinstance(user, dict): + logging.info( + f"Auth migration, skipping invalid value: {key} {user}" + ) + continue + user_vals.append(UserInfo(**user).as_tuple()) + placeholders = ",".join("?" * len(user_vals[0])) + conn = db_provider.connection + with conn: + conn.executemany( + f"INSERT OR IGNORE INTO {USER_TABLE} VALUES({placeholders})", + user_vals + ) + db_provider.wipe_local_namespace("authorized_users") + class Authorization: def __init__(self, config: ConfigHelper) -> None: self.server = config.get_server() self.login_timeout = config.getint('login_timeout', 90) self.force_logins = config.getboolean('force_logins', False) self.default_source = config.get('default_source', "moonraker").lower() + self.enable_api_key = config.getboolean('enable_api_key', True) + self.max_logins = config.getint("max_login_attempts", None, above=0) + self.failed_logins: Dict[IPAddr, int] = {} + self.fqdn_cache: Dict[IPAddr, Dict[str, Any]] = {} if self.default_source not in AUTH_SOURCES: - raise config.error( + self.server.add_warning( "[authorization]: option 'default_source' - Invalid " - f"value '{self.default_source}'" + f"value '{self.default_source}', falling back to " + "'moonraker'." 
) + self.default_source = "moonraker" self.ldap: Optional[MoonrakerLDAP] = None if config.has_section("ldap"): self.ldap = self.server.load_component(config, "ldap", None) @@ -90,78 +147,32 @@ class Authorization: " however [ldap] section failed to load or not configured" ) database: DBComp = self.server.lookup_component('database') - database.register_local_namespace('authorized_users', forbidden=True) - self.user_db = database.wrap_namespace('authorized_users') - self.users: Dict[str, Dict[str, Any]] = self.user_db.as_dict() - api_user: Optional[Dict[str, Any]] = self.users.get(API_USER, None) - if api_user is None: - self.api_key = uuid.uuid4().hex - self.users[API_USER] = { - 'username': API_USER, - 'api_key': self.api_key, - 'created_on': time.time() - } - else: - self.api_key = api_user['api_key'] + self.user_table = database.register_table(UserSqlDefinition()) + self.users: Dict[str, UserInfo] = {} + self.api_key = uuid.uuid4().hex hi = self.server.get_host_info() self.issuer = f"http://{hi['hostname']}:{hi['port']}" self.public_jwks: Dict[str, Dict[str, Any]] = {} - for username, user_info in list(self.users.items()): - if username == API_USER: - # Validate the API User - for item in ["username", "api_key", "created_on"]: - if item not in user_info: - self.users[API_USER] = { - 'username': API_USER, - 'api_key': self.api_key, - 'created_on': time.time() - } - break - continue - else: - # validate created users - valid = True - for item in ["username", "password", "salt", "created_on"]: - if item not in user_info: - logging.info( - f"Authorization: User {username} does not " - f"contain field {item}, removing") - del self.users[username] - valid = False - break - if not valid: - continue - # generate jwks for valid users - if 'jwt_secret' in user_info: - try: - priv_key = self._load_private_key(user_info['jwt_secret']) - jwk_id = user_info['jwk_id'] - except (self.server.error, KeyError): - logging.info("Invalid key found for user, removing") - user_info.pop('jwt_secret', None) - user_info.pop('jwk_id', None) - self.users[username] = user_info - continue - self.public_jwks[jwk_id] = self._generate_public_jwk(priv_key) - # sync user changes to the database - self.user_db.sync(self.users) - self.trusted_users: Dict[IPAddr, Any] = {} + self.trusted_users: Dict[IPAddr, Dict[str, Any]] = {} self.oneshot_tokens: Dict[str, OneshotToken] = {} - self.permitted_paths: Set[str] = set() # Get allowed cors domains self.cors_domains: List[str] = [] for domain in config.getlist('cors_domains', []): bad_match = re.search(r"^.+\.[^:]*\*", domain) if bad_match is not None: - raise config.error( - f"Unsafe CORS Domain '{domain}'. Wildcards are not" - " permitted in the top level domain.") + self.server.add_warning( + f"[authorization]: Unsafe domain '{domain}' in option " + f"'cors_domains'. Wildcards are not permitted in the" + " top level domain." + ) + continue if domain.endswith("/"): self.server.add_warning( f"[authorization]: Invalid domain '{domain}' in option " "'cors_domains'. Domain's cannot contain a trailing " - "slash.") + "slash." 
+ )
else:
self.cors_domains.append(
domain.replace(".", "\\.").replace("*", ".*"))
@@ -217,52 +228,122 @@
self._prune_conn_handler)

# Register Authorization Endpoints
- self.permitted_paths.add("/server/redirect")
- self.permitted_paths.add("/access/login")
- self.permitted_paths.add("/access/refresh_jwt")
- self.permitted_paths.add("/access/info")
self.server.register_endpoint(
- "/access/login", ['POST'], self._handle_login,
- transports=['http'])
+ "/access/login", RequestType.POST, self._handle_login,
+ transports=TransportType.HTTP | TransportType.WEBSOCKET,
+ auth_required=False
+ )
self.server.register_endpoint(
- "/access/logout", ['POST'], self._handle_logout,
- transports=['http'])
+ "/access/logout", RequestType.POST, self._handle_logout,
+ transports=TransportType.HTTP | TransportType.WEBSOCKET
+ )
self.server.register_endpoint(
- "/access/refresh_jwt", ['POST'], self._handle_refresh_jwt,
- transports=['http'])
+ "/access/refresh_jwt", RequestType.POST, self._handle_refresh_jwt,
+ transports=TransportType.HTTP | TransportType.WEBSOCKET,
+ auth_required=False
+ )
self.server.register_endpoint(
- "/access/user", ['GET', 'POST', 'DELETE'],
- self._handle_user_request, transports=['http'])
+ "/access/user", RequestType.all(), self._handle_user_request,
+ transports=TransportType.HTTP | TransportType.WEBSOCKET
+ )
self.server.register_endpoint(
- "/access/users/list", ['GET'], self._handle_list_request,
- transports=['http'])
+ "/access/users/list", RequestType.GET, self._handle_list_request,
+ transports=TransportType.HTTP | TransportType.WEBSOCKET
+ )
self.server.register_endpoint(
- "/access/user/password", ['POST'], self._handle_password_reset,
- transports=['http'])
+ "/access/user/password", RequestType.POST, self._handle_password_reset,
+ transports=TransportType.HTTP | TransportType.WEBSOCKET
+ )
+ # Custom endpoint: find a user by username and reset the password
+ # (only suitable for ordinary users)
self.server.register_endpoint(
- "/access/api_key", ['GET', 'POST'],
- self._handle_apikey_request, transports=['http'])
+ "/access/user/password_by_name", RequestType.POST, self._handle_password_reset_by_name,
+ transports=TransportType.HTTP | TransportType.WEBSOCKET
+ )
self.server.register_endpoint(
- "/access/oneshot_token", ['GET'],
- self._handle_oneshot_request, transports=['http'])
+ "/access/api_key", RequestType.GET | RequestType.POST,
+ self._handle_apikey_request,
+ transports=TransportType.HTTP | TransportType.WEBSOCKET
+ )
self.server.register_endpoint(
- "/access/info", ['GET'],
- self._handle_info_request, transports=['http'])
- self.server.register_notification("authorization:user_created")
- self.server.register_notification("authorization:user_deleted")
-
- def _sync_user(self, username: str) -> None:
- self.user_db[username] = self.users[username]
+ "/access/oneshot_token", RequestType.GET, self._handle_oneshot_request,
+ transports=TransportType.HTTP | TransportType.WEBSOCKET
+ )
+ self.server.register_endpoint(
+ "/access/info", RequestType.GET, self._handle_info_request,
+ transports=TransportType.HTTP | TransportType.WEBSOCKET,
+ auth_required=False
+ )
+ wsm: WebsocketManager = self.server.lookup_component("websockets")
+ wsm.register_notification("authorization:user_created")
+ wsm.register_notification(
+ "authorization:user_deleted", event_type="logout"
+ )
+ wsm.register_notification(
+ "authorization:user_logged_out", event_type="logout"
+ )

async def component_init(self) -> None:
+ # Populate users from database
+ cursor = await
self.user_table.execute(f"SELECT * FROM {USER_TABLE}") + self.users = {row[0]: UserInfo(**dict(row)) for row in await cursor.fetchall()} + need_sync = self._initialize_users() + if need_sync: + await self._sync_user_table() self.prune_timer.start(delay=PRUNE_CHECK_TIME) + async def _sync_user(self, username: str) -> None: + user = self.users[username] + vals = user.as_tuple() + placeholders = ",".join("?" * len(vals)) + async with self.user_table as tx: + await tx.execute( + f"REPLACE INTO {USER_TABLE} VALUES({placeholders})", vals + ) + + async def _sync_user_table(self) -> None: + async with self.user_table as tx: + await tx.execute(f"DELETE FROM {USER_TABLE}") + user_vals: List[Tuple[Any, ...]] + user_vals = [user.as_tuple() for user in self.users.values()] + if not user_vals: + return + placeholders = ",".join("?" * len(user_vals[0])) + await tx.executemany( + f"INSERT INTO {USER_TABLE} VALUES({placeholders})", user_vals + ) + + def _initialize_users(self) -> bool: + need_sync = False + api_user: Optional[UserInfo] = self.users.get(API_USER, None) + if api_user is None: + need_sync = True + self.users[API_USER] = UserInfo(username=API_USER, password=self.api_key) + else: + self.api_key = api_user.password + for username, user_info in list(self.users.items()): + if username == API_USER: + continue + # generate jwks for valid users + if user_info.jwt_secret is not None: + try: + priv_key = self._load_private_key(user_info.jwt_secret) + jwk_id = user_info.jwk_id + assert jwk_id is not None + except (self.server.error, KeyError, AssertionError): + logging.info("Invalid jwk found for user, removing") + user_info.jwt_secret = None + user_info.jwk_id = None + self.users[username] = user_info + need_sync = True + continue + self.public_jwks[jwk_id] = self._generate_public_jwk(priv_key) + return need_sync + async def _handle_apikey_request(self, web_request: WebRequest) -> str: - action = web_request.get_action() - if action.upper() == 'POST': + if web_request.get_request_type() == RequestType.POST: self.api_key = uuid.uuid4().hex - self.users[API_USER]['api_key'] = self.api_key - self._sync_user(API_USER) + self.users[API_USER].password = self.api_key + await self._sync_user(API_USER) return self.api_key async def _handle_oneshot_request(self, web_request: WebRequest) -> str: @@ -272,34 +353,65 @@ class Authorization: return self.get_oneshot_token(ip, user_info) async def _handle_login(self, web_request: WebRequest) -> Dict[str, Any]: - return await self._login_jwt_user(web_request) + ip = web_request.get_ip_address() + if ip is not None and self.check_logins_maxed(ip): + raise HTTPError( + 401, "Unauthorized, Maximum Login Attempts Reached" + ) + try: + ret = await self._login_jwt_user(web_request) + except asyncio.CancelledError: + raise + except Exception: + if ip is not None: + failed = self.failed_logins.get(ip, 0) + self.failed_logins[ip] = failed + 1 + raise + if ip is not None: + self.failed_logins.pop(ip, None) + return ret async def _handle_logout(self, web_request: WebRequest) -> Dict[str, str]: user_info = web_request.get_current_user() if user_info is None: raise self.server.error("No user logged in") - username: str = user_info['username'] + username: str = user_info.username if username in RESERVED_USERS: raise self.server.error( f"Invalid log out request for user {username}") - self.users[username].pop("jwt_secret", None) - jwk_id: str = self.users[username].pop("jwk_id", None) - self._sync_user(username) - self.public_jwks.pop(jwk_id, None) + jwk_id: Optional[str] = 
self.users[username].jwk_id + self.users[username].jwt_secret = None + self.users[username].jwk_id = None + if jwk_id is not None: + self.public_jwks.pop(jwk_id, None) + await self._sync_user(username) + eventloop = self.server.get_event_loop() + eventloop.delay_callback( + .005, self.server.send_event, "authorization:user_logged_out", + {'username': username} + ) return { "username": username, "action": "user_logged_out" } - async def _handle_info_request( - self, web_request: WebRequest - ) -> Dict[str, Any]: + async def _handle_info_request(self, web_request: WebRequest) -> Dict[str, Any]: sources = ["moonraker"] if self.ldap is not None: sources.append("ldap") + login_req = self.force_logins and len(self.users) > 1 + request_trusted: Optional[bool] = None + user = web_request.get_current_user() + req_ip = web_request.ip_addr + if user is not None and user.username == TRUSTED_USER: + request_trusted = True + elif req_ip is not None: + request_trusted = await self._check_authorized_ip(req_ip) return { "default_source": self.default_source, - "available_sources": sources + "available_sources": sources, + "login_required": login_req, + "trusted": request_trusted } async def _handle_refresh_jwt(self, @@ -307,46 +419,46 @@ class Authorization: ) -> Dict[str, str]: refresh_token: str = web_request.get_str('refresh_token') try: - user_info = self._decode_jwt(refresh_token, token_type="refresh") + user_info = self.decode_jwt(refresh_token, token_type="refresh") except Exception: raise self.server.error("Invalid Refresh Token", 401) - username: str = user_info['username'] - if 'jwt_secret' not in user_info or "jwk_id" not in user_info: + username: str = user_info.username + if user_info.jwt_secret is None or user_info.jwk_id is None: raise self.server.error("User not logged in", 401) - private_key = self._load_private_key(user_info['jwt_secret']) - jwk_id: str = user_info['jwk_id'] + private_key = self._load_private_key(user_info.jwt_secret) + jwk_id: str = user_info.jwk_id token = self._generate_jwt(username, jwk_id, private_key) return { 'username': username, 'token': token, - 'source': user_info.get("source", "moonraker"), + 'source': user_info.source, 'action': 'user_jwt_refresh' } - async def _handle_user_request(self, - web_request: WebRequest - ) -> Dict[str, Any]: - action = web_request.get_action() - if action == "GET": + async def _handle_user_request( + self, web_request: WebRequest + ) -> Dict[str, Any]: + req_type = web_request.get_request_type() + if req_type == RequestType.GET: user = web_request.get_current_user() if user is None: return { - 'username': None, - 'source': None, - 'created_on': None, + "username": None, + "source": None, + "created_on": None, } else: return { - 'username': user['username'], - 'source': user.get("source", "moonraker"), - 'created_on': user.get('created_on') + "username": user.username, + "source": user.source, + "created_on": user.created_on } - elif action == "POST": + elif req_type == RequestType.POST: # Create User return await self._login_jwt_user(web_request, create=True) - elif action == "DELETE": + elif req_type == RequestType.DELETE: # Delete User - return self._delete_jwt_user(web_request) + return await self._delete_jwt_user(web_request) raise self.server.error("Invalid Request Method") async def _handle_list_request(self, @@ -354,12 +466,12 @@ class Authorization: ) -> Dict[str, List[Dict[str, Any]]]: user_list = [] for user in self.users.values(): - if user['username'] == API_USER: + if user.username == API_USER: continue 
user_list.append({
- 'username': user['username'],
- 'source': user.get("source", "moonraker"),
- 'created_on': user['created_on']
+ 'username': user.username,
+ 'source': user.source,
+ 'created_on': user.created_on
})
return {
'users': user_list
@@ -373,26 +485,49 @@
user_info = web_request.get_current_user()
if user_info is None:
raise self.server.error("No Current User")
- username = user_info['username']
- if user_info.get("source", "moonraker") == "ldap":
+ username = user_info.username
+ if user_info.source == "ldap":
raise self.server.error(
f"Can´t Reset password for ldap user {username}")
if username in RESERVED_USERS:
raise self.server.error(
f"Invalid Reset Request for user {username}")
- salt = bytes.fromhex(user_info['salt'])
+ salt = bytes.fromhex(user_info.salt)
hashed_pass = hashlib.pbkdf2_hmac(
'sha256', password.encode(), salt, HASH_ITER).hex()
- if hashed_pass != user_info['password']:
+ if hashed_pass != user_info.password:
raise self.server.error("Invalid Password")
new_hashed_pass = hashlib.pbkdf2_hmac(
'sha256', new_pass.encode(), salt, HASH_ITER).hex()
- self.users[username]['password'] = new_hashed_pass
- self._sync_user(username)
+ self.users[username].password = new_hashed_pass
+ await self._sync_user(username)
return {
'username': username,
'action': "user_password_reset"
}
+
+ async def _handle_password_reset_by_name(self,
+ web_request: WebRequest
+ ) -> Dict[str, str]:
+ username: str = web_request.get_str('username')
+ new_pass: str = web_request.get_str('new_password')
+
+ user_info = self.users.get(username)
+ if user_info is None:
+ raise self.server.error(f"No registered user: {username}")
+ if user_info.source == "ldap":
+ raise self.server.error(
+ f"Can't Reset password for ldap user {username}")
+ if username in RESERVED_USERS:
+ raise self.server.error(
+ f"Invalid Reset Request for user {username}")
+ salt = bytes.fromhex(user_info.salt)
+ new_hashed_pass = hashlib.pbkdf2_hmac(
+ 'sha256', new_pass.encode(), salt, HASH_ITER).hex()
+ self.users[username].password = new_hashed_pass
+ await self._sync_user(username)
+ return {
+ 'username': username,
+ 'action': "user_password_reset_by_name"
+ }

async def _login_jwt_user(
self, web_request: WebRequest, create: bool = False
@@ -404,7 +539,7 @@
).lower()
if source not in AUTH_SOURCES:
raise self.server.error(f"Invalid 'source': {source}")
- user_info: Dict[str, Any]
+ user_info: UserInfo
if username in RESERVED_USERS:
raise self.server.error(
f"Invalid Request for user {username}")
@@ -424,15 +559,14 @@
salt = secrets.token_bytes(32)
hashed_pass = hashlib.pbkdf2_hmac(
'sha256', password.encode(), salt, HASH_ITER).hex() action = "user_logged_in" - if hashed_pass != user_info['password']: + if hashed_pass != user_info.password: raise self.server.error("Invalid Password") - jwt_secret_hex: Optional[str] = user_info.get('jwt_secret', None) + jwt_secret_hex: Optional[str] = user_info.jwt_secret if jwt_secret_hex is None: private_key = Signer() jwk_id = base64url_encode(secrets.token_bytes()).decode() - user_info['jwt_secret'] = private_key.hex_seed().decode() - user_info['jwk_id'] = jwk_id + user_info.jwt_secret = private_key.hex_seed().decode() # type: ignore + user_info.jwk_id = jwk_id self.users[username] = user_info - self._sync_user(username) + await self._sync_user(username) self.public_jwks[jwk_id] = self._generate_public_jwk(private_key) else: private_key = self._load_private_key(jwt_secret_hex) - jwk_id = user_info['jwk_id'] + if user_info.jwk_id is None: + user_info.jwk_id = base64url_encode(secrets.token_bytes()).decode() + jwk_id = user_info.jwk_id token = self._generate_jwt(username, jwk_id, private_key) refresh_token = self._generate_jwt( username, jwk_id, private_key, token_type="refresh", exp_time=datetime.timedelta(days=self.login_timeout)) + conn = web_request.get_client_connection() if create: event_loop = self.server.get_event_loop() event_loop.delay_callback( .005, self.server.send_event, "authorization:user_created", {'username': username}) + elif conn is not None: + conn.user_info = user_info return { 'username': username, 'token': token, - 'source': user_info.get("source", "moonraker"), + 'source': user_info.source, 'refresh_token': refresh_token, 'action': action } - def _delete_jwt_user(self, web_request: WebRequest) -> Dict[str, str]: + async def _delete_jwt_user(self, web_request: WebRequest) -> Dict[str, str]: username: str = web_request.get_str('username') current_user = web_request.get_current_user() if current_user is not None: - curname = current_user.get('username', None) - if curname is not None and curname == username: - raise self.server.error( - f"Cannot delete logged in user {curname}") + curname = current_user.username + if curname == username: + raise self.server.error(f"Cannot delete logged in user {curname}") if username in RESERVED_USERS: raise self.server.error( f"Invalid Request for reserved user {username}") - user_info: Optional[Dict[str, Any]] = self.users.get(username) + user_info: Optional[UserInfo] = self.users.get(username) if user_info is None: raise self.server.error(f"No registered user: {username}") - if 'jwk_id' in user_info: - self.public_jwks.pop(user_info['jwk_id'], None) + if user_info.jwk_id is not None: + self.public_jwks.pop(user_info.jwk_id, None) del self.users[username] - del self.user_db[username] + async with self.user_table as tx: + await tx.execute( + f"DELETE FROM {USER_TABLE} WHERE username = ?", (username,) + ) event_loop = self.server.get_event_loop() event_loop.delay_callback( .005, self.server.send_event, @@ -530,26 +671,25 @@ class Authorization: } header = {'kid': jwk_id} header.update(JWT_HEADER) - jwt_header = base64url_encode(json.dumps(header).encode()) - jwt_payload = base64url_encode(json.dumps(payload).encode()) + jwt_header = base64url_encode(jsonw.dumps(header)) + jwt_payload = base64url_encode(jsonw.dumps(payload)) jwt_msg = b".".join([jwt_header, jwt_payload]) sig = private_key.signature(jwt_msg) jwt_sig = base64url_encode(sig) return b".".join([jwt_msg, jwt_sig]).decode() - def _decode_jwt(self, - token: str, - token_type: str = "access" - ) -> Dict[str, Any]: + def 
decode_jwt( + self, token: str, token_type: str = "access", check_exp: bool = True + ) -> UserInfo: message, sig = token.rsplit('.', maxsplit=1) enc_header, enc_payload = message.split('.') - header: Dict[str, Any] = json.loads(base64url_decode(enc_header)) + header: Dict[str, Any] = jsonw.loads(base64url_decode(enc_header)) sig_bytes = base64url_decode(sig) # verify header if header.get('typ') != "JWT" or header.get('alg') != "EdDSA": raise self.server.error("Invalid JWT header") - jwk_id = header.get('kid') + jwk_id: Optional[str] = header.get('kid') if jwk_id not in self.public_jwks: raise self.server.error("Invalid key ID") @@ -558,7 +698,7 @@ class Authorization: public_key.verify(sig_bytes + message.encode()) # validate claims - payload: Dict[str, Any] = json.loads(base64url_decode(enc_payload)) + payload: Dict[str, Any] = jsonw.loads(base64url_decode(enc_payload)) if payload['token_type'] != token_type: raise self.server.error( f"JWT Token type mismatch: Expected {token_type}, " @@ -567,16 +707,34 @@ class Authorization: raise self.server.error("Invalid JWT Issuer", 401) if payload['aud'] != "Moonraker": raise self.server.error("Invalid JWT Audience", 401) - if payload['exp'] < int(time.time()): + if check_exp and payload['exp'] < int(time.time()): raise self.server.error("JWT Expired", 401) # get user - user_info: Optional[Dict[str, Any]] = self.users.get( + user_info: Optional[UserInfo] = self.users.get( payload.get('username', ""), None) if user_info is None: raise self.server.error("Unknown user", 401) return user_info + def validate_jwt(self, token: str) -> UserInfo: + try: + user_info = self.decode_jwt(token) + except Exception as e: + if isinstance(e, self.server.error): + raise + raise self.server.error( + f"Failed to decode JWT: {e}", 401 + ) from e + return user_info + + def validate_api_key(self, api_key: str) -> UserInfo: + if not self.enable_api_key: + raise self.server.error("API Key authentication is disabled", 401) + if api_key and api_key == self.api_key: + return self.users[API_USER] + raise self.server.error("Invalid API Key", 401) + def _load_private_key(self, secret: str) -> Signer: try: key = Signer(bytes.fromhex(secret)) @@ -611,17 +769,19 @@ class Authorization: exp_time: float = user_info['expires_at'] if cur_time >= exp_time: self.trusted_users.pop(ip, None) - logging.info( - f"Trusted Connection Expired, IP: {ip}") + logging.info(f"Trusted Connection Expired, IP: {ip}") + for ip, fqdn_info in list(self.fqdn_cache.items()): + exp_time = fqdn_info["expires_at"] + if cur_time >= exp_time: + domain: str = fqdn_info["domain"] + self.fqdn_cache.pop(ip, None) + logging.info(f"Cached FQDN Expired, IP: {ip}, domain: {domain}") return eventtime + PRUNE_CHECK_TIME def _oneshot_token_expire_handler(self, token): self.oneshot_tokens.pop(token, None) - def get_oneshot_token(self, - ip_addr: IPAddr, - user: Optional[Dict[str, Any]] - ) -> str: + def get_oneshot_token(self, ip_addr: IPAddr, user: Optional[UserInfo]) -> str: token = base64.b32encode(os.urandom(20)).decode() event_loop = self.server.get_event_loop() hdl = event_loop.delay_callback( @@ -629,68 +789,76 @@ class Authorization: self.oneshot_tokens[token] = (ip_addr, user, hdl) return token - def _check_json_web_token(self, - request: HTTPServerRequest - ) -> Optional[Dict[str, Any]]: + def _check_json_web_token( + self, request: HTTPServerRequest, required: bool = True + ) -> Optional[UserInfo]: auth_token: Optional[str] = request.headers.get("Authorization") if auth_token is None: auth_token = 
request.headers.get("X-Access-Token") if auth_token is None: qtoken = request.query_arguments.get('access_token', None) if qtoken is not None: - auth_token = qtoken[-1].decode() + auth_token = qtoken[-1].decode(errors="ignore") + elif auth_token.startswith("Bearer "): + auth_token = auth_token[7:] else: - if auth_token.startswith("Bearer "): - auth_token = auth_token[7:] - elif auth_token.startswith("Basic "): - raise HTTPError(401, "Basic Auth is not supported") - else: - raise HTTPError( - 401, f"Invalid Authorization Header: {auth_token}") + return None if auth_token: try: - return self._decode_jwt(auth_token) + return self.decode_jwt(auth_token, check_exp=required) except Exception: logging.exception(f"JWT Decode Error {auth_token}") - raise HTTPError(401, f"Error decoding JWT: {auth_token}") + raise HTTPError(401, "JWT Decode Error") return None - def _check_authorized_ip(self, ip: IPAddr) -> bool: + async def _check_authorized_ip(self, ip: IPAddr) -> bool: if ip in self.trusted_ips: return True for rng in self.trusted_ranges: if ip in rng: return True - fqdn = socket.getfqdn(str(ip)).lower() - if fqdn in self.trusted_domains: - return True + if self.trusted_domains: + if ip in self.fqdn_cache: + fqdn: str = self.fqdn_cache[ip]["domain"] + else: + eventloop = self.server.get_event_loop() + try: + fut = eventloop.run_in_thread(socket.getfqdn, str(ip)) + fqdn = await asyncio.wait_for(fut, 5.0) + except asyncio.TimeoutError: + logging.info("Call to socket.getfqdn() timed out") + return False + else: + fqdn = fqdn.lower() + self.fqdn_cache[ip] = { + "expires_at": time.time() + FQDN_CACHE_TIMEOUT, + "domain": fqdn + } + return fqdn in self.trusted_domains return False - def _check_trusted_connection(self, - ip: Optional[IPAddr] - ) -> Optional[Dict[str, Any]]: + async def _check_trusted_connection( + self, ip: Optional[IPAddr] + ) -> Optional[UserInfo]: if ip is not None: curtime = time.time() exp_time = curtime + TRUSTED_CONNECTION_TIMEOUT if ip in self.trusted_users: - self.trusted_users[ip]['expires_at'] = exp_time - return self.trusted_users[ip] - elif self._check_authorized_ip(ip): + self.trusted_users[ip]["expires_at"] = exp_time + return self.trusted_users[ip]["user"] + elif await self._check_authorized_ip(ip): logging.info( f"Trusted Connection Detected, IP: {ip}") self.trusted_users[ip] = { - 'username': TRUSTED_USER, - 'password': None, - 'created_on': curtime, - 'expires_at': exp_time + "user": UserInfo(TRUSTED_USER, "", curtime), + "expires_at": exp_time } - return self.trusted_users[ip] + return self.trusted_users[ip]["user"] return None - def _check_oneshot_token(self, - token: str, - cur_ip: Optional[IPAddr] - ) -> Optional[Dict[str, Any]]: + def _check_oneshot_token( + self, token: str, cur_ip: Optional[IPAddr] + ) -> Optional[UserInfo]: if token in self.oneshot_tokens: ip_addr, user, hdl = self.oneshot_tokens.pop(token) hdl.cancel() @@ -702,27 +870,33 @@ class Authorization: else: return None - def check_authorized(self, - request: HTTPServerRequest - ) -> Optional[Dict[str, Any]]: - if ( - request.path in self.permitted_paths - or request.method == "OPTIONS" - ): + def check_logins_maxed(self, ip_addr: IPAddr) -> bool: + if self.max_logins is None: + return False + return self.failed_logins.get(ip_addr, 0) >= self.max_logins + + async def authenticate_request( + self, request: HTTPServerRequest, auth_required: bool = True + ) -> Optional[UserInfo]: + if request.method == "OPTIONS": return None - - # Check JSON Web Token - jwt_user = self._check_json_web_token(request) - 
if jwt_user is not None:
- return jwt_user
-
+
+ # Allow requests from the loopback interface without authentication
try:
+ ip = ipaddress.ip_address(request.remote_ip) # type: ignore
+ if ip.is_loopback:
+ return None
except ValueError:
logging.exception(
f"Unable to Create IP Address {request.remote_ip}")
ip = None
+
+ # Check JSON Web Token
+ jwt_user = self._check_json_web_token(request, auth_required)
+ if jwt_user is not None:
+ return jwt_user
+
# Check oneshot access token
ost: Optional[List[bytes]] = request.arguments.get('token', None)
if ost is not None:
@@ -731,26 +905,29 @@
return ost_user

# Check API Key Header
- key: Optional[str] = request.headers.get("X-Api-Key")
- if key and key == self.api_key:
- return self.users[API_USER]
+ if self.enable_api_key:
+ key: Optional[str] = request.headers.get("X-Api-Key")
+ if key and key == self.api_key:
+ return self.users[API_USER]

- # If the force_logins option is enabled and at least one
- # user is created this is an unauthorized request
+ # If the force_logins option is enabled and at least one user is created
+ # then trusted user authentication is disabled
if self.force_logins and len(self.users) > 1:
- raise HTTPError(401, "Unauthorized")
+ if not auth_required:
+ return None
+ raise HTTPError(401, "Unauthorized, Force Logins Enabled")

- # Check if IP is trusted
- trusted_user = self._check_trusted_connection(ip)
+ # Check if IP is trusted. If this endpoint doesn't require authentication
+ # then it is acceptable to return None
+ trusted_user = await self._check_trusted_connection(ip)
if trusted_user is not None:
return trusted_user

+ if not auth_required:
+ return None
raise HTTPError(401, "Unauthorized")

- def check_cors(self,
- origin: Optional[str],
- req_hdlr: Optional[RequestHandler] = None
- ) -> bool:
+ async def check_cors(self, origin: Optional[str]) -> bool:
if origin is None or not self.cors_domains:
return False
for regex in self.cors_domains:
@@ -759,7 +936,6 @@
if match.group() == origin:
logging.debug(f"CORS Pattern Matched, origin: {origin} "
f" | pattern: {regex}")
- self._set_cors_headers(origin, req_hdlr)
return True
else:
logging.debug(f"Partial Cors Match: {match.group()}")
@@ -774,37 +950,13 @@
except ValueError:
pass
else:
- if self._check_authorized_ip(ipaddr):
- logging.debug(
- f"Cors request matched trusted IP: {ip}")
- self._set_cors_headers(origin, req_hdlr)
+ if await self._check_authorized_ip(ipaddr):
+ logging.debug(f"Cors request matched trusted IP: {ip}")
return True
logging.debug(f"No CORS match for origin: {origin}\n"
f"Patterns: {self.cors_domains}")
return False

- def _set_cors_headers(self,
- origin: str,
- req_hdlr: Optional[RequestHandler]
- ) -> None:
- if req_hdlr is None:
- return
- req_hdlr.set_header("Access-Control-Allow-Origin", origin)
- if req_hdlr.request.method == "OPTIONS":
- req_hdlr.set_header(
- "Access-Control-Allow-Methods",
- "GET, POST, PUT, DELETE, OPTIONS")
- req_hdlr.set_header(
- "Access-Control-Allow-Headers",
- "Origin, Accept, Content-Type, X-Requested-With, "
- "X-CRSF-Token, Authorization, X-Access-Token, "
- "X-Api-Key")
- if req_hdlr.request.headers.get(
- "Access-Control-Request-Private-Network", None) == "true":
- req_hdlr.set_header(
- "Access-Control-Allow-Private-Network",
- "true")
-
def cors_enabled(self) -> bool:
return
self.cors_domains is not None diff --git a/moonraker/components/button.py b/moonraker/components/button.py index 7ce5f84..b244228 100644 --- a/moonraker/components/button.py +++ b/moonraker/components/button.py @@ -6,7 +6,6 @@ from __future__ import annotations import asyncio import logging -from confighelper import SentinelClass from typing import ( TYPE_CHECKING, @@ -14,11 +13,9 @@ from typing import ( Dict ) if TYPE_CHECKING: - from confighelper import ConfigHelper - from .gpio import GpioFactory - from app import InternalTransport as ITransport + from ..confighelper import ConfigHelper + from .application import InternalTransport as ITransport -SENTINEL = SentinelClass.get_instance() class ButtonManager: def __init__(self, config: ConfigHelper) -> None: @@ -29,7 +26,7 @@ class ButtonManager: for section in prefix_sections: cfg = config[section] # Reserve the "type" option for future use - btn_type = cfg.get('type', "gpio") + btn_type = cfg.get('type', "gpio") # noqa: F841 try: btn = GpioButton(cfg) except Exception as e: @@ -48,25 +45,21 @@ class GpioButton: self.server = config.get_server() self.eventloop = self.server.get_event_loop() self.name = config.get_name().split()[-1] - self.itransport: ITransport = self.server.lookup_component( - 'internal_transport') + self.itransport: ITransport = self.server.lookup_component("internal_transport") self.mutex = asyncio.Lock() - gpio: GpioFactory = self.server.load_component(config, 'gpio') - self.gpio_event = gpio.register_gpio_event( - config.get('pin'), self._on_gpio_event) - min_event_time = config.getfloat( - 'minimum_event_time', .05, minval=.010) - self.gpio_event.setup_debounce(min_event_time, self._on_gpio_error) - self.press_template = config.gettemplate( - "on_press", None, is_async=True) - self.release_template = config.gettemplate( - "on_release", None, is_async=True) + self.gpio_event = config.getgpioevent("pin", self._on_gpio_event) + self.min_event_time = config.getfloat("minimum_event_time", 0, minval=0.0) + debounce_period = config.getfloat("debounce_period", .05, minval=0.01) + self.gpio_event.setup_debounce(debounce_period, self._on_gpio_error) + self.press_template = config.gettemplate("on_press", None, is_async=True) + self.release_template = config.gettemplate("on_release", None, is_async=True) if ( self.press_template is None and self.release_template is None ): raise config.error( - f"[{config.get_name()}]: No template option configured") + f"[{config.get_name()}]: No template option configured" + ) self.notification_sent: bool = False self.user_data: Dict[str, Any] = {} self.context: Dict[str, Any] = { @@ -101,11 +94,11 @@ class GpioButton: data['aux'] = result self.server.send_event("button:button_event", data) - async def _on_gpio_event(self, - eventtime: float, - elapsed_time: float, - pressed: int - ) -> None: + async def _on_gpio_event( + self, eventtime: float, elapsed_time: float, pressed: int + ) -> None: + if elapsed_time < self.min_event_time: + return template = self.press_template if pressed else self.release_template if template is None: return diff --git a/moonraker/components/data_store.py b/moonraker/components/data_store.py index cee78c6..739cf38 100644 --- a/moonraker/components/data_store.py +++ b/moonraker/components/data_store.py @@ -8,6 +8,7 @@ from __future__ import annotations import logging import time from collections import deque +from ..common import RequestType # Annotation imports from typing import ( @@ -16,19 +17,23 @@ from typing import ( Optional, Dict, List, - Tuple, Deque, ) if 
TYPE_CHECKING: - from confighelper import ConfigHelper - from websockets import WebRequest - from . import klippy_apis - APIComp = klippy_apis.KlippyAPI + from ..confighelper import ConfigHelper + from ..common import WebRequest + from .klippy_connection import KlippyConnection + from .klippy_apis import KlippyAPI as APIComp GCQueue = Deque[Dict[str, Any]] - TempStore = Dict[str, Dict[str, Deque[float]]] + TempStore = Dict[str, Dict[str, Deque[Optional[float]]]] TEMP_UPDATE_TIME = 1. +def _round_null(val: Optional[float], ndigits: int) -> Optional[float]: + if val is None: + return val + return round(val, ndigits) + class DataStore: def __init__(self, config: ConfigHelper) -> None: self.server = config.get_server() @@ -36,16 +41,15 @@ class DataStore: self.gcode_store_size = config.getint('gcode_store_size', 1000) # Temperature Store Tracking - self.last_temps: Dict[str, Tuple[float, ...]] = {} + kconn: KlippyConnection = self.server.lookup_component("klippy_connection") + self.subscription_cache = kconn.get_subscription_cache() self.gcode_queue: GCQueue = deque(maxlen=self.gcode_store_size) self.temperature_store: TempStore = {} + self.temp_monitors: List[str] = [] eventloop = self.server.get_event_loop() self.temp_update_timer = eventloop.register_timer( self._update_temperature_store) - # Register status update event - self.server.register_event_handler( - "server:status_update", self._set_current_temps) self.server.register_event_handler( "server:gcode_response", self._update_gcode_store) self.server.register_event_handler( @@ -56,11 +60,13 @@ class DataStore: # Register endpoints self.server.register_endpoint( - "/server/temperature_store", ['GET'], - self._handle_temp_store_request) + "/server/temperature_store", RequestType.GET, + self._handle_temp_store_request + ) self.server.register_endpoint( - "/server/gcode_store", ['GET'], - self._handle_gcode_store_request) + "/server/gcode_store", RequestType.GET, + self._handle_gcode_store_request + ) async def _init_sensors(self) -> None: klippy_apis: APIComp = self.server.lookup_component('klippy_apis') @@ -71,8 +77,10 @@ class DataStore: except self.server.error as e: logging.info(f"Error Configuring Sensors: {e}") return - sensors: List[str] - sensors = result.get("heaters", {}).get("available_sensors", []) + heaters: Dict[str, List[str]] = result.get("heaters", {}) + sensors = heaters.get("available_sensors", []) + self.temp_monitors = heaters.get("available_monitors", []) + sensors.extend(self.temp_monitors) if sensors: # Add Subscription @@ -85,59 +93,56 @@ class DataStore: return logging.info(f"Configuring available sensors: {sensors}") new_store: TempStore = {} + valid_fields = ("temperature", "target", "power", "speed") for sensor in sensors: - fields = list(status.get(sensor, {}).keys()) + reported_fields = [ + f for f in list(status.get(sensor, {}).keys()) if f in valid_fields + ] + if not reported_fields: + logging.info(f"No valid fields reported for sensor: {sensor}") + self.temperature_store.pop(sensor, None) + continue if sensor in self.temperature_store: new_store[sensor] = self.temperature_store[sensor] + for field in list(new_store[sensor].keys()): + if field not in reported_fields: + new_store[sensor].pop(field, None) + else: + initial_val: Optional[float] + initial_val = _round_null(status[sensor][field], 2) + new_store[sensor][field].append(initial_val) else: - new_store[sensor] = { - 'temperatures': deque(maxlen=self.temp_store_size)} - for item in ["target", "power", "speed"]: - if item in fields: - 
new_store[sensor][f"{item}s"] = deque( - maxlen=self.temp_store_size) - if sensor not in self.last_temps: - self.last_temps[sensor] = (0., 0., 0., 0.) + new_store[sensor] = {} + for field in reported_fields: + if field not in new_store[sensor]: + initial_val = _round_null(status[sensor][field], 2) + new_store[sensor][field] = deque( + [initial_val], maxlen=self.temp_store_size + ) self.temperature_store = new_store - # Prune unconfigured sensors in self.last_temps - for sensor in list(self.last_temps.keys()): - if sensor not in self.temperature_store: - del self.last_temps[sensor] - # Update initial temperatures - self._set_current_temps(status) - self.temp_update_timer.start() + self.temp_update_timer.start(delay=1.) else: logging.info("No sensors found") - self.last_temps = {} self.temperature_store = {} + self.temp_monitors = [] self.temp_update_timer.stop() - def _set_current_temps(self, data: Dict[str, Any]) -> None: - for sensor in self.temperature_store: - if sensor in data: - last_val = self.last_temps[sensor] - self.last_temps[sensor] = ( - round(data[sensor].get('temperature', last_val[0]), 2), - data[sensor].get('target', last_val[1]), - data[sensor].get('power', last_val[2]), - data[sensor].get('speed', last_val[3])) - def _update_temperature_store(self, eventtime: float) -> float: - # XXX - If klippy is not connected, set values to zero - # as they are unknown? - for sensor, vals in self.last_temps.items(): - self.temperature_store[sensor]['temperatures'].append(vals[0]) - for val, item in zip(vals[1:], ["targets", "powers", "speeds"]): - if item in self.temperature_store[sensor]: - self.temperature_store[sensor][item].append(val) + for sensor_name, sensor in self.temperature_store.items(): + sdata: Dict[str, Any] = self.subscription_cache.get(sensor_name, {}) + for field, store in sensor.items(): + store.append(_round_null(sdata.get(field, store[-1]), 2)) return eventtime + TEMP_UPDATE_TIME - async def _handle_temp_store_request(self, - web_request: WebRequest - ) -> Dict[str, Dict[str, List[float]]]: + async def _handle_temp_store_request( + self, web_request: WebRequest + ) -> Dict[str, Dict[str, List[Optional[float]]]]: + include_monitors = web_request.get_boolean("include_monitors", False) store = {} for name, sensor in self.temperature_store.items(): - store[name] = {k: list(v) for k, v in sensor.items()} + if not include_monitors and name in self.temp_monitors: + continue + store[name] = {f"{k}s": list(v) for k, v in sensor.items()} return store async def close(self) -> None: diff --git a/moonraker/components/database.py b/moonraker/components/database.py index 4ee3444..ced670e 100644 --- a/moonraker/components/database.py +++ b/moonraker/components/database.py @@ -1,67 +1,101 @@ -# Mimimal database for moonraker storage +# Sqlite database for Moonraker persistent storage # -# Copyright (C) 2021 Eric Callahan +# Copyright (C) 2021-2024 Eric Callahan # # This file may be distributed under the terms of the GNU GPLv3 license. 
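As context for review: the hunk below replaces the LMDB backend with SQLite but keeps the single-byte type-prefix record encoding (see RECORD_ENCODE_FUNCS/RECORD_DECODE_FUNCS further down). A standalone sketch of that round trip, with stdlib json standing in for the patch's json_wrapper; this is illustrative only and not part of the patch:

    import json
    import struct

    # Mirrors the type-prefix scheme: the first byte of each stored
    # value identifies the Python type of the record.
    def encode_value(value):
        if isinstance(value, bool):      # bool must be tested before int
            return b"?" + struct.pack("?", value)
        if isinstance(value, int):
            return b"q" + struct.pack("q", value)
        if isinstance(value, float):
            return b"d" + struct.pack("d", value)
        if isinstance(value, str):
            return b"s" + value.encode()
        if isinstance(value, (list, dict)):
            return json.dumps(value).encode()   # begins with b"[" or b"{"
        if value is None:
            return b"\x00"
        raise TypeError(f"unsupported type: {type(value)}")

    def decode_value(blob):
        prefix = blob[:1]
        if prefix == b"q":
            return struct.unpack("q", blob[1:])[0]
        if prefix == b"d":
            return struct.unpack("d", blob[1:])[0]
        if prefix == b"?":
            return struct.unpack("?", blob[1:])[0]
        if prefix == b"s":
            return blob[1:].decode()
        if prefix in (b"[", b"{"):
            return json.loads(blob)
        if prefix == b"\x00":
            return None
        raise ValueError(f"unknown format byte: {prefix!r}")

    # JSON containers need no prefix byte: their first character is the tag.
    assert decode_value(encode_value({"target": 60.0})) == {"target": 60.0}
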
from __future__ import annotations -import os -import json +import pathlib import struct import operator +import inspect import logging -from asyncio import Future, Task -from io import BytesIO +import contextlib +import time +from asyncio import Future, Task, Lock from functools import reduce -from threading import Lock as ThreadLock -import lmdb -from utils import SentinelClass, ServerError +from queue import Queue +from threading import Thread +import sqlite3 +from ..utils import Sentinel, ServerError +from ..utils import json_wrapper as jsonw +from ..common import RequestType, SqlTableDefinition # Annotation imports from typing import ( TYPE_CHECKING, Any, - Awaitable, Callable, - Mapping, TypeVar, Tuple, Optional, Union, Dict, List, - cast + Set, + Type, + Sequence, + Generator ) if TYPE_CHECKING: - from confighelper import ConfigHelper - from websockets import WebRequest - DBRecord = Union[int, float, bool, str, List[Any], Dict[str, Any]] - DBType = Optional[DBRecord] + from ..confighelper import ConfigHelper + from ..common import WebRequest + from .klippy_connection import KlippyConnection + from lmdb import Environment as LmdbEnvironment + from types import TracebackType + DBRecord = Optional[Union[int, float, bool, str, List[Any], Dict[str, Any]]] + DBType = DBRecord + SqlParams = Union[List[Any], Tuple[Any, ...], Dict[str, Any]] _T = TypeVar("_T") -DATABASE_VERSION = 1 -MAX_NAMESPACES = 100 -MAX_DB_SIZE = 200 * 2**20 +DATABASE_VERSION = 2 +SQL_DB_FILENAME = "moonraker-sql.db" +NAMESPACE_TABLE = "namespace_store" +REGISTRATION_TABLE = "table_registry" +SCHEMA_TABLE = ( + "sqlite_schema" if sqlite3.sqlite_version_info >= (3, 33, 0) + else "sqlite_master" +) -RECORD_ENCODE_FUNCS = { +RECORD_ENCODE_FUNCS: Dict[Type, Callable[..., bytes]] = { int: lambda x: b"q" + struct.pack("q", x), float: lambda x: b"d" + struct.pack("d", x), bool: lambda x: b"?" 
+ struct.pack("?", x), str: lambda x: b"s" + x.encode(), - list: lambda x: json.dumps(x).encode(), - dict: lambda x: json.dumps(x).encode(), + list: lambda x: jsonw.dumps(x), + dict: lambda x: jsonw.dumps(x), + type(None): lambda x: b"\x00", } -RECORD_DECODE_FUNCS = { +RECORD_DECODE_FUNCS: Dict[int, Callable[..., DBRecord]] = { ord("q"): lambda x: struct.unpack("q", x[1:])[0], ord("d"): lambda x: struct.unpack("d", x[1:])[0], ord("?"): lambda x: struct.unpack("?", x[1:])[0], ord("s"): lambda x: bytes(x[1:]).decode(), - ord("["): lambda x: json.load(BytesIO(x)), - ord("{"): lambda x: json.load(BytesIO(x)), + ord("["): lambda x: jsonw.loads(bytes(x)), + ord("{"): lambda x: jsonw.loads(bytes(x)), + 0: lambda _: None } -SENTINEL = SentinelClass.get_instance() +def encode_record(value: DBRecord) -> bytes: + try: + enc_func = RECORD_ENCODE_FUNCS[type(value)] + return enc_func(value) + except Exception: + raise ServerError( + f"Error encoding val: {value}, type: {type(value)}" + ) + +def decode_record(bvalue: bytes) -> DBRecord: + fmt = bvalue[0] + try: + decode_func = RECORD_DECODE_FUNCS[fmt] + return decode_func(bvalue) + except Exception: + val = bytes(bvalue).decode() + raise ServerError( + f"Error decoding value {val}, format: {chr(fmt)}" + ) def getitem_with_default(item: Dict, field: Any) -> Any: if not isinstance(item, Dict): @@ -71,111 +105,210 @@ def getitem_with_default(item: Dict, field: Any) -> Any: item[field] = {} return item[field] +def parse_namespace_key(key: Union[List[str], str]) -> List[str]: + try: + key_list = key if isinstance(key, list) else key.split('.') + except Exception: + key_list = [] + if not key_list or "" in key_list: + raise ServerError(f"Invalid Key Format: '{key}'") + return key_list + +def generate_lmdb_entries( + db_folder: pathlib.Path +) -> Generator[Tuple[str, str, bytes], Any, None]: + if not db_folder.joinpath("data.mdb").is_file(): + return + MAX_LMDB_NAMESPACES = 100 + MAX_LMDB_SIZE = 200 * 2**20 + inst_attempted: bool = False + while True: + try: + import lmdb + lmdb_env: LmdbEnvironment = lmdb.open( + str(db_folder), map_size=MAX_LMDB_SIZE, max_dbs=MAX_LMDB_NAMESPACES + ) + except ModuleNotFoundError: + if inst_attempted: + logging.info( + "Attempt to install LMDB failed, aborting conversion." 
+ ) + return + import sys + from ..utils import pip_utils + inst_attempted = True + logging.info("LMDB module not found, attempting installation...") + pip_cmd = f"{sys.executable} -m pip" + pip_exec = pip_utils.PipExecutor(pip_cmd, logging.info) + pip_exec.install_packages(["lmdb"]) + except Exception: + logging.exception( + "Failed to open lmdb database, aborting conversion" + ) + return + else: + break + lmdb_namespaces: List[Tuple[str, object]] = [] + with lmdb_env.begin(buffers=True) as txn: + # lookup existing namespaces + with txn.cursor() as cursor: + remaining = cursor.first() + while remaining: + key = bytes(cursor.key()) + if not key: + continue + db = lmdb_env.open_db(key, txn) + lmdb_namespaces.append((key.decode(), db)) + remaining = cursor.next() + # Copy all records + for (ns, db) in lmdb_namespaces: + logging.info(f"Converting LMDB namespace '{ns}'") + with txn.cursor(db=db) as cursor: + remaining = cursor.first() + while remaining: + key_buf = cursor.key() + value = b"" + try: + decoded_key = bytes(key_buf).decode() + value = bytes(cursor.value()) + except Exception: + logging.info("Database Key/Value Decode Error") + decoded_key = '' + remaining = cursor.next() + if not decoded_key or not value: + hk = bytes(key_buf).hex() + logging.info( + f"Invalid key or value '{hk}' found in " + f"lmdb namespace '{ns}'" + ) + continue + if ns == "moonraker": + if decoded_key == "database": + # Convert "database" field in the "moonraker" namespace + # to its own namespace if possible + db_info = decode_record(value) + if isinstance(db_info, dict): + for db_key, db_val in db_info.items(): + yield ("database", db_key, encode_record(db_val)) + continue + elif decoded_key == "database_version": + yield ("database", decoded_key, value) + continue + yield (ns, decoded_key, value) + lmdb_env.close() class MoonrakerDatabase: def __init__(self, config: ConfigHelper) -> None: self.server = config.get_server() self.eventloop = self.server.get_event_loop() - self.namespaces: Dict[str, object] = {} - self.thread_lock = ThreadLock() - self.database_path = os.path.expanduser(config.get( - 'database_path', "~/.moonraker_database")) - if not os.path.isdir(self.database_path): - os.mkdir(self.database_path) - self.lmdb_env = lmdb.open(self.database_path, map_size=MAX_DB_SIZE, - max_dbs=MAX_NAMESPACES) - with self.lmdb_env.begin(write=True, buffers=True) as txn: - # lookup existing namespaces - with txn.cursor() as cursor: - remaining = cursor.first() - while remaining: - key = bytes(cursor.key()) - self.namespaces[key.decode()] = self.lmdb_env.open_db( - key, txn) - remaining = cursor.next() - if "moonraker" not in self.namespaces: - mrdb = self.lmdb_env.open_db(b"moonraker", txn) - self.namespaces["moonraker"] = mrdb - txn.put(b'database_version', - self._encode_value(DATABASE_VERSION), - db=mrdb) - # Iterate through all records, checking for invalid keys - for ns, db in self.namespaces.items(): - with txn.cursor(db=db) as cursor: - remaining = cursor.first() - while remaining: - key_buf = cursor.key() - try: - decoded_key = bytes(key_buf).decode() - except Exception: - logging.info("Database Key Decode Error") - decoded_key = '' - if not decoded_key: - hex_key = bytes(key_buf).hex() - try: - invalid_val = self._decode_value(cursor.value()) - except Exception: - invalid_val = "" - logging.info( - f"Invalid Key '{hex_key}' found in namespace " - f"'{ns}', dropping value: {repr(invalid_val)}") - try: - remaining = cursor.delete() - except Exception: - logging.exception("Error Deleting LMDB Key") - else: 
- continue - remaining = cursor.next() - + self.registered_namespaces: Set[str] = set(["moonraker", "database"]) + self.registered_tables: Set[str] = set([NAMESPACE_TABLE, REGISTRATION_TABLE]) + self.backup_lock = Lock() + instance_id: str = self.server.get_app_args()["instance_uuid"] + db_path = self._get_database_folder(config) + self._sql_db = db_path.joinpath(SQL_DB_FILENAME) + self.db_provider = SqliteProvider(config, self._sql_db) + stored_iid = self.get_item("moonraker", "instance_id", None).result() + if stored_iid is not None: + if instance_id != stored_iid: + self.server.add_log_rollover_item( + "uuid_mismatch", + "Database: Stored Instance ID does not match current Instance " + f"ID.\n\nCurrent UUID: {instance_id}\nStored UUID: {stored_iid}" + ) + else: + self.insert_item("moonraker", "instance_id", instance_id) + dbinfo: Dict[str, Any] = self.get_item("database", default={}).result() # Protected Namespaces have read-only API access. Write access can # be granted by enabling the debug option. Forbidden namespaces # have no API access. This cannot be overridden. - self.protected_namespaces = set(self.get_item( - "moonraker", "database.protected_namespaces", - ["moonraker"]).result()) - self.forbidden_namespaces = set(self.get_item( - "moonraker", "database.forbidden_namespaces", - []).result()) - # Remove stale debug counter + ptns: Set[str] = set(dbinfo.get("protected_namespaces", [])) + fbns: Set[str] = set(dbinfo.get("forbidden_namespaces", [])) + self.protected_namespaces: Set[str] = ptns.union(["moonraker"]) + self.forbidden_namespaces: Set[str] = fbns.union(["database"]) + # Initialize Debug Counter config.getboolean("enable_database_debug", False, deprecate=True) - try: - self.delete_item("moonraker", "database.debug_counter") - except Exception: - pass + self.debug_counter: Dict[str, int] = {"get": 0, "post": 0, "delete": 0} + db_counter: Optional[Dict[str, int]] = dbinfo.get("debug_counter") + if isinstance(db_counter, dict): + self.debug_counter.update(db_counter) + self.server.add_log_rollover_item( + "database_debug_counter", + f"Database Debug Counter: {self.debug_counter}" + ) # Track unsafe shutdowns - unsafe_shutdowns: int = self.get_item( - "moonraker", "database.unsafe_shutdowns", 0).result() - msg = f"Unsafe Shutdown Count: {unsafe_shutdowns}" + self.unsafe_shutdowns: int = dbinfo.get("unsafe_shutdowns", 0) + msg = f"Unsafe Shutdown Count: {self.unsafe_shutdowns}" self.server.add_log_rollover_item("database", msg) + self.insert_item("database", "database_version", DATABASE_VERSION) + self.server.register_endpoint( + "/server/database/list", RequestType.GET, self._handle_list_request + ) + self.server.register_endpoint( + "/server/database/item", RequestType.all(), self._handle_item_request + ) + self.server.register_endpoint( + "/server/database/backup", RequestType.POST | RequestType.DELETE, + self._handle_backup_request + ) + self.server.register_endpoint( + "/server/database/restore", RequestType.POST, self._handle_restore_request + ) + self.server.register_endpoint( + "/server/database/compact", RequestType.POST, self._handle_compact_request + ) + self.server.register_debug_endpoint( + "/debug/database/list", RequestType.GET, self._handle_list_request + ) + self.server.register_debug_endpoint( + "/debug/database/item", RequestType.all(), self._handle_item_request + ) + self.server.register_debug_endpoint( + "/debug/database/table", RequestType.GET, self._handle_table_request + ) + # self.server.register_debug_endpoint( + # "/debug/database/row", 
RequestType.all(), + # self._handle_row_request + # ) - # Increment unsafe shutdown counter. This will be reset if - # moonraker is safely restarted - self.insert_item("moonraker", "database.unsafe_shutdowns", - unsafe_shutdowns + 1) - self.server.register_endpoint( - "/server/database/list", ['GET'], self._handle_list_request) - self.server.register_endpoint( - "/server/database/item", ["GET", "POST", "DELETE"], - self._handle_item_request) + async def component_init(self) -> None: + await self.db_provider.async_init() + # Increment unsafe shutdown counter. This will be reset if moonraker is + # safely restarted + await self.insert_item( + "database", "unsafe_shutdowns", self.unsafe_shutdowns + 1 + ) def get_database_path(self) -> str: - return self.database_path + return str(self._sql_db) - def _run_command(self, - command_func: Callable[..., _T], - *args - ) -> Future[_T]: - def func_wrapper(): - with self.thread_lock: - return command_func(*args) + @property + def database_path(self) -> pathlib.Path: + return self._sql_db - if self.server.is_running(): - return cast(Future, self.eventloop.run_in_thread(func_wrapper)) - else: - ret = func_wrapper() - fut = self.eventloop.create_future() - fut.set_result(ret) - return fut + def _get_database_folder(self, config: ConfigHelper) -> pathlib.Path: + app_args = self.server.get_app_args() + dep_path = config.get("database_path", None, deprecate=True) + db_path = pathlib.Path(app_args["data_path"]).joinpath("database") + if ( + app_args["is_default_data_path"] and + not db_path.joinpath(SQL_DB_FILENAME).exists() + ): + # Allow configured DB fallback + dep_path = dep_path or "~/.moonraker_database" + legacy_db = pathlib.Path(dep_path).expanduser().resolve() + try: + same = legacy_db.samefile(db_path) + except Exception: + same = False + if not same and legacy_db.joinpath("data.mdb").is_file(): + logging.info( + f"Reverting to legacy database folder: {legacy_db}" + ) + db_path = legacy_db + if not db_path.is_dir(): + db_path.mkdir() + return db_path # *** Nested Database operations*** # The insert_item(), delete_item(), and get_item() methods may operate on @@ -186,33 +319,761 @@ class MoonrakerDatabase: # identify the database record. Subsequent keys are optional and are # used to access elements in the deserialized objects. 
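By way of illustration, a hypothetical caller might exercise these nested keys as follows. This is not part of the patch: the namespace and key names are invented, and the snippet is assumed to run inside a component's async method where self.server is available. Each wrapper returns an awaitable asyncio Future.

    # Look up the database component (names below are examples only)
    db = self.server.lookup_component("database")
    # The first key selects the record; the rest index into the nested dict
    await db.insert_item("example_ns", "settings.ui.theme", "dark")
    # The list form addresses the same element as the dotted string form
    theme = await db.get_item("example_ns", ["settings", "ui", "theme"])
    # Passing a default suppresses the 404 error raised for missing keys
    font = await db.get_item("example_ns", "settings.ui.font", default=None)
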
- def insert_item(self, - namespace: str, - key: Union[List[str], str], - value: DBType - ) -> Future[None]: - return self._run_command(self._insert_impl, namespace, key, value) + def insert_item( + self, namespace: str, key: Union[List[str], str], value: DBType + ) -> Future[None]: + return self.db_provider.execute_db_function( + self.db_provider.insert_item, namespace, key, value + ) - def _insert_impl(self, - namespace: str, - key: Union[List[str], str], - value: DBType - ) -> None: - key_list = self._process_key(key) - if namespace not in self.namespaces: - self.namespaces[namespace] = self.lmdb_env.open_db( - namespace.encode()) + def update_item( + self, namespace: str, key: Union[List[str], str], value: DBType + ) -> Future[None]: + return self.db_provider.execute_db_function( + self.db_provider.update_item, namespace, key, value + ) + + def delete_item( + self, namespace: str, key: Union[List[str], str] + ) -> Future[Any]: + return self.db_provider.execute_db_function( + self.db_provider.delete_item, namespace, key + ) + + def get_item( + self, + namespace: str, + key: Optional[Union[List[str], str]] = None, + default: Any = Sentinel.MISSING + ) -> Future[Any]: + return self.db_provider.execute_db_function( + self.db_provider.get_item, namespace, key, default + ) + + # *** Batch operations*** + # The insert_batch(), move_batch(), delete_batch(), and get_batch() + # methods can be used to perform record level batch operations on + # a namespace in a single transaction. + + def insert_batch( + self, namespace: str, records: Dict[str, Any] + ) -> Future[None]: + return self.db_provider.execute_db_function( + self.db_provider.insert_batch, namespace, records + ) + + def move_batch( + self, namespace: str, source_keys: List[str], dest_keys: List[str] + ) -> Future[None]: + return self.db_provider.execute_db_function( + self.db_provider.move_batch, namespace, source_keys, dest_keys + ) + + def delete_batch( + self, namespace: str, keys: List[str] + ) -> Future[Dict[str, Any]]: + return self.db_provider.execute_db_function( + self.db_provider.delete_batch, namespace, keys + ) + + def get_batch( + self, namespace: str, keys: List[str] + ) -> Future[Dict[str, Any]]: + return self.db_provider.execute_db_function( + self.db_provider.get_batch, namespace, keys + ) + + # *** Namespace level operations*** + + def update_namespace( + self, namespace: str, values: Dict[str, DBRecord] + ) -> Future[None]: + return self.db_provider.execute_db_function( + self.db_provider.insert_batch, namespace, values + ) + + def clear_namespace(self, namespace: str) -> Future[None]: + return self.db_provider.execute_db_function( + self.db_provider.clear_namespace, namespace + ) + + def sync_namespace( + self, namespace: str, values: Dict[str, DBRecord] + ) -> Future[None]: + return self.db_provider.execute_db_function( + self.db_provider.sync_namespace, namespace, values + ) + + def ns_length(self, namespace: str) -> Future[int]: + return self.db_provider.execute_db_function( + self.db_provider.get_namespace_length, namespace + ) + + def ns_keys(self, namespace: str) -> Future[List[str]]: + return self.db_provider.execute_db_function( + self.db_provider.get_namespace_keys, namespace, + ) + + def ns_values(self, namespace: str) -> Future[List[Any]]: + return self.db_provider.execute_db_function( + self.db_provider.get_namespace_values, namespace + ) + + def ns_items(self, namespace: str) -> Future[List[Tuple[str, Any]]]: + return self.db_provider.execute_db_function( + self.db_provider.get_namespace_items, 
namespace
+        )
+
+    def ns_contains(
+        self, namespace: str, key: Union[List[str], str]
+    ) -> Future[bool]:
+        return self.db_provider.execute_db_function(
+            self.db_provider.namespace_contains, namespace, key
+        )
+
+    # SQL direct query methods
+    def sql_execute(
+        self, sql: str, params: SqlParams = []
+    ) -> Future[SqliteCursorProxy]:
+        return self.db_provider.execute_db_function(
+            self.db_provider.sql_execute, sql, params
+        )
+
+    def sql_executemany(
+        self, sql: str, params: Sequence[SqlParams] = []
+    ) -> Future[SqliteCursorProxy]:
+        return self.db_provider.execute_db_function(
+            self.db_provider.sql_executemany, sql, params
+        )
+
+    def sql_executescript(self, sql: str) -> Future[SqliteCursorProxy]:
+        return self.db_provider.execute_db_function(
+            self.db_provider.sql_executescript, sql
+        )
+
+    def sql_commit(self) -> Future[None]:
+        return self.db_provider.execute_db_function(self.db_provider.sql_commit)
+
+    def sql_rollback(self) -> Future[None]:
+        return self.db_provider.execute_db_function(self.db_provider.sql_rollback)
+
+    def queue_sql_callback(
+        self, callback: Callable[[sqlite3.Connection], Any]
+    ) -> Future[Any]:
+        return self.db_provider.execute_db_function(callback)
+
+    def compact_database(self) -> Future[Dict[str, int]]:
+        return self.db_provider.execute_db_function(
+            self.db_provider.compact_database
+        )
+
+    def backup_database(self, bkp_path: pathlib.Path) -> Future[None]:
+        return self.db_provider.execute_db_function(
+            self.db_provider.backup_database, bkp_path
+        )
+
+    def restore_database(self, restore_path: pathlib.Path) -> Future[Dict[str, Any]]:
+        return self.db_provider.execute_db_function(
+            self.db_provider.restore_database, restore_path
+        )
+
+    def register_local_namespace(
+        self, namespace: str, forbidden: bool = False, parse_keys: bool = False
+    ) -> NamespaceWrapper:
+        if namespace in self.registered_namespaces:
+            raise self.server.error(f"Namespace '{namespace}' already registered")
+        self.registered_namespaces.add(namespace)
+        self.db_provider.register_namespace(namespace)
+        if forbidden:
+            if namespace not in self.forbidden_namespaces:
+                self.forbidden_namespaces.add(namespace)
+                self.insert_item(
+                    "database", "forbidden_namespaces",
+                    sorted(self.forbidden_namespaces)
+                )
+        elif namespace not in self.protected_namespaces:
+            self.protected_namespaces.add(namespace)
+            self.insert_item(
+                "database", "protected_namespaces", sorted(self.protected_namespaces)
+            )
+        return NamespaceWrapper(namespace, self, parse_keys)
+
+    def wrap_namespace(
+        self, namespace: str, parse_keys: bool = True
+    ) -> NamespaceWrapper:
+        if namespace not in self.db_provider.namespaces:
+            raise self.server.error(f"Namespace '{namespace}' not found", 404)
+        return NamespaceWrapper(namespace, self, parse_keys)
+
+    def unregister_local_namespace(self, namespace: str) -> None:
+        if namespace in self.registered_namespaces:
+            self.registered_namespaces.remove(namespace)
+        if namespace in self.forbidden_namespaces:
+            self.forbidden_namespaces.remove(namespace)
+            self.insert_item(
+                "database", "forbidden_namespaces", sorted(self.forbidden_namespaces)
+            )
+        if namespace in self.protected_namespaces:
+            self.protected_namespaces.remove(namespace)
+            self.insert_item(
+                "database", "protected_namespaces", sorted(self.protected_namespaces)
+            )
+
+    def drop_empty_namespace(self, namespace: str) -> Future[None]:
+        return self.db_provider.execute_db_function(
+            self.db_provider.drop_empty_namespace, namespace
+        )
+
+    def get_provider_wrapper(self) -> DBProviderWrapper:
+        return 
self.db_provider.get_provider_wapper() + + def get_backup_dir(self) -> pathlib.Path: + bkp_dir = pathlib.Path(self.server.get_app_arg("data_path")) + return bkp_dir.joinpath("backup/database").resolve() + + def register_table(self, table_def: SqlTableDefinition) -> SqlTableWrapper: + if table_def.name in self.registered_tables: + raise self.server.error(f"Table '{table_def.name}' already registered") + self.registered_tables.add(table_def.name) + self.db_provider.register_table(table_def) + return SqlTableWrapper(self, table_def) + + async def _handle_compact_request(self, web_request: WebRequest) -> Dict[str, int]: + kconn: KlippyConnection = self.server.lookup_component("klippy_connection") + if kconn.is_printing(): + raise self.server.error("Cannot compact when Klipper is printing") + async with self.backup_lock: + return await self.compact_database() + + async def _handle_backup_request(self, web_request: WebRequest) -> Dict[str, Any]: + async with self.backup_lock: + request_type = web_request.get_request_type() + if request_type == RequestType.POST: + kconn: KlippyConnection + kconn = self.server.lookup_component("klippy_connection") + if kconn.is_printing(): + raise self.server.error("Cannot backup when Klipper is printing") + suffix = time.strftime("%Y%m%d-%H%M%S", time.localtime()) + db_name = web_request.get_str("filename", f"sqldb-backup-{suffix}.db") + bkp_dir = self.get_backup_dir() + bkp_path = bkp_dir.joinpath(db_name).resolve() + if bkp_dir not in bkp_path.parents: + raise self.server.error(f"Invalid name {db_name}.") + await self.backup_database(bkp_path) + elif request_type == RequestType.DELETE: + db_name = web_request.get_str("filename") + bkp_dir = self.get_backup_dir() + bkp_path = bkp_dir.joinpath(db_name).resolve() + if bkp_dir not in bkp_path.parents: + raise self.server.error(f"Invalid name {db_name}.") + if not bkp_path.is_file(): + raise self.server.error( + f"Backup file {db_name} does not exist", 404 + ) + await self.eventloop.run_in_thread(bkp_path.unlink) + else: + raise self.server.error("Invalid request type") + return { + "backup_path": str(bkp_path) + } + + async def _handle_restore_request(self, web_request: WebRequest) -> Dict[str, Any]: + kconn: KlippyConnection = self.server.lookup_component("klippy_connection") + if kconn.is_printing(): + raise self.server.error("Cannot restore when Klipper is printing") + async with self.backup_lock: + db_name = web_request.get_str("filename") + bkp_dir = self.get_backup_dir() + restore_path = bkp_dir.joinpath(db_name).resolve() + if bkp_dir not in restore_path.parents: + raise self.server.error(f"Invalid name {db_name}.") + restore_info = await self.restore_database(restore_path) + self.server.restart(.1) + return restore_info + + async def _handle_list_request( + self, web_request: WebRequest + ) -> Dict[str, List[str]]: + path = web_request.get_endpoint() + ns_list = set(self.db_provider.namespaces) + bkp_dir = self.get_backup_dir() + backups: List[str] = [] + if bkp_dir.is_dir(): + backups = [bkp.name for bkp in bkp_dir.iterdir() if bkp.is_file()] + if not path.startswith("/debug/"): + ns_list -= self.forbidden_namespaces + return { + "namespaces": list(ns_list), + "backups": backups + } + else: + return { + "namespaces": list(ns_list), + "backups": backups, + "tables": list(self.db_provider.tables) + } + + async def _handle_item_request(self, web_request: WebRequest) -> Dict[str, Any]: + req_type = web_request.get_request_type() + is_debug = web_request.get_endpoint().startswith("/debug/") + namespace = 
web_request.get_str("namespace") + if namespace in self.forbidden_namespaces and not is_debug: + raise self.server.error( + f"Read/Write access to namespace '{namespace}' is forbidden", 403 + ) + if req_type == RequestType.GET: + key = web_request.get("key", None) + if key is not None and not isinstance(key, (list, str)): + raise self.server.error( + "Value for argument 'key' is an invalid type: " + f"{type(key).__name__}" + ) + val = await self.get_item(namespace, key) + else: + if namespace in self.protected_namespaces and not is_debug: + raise self.server.error( + f"Write access to namespace '{namespace}' is forbidden", 403 + ) + key = web_request.get("key") + if not isinstance(key, (list, str)): + raise self.server.error( + "Value for argument 'key' is an invalid type: " + f"{type(key).__name__}" + ) + if req_type == RequestType.POST: + val = web_request.get("value") + await self.insert_item(namespace, key, val) + elif req_type == RequestType.DELETE: + val = await self.delete_item(namespace, key) + await self.drop_empty_namespace(namespace) + else: + raise self.server.error(f"Invalid request type {req_type}") + + if is_debug: + name = req_type.name or str(req_type).split(".", 1)[-1] + self.debug_counter[name.lower()] += 1 + await self.insert_item( + "database", "debug_counter", self.debug_counter + ) + self.server.add_log_rollover_item( + "database_debug_counter", + f"Database Debug Counter: {self.debug_counter}", + log=False + ) + return {'namespace': namespace, 'key': key, 'value': val} + + async def close(self) -> None: + if not self.db_provider.is_restored(): + # Don't overwrite unsafe shutdowns on a restored database + await self.insert_item( + "database", "unsafe_shutdowns", self.unsafe_shutdowns + ) + # Stop command thread + await self.db_provider.stop() + + async def _handle_table_request(self, web_request: WebRequest) -> Dict[str, Any]: + table = web_request.get_str("table") + if table not in self.db_provider.tables: + raise self.server.error(f"Table name '{table}' does not exist", 404) + cur = await self.sql_execute(f"SELECT rowid, * FROM {table}") + return { + "table_name": table, + "rows": [dict(r) for r in await cur.fetchall()] + } + + async def _handle_row_request(self, web_request: WebRequest) -> Dict[str, Any]: + req_type = web_request.get_request_type() + table = web_request.get_str("table") + if table not in self.db_provider.tables: + raise self.server.error( + f"Table name '{table}' does not exist", 404 + ) + if req_type == RequestType.POST: + row_id = web_request.get_int("id", None) + values = web_request.get("values") + assert isinstance(values, dict) + keys = set(values.keys()) + cur = await self.sql_execute(f"PRAGMA table_info('{table}')") + columns = set([r["name"] for r in await cur.fetchall()]) + if row_id is None: + # insert + if keys != columns: + raise self.server.error( + "Keys in value to insert do not match columns of tables" + ) + val_str = ",".join([f":{col}" for col in columns]) + cur = await self.sql_execute( + f"INSERT INTO {table} VALUES({val_str})", values + ) + else: + # update + if not keys.issubset(columns): + raise self.server.error( + "Keys in value to update are not a subset of available columns" + ) + col_str = ",".join([f"{col}" for col in columns if col in keys]) + vals = [values[col] for col in columns if col in keys] + vals.append(row_id) + val_str = ",".join("?" 
* (len(vals) - 1))
+                cur = await self.sql_execute(
+                    f"UPDATE {table} SET ({col_str}) = ({val_str}) WHERE rowid = ?",
+                    vals
+                )
+                if not cur.rowcount:
+                    raise self.server.error(f"No row with id {row_id} to update")
+        else:
+            row_id = web_request.get_int("id")
+            cur = await self.sql_execute(
+                f"SELECT rowid, * FROM {table} WHERE rowid = ?", (row_id,)
+            )
+            item = dict(await cur.fetchone() or {})
+            if req_type == RequestType.DELETE:
+                await self.sql_execute(
+                    f"DELETE FROM {table} WHERE rowid = ?", (row_id,)
+                )
+        return {
+            "row": item
+        }
+
+class SqliteProvider(Thread):
+    def __init__(self, config: ConfigHelper, db_path: pathlib.Path) -> None:
+        super().__init__()
+        self.server = config.get_server()
+        self.asyncio_loop = self.server.get_event_loop().asyncio_loop
+        self._namespaces: Set[str] = set()
+        self._tables: Set[str] = set()
+        self._db_path = db_path
+        self.restored: bool = False
+        self.command_queue: Queue[Tuple[Future, Optional[Callable], Tuple[Any, ...]]]
+        self.command_queue = Queue()
+        sqlite3.register_converter("record", decode_record)
+        sqlite3.register_converter("pyjson", jsonw.loads)
+        sqlite3.register_converter("pybool", lambda x: bool(x))
+        sqlite3.register_adapter(list, jsonw.dumps)
+        sqlite3.register_adapter(dict, jsonw.dumps)
+        self.sync_conn = sqlite3.connect(
+            str(db_path), timeout=1., detect_types=sqlite3.PARSE_DECLTYPES
+        )
+        self.sync_conn.row_factory = sqlite3.Row
+        self.setup_database()
+
+    @property
+    def namespaces(self) -> Set[str]:
+        return self._namespaces
+
+    @property
+    def tables(self) -> Set[str]:
+        return self._tables
+
+    def async_init(self) -> Future[str]:
+        self.sync_conn.close()
+        self.start()
+        fut = self.asyncio_loop.create_future()
+        self.command_queue.put_nowait((fut, lambda x: "sqlite", tuple()))
+        return fut
+
+    def run(self) -> None:
+        loop = self.asyncio_loop
+        conn = sqlite3.connect(
+            str(self._db_path), timeout=1., detect_types=sqlite3.PARSE_DECLTYPES
+        )
+        conn.row_factory = sqlite3.Row
+        while True:
+            future, func, args = self.command_queue.get()
+            if func is None:
+                break
+            try:
+                ret = func(conn, *args)
+            except Exception as e:
+                loop.call_soon_threadsafe(future.set_exception, e)
+            else:
+                loop.call_soon_threadsafe(future.set_result, ret)
+        conn.close()
+        loop.call_soon_threadsafe(future.set_result, None)
+
+    def execute_db_function(
+        self, command_func: Callable[..., _T], *args
+    ) -> Future[_T]:
+        fut = self.asyncio_loop.create_future()
+        if self.is_alive():
+            self.command_queue.put_nowait((fut, command_func, args))
+        else:
+            ret = command_func(self.sync_conn, *args)
+            fut.set_result(ret)
+        return fut
+
+    def setup_database(self) -> None:
+        self.server.add_log_rollover_item(
+            "sqlite_intro",
+            "Loading Sqlite database provider. 
" + f"Sqlite Version: {sqlite3.sqlite_version}" + ) + cur = self.sync_conn.execute( + f"SELECT name FROM {SCHEMA_TABLE} WHERE type='table'" + ) + cur.arraysize = 100 + self._tables = set([row[0] for row in cur.fetchall()]) + logging.debug(f"Detected SQL Tables: {self._tables}") + if NAMESPACE_TABLE not in self._tables: + self._create_default_tables() + self._migrate_from_lmdb() + elif REGISTRATION_TABLE not in self._tables: + self._create_registration_table() + # Find namespaces + cur = self.sync_conn.execute( + f"SELECT DISTINCT namespace FROM {NAMESPACE_TABLE}" + ) + cur.arraysize = 100 + self._namespaces = set([row[0] for row in cur.fetchall()]) + logging.debug(f"Detected namespaces: {self._namespaces}") + + def _migrate_from_lmdb(self) -> None: + db_folder = self._db_path.parent + if not db_folder.joinpath("data.mdb").is_file(): + return + logging.info("Converting LMDB Database to Sqlite...") + with self.sync_conn: + self.sync_conn.executemany( + f"INSERT INTO {NAMESPACE_TABLE} VALUES (?,?,?)", + generate_lmdb_entries(db_folder) + ) + + def _create_default_tables(self) -> None: + self._create_registration_table() + if NAMESPACE_TABLE in self._tables: + return + namespace_proto = inspect.cleandoc( + f""" + {NAMESPACE_TABLE} ( + namespace TEXT NOT NULL, + key TEXT NOT NULL, + value record NOT NULL, + PRIMARY KEY (namespace, key) + ) + """ + ) + with self.sync_conn: + self.sync_conn.execute(f"CREATE TABLE {namespace_proto}") + self._save_registered_table(NAMESPACE_TABLE, namespace_proto, 1) + self.server.add_log_rollover_item( + "db_default_table", f"Created default SQL table {NAMESPACE_TABLE}" + ) + + def _create_registration_table(self) -> None: + if REGISTRATION_TABLE in self._tables: + return + reg_tbl_proto = inspect.cleandoc( + f""" + {REGISTRATION_TABLE} ( + name TEXT NOT NULL PRIMARY KEY, + prototype TEXT NOT NULL, + version INT + ) + """ + ) + with self.sync_conn: + self.sync_conn.execute(f"CREATE TABLE {reg_tbl_proto}") + self._tables.add(REGISTRATION_TABLE) + + def _save_registered_table( + self, table_name: str, prototype: str, version: int + ) -> None: + with self.sync_conn: + self.sync_conn.execute( + f"INSERT INTO {REGISTRATION_TABLE} VALUES(?, ?, ?) " + "ON CONFLICT(name) DO UPDATE SET " + "prototype=excluded.prototype, version=excluded.version", + (table_name, prototype, version) + ) + self._tables.add(table_name) + + def _lookup_registered_table(self, table_name: str) -> Tuple[str, int]: + cur = self.sync_conn.execute( + f"SELECT prototype, version FROM {REGISTRATION_TABLE} " + f"WHERE name = ?", + (table_name,) + ) + ret = cur.fetchall() + if not ret: + return "", 0 + return tuple(ret[0]) # type: ignore + + def _insert_record( + self, conn: sqlite3.Connection, namespace: str, key: str, val: DBType + ) -> bool: + if val is None: + return False + try: + with conn: + conn.execute( + f"INSERT INTO {NAMESPACE_TABLE} VALUES(?, ?, ?) " + "ON CONFLICT(namespace, key) DO UPDATE SET value=excluded.value", + (namespace, key, encode_record(val)) + ) + except sqlite3.Error: + if self.server.is_verbose_enabled(): + logging.error("Error inserting record for key") + return False + return True + + def _get_record( + self, + conn: sqlite3.Connection, + namespace: str, + key: str, + default: Union[Sentinel, DBRecord] = Sentinel.MISSING + ) -> DBRecord: + cur = conn.execute( + f"SELECT value FROM {NAMESPACE_TABLE} WHERE namespace = ? 
and key = ?", + (namespace, key) + ) + val = cur.fetchone() + if val is None: + if default is Sentinel.MISSING: + raise self.server.error( + f"Key '{key}' in namespace '{namespace}' not found", 404 + ) + return default + return val[0] + + # Namespace Query Ops + + def get_namespace( + self, conn: sqlite3.Connection, namespace: str, must_exist: bool = True + ) -> Dict[str, Any]: + if namespace not in self._namespaces: + if not must_exist: + return {} + raise self.server.error(f"Namespace {namespace} not found", 404) + cur = conn.execute( + f"SELECT key, value FROM {NAMESPACE_TABLE} WHERE namespace = ?", + (namespace,) + ) + cur.arraysize = 200 + return dict(cur.fetchall()) + + def iter_namespace( + self, + conn: sqlite3.Connection, + namespace: str, + count: int = 1000 + ) -> Generator[Dict[str, Any], Any, None]: + if self.is_alive(): + raise self.server.error("Cannot iterate a namespace asynchronously") + if namespace not in self._namespaces: + return + offset: int = 0 + total = self.get_namespace_length(conn, namespace) + while offset < total: + cur = conn.execute( + f"SELECT key, value FROM {NAMESPACE_TABLE} WHERE namespace = ? " + f"LIMIT ? OFFSET ?", + (namespace, count, offset) + ) + cur.arraysize = count + ret = cur.fetchall() + if not ret: + return + yield dict(ret) + offset += count + + def clear_namespace(self, conn: sqlite3.Connection, namespace: str) -> None: + with conn: + conn.execute( + f"DELETE FROM {NAMESPACE_TABLE} WHERE namespace = ?", (namespace,) + ) + + def drop_empty_namespace(self, conn: sqlite3.Connection, namespace: str) -> None: + if namespace in self._namespaces: + if self.get_namespace_length(conn, namespace) == 0: + self._namespaces.remove(namespace) + + def sync_namespace( + self, conn: sqlite3.Connection, namespace: str, values: Dict[str, DBRecord] + ) -> None: + def generate_params(): + for key, val in values.items(): + yield (namespace, key, val) + with conn: + conn.execute( + f"DELETE FROM {NAMESPACE_TABLE} WHERE namespace = ?", (namespace,) + ) + conn.executemany( + f"INSERT INTO {NAMESPACE_TABLE} VALUES(?, ?, ?)", generate_params() + ) + + def get_namespace_length(self, conn: sqlite3.Connection, namespace: str) -> int: + cur = conn.execute( + f"SELECT COUNT(namespace) FROM {NAMESPACE_TABLE} WHERE namespace = ?", + (namespace,) + ) + return cur.fetchone()[0] + + def get_namespace_keys(self, conn: sqlite3.Connection, namespace: str) -> List[str]: + cur = conn.execute( + f"SELECT key FROM {NAMESPACE_TABLE} WHERE namespace = ?", + (namespace,) + ) + cur.arraysize = 200 + return [row[0] for row in cur.fetchall()] + + def get_namespace_values( + self, conn: sqlite3.Connection, namespace: str + ) -> List[Any]: + cur = conn.execute( + f"SELECT value FROM {NAMESPACE_TABLE} WHERE namespace = ?", + (namespace,) + ) + cur.arraysize = 200 + return [row[0] for row in cur.fetchall()] + + def get_namespace_items( + self, conn: sqlite3.Connection, namespace: str + ) -> List[Tuple[str, Any]]: + cur = conn.execute( + f"SELECT key, value FROM {NAMESPACE_TABLE} WHERE namespace = ?", + (namespace,) + ) + cur.arraysize = 200 + return cur.fetchall() + + def namespace_contains( + self, conn: sqlite3.Connection, namespace: str, key: Union[List[str], str] + ) -> bool: + try: + key_list = parse_namespace_key(key) + if len(key_list) == 1: + cur = conn.execute( + f"SELECT key FROM {NAMESPACE_TABLE} " + "WHERE namespace = ? 
and key = ?", + (namespace, key) + ) + return cur.fetchone() is not None + record = self._get_record(conn, namespace, key_list[0]) + reduce(operator.getitem, key_list[1:], record) # type: ignore + except Exception: + return False + return True + + def insert_item( + self, + conn: sqlite3.Connection, + namespace: str, + key: Union[List[str], str], + value: DBType + ) -> None: + key_list = parse_namespace_key(key) record = value if len(key_list) > 1: - record = self._get_record(namespace, key_list[0], force=True) + record = self._get_record(conn, namespace, key_list[0], default={}) if not isinstance(record, dict): prev_type = type(record) record = {} logging.info( f"Warning: Key {key_list[0]} contains a value of type " - f"{prev_type}. Overwriting with an object.") - item: Dict[str, Any] = reduce( - getitem_with_default, key_list[1:-1], record) + f"{prev_type}. Overwriting with an object." + ) + item: DBType = reduce(getitem_with_default, key_list[1:-1], record) if not isinstance(item, dict): rpt_key = ".".join(key_list[:-1]) raise self.server.error( @@ -220,72 +1081,53 @@ class MoonrakerDatabase: "not a dictionary object, cannot insert" ) item[key_list[-1]] = value - if not self._insert_record(namespace, key_list[0], record): - logging.info( - f"Error inserting key '{key}' in namespace '{namespace}'") + if not self._insert_record(conn, namespace, key_list[0], record): + logging.info(f"Error inserting key '{key}' in namespace '{namespace}'") + else: + self._namespaces.add(namespace) - def update_item(self, - namespace: str, - key: Union[List[str], str], - value: DBType - ) -> Future[None]: - return self._run_command(self._update_impl, namespace, key, value) - - def _update_impl(self, - namespace: str, - key: Union[List[str], str], - value: DBType - ) -> None: - key_list = self._process_key(key) - record = self._get_record(namespace, key_list[0]) + def update_item( + self, + conn: sqlite3.Connection, + namespace: str, + key: Union[List[str], str], + value: DBType + ) -> None: + key_list = parse_namespace_key(key) + record = self._get_record(conn, namespace, key_list[0]) if len(key_list) == 1: if isinstance(record, dict) and isinstance(value, dict): record.update(value) else: - if value is None: - raise self.server.error( - f"Item at key '{key}', namespace '{namespace}': " - "Cannot assign a record level null value") record = value else: try: assert isinstance(record, dict) item: Dict[str, Any] = reduce( - operator.getitem, key_list[1:-1], record) + operator.getitem, key_list[1:-1], record + ) except Exception: raise self.server.error( - f"Key '{key}' in namespace '{namespace}' not found", - 404) + f"Key '{key}' in namespace '{namespace}' not found", 404 + ) if not isinstance(item, dict) or key_list[-1] not in item: rpt_key = ".".join(key_list[:-1]) raise self.server.error( f"Item at key '{rpt_key}' in namespace '{namespace}'is " "not a dictionary object, cannot update" ) - if isinstance(item[key_list[-1]], dict) \ - and isinstance(value, dict): + if isinstance(item[key_list[-1]], dict) and isinstance(value, dict): item[key_list[-1]].update(value) else: item[key_list[-1]] = value - if not self._insert_record(namespace, key_list[0], record): - logging.info( - f"Error updating key '{key}' in namespace '{namespace}'") + if not self._insert_record(conn, namespace, key_list[0], record): + logging.info(f"Error updating key '{key}' in namespace '{namespace}'") - def delete_item(self, - namespace: str, - key: Union[List[str], str], - drop_empty_db: bool = False - ) -> Future[Any]: - return 
self._run_command(self._delete_impl, namespace, key, - drop_empty_db) - - def _delete_impl(self, - namespace: str, - key: Union[List[str], str], - drop_empty_db: bool = False - ) -> Any: - key_list = self._process_key(key) - val = record = self._get_record(namespace, key_list[0]) + def delete_item( + self, conn: sqlite3.Connection, namespace: str, key: Union[List[str], str] + ) -> Any: + key_list = parse_namespace_key(key) + val = record = self._get_record(conn, namespace, key_list[0]) remove_record = True if len(key_list) > 1: try: @@ -299,512 +1141,516 @@ class MoonrakerDatabase: 404) remove_record = False if record else True if remove_record: - db = self.namespaces[namespace] - with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn: - ret = txn.delete(key_list[0].encode()) - with txn.cursor() as cursor: - if not cursor.first() and drop_empty_db: - txn.drop(db) - del self.namespaces[namespace] + with conn: + conn.execute( + f"DELETE FROM {NAMESPACE_TABLE} WHERE namespace = ? and key = ?", + (namespace, key_list[0]) + ) else: - ret = self._insert_record(namespace, key_list[0], record) - if not ret: - logging.info( - f"Error deleting key '{key}' from namespace " - f"'{namespace}'") + ret = self._insert_record(conn, namespace, key_list[0], record) + if not ret: + logging.info( + f"Error deleting key '{key}' from namespace '{namespace}'" + ) return val - def get_item(self, - namespace: str, - key: Optional[Union[List[str], str]] = None, - default: Any = SENTINEL - ) -> Future[Any]: - return self._run_command(self._get_impl, namespace, key, default) - - def _get_impl(self, - namespace: str, - key: Optional[Union[List[str], str]] = None, - default: Any = SENTINEL - ) -> Any: + def get_item( + self, + conn: sqlite3.Connection, + namespace: str, + key: Optional[Union[List[str], str]] = None, + default: Any = Sentinel.MISSING + ) -> Any: try: if key is None: - return self._get_namespace(namespace) - key_list = self._process_key(key) - ns = self._get_record(namespace, key_list[0]) - val = reduce(operator.getitem, # type: ignore - key_list[1:], ns) + return self.get_namespace(conn, namespace) + key_list = parse_namespace_key(key) + rec = self._get_record(conn, namespace, key_list[0]) + val = reduce(operator.getitem, key_list[1:], rec) # type: ignore except Exception as e: - if not isinstance(default, SentinelClass): + if default is not Sentinel.MISSING: return default if isinstance(e, self.server.error): raise raise self.server.error( - f"Key '{key}' in namespace '{namespace}' not found", 404) + f"Key '{key}' in namespace '{namespace}' not found", 404 + ) return val - # *** Batch operations*** - # The insert_batch(), move_batch(), delete_batch(), and get_batch() - # methods can be used to perform record level batch operations on - # a namespace in a single transaction. 
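For reference, the replacement batch methods shown earlier in this file keep the same record-level call shape as the LMDB versions removed below. A hypothetical usage sketch, not part of the patch (namespace and record names invented; assumes "db" is the database component, as in the earlier sketch):

    # Each call runs in a single transaction on the provider thread
    records = {"job_a": {"status": "complete"}, "job_b": {"status": "queued"}}
    await db.insert_batch("example_ns", records)
    fetched = await db.get_batch("example_ns", ["job_a", "job_b"])
    # move_batch pairs source and destination keys positionally
    await db.move_batch("example_ns", ["job_a"], ["job_a_old"])
    # delete_batch returns the values of the removed records
    removed = await db.delete_batch("example_ns", ["job_b"])
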
- - def insert_batch(self, - namespace: str, - records: Dict[str, Any] - ) -> Future[None]: - return self._run_command(self._insert_batch_impl, namespace, records) - - def _insert_batch_impl(self, - namespace: str, - records: Dict[str, Any] - ) -> None: - if namespace not in self.namespaces: - self.namespaces[namespace] = self.lmdb_env.open_db( - namespace.encode()) - db = self.namespaces[namespace] - with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn: + def insert_batch( + self, conn: sqlite3.Connection, namespace: str, records: Dict[str, Any] + ) -> None: + def generate_params(): for key, val in records.items(): - ret = txn.put(key.encode(), self._encode_value(val)) - if not ret: - logging.info(f"Error inserting record {key} into " - f"namespace {namespace}") + yield (namespace, key, encode_record(val)) + with conn: + conn.executemany( + f"INSERT INTO {NAMESPACE_TABLE} VALUES(?, ?, ?) " + "ON CONFLICT(namespace, key) DO UPDATE SET value=excluded.value", + generate_params() + ) + self._namespaces.add(namespace) - def move_batch(self, - namespace: str, - source_keys: List[str], - dest_keys: List[str] - ) -> Future[None]: - return self._run_command(self._move_batch_impl, namespace, - source_keys, dest_keys) + def move_batch( + self, + conn: sqlite3.Connection, + namespace: str, + source_keys: List[str], + dest_keys: List[str] + ) -> None: + def generate_params(): + for src, dest in zip(source_keys, dest_keys): + yield (dest, namespace, src) + with conn: + conn.executemany( + f"UPDATE OR REPLACE {NAMESPACE_TABLE} SET key = ? " + "WHERE namespace = ? and key = ?", + generate_params() + ) - def _move_batch_impl(self, - namespace: str, - source_keys: List[str], - dest_keys: List[str] - ) -> None: - db = self._get_db(namespace) - if len(source_keys) != len(dest_keys): - raise self.server.error( - "Source key list and destination key list must " - "be of the same length") - with self.lmdb_env.begin(write=True, db=db) as txn: - for source, dest in zip(source_keys, dest_keys): - val = txn.pop(source.encode()) - if val is not None: - txn.put(dest.encode(), val) - - def delete_batch(self, - namespace: str, - keys: List[str] - ) -> Future[Dict[str, Any]]: - return self._run_command(self._del_batch_impl, namespace, keys) - - def _del_batch_impl(self, - namespace: str, - keys: List[str] - ) -> Dict[str, Any]: - db = self._get_db(namespace) - result: Dict[str, Any] = {} - with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn: + def delete_batch( + self, conn: sqlite3.Connection, namespace: str, keys: List[str] + ) -> Dict[str, Any]: + def generate_params(): for key in keys: - val = txn.pop(key.encode()) - if val is not None: - result[key] = self._decode_value(val) - return result - - def get_batch(self, - namespace: str, - keys: List[str] - ) -> Future[Dict[str, Any]]: - return self._run_command(self._get_batch_impl, namespace, keys) - - def _get_batch_impl(self, - namespace: str, - keys: List[str] - ) -> Dict[str, Any]: - db = self._get_db(namespace) - result: Dict[str, Any] = {} - encoded_keys: List[bytes] = [k.encode() for k in keys] - with self.lmdb_env.begin(buffers=True, db=db) as txn: - with txn.cursor() as cursor: - vals = cursor.getmulti(encoded_keys) - result = {bytes(k).decode(): self._decode_value(v) - for k, v in vals} - return result - - # *** Namespace level operations*** - - def update_namespace(self, - namespace: str, - value: Mapping[str, DBRecord] - ) -> Future[None]: - return self._run_command(self._update_ns_impl, namespace, value) - - def 
_update_ns_impl(self, - namespace: str, - value: Mapping[str, DBRecord] - ) -> None: - if not value: - return - db = self._get_db(namespace) - with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn: - # We only need to update the keys that changed - for key, val in value.items(): - stored = txn.get(key.encode()) - if stored is not None: - decoded = self._decode_value(stored) - if val == decoded: - continue - ret = txn.put(key.encode(), self._encode_value(val)) - if not ret: - logging.info(f"Error inserting key '{key}' " - f"in namespace '{namespace}'") - - def clear_namespace(self, - namespace: str, - drop_empty_db: bool = False - ) -> Future[None]: - return self._run_command(self._clear_ns_impl, namespace, drop_empty_db) - - def _clear_ns_impl(self, - namespace: str, - drop_empty_db: bool = False - ) -> None: - db = self._get_db(namespace) - with self.lmdb_env.begin(write=True, db=db) as txn: - txn.drop(db, delete=drop_empty_db) - if drop_empty_db: - del self.namespaces[namespace] - - def sync_namespace(self, - namespace: str, - value: Mapping[str, DBRecord] - ) -> Future[None]: - return self._run_command(self._sync_ns_impl, namespace, value) - - def _sync_ns_impl(self, - namespace: str, - value: Mapping[str, DBRecord] - ) -> None: - if not value: - raise self.server.error("Cannot sync to an empty value") - db = self._get_db(namespace) - new_keys = set(value.keys()) - with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn: - with txn.cursor() as cursor: - remaining = cursor.first() - while remaining: - bkey, bval = cursor.item() - key = bytes(bkey).decode() - if key not in value: - remaining = cursor.delete() - else: - decoded = self._decode_value(bval) - if decoded != value[key]: - new_val = self._encode_value(value[key]) - txn.put(key.encode(), new_val) - new_keys.remove(key) - remaining = cursor.next() - for key in new_keys: - val = value[key] - ret = txn.put(key.encode(), self._encode_value(val)) - if not ret: - logging.info(f"Error inserting key '{key}' " - f"in namespace '{namespace}'") - - def ns_length(self, namespace: str) -> Future[int]: - return self._run_command(self._ns_length_impl, namespace) - - def _ns_length_impl(self, namespace: str) -> int: - db = self._get_db(namespace) - with self.lmdb_env.begin(db=db) as txn: - stats = txn.stat(db) - return stats['entries'] - - def ns_keys(self, namespace: str) -> Future[List[str]]: - return self._run_command(self._ns_keys_impl, namespace) - - def _ns_keys_impl(self, namespace: str) -> List[str]: - keys: List[str] = [] - db = self._get_db(namespace) - with self.lmdb_env.begin(db=db) as txn: - with txn.cursor() as cursor: - remaining = cursor.first() - while remaining: - keys.append(cursor.key().decode()) - remaining = cursor.next() - return keys - - def ns_values(self, namespace: str) -> Future[List[Any]]: - return self._run_command(self._ns_values_impl, namespace) - - def _ns_values_impl(self, namespace: str) -> List[Any]: - values: List[Any] = [] - db = self._get_db(namespace) - with self.lmdb_env.begin(db=db, buffers=True) as txn: - with txn.cursor() as cursor: - remaining = cursor.first() - while remaining: - values.append(self._decode_value(cursor.value())) - remaining = cursor.next() - return values - - def ns_items(self, namespace: str) -> Future[List[Tuple[str, Any]]]: - return self._run_command(self._ns_items_impl, namespace) - - def _ns_items_impl(self, namespace: str) -> List[Tuple[str, Any]]: - ns = self._get_namespace(namespace) - return list(ns.items()) - - def ns_contains(self, - namespace: str, - 
key: Union[List[str], str] - ) -> Future[bool]: - return self._run_command(self._ns_contains_impl, namespace, key) - - def _ns_contains_impl(self, - namespace: str, - key: Union[List[str], str] - ) -> bool: - self._get_db(namespace) - try: - key_list = self._process_key(key) - record = self._get_record(namespace, key_list[0]) - if len(key_list) == 1: - return True - reduce(operator.getitem, # type: ignore - key_list[1:], record) - except Exception: - return False - return True - - def register_local_namespace(self, - namespace: str, - forbidden: bool = False - ) -> None: - if self.server.is_running(): - raise self.server.error( - "Cannot register a namespace while the " - "server is running") - if namespace not in self.namespaces: - self.namespaces[namespace] = self.lmdb_env.open_db( - namespace.encode()) - if forbidden: - if namespace not in self.forbidden_namespaces: - self.forbidden_namespaces.add(namespace) - self.insert_item( - "moonraker", "database.forbidden_namespaces", - list(self.forbidden_namespaces)) - elif namespace not in self.protected_namespaces: - self.protected_namespaces.add(namespace) - self.insert_item("moonraker", "database.protected_namespaces", - sorted(self.protected_namespaces)) - - def wrap_namespace(self, - namespace: str, - parse_keys: bool = True - ) -> NamespaceWrapper: - if self.server.is_running(): - raise self.server.error( - "Cannot wrap a namespace while the " - "server is running") - if namespace not in self.namespaces: - raise self.server.error( - f"Namespace '{namespace}' not found", 404) - return NamespaceWrapper(namespace, self, parse_keys) - - def _get_db(self, namespace: str) -> object: - if namespace not in self.namespaces: - raise self.server.error(f"Namespace '{namespace}' not found", 404) - return self.namespaces[namespace] - - def _process_key(self, key: Union[List[str], str]) -> List[str]: - try: - key_list = key if isinstance(key, list) else key.split('.') - except Exception: - key_list = [] - if not key_list or "" in key_list: - raise self.server.error(f"Invalid Key Format: '{key}'") - return key_list - - def _insert_record(self, namespace: str, key: str, val: DBType) -> bool: - db = self._get_db(namespace) - if val is None: - return False - with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn: - ret = txn.put(key.encode(), self._encode_value(val)) - return ret - - def _get_record(self, - namespace: str, - key: str, - force: bool = False - ) -> DBRecord: - db = self._get_db(namespace) - with self.lmdb_env.begin(buffers=True, db=db) as txn: - value = txn.get(key.encode()) - if value is None: - if force: - return {} - raise self.server.error( - f"Key '{key}' in namespace '{namespace}' not found", 404) - return self._decode_value(value) - - def _get_namespace(self, namespace: str) -> Dict[str, Any]: - db = self._get_db(namespace) - result = {} - invalid_key_result = None - with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn: - with txn.cursor() as cursor: - has_remaining = cursor.first() - while has_remaining: - db_key, value = cursor.item() - k = bytes(db_key).decode() - if not k: - invalid_key_result = self._decode_value(value) - logging.info( - f"Invalid Key '{db_key}' found in namespace " - f"'{namespace}', dropping value: " - f"{repr(invalid_key_result)}") - try: - has_remaining = cursor.delete() - except Exception: - logging.exception("Error Deleting LMDB Key") - has_remaining = cursor.next() - else: - result[k] = self._decode_value(value) - has_remaining = cursor.next() - return result - - def 
_encode_value(self, value: DBRecord) -> bytes: - try: - enc_func = RECORD_ENCODE_FUNCS[type(value)] - return enc_func(value) - except Exception: - raise self.server.error( - f"Error encoding val: {value}, type: {type(value)}") - - def _decode_value(self, bvalue: bytes) -> DBRecord: - fmt = bvalue[0] - try: - decode_func = RECORD_DECODE_FUNCS[fmt] - return decode_func(bvalue) - except Exception: - val = bytes(bvalue).decode() - raise self.server.error( - f"Error decoding value {val}, format: {chr(fmt)}") - - async def _handle_list_request(self, - web_request: WebRequest - ) -> Dict[str, List[str]]: - await self.eventloop.run_in_thread(self.thread_lock.acquire) - try: - ns_list = set(self.namespaces.keys()) - self.forbidden_namespaces - finally: - self.thread_lock.release() - return {'namespaces': list(ns_list)} - - async def _handle_item_request(self, - web_request: WebRequest - ) -> Dict[str, Any]: - action = web_request.get_action() - namespace = web_request.get_str("namespace") - if namespace in self.forbidden_namespaces: - raise self.server.error( - f"Read/Write access to namespace '{namespace}'" - " is forbidden", 403) - key: Any - valid_types: Tuple[type, ...] - if action != "GET": - if namespace in self.protected_namespaces: - raise self.server.error( - f"Write access to namespace '{namespace}'" - " is forbidden", 403) - key = web_request.get("key") - valid_types = (list, str) + yield (namespace, key) + if sqlite3.sqlite_version_info < (3, 35): + vals = self.get_batch(conn, namespace, keys) + with conn: + conn.executemany( + f"DELETE FROM {NAMESPACE_TABLE} WHERE namespace = ? and key = ?", + generate_params() + ) + return vals else: - key = web_request.get("key", None) - valid_types = (list, str, type(None)) - if not isinstance(key, valid_types): - raise self.server.error( - "Value for argument 'key' is an invalid type: " - f"{type(key).__name__}") - if action == "GET": - val = await self.get_item(namespace, key) - elif action == "POST": - val = web_request.get("value") - await self.insert_item(namespace, key, val) - elif action == "DELETE": - val = await self.delete_item(namespace, key, drop_empty_db=True) - return {'namespace': namespace, 'key': key, 'value': val} + placeholders = ",".join("?" * len(keys)) + sql = ( + f"DELETE FROM {NAMESPACE_TABLE} " + f"WHERE namespace = ? and key IN ({placeholders}) " + "RETURNING key, value" + ) + params = [namespace] + keys + with conn: + cur = conn.execute(sql, params) + cur.arraysize = 200 + return dict(cur.fetchall()) + + def get_batch( + self, conn: sqlite3.Connection, namespace: str, keys: List[str] + ) -> Dict[str, Any]: + placeholders = ",".join("?" * len(keys)) + sql = ( + f"SELECT key, value FROM {NAMESPACE_TABLE} " + f"WHERE namespace = ? 
and key IN ({placeholders})"
+        )
+        ph_vals = [namespace] + keys
+        cur = conn.execute(sql, ph_vals)
+        cur.arraysize = 200
+        return dict(cur.fetchall())
+
+    # SQL Direct Manipulation
+    def sql_execute(
+        self,
+        conn: sqlite3.Connection,
+        statement: str,
+        params: SqlParams
+    ) -> SqliteCursorProxy:
+        cur = conn.execute(statement, params)
+        cur.arraysize = 100
+        return SqliteCursorProxy(self, cur)
+
+    def sql_executemany(
+        self,
+        conn: sqlite3.Connection,
+        statement: str,
+        params: Sequence[SqlParams]
+    ) -> SqliteCursorProxy:
+        cur = conn.executemany(statement, params)
+        cur.arraysize = 100
+        return SqliteCursorProxy(self, cur)
+
+    def sql_executescript(
+        self,
+        conn: sqlite3.Connection,
+        script: str
+    ) -> SqliteCursorProxy:
+        cur = conn.executescript(script)
+        cur.arraysize = 100
+        return SqliteCursorProxy(self, cur)
+
+    def sql_commit(self, conn: sqlite3.Connection) -> None:
+        conn.commit()
+
+    def sql_rollback(self, conn: sqlite3.Connection) -> None:
+        conn.rollback()
+
+    def register_namespace(self, namespace: str) -> None:
+        self._namespaces.add(namespace)
+
+    def register_table(self, table_def: SqlTableDefinition) -> None:
+        if self.is_alive():
+            raise self.server.error(
+                "Table registration must occur during init."
+            )
+        if table_def.name in self._tables:
+            logging.info(f"Found registered table {table_def.name}")
+            if table_def.name in (NAMESPACE_TABLE, REGISTRATION_TABLE):
+                raise self.server.error(
+                    f"Cannot register table '{table_def.name}', it is reserved"
+                )
+            detected_proto, version = self._lookup_registered_table(table_def.name)
+        else:
+            logging.info(f"Creating table {table_def.name}...")
+            with self.sync_conn:
+                self.sync_conn.execute(f"CREATE TABLE {table_def.prototype}")
+            detected_proto = table_def.prototype
+            version = 0
+        if table_def.version > version:
+            table_def.migrate(version, self.get_provider_wrapper())
+            self._save_registered_table(
+                table_def.name, table_def.prototype, table_def.version
+            )
+        elif detected_proto != table_def.prototype:
+            self.server.add_warning(
+                f"Table '{table_def.name}' definition does not match stored "
+                "definition. See the log for details."
+            )
+            logging.info(
+                f"Expected table prototype:\n{table_def.prototype}\n\n"
+                f"Stored table prototype:\n{detected_proto}"
+            )
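+
+    # Illustrative registration sketch (hypothetical table, not part of
+    # this change): a SqlTableDefinition supplies a name, a CREATE TABLE
+    # prototype, a schema version, and a migrate() hook, and is passed to
+    # register_table() during init.
+    #
+    #   class HistoryTotalsTable(SqlTableDefinition):
+    #       name = "history_totals"
+    #       prototype = "history_totals (job_id TEXT PRIMARY KEY, total REAL)"
+    #       version = 1
+    #
+    #       def migrate(self, last_version, db_provider):
+    #           pass  # no migrations needed for the initial version
+    #
+    #   provider.register_table(HistoryTotalsTable())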
+
+    def compact_database(self, conn: sqlite3.Connection) -> Dict[str, int]:
+        if self.restored:
+            raise self.server.error(
+                "Cannot compact restored database, awaiting restart"
+            )
+        cur_size = self._db_path.stat().st_size
+        conn.execute("VACUUM")
+        new_size = self._db_path.stat().st_size
+        return {
+            "previous_size": cur_size,
+            "new_size": new_size
+        }
+
+    def backup_database(
+        self, conn: sqlite3.Connection, bkp_path: pathlib.Path
+    ) -> None:
+        if self.restored:
+            raise self.server.error(
+                "Cannot backup restored database, awaiting restart"
+            )
+        parent = bkp_path.parent
+        if not parent.exists():
+            parent.mkdir(parents=True, exist_ok=True)
+        elif bkp_path.exists():
+            bkp_path.unlink()
+        bkp_conn = sqlite3.connect(str(bkp_path))
+        conn.backup(bkp_conn)
+        bkp_conn.close()
+
+    def restore_database(
+        self, conn: sqlite3.Connection, restore_path: pathlib.Path
+    ) -> Dict[str, Any]:
+        if self.restored:
+            raise self.server.error("Database already restored")
+        if not restore_path.is_file():
+            raise self.server.error(f"Restoration File {restore_path} does not exist")
+        restore_conn = sqlite3.connect(str(restore_path))
+        restore_info = self._validate_restore_db(restore_conn)
+        restore_conn.backup(conn)
+        restore_conn.close()
+        self.restored = True
+        return restore_info
+
+    def _validate_restore_db(
+        self, restore_conn: sqlite3.Connection
+    ) -> Dict[str, Any]:
+        cursor = restore_conn.execute(
+            f"SELECT name FROM {SCHEMA_TABLE} WHERE type = 'table'"
+        )
+        cursor.arraysize = 100
+        tables = [row[0] for row in cursor.fetchall()]
+        if NAMESPACE_TABLE not in tables:
+            restore_conn.close()
+            raise self.server.error(
+                f"Invalid database for restoration, missing table '{NAMESPACE_TABLE}'"
+            )
+        missing_tables = self._tables.difference(tables)
+        if missing_tables:
+            logging.info(f"Database to restore missing tables: {missing_tables}")
+        cursor = restore_conn.execute(
+            f"SELECT DISTINCT namespace FROM {NAMESPACE_TABLE}"
+        )
+        cursor.arraysize = 100
+        namespaces = [row[0] for row in cursor.fetchall()]
+        missing_ns = self._namespaces.difference(namespaces)
+        if missing_ns:
+            logging.info(f"Database to restore missing namespaces: {missing_ns}")
+        return {
+            "restored_tables": tables,
+            "restored_namespaces": namespaces
+        }
+
+    def get_provider_wrapper(self) -> DBProviderWrapper:
+        return DBProviderWrapper(self)
+
+    def is_restored(self) -> bool:
+        return self.restored
+
+    def stop(self) -> Future[None]:
+        fut = self.asyncio_loop.create_future()
+        if not self.is_alive():
+            fut.set_result(None)
+        else:
+            self.command_queue.put_nowait((fut, None, tuple()))
+        return fut
+
+class DBProviderWrapper:
+    def __init__(self, provider: SqliteProvider) -> None:
+        self.server = provider.server
+        self.provider = provider
+        self._sql_conn = provider.sync_conn
+
+    @property
+    def connection(self) -> sqlite3.Connection:
+        return self._sql_conn
+
+    def iter_namespace(
+        self, namespace: str, batch_count: int = 100
+    ) -> Generator[Dict[str, Any], Any, None]:
+        yield from self.provider.iter_namespace(self._sql_conn, namespace, batch_count)
+
+    def get_namespace_keys(self, namespace: str) -> List[str]:
+        return self.provider.get_namespace_keys(self._sql_conn, namespace)
+
+    def get_namespace_values(self, namespace: str) -> List[Any]:
+        return self.provider.get_namespace_values(self._sql_conn, namespace)
+
+    def get_namespace_items(self, namespace: str) -> List[Tuple[str, Any]]:
+        return
self.provider.get_namespace_items(self._sql_conn, namespace) + + def get_namespace_length(self, namespace: str) -> int: + return self.provider.get_namespace_length(self._sql_conn, namespace) + + def get_namespace(self, namespace: str) -> Dict[str, Any]: + return self.provider.get_namespace(self._sql_conn, namespace, must_exist=False) + + def clear_namespace(self, namespace: str) -> None: + self.provider.clear_namespace(self._sql_conn, namespace) + + def get_item( + self, + namespace: str, + key: Union[str, List[str]], + default: Any = Sentinel.MISSING + ) -> Any: + return self.provider.get_item(self._sql_conn, namespace, key, default) + + def delete_item(self, namespace: str, key: Union[str, List[str]]) -> Any: + return self.provider.delete_item(self._sql_conn, namespace, key) + + def insert_item( + self, namespace: str, key: Union[str, List[str]], value: DBType + ) -> None: + self.provider.insert_item(self._sql_conn, namespace, key, value) + + def update_item( + self, namespace: str, key: Union[str, List[str]], value: DBType + ) -> None: + self.provider.update_item(self._sql_conn, namespace, key, value) + + def get_batch(self, namespace: str, keys: List[str]) -> Dict[str, Any]: + return self.provider.get_batch(self._sql_conn, namespace, keys) + + def delete_batch(self, namespace: str, keys: List[str]) -> Dict[str, Any]: + return self.provider.delete_batch(self._sql_conn, namespace, keys) + + def insert_batch(self, namespace: str, records: Dict[str, Any]) -> None: + self.provider.insert_batch(self._sql_conn, namespace, records) + + def move_batch( + self, namespace: str, source_keys: List[str], dest_keys: List[str] + ) -> None: + self.provider.move_batch(self._sql_conn, namespace, source_keys, dest_keys) + + def wipe_local_namespace(self, namespace: str) -> None: + """ + Unregister persistent local namespace + """ + self.provider.clear_namespace(self._sql_conn, namespace) + self.provider.drop_empty_namespace(self._sql_conn, namespace) + db: MoonrakerDatabase = self.server.lookup_component("database") + db.unregister_local_namespace(namespace) + + +class SqliteCursorProxy: + def __init__(self, provider: SqliteProvider, cursor: sqlite3.Cursor) -> None: + self._db_provider = provider + self._cursor = cursor + self._description = cursor.description + self._rowcount = cursor.rowcount + self._lastrowid = cursor.lastrowid + self._array_size = cursor.arraysize + + @property + def rowcount(self) -> int: + return self._rowcount + + @property + def lastrowid(self) -> Optional[int]: + return self._lastrowid + + @property + def description(self): + return self._description + + @property + def arraysize(self) -> int: + return self._array_size + + def set_arraysize(self, size: int) -> Future[None]: + def wrapper(_) -> None: + self._cursor.arraysize = size + self._array_size = size + return self._db_provider.execute_db_function(wrapper) + + def fetchone(self) -> Future[Optional[sqlite3.Row]]: + def fetch_wrapper(_) -> Optional[sqlite3.Row]: + return self._cursor.fetchone() + return self._db_provider.execute_db_function(fetch_wrapper) + + def fetchmany(self, size: Optional[int] = None) -> Future[List[sqlite3.Row]]: + def fetch_wrapper(_) -> List[sqlite3.Row]: + if size is None: + return self._cursor.fetchmany() + return self._cursor.fetchmany(size) + return self._db_provider.execute_db_function(fetch_wrapper) + + def fetchall(self) -> Future[List[sqlite3.Row]]: + def fetch_wrapper(_) -> List[sqlite3.Row]: + return self._cursor.fetchall() + return self._db_provider.execute_db_function(fetch_wrapper) + 
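+# Illustrative usage sketch (hypothetical component code, not part of this
+# change): fetches on the cursor proxy return Futures resolved on the
+# database thread, so callers simply await them. The "job_history" table
+# and the "table_wrapper" variable are assumptions for the example; the
+# SqlTableWrapper context manager itself is defined below.
+#
+#   async with table_wrapper:
+#       cursor = await table_wrapper.execute(
+#           "SELECT status FROM job_history WHERE job_id = ?", ["abc123"]
+#       )
+#       row = await cursor.fetchone()
+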
+class SqlTableWrapper(contextlib.AbstractAsyncContextManager): + def __init__( + self, + database: MoonrakerDatabase, + table_def: SqlTableDefinition + ) -> None: + self._database = database + self._table_def = table_def + self._db_provider = database.db_provider + + @property + def version(self) -> int: + return self._table_def.version + + async def __aenter__(self) -> SqlTableWrapper: + return self + + async def __aexit__( + self, + exc_type: Optional[type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + if exc_value is not None: + await self.rollback() + else: + await self.commit() + + def get_provider_wrapper(self) -> DBProviderWrapper: + return self._database.get_provider_wrapper() + + def queue_callback( + self, callback: Callable[[sqlite3.Connection], Any] + ) -> Future[Any]: + return self._db_provider.execute_db_function(callback) + + def execute( + self, sql: str, params: SqlParams = [] + ) -> Future[SqliteCursorProxy]: + return self._db_provider.execute_db_function( + self._db_provider.sql_execute, sql, params + ) + + def executemany( + self, sql: str, params: Sequence[SqlParams] = [] + ) -> Future[SqliteCursorProxy]: + return self._db_provider.execute_db_function( + self._db_provider.sql_executemany, sql, params + ) + + def executescript(self, sql: str) -> Future[SqliteCursorProxy]: + return self._db_provider.execute_db_function( + self._db_provider.sql_executescript, sql + ) + + def commit(self) -> Future[None]: + return self._db_provider.execute_db_function( + self._db_provider.sql_commit + ) + + def rollback(self) -> Future[None]: + return self._db_provider.execute_db_function( + self._db_provider.sql_rollback + ) - async def close(self) -> None: - # Decrement unsafe shutdown counter - unsafe_shutdowns: int = await self.get_item( - "moonraker", "database.unsafe_shutdowns", 0) - await self.insert_item( - "moonraker", "database.unsafe_shutdowns", - unsafe_shutdowns - 1) - await self.eventloop.run_in_thread(self.thread_lock.acquire) - try: - # log db stats - msg = "" - with self.lmdb_env.begin() as txn: - for db_name, db in self.namespaces.items(): - stats = txn.stat(db) - msg += f"\n{db_name}:\n" - msg += "\n".join([f"{k}: {v}" for k, v in stats.items()]) - logging.info(f"Database statistics:\n{msg}") - self.lmdb_env.sync() - self.lmdb_env.close() - finally: - self.thread_lock.release() class NamespaceWrapper: - def __init__(self, - namespace: str, - database: MoonrakerDatabase, - parse_keys: bool - ) -> None: + def __init__( + self, + namespace: str, + database: MoonrakerDatabase, + parse_keys: bool = False + ) -> None: self.namespace = namespace self.db = database self.eventloop = database.eventloop self.server = database.server # If parse keys is true, keys of a string type # will be passed straight to the DB methods. 
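+        # For example, with parse_keys=True the string "stats.total" is
+        # split on "." and addresses a nested value, while with
+        # parse_keys=False it is wrapped in a list and treated as the
+        # single literal key "stats.total".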
- self.parse_keys = parse_keys + self._parse_keys = parse_keys - def insert(self, - key: Union[List[str], str], - value: DBType - ) -> Awaitable[None]: - if isinstance(key, str) and not self.parse_keys: + @property + def parse_keys(self) -> bool: + return self._parse_keys + + @parse_keys.setter + def parse_keys(self, val: bool) -> None: + self._parse_keys = val + + def get_provider_wrapper(self) -> DBProviderWrapper: + return self.db.get_provider_wrapper() + + def insert( + self, key: Union[List[str], str], value: DBType + ) -> Future[None]: + if isinstance(key, str) and not self._parse_keys: key = [key] return self.db.insert_item(self.namespace, key, value) - def update_child(self, - key: Union[List[str], str], - value: DBType - ) -> Awaitable[None]: - if isinstance(key, str) and not self.parse_keys: + def update_child( + self, key: Union[List[str], str], value: DBType + ) -> Future[None]: + if isinstance(key, str) and not self._parse_keys: key = [key] return self.db.update_item(self.namespace, key, value) - def update(self, value: Mapping[str, DBRecord]) -> Awaitable[None]: + def update(self, value: Dict[str, DBRecord]) -> Future[None]: return self.db.update_namespace(self.namespace, value) - def sync(self, value: Mapping[str, DBRecord]) -> Awaitable[None]: + def sync(self, value: Dict[str, DBRecord]) -> Future[None]: return self.db.sync_namespace(self.namespace, value) - def get(self, - key: Union[List[str], str], - default: Any = None - ) -> Future[Any]: - if isinstance(key, str) and not self.parse_keys: + def get(self, key: Union[List[str], str], default: Any = None) -> Future[Any]: + if isinstance(key, str) and not self._parse_keys: key = [key] return self.db.get_item(self.namespace, key, default) def delete(self, key: Union[List[str], str]) -> Future[Any]: - if isinstance(key, str) and not self.parse_keys: + if isinstance(key, str) and not self._parse_keys: key = [key] return self.db.delete_item(self.namespace, key) @@ -828,15 +1674,12 @@ class NamespaceWrapper: def as_dict(self) -> Dict[str, Any]: self._check_sync_method("as_dict") - return self.db._get_namespace(self.namespace) + return self.db.get_item(self.namespace).result() def __getitem__(self, key: Union[List[str], str]) -> Future[Any]: - return self.get(key, default=SENTINEL) + return self.get(key, default=Sentinel.MISSING) - def __setitem__(self, - key: Union[List[str], str], - value: DBType - ) -> None: + def __setitem__(self, key: Union[List[str], str], value: DBType) -> None: self.insert(key, value) def __delitem__(self, key: Union[List[str], str]): @@ -844,12 +1687,12 @@ class NamespaceWrapper: def __contains__(self, key: Union[List[str], str]) -> bool: self._check_sync_method("__contains__") - if isinstance(key, str) and not self.parse_keys: + if isinstance(key, str) and not self._parse_keys: key = [key] return self.db.ns_contains(self.namespace, key).result() def contains(self, key: Union[List[str], str]) -> Future[bool]: - if isinstance(key, str) and not self.parse_keys: + if isinstance(key, str) and not self._parse_keys: key = [key] return self.db.ns_contains(self.namespace, key) @@ -862,15 +1705,14 @@ class NamespaceWrapper: def items(self) -> Future[List[Tuple[str, Any]]]: return self.db.ns_items(self.namespace) - def pop(self, - key: Union[List[str], str], - default: Any = SENTINEL - ) -> Union[Future[Any], Task[Any]]: + def pop( + self, key: Union[List[str], str], default: Any = Sentinel.MISSING + ) -> Union[Future[Any], Task[Any]]: if not self.server.is_running(): try: val = self.delete(key).result() except 
Exception: - if isinstance(default, SentinelClass): + if default is Sentinel.MISSING: raise val = default fut = self.eventloop.create_future() @@ -881,20 +1723,21 @@ class NamespaceWrapper: try: val = await self.delete(key) except Exception: - if isinstance(default, SentinelClass): + if default is Sentinel.MISSING: raise val = default return val return self.eventloop.create_task(_do_pop()) - def clear(self) -> Awaitable[None]: + def clear(self) -> Future[None]: return self.db.clear_namespace(self.namespace) def _check_sync_method(self, func_name: str) -> None: - if self.server.is_running(): + if self.db.db_provider.is_alive(): raise self.server.error( f"Cannot call method {func_name} while " - "the eventloop is running") + "the eventloop is running" + ) def load_component(config: ConfigHelper) -> MoonrakerDatabase: return MoonrakerDatabase(config) diff --git a/moonraker/components/dbus_manager.py b/moonraker/components/dbus_manager.py index b53f770..69a9627 100644 --- a/moonraker/components/dbus_manager.py +++ b/moonraker/components/dbus_manager.py @@ -5,6 +5,7 @@ # This file may be distributed under the terms of the GNU GPLv3 license. from __future__ import annotations import os +import asyncio import pathlib import logging import dbus_next @@ -16,11 +17,13 @@ from typing import ( TYPE_CHECKING, List, Optional, + Any, ) if TYPE_CHECKING: - from confighelper import ConfigHelper + from ..confighelper import ConfigHelper +STAT_PATH = "/proc/self/stat" DOC_URL = ( "https://moonraker.readthedocs.io/en/latest/" "installation/#policykit-permissions" @@ -34,7 +37,11 @@ class DbusManager: self.bus: Optional[MessageBus] = None self.polkit: Optional[ProxyInterface] = None self.warned: bool = False - proc_data = pathlib.Path(f"/proc/self/stat").read_text() + st_path = pathlib.Path(STAT_PATH) + self.polkit_subject: List[Any] = [] + if not st_path.is_file(): + return + proc_data = st_path.read_text() start_clk_ticks = int(proc_data.split()[21]) self.polkit_subject = [ "unix-process", @@ -51,6 +58,8 @@ class DbusManager: try: self.bus = MessageBus(bus_type=BusType.SYSTEM) await self.bus.connect() + except asyncio.CancelledError: + raise except Exception: logging.info("Unable to Connect to D-Bus") return @@ -60,20 +69,31 @@ class DbusManager: "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority") - except self.DbusError: - self.server.add_warning( - "Unable to find DBus PolKit Interface, this suggests PolKit " - "is not installed on your OS.") + except asyncio.CancelledError: + raise + except Exception as e: + if self.server.is_debug_enabled(): + logging.exception("Failed to get PolKit interface") + else: + logging.info(f"Failed to get PolKit interface: {e}") + self.polkit = None async def check_permission(self, action: str, err_msg: str = "" ) -> bool: if self.polkit is None: + self.server.add_warning( + "Unable to find DBus PolKit Interface, this suggests PolKit " + "is not installed on your OS.", + "dbus_polkit" + ) return False try: ret = await self.polkit.call_check_authorization( # type: ignore self.polkit_subject, action, {}, 0, "") + except asyncio.CancelledError: + raise except Exception as e: self._check_warned() self.server.add_warning( diff --git a/moonraker/components/extensions.py b/moonraker/components/extensions.py index 2d7f3eb..cd74a9a 100644 --- a/moonraker/components/extensions.py +++ b/moonraker/components/extensions.py @@ -4,8 +4,11 @@ # # This file may be distributed under the terms of the GNU GPLv3 license. 
from __future__ import annotations -from websockets import WebSocket - +import asyncio +import pathlib +import logging +from ..common import BaseRemoteConnection, RequestType, TransportType +from ..utils import get_unix_peer_credentials # Annotation imports from typing import ( @@ -18,25 +21,36 @@ from typing import ( ) if TYPE_CHECKING: - from confighelper import ConfigHelper - from websockets import WebRequest + from ..server import Server + from ..confighelper import ConfigHelper + from ..common import WebRequest + from .klippy_connection import KlippyConnection as Klippy + +UNIX_BUFFER_LIMIT = 20 * 1024 * 1024 class ExtensionManager: def __init__(self, config: ConfigHelper) -> None: self.server = config.get_server() - self.agents: Dict[str, WebSocket] = {} + self.agents: Dict[str, BaseRemoteConnection] = {} + self.agent_methods: Dict[int, List[str]] = {} + self.uds_server: Optional[asyncio.AbstractServer] = None self.server.register_endpoint( - "/connection/send_event", ["POST"], self._handle_agent_event, - transports=["websocket"] + "/connection/register_remote_method", RequestType.POST, + self._register_agent_method, + transports=TransportType.WEBSOCKET ) self.server.register_endpoint( - "/server/extensions/list", ["GET"], self._handle_list_extensions + "/connection/send_event", RequestType.POST, self._handle_agent_event, + transports=TransportType.WEBSOCKET ) self.server.register_endpoint( - "/server/extensions/request", ["POST"], self._handle_call_agent + "/server/extensions/list", RequestType.GET, self._handle_list_extensions + ) + self.server.register_endpoint( + "/server/extensions/request", RequestType.POST, self._handle_call_agent ) - def register_agent(self, connection: WebSocket) -> None: + def register_agent(self, connection: BaseRemoteConnection) -> None: data = connection.client_data name = data["name"] client_type = data["type"] @@ -55,16 +69,20 @@ class ExtensionManager: } connection.send_notification("agent_event", [evt]) - def remove_agent(self, connection: WebSocket) -> None: + def remove_agent(self, connection: BaseRemoteConnection) -> None: name = connection.client_data["name"] if name in self.agents: + klippy: Klippy = self.server.lookup_component("klippy_connection") + registered_methods = self.agent_methods.pop(connection.uid, []) + for method in registered_methods: + klippy.unregister_method(method) del self.agents[name] evt: Dict[str, Any] = {"agent": name, "event": "disconnected"} connection.send_notification("agent_event", [evt]) async def _handle_agent_event(self, web_request: WebRequest) -> str: - conn = web_request.get_connection() - if not isinstance(conn, WebSocket): + conn = web_request.get_client_connection() + if conn is None: raise self.server.error("No connection detected") if conn.client_data["type"] != "agent": raise self.server.error( @@ -82,6 +100,16 @@ class ExtensionManager: conn.send_notification("agent_event", [evt]) return "ok" + async def _register_agent_method(self, web_request: WebRequest) -> str: + conn = web_request.get_client_connection() + if conn is None: + raise self.server.error("No connection detected") + method_name = web_request.get_str("method_name") + klippy: Klippy = self.server.lookup_component("klippy_connection") + klippy.register_method_from_agent(conn, method_name) + self.agent_methods.setdefault(conn.uid, []).append(method_name) + return "ok" + async def _handle_list_extensions( self, web_request: WebRequest ) -> Dict[str, List[Dict[str, Any]]]: @@ -101,7 +129,129 @@ class ExtensionManager: if agent not in 
self.agents: raise self.server.error(f"Agent {agent} not connected") conn = self.agents[agent] - return await conn.call_method(method, args) + return await conn.call_method_with_response(method, args) + + async def start_unix_server(self) -> None: + sockfile: str = self.server.get_app_args()["unix_socket_path"] + sock_path = pathlib.Path(sockfile).expanduser().resolve() + logging.info(f"Creating Unix Domain Socket at '{sock_path}'") + try: + self.uds_server = await asyncio.start_unix_server( + self.on_unix_socket_connected, sock_path, limit=UNIX_BUFFER_LIMIT + ) + except asyncio.CancelledError: + raise + except Exception: + logging.exception(f"Failed to create Unix Domain Socket: {sock_path}") + self.uds_server = None + + def on_unix_socket_connected( + self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter + ) -> None: + peercred = get_unix_peer_credentials(writer, "Unix Client Connection") + UnixSocketClient(self.server, reader, writer, peercred) + + async def close(self) -> None: + if self.uds_server is not None: + self.uds_server.close() + await self.uds_server.wait_closed() + self.uds_server = None + +class UnixSocketClient(BaseRemoteConnection): + def __init__( + self, + server: Server, + reader: asyncio.StreamReader, + writer: asyncio.StreamWriter, + peercred: Dict[str, int] + ) -> None: + self.on_create(server) + self.writer = writer + self._peer_cred = peercred + self._connected_time = self.eventloop.get_loop_time() + pid = self._peer_cred.get("process_id") + uid = self._peer_cred.get("user_id") + gid = self._peer_cred.get("group_id") + self.wsm.add_client(self) + logging.info( + f"Unix Socket Opened - Client ID: {self.uid}, " + f"Process ID: {pid}, User ID: {uid}, Group ID: {gid}" + ) + self.eventloop.register_callback(self._read_messages, reader) + + async def _read_messages(self, reader: asyncio.StreamReader) -> None: + errors_remaining: int = 10 + while not reader.at_eof(): + try: + data = await reader.readuntil(b'\x03') + decoded = data[:-1].decode(encoding="utf-8") + except (ConnectionError, asyncio.IncompleteReadError): + break + except asyncio.CancelledError: + logging.exception("Unix Client Stream Read Cancelled") + raise + except Exception: + logging.exception("Unix Client Stream Read Error") + errors_remaining -= 1 + if not errors_remaining or self.is_closed: + break + continue + errors_remaining = 10 + self.eventloop.register_callback(self._process_message, decoded) + logging.debug("Unix Socket Disconnection From _read_messages()") + await self._on_close(reason="Read Exit") + + async def write_to_socket(self, message: Union[bytes, str]) -> None: + if isinstance(message, str): + data = message.encode() + b"\x03" + else: + data = message + b"\x03" + try: + self.writer.write(data) + await self.writer.drain() + except asyncio.CancelledError: + raise + except Exception: + logging.debug("Unix Socket Disconnection From write_to_socket()") + await self._on_close(reason="Write Exception") + + async def _on_close( + self, + code: Optional[int] = None, + reason: Optional[str] = None + ) -> None: + if self.is_closed: + return + self.is_closed = True + kconn: Klippy = self.server.lookup_component("klippy_connection") + kconn.remove_subscription(self) + if not self.writer.is_closing(): + self.writer.close() + try: + await self.writer.wait_closed() + except Exception: + pass + self.message_buf = [] + for resp in self.pending_responses.values(): + resp.set_exception( + self.server.error("Client Socket Disconnected", 500) + ) + self.pending_responses = {} + logging.info( + 
f"Unix Socket Closed: ID: {self.uid}, " + f"Close Code: {code}, " + f"Close Reason: {reason}" + ) + if self._client_data["type"] == "agent": + extensions: ExtensionManager + extensions = self.server.lookup_component("extensions") + extensions.remove_agent(self) + self.wsm.remove_client(self) + + def close_socket(self, code: int, reason: str) -> None: + if not self.is_closed: + self.eventloop.register_callback(self._on_close, code, reason) + def load_component(config: ConfigHelper) -> ExtensionManager: return ExtensionManager(config) diff --git a/moonraker/components/file_manager/__init__.py b/moonraker/components/file_manager/__init__.py index 8311e5c..078eeb0 100644 --- a/moonraker/components/file_manager/__init__.py +++ b/moonraker/components/file_manager/__init__.py @@ -9,7 +9,7 @@ from . import file_manager as fm from typing import TYPE_CHECKING if TYPE_CHECKING: - from confighelper import ConfigHelper + from ...confighelper import ConfigHelper def load_component(config: ConfigHelper) -> fm.FileManager: return fm.load_component(config) diff --git a/moonraker/components/file_manager/file_manager.py b/moonraker/components/file_manager/file_manager.py index 82f72ed..64d5733 100644 --- a/moonraker/components/file_manager/file_manager.py +++ b/moonraker/components/file_manager/file_manager.py @@ -10,13 +10,17 @@ import sys import pathlib import shutil import logging -import json import tempfile import asyncio +import zipfile +import time +import math from copy import deepcopy from inotify_simple import INotify from inotify_simple import flags as iFlags -from utils import MOONRAKER_PATH +from ...utils import source_info +from ...utils import json_wrapper as jsonw +from ...common import RequestType, TransportType # Annotation imports from typing import ( @@ -29,23 +33,25 @@ from typing import ( List, Set, Coroutine, + Awaitable, Callable, TypeVar, + Type, cast, ) if TYPE_CHECKING: from inotify_simple import Event as InotifyEvent - from confighelper import ConfigHelper - from websockets import WebRequest - from components import database - from components import klippy_apis - from components import shell_command - from components.job_queue import JobQueue + from ...confighelper import ConfigHelper + from ...common import WebRequest, UserInfo + from ..klippy_connection import KlippyConnection + from ..job_queue import JobQueue + from ..job_state import JobState + from ..secrets import Secrets + from ..klippy_apis import KlippyAPI as APIComp + from ..database import MoonrakerDatabase as DBComp + from ..shell_command import ShellCommandFactory as SCMDComp StrOrPath = Union[str, pathlib.Path] - DBComp = database.MoonrakerDatabase - APIComp = klippy_apis.KlippyAPI - SCMDComp = shell_command.ShellCommandFactory _T = TypeVar("_T") VALID_GCODE_EXTS = ['.gcode', '.g', '.gco', '.ufp', '.nc'] @@ -59,61 +65,117 @@ class FileManager: def __init__(self, config: ConfigHelper) -> None: self.server = config.get_server() self.event_loop = self.server.get_event_loop() - self.reserved_paths: Dict[str, pathlib.Path] = {} + self.reserved_paths: Dict[str, Tuple[pathlib.Path, bool]] = {} self.full_access_roots: Set[str] = set() self.file_paths: Dict[str, str] = {} - self.add_reserved_path("moonraker", MOONRAKER_PATH) + app_args = self.server.get_app_args() + self.datapath = pathlib.Path(app_args["data_path"]) + srcdir = str(source_info.source_path()) + self.add_reserved_path("moonraker", srcdir, False) db: DBComp = self.server.load_component(config, "database") db_path = db.get_database_path() - 
self.add_reserved_path("database", db_path) - gc_path: str = db.get_item( - "moonraker", "file_manager.gcode_path", "").result() - self.gcode_metadata = MetadataStorage(config, gc_path, db) - self.inotify_handler = INotifyHandler(config, self, - self.gcode_metadata) - self.write_mutex = asyncio.Lock() - self.notify_sync_lock: Optional[NotifySyncLock] = None + self.add_reserved_path("database", db_path, False) + self.add_reserved_path("certs", self.datapath.joinpath("certs"), False) + self.add_reserved_path("systemd", self.datapath.joinpath("systemd"), False) + self.add_reserved_path("backup", self.datapath.joinpath("backup"), False) + self.gcode_metadata = MetadataStorage(config, db) + self.sync_lock = NotifySyncLock(config) + avail_observers: Dict[str, Type[BaseFileSystemObserver]] = { + "none": BaseFileSystemObserver, + "inotify": InotifyObserver + } + observer = config.get("file_system_observer", "inotify").lower() + obs_class = avail_observers.get(observer) + if obs_class is None: + self.server.add_warning( + f"[file_manager]: Invalid value '{observer}' for option " + "'file_system_observer'. Falling back to no observer." + ) + obs_class = BaseFileSystemObserver + if observer == "none": + logging.info("File System Observation is disabled") + else: + logging.info(f"Using File System Observer: {observer}") + self.fs_observer = obs_class( + config, self, self.gcode_metadata, self.sync_lock + ) + self.scheduled_notifications: Dict[str, asyncio.TimerHandle] = {} self.fixed_path_args: Dict[str, Any] = {} - self.queue_gcodes: bool = config.getboolean('queue_gcode_uploads', - False) + self.queue_gcodes: bool = config.getboolean('queue_gcode_uploads', False) + self.check_klipper_path = config.getboolean("check_klipper_config_path", True) # Register file management endpoints self.server.register_endpoint( - "/server/files/list", ['GET'], self._handle_filelist_request) + "/server/files/list", RequestType.GET, self._handle_filelist_request + ) self.server.register_endpoint( - "/server/files/metadata", ['GET'], self._handle_metadata_request) + "/server/files/metadata", RequestType.GET, self._handle_metadata_request + ) self.server.register_endpoint( - "/server/files/directory", ['GET', 'POST', 'DELETE'], - self._handle_directory_request) + "/server/files/metascan", RequestType.POST, self._handle_metascan_request + ) self.server.register_endpoint( - "/server/files/move", ['POST'], self._handle_file_move_copy) + "/server/files/thumbnails", RequestType.GET, self._handle_list_thumbs + ) self.server.register_endpoint( - "/server/files/copy", ['POST'], self._handle_file_move_copy) + "/server/files/roots", RequestType.GET, self._handle_list_roots + ) self.server.register_endpoint( - "/server/files/delete_file", ['DELETE'], self._handle_file_delete, - transports=["websocket"]) + "/server/files/directory", RequestType.all(), + self._handle_directory_request + ) + self.server.register_endpoint( + "/server/files/move", RequestType.POST, self._handle_file_move_copy + ) + self.server.register_endpoint( + "/server/files/copy", RequestType.POST, self._handle_file_move_copy + ) + self.server.register_endpoint( + "/server/files/zip", RequestType.POST, self._handle_zip_files + ) + self.server.register_endpoint( + "/server/files/delete_file", RequestType.DELETE, self._handle_file_delete, + transports=TransportType.WEBSOCKET + ) # register client notificaitons self.server.register_notification("file_manager:filelist_changed") self.server.register_event_handler( "server:klippy_identified", self._update_fixed_paths) - # 
Register Klippy Configuration Path
-        config_path = config.get('config_path', None)
-        if config_path is not None:
-            self.register_directory('config', config_path, full_access=True)
+        # Register Data Folders
+        secrets: Secrets = self.server.load_component(config, "secrets")
+        self.add_reserved_path("secrets", secrets.get_secrets_file(), False)
-        # Register logs path
-        log_path = config.get('log_path', None)
-        if log_path is not None:
-            self.register_directory('logs', log_path)
+        config.get('config_path', None, deprecate=True)
+        cfg_writable = config.getboolean("enable_config_write_access", True)
+        self.register_data_folder("config", full_access=cfg_writable)
-        # If gcode path is in the database, register it
-        if gc_path:
-            self.register_directory('gcodes', gc_path, full_access=True)
+        config.get('log_path', None, deprecate=True)
+        self.register_data_folder("logs")
+        gc_path = self.register_data_folder("gcodes", full_access=True)
+        if gc_path.is_dir():
+            prune: bool = True
+            saved_gc_dir: str = db.get_item(
+                "moonraker", "file_manager.gcode_path", ""
+            ).result()
+            is_empty = next(gc_path.iterdir(), None) is None
+            if is_empty and saved_gc_dir:
+                saved_path = pathlib.Path(saved_gc_dir)
+                if (
+                    saved_path.is_dir() and
+                    next(saved_path.iterdir(), None) is not None
+                ):
+                    logging.info(
+                        f"Legacy GCode Path found at '{saved_path}', "
+                        "aborting metadata prune"
+                    )
+                    prune = False
+            if prune:
+                self.gcode_metadata.prune_storage()
 
     async def component_init(self):
-        self.inotify_handler.initalize_roots()
+        self.fs_observer.initialize()
 
     def _update_fixed_paths(self) -> None:
         kinfo = self.server.get_klippy_info()
@@ -131,6 +193,7 @@ class FileManager:
         # Register path for example configs
         klipper_path = paths.get('klipper_path', None)
         if klipper_path is not None:
+            self.reserved_paths.pop("klipper", None)
             self.add_reserved_path("klipper", klipper_path)
             example_cfg_path = os.path.join(klipper_path, "config")
             self.register_directory("config_examples", example_cfg_path)
@@ -144,6 +207,66 @@
             self.server.register_static_file_handler(
                 "klippy.log", log_path, force=True)
 
+        # Validate config file
+        if self.check_klipper_path:
+            cfg_file: Optional[str] = paths.get("config_file")
+            cfg_parent = self.file_paths.get("config")
+            if cfg_file is not None and cfg_parent is not None:
+                cfg_path = pathlib.Path(cfg_file).expanduser()
+                par_path = pathlib.Path(cfg_parent)
+                if (
+                    par_path in cfg_path.parents or
+                    par_path.resolve() in cfg_path.resolve().parents
+                ):
+                    self.server.remove_warning("klipper_config")
+                else:
+                    self.server.add_warning(
+                        "file_manager: Klipper configuration file not located in "
+                        "'config' folder.\n\n"
+                        f"Klipper Config Path: {cfg_path}\n\n"
+                        f"Config Folder: {par_path}",
+                        warn_id="klipper_config"
+                    )
+
+    def validate_gcode_path(self, gc_path: str) -> None:
+        gc_dir = pathlib.Path(gc_path).expanduser()
+        if "gcodes" in self.file_paths:
+            expected = self.file_paths["gcodes"]
+            if not gc_dir.exists() or not gc_dir.samefile(expected):
+                self.server.add_warning(
+                    "GCode path received from Klipper does not match expected "
+                    "location.\n\n"
+                    f"Received: '{gc_dir}'\nExpected: '{expected}'\n\n"
+                    "Modify the [virtual_sdcard] section in Klipper's "
+                    "configuration to correct this error.\n\n"
+                    f"[virtual_sdcard]\npath: {expected}",
+                    warn_id="gcode_path"
+                )
+            else:
+                self.server.remove_warning("gcode_path")
+
+    def register_data_folder(
+        self, folder_name: str, full_access: bool = False
+    ) -> pathlib.Path:
+        new_path = self.datapath.joinpath(folder_name)
+        if not
new_path.exists(): + try: + new_path.mkdir() + except Exception: + pass + self.register_directory(folder_name, str(new_path), full_access) + return new_path + + def disable_write_access(self): + self.full_access_roots.clear() + + def check_write_enabled(self): + if not self.full_access_roots: + raise self.server.error( + "Write access is currently disabled. Check notifications " + "for warnings." + ) + def register_directory(self, root: str, path: Optional[str], @@ -159,98 +282,80 @@ class FileManager: f"Supplied path ({path}) for ({root}) is invalid. Make sure\n" "that the path exists and is not the file system root.") return False - permissions = os.R_OK + # Check Folder Permissions + missing_perms = [] + try: + # Test read + os.listdir(path) + except PermissionError: + missing_perms.append("READ") + except Exception: + logging.exception(f"Error testing read access for root {root}") if full_access: - if not self._check_root_safe(root, path): - return False - permissions |= os.W_OK + if ( + os.access in os.supports_effective_ids and + not os.access(path, os.W_OK, effective_ids=True) + ): + missing_perms.append("WRITE") self.full_access_roots.add(root) - if not os.access(path, permissions): - self.server.add_warning( - f"Moonraker does not have permission to access path " - f"({path}) for ({root}).") - return False + if missing_perms: + mpstr = " | ".join(missing_perms) + self.server.add_log_rollover_item( + f"fm_reg_perms_{root}", + f"file_manager: Moonraker has detected the following missing " + f"permissions for root folder '{root}': {mpstr}" + ) if path != self.file_paths.get(root, ""): self.file_paths[root] = path self.server.register_static_file_handler(root, path) if root == "gcodes": - db: DBComp = self.server.lookup_component("database") - db.insert_item("moonraker", "file_manager.gcode_path", path) # scan for metadata changes self.gcode_metadata.update_gcode_path(path) if full_access: # Refresh the file list and add watches - self.inotify_handler.add_root_watch(root, path) + self.fs_observer.add_root_watch(root, path) elif self.server.is_running(): - self.event_loop.register_callback( - self.inotify_handler.notify_filelist_changed, - "root_update", root, path) + self._sched_changed_event("root_update", root, path, immediate=True) return True - def _paths_overlap(self, - path_one: StrOrPath, - path_two: StrOrPath - ) -> bool: - if isinstance(path_one, str): - path_one = pathlib.Path(path_one) - path_one = path_one.expanduser().resolve() - if isinstance(path_two, str): - path_two = pathlib.Path(path_two) - path_two = path_two.expanduser().resolve() - return ( - path_one == path_two or - path_one in path_two.parents or - path_two in path_one.parents - ) - - def _check_root_safe(self, new_root: str, new_path: StrOrPath) -> bool: - # Make sure that registered full access paths - # do no overlap one another, nor a reserved path - if isinstance(new_path, str): - new_path = pathlib.Path(new_path) - new_path = new_path.expanduser().resolve() - for reg_root, reg_path in self.file_paths.items(): - exp_reg_path = pathlib.Path(reg_path).expanduser().resolve() + def check_reserved_path( + self, + req_path: StrOrPath, + need_write: bool, + raise_error: bool = True + ) -> bool: + if isinstance(req_path, str): + req_path = pathlib.Path(req_path) + req_path = req_path.expanduser().resolve() + if ".git" in req_path.parts: + if raise_error: + raise self.server.error( + "Access to .git folders is forbidden", 403 + ) + return True + for name, (res_path, can_read) in self.reserved_paths.items(): if ( - 
reg_root not in self.full_access_roots or - (reg_root == new_root and new_path == exp_reg_path) + (res_path == req_path or res_path in req_path.parents) and + (need_write or not can_read) ): - continue - if self._paths_overlap(new_path, exp_reg_path): - self.server.add_warning( - f"Failed to register '{new_root}': '{new_path}', path " - f"overlaps registered root '{reg_root}': '{exp_reg_path}'") - return False - for res_name, res_path in self.reserved_paths.items(): - if self._paths_overlap(new_path, res_path): - self.server.add_warning( - f"Failed to register '{new_root}': '{new_path}', path " - f"overlaps reserved path '{res_name}': '{res_path}'") - return False - return True + if not raise_error: + return True + raise self.server.error( + f"Access to file {req_path.name} forbidden by reserved " + f"path '{name}'", 403 + ) + return False - def add_reserved_path(self, name: str, res_path: StrOrPath) -> bool: + def add_reserved_path( + self, name: str, res_path: StrOrPath, read_access: bool = True + ) -> bool: + if name in self.reserved_paths: + return False if isinstance(res_path, str): res_path = pathlib.Path(res_path) res_path = res_path.expanduser().resolve() - if ( - name in self.reserved_paths and - res_path == self.reserved_paths[name] - ): - return True - self.reserved_paths[name] = res_path - check_passed = True - for reg_root, reg_path in list(self.file_paths.items()): - if reg_root not in self.full_access_roots: - continue - exp_reg_path = pathlib.Path(reg_path).expanduser().resolve() - if self._paths_overlap(res_path, exp_reg_path): - self.server.add_warning( - f"Full access root '{reg_root}' overlaps reserved path " - f"'{name}', removing access") - self.file_paths.pop(reg_root, None) - check_passed = False - return check_passed + self.reserved_paths[name] = (res_path, read_access) + return True def get_directory(self, root: str = "gcodes") -> str: return self.file_paths.get(root, "") @@ -270,10 +375,22 @@ class FileManager: def get_metadata_storage(self) -> MetadataStorage: return self.gcode_metadata - def check_file_exists(self, root: str, filename: str) -> bool: - root_dir = self.file_paths.get(root, "") - file_path = os.path.join(root_dir, filename) - return os.path.exists(file_path) + def check_file_exists( + self, + root: str, + filename: str, + modified: Optional[float] = None + ) -> bool: + if root not in self.file_paths: + return False + root_dir = pathlib.Path(self.file_paths[root]) + file_path = root_dir.joinpath(filename) + if file_path.is_file(): + if modified is None: + return True + fstat = file_path.stat() + return math.isclose(fstat.st_mtime, modified) + return False def can_access_path(self, path: StrOrPath) -> bool: if isinstance(path, str): @@ -282,18 +399,12 @@ class FileManager: for registered in self.file_paths.values(): reg_root_path = pathlib.Path(registered).resolve() if reg_root_path in path.parents: - return True + return not self.check_reserved_path(path, False, False) return False def upload_queue_enabled(self) -> bool: return self.queue_gcodes - def sync_inotify_event(self, path: str) -> Optional[NotifySyncLock]: - if self.notify_sync_lock is None or \ - not self.notify_sync_lock.check_need_sync(path): - return None - return self.notify_sync_lock - async def _handle_filelist_request(self, web_request: WebRequest ) -> List[Dict[str, Any]]: @@ -313,74 +424,128 @@ class FileManager: metadata['filename'] = requested_file return metadata + async def _handle_metascan_request( + self, web_request: WebRequest + ) -> Dict[str, Any]: + async with 
self.sync_lock: + requested_file: str = web_request.get_str('filename') + gcpath = pathlib.Path(self.file_paths["gcodes"]).joinpath(requested_file) + if not gcpath.is_file(): + raise self.server.error(f"File '{requested_file}' does not exist", 404) + if gcpath.suffix not in VALID_GCODE_EXTS: + raise self.server.error(f"File {gcpath} is not a valid gcode file") + # remove metadata and force a rescan + ret = self.gcode_metadata.remove_file_metadata(requested_file) + if ret is not None: + await ret + path_info = self.get_path_info(gcpath, "gcodes") + evt = self.gcode_metadata.parse_metadata(requested_file, path_info) + await evt.wait() + metadata: Optional[Dict[str, Any]] + metadata = self.gcode_metadata.get(requested_file, None) + if metadata is None: + raise self.server.error( + f"Failed to parse metadata for file '{requested_file}'", 500) + metadata['filename'] = requested_file + return metadata + + async def _handle_list_roots( + self, web_request: WebRequest + ) -> List[Dict[str, Any]]: + root_list: List[Dict[str, Any]] = [] + for name, path in self.file_paths.items(): + perms = "rw" if name in self.full_access_roots else "r" + root_list.append({ + "name": name, + "path": path, + "permissions": perms + }) + return root_list + + async def _handle_list_thumbs( + self, web_request: WebRequest + ) -> List[Dict[str, Any]]: + requested_file: str = web_request.get_str("filename") + metadata: Optional[Dict[str, Any]] + metadata = self.gcode_metadata.get(requested_file, None) + if metadata is None: + return [] + if "thumbnails" not in metadata: + return [] + thumblist: List[Dict[str, Any]] = metadata["thumbnails"] + for info in thumblist: + relpath: Optional[str] = info.pop("relative_path", None) + if relpath is None: + continue + thumbpath = pathlib.Path(requested_file).parent.joinpath(relpath) + info["thumbnail_path"] = str(thumbpath) + return thumblist + async def _handle_directory_request(self, web_request: WebRequest ) -> Dict[str, Any]: directory = web_request.get_str('path', "gcodes") root, dir_path = self._convert_request_path(directory) - action = web_request.get_action() - if action == 'GET': + req_type = web_request.get_request_type() + if req_type == RequestType.GET: is_extended = web_request.get_boolean('extended', False) # Get list of files and subdirectories for this target dir_info = self._list_directory(dir_path, root, is_extended) return dir_info - async with self.write_mutex: - result = { - 'item': {'path': directory, 'root': root}, - 'action': "create_dir"} - if action == 'POST' and root in self.full_access_roots: + async with self.sync_lock: + self.check_reserved_path(dir_path, True) + action = "create_dir" + if req_type == RequestType.POST and root in self.full_access_roots: # Create a new directory + self.sync_lock.setup("create_dir", dir_path) try: os.mkdir(dir_path) except Exception as e: raise self.server.error(str(e)) - elif action == 'DELETE' and root in self.full_access_roots: + self.fs_observer.on_item_create(root, dir_path, is_dir=True) + elif req_type == RequestType.DELETE and root in self.full_access_roots: # Remove a directory - result['action'] = "delete_dir" + action = "delete_dir" if directory.strip("/") == root: raise self.server.error( "Cannot delete root directory") if not os.path.isdir(dir_path): raise self.server.error( f"Directory does not exist ({directory})") + self.sync_lock.setup("delete_dir", dir_path) force = web_request.get_boolean('force', False) if force: # Make sure that the directory does not contain a file # loaded by the virtual_sdcard - 
await self._handle_operation_check(dir_path) - self.notify_sync_lock = NotifySyncLock(dir_path) + self._handle_operation_check(dir_path) try: await self.event_loop.run_in_thread( shutil.rmtree, dir_path) except Exception: - self.notify_sync_lock.cancel() - self.notify_sync_lock = None raise - await self.notify_sync_lock.wait(30.) - self.notify_sync_lock = None else: try: os.rmdir(dir_path) except Exception as e: raise self.server.error(str(e)) + self.fs_observer.on_item_delete(root, dir_path, is_dir=True) else: raise self.server.error("Operation Not Supported", 405) - return result + return self._sched_changed_event(action, root, dir_path) - async def _handle_operation_check(self, requested_path: str) -> bool: + def _handle_operation_check(self, requested_path: str) -> bool: if not self.get_relative_path("gcodes", requested_path): # Path not in the gcodes path return True - # Get virtual_sdcard status - kapis: APIComp = self.server.lookup_component('klippy_apis') - result: Dict[str, Any] - result = await kapis.query_objects({'print_stats': None}, {}) - pstats = result.get('print_stats', {}) - loaded_file: str = pstats.get('filename', "") - state: str = pstats.get('state', "") + kconn: KlippyConnection + kconn = self.server.lookup_component("klippy_connection") + job_state: JobState = self.server.lookup_component("job_state") + last_stats = job_state.get_last_stats() + loaded_file: str = last_stats.get('filename', "") + state: str = last_stats.get('state', "") gc_path = self.file_paths.get('gcodes', "") full_path = os.path.join(gc_path, loaded_file) - is_printing = state in ["printing", "paused"] + is_printing = kconn.is_ready() and state in ["printing", "paused"] if loaded_file and is_printing: if os.path.isdir(requested_path): # Check to see of the loaded file is in the request @@ -418,46 +583,152 @@ class FileManager: if dest_root not in self.full_access_roots: raise self.server.error( f"Destination path is read-only: {dest_root}") - async with self.write_mutex: - result: Dict[str, Any] = {'item': {'root': dest_root}} + self.check_reserved_path(source_path, False) + self.check_reserved_path(dest_path, True) + async with self.sync_lock: if not os.path.exists(source_path): raise self.server.error(f"File {source_path} does not exist") # make sure the destination is not in use if os.path.exists(dest_path): - await self._handle_operation_check(dest_path) + self._handle_operation_check(dest_path) + src_info: Tuple[Optional[str], ...] 
= (None, None) if ep == "/server/files/move": if source_root not in self.full_access_roots: raise self.server.error( f"Source path is read-only, cannot move: {source_root}") # if moving the file, make sure the source is not in use - await self._handle_operation_check(source_path) + self._handle_operation_check(source_path) op_func: Callable[..., str] = shutil.move - result['source_item'] = { - 'path': source, - 'root': source_root - } - result['action'] = "move_dir" if os.path.isdir(source_path) \ - else "move_file" + action = "move_dir" if os.path.isdir(source_path) else "move_file" + src_info = (source_root, source_path) elif ep == "/server/files/copy": if os.path.isdir(source_path): - result['action'] = "create_dir" + action = "create_dir" op_func = shutil.copytree else: - result['action'] = "create_file" + action = "create_file" + source_base = os.path.basename(source_path) + if ( + os.path.isfile(dest_path) or + os.path.isfile(os.path.join(dest_path, source_base)) + ): + action = "modify_file" op_func = shutil.copy2 - self.notify_sync_lock = NotifySyncLock(dest_path) + else: + raise self.server.error(f"Invalid endpoint {ep}") + self.sync_lock.setup(action, dest_path, move_copy=True) try: full_dest = await self.event_loop.run_in_thread( op_func, source_path, dest_path) + if dest_root == "gcodes" and self.fs_observer.has_fast_observe: + await self.sync_lock.wait_inotify_event(full_dest) except Exception as e: - self.notify_sync_lock.cancel() - self.notify_sync_lock = None - raise self.server.error(str(e)) - self.notify_sync_lock.update_dest(full_dest) - await self.notify_sync_lock.wait(600.) - self.notify_sync_lock = None - result['item']['path'] = self.get_relative_path(dest_root, full_dest) - return result + raise self.server.error(str(e)) from e + if action.startswith("move"): + ret = self.fs_observer.on_item_move( + source_root, dest_root, source_path, full_dest + ) + else: + ret = self.fs_observer.on_item_copy(dest_root, full_dest) + if ret is not None: + await ret + return self._sched_changed_event( + action, dest_root, full_dest, src_info[0], src_info[1] + ) + + async def _handle_zip_files( + self, web_request: WebRequest + ) -> Dict[str, Any]: + async with self.sync_lock: + store_only = web_request.get_boolean("store_only", False) + suffix = time.strftime("%Y%m%d-%H%M%S", time.localtime()) + dest: str = web_request.get_str( + "dest", f"config/collection-{suffix}.zip" + ) + dest_root, dest_str_path = self._convert_request_path(dest) + if dest_root not in self.full_access_roots: + raise self.server.error( + f"Destination Root '{dest_root}' is read-only" + ) + dest_path = pathlib.Path(dest_str_path) + self.check_reserved_path(dest_path, True) + if dest_path.is_dir(): + raise self.server.error( + f"Cannot create archive at '{dest_path}'. Path exists " + "as a directory." + ) + elif not dest_path.parent.exists(): + raise self.server.error( + f"Cannot create archive at '{dest_path}'. Parent " + "directory does not exist." 
+                )
+            items = web_request.get_list("items")
+            if not items:
+                raise self.server.error(
+                    "At least one file or directory must be specified"
+                )
+            self.sync_lock.setup("create_file", dest_path)
+            await self.event_loop.run_in_thread(
+                self._zip_files, items, dest_path, store_only
+            )
+            self.fs_observer.on_item_create(dest_root, dest_path)
+            ret = self._sched_changed_event("create_file", dest_root, str(dest_path))
+            return {
+                "destination": ret["item"],
+                "action": "zip_files"
+            }
+
+    def _zip_files(
+        self,
+        item_list: List[str],
+        destination: StrOrPath,
+        store_only: bool = False
+    ) -> None:
+        if isinstance(destination, str):
+            destination = pathlib.Path(destination).expanduser().resolve()
+        tmpdir = pathlib.Path(tempfile.gettempdir())
+        temp_dest = tmpdir.joinpath(destination.name)
+        processed: Set[Tuple[int, int]] = set()
+        cptype = zipfile.ZIP_STORED if store_only else zipfile.ZIP_DEFLATED
+        with zipfile.ZipFile(str(temp_dest), "w", compression=cptype) as zf:
+            for item in item_list:
+                root, str_path = self._convert_request_path(item)
+                root_path = pathlib.Path(self.file_paths[root])
+                item_path = pathlib.Path(str_path)
+                self.check_reserved_path(item_path, False)
+                if not item_path.exists():
+                    raise self.server.error(
+                        f"No file/directory exists at '{item}'"
+                    )
+                if item_path.is_file():
+                    st = item_path.stat()
+                    ident = (st.st_dev, st.st_ino)
+                    if ident in processed:
+                        continue
+                    processed.add(ident)
+                    rel_path = item_path.relative_to(root_path.parent)
+                    zf.write(str(item_path), arcname=str(rel_path))
+                    continue
+                elif not item_path.is_dir():
+                    raise self.server.error(
+                        f"Item at path '{item}' is not a valid file or "
+                        "directory"
+                    )
+                for child_path in item_path.iterdir():
+                    if child_path.is_file():
+                        if self.check_reserved_path(child_path, False, False):
+                            continue
+                        st = child_path.stat()
+                        ident = (st.st_dev, st.st_ino)
+                        if ident in processed:
+                            continue
+                        processed.add(ident)
+                        rel_path = child_path.relative_to(root_path.parent)
+                        try:
+                            zf.write(str(child_path), arcname=str(rel_path))
+                        except PermissionError:
+                            continue
+        shutil.move(str(temp_dest), str(destination))
 
     def _list_directory(self,
                         path: str,
@@ -467,6 +738,7 @@
         if not os.path.isdir(path):
             raise self.server.error(
                 f"Directory does not exist ({path})")
+        self.check_reserved_path(path, False)
         flist: Dict[str, Any] = {'dirs': [], 'files': []}
         for fname in os.listdir(path):
             full_path = os.path.join(path, fname)
@@ -498,16 +770,33 @@
             }
         return flist
 
-    def get_path_info(self, path: str, root: str) -> Dict[str, Any]:
-        fstat = os.stat(path)
-        real_path = os.path.realpath(path)
-        permissions = "rw"
-        if (
-            (os.path.islink(path) and os.path.isfile(real_path)) or
-            not os.access(real_path, os.R_OK | os.W_OK) or
-            root not in self.full_access_roots
-        ):
-            permissions = "r"
+    def get_path_info(
+        self, path: StrOrPath, root: str, raise_error: bool = True
+    ) -> Dict[str, Any]:
+        if isinstance(path, str):
+            path = pathlib.Path(path)
+        real_path = path.resolve()
+        try:
+            fstat = path.stat()
+        except Exception:
+            if raise_error:
+                raise
+            return {"modified": 0, "size": 0, "permissions": ""}
+        if ".git" in real_path.parts:
+            permissions = ""
+        else:
+            permissions = "rw"
+            if (
+                root not in self.full_access_roots or
+                (path.is_symlink() and path.is_file())
+            ):
+                permissions = "r"
+            for name, (res_path, can_read) in self.reserved_paths.items():
+                if (res_path == real_path or res_path in real_path.parents):
+                    if not can_read:
+                        permissions = ""
+                        break
+                    permissions = "r"
         return {
'modified': fstat.st_mtime, 'size': fstat.st_size, @@ -524,23 +813,25 @@ class FileManager: form_args: Dict[str, Any] ) -> Dict[str, Any]: # lookup root file path - async with self.write_mutex: + async with self.sync_lock: try: upload_info = self._parse_upload_args(form_args) + self.check_reserved_path(upload_info["dest_path"], True) + self.sync_lock.setup("create_file", upload_info["dest_path"]) root = upload_info['root'] + if root not in self.full_access_roots: + raise self.server.error(f"Invalid root request: {root}") if root == "gcodes" and upload_info['ext'] in VALID_GCODE_EXTS: result = await self._finish_gcode_upload(upload_info) - elif root in self.full_access_roots: - result = await self._finish_standard_upload(upload_info) else: - raise self.server.error(f"Invalid root request: {root}") + result = await self._finish_standard_upload(upload_info) except Exception: try: os.remove(form_args['tmp_file_path']) except Exception: pass raise - return result + return result def _parse_upload_args(self, upload_args: Dict[str, Any] @@ -573,10 +864,14 @@ class FileManager: if unzip_ufp: filename = os.path.splitext(filename)[0] + ".gcode" dest_path = os.path.splitext(dest_path)[0] + ".gcode" - if os.path.islink(dest_path): - raise self.server.error(f"Cannot overwrite symlink: {dest_path}") - if os.path.isfile(dest_path) and not os.access(dest_path, os.W_OK): - raise self.server.error(f"File is read-only: {dest_path}") + if ( + os.path.isfile(dest_path) and + os.access in os.supports_effective_ids and + not os.access(dest_path, os.W_OK, effective_ids=True) + ): + logging.info( + f"Destination file exists and appears to be read-only: {dest_path}" + ) return { 'root': root, 'filename': filename, @@ -585,32 +880,34 @@ class FileManager: 'tmp_file_path': upload_args['tmp_file_path'], 'start_print': start_print, 'unzip_ufp': unzip_ufp, - 'ext': f_ext + 'ext': f_ext, + "is_link": os.path.islink(dest_path), + "user": upload_args.get("current_user") } - async def _finish_gcode_upload(self, - upload_info: Dict[str, Any] - ) -> Dict[str, Any]: + async def _finish_gcode_upload( + self, upload_info: Dict[str, Any] + ) -> Dict[str, Any]: # Verify that the operation can be done if attempting to upload a gcode can_start: bool = False try: check_path: str = upload_info['dest_path'] - can_start = await self._handle_operation_check(check_path) + can_start = self._handle_operation_check(check_path) except self.server.error as e: if e.status_code == 403: raise self.server.error( "File is loaded, upload not permitted", 403) - self.notify_sync_lock = NotifySyncLock(upload_info['dest_path']) finfo = await self._process_uploaded_file(upload_info) await self.gcode_metadata.parse_metadata( upload_info['filename'], finfo).wait() started: bool = False queued: bool = False if upload_info['start_print']: + user: Optional[UserInfo] = upload_info.get("user") if can_start: kapis: APIComp = self.server.lookup_component('klippy_apis') try: - await kapis.start_print(upload_info['filename']) + await kapis.start_print(upload_info['filename'], user=user) except self.server.error: # Attempt to start print failed pass @@ -619,35 +916,26 @@ class FileManager: if self.queue_gcodes and not started: job_queue: JobQueue = self.server.lookup_component('job_queue') await job_queue.queue_job( - upload_info['filename'], check_exists=False) + upload_info['filename'], check_exists=False, user=user) queued = True + self.fs_observer.on_item_create("gcodes", upload_info["dest_path"]) + result = dict(self._sched_changed_event( + "create_file", 
"gcodes", upload_info["dest_path"], + immediate=upload_info["is_link"] + )) + result.update({"print_started": started, "print_queued": queued}) + return result - await self.notify_sync_lock.wait(300.) - self.notify_sync_lock = None - return { - 'item': { - 'path': upload_info['filename'], - 'root': "gcodes" - }, - 'print_started': started, - 'print_queued': queued, - 'action': "create_file" - } - - async def _finish_standard_upload(self, - upload_info: Dict[str, Any] - ) -> Dict[str, Any]: - self.notify_sync_lock = NotifySyncLock(upload_info['dest_path']) + async def _finish_standard_upload( + self, upload_info: Dict[str, Any] + ) -> Dict[str, Any]: await self._process_uploaded_file(upload_info) - await self.notify_sync_lock.wait(5.) - self.notify_sync_lock = None - return { - 'item': { - 'path': upload_info['filename'], - 'root': upload_info['root'] - }, - 'action': "create_file" - } + dest_path: str = upload_info["dest_path"] + root: str = upload_info["root"] + self.fs_observer.on_item_create(root, dest_path) + return self._sched_changed_event( + "create_file", root, dest_path, immediate=upload_info["is_link"] + ) async def _process_uploaded_file(self, upload_info: Dict[str, Any] @@ -669,8 +957,11 @@ class FileManager: finfo = self.get_path_info(tmp_path, upload_info['root']) finfo['ufp_path'] = tmp_path else: - shutil.move(upload_info['tmp_file_path'], - upload_info['dest_path']) + dest_path = upload_info['dest_path'] + if upload_info["is_link"]: + dest_path = os.path.realpath(dest_path) + shutil.move( + upload_info['tmp_file_path'], dest_path) finfo = self.get_path_info(upload_info['dest_path'], upload_info['root']) except Exception: @@ -704,7 +995,8 @@ class FileManager: key = (st.st_dev, st.st_ino) if key not in visited_dirs: visited_dirs.add(key) - scan_dirs.append(dname) + if not self.check_reserved_path(full_path, False, False): + scan_dirs.append(dname) dir_names[:] = scan_dirs for name in files: ext = os.path.splitext(name)[-1].lower() @@ -776,26 +1068,396 @@ class FileManager: return await self.delete_file(file_path) async def delete_file(self, path: str) -> Dict[str, Any]: - async with self.write_mutex: + async with self.sync_lock: root, full_path = self._convert_request_path(path) - filename = self.get_relative_path(root, full_path) + self.check_reserved_path(full_path, True) if root not in self.full_access_roots: raise self.server.error( f"Path not available for DELETE: {path}", 405) if not os.path.isfile(full_path): raise self.server.error(f"Invalid file path: {path}") try: - await self._handle_operation_check(full_path) + self._handle_operation_check(full_path) except self.server.error as e: if e.status_code == 403: raise + self.sync_lock.setup("delete_file", full_path) os.remove(full_path) - return { - 'item': {'path': filename, 'root': root}, - 'action': "delete_file"} + self.fs_observer.on_item_delete(root, full_path) + return self._sched_changed_event("delete_file", root, full_path) + + def _sched_changed_event( + self, + action: str, + root: str, + full_path: str, + source_root: Optional[str] = None, + source_path: Optional[str] = None, + immediate: bool = False + ) -> Dict[str, Any]: + rel_path = self.get_relative_path(root, full_path) + path_info = self.get_path_info(full_path, root, raise_error=False) + path_info.update({"path": rel_path, "root": root}) + notify_info: Dict[str, Any] = { + "action": action, + "item": path_info + } + if source_path is not None and source_root is not None: + src_rel_path = self.get_relative_path(source_root, source_path) + 
+            notify_info['source_item'] = {'path': src_rel_path, 'root': source_root}
+        immediate |= not self.fs_observer.has_fast_observe
+        delay = .005 if immediate else 1.
+        key = f"{action}-{root}-{rel_path}"
+        handle = self.event_loop.delay_callback(
+            delay, self._do_notify, key, notify_info
+        )
+        if not immediate:
+            self.scheduled_notifications[key] = handle
+        return notify_info
+
+    def _do_notify(self, key: str, notify_info: Dict[str, Any]) -> None:
+        self.scheduled_notifications.pop(key, None)
+        self.server.send_event("file_manager:filelist_changed", notify_info)
+
+    def cancel_notification(self, key: str) -> None:
+        handle = self.scheduled_notifications.pop(key, None)
+        if handle is not None:
+            handle.cancel()

    def close(self) -> None:
-        self.inotify_handler.close()
+        for hdl in self.scheduled_notifications.values():
+            hdl.cancel()
+        self.scheduled_notifications.clear()
+        self.fs_observer.close()
+
+
+class NotifySyncLock(asyncio.Lock):
+    def __init__(self, config: ConfigHelper) -> None:
+        super().__init__()
+        self.server = config.get_server()
+        self.action: str = ""
+        self.dest_path: Optional[pathlib.Path] = None
+        self.check_pending = False
+        self.move_copy_fut: Optional[asyncio.Future] = None
+        self.sync_waiters: List[asyncio.Future] = []
+        self.pending_paths: Set[pathlib.Path] = set()
+        self.acquired_paths: Set[pathlib.Path] = set()
+
+    def setup(
+        self, action: str, path: StrOrPath, move_copy: bool = False
+    ) -> None:
+        if not self.locked():
+            raise self.server.error(
+                "Cannot call setup unless the lock has been acquired"
+            )
+        # Called by a file manager request. Sets the destination path to sync
+        # with the file system observer (inotify).
+        if self.dest_path is not None:
+            logging.debug(
+                "NotifySync Error: Setup requested while a path is still pending"
+            )
+            self.finish()
+        if isinstance(path, str):
+            path = pathlib.Path(path)
+        self.dest_path = path
+        self.action = action
+        self.check_pending = move_copy
+
+    async def wait_inotify_event(self, current_path: StrOrPath) -> None:
+        # Called by a file manager move/copy request to wait for metadata
+        # analysis to complete. We need to be careful here to avoid a deadlock
+        # or a long wait time when inotify isn't available.
+        if not self.check_pending:
+            return
+        if isinstance(current_path, str):
+            current_path = pathlib.Path(current_path)
+        self.dest_path = current_path
+        if current_path in self.acquired_paths:
+            # Notification has been received, no need to wait
+            return
+        self.move_copy_fut = self.server.get_event_loop().create_future()
+        mcfut = self.move_copy_fut
+        has_pending = current_path in self.pending_paths
+        timeout = 1200. if has_pending else 1.
+        for _ in range(5):
+            try:
+                assert mcfut is not None
+                await asyncio.wait_for(asyncio.shield(mcfut), timeout)
+            except asyncio.TimeoutError:
+                if timeout > 2.:
+                    break
+                has_pending = current_path in self.pending_paths
+                timeout = 1200. if has_pending else 1.
+            else:
+                break
+        else:
+            logging.info(
+                f"Failed to receive an inotify event, dest path: {current_path}"
+            )
+        self.move_copy_fut = None
+
+    def finish(self) -> None:
+        # Called by a file manager request upon completion. The inotify
+        # observer can now emit the websocket notification
+        for waiter in self.sync_waiters:
+            if not waiter.done():
+                waiter.set_result((self.action, self.dest_path))
+        self.sync_waiters.clear()
+        self.dest_path = None
+        self.action = ""
+        self.pending_paths.clear()
+        self.acquired_paths.clear()
+        if self.move_copy_fut is not None and not self.move_copy_fut.done():
+            self.move_copy_fut.set_exception(
+                self.server.error("Move/Copy Interrupted by call to finish")
+            )
+        self.move_copy_fut = None
+        self.check_pending = False
+
+    def add_pending_path(self, action: str, pending_path: StrOrPath) -> None:
+        # Called by the inotify observer whenever a create or move event
+        # is detected. This is only necessary to track for move/copy actions,
+        # since we don't get the final destination until the request is complete.
+        if (
+            not self.check_pending or
+            self.dest_path is None or
+            action != self.action
+        ):
+            return
+        if isinstance(pending_path, str):
+            pending_path = pathlib.Path(pending_path)
+        if self.dest_path in [pending_path, pending_path.parent]:
+            self.pending_paths.add(pending_path)
+
+    def check_in_request(
+        self, action: str, inotify_path: StrOrPath
+    ) -> Optional[asyncio.Future]:
+        # Called by the inotify observer to check if request synchronization
+        # is necessary. If so, this method will return a future the inotify
+        # observer can await.
+        if self.dest_path is None:
+            return None
+        if isinstance(inotify_path, str):
+            inotify_path = pathlib.Path(inotify_path)
+        waiter: Optional[asyncio.Future] = None
+        if self.check_pending:
+            # The final path of a move/copy request isn't known until the
+            # request is complete. It may be the destination path received
+            # from the request or it may be a child of that path.
+            if self.move_copy_fut is not None:
+                # Request is complete, metadata analysis pending. We can
+                # explicitly check for a path match
+                if self.dest_path == inotify_path:
+                    if not self.move_copy_fut.done():
+                        self.move_copy_fut.set_result(None)
+                    waiter = self.server.get_event_loop().create_future()
+            elif self.dest_path in [inotify_path, inotify_path.parent]:
+                # Request is still processing. This might be the notification
+                # for the request; it will be checked when the move/copy
+                # request awaits.
+                self.acquired_paths.add(inotify_path)
+                waiter = self.server.get_event_loop().create_future()
+        elif self.dest_path == inotify_path:
+            waiter = self.server.get_event_loop().create_future()
+        if waiter is not None:
+            self._check_action(action, inotify_path)
+            self.sync_waiters.append(waiter)
+        return waiter
+
+    def _check_action(self, action: str, path: StrOrPath) -> bool:
+        # We aren't going to set a hard filter on the sync action; however,
+        # we will log mismatches, as they shouldn't occur
+        if action != self.action:
+            logging.info(
+                f"\nInotify action mismatch:\n"
+                f"Expected action: {self.action}, Inotify action: {action}\n"
+                f"Requested path: {self.dest_path}\n"
+                f"Inotify path: {path}\n"
+                f"Is move/copy: {self.check_pending}"
+            )
+            return False
+        return True
+
+    def release(self) -> None:
+        super().release()
+        self.finish()
+
+
+class BaseFileSystemObserver:
+    def __init__(
+        self,
+        config: ConfigHelper,
+        file_manager: FileManager,
+        gcode_metadata: MetadataStorage,
+        sync_lock: NotifySyncLock
+    ) -> None:
+        self.server = config.get_server()
+        self.event_loop = self.server.get_event_loop()
+        self.enable_warn = config.getboolean("enable_observer_warnings", True)
+        self.file_manager = file_manager
+        self.gcode_metadata = gcode_metadata
+        self.sync_lock = sync_lock
+
+    @property
+    def has_fast_observe(self) -> bool:
+        return False
+
+    def initialize(self) -> None:
+        pass
+
+    def add_root_watch(self, root: str, root_path: str) -> None:
+        # Just emit the notification
+        if self.server.is_running():
+            fm = self.file_manager
+            fm._sched_changed_event("root_update", root, root_path, immediate=True)
+
+    def try_move_metadata(
+        self,
+        prev_root: str,
+        new_root: str,
+        prev_path: str,
+        new_path: str,
+        is_dir: bool = False
+    ) -> Union[bool, Awaitable]:
+        if new_root == "gcodes":
+            if prev_root == "gcodes":
+                # moved within the gcodes root, move metadata
+                fm = self.file_manager
+                gcm = self.gcode_metadata
+                prev_rel_path = fm.get_relative_path("gcodes", prev_path)
+                new_rel_path = fm.get_relative_path("gcodes", new_path)
+                if is_dir:
+                    gcm.move_directory_metadata(prev_rel_path, new_rel_path)
+                else:
+                    return gcm.move_file_metadata(prev_rel_path, new_rel_path)
+            else:
+                # move from a non-gcodes root to gcodes root needs a rescan
+                self.clear_metadata(prev_root, prev_path, is_dir)
+                return False
+        elif prev_root == "gcodes":
+            # moved out of the gcodes root, remove metadata
+            self.clear_metadata(prev_root, prev_path, is_dir)
+        return True
+
+    def clear_metadata(
+        self, root: str, path: str, is_dir: bool = False
+    ) -> None:
+        if root == "gcodes":
+            rel_path = self.file_manager.get_relative_path(root, str(path))
+            if is_dir:
+                self.gcode_metadata.remove_directory_metadata(rel_path)
+            else:
+                self.gcode_metadata.remove_file_metadata(rel_path)
+
+    def parse_gcode_metadata(self, file_path: str) -> asyncio.Event:
+        rel_path = self.file_manager.get_relative_path("gcodes", file_path)
+        ext = os.path.splitext(rel_path)[-1].lower()
+        try:
+            path_info = self.file_manager.get_path_info(file_path, "gcodes")
+        except Exception:
+            path_info = {}
+        if (
+            ext not in VALID_GCODE_EXTS or
+            path_info.get('size', 0) == 0
+        ):
+            evt = asyncio.Event()
+            evt.set()
+            return evt
+        if ext == ".ufp":
+            rel_path = os.path.splitext(rel_path)[0] + ".gcode"
+            path_info['ufp_path'] = file_path
+        return self.gcode_metadata.parse_metadata(rel_path, path_info)
+
+    def _scan_directory_metadata(
+        self, start_path: pathlib.Path
+    ) -> Optional[Awaitable]:
+        # Use os.walk to find files in the sd path and subdirs
+        mevts: List[Coroutine] = []
+        st = start_path.stat()
+        visited_dirs = {(st.st_dev, st.st_ino)}
+        for parent, dirs, files in os.walk(start_path, followlinks=True):
+            scan_dirs: List[str] = []
+            # Filter out directories that have already been visited. This
+            # prevents infinite recursion when "followlinks" is set to True
+            parent_dir = pathlib.Path(parent)
+            for dname in dirs:
+                dir_path = parent_dir.joinpath(dname)
+                if not dir_path.exists():
+                    continue
+                st = dir_path.stat()
+                key = (st.st_dev, st.st_ino)
+                if key not in visited_dirs:
+                    visited_dirs.add(key)
+                    scan_dirs.append(dname)
+            dirs[:] = scan_dirs
+            for fname in files:
+                file_path = parent_dir.joinpath(fname)
+                if (
+                    not file_path.is_file() or
+                    file_path.suffix not in VALID_GCODE_EXTS
+                ):
+                    continue
+                mevt = self.parse_gcode_metadata(str(file_path))
+                mevts.append(mevt.wait())
+        if mevts:
+            return asyncio.gather(*mevts)
+        return None
+
+    def on_item_copy(self, root: str, item_path: StrOrPath) -> Optional[Awaitable]:
+        if self.has_fast_observe:
+            return None
+        if isinstance(item_path, str):
+            item_path = pathlib.Path(item_path)
+        if root != "gcodes":
+            return None
+        if item_path.is_file() and item_path.suffix in VALID_GCODE_EXTS:
+            ret = self.parse_gcode_metadata(str(item_path))
+            return ret.wait()
+        elif item_path.is_dir():
+            return self._scan_directory_metadata(item_path)
+        return None
+
+    def on_item_move(
+        self,
+        src_root: str,
+        dest_root: str,
+        src_path: StrOrPath,
+        dest_path: StrOrPath
+    ) -> Optional[Awaitable]:
+        if self.has_fast_observe:
+            return None
+        if isinstance(src_path, str):
+            src_path = pathlib.Path(src_path)
+        if isinstance(dest_path, str):
+            dest_path = pathlib.Path(dest_path)
+        is_dir = dest_path.is_dir()
+        ret = self.try_move_metadata(
+            src_root, dest_root, str(src_path), str(dest_path), is_dir
+        )
+        if not isinstance(ret, bool):
+            return ret
+        elif ret is False:
+            # Need metadata scan
+            if is_dir:
+                return self._scan_directory_metadata(dest_path)
+            elif dest_path.is_file() and dest_path.suffix in VALID_GCODE_EXTS:
+                mevt = self.parse_gcode_metadata(str(dest_path))
+                return mevt.wait()
+        return None
+
+    def on_item_create(
+        self, root: str, item_path: StrOrPath, is_dir: bool = False
+    ) -> None:
+        pass
+
+    def on_item_delete(
+        self, root: str, item_path: StrOrPath, is_dir: bool = False
+    ) -> None:
+        if self.has_fast_observe:
+            return
+        self.clear_metadata(root, str(item_path), is_dir)
+
+    def close(self) -> None:
+        pass

 INOTIFY_BUNDLE_TIME = .25
@@ -803,19 +1465,20 @@ INOTIFY_MOVE_TIME = 1.
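Editor's note: the two constants above bound how long inotify events are batched
before a notification fires. A minimal, self-contained sketch of the same
delay-and-reset debounce pattern (an illustration only, not part of the patch;
the `EventBundler` name is hypothetical):

```python
import asyncio
from typing import Any, Callable, Dict


class EventBundler:
    """Coalesce bursts of events that share a key into a single callback."""

    def __init__(self, delay: float = 0.25) -> None:
        # 0.25 mirrors INOTIFY_BUNDLE_TIME; any small window works
        self.delay = delay
        self._pending: Dict[str, asyncio.TimerHandle] = {}

    def schedule(self, key: str, callback: Callable[..., Any], *args: Any) -> None:
        # A repeat event with the same key cancels and re-arms the timer,
        # so a rapid create/modify sequence yields one notification
        pending = self._pending.pop(key, None)
        if pending is not None:
            pending.cancel()
        loop = asyncio.get_running_loop()
        self._pending[key] = loop.call_later(
            self.delay, self._fire, key, callback, *args
        )

    def _fire(self, key: str, callback: Callable[..., Any], *args: Any) -> None:
        self._pending.pop(key, None)
        callback(*args)
```

Both `InotifyNode.add_event`/`reset_event` and `FileManager._sched_changed_event`
follow this shape, with the extra twist that a completed request may cancel the
pending handle outright.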
 class InotifyNode:
     def __init__(self,
-                 ihdlr: INotifyHandler,
+                 iobsvr: InotifyObserver,
                  parent: InotifyNode,
                  name: str
                  ) -> None:
-        self.ihdlr = ihdlr
-        self.event_loop = ihdlr.event_loop
+        self.iobsvr = iobsvr
+        self.event_loop = iobsvr.event_loop
        self.name = name
        self.parent_node = parent
        self.child_nodes: Dict[str, InotifyNode] = {}
-        self.watch_desc = self.ihdlr.add_watch(self)
+        self.watch_desc = self.iobsvr.add_watch(self)
        self.pending_node_events: Dict[str, asyncio.Handle] = {}
        self.pending_deleted_children: Set[Tuple[str, bool]] = set()
        self.pending_file_events: Dict[str, str] = {}
+        self.queued_move_notifications: List[List[str]] = []
        self.is_processing_metadata = False

    async def _finish_create_node(self) -> None:
@@ -835,9 +1498,12 @@ class InotifyNode:
            mfuts = [e.wait() for e in mevts]
            await asyncio.gather(*mfuts)
        self.is_processing_metadata = False
-        self.ihdlr.log_nodes()
-        self.ihdlr.notify_filelist_changed(
+        self.iobsvr.log_nodes()
+        self.iobsvr.notify_filelist_changed(
            "create_dir", root, node_path)
+        for args in self.queued_move_notifications:
+            self.iobsvr.notify_filelist_changed(*args)
+        self.queued_move_notifications.clear()

    def _finish_delete_child(self) -> None:
        # Items deleted in a child (node or file) are batched.
@@ -853,8 +1519,8 @@ class InotifyNode:
        for (name, is_node) in self.pending_deleted_children:
            item_path = os.path.join(node_path, name)
            item_type = "dir" if is_node else "file"
-            self.ihdlr.clear_metadata(root, item_path, is_node)
-            self.ihdlr.notify_filelist_changed(
+            self.iobsvr.clear_metadata(root, item_path, is_node)
+            self.iobsvr.notify_filelist_changed(
                f"delete_{item_type}", root, item_path)
        self.pending_deleted_children.clear()

@@ -870,18 +1536,23 @@ class InotifyNode:
        for fname in os.listdir(dir_path):
            item_path = os.path.join(dir_path, fname)
            if os.path.isdir(item_path):
+                fm = self.iobsvr.file_manager
+                if fm.check_reserved_path(item_path, True, False):
+                    continue
                new_child = self.create_child_node(fname, False)
-                metadata_events.extend(new_child.scan_node(visited_dirs))
+                if new_child is not None:
+                    metadata_events.extend(new_child.scan_node(visited_dirs))
            elif os.path.isfile(item_path) and self.get_root() == "gcodes":
-                mevt = self.ihdlr.parse_gcode_metadata(item_path)
+                mevt = self.iobsvr.parse_gcode_metadata(item_path)
                metadata_events.append(mevt)
        return metadata_events

-    async def move_child_node(self,
-                              child_name: str,
-                              new_name: str,
-                              new_parent: InotifyNode
-                              ) -> None:
+    def move_child_node(
+        self,
+        child_name: str,
+        new_name: str,
+        new_parent: InotifyNode
+    ) -> None:
        self.flush_delete()
        child_node = self.pop_child_node(child_name)
        if child_node is None:
@@ -895,17 +1566,25 @@ class InotifyNode:
        new_root = child_node.get_root()
        logging.debug(f"Moving node from '{prev_path}' to '{new_path}'")
        # Attempt to move metadata
-        move_success = await self.ihdlr.try_move_metadata(
-            prev_root, new_root, prev_path, new_path, is_dir=True)
-        if not move_success:
-            # Need rescan
-            mevts = child_node.scan_node()
-            if mevts:
-                mfuts = [e.wait() for e in mevts]
-                await asyncio.gather(*mfuts)
-        self.ihdlr.notify_filelist_changed(
-            "move_dir", new_root, new_path,
-            prev_root, prev_path)
+        move_res = self.iobsvr.try_move_metadata(
+            prev_root, new_root, prev_path, new_path, is_dir=True
+        )
+        if new_root == "gcodes":
+            async def _notify_move_dir():
+                if move_res is False:
+                    # Need rescan
+                    mevts = child_node.scan_node()
+                    if mevts:
+                        mfuts = [e.wait() for e in mevts]
+                        await asyncio.gather(*mfuts)
+                self.iobsvr.notify_filelist_changed(
+                    "move_dir", new_root, new_path, prev_root, prev_path
+                )
+            self.iobsvr.queue_gcode_notification(_notify_move_dir())
+        else:
+            self.iobsvr.notify_filelist_changed(
+                "move_dir", new_root, new_path, prev_root, prev_path
+            )

    def schedule_file_event(self, file_name: str, evt_name: str) -> None:
        if file_name in self.pending_file_events:
@@ -915,7 +1594,7 @@ class InotifyNode:
            pending_node.stop_event("create_node")
        self.pending_file_events[file_name] = evt_name

-    async def complete_file_write(self, file_name: str) -> None:
+    def complete_file_write(self, file_name: str) -> None:
        self.flush_delete()
        evt_name = self.pending_file_events.pop(file_name, None)
        if evt_name is None:
@@ -934,12 +1613,15 @@ class InotifyNode:
        file_path = os.path.join(self.get_path(), file_name)
        root = self.get_root()
        if root == "gcodes":
-            mevt = self.ihdlr.parse_gcode_metadata(file_path)
-            if os.path.splitext(file_path)[1].lower() == ".ufp":
-                # don't notify .ufp files
-                return
-            await mevt.wait()
-        self.ihdlr.notify_filelist_changed(evt_name, root, file_path)
+            if self.iobsvr.need_create_notify(file_path):
+                async def _notify_file_write():
+                    mevt = self.iobsvr.parse_gcode_metadata(file_path)
+                    await mevt.wait()
+                    self.iobsvr.notify_filelist_changed(evt_name, root, file_path)
+                    self.iobsvr.clear_processing_file(file_path)
+                self.iobsvr.queue_gcode_notification(_notify_file_write())
+        else:
+            self.iobsvr.notify_filelist_changed(evt_name, root, file_path)

    def add_child_node(self, node: InotifyNode) -> None:
        self.child_nodes[node.name] = node
@@ -954,11 +1636,16 @@ class InotifyNode:
    def create_child_node(self,
                          name: str,
                          notify: bool = True
-                          ) -> InotifyNode:
+                          ) -> Optional[InotifyNode]:
        self.flush_delete()
        if name in self.child_nodes:
            return self.child_nodes[name]
-        new_child = InotifyNode(self.ihdlr, self, name)
+        try:
+            new_child = InotifyNode(self.iobsvr, self, name)
+        except Exception:
+            # This node is already watched under another root,
+            # bypass creation
+            return None
        self.child_nodes[name] = new_child
        if notify:
            pending_node = self.search_pending_event("create_node")
@@ -974,7 +1661,7 @@ class InotifyNode:
        child_node = self.child_nodes.pop(child_name, None)
        if child_node is None:
            return
-        self.ihdlr.remove_watch(
+        self.iobsvr.remove_watch(
            child_node.watch_desc, need_low_level_rm=False)
        child_node.remove_event("delete_child")
        self.pending_deleted_children.add((child_name, is_node))
@@ -984,7 +1671,7 @@ class InotifyNode:
        for cnode in self.child_nodes.values():
            # Delete all of the children's children
            cnode.clear_watches()
-        self.ihdlr.remove_watch(self.watch_desc)
+        self.iobsvr.remove_watch(self.watch_desc)

    def get_path(self) -> str:
        return os.path.join(self.parent_node.get_path(), self.name)
@@ -997,6 +1684,9 @@ class InotifyNode:
            return True
        return self.parent_node.is_processing()

+    def has_child_node(self, child_name: str):
+        return child_name in self.child_nodes
+
    def add_event(self, evt_name: str, timeout: float) -> None:
        if evt_name in self.pending_node_events:
            self.reset_event(evt_name, timeout)
@@ -1045,14 +1735,37 @@ class InotifyNode:
            return self
        return self.parent_node.search_pending_event(name)

+    def find_pending_node(self) -> Optional[InotifyNode]:
+        if (
+            self.is_processing_metadata or
+            "create_node" in self.pending_node_events
+        ):
+            return self
+        return self.parent_node.find_pending_node()
+
+    def queue_move_notification(self, args: List[str]) -> None:
+        if (
+            self.is_processing_metadata or
+            "create_node" in self.pending_node_events
+        ):
+            self.queued_move_notifications.append(args)
+        else:
+            if self.iobsvr.server.is_verbose_enabled():
+                path = self.get_path()
+                logging.debug(
+                    f"Node {path} received a move notification queue request, "
+                    f"but the node is not pending: {args}"
+                )
+            self.iobsvr.notify_filelist_changed(*args)
+

 class InotifyRootNode(InotifyNode):
     def __init__(self,
-                 ihdlr: INotifyHandler,
+                 iobsvr: InotifyObserver,
                  root_name: str,
                  root_path: str
                  ) -> None:
        self.root_name = root_name
-        super().__init__(ihdlr, self, root_path)
+        super().__init__(iobsvr, self, root_path)

    def get_path(self) -> str:
        return self.name
@@ -1068,90 +1781,65 @@ class InotifyRootNode(InotifyNode):
    def is_processing(self) -> bool:
        return self.is_processing_metadata

-class NotifySyncLock:
-    def __init__(self, dest_path: str) -> None:
-        self.wait_fut: Optional[asyncio.Future] = None
-        self.sync_event = asyncio.Event()
-        self.dest_path = dest_path
-        self.notified_paths: Set[str] = set()
-        self.finished: bool = False
-
-    def update_dest(self, dest_path: str) -> None:
-        self.dest_path = dest_path
-
-    def check_need_sync(self, path: str) -> bool:
-        return self.dest_path in [path, os.path.dirname(path)] \
-            and not self.finished
-
-    async def wait(self, timeout: Optional[float] = None) -> None:
-        if self.finished or self.wait_fut is not None:
-            # Can only wait once
-            return
-        if self.dest_path not in self.notified_paths:
-            self.wait_fut = asyncio.Future()
-            if timeout is None:
-                await self.wait_fut
-            else:
-                try:
-                    await asyncio.wait_for(self.wait_fut, timeout)
-                except asyncio.TimeoutError:
-                    pass
-        self.sync_event.set()
-        self.finished = True
-
-    async def sync(self, path, timeout: Optional[float] = None) -> None:
-        if not self.check_need_sync(path):
-            return
-        self.notified_paths.add(path)
+    def find_pending_node(self) -> Optional[InotifyNode]:
        if (
-            self.wait_fut is not None and
-            not self.wait_fut.done() and
-            self.dest_path == path
+            self.is_processing_metadata or
+            "create_node" in self.pending_node_events
        ):
-            self.wait_fut.set_result(None)
-            # Transfer control to waiter
-            try:
-                await asyncio.wait_for(self.sync_event.wait(), timeout)
-            except Exception:
-                pass
-        else:
-            # Sleep an additional 5ms to give HTTP requests a chance to
-            # return prior to a notification
-            await asyncio.sleep(.005)
+            return self
+        return None

-    def cancel(self) -> None:
-        if self.finished:
-            return
-        if self.wait_fut is not None and not self.wait_fut.done():
-            self.wait_fut.set_result(None)
-        self.sync_event.set()
-        self.finished = True
-
-class INotifyHandler:
-    def __init__(self,
-                 config: ConfigHelper,
-                 file_manager: FileManager,
-                 gcode_metadata: MetadataStorage
-                 ) -> None:
-        self.server = config.get_server()
-        self.event_loop = self.server.get_event_loop()
-        self.debug_enabled = config['server'].getboolean(
-            'enable_debug_logging', False)
-        self.file_manager = file_manager
-        self.gcode_metadata = gcode_metadata
+class InotifyObserver(BaseFileSystemObserver):
+    def __init__(
+        self,
+        config: ConfigHelper,
+        file_manager: FileManager,
+        gcode_metadata: MetadataStorage,
+        sync_lock: NotifySyncLock
+    ) -> None:
+        super().__init__(config, file_manager, gcode_metadata, sync_lock)
+        self.enable_warn = config.getboolean(
+            "enable_inotify_warnings", self.enable_warn, deprecate=True
+        )
        self.inotify = INotify(nonblocking=True)
        self.event_loop.add_reader(
            self.inotify.fileno(), self._handle_inotify_read)
-
-        self.node_loop_busy: bool = False
-        self.pending_inotify_events: List[InotifyEvent] = []
-
        self.watched_roots: Dict[str, InotifyRootNode] = {}
        self.watched_nodes: Dict[int, InotifyNode] = {}
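+        # NOTE (editorial comment): pending_moves maps an inotify cookie to
+        # (parent node, child name, timeout handle). A MOVED_FROM event waits
+        # up to INOTIFY_MOVE_TIME for its matching MOVED_TO; if the cookie
+        # never resolves, _handle_move_timeout treats the item as deleted.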
        self.pending_moves: Dict[
            int, Tuple[InotifyNode, str, asyncio.Handle]] = {}
-        self.create_gcode_notifications: Dict[str, Any] = {}
        self.initialized: bool = False
+        self.processing_gcode_files: Set[str] = set()
+        self.pending_coroutines: List[Coroutine] = []
+        self._gc_notify_task: Optional[asyncio.Task] = None
+
+    @property
+    def has_fast_observe(self) -> bool:
+        return True
+
+    # Override and pass the callbacks from the request handlers. Inotify
+    # detects events quickly and takes any required actions
+    def on_item_create(
+        self, root: str, item_path: StrOrPath, is_dir: bool = False
+    ) -> None:
+        pass
+
+    def on_item_delete(
+        self, root: str, item_path: StrOrPath, is_dir: bool = False
+    ) -> None:
+        pass
+
+    def on_item_move(
+        self,
+        src_root: str,
+        dest_root: str,
+        src_path: StrOrPath,
+        dest_path: StrOrPath
+    ) -> Optional[Awaitable]:
+        return None
+
+    def on_item_copy(self, root: str, item_path: StrOrPath) -> Optional[Awaitable]:
+        return None

    def add_root_watch(self, root: str, root_path: str) -> None:
        # remove all existing watches on root
@@ -1159,17 +1847,44 @@ class INotifyHandler:
            old_root = self.watched_roots.pop(root)
            old_root.clear_watches()
            old_root.clear_events()
-        root_node = InotifyRootNode(self, root, root_path)
+        try:
+            root_node = InotifyRootNode(self, root, root_path)
+        except Exception:
+            logging.exception(f"Inotify: failed to create root node '{root}'")
+            self.server.add_warning(
+                f"file_manager: Failed to create inotify root node {root}. "
+                "See moonraker.log for details.",
+                log=False
+            )
+            return
        self.watched_roots[root] = root_node
        if self.initialized:
-            mevts = root_node.scan_node()
+            try:
+                mevts = root_node.scan_node()
+            except Exception:
+                logging.exception(f"Inotify: failed to scan root '{root}'")
+                self.server.add_warning(
+                    f"file_manager: Failed to scan inotify root node '{root}'. "
+                    "See moonraker.log for details.",
+                    log=False
+                )
+                return
            self.log_nodes()
            self.event_loop.register_callback(
                self._notify_root_updated, mevts, root, root_path)

-    def initalize_roots(self):
+    def initialize(self) -> None:
        for root, node in self.watched_roots.items():
-            evts = node.scan_node()
+            try:
+                evts = node.scan_node()
+            except Exception:
+                logging.exception(f"Inotify: failed to scan root '{root}'")
+                self.server.add_warning(
+                    f"file_manager: Failed to scan inotify root node '{root}'. "
+                    "See moonraker.log for details.",
+                    log=False
+                )
+                continue
            if not evts:
                continue
            root_path = node.get_path()
@@ -1195,9 +1910,31 @@ class INotifyHandler:
        dir_path = node.get_path()
        try:
            watch: int = self.inotify.add_watch(dir_path, WATCH_FLAGS)
-        except OSError:
-            logging.exception(
-                f"Error adding watch, already exists: {dir_path}")
+        except Exception:
+            msg = (
+                f"Error adding inotify watch to root '{node.get_root()}', "
+                f"path: {dir_path}"
+            )
+            logging.exception(msg)
+            if self.enable_warn:
+                msg = f"file_manager: {msg}"
+                self.server.add_warning(msg, log=False)
+            raise
+        if watch in self.watched_nodes:
+            root = node.get_root()
+            cur_node = self.watched_nodes[watch]
+            existing_root = cur_node.get_root()
+            msg = (
+                f"Inotify watch already exists for path '{dir_path}' in "
+                f"root '{existing_root}', cannot add watch to requested root "
+                f"'{root}'. This indicates that the roots overlap."
+            )
+            if self.enable_warn:
+                msg = f"file_manager: {msg}"
+                self.server.add_warning(msg)
+            else:
+                logging.info(msg)
+            raise self.server.error("Watch already exists")
        self.watched_nodes[watch] = node
        return watch

@@ -1212,50 +1949,9 @@ class INotifyHandler:
        except Exception:
            logging.exception(f"Error removing watch: '{node.get_path()}'")

-    def clear_metadata(self,
-                       root: str,
-                       path: str,
-                       is_dir: bool = False
-                       ) -> None:
-        if root == "gcodes":
-            rel_path = self.file_manager.get_relative_path(root, path)
-            if is_dir:
-                self.gcode_metadata.remove_directory_metadata(rel_path)
-            else:
-                self.gcode_metadata.remove_file_metadata(rel_path)
-
-    async def try_move_metadata(self,
-                                prev_root: str,
-                                new_root: str,
-                                prev_path: str,
-                                new_path: str,
-                                is_dir: bool = False
-                                ) -> bool:
-        if new_root == "gcodes":
-            if prev_root == "gcodes":
-                # moved within the gcodes root, move metadata
-                prev_rel_path = self.file_manager.get_relative_path(
-                    "gcodes", prev_path)
-                new_rel_path = self.file_manager.get_relative_path(
-                    "gcodes", new_path)
-                if is_dir:
-                    await self.gcode_metadata.move_directory_metadata(
-                        prev_rel_path, new_rel_path)
-                else:
-                    return await self.gcode_metadata.move_file_metadata(
-                        prev_rel_path, new_rel_path)
-            else:
-                # move from a non-gcodes root to gcodes root needs a rescan
-                self.clear_metadata(prev_root, prev_path, is_dir)
-                return False
-        elif prev_root == "gcodes":
-            # moved out of the gcodes root, remove metadata
-            self.clear_metadata(prev_root, prev_path, is_dir)
-        return True
-
    def log_nodes(self) -> None:
-        if self.debug_enabled:
-            debug_msg = f"Inotify Watches After Scan:"
+        if self.server.is_verbose_enabled():
+            debug_msg = "Inotify Watches After Scan:"
            for wdesc, node in self.watched_nodes.items():
                wdir = node.get_path()
                wroot = node.get_root()
@@ -1263,29 +1959,10 @@ class INotifyHandler:
                    f"Watch: {wdesc}"
            logging.debug(debug_msg)

-    def parse_gcode_metadata(self, file_path: str) -> asyncio.Event:
-        rel_path = self.file_manager.get_relative_path("gcodes", file_path)
-        ext = os.path.splitext(rel_path)[-1].lower()
-        try:
-            path_info = self.file_manager.get_path_info(file_path, "gcodes")
-        except Exception:
-            path_info = {}
-        if (
-            ext not in VALID_GCODE_EXTS or
-            path_info.get('size', 0) == 0
-        ):
-            evt = asyncio.Event()
-            evt.set()
-            return evt
-        if ext == ".ufp":
-            rel_path = os.path.splitext(rel_path)[0] + ".gcode"
-            path_info['ufp_path'] = file_path
-        return self.gcode_metadata.parse_metadata(rel_path, path_info)
-
    def _handle_move_timeout(self, cookie: int, is_dir: bool):
        if cookie not in self.pending_moves:
            return
-        parent_node, name, hdl = self.pending_moves.pop(cookie)
+        parent_node, name, _ = self.pending_moves.pop(cookie)
        item_path = os.path.join(parent_node.get_path(), name)
        root = parent_node.get_root()
        self.clear_metadata(root, item_path, is_dir)
@@ -1301,11 +1978,9 @@ class INotifyHandler:
            action = "delete_dir"
        self.notify_filelist_changed(action, root, item_path)

-    def _schedule_pending_move(self,
-                               evt: InotifyEvent,
-                               parent_node: InotifyNode,
-                               is_dir: bool
-                               ) -> None:
+    def _schedule_pending_move(
+        self, evt: InotifyEvent, parent_node: InotifyNode, is_dir: bool
+    ) -> None:
        hdl = self.event_loop.delay_callback(
            INOTIFY_MOVE_TIME, self._handle_move_timeout,
            evt.cookie, is_dir)
@@ -1323,61 +1998,71 @@ class INotifyHandler:
                    f"not currently tracked: name: {evt.name}, "
                    f"flags: {flags}")
                continue
-            self.pending_inotify_events.append(evt)
-            if not self.node_loop_busy:
-                self.node_loop_busy = True
-                self.event_loop.register_callback(self._process_inotify_events)
-
-    async def _process_inotify_events(self) -> None:
-        while self.pending_inotify_events:
-            evt = self.pending_inotify_events.pop(0)
            node = self.watched_nodes[evt.wd]
            if evt.mask & iFlags.ISDIR:
-                await self._process_dir_event(evt, node)
+                self._process_dir_event(evt, node)
            else:
-                await self._process_file_event(evt, node)
-        self.node_loop_busy = False
+                self._process_file_event(evt, node)

-    async def _process_dir_event(self,
-                                 evt: InotifyEvent,
-                                 node: InotifyNode
-                                 ) -> None:
+    def _process_dir_event(self, evt: InotifyEvent, node: InotifyNode) -> None:
        if evt.name in ['.', ".."]:
            # ignore events for self and parent
            return
        root = node.get_root()
        node_path = node.get_path()
+        full_path = os.path.join(node_path, evt.name)
        if evt.mask & iFlags.CREATE:
-            logging.debug(f"Inotify directory create: {root}, "
-                          f"{node_path}, {evt.name}")
-            node.create_child_node(evt.name)
+            logging.debug(f"Inotify directory create: {root}, {node_path}, {evt.name}")
+            if self.file_manager.check_reserved_path(full_path, True, False):
+                logging.debug(
+                    f"Inotify - ignoring create watch at reserved path: {full_path}"
+                )
+            else:
+                self.sync_lock.add_pending_path("create_dir", full_path)
+                node.create_child_node(evt.name)
        elif evt.mask & iFlags.DELETE:
-            logging.debug(f"Inotify directory delete: {root}, "
-                          f"{node_path}, {evt.name}")
+            logging.debug(f"Inotify directory delete: {root}, {node_path}, {evt.name}")
            node.schedule_child_delete(evt.name, True)
        elif evt.mask & iFlags.MOVED_FROM:
-            logging.debug(f"Inotify directory move from: {root}, "
-                          f"{node_path}, {evt.name}")
-            self._schedule_pending_move(evt, node, True)
+            logging.debug(
+                f"Inotify directory move from: {root}, {node_path}, {evt.name}"
+            )
+            if node.has_child_node(evt.name):
+                self._schedule_pending_move(evt, node, True)
+            else:
+                logging.debug(
+                    f"Inotify - Child node with name {evt.name} does not exist"
+                )
        elif evt.mask & iFlags.MOVED_TO:
-            logging.debug(f"Inotify directory move to: {root}, "
-                          f"{node_path}, {evt.name}")
+            logging.debug(f"Inotify directory move to: {root}, {node_path}, {evt.name}")
            moved_evt = self.pending_moves.pop(evt.cookie, None)
            if moved_evt is not None:
+                self.sync_lock.add_pending_path("move_dir", full_path)
                # Moved from a currently watched directory
                prev_parent, child_name, hdl = moved_evt
                hdl.cancel()
-                await prev_parent.move_child_node(child_name, evt.name, node)
+                if self.file_manager.check_reserved_path(full_path, True, False):
+                    # Previous node was renamed/moved to a reserved path. To API
+                    # consumers this will appear as deleted
+                    logging.debug(
+                        f"Inotify - deleting prev folder {child_name} moved to "
+                        f"reserved path: {full_path}"
+                    )
+                    prev_parent.schedule_child_delete(child_name, True)
+                else:
+                    prev_parent.move_child_node(child_name, evt.name, node)
            else:
-                # Moved from an unwatched directory, for our
-                # purposes this is the same as creating a
-                # directory
-                node.create_child_node(evt.name)
+                # Moved from an unwatched directory, for our purposes this is the same
+                # as creating a directory
+                if self.file_manager.check_reserved_path(full_path, True, False):
+                    logging.debug(
+                        f"Inotify - ignoring moved folder to reserved path: {full_path}"
+                    )
+                else:
+                    self.sync_lock.add_pending_path("create_dir", full_path)
+                    node.create_child_node(evt.name)

-    async def _process_file_event(self,
-                                  evt: InotifyEvent,
-                                  node: InotifyNode
-                                  ) -> None:
+    def _process_file_event(self, evt: InotifyEvent, node: InotifyNode) -> None:
        ext: str = os.path.splitext(evt.name)[-1].lower()
        root = node.get_root()
        node_path = node.get_path()
@@ -1385,10 +2070,11 @@ class INotifyHandler:
        if evt.mask & iFlags.CREATE:
            logging.debug(f"Inotify file create: {root}, "
                          f"{node_path}, {evt.name}")
+            self.sync_lock.add_pending_path("create_file", file_path)
            node.schedule_file_event(evt.name, "create_file")
            if os.path.islink(file_path):
                logging.debug(f"Inotify symlink create: {file_path}")
-                await node.complete_file_write(evt.name)
+                node.complete_file_write(evt.name)
        elif evt.mask & iFlags.DELETE:
            logging.debug(f"Inotify file delete: {root}, "
                          f"{node_path}, {evt.name}")
@@ -1405,40 +2091,119 @@ class INotifyHandler:
                          f"{node_path}, {evt.name}")
            node.flush_delete()
            moved_evt = self.pending_moves.pop(evt.cookie, None)
-            # Don't emit file events if the node is processing metadata
-            can_notify = not node.is_processing()
+            pending_node = node.find_pending_node()
            if moved_evt is not None:
                # Moved from a currently watched directory
+                self.sync_lock.add_pending_path("move_file", file_path)
                prev_parent, prev_name, hdl = moved_evt
                hdl.cancel()
                prev_root = prev_parent.get_root()
                prev_path = os.path.join(prev_parent.get_path(), prev_name)
-                move_success = await self.try_move_metadata(
-                    prev_root, root, prev_path, file_path)
-                if not move_success:
-                    # Unable to move, metadata needs parsing
-                    mevt = self.parse_gcode_metadata(file_path)
-                    await mevt.wait()
-                if can_notify:
-                    self.notify_filelist_changed(
-                        "move_file", root, file_path,
-                        prev_root, prev_path)
-            else:
+                move_res = self.try_move_metadata(prev_root, root, prev_path, file_path)
                if root == "gcodes":
-                    mevt = self.parse_gcode_metadata(file_path)
-                    await mevt.wait()
-                if can_notify:
-                    self.notify_filelist_changed(
-                        "create_file", root, file_path)
-            if not can_notify:
-                logging.debug("Metadata is processing, suppressing move "
-                              f"notification: {file_path}")
+                    coro = self._finish_gcode_move(
+                        root, prev_root, file_path, prev_path, pending_node, move_res
+                    )
+                    self.queue_gcode_notification(coro)
+                else:
+                    args = ["move_file", root, file_path, prev_root, prev_path]
+                    if pending_node is None:
+                        self.notify_filelist_changed(*args)
+                    else:
+                        pending_node.queue_move_notification(args)
+            else:
+                if pending_node is not None:
+                    logging.debug(
+                        "Parent node is processing, suppressing 'create from move' "
+                        f"notification: {file_path}"
+                    )
+                    pending_node.reset_event("create_node", INOTIFY_BUNDLE_TIME)
+                    if root == "gcodes":
+                        self.parse_gcode_metadata(file_path)
+                    return
+                self.sync_lock.add_pending_path("create_file", file_path)
+                if root == "gcodes":
+                    if self.need_create_notify(file_path):
+                        coro = self._finish_gcode_create_from_move(file_path)
+                        self.queue_gcode_notification(coro)
+                else:
+                    self.notify_filelist_changed("create_file", root, file_path)
        elif evt.mask & iFlags.MODIFY:
+            self.sync_lock.add_pending_path("modify_file", file_path)
            node.schedule_file_event(evt.name, "modify_file")
        elif evt.mask & iFlags.CLOSE_WRITE:
            logging.debug(f"Inotify writable file closed: {file_path}")
            # Only process files that have been created or modified
-            await node.complete_file_write(evt.name)
+            node.complete_file_write(evt.name)
+
+    async def _finish_gcode_move(
+        self,
+        root: str,
+        prev_root: str,
+        file_path: str,
+        prev_path: str,
+        pending_node: Optional[InotifyNode],
+        move_result: Union[bool, Awaitable]
+    ) -> None:
+        if not isinstance(move_result, bool):
+            await move_result
+        elif not move_result:
+            # Unable to move, metadata needs parsing
+            mevt = self.parse_gcode_metadata(file_path)
+            await mevt.wait()
+        args = ["move_file", root, file_path, prev_root, prev_path]
+        if pending_node is None:
+            self.notify_filelist_changed(*args)
+        else:
+            pending_node.queue_move_notification(args)
+
+    async def _finish_gcode_create_from_move(self, file_path: str) -> None:
+        mevt = self.parse_gcode_metadata(file_path)
+        await mevt.wait()
+        self.notify_filelist_changed("create_file", "gcodes", file_path)
+        self.clear_processing_file(file_path)
+
+    def need_create_notify(self, file_path: str) -> bool:
+        # We don't want to emit duplicate notifications, which may occur
+        # during metadata processing if the file needs to undergo object
+        # processing.
+        ext = os.path.splitext(file_path)[1].lower()
+        if ext == ".ufp":
+            # Queue the ufp file for parsing and return False, we do not
+            # want to notify the ufp since it will be removed.
+            self.parse_gcode_metadata(file_path)
+            return False
+        elif ext not in VALID_GCODE_EXTS:
+            return True
+        rel_path = self.file_manager.get_relative_path("gcodes", file_path)
+        if (
+            self.gcode_metadata.is_file_processing(rel_path) and
+            rel_path in self.processing_gcode_files
+        ):
+            logging.debug(
+                f"Inotify file create event received for file '{rel_path}' during "
+                f"metadata processing. Suppressing notification."
+            )
+            return False
+        self.processing_gcode_files.add(rel_path)
+        return True
+
+    def clear_processing_file(self, file_path: str) -> None:
+        rel_path = self.file_manager.get_relative_path("gcodes", file_path)
+        self.processing_gcode_files.discard(rel_path)
+
+    def queue_gcode_notification(self, coro: Coroutine) -> None:
+        self.pending_coroutines.append(coro)
+        if self._gc_notify_task is None:
+            self._gc_notify_task = self.event_loop.create_task(
+                self._process_gcode_notifications()
+            )
+
+    async def _process_gcode_notifications(self) -> None:
+        while self.pending_coroutines:
+            coro = self.pending_coroutines.pop(0)
+            await coro
+        self._gc_notify_task = None

    def notify_filelist_changed(self,
                                action: str,
@@ -1448,31 +2213,23 @@ class INotifyHandler:
                                source_path: Optional[str] = None
                                ) -> None:
        rel_path = self.file_manager.get_relative_path(root, full_path)
-        file_info: Dict[str, Any] = {'size': 0, 'modified': 0}
-        is_valid = True
+        sync_fut = self.sync_lock.check_in_request(action, full_path)
+        file_info: Dict[str, Any] = {'size': 0, 'modified': 0, "permissions": ""}
        if os.path.exists(full_path):
            try:
                file_info = self.file_manager.get_path_info(full_path, root)
            except Exception:
-                is_valid = False
+                logging.debug(
+                    f"Invalid Filelist Notification Request, root: {root}, "
+                    f"path: {full_path} - Failed to get path info")
+                return
        elif action not in ["delete_file", "delete_dir"]:
-            is_valid = False
-        ext = os.path.splitext(rel_path)[-1].lower()
-        if (
-            is_valid and
-            root == "gcodes" and
-            ext in VALID_GCODE_EXTS and
-            action == "create_file"
-        ):
-            prev_info = self.create_gcode_notifications.get(rel_path, {})
-            if file_info == prev_info:
-                logging.debug("Ignoring duplicate 'create_file' "
-                              f"notification: {rel_path}")
-                is_valid = False
-            else:
-                self.create_gcode_notifications[rel_path] = dict(file_info)
-        elif rel_path in self.create_gcode_notifications:
-            del self.create_gcode_notifications[rel_path]
+            logging.debug(
+                f"Invalid Filelist Notification Request, root: {root}, "
+                f"path: {full_path} - Action {action} received for file "
+                "that does not exist"
+            )
+            return
        file_info['path'] = rel_path
        file_info['root'] = root
        result = {'action': action, 'item': file_info}
@@ -1480,26 +2237,30 @@ class INotifyHandler:
            src_rel_path = self.file_manager.get_relative_path(
                source_root, source_path)
            result['source_item'] = {'path': src_rel_path, 'root': source_root}
-        sync_lock = self.file_manager.sync_inotify_event(full_path)
-        if sync_lock is not None:
-            # Delay this notification so that it occurs after an item
-            logging.debug(f"Syncing notification: {full_path}")
-            self.event_loop.register_callback(
-                self._sync_with_request, result,
-                sync_lock.sync(full_path), is_valid)
-        elif is_valid:
-            self.server.send_event("file_manager:filelist_changed", result)
+        key = f"{action}-{root}-{rel_path}"
+        self.event_loop.create_task(
+            self._finish_notify(result, sync_fut, key)
+        )

-    async def _sync_with_request(self,
-                                 result: Dict[str, Any],
-                                 sync_fut: Coroutine,
-                                 is_valid: bool
-                                 ) -> None:
-        await sync_fut
-        if is_valid:
-            self.server.send_event("file_manager:filelist_changed", result)
+    async def _finish_notify(
+        self,
+        result: Dict[str, Any],
+        sync_fut: Optional[asyncio.Future],
+        notify_key: str
+    ) -> None:
+        if sync_fut is not None:
+            logging.debug(f"Syncing notification: {notify_key}")
+            await sync_fut
+        self.file_manager.cancel_notification(notify_key)
+        await asyncio.sleep(.005)
+        self.server.send_event("file_manager:filelist_changed", result)

    def close(self) -> None:
+        while self.pending_coroutines:
+            coro = self.pending_coroutines.pop(0)
+            coro.close()
+        if self._gc_notify_task is not None:
+            self._gc_notify_task.cancel()
        self.event_loop.remove_reader(self.inotify.fileno())
        for watch in self.watched_nodes.keys():
            try:
@@ -1514,13 +2275,12 @@ METADATA_VERSION = 3

 class MetadataStorage:
     def __init__(self,
                  config: ConfigHelper,
-                 gc_path: str,
                  db: DBComp
                  ) -> None:
        self.server = config.get_server()
        self.enable_object_proc = config.getboolean(
            'enable_object_processing', False)
-        self.gc_path = gc_path
+        self.gc_path = ""
        db.register_local_namespace(METADATA_NAMESPACE)
        self.mddb = db.wrap_namespace(
            METADATA_NAMESPACE, parse_keys=False)
@@ -1542,6 +2302,7 @@ class MetadataStorage:
            str, Tuple[Dict[str, Any], asyncio.Event]] = {}
        self.busy: bool = False

+    def prune_storage(self) -> None:
        # Check for removed gcode files while moonraker was shutdown
        if self.gc_path:
            del_keys: List[str] = []
@@ -1571,13 +2332,14 @@ class MetadataStorage:
    def update_gcode_path(self, path: str) -> None:
        if path == self.gc_path:
            return
-        self.metadata.clear()
-        self.mddb.clear()
+        if self.gc_path:
+            self.metadata.clear()
+            self.mddb.clear()
        self.gc_path = path

    def get(self,
            key: str,
-            default: _T = None
+            default: Optional[_T] = None
            ) -> Union[_T, Dict[str, Any]]:
        return deepcopy(self.metadata.get(key, default))

@@ -1586,6 +2348,12 @@ class MetadataStorage:
            self.metadata[key] = val
            self.mddb[key] = val

+    def is_processing(self) -> bool:
+        return len(self.pending_requests) > 0
+
+    def is_file_processing(self, fname: str) -> bool:
+        return fname in self.pending_requests
+
    def _has_valid_data(self,
                        fname: str,
                        path_info: Dict[str, Any]
@@ -1600,7 +2368,7 @@ class MetadataStorage:
                return False
        return True

-    def remove_directory_metadata(self, dir_name: str) -> None:
+    def remove_directory_metadata(self, dir_name: str) -> Optional[Awaitable]:
        if dir_name[-1] != "/":
            dir_name += "/"
        del_items: Dict[str, Any] = {}
@@ -1614,15 +2382,16 @@ class MetadataStorage:
            self.mddb.delete_batch(list(del_items.keys()))
            eventloop = self.server.get_event_loop()
            # Remove thumbs in another thread
-            eventloop.run_in_thread(self._remove_thumbs, del_items)
+            return eventloop.run_in_thread(self._remove_thumbs, del_items)
+        return None

-    def remove_file_metadata(self, fname: str) -> None:
+    def remove_file_metadata(self, fname: str) -> Optional[Awaitable]:
        md: Optional[Dict[str, Any]] = self.metadata.pop(fname, None)
        if md is None:
-            return
+            return None
        self.mddb.pop(fname, None)
        eventloop = self.server.get_event_loop()
-        eventloop.run_in_thread(self._remove_thumbs, {fname: md})
+        return eventloop.run_in_thread(self._remove_thumbs, {fname: md})

    def _remove_thumbs(self, records: Dict[str, Dict[str, Any]]) -> None:
        for fname, metadata in records.items():
@@ -1642,10 +2411,7 @@ class MetadataStorage:
            except Exception:
                logging.debug(f"Error removing thumb at {thumb_path}")

-    async def move_directory_metadata(self,
-                                      prev_dir: str,
-                                      new_dir: str
-                                      ) -> None:
+    def move_directory_metadata(self, prev_dir: str, new_dir: str) -> None:
        if prev_dir[-1] != "/":
            prev_dir += "/"
        moved: List[Tuple[str, str, Dict[str, Any]]] = []
@@ -1662,14 +2428,12 @@ class MetadataStorage:
            source = [m[0] for m in moved]
            dest = [m[1] for m in moved]
            self.mddb.move_batch(source, dest)
-            eventloop = self.server.get_event_loop()
-            await eventloop.run_in_thread(self._move_thumbnails, moved)
+            # It shouldn't be necessary to move the thumbnails
+            # as they would be moved with the parent directory

-    async def move_file_metadata(self,
-                                 prev_fname: str,
-                                 new_fname: str,
-                                 move_thumbs: bool = True
-                                 ) -> bool:
+    def move_file_metadata(
+        self, prev_fname: str, new_fname: str
+    ) -> Union[bool, Awaitable]:
        metadata: Optional[Dict[str, Any]]
        metadata = self.metadata.pop(prev_fname, None)
        if metadata is None:
@@ -1679,18 +2443,15 @@ class MetadataStorage:
        if self.metadata.pop(new_fname, None) is not None:
            self.mddb.pop(new_fname, None)
            return False
+        self.metadata[new_fname] = metadata
        self.mddb.move_batch([prev_fname], [new_fname])
-        if move_thumbs:
-            eventloop = self.server.get_event_loop()
-            await eventloop.run_in_thread(
-                self._move_thumbnails,
-                [(prev_fname, new_fname, metadata)])
-        return True
+        return self._move_thumbnails([(prev_fname, new_fname, metadata)])

-    def _move_thumbnails(self,
-                         records: List[Tuple[str, str, Dict[str, Any]]]
-                         ) -> None:
+    async def _move_thumbnails(
+        self, records: List[Tuple[str, str, Dict[str, Any]]]
+    ) -> None:
+        eventloop = self.server.get_event_loop()
        for (prev_fname, new_fname, metadata) in records:
            prev_dir = os.path.dirname(os.path.join(self.gc_path, prev_fname))
            new_dir = os.path.dirname(os.path.join(self.gc_path, new_fname))
@@ -1704,12 +2465,21 @@ class MetadataStorage:
                if not os.path.isfile(thumb_path):
                    continue
                new_path = os.path.join(new_dir, path)
+                new_parent = os.path.dirname(new_path)
                try:
-                    os.makedirs(os.path.dirname(new_path), exist_ok=True)
-                    shutil.move(thumb_path, new_path)
+                    if not os.path.exists(new_parent):
+                        os.mkdir(new_parent)
+                    # Wait for inotify to register the node before the move
+                    await asyncio.sleep(.2)
+                    await eventloop.run_in_thread(
+                        shutil.move, thumb_path, new_path
+                    )
+                except asyncio.CancelledError:
+                    raise
                except Exception:
-                    logging.debug(f"Error moving thumb from {thumb_path}"
-                                  f" to {new_path}")
+                    logging.exception(
+                        f"Error moving thumb from {thumb_path} to {new_path}"
+                    )

    def parse_metadata(self,
                       fname: str,
@@ -1783,11 +2553,13 @@ class MetadataStorage:
        if self.enable_object_proc:
            timeout = 300.
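+            # NOTE (editorial comment): metadata extraction shells out to the
+            # file_manager/metadata.py script; with object processing enabled
+            # the file may be rewritten in place, hence the longer timeout.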
            cmd += " --check-objects"
-        shell_cmd: SCMDComp = self.server.lookup_component('shell_command')
-        scmd = shell_cmd.build_shell_command(cmd, log_stderr=True)
-        result = await scmd.run_with_response(timeout=timeout)
+        result = bytearray()
+        sc: SCMDComp = self.server.lookup_component('shell_command')
+        scmd = sc.build_shell_command(cmd, callback=result.extend, log_stderr=True)
+        if not await scmd.run(timeout=timeout):
+            raise self.server.error("Extract Metadata returned with error")
        try:
-            decoded_resp: Dict[str, Any] = json.loads(result.strip())
+            decoded_resp: Dict[str, Any] = jsonw.loads(result.strip())
        except Exception:
            logging.debug(f"Invalid metadata response:\n{result}")
            raise
diff --git a/moonraker/components/file_manager/metadata.py b/moonraker/components/file_manager/metadata.py
index 63c909b..479afb8 100644
--- a/moonraker/components/file_manager/metadata.py
+++ b/moonraker/components/file_manager/metadata.py
@@ -17,6 +17,7 @@ import tempfile
 import zipfile
 import shutil
 import uuid
+import logging
 from PIL import Image

 # Annotation imports
@@ -35,40 +36,39 @@ if TYPE_CHECKING:
 UFP_MODEL_PATH = "/3D/model.gcode"
 UFP_THUMB_PATH = "/Metadata/thumbnail.png"

-def log_to_stderr(msg: str) -> None:
-    sys.stderr.write(f"{msg}\n")
-    sys.stderr.flush()
+logging.basicConfig(stream=sys.stderr, level=logging.INFO)
+logger = logging.getLogger("metadata")

-# regex helpers
-def _regex_find_floats(pattern: str,
-                       data: str,
-                       strict: bool = False
-                       ) -> List[float]:
-    # If strict is enabled, pattern requires a floating point
-    # value, otherwise it can be an integer value
-    fptrn = r'\d+\.\d*' if strict else r'\d+\.?\d*'
+# Regex helpers. These methods take patterns with placeholders
+# to insert the correct regex capture group for floats, ints,
+# and strings:
+# Float:   (%F) = (\d*\.?\d+)
+# Integer: (%D) = (\d+)
+# String:  (%S) = (.+)
+def regex_find_floats(pattern: str, data: str) -> List[float]:
+    pattern = pattern.replace(r"(%F)", r"([0-9]*\.?[0-9]+)")
    matches = re.findall(pattern, data)
    if matches:
        # return the maximum height value found
        try:
-            return [float(h) for h in re.findall(
-                fptrn, " ".join(matches))]
+            return [float(h) for h in matches]
        except Exception:
            pass
    return []

-def _regex_find_ints(pattern: str, data: str) -> List[int]:
+def regex_find_ints(pattern: str, data: str) -> List[int]:
+    pattern = pattern.replace(r"(%D)", r"([0-9]+)")
    matches = re.findall(pattern, data)
    if matches:
        # return the maximum height value found
        try:
-            return [int(h) for h in re.findall(
-                r'\d+', " ".join(matches))]
+            return [int(h) for h in matches]
        except Exception:
            pass
    return []

-def _regex_find_first(pattern: str, data: str) -> Optional[float]:
+def regex_find_float(pattern: str, data: str) -> Optional[float]:
+    pattern = pattern.replace(r"(%F)", r"([0-9]*\.?[0-9]+)")
    match = re.search(pattern, data)
    val: Optional[float] = None
    if match:
@@ -78,7 +78,8 @@ def _regex_find_first(pattern: str, data: str) -> Optional[float]:
            return None
    return val

-def _regex_find_int(pattern: str, data: str) -> Optional[int]:
+def regex_find_int(pattern: str, data: str) -> Optional[int]:
+    pattern = pattern.replace(r"(%D)", r"([0-9]+)")
    match = re.search(pattern, data)
    val: Optional[int] = None
    if match:
@@ -88,12 +89,22 @@ def _regex_find_int(pattern: str, data: str) -> Optional[int]:
            return None
    return val

-def _regex_find_string(pattern: str, data: str) -> Optional[str]:
+def regex_find_string(pattern: str, data: str) -> Optional[str]:
+    pattern = pattern.replace(r"(%S)", r"(.*)")
    match = re.search(pattern, data)
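+    # NOTE (editorial comment): callers supply patterns using the placeholder
+    # convention documented above, e.g. (hypothetical pattern)
+    #   regex_find_string(r"; printer_model = (%S)", data)
+    # where (%S) expands to (.*) before the search runs.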
if match: return match.group(1).strip('"') return None +def regex_find_min_float(pattern: str, data: str) -> Optional[float]: + result = regex_find_floats(pattern, data) + return min(result) if result else None + +def regex_find_max_float(pattern: str, data: str) -> Optional[float]: + result = regex_find_floats(pattern, data) + return max(result) if result else None + + # Slicer parsing implementations class BaseSlicer(object): def __init__(self, file_path: str) -> None: @@ -111,28 +122,6 @@ class BaseSlicer(object): self.footer_data = footer_data self.size: int = fsize - def _parse_min_float(self, - pattern: str, - data: str, - strict: bool = False - ) -> Optional[float]: - result = _regex_find_floats(pattern, data, strict) - if result: - return min(result) - else: - return None - - def _parse_max_float(self, - pattern: str, - data: str, - strict: bool = False - ) -> Optional[float]: - result = _regex_find_floats(pattern, data, strict) - if result: - return max(result) - else: - return None - def _check_has_objects(self, data: str, pattern: Optional[str] = None @@ -144,12 +133,12 @@ class BaseSlicer(object): if match is not None: # Objects already processed fname = os.path.basename(self.path) - log_to_stderr( + logger.info( f"File '{fname}' currently supports cancellation, " "processing aborted" ) if match.group(1).startswith("DEFINE_OBJECT"): - log_to_stderr( + logger.info( "Legacy object processing detected. This is not " "compatible with official versions of Klipper." ) @@ -229,61 +218,73 @@ class BaseSlicer(object): try: os.mkdir(thumb_dir) except Exception: - log_to_stderr(f"Unable to create thumb dir: {thumb_dir}") + logger.info(f"Unable to create thumb dir: {thumb_dir}") return None thumb_base = os.path.splitext(os.path.basename(self.path))[0] parsed_matches: List[Dict[str, Any]] = [] - has_miniature: bool = False + #has_miniature: bool = False for match in thumb_matches: lines = re.split(r"\r?\n", match.replace('; ', '')) - info = _regex_find_ints(r".*", lines[0]) + info = regex_find_ints(r"(%D)", lines[0]) data = "".join(lines[1:-1]) if len(info) != 3: - log_to_stderr( + logger.info( f"MetadataError: Error parsing thumbnail" f" header: {lines[0]}") continue if len(data) != info[2]: - log_to_stderr( + logger.info( f"MetadataError: Thumbnail Size Mismatch: " f"detected {info[2]}, actual {len(data)}") continue thumb_name = f"{thumb_base}-{info[0]}x{info[1]}.png" thumb_path = os.path.join(thumb_dir, thumb_name) + thumb_jpg_name = f"{thumb_base}-{info[0]}x{info[1]}.jpg" + thumb_jpg_path = os.path.join(thumb_dir, thumb_jpg_name) rel_thumb_path = os.path.join(".thumbs", thumb_name) with open(thumb_path, "wb") as f: f.write(base64.b64decode(data.encode())) + with Image.open(thumb_path) as img: + if img.mode != "RGBA": + img = img.convert("RGBA") + new_img = Image.new("RGB", size=(info[0], info[1]), color=(255, 255, 255)) + img = img.resize((info[0], info[1])) + new_img.paste(img, (0, 0), mask=img) + new_img.save(thumb_jpg_path, "JPEG", quality=90) parsed_matches.append({ 'width': info[0], 'height': info[1], 'size': os.path.getsize(thumb_path), 'relative_path': rel_thumb_path}) - if info[0] == 32 and info[1] == 32: - has_miniature = True - if len(parsed_matches) > 0 and not has_miniature: - # find the largest thumb index - largest_match = parsed_matches[0] - for item in parsed_matches: - if item['size'] > largest_match['size']: - largest_match = item - # Create miniature thumbnail if one does not exist - thumb_full_name = largest_match['relative_path'].split("/")[-1] - thumb_path = 
os.path.join(thumb_dir, f"{thumb_full_name}")
-            rel_path_small = os.path.join(".thumbs", f"{thumb_base}-32x32.png")
-            thumb_path_small = os.path.join(
-                thumb_dir, f"{thumb_base}-32x32.png")
-            # read file
-            try:
-                with Image.open(thumb_path) as im:
-                    # Create 32x32 thumbnail
-                    im.thumbnail((32, 32))
-                    im.save(thumb_path_small, format="PNG")
-                    parsed_matches.insert(0, {
-                        'width': im.width, 'height': im.height,
-                        'size': os.path.getsize(thumb_path_small),
-                        'relative_path': rel_path_small
-                    })
-            except Exception as e:
-                log_to_stderr(str(e))
+        if not parsed_matches:
+            # Every thumbnail header failed to parse; bail out here to
+            # avoid an IndexError on the lookup below
+            return parsed_matches
+        # find the smallest thumbnail
+        smallest_match = parsed_matches[0]
+        max_size = min_size = smallest_match['size']
+        for item in parsed_matches:
+            if item['size'] < smallest_match['size']:
+                smallest_match = item
+            if item["size"] < min_size:
+                min_size = item["size"]
+            if item["size"] > max_size:
+                max_size = item["size"]
+        # Create thumbnail for screen
+        thumb_full_name = smallest_match['relative_path'].split("/")[-1]
+        thumb_path = os.path.join(thumb_dir, f"{thumb_full_name}")
+        thumb_QD_full_name = f"{thumb_base}-{smallest_match['width']}x{smallest_match['height']}_QD.jpg"
+        thumb_QD_path = os.path.join(thumb_dir, f"{thumb_QD_full_name}")
+        rel_path_QD = os.path.join(".thumbs", thumb_QD_full_name)
+        try:
+            with Image.open(thumb_path) as img:
+                if img.mode != "RGBA":
+                    img = img.convert("RGBA")
+                new_img = Image.new("RGB", size=(smallest_match['width'], smallest_match['height']), color=(255, 255, 255))
+                img = img.resize((smallest_match['width'], smallest_match['height']))
+                new_img.paste(img, (0, 0), mask=img)
+                new_img.save(thumb_QD_path, "JPEG", quality=90)
+        except Exception as e:
+            logger.info(str(e))
+        parsed_matches.append({
+            'width': smallest_match['width'], 'height': smallest_match['height'],
+            'size': (max_size + min_size) // 2,
+            'relative_path': rel_path_QD})
         return parsed_matches

     def parse_layer_count(self) -> Optional[int]:
@@ -297,22 +298,19 @@ class UnknownSlicer(BaseSlicer):
         return {'slicer': "Unknown"}

     def parse_first_layer_height(self) -> Optional[float]:
-        return self._parse_min_float(r"G1\sZ\d+\.\d*", self.header_data)
+        return regex_find_min_float(r"G1\sZ(%F)\s", self.header_data)

     def parse_object_height(self) -> Optional[float]:
-        return self._parse_max_float(r"G1\sZ\d+\.\d*", self.footer_data)
+        return regex_find_max_float(r"G1\sZ(%F)\s", self.footer_data)

     def parse_first_layer_extr_temp(self) -> Optional[float]:
-        return _regex_find_first(
-            r"M109 S(\d+\.?\d*)", self.header_data)
+        return regex_find_float(r"M109 S(%F)", self.header_data)

     def parse_first_layer_bed_temp(self) -> Optional[float]:
-        return _regex_find_first(
-            r"M190 S(\d+\.?\d*)", self.header_data)
+        return regex_find_float(r"M190 S(%F)", self.header_data)

     def parse_chamber_temp(self) -> Optional[float]:
-        return _regex_find_first(
-            r"M191 S(\d+\.?\d*)", self.header_data)
+        return regex_find_float(r"M191 S(%F)", self.header_data)

     def parse_thumbnails(self) -> Optional[List[Dict[str, Any]]]:
         return None
@@ -320,10 +318,12 @@ class UnknownSlicer(BaseSlicer):
 class PrusaSlicer(BaseSlicer):
     def check_identity(self, data: str) -> Optional[Dict[str, str]]:
         aliases = {
+            'QIDIStudio': r"QIDIStudio\s(.*)",
             'QIDISlicer': r"QIDISlicer\s(.*)\son",
             'PrusaSlicer': r"PrusaSlicer\s(.*)\son",
             'SuperSlicer': r"SuperSlicer\s(.*)\son",
             'OrcaSlicer': r"OrcaSlicer\s(.*)\son",
+            'MomentSlicer': r"MomentSlicer\s(.*)\son",
             'SliCR-3D': r"SliCR-3D\s(.*)\son",
             'BambuStudio': r"BambuStudio[^ ]*\s(.*)\n",
             'A3dp-Slicer': r"A3dp-Slicer\s(.*)\son",
@@ -343,20 +343,19 @@ class 
PrusaSlicer(BaseSlicer): def parse_first_layer_height(self) -> Optional[float]: # Check percentage - pct = _regex_find_first( - r"; first_layer_height = (\d+)%", self.footer_data) + pct = regex_find_float(r"; first_layer_height = (%F)%", self.footer_data) if pct is not None: if self.layer_height is None: # Failed to parse the original layer height, so it is not # possible to calculate a percentage return None return round(pct / 100. * self.layer_height, 6) - return _regex_find_first( - r"; first_layer_height = (\d+\.?\d*)", self.footer_data) + return regex_find_float(r"; first_layer_height = (%F)", self.footer_data) def parse_layer_height(self) -> Optional[float]: - self.layer_height = _regex_find_first( - r"; layer_height = (\d+\.?\d*)", self.footer_data) + self.layer_height = regex_find_float( + r"; layer_height = (%F)", self.footer_data + ) return self.layer_height def parse_object_height(self) -> Optional[float]: @@ -369,23 +368,31 @@ class PrusaSlicer(BaseSlicer): pass else: return max(matches) - return self._parse_max_float(r"G1\sZ\d+\.\d*\sF", self.footer_data) + return regex_find_max_float(r"G1\sZ(%F)\sF", self.footer_data) def parse_filament_total(self) -> Optional[float]: - return _regex_find_first( - r"filament\sused\s\[mm\]\s=\s(\d+\.\d*)", self.footer_data) + line = regex_find_string(r'filament\sused\s\[mm\]\s=\s(%S)\n', self.footer_data) + if line: + filament = regex_find_floats( + r"(%F)", line + ) + if filament: + return sum(filament) + return None def parse_filament_weight_total(self) -> Optional[float]: - return _regex_find_first( - r"total\sfilament\sused\s\[g\]\s=\s(\d+\.\d*)", self.footer_data) + return regex_find_float( + r"total\sfilament\sused\s\[g\]\s=\s(%F)", + self.footer_data + ) def parse_filament_type(self) -> Optional[str]: - return _regex_find_string( - r";\sfilament_type\s=\s(.*)", self.footer_data) + return regex_find_string(r";\sfilament_type\s=\s(%S)", self.footer_data) - def parse_filament_name(self) -> Optional[str]: - return _regex_find_string( - r";\sfilament_settings_id\s=\s(.*)", self.footer_data) + def parse_filament_name(self) -> Optional[str]: + return regex_find_string( + r";\sfilament_settings_id\s=\s(%S)", self.footer_data + ) def parse_estimated_time(self) -> Optional[float]: time_match = re.search( @@ -406,33 +413,36 @@ class PrusaSlicer(BaseSlicer): return round(total_time, 2) def parse_first_layer_extr_temp(self) -> Optional[float]: - return _regex_find_first( - r"; first_layer_temperature = (\d+\.?\d*)", self.footer_data) + return regex_find_float( + r"; first_layer_temperature = (%F)", self.footer_data + ) def parse_first_layer_bed_temp(self) -> Optional[float]: - return _regex_find_first( - r"; first_layer_bed_temperature = (\d+\.?\d*)", self.footer_data) + return regex_find_float( + r"; first_layer_bed_temperature = (%F)", self.footer_data + ) def parse_chamber_temp(self) -> Optional[float]: - return _regex_find_first( - r"; chamber_temperature = (\d+\.?\d*)", self.footer_data) + return regex_find_float( + r"; chamber_temperature = (%F)", self.footer_data + ) def parse_nozzle_diameter(self) -> Optional[float]: - return _regex_find_first( - r";\snozzle_diameter\s=\s(\d+\.\d*)", self.footer_data) + return regex_find_float( + r";\snozzle_diameter\s=\s(%F)", self.footer_data + ) def parse_layer_count(self) -> Optional[int]: - return _regex_find_int( - r"; total layers count = (\d+)", self.footer_data) + return regex_find_int(r"; total layers count = (%D)", self.footer_data) def parse_gimage(self) -> Optional[str]: - return 
_regex_find_string( + return regex_find_string( r";gimage:(.*)", self.footer_data) def parse_simage(self) -> Optional[str]: - return _regex_find_string( + return regex_find_string( r";simage:(.*)", self.footer_data) - + class Slic3rPE(PrusaSlicer): def check_identity(self, data: str) -> Optional[Dict[str, str]]: match = re.search(r"Slic3r\sPrusa\sEdition\s(.*)\son", data) @@ -444,8 +454,7 @@ class Slic3rPE(PrusaSlicer): return None def parse_filament_total(self) -> Optional[float]: - return _regex_find_first( - r"filament\sused\s=\s(\d+\.\d+)mm", self.footer_data) + return regex_find_float(r"filament\sused\s=\s(%F)mm", self.footer_data) def parse_thumbnails(self) -> Optional[List[Dict[str, Any]]]: return None @@ -461,15 +470,15 @@ class Slic3r(Slic3rPE): return None def parse_filament_total(self) -> Optional[float]: - filament = _regex_find_first( - r";\sfilament\_length\_m\s=\s(\d+\.\d*)", self.footer_data) + filament = regex_find_float( + r";\sfilament\_length\_m\s=\s(%F)", self.footer_data + ) if filament is not None: filament *= 1000 return filament def parse_filament_weight_total(self) -> Optional[float]: - return _regex_find_first( - r";\sfilament\smass\_g\s=\s(\d+\.\d*)", self.footer_data) + return regex_find_float(r";\sfilament\smass\_g\s=\s(%F)", self.footer_data) def parse_estimated_time(self) -> Optional[float]: return None @@ -485,61 +494,52 @@ class Cura(BaseSlicer): return None def has_objects(self) -> bool: - return self._check_has_objects( - self.header_data, r"\n;MESH:") + return self._check_has_objects(self.header_data, r"\n;MESH:") def parse_first_layer_height(self) -> Optional[float]: - return _regex_find_first(r";MINZ:(\d+\.?\d*)", self.header_data) + return regex_find_float(r";MINZ:(%F)", self.header_data) def parse_layer_height(self) -> Optional[float]: - self.layer_height = _regex_find_first( - r";Layer\sheight:\s(\d+\.?\d*)", self.header_data) + self.layer_height = regex_find_float( + r";Layer\sheight:\s(%F)", self.header_data + ) return self.layer_height def parse_object_height(self) -> Optional[float]: - return _regex_find_first(r";MAXZ:(\d+\.?\d*)", self.header_data) + return regex_find_float(r";MAXZ:(%F)", self.header_data) def parse_filament_total(self) -> Optional[float]: - filament = _regex_find_first( - r";Filament\sused:\s(\d+\.?\d*)m", self.header_data) + filament = regex_find_float(r";Filament\sused:\s(%F)m", self.header_data) if filament is not None: filament *= 1000 return filament def parse_filament_weight_total(self) -> Optional[float]: - return _regex_find_first( - r";Filament\sweight\s=\s.(\d+\.\d+).", self.header_data) + return regex_find_float(r";Filament\sweight\s=\s.(%F).", self.header_data) def parse_filament_type(self) -> Optional[str]: - return _regex_find_string( - r";Filament\stype\s=\s(.*)", self.header_data) + return regex_find_string(r";Filament\stype\s=\s(%S)", self.header_data) def parse_filament_name(self) -> Optional[str]: - return _regex_find_string( - r";Filament\sname\s=\s(.*)", self.header_data) + return regex_find_string(r";Filament\sname\s=\s(%S)", self.header_data) def parse_estimated_time(self) -> Optional[float]: - return self._parse_max_float(r";TIME:.*", self.header_data) + return regex_find_max_float(r";TIME:(%F)", self.header_data) def parse_first_layer_extr_temp(self) -> Optional[float]: - return _regex_find_first( - r"M109 S(\d+\.?\d*)", self.header_data) + return regex_find_float(r"M109 S(%F)", self.header_data) def parse_first_layer_bed_temp(self) -> Optional[float]: - return _regex_find_first( - r"M190 
S(\d+\.?\d*)", self.header_data) + return regex_find_float(r"M190 S(%F)", self.header_data) def parse_chamber_temp(self) -> Optional[float]: - return _regex_find_first( - r"M191 S(\d+\.?\d*)", self.header_data) + return regex_find_float(r"M191 S(%F)", self.header_data) def parse_layer_count(self) -> Optional[int]: - return _regex_find_int( - r";LAYER_COUNT\:(\d+)", self.header_data) + return regex_find_int(r";LAYER_COUNT\:(%D)", self.header_data) def parse_nozzle_diameter(self) -> Optional[float]: - return _regex_find_first( - r";Nozzle\sdiameter\s=\s(\d+\.\d*)", self.header_data) + return regex_find_float(r";Nozzle\sdiameter\s=\s(%F)", self.header_data) def parse_thumbnails(self) -> Optional[List[Dict[str, Any]]]: # Attempt to parse thumbnails from file metadata @@ -565,7 +565,7 @@ class Cura(BaseSlicer): 'relative_path': rel_path_full }) # Create 32x32 thumbnail - im.thumbnail((32, 32), Image.ANTIALIAS) + im.thumbnail((32, 32), Image.Resampling.LANCZOS) im.save(thumb_path_small, format="PNG") thumbs.insert(0, { 'width': im.width, 'height': im.height, @@ -573,16 +573,16 @@ class Cura(BaseSlicer): 'relative_path': rel_path_small }) except Exception as e: - log_to_stderr(str(e)) + logger.info(str(e)) return None return thumbs def parse_gimage(self) -> Optional[str]: - return _regex_find_string( + return regex_find_string( r";gimage:(.*)", self.header_data) def parse_simage(self) -> Optional[str]: - return _regex_find_string( + return regex_find_string( r";simage:(.*)", self.header_data) class Simplify3D(BaseSlicer): @@ -598,39 +598,39 @@ class Simplify3D(BaseSlicer): return None def parse_first_layer_height(self) -> Optional[float]: - return self._parse_min_float(r"G1\sZ\d+\.\d*", self.header_data) + return regex_find_min_float(r"G1\sZ(%F)\s", self.header_data) def parse_layer_height(self) -> Optional[float]: - self.layer_height = _regex_find_first( - r";\s+layerHeight,(\d+\.?\d*)", self.header_data) + self.layer_height = regex_find_float( + r";\s+layerHeight,(%F)", self.header_data + ) return self.layer_height def parse_object_height(self) -> Optional[float]: - return self._parse_max_float(r"G1\sZ\d+\.\d*", self.footer_data) + return regex_find_max_float(r"G1\sZ(%F)\s", self.footer_data) def parse_filament_total(self) -> Optional[float]: - return _regex_find_first( - r";\s+(?:Filament\slength|Material\sLength):\s(\d+\.?\d*)\smm", + return regex_find_float( + r";\s+(?:Filament\slength|Material\sLength):\s(%F)\smm", self.footer_data ) def parse_filament_weight_total(self) -> Optional[float]: - return _regex_find_first( - r";\s+(?:Plastic\sweight|Material\sWeight):\s(\d+\.?\d*)\sg", + return regex_find_float( + r";\s+(?:Plastic\sweight|Material\sWeight):\s(%F)\sg", self.footer_data ) def parse_filament_name(self) -> Optional[str]: - return _regex_find_string( - r";\s+printMaterial,(.*)", self.header_data) + return regex_find_string( + r";\s+printMaterial,(%S)", self.header_data) def parse_filament_type(self) -> Optional[str]: - return _regex_find_string( - r";\s+makerBotModelMaterial,(.*)", self.footer_data) + return regex_find_string( + r";\s+makerBotModelMaterial,(%S)", self.footer_data) def parse_estimated_time(self) -> Optional[float]: - time_match = re.search( - r';\s+Build (t|T)ime:.*', self.footer_data) + time_match = re.search(r';\s+Build (t|T)ime:.*', self.footer_data) if not time_match: return None total_time = 0 @@ -690,8 +690,8 @@ class Simplify3D(BaseSlicer): return self._get_first_layer_temp("Heated Bed") def parse_nozzle_diameter(self) -> Optional[float]: - return 
_regex_find_first( - r";\s+(?:extruderDiameter|nozzleDiameter),(\d+\.\d*)", + return regex_find_float( + r";\s+(?:extruderDiameter|nozzleDiameter),(%F)", self.header_data ) @@ -708,28 +708,28 @@ class KISSlicer(BaseSlicer): return None def parse_first_layer_height(self) -> Optional[float]: - return _regex_find_first( - r";\s+first_layer_thickness_mm\s=\s(\d+\.?\d*)", self.header_data) + return regex_find_float( + r";\s+first_layer_thickness_mm\s=\s(%F)", self.header_data) def parse_layer_height(self) -> Optional[float]: - self.layer_height = _regex_find_first( - r";\s+max_layer_thickness_mm\s=\s(\d+\.?\d*)", self.header_data) + self.layer_height = regex_find_float( + r";\s+max_layer_thickness_mm\s=\s(%F)", self.header_data) return self.layer_height def parse_object_height(self) -> Optional[float]: - return self._parse_max_float( - r";\sEND_LAYER_OBJECT\sz.*", self.footer_data) + return regex_find_max_float( + r";\sEND_LAYER_OBJECT\sz=(%F)", self.footer_data) def parse_filament_total(self) -> Optional[float]: - filament = _regex_find_floats( - r";\s+Ext\s.*mm", self.footer_data, strict=True) + filament = regex_find_floats( + r";\s+Ext #\d+\s+=\s+(%F)\s*mm", self.footer_data) if filament: return sum(filament) return None def parse_estimated_time(self) -> Optional[float]: - time = _regex_find_first( - r";\sCalculated.*Build\sTime:\s(\d+\.?\d*)\sminutes", + time = regex_find_float( + r";\sCalculated.*Build\sTime:\s(%F)\sminutes", self.footer_data) if time is not None: time *= 60 @@ -737,16 +737,13 @@ class KISSlicer(BaseSlicer): return None def parse_first_layer_extr_temp(self) -> Optional[float]: - return _regex_find_first( - r"; first_layer_C = (\d+\.?\d*)", self.header_data) + return regex_find_float(r"; first_layer_C = (%F)", self.header_data) def parse_first_layer_bed_temp(self) -> Optional[float]: - return _regex_find_first( - r"; bed_C = (\d+\.?\d*)", self.header_data) + return regex_find_float(r"; bed_C = (%F)", self.header_data) def parse_chamber_temp(self) -> Optional[float]: - return _regex_find_first( - r"; chamber_C = (\d+\.?\d*)", self.header_data) + return regex_find_float(r"; chamber_C = (%F)", self.header_data) class IdeaMaker(BaseSlicer): @@ -760,54 +757,49 @@ class IdeaMaker(BaseSlicer): return None def has_objects(self) -> bool: - return self._check_has_objects( - self.header_data, r"\n;PRINTING:") + return self._check_has_objects(self.header_data, r"\n;PRINTING:") def parse_first_layer_height(self) -> Optional[float]: - layer_info = _regex_find_floats( - r";LAYER:0\s*.*\s*;HEIGHT.*", self.header_data) - if len(layer_info) >= 3: - return layer_info[2] - return None + return regex_find_float( + r";LAYER:0\s*.*\s*;HEIGHT:(%F)", self.header_data + ) def parse_layer_height(self) -> Optional[float]: - layer_info = _regex_find_floats( - r";LAYER:1\s*.*\s*;HEIGHT.*", self.header_data) - if len(layer_info) >= 3: - self.layer_height = layer_info[2] - return self.layer_height - return None + return regex_find_float( + r";LAYER:1\s*.*\s*;HEIGHT:(%F)", self.header_data + ) def parse_object_height(self) -> Optional[float]: - bounds = _regex_find_floats( - r";Bounding Box:.*", self.header_data) - if len(bounds) >= 6: - return bounds[5] - return None + return regex_find_float(r";Bounding Box:(?:\s+(%F))+", self.header_data) def parse_filament_total(self) -> Optional[float]: - filament = _regex_find_floats( - r";Material.\d\sUsed:.*", self.footer_data, strict=True) + filament = regex_find_floats( + r";Material.\d\sUsed:\s+(%F)", self.footer_data + ) if filament: return sum(filament) return 
None def parse_filament_type(self) -> Optional[str]: - return _regex_find_string( - r";Filament\stype\s=\s(.*)", self.header_data) + return ( + regex_find_string(r";Filament\sType\s.\d:\s(%S)", self.header_data) or + regex_find_string(r";Filament\stype\s=\s(%S)", self.header_data) + ) def parse_filament_name(self) -> Optional[str]: - return _regex_find_string( - r";Filament\sname\s=\s(.*)", self.header_data) + return ( + regex_find_string(r";Filament\sName\s.\d:\s(%S)", self.header_data) or + regex_find_string(r";Filament\sname\s=\s(%S)", self.header_data) + ) def parse_filament_weight_total(self) -> Optional[float]: pi = 3.141592653589793 - length = _regex_find_floats( - r";Material.\d\sUsed:.*", self.footer_data, strict=True) - diameter = _regex_find_floats( - r";Filament\sDiameter\s.\d:.*", self.header_data, strict=True) - density = _regex_find_floats( - r";Filament\sDensity\s.\d:.*", self.header_data, strict=True) + length = regex_find_floats( + r";Material.\d\sUsed:\s+(%F)", self.footer_data) + diameter = regex_find_floats( + r";Filament\sDiameter\s.\d:\s+(%F)", self.header_data) + density = regex_find_floats( + r";Filament\sDensity\s.\d:\s+(%F)", self.header_data) if len(length) == len(density) == len(diameter): # calc individual weight for each filament with m=pi/4*d²*l*rho weights = [(pi/4 * diameter[i]**2 * length[i] * density[i]/10**6) @@ -816,24 +808,20 @@ class IdeaMaker(BaseSlicer): return None def parse_estimated_time(self) -> Optional[float]: - return _regex_find_first( - r";Print\sTime:\s(\d+\.?\d*)", self.footer_data) + return regex_find_float(r";Print\sTime:\s(%F)", self.footer_data) def parse_first_layer_extr_temp(self) -> Optional[float]: - return _regex_find_first( - r"M109 T0 S(\d+\.?\d*)", self.header_data) + return regex_find_float(r"M109 T0 S(%F)", self.header_data) def parse_first_layer_bed_temp(self) -> Optional[float]: - return _regex_find_first( - r"M190 S(\d+\.?\d*)", self.header_data) + return regex_find_float(r"M190 S(%F)", self.header_data) def parse_chamber_temp(self) -> Optional[float]: - return _regex_find_first( - r"M191 S(\d+\.?\d*)", self.header_data) + return regex_find_float(r"M191 S(%F)", self.header_data) def parse_nozzle_diameter(self) -> Optional[float]: - return _regex_find_first( - r";Dimension:(?:\s\d+\.\d+){3}\s(\d+\.\d+)", self.header_data) + return regex_find_float( + r";Dimension:(?:\s\d+\.\d+){3}\s(%F)", self.header_data) class IceSL(BaseSlicer): def check_identity(self, data) -> Optional[Dict[str, Any]]: @@ -847,59 +835,59 @@ class IceSL(BaseSlicer): return None def parse_first_layer_height(self) -> Optional[float]: - return _regex_find_first( - r";\sz_layer_height_first_layer_mm\s:\s+(\d+\.\d+)", + return regex_find_float( + r";\sz_layer_height_first_layer_mm\s:\s+(%F)", self.header_data) def parse_layer_height(self) -> Optional[float]: - self.layer_height = _regex_find_first( - r";\sz_layer_height_mm\s:\s+(\d+\.\d+)", + self.layer_height = regex_find_float( + r";\sz_layer_height_mm\s:\s+(%F)", self.header_data) return self.layer_height def parse_object_height(self) -> Optional[float]: - return _regex_find_first( - r";\sprint_height_mm\s:\s+(\d+\.\d+)", self.header_data) + return regex_find_float( + r";\sprint_height_mm\s:\s+(%F)", self.header_data) def parse_first_layer_extr_temp(self) -> Optional[float]: - return _regex_find_first( - r";\sextruder_temp_degree_c_0\s:\s+(\d+\.?\d*)", self.header_data) + return regex_find_float( + r";\sextruder_temp_degree_c_0\s:\s+(%F)", self.header_data) def parse_first_layer_bed_temp(self) -> 
Optional[float]: - return _regex_find_first( - r";\sbed_temp_degree_c\s:\s+(\d+\.?\d*)", self.header_data) + return regex_find_float( + r";\sbed_temp_degree_c\s:\s+(%F)", self.header_data) def parse_chamber_temp(self) -> Optional[float]: - return _regex_find_first( - r";\schamber_temp_degree_c\s:\s+(\d+\.?\d*)", self.header_data) + return regex_find_float( + r";\schamber_temp_degree_c\s:\s+(%F)", self.header_data) def parse_filament_total(self) -> Optional[float]: - return _regex_find_first( - r";\sfilament_used_mm\s:\s+(\d+\.\d+)", self.header_data) + return regex_find_float( + r";\sfilament_used_mm\s:\s+(%F)", self.header_data) def parse_filament_weight_total(self) -> Optional[float]: - return _regex_find_first( - r";\sfilament_used_g\s:\s+(\d+\.\d+)", self.header_data) + return regex_find_float( + r";\sfilament_used_g\s:\s+(%F)", self.header_data) def parse_filament_name(self) -> Optional[str]: - return _regex_find_string( - r";\sfilament_name\s:\s+(.*)", self.header_data) + return regex_find_string( + r";\sfilament_name\s:\s+(%S)", self.header_data) def parse_filament_type(self) -> Optional[str]: - return _regex_find_string( - r";\sfilament_type\s:\s+(.*)", self.header_data) + return regex_find_string( + r";\sfilament_type\s:\s+(%S)", self.header_data) def parse_estimated_time(self) -> Optional[float]: - return _regex_find_first( - r";\sestimated_print_time_s\s:\s+(\d*\.*\d*)", self.header_data) + return regex_find_float( + r";\sestimated_print_time_s\s:\s+(%F)", self.header_data) def parse_layer_count(self) -> Optional[int]: - return _regex_find_int( - r";\slayer_count\s:\s+(\d+)", self.header_data) + return regex_find_int( + r";\slayer_count\s:\s+(%D)", self.header_data) def parse_nozzle_diameter(self) -> Optional[float]: - return _regex_find_first( - r";\snozzle_diameter_mm_0\s:\s+(\d+\.\d+)", self.header_data) + return regex_find_float( + r";\snozzle_diameter_mm_0\s:\s+(%F)", self.header_data) class KiriMoto(BaseSlicer): def check_identity(self, data) -> Optional[Dict[str, Any]]: @@ -917,20 +905,19 @@ class KiriMoto(BaseSlicer): return None def parse_first_layer_height(self) -> Optional[float]: - return _regex_find_first( - r"; firstSliceHeight = (\d+\.\d+)", self.header_data + return regex_find_float( + r"; firstSliceHeight = (%F)", self.header_data ) def parse_layer_height(self) -> Optional[float]: - self.layer_height = _regex_find_first( - r"; sliceHeight = (\d+\.\d+)", self.header_data + self.layer_height = regex_find_float( + r"; sliceHeight = (%F)", self.header_data ) return self.layer_height def parse_object_height(self) -> Optional[float]: - return self._parse_max_float( - r"G1 Z\d+\.\d+ (?:; z-hop end|F\d+\n)", - self.footer_data, strict=True + return regex_find_max_float( + r"G1 Z(%F) (?:; z-hop end|F\d+\n)", self.footer_data ) def parse_layer_count(self) -> Optional[int]: @@ -945,25 +932,25 @@ class KiriMoto(BaseSlicer): return None def parse_estimated_time(self) -> Optional[float]: - return _regex_find_int(r"; --- print time: (\d+)s", self.footer_data) + return regex_find_int(r"; --- print time: (%D)s", self.footer_data) def parse_filament_total(self) -> Optional[float]: - return _regex_find_first( - r"; --- filament used: (\d+\.?\d*) mm", self.footer_data + return regex_find_float( + r"; --- filament used: (%F) mm", self.footer_data ) def parse_first_layer_extr_temp(self) -> Optional[float]: - return _regex_find_first( - r"; firstLayerNozzleTemp = (\d+\.?\d*)", self.header_data + return regex_find_float( + r"; firstLayerNozzleTemp = (%F)", self.header_data ) def 
parse_first_layer_bed_temp(self) -> Optional[float]: - return _regex_find_first( - r"; firstLayerBedTemp = (\d+\.?\d*)", self.header_data + return regex_find_float( + r"; firstLayerBedTemp = (%F)", self.header_data ) -READ_SIZE = 512 * 1024 +READ_SIZE = 1024 * 1024 # 1 MiB SUPPORTED_SLICERS: List[Type[BaseSlicer]] = [ PrusaSlicer, Slic3rPE, Slic3r, Cura, Simplify3D, KISSlicer, IdeaMaker, IceSL, KiriMoto @@ -997,10 +984,10 @@ def process_objects(file_path: str, slicer: BaseSlicer, name: str) -> bool: preprocess_m486 ) except ImportError: - log_to_stderr("Module 'preprocess-cancellation' failed to load") + logger.info("Module 'preprocess-cancellation' failed to load") return False fname = os.path.basename(file_path) - log_to_stderr( + logger.info( f"Performing Object Processing on file: {fname}, " f"sliced by {name}" ) @@ -1018,7 +1005,7 @@ def process_objects(file_path: str, slicer: BaseSlicer, name: str) -> bool: elif isinstance(slicer, IdeaMaker): processor = preprocess_ideamaker else: - log_to_stderr( + logger.info( f"Object Processing Failed, slicer {name}" "not supported" ) @@ -1026,7 +1013,7 @@ def process_objects(file_path: str, slicer: BaseSlicer, name: str) -> bool: for line in processor(in_file): out_file.write(line) except Exception as e: - log_to_stderr(f"Object processing failed: {e}") + logger.info(f"Object processing failed: {e}") return False if os.path.islink(file_path): file_path = os.path.realpath(file_path) @@ -1084,7 +1071,7 @@ def extract_metadata( def extract_ufp(ufp_path: str, dest_path: str) -> None: if not os.path.isfile(ufp_path): - log_to_stderr(f"UFP file Not Found: {ufp_path}") + logger.info(f"UFP file Not Found: {ufp_path}") sys.exit(-1) thumb_name = os.path.splitext( os.path.basename(dest_path))[0] + ".png" @@ -1107,12 +1094,12 @@ def extract_ufp(ufp_path: str, dest_path: str) -> None: os.mkdir(dest_thumb_dir) shutil.move(tmp_thumb_path, dest_thumb_path) except Exception: - log_to_stderr(traceback.format_exc()) + logger.info(traceback.format_exc()) sys.exit(-1) try: os.remove(ufp_path) except Exception: - log_to_stderr(f"Error removing ufp file: {ufp_path}") + logger.info(f"Error removing ufp file: {ufp_path}") def main(path: str, filename: str, @@ -1124,12 +1111,12 @@ def main(path: str, extract_ufp(ufp, file_path) metadata: Dict[str, Any] = {} if not os.path.isfile(file_path): - log_to_stderr(f"File Not Found: {file_path}") + logger.info(f"File Not Found: {file_path}") sys.exit(-1) try: metadata = extract_metadata(file_path, check_objects) except Exception: - log_to_stderr(traceback.format_exc()) + logger.info(traceback.format_exc()) sys.exit(-1) fd = sys.stdout.fileno() data = json.dumps( @@ -1164,5 +1151,5 @@ if __name__ == "__main__": args = parser.parse_args() check_objects = args.check_objects enabled_msg = "enabled" if check_objects else "disabled" - log_to_stderr(f"Object Processing is {enabled_msg}") - main(args.path, args.filename, args.ufp, check_objects) \ No newline at end of file + logger.info(f"Object Processing is {enabled_msg}") + main(args.path, args.filename, args.ufp, check_objects) diff --git a/moonraker/components/gpio.py b/moonraker/components/gpio.py index 7055cdf..ab64a95 100644 --- a/moonraker/components/gpio.py +++ b/moonraker/components/gpio.py @@ -4,8 +4,13 @@ # # This file may be distributed under the terms of the GNU GPLv3 license. 
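A quick aside on the placeholder convention the metadata.py rework above relies on:
every slicer regex now writes (%F), (%D) or (%S) instead of a hand-rolled capture
group, and the helper substitutes the real group before matching. A standalone
sketch (plain Python; the helper body is paraphrased from the hunk above, and the
sample g-code is made up for illustration):

    import re
    from typing import List

    def regex_find_floats(pattern: str, data: str) -> List[float]:
        # (%F) expands to a float capture group before matching
        pattern = pattern.replace(r"(%F)", r"([0-9]*\.?[0-9]+)")
        try:
            return [float(m) for m in re.findall(pattern, data)]
        except Exception:
            return []

    sample = "G1 Z0.4 F300\nG1 Z10.6 F300\n"
    print(regex_find_floats(r"G1\sZ(%F)\s", sample))       # [0.4, 10.6]
    print(max(regex_find_floats(r"G1\sZ(%F)\s", sample)))  # object height: 10.6

The min/max wrappers (regex_find_min_float / regex_find_max_float) are thin
conveniences over this list form, which is why UnknownSlicer's height parsers
collapse to one-liners in the hunks above.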
from __future__ import annotations
+import os
+import re
+import asyncio
+import pathlib
 import logging
-from utils import load_system_module
+import periphery
+from ..utils import KERNEL_VERSION

 # Annotation imports
 from typing import (
@@ -18,167 +23,135 @@ from typing import (
 )

 if TYPE_CHECKING:
-    from confighelper import ConfigHelper
-    from eventloop import EventLoop
-    GPIO_CALLBACK = Callable[[float, float, int], Optional[Awaitable[None]]]
+    from ..confighelper import ConfigHelper
+    from ..eventloop import EventLoop
+
+GpioEventCallback = Callable[[float, float, int], Optional[Awaitable[None]]]
+
+GPIO_PATTERN = r"""
+    (?P<bias>[~^])?
+    (?P<inverted>!)?
+    (?:(?P<chip_id>gpiochip[0-9]+)/)?
+    (?P<pin_name>gpio(?P<pin_id>[0-9]+))
+"""
+
+BIAS_FLAG_TO_DESC: Dict[str, str] = {
+    "^": "pull_up",
+    "~": "pull_down",
+    "*": "disable" if KERNEL_VERSION >= (5, 5) else "default"
+}

 class GpioFactory:
     def __init__(self, config: ConfigHelper) -> None:
         self.server = config.get_server()
-        self.gpiod: Any = load_system_module("gpiod")
-        GpioEvent.init_constants(self.gpiod)
-        self.chips: Dict[str, Any] = {}
         self.reserved_gpios: Dict[str, GpioBase] = {}
-        version: str = self.gpiod.version_string()
-        self.gpiod_version = tuple(int(v) for v in version.split('.'))
-        self.server.add_log_rollover_item(
-            "gpiod_version", f"libgpiod version: {version}")

-    def _get_gpio_chip(self, chip_name) -> Any:
-        if chip_name in self.chips:
-            return self.chips[chip_name]
-        chip = self.gpiod.Chip(chip_name, self.gpiod.Chip.OPEN_BY_NAME)
-        self.chips[chip_name] = chip
-        return chip
-
-    def setup_gpio_out(self,
-                       pin_name: str,
-                       initial_value: int = 0
-                       ) -> GpioOutputPin:
+    def setup_gpio_out(self, pin_name: str, initial_value: int = 0) -> GpioOutputPin:
         initial_value = int(not not initial_value)
-        pparams = self._parse_pin(pin_name)
-        pparams['initial_value'] = initial_value
-        line = self._request_gpio(pparams)
+        pparams = self._parse_pin(pin_name, initial_value)
+        gpio = self._request_gpio(pparams)
         try:
-            gpio_out = GpioOutputPin(line, pparams)
+            gpio_out = GpioOutputPin(gpio, pparams)
         except Exception:
             logging.exception("Error Instantiating GpioOutputPin")
-            line.release()
+            gpio.close()
             raise
-        full_name = pparams['full_name']
+        full_name = pparams["full_name"]
         self.reserved_gpios[full_name] = gpio_out
         return gpio_out

-    def register_gpio_event(self,
-                            pin_name: str,
-                            callback: GPIO_CALLBACK
-                            ) -> GpioEvent:
+    def register_gpio_event(
+        self, pin_name: str, callback: GpioEventCallback
+    ) -> GpioEvent:
-        pin_params = self._parse_pin(pin_name, type="event")
-        line = self._request_gpio(pin_params)
+        pin_params = self._parse_pin(pin_name, req_type="event")
+        gpio = self._request_gpio(pin_params)
         event_loop = self.server.get_event_loop()
         try:
-            gpio_event = GpioEvent(event_loop, line, pin_params, callback)
+            gpio_event = GpioEvent(event_loop, gpio, pin_params, callback)
         except Exception:
             logging.exception("Error Instantiating GpioEvent")
-            line.release()
+            gpio.close()
             raise
-        full_name = pin_params['full_name']
+        full_name = pin_params["full_name"]
         self.reserved_gpios[full_name] = gpio_event
         return gpio_event

-    def _request_gpio(self, pin_params: Dict[str, Any]) -> Any:
-        full_name = pin_params['full_name']
+    def _request_gpio(self, pin_params: Dict[str, Any]) -> periphery.GPIO:
+        full_name = pin_params["full_name"]
         if full_name in self.reserved_gpios:
             raise self.server.error(f"GPIO {full_name} already reserved")
+        chip_path = pathlib.Path("/dev").joinpath(pin_params["chip_id"])
+        if not chip_path.exists():
+            raise self.server.error(f"Chip path {chip_path} does 
not exist") try: - chip = self._get_gpio_chip(pin_params['chip_id']) - line = chip.get_line(pin_params['pin_id']) - args: Dict[str, Any] = { - 'consumer': "moonraker", - 'type': pin_params['request_type'] - } - if 'flags' in pin_params: - args['flags'] = pin_params['flags'] - if 'initial_value' in pin_params: - if self.gpiod_version < (1, 3): - args['default_vals'] = [pin_params['initial_value']] - else: - args['default_val'] = pin_params['initial_value'] - line.request(**args) + gpio = periphery.GPIO( + str(chip_path), + pin_params["pin_id"], + pin_params["direction"], + edge=pin_params.get("edge", "none"), + bias=pin_params.get("bias", "default"), + inverted=pin_params["inverted"], + label="moonraker" + ) except Exception: logging.exception( f"Unable to init {full_name}. Make sure the gpio is not in " "use by another program or exported by sysfs.") raise - return line + return gpio - def _parse_pin(self, - pin_name: str, - type: str = "out" - ) -> Dict[str, Any]: + def _parse_pin( + self, pin_desc: str, initial_value: int = 0, req_type: str = "out" + ) -> Dict[str, Any]: params: Dict[str, Any] = { - 'orig': pin_name, - 'invert': False, + "orig": pin_desc, + "inverted": False, + "request_type": req_type, + "initial_value": initial_value } - pin = pin_name - if type == "event": - params['request_type'] = self.gpiod.LINE_REQ_EV_BOTH_EDGES - flag: str = "disable" - if pin[0] == "^": - pin = pin[1:] - flag = "pullup" - elif pin[0] == "~": - pin = pin[1:] - flag = "pulldown" - if self.gpiod_version >= (1, 5): - flag_to_enum = { - "disable": self.gpiod.LINE_REQ_FLAG_BIAS_DISABLE, - "pullup": self.gpiod.LINE_REQ_FLAG_BIAS_PULL_UP, - "pulldown": self.gpiod.LINE_REQ_FLAG_BIAS_PULL_DOWN - } - params['flags'] = flag_to_enum[flag] - elif flag != "disable": - raise self.server.error( - f"Flag {flag} configured for event GPIO '{pin_name}'" - " requires libgpiod version 1.5 or later. " - f"Current Version: {self.gpiod.version_string()}") - elif type == "out": - params['request_type'] = self.gpiod.LINE_REQ_DIR_OUT - if pin[0] == "!": - pin = pin[1:] - params['invert'] = True - if 'flags' in params: - params['flags'] |= self.gpiod.LINE_REQ_FLAG_ACTIVE_LOW - else: - params['flags'] = self.gpiod.LINE_REQ_FLAG_ACTIVE_LOW - chip_id: str = "gpiochip0" - pin_parts = pin.split("/") - if len(pin_parts) == 2: - chip_id, pin = pin_parts - elif len(pin_parts) == 1: - pin = pin_parts[0] - # Verify pin - if not chip_id.startswith("gpiochip") or \ - not chip_id[-1].isdigit() or \ - not pin.startswith("gpio") or \ - not pin[4:].isdigit(): + pin_match = re.match(GPIO_PATTERN, pin_desc, re.VERBOSE) + if pin_match is None: raise self.server.error( - f"Invalid Gpio Pin: {pin_name}") - pin_id = int(pin[4:]) - params['pin_id'] = pin_id - params['chip_id'] = chip_id - params['full_name'] = f"{chip_id}:{pin}" + f"Invalid pin format {pin_desc}. Refer to the configuration " + "documentation for details on the pin format." + ) + bias_flag: Optional[str] = pin_match.group("bias") + params["inverted"] = pin_match.group("inverted") is not None + if req_type == "event": + params["direction"] = "in" + params["edge"] = "both" + params["bias"] = BIAS_FLAG_TO_DESC[bias_flag or "*"] + elif req_type == "out": + if bias_flag is not None: + raise self.server.error( + f"Invalid pin format {pin_desc}. Bias flag {bias_flag} " + "not available for output pins." 
+ ) + initial_state = bool(initial_value) ^ params["inverted"] + params["direction"] = "low" if not initial_state else "high" + chip_id: str = pin_match.group("chip_id") or "gpiochip0" + pin_name: str = pin_match.group("pin_name") + params["pin_id"] = int(pin_match.group("pin_id")) + params["chip_id"] = chip_id + params["full_name"] = f"{chip_id}:{pin_name}" return params def close(self) -> None: - for line in self.reserved_gpios.values(): - line.release() - for chip in self.chips.values(): - chip.close() + for gpio in self.reserved_gpios.values(): + gpio.close() class GpioBase: - def __init__(self, - line: Any, - pin_params: Dict[str, Any] - ) -> None: - self.orig: str = pin_params['orig'] - self.name: str = pin_params['full_name'] - self.inverted: bool = pin_params['invert'] - self.line: Any = line - self.value: int = pin_params.get('initial_value', 0) + def __init__( + self, gpio: periphery.GPIO, pin_params: Dict[str, Any] + ) -> None: + self.orig: str = pin_params["orig"] + self.name: str = pin_params["full_name"] + self.inverted: bool = pin_params["inverted"] + self.gpio = gpio + self.value: int = pin_params.get("initial_value", 0) - def release(self) -> None: - self.line.release() + def close(self) -> None: + self.gpio.close() def is_inverted(self) -> bool: return self.inverted @@ -195,85 +168,107 @@ class GpioBase: class GpioOutputPin(GpioBase): def write(self, value: int) -> None: self.value = int(not not value) - self.line.set_value(self.value) + self.gpio.write(bool(self.value)) -MAX_ERRORS = 20 +MAX_ERRORS = 50 +ERROR_RESET_TIME = 5. class GpioEvent(GpioBase): - EVENT_FALLING_EDGE = 0 - EVENT_RISING_EDGE = 1 - def __init__(self, - event_loop: EventLoop, - line: Any, - pin_params: Dict[str, Any], - callback: GPIO_CALLBACK - ) -> None: - super().__init__(line, pin_params) + def __init__( + self, + event_loop: EventLoop, + gpio: periphery.GPIO, + pin_params: Dict[str, Any], + callback: GpioEventCallback + ) -> None: + super().__init__(gpio, pin_params) self.event_loop = event_loop - self.fd = line.event_get_fd() self.callback = callback self.on_error: Optional[Callable[[str], None]] = None - self.min_evt_time = 0. - self.last_event_time = 0. + self.debounce_period: float = 0 + self.last_event_time: float = 0. self.error_count = 0 + self.last_error_reset = 0. self.started = False + self.debounce_task: Optional[asyncio.Task] = None + os.set_blocking(self.gpio.fd, False) - @classmethod - def init_constants(cls, gpiod: Any) -> None: - cls.EVENT_RISING_EDGE = gpiod.LineEvent.RISING_EDGE - cls.EVENT_FALLING_EDGE = gpiod.LineEvent.FALLING_EDGE + def fileno(self) -> int: + return self.gpio.fd - def setup_debounce(self, - min_evt_time: float, - err_callback: Optional[Callable[[str], None]] - ) -> None: - self.min_evt_time = max(min_evt_time, 0.) 
+ def setup_debounce( + self, debounce_period: float, err_callback: Optional[Callable[[str], None]] + ) -> None: + self.debounce_period = max(debounce_period, 0) self.on_error = err_callback def start(self) -> None: if not self.started: - self.value = self.line.get_value() + self.value = int(self.gpio.read()) self.last_event_time = self.event_loop.get_loop_time() - self.event_loop.add_reader(self.fd, self._on_event_trigger) + self.event_loop.add_reader(self.gpio.fd, self._on_event_trigger) self.started = True logging.debug(f"GPIO {self.name}: Listening for events, " f"current state: {self.value}") def stop(self) -> None: + if self.debounce_task is not None: + self.debounce_task.cancel() + self.debounce_task = None if self.started: - self.event_loop.remove_reader(self.fd) + self.event_loop.remove_reader(self.gpio.fd) self.started = False - def release(self) -> None: + def close(self) -> None: self.stop() - self.line.release() + self.gpio.close() def _on_event_trigger(self) -> None: - evt = self.line.event_read() - last_val = self.value - if evt.type == self.EVENT_RISING_EDGE: + evt = self.gpio.read_event() + last_value = self.value + if evt.edge == "rising": # type: ignore self.value = 1 - elif evt.type == self.EVENT_FALLING_EDGE: + elif evt.edge == "falling": # type: ignore self.value = 0 + else: + return + if self.debounce_period: + if self.debounce_task is None: + coro = self._debounce(last_value) + self.debounce_task = self.event_loop.create_task(coro) + else: + self._increment_error() + elif last_value != self.value: + # No debounce period and change detected + self._run_callback() + + async def _debounce(self, last_value: int) -> None: + await asyncio.sleep(self.debounce_period) + self.debounce_task = None + if last_value != self.value: + self._run_callback() + + def _run_callback(self) -> None: eventtime = self.event_loop.get_loop_time() evt_duration = eventtime - self.last_event_time - if last_val == self.value or evt_duration < self.min_evt_time: - self._increment_error() - return self.last_event_time = eventtime - self.error_count = 0 ret = self.callback(eventtime, evt_duration, self.value) if ret is not None: - self.event_loop.create_task(ret) + self.event_loop.create_task(ret) # type: ignore def _increment_error(self) -> None: + eventtime = self.event_loop.get_loop_time() + if eventtime - self.last_error_reset > ERROR_RESET_TIME: + self.error_count = 0 + self.last_error_reset = eventtime self.error_count += 1 if self.error_count >= MAX_ERRORS: self.stop() if self.on_error is not None: - self.on_error("Too Many Consecutive Errors, " - f"GPIO Event Disabled on {self.name}") + self.on_error( + f"Too Many Consecutive Errors, GPIO Event Disabled on {self.name}" + ) def load_component(config: ConfigHelper) -> GpioFactory: diff --git a/moonraker/components/history.py b/moonraker/components/history.py index c48bc43..66a8ab4 100644 --- a/moonraker/components/history.py +++ b/moonraker/components/history.py @@ -1,11 +1,20 @@ # History cache for printer jobs # +# Copyright (C) 2024 Eric Callahan +# # This file may be distributed under the terms of the GNU GPLv3 license. 
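Worth pausing on the new GpioEvent debounce before the history changes: instead of
rejecting events that arrive within min_evt_time, the first edge now spawns a task
that sleeps for debounce_period and only fires the callback if the pin state still
differs once it settles. A minimal standalone sketch of that pattern (plain
asyncio; the class and names here are illustrative, not Moonraker's API):

    import asyncio
    from typing import Optional

    class DebouncedInput:
        def __init__(self, period: float) -> None:
            self.period = period                        # settle time, seconds
            self.value = 0
            self._task: Optional[asyncio.Task] = None

        def on_edge(self, new_value: int) -> None:
            # Called for every raw edge; only the first edge starts a settle task
            last = self.value
            self.value = new_value
            if self._task is None:
                loop = asyncio.get_running_loop()
                self._task = loop.create_task(self._settle(last))

        async def _settle(self, last: int) -> None:
            await asyncio.sleep(self.period)
            self._task = None
            if last != self.value:
                # State still differs after the settle period: a real transition
                print(f"debounced change -> {self.value}")

The real component goes further: edges that arrive while a settle task is active
count toward MAX_ERRORS, and the counter now resets after ERROR_RESET_TIME rather
than on every good event, so sustained noise disables the event cleanly.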
from __future__ import annotations import time import logging from asyncio import Lock +from ..common import ( + JobEvent, + RequestType, + HistoryFieldData, + FieldTracker, + SqlTableDefinition +) # Annotation imports from typing import ( @@ -15,97 +24,284 @@ from typing import ( Optional, Dict, List, + Tuple ) + if TYPE_CHECKING: - from confighelper import ConfigHelper - from websockets import WebRequest + from ..confighelper import ConfigHelper + from ..common import WebRequest, UserInfo from .database import MoonrakerDatabase as DBComp from .job_state import JobState from .file_manager.file_manager import FileManager + from .database import DBProviderWrapper + Totals = Dict[str, Union[float, int]] + AuxTotals = List[Dict[str, Any]] -HIST_NAMESPACE = "history" -MAX_JOBS = 10000 +BASE_TOTALS = { + "total_jobs": 0, + "total_time": 0., + "total_print_time": 0., + "total_filament_used": 0., + "longest_job": 0., + "longest_print": 0. +} +HIST_TABLE = "job_history" +TOTALS_TABLE = "job_totals" + +def _create_totals_list( + job_totals: Dict[str, Any], + aux_totals: List[Dict[str, Any]], + instance: str = "default" +) -> List[Tuple[str, str, Any, Any, str]]: + """ + Returns a list of Tuples formatted for SQL Database insertion. + + Fields of each tuple are in the following order: + provider, field, maximum, total, instance_id + """ + totals_list: List[Tuple[str, str, Any, Any, str]] = [] + for key, value in job_totals.items(): + total = value if key.startswith("total_") else None + maximum = value if total is None else None + totals_list.append(("history", key, maximum, total, instance)) + for item in aux_totals: + if not isinstance(item, dict): + continue + totals_list.append( + ( + item["provider"], + item["field"], + item["maximum"], + item["total"], + instance + ) + ) + return totals_list + +class TotalsSqlDefinition(SqlTableDefinition): + name = TOTALS_TABLE + prototype = ( + f""" + {TOTALS_TABLE} ( + provider TEXT NOT NULL, + field TEXT NOT NULL, + maximum REAL, + total REAL, + instance_id TEXT NOT NULL, + PRIMARY KEY (provider, field, instance_id) + ) + """ + ) + version = 1 + + def migrate(self, last_version: int, db_provider: DBProviderWrapper) -> None: + if last_version == 0: + # Migrate from "moonraker" namespace to a table + logging.info("Migrating history totals from moonraker namespace...") + hist_ns: Dict[str, Any] = db_provider.get_item("moonraker", "history", {}) + job_totals: Dict[str, Any] = hist_ns.get("job_totals", BASE_TOTALS) + aux_totals: List[Dict[str, Any]] = hist_ns.get("aux_totals", []) + if not isinstance(job_totals, dict): + job_totals = dict(BASE_TOTALS) + if not isinstance(aux_totals, list): + aux_totals = [] + totals_list = _create_totals_list(job_totals, aux_totals) + sql_conn = db_provider.connection + with sql_conn: + sql_conn.executemany( + f"INSERT OR IGNORE INTO {TOTALS_TABLE} VALUES(?, ?, ?, ?, ?)", + totals_list + ) + try: + db_provider.delete_item("moonraker", "history") + except Exception: + pass + +class HistorySqlDefinition(SqlTableDefinition): + name = HIST_TABLE + prototype = ( + f""" + {HIST_TABLE} ( + job_id INTEGER PRIMARY KEY ASC, + user TEXT NOT NULL, + filename TEXT, + status TEXT NOT NULL, + start_time REAL NOT NULL, + end_time REAL, + print_duration REAL NOT NULL, + total_duration REAL NOT NULL, + filament_used REAL NOT NULL, + metadata pyjson, + auxiliary_data pyjson NOT NULL, + instance_id TEXT NOT NULL + ) + """ + ) + version = 1 + + def _get_entry_item( + self, entry: Dict[str, Any], name: str, default: Any = 0. 
+ ) -> Any: + val = entry.get(name) + if val is None: + return default + return val + + def migrate(self, last_version: int, db_provider: DBProviderWrapper) -> None: + if last_version == 0: + conn = db_provider.connection + for batch in db_provider.iter_namespace("history", 1000): + conv_vals: List[Tuple[Any, ...]] = [] + entry: Dict[str, Any] + for key, entry in batch.items(): + if not isinstance(entry, dict): + logging.info( + f"History migration, skipping invalid value: {key} {entry}" + ) + continue + try: + conv_vals.append( + ( + None, + self._get_entry_item(entry, "user", "No User"), + self._get_entry_item(entry, "filename", "unknown"), + self._get_entry_item(entry, "status", "error"), + self._get_entry_item(entry, "start_time"), + self._get_entry_item(entry, "end_time"), + self._get_entry_item(entry, "print_duration"), + self._get_entry_item(entry, "total_duration"), + self._get_entry_item(entry, "filament_used"), + self._get_entry_item(entry, "metadata", {}), + self._get_entry_item(entry, "auxiliary_data", []), + "default" + ) + ) + except KeyError: + continue + if not conv_vals: + continue + placeholders = ",".join("?" * len(conv_vals[0])) + with conn: + conn.executemany( + f"INSERT INTO {HIST_TABLE} VALUES({placeholders})", + conv_vals + ) + db_provider.wipe_local_namespace("history") class History: def __init__(self, config: ConfigHelper) -> None: self.server = config.get_server() - self.file_manager: FileManager = self.server.lookup_component( - 'file_manager') + self.file_manager: FileManager = self.server.lookup_component('file_manager') self.request_lock = Lock() + FieldTracker.class_init(self) + self.auxiliary_fields: List[HistoryFieldData] = [] database: DBComp = self.server.lookup_component("database") - self.job_totals: Dict[str, float] = database.get_item( - "moonraker", "history.job_totals", - { - 'total_jobs': 0, - 'total_time': 0., - 'total_print_time': 0., - 'total_filament_used': 0., - 'longest_job': 0., - 'longest_print': 0. 
-            }).result()
+        self.history_table = database.register_table(HistorySqlDefinition())
+        self.totals_table = database.register_table(TotalsSqlDefinition())
+        self.job_totals: Totals = dict(BASE_TOTALS)
+        self.aux_totals: AuxTotals = []
         self.server.register_event_handler(
             "server:klippy_disconnect", self._handle_disconnect)
         self.server.register_event_handler(
             "server:klippy_shutdown", self._handle_shutdown)
         self.server.register_event_handler(
-            "job_state:started", self._on_job_started)
+            "job_state:state_changed", self._on_job_state_changed)
         self.server.register_event_handler(
-            "job_state:complete", self._on_job_complete)
-        self.server.register_event_handler(
-            "job_state:cancelled", self._on_job_cancelled)
-        self.server.register_event_handler(
-            "job_state:standby", self._on_job_standby)
-        self.server.register_event_handler(
-            "job_state:error", self._on_job_error)
+            "klippy_apis:job_start_complete", self._on_job_requested)
         self.server.register_notification("history:history_changed")
         self.server.register_endpoint(
-            "/server/history/job", ['GET', 'DELETE'], self._handle_job_request)
+            "/server/history/job", RequestType.GET | RequestType.DELETE,
+            self._handle_job_request
+        )
         self.server.register_endpoint(
-            "/server/history/list", ['GET'], self._handle_jobs_list)
+            "/server/history/list", RequestType.GET, self._handle_jobs_list
+        )
         self.server.register_endpoint(
-            "/server/history/totals", ['GET'], self._handle_job_totals)
+            "/server/history/totals", RequestType.GET, self._handle_job_totals
+        )
         self.server.register_endpoint(
-            "/server/history/reset_totals", ['POST'],
-            self._handle_job_total_reset)
-
-        database.register_local_namespace(HIST_NAMESPACE)
-        self.history_ns = database.wrap_namespace(HIST_NAMESPACE,
-                                                  parse_keys=False)
+            "/server/history/reset_totals", RequestType.POST,
+            self._handle_job_total_reset
+        )
         self.current_job: Optional[PrinterJob] = None
-        self.current_job_id: Optional[str] = None
-        self.next_job_id: int = 0
-        self.cached_job_ids = self.history_ns.keys().result()
-        if self.cached_job_ids:
-            self.next_job_id = int(self.cached_job_ids[-1], 16) + 1
+        self.current_job_id: Optional[int] = None
+        self.job_user: str = "No User"
+        self.job_paused: bool = False
+
+    async def component_init(self) -> None:
+        # Populate totals
+        valid_aux_totals = [
+            (item.provider, item.name) for item in self.auxiliary_fields
+            if item.has_totals()
+        ]
+        cursor = await self.totals_table.execute(f"SELECT * from {TOTALS_TABLE}")
+        await cursor.set_arraysize(200)
+        for row in await cursor.fetchall():
+            provider, field, maximum, total, _ = tuple(row)
+            if provider == "history":
+                self.job_totals[field] = total if maximum is None else maximum
+            elif (provider, field) in valid_aux_totals:
+                item = dict(row)
+                item.pop("instance_id", None)
+                self.aux_totals.append(item)
+        # Check for interrupted jobs
+        cursor = await self.history_table.execute(
+            f"SELECT job_id FROM {HIST_TABLE} WHERE status = 'in_progress'"
+        )
+        interrupted_jobs: List[int] = [row[0] for row in await cursor.fetchall()]
+        if interrupted_jobs:
+            async with self.history_table as tx:
+                await tx.execute(
+                    f"UPDATE {HIST_TABLE} SET status = 'interrupted' "
+                    "WHERE status = 'in_progress'"
+                )
+            self.server.add_log_rollover_item(
+                "interrupted_history",
+                "The following jobs were detected as interrupted: "
+                f"{interrupted_jobs}"
+            )

     async def _handle_job_request(self,
                                   web_request: WebRequest
                                   ) -> Dict[str, Any]:
         async with self.request_lock:
-            action = web_request.get_action()
-            if action == "GET":
+            req_type = 
web_request.get_request_type() + if req_type == RequestType.GET: job_id = web_request.get_str("uid") - if job_id not in self.cached_job_ids: + cursor = await self.history_table.execute( + f"SELECT * FROM {HIST_TABLE} WHERE job_id = ?", (int(job_id, 16),) + ) + result = await cursor.fetchone() + if result is None: raise self.server.error(f"Invalid job uid: {job_id}", 404) - job = await self.history_ns[job_id] + job = dict(result) return {"job": self._prep_requested_job(job, job_id)} - if action == "DELETE": + if req_type == RequestType.DELETE: all = web_request.get_boolean("all", False) if all: - deljobs = self.cached_job_ids - self.history_ns.clear() - self.cached_job_ids = [] - self.next_job_id = 0 + cursor = await self.history_table.execute( + f"SELECT job_id FROM {HIST_TABLE} WHERE instance_id = ?", + ("default",) + ) + await cursor.set_arraysize(1000) + deljobs = [f"{row[0]:06X}" for row in await cursor.fetchall()] + async with self.history_table as tx: + await tx.execute( + f"DELETE FROM {HIST_TABLE} WHERE instance_id = ?", + ("default",) + ) return {'deleted_jobs': deljobs} job_id = web_request.get_str("uid") - if job_id not in self.cached_job_ids: + async with self.history_table as tx: + cursor = await tx.execute( + f"DELETE FROM {HIST_TABLE} WHERE job_id = ?", (int(job_id, 16),) + ) + if cursor.rowcount < 1: raise self.server.error(f"Invalid job uid: {job_id}", 404) - - self.delete_job(job_id) return {'deleted_jobs': [job_id]} raise self.server.error("Invalid Request Method") @@ -113,199 +309,205 @@ class History: web_request: WebRequest ) -> Dict[str, Any]: async with self.request_lock: - i = 0 - count = 0 - end_num = len(self.cached_job_ids) - jobs: List[Dict[str, Any]] = [] - start_num = 0 - before = web_request.get_float("before", -1) since = web_request.get_float("since", -1) limit = web_request.get_int("limit", 50) start = web_request.get_int("start", 0) - order = web_request.get_str("order", "desc") + order = web_request.get_str("order", "desc").upper() - if order not in ["asc", "desc"]: + if order not in ["ASC", "DESC"]: raise self.server.error(f"Invalid `order` value: {order}", 400) - - reverse_order = (order == "desc") - - # cached jobs is asc order, find lower and upper boundary - if since != -1: - while start_num < end_num: - job_id = self.cached_job_ids[start_num] - job: Dict[str, Any] = await self.history_ns[job_id] - if job['start_time'] > since: - break - start_num += 1 - + # Build SQL Select Statement + values: List[Any] = ["default"] + sql_statement = f"SELECT * FROM {HIST_TABLE} WHERE instance_id = ?" if before != -1: - while end_num > 0: - job_id = self.cached_job_ids[end_num-1] - job = await self.history_ns[job_id] - if job['end_time'] < before: - break - end_num -= 1 - - if start_num >= end_num or end_num == 0: - return {"count": 0, "jobs": []} - - i = start - count = end_num - start_num - - if limit == 0: - limit = MAX_JOBS - - while i < count and len(jobs) < limit: - if reverse_order: - job_id = self.cached_job_ids[end_num - i - 1] - else: - job_id = self.cached_job_ids[start_num + i] - job = await self.history_ns[job_id] + sql_statement += " and end_time < ?" + values.append(before) + if since != -1: + sql_statement += " and start_time > ?" + values.append(since) + sql_statement += f" ORDER BY job_id {order}" + if limit > 0: + sql_statement += " LIMIT ? OFFSET ?" 
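+                # The final parameterized statement resembles:
+                #   SELECT * FROM job_history WHERE instance_id = ?
+                #       and end_time < ? and start_time > ?
+                #       ORDER BY job_id DESC LIMIT ? OFFSET ?
+                # with the values list bound in the same order below.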
+ values.append(limit) + values.append(start) + cursor = await self.history_table.execute(sql_statement, values) + await cursor.set_arraysize(1000) + jobs: List[Dict[str, Any]] = [] + for row in await cursor.fetchall(): + job = dict(row) + job_id = f"{row['job_id']:06X}" jobs.append(self._prep_requested_job(job, job_id)) - i += 1 + return {"count": len(jobs), "jobs": jobs} - return {"count": count, "jobs": jobs} - - async def _handle_job_totals(self, - web_request: WebRequest - ) -> Dict[str, Dict[str, float]]: - return {'job_totals': self.job_totals} - - async def _handle_job_total_reset(self, - web_request: WebRequest, - ) -> Dict[str, Dict[str, float]]: - if self.current_job is not None: - raise self.server.error( - "Job in progress, cannot reset totals") - last_totals = dict(self.job_totals) - self.job_totals = { - 'total_jobs': 0, - 'total_time': 0., - 'total_print_time': 0., - 'total_filament_used': 0., - 'longest_job': 0., - 'longest_print': 0. + async def _handle_job_totals( + self, web_request: WebRequest + ) -> Dict[str, Union[Totals, AuxTotals]]: + return { + "job_totals": self.job_totals, + "auxiliary_totals": self.aux_totals } - database: DBComp = self.server.lookup_component("database") - await database.insert_item( - "moonraker", "history.job_totals", self.job_totals) - return {'last_totals': last_totals} - def _on_job_started(self, - prev_stats: Dict[str, Any], - new_stats: Dict[str, Any] - ) -> None: + async def _handle_job_total_reset( + self, web_request: WebRequest + ) -> Dict[str, Union[Totals, AuxTotals]]: if self.current_job is not None: - # Finish with the previous state - self.finish_job("cancelled", prev_stats) - self.add_job(PrinterJob(new_stats)) + raise self.server.error("Job in progress, cannot reset totals") + last_totals = self.job_totals + self.job_totals = dict(BASE_TOTALS) + last_aux_totals = self.aux_totals + self._update_aux_totals(reset=True) + totals_list = _create_totals_list(self.job_totals, self.aux_totals) + async with self.totals_table as tx: + await tx.execute( + f"DELETE FROM {TOTALS_TABLE} WHERE instance_id = ?", ("default",) + ) + await tx.executemany( + f"INSERT INTO {TOTALS_TABLE} VALUES(?, ?, ?, ?, ?)", totals_list + ) + return { + "last_totals": last_totals, + "last_auxiliary_totals": last_aux_totals + } - def _on_job_complete(self, - prev_stats: Dict[str, Any], - new_stats: Dict[str, Any] - ) -> None: - self.finish_job("completed", new_stats) + async def _on_job_state_changed( + self, + job_event: JobEvent, + prev_stats: Dict[str, Any], + new_stats: Dict[str, Any] + ) -> None: + self.job_paused = job_event == JobEvent.PAUSED + if job_event == JobEvent.STARTED: + if self.current_job is not None: + # Finish with the previous state + await self.finish_job("cancelled", prev_stats) + await self.add_job(PrinterJob(new_stats)) + elif job_event == JobEvent.COMPLETE: + await self.finish_job("completed", new_stats) + elif job_event == JobEvent.ERROR: + await self.finish_job("error", new_stats) + elif job_event in (JobEvent.CANCELLED, JobEvent.STANDBY): + # Cancel on "standby" for backward compatibility with + # `CLEAR_PAUSE/SDCARD_RESET_FILE` workflow + await self.finish_job("cancelled", prev_stats) - def _on_job_cancelled(self, - prev_stats: Dict[str, Any], - new_stats: Dict[str, Any] - ) -> None: - self.finish_job("cancelled", new_stats) + def _on_job_requested(self, user: Optional[UserInfo]) -> None: + username = user.username if user is not None else "No User" + self.job_user = username + if self.current_job is not None: + 
self.current_job.user = username
 
-    def _on_job_error(self,
-                      prev_stats: Dict[str, Any],
-                      new_stats: Dict[str, Any]
-                      ) -> None:
-        self.finish_job("error", new_stats)
-
-    def _on_job_standby(self,
-                        prev_stats: Dict[str, Any],
-                        new_stats: Dict[str, Any]
-                        ) -> None:
-        # Backward compatibility with
-        # `CLEAR_PAUSE/SDCARD_RESET_FILE` workflow
-        self.finish_job("cancelled", prev_stats)
-
-    def _handle_shutdown(self) -> None:
+    async def _handle_shutdown(self) -> None:
         jstate: JobState = self.server.lookup_component("job_state")
         last_ps = jstate.get_last_stats()
-        self.finish_job("klippy_shutdown", last_ps)
+        await self.finish_job("klippy_shutdown", last_ps)
 
-    def _handle_disconnect(self) -> None:
+    async def _handle_disconnect(self) -> None:
         jstate: JobState = self.server.lookup_component("job_state")
         last_ps = jstate.get_last_stats()
-        self.finish_job("klippy_disconnect", last_ps)
+        await self.finish_job("klippy_disconnect", last_ps)
 
-    def add_job(self, job: PrinterJob) -> None:
-        if len(self.cached_job_ids) >= MAX_JOBS:
-            self.delete_job(self.cached_job_ids[0])
-        job_id = f"{self.next_job_id:06X}"
-        self.current_job = job
-        self.current_job_id = job_id
-        self.grab_job_metadata()
-        self.history_ns[job_id] = job.get_stats()
-        self.cached_job_ids.append(job_id)
-        self.next_job_id += 1
-        logging.debug(
-            f"History Job Added - Id: {job_id}, File: {job.filename}"
-        )
-        self.send_history_event("added")
+    async def add_job(self, job: PrinterJob) -> None:
+        async with self.request_lock:
+            self.current_job = job
+            self.current_job_id = None
+            self.current_job.user = self.job_user
+            self.grab_job_metadata()
+            for field in self.auxiliary_fields:
+                field.tracker.reset()
+            self.current_job.set_aux_data(self.auxiliary_fields)
+            new_id = await self.save_job(job, None)
+            if new_id is None:
+                logging.info(f"Error saving job, filename '{job.filename}'")
+                return
+            self.current_job_id = new_id
+            job_id = f"{new_id:06X}"
+            self.update_metadata(job_id)
+            logging.debug(
+                f"History Job Added - Id: {job_id}, File: {job.filename}"
+            )
+            self.send_history_event("added")
 
-    def delete_job(self, job_id: Union[int, str]) -> None:
-        if isinstance(job_id, int):
-            job_id = f"{job_id:06X}"
+    async def save_job(self, job: PrinterJob, job_id: Optional[int]) -> Optional[int]:
+        values: List[Any] = [
+            job_id,
+            job.user,
+            job.filename,
+            job.status,
+            job.start_time,
+            job.end_time,
+            job.print_duration,
+            job.total_duration,
+            job.filament_used,
+            job.metadata,
+            job.auxiliary_data,
+            "default"
+        ]
+        placeholders = ",".join("?" * len(values))
+        async with self.history_table as tx:
+            cursor = await tx.execute(
+                f"REPLACE INTO {HIST_TABLE} VALUES({placeholders})", values
+            )
+            return cursor.lastrowid
 
-        if job_id in self.cached_job_ids:
-            del self.history_ns[job_id]
-            self.cached_job_ids.remove(job_id)
+    async def delete_job(self, job_id: Union[int, str]) -> None:
+        if isinstance(job_id, str):
+            job_id = int(job_id, 16)
+        async with self.history_table as tx:
+            await tx.execute(
+                f"DELETE FROM {HIST_TABLE} WHERE job_id = ?", (job_id,)
+            )
 
-    def finish_job(self, status: str, pstats: Dict[str, Any]) -> None:
-        if self.current_job is None:
-            return
-        cj = self.current_job
-        if (
-            pstats.get('filename') != cj.get('filename') or
-            pstats.get('total_duration', 0.) 
< cj.get('total_duration') - ): - # Print stats have been reset, do not update this job with them - pstats = {} + async def finish_job(self, status: str, pstats: Dict[str, Any]) -> None: + async with self.request_lock: + if self.current_job is None or self.current_job_id is None: + self._reset_current_job() + return + if ( + pstats.get('filename') != self.current_job.filename or + pstats.get('total_duration', 0.) < self.current_job.total_duration + ): + # Print stats have been reset, do not update this job with them + pstats = {} + self.current_job.user = self.job_user + self.current_job.finish(status, pstats) + # Regrab metadata incase metadata wasn't parsed yet due to file upload + self.grab_job_metadata() + self.current_job.set_aux_data(self.auxiliary_fields) + job_id = f"{self.current_job_id:06X}" + await self.save_job(self.current_job, self.current_job_id) + self.update_metadata(job_id) + await self._update_job_totals() + logging.debug( + f"History Job Finished - Id: {job_id}, " + f"File: {self.current_job.filename}, " + f"Status: {status}" + ) + self.send_history_event("finished") + self._reset_current_job() - self.current_job.finish(status, pstats) - # Regrab metadata incase metadata wasn't parsed yet due to file upload - self.grab_job_metadata() - self.save_current_job() - self._update_job_totals() - logging.debug( - f"History Job Finished - Id: {self.current_job_id}, " - f"File: {self.current_job.filename}, " - f"Status: {status}" - ) - self.send_history_event("finished") + def _reset_current_job(self) -> None: self.current_job = None self.current_job_id = None + self.job_user = "No User" - async def get_job(self, - job_id: Union[int, str] - ) -> Optional[Dict[str, Any]]: - if isinstance(job_id, int): - job_id = f"{job_id:06X}" - return await self.history_ns.get(job_id, None) + async def get_job( + self, job_id: Union[int, str] + ) -> Optional[Dict[str, Any]]: + if isinstance(job_id, str): + job_id = int(job_id, 16) + cursor = await self.history_table.execute( + f"SELECT * FROM {HIST_TABLE} WHERE job_id = ?", (job_id,) + ) + result = await cursor.fetchone() + return dict(result) if result is not None else result def grab_job_metadata(self) -> None: if self.current_job is None: return - filename: str = self.current_job.get("filename") + filename: str = self.current_job.filename mdst = self.file_manager.get_metadata_storage() metadata: Dict[str, Any] = mdst.get(filename, {}) - if metadata: - # Add the start time and job id to the - # persistent metadata storage - metadata.update({ - 'print_start_time': self.current_job.get('start_time'), - 'job_id': self.current_job_id - }) - mdst.insert(filename, metadata.copy()) # We don't need to store these fields in the # job metadata, as they are redundant metadata.pop('print_start_time', None) @@ -314,61 +516,108 @@ class History: thumb: Dict[str, Any] for thumb in metadata['thumbnails']: thumb.pop('data', None) - self.current_job.set("metadata", metadata) + self.current_job.metadata = metadata - def save_current_job(self) -> None: - if self.current_job is None or self.current_job_id is None: + def update_metadata(self, job_id: str) -> None: + if self.current_job is None: return - self.history_ns[self.current_job_id] = self.current_job.get_stats() + mdst = self.file_manager.get_metadata_storage() + filename: str = self.current_job.filename + metadata: Dict[str, Any] = mdst.get(filename, {}) + if metadata: + # Add the start time and job id to the + # persistent metadata storage + metadata.update({ + 'print_start_time': 
self.current_job.get('start_time'), + 'job_id': job_id + }) + mdst.insert(filename, metadata) - def _update_job_totals(self) -> None: + async def _update_job_totals(self) -> None: if self.current_job is None: return job = self.current_job - self.job_totals['total_jobs'] += 1 - self.job_totals['total_time'] += job.get('total_duration') - self.job_totals['total_print_time'] += job.get('print_duration') - self.job_totals['total_filament_used'] += job.get('filament_used') - self.job_totals['longest_job'] = max( - self.job_totals['longest_job'], job.get('total_duration')) - self.job_totals['longest_print'] = max( - self.job_totals['longest_print'], job.get('print_duration')) - database: DBComp = self.server.lookup_component("database") - database.insert_item( - "moonraker", "history.job_totals", self.job_totals) + self._accumulate_total("total_jobs", 1) + self._accumulate_total("total_time", job.total_duration) + self._accumulate_total("total_print_time", job.print_duration) + self._accumulate_total("total_filament_used", job.filament_used) + self._maximize_total("longest_job", job.total_duration) + self._maximize_total("longest_print", job.print_duration) + self._update_aux_totals() + totals_list = _create_totals_list(self.job_totals, self.aux_totals) + async with self.totals_table as tx: + await tx.executemany( + f"REPLACE INTO {TOTALS_TABLE} VALUES(?, ?, ?, ?, ?)", totals_list + ) + + def _accumulate_total(self, field: str, val: Union[int, float]) -> None: + self.job_totals[field] += val + + def _maximize_total(self, field: str, val: Union[int, float]) -> None: + self.job_totals[field] = max(self.job_totals[field], val) + + def _update_aux_totals(self, reset: bool = False) -> None: + last_totals = self.aux_totals + self.aux_totals = [ + field.get_totals(last_totals, reset) + for field in self.auxiliary_fields + if field.has_totals() + ] def send_history_event(self, evt_action: str) -> None: if self.current_job is None or self.current_job_id is None: return - job = self._prep_requested_job( - self.current_job.get_stats(), self.current_job_id) - self.server.send_event("history:history_changed", - {'action': evt_action, 'job': job}) + job_id = f"{self.current_job_id:06X}" + job = self._prep_requested_job(self.current_job.get_stats(), job_id) + self.server.send_event( + "history:history_changed", {'action': evt_action, 'job': job} + ) - def _prep_requested_job(self, - job: Dict[str, Any], - job_id: str - ) -> Dict[str, Any]: - job['job_id'] = job_id - job['exists'] = self.file_manager.check_file_exists( - "gcodes", job['filename']) + def _prep_requested_job( + self, job: Dict[str, Any], job_id: str + ) -> Dict[str, Any]: + fm = self.file_manager + mtime = job.get("metadata", {}).get("modified", None) + job["exists"] = fm.check_file_exists("gcodes", job['filename'], mtime) + job["job_id"] = job_id + job.pop("instance_id", None) return job - def on_exit(self) -> None: + def register_auxiliary_field(self, new_field: HistoryFieldData) -> None: + if new_field.provider == "history": + raise self.server.error("Provider name 'history' is reserved") + for field in self.auxiliary_fields: + if field == new_field: + raise self.server.error( + f"Field {field.name} already registered by " + f"provider {field.provider}." 
+ ) + self.auxiliary_fields.append(new_field) + + def tracking_enabled(self, check_paused: bool) -> bool: + if self.current_job is None: + return False + return not self.job_paused if check_paused else True + + async def on_exit(self) -> None: + if self.current_job is None: + return jstate: JobState = self.server.lookup_component("job_state") last_ps = jstate.get_last_stats() - self.finish_job("server_exit", last_ps) + await self.finish_job("server_exit", last_ps) class PrinterJob: def __init__(self, data: Dict[str, Any] = {}) -> None: self.end_time: Optional[float] = None self.filament_used: float = 0 - self.filename: Optional[str] = None - self.metadata: Optional[Dict[str, Any]] = None + self.filename: str = "" + self.metadata: Dict[str, Any] = {} self.print_duration: float = 0. self.status: str = "in_progress" self.start_time = time.time() self.total_duration: float = 0. + self.auxiliary_data: List[Dict[str, Any]] = [] + self.user: str = "No User" self.update_from_ps(data) def finish(self, @@ -376,7 +625,7 @@ class PrinterJob: print_stats: Dict[str, Any] = {} ) -> None: self.end_time = time.time() - self.status = status + self.status = status if status is not None else "error" self.update_from_ps(print_stats) def get(self, name: str) -> Any: @@ -392,10 +641,14 @@ class PrinterJob: return setattr(self, name, val) + def set_aux_data(self, fields: List[HistoryFieldData]) -> None: + self.auxiliary_data = [field.as_dict() for field in fields] + def update_from_ps(self, data: Dict[str, Any]) -> None: for i in data: - if hasattr(self, i): + if hasattr(self, i) and data[i] is not None: setattr(self, i, data[i]) + def load_component(config: ConfigHelper) -> History: return History(config) diff --git a/moonraker/components/http_client.py b/moonraker/components/http_client.py index fb7bcc9..ef002bb 100644 --- a/moonraker/components/http_client.py +++ b/moonraker/components/http_client.py @@ -6,14 +6,15 @@ from __future__ import annotations import re -import json import time import asyncio import pathlib import tempfile import logging -from utils import ServerError -from tornado.escape import url_escape, url_unescape +import copy +from ..utils import ServerError +from ..utils import json_wrapper as jsonw +from tornado.escape import url_unescape from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError from tornado.httputil import HTTPHeaders from typing import ( @@ -27,8 +28,8 @@ from typing import ( Any ) if TYPE_CHECKING: - from moonraker import Server - from confighelper import ConfigHelper + from ..server import Server + from ..confighelper import ConfigHelper from io import BufferedWriter StrOrPath = Union[str, pathlib.Path] @@ -40,18 +41,6 @@ AsyncHTTPClient.configure( GITHUB_PREFIX = "https://api.github.com/" -def escape_query_string(qs: str) -> str: - parts = qs.split("&") - escaped: List[str] = [] - for p in parts: - item = p.split("=", 1) - key = url_escape(item[0]) - if len(item) == 2: - escaped.append(f"{key}={url_escape(item[1])}") - else: - escaped.append(key) - return "&".join(escaped) - class HttpClient: def __init__(self, config: ConfigHelper) -> None: self.server = config.get_server() @@ -76,29 +65,14 @@ class HttpClient: if len(headers) == 0: raise self.server.error( "Either an Etag or Last Modified Date must be specified") - empty_resp = HttpResponse(url, 200, b"", headers, None) + empty_resp = HttpResponse(url, url, 200, b"", headers, None) self.response_cache[url] = empty_resp - def escape_url(self, url: str) -> str: - # escape the url - match = 
re.match(r"(https?://[^/?#]+)([^?#]+)?(\?[^#]+)?(#.+)?", url) - if match is not None: - uri, path, qs, fragment = match.groups() - if path is not None: - uri += "/".join([url_escape(p, plus=False) - for p in path.split("/")]) - if qs is not None: - uri += "?" + escape_query_string(qs[1:]) - if fragment is not None: - uri += "#" + url_escape(fragment[1:], plus=False) - url = uri - return url - async def request( self, method: str, url: str, - body: Optional[Union[str, List[Any], Dict[str, Any]]] = None, + body: Optional[Union[bytes, str, List[Any], Dict[str, Any]]] = None, headers: Optional[Dict[str, Any]] = None, connect_timeout: float = 5., request_timeout: float = 10., @@ -113,7 +87,7 @@ class HttpClient: # prepare the body if required req_headers: Dict[str, Any] = {} if isinstance(body, (list, dict)): - body = json.dumps(body) + body = jsonw.dumps(body) req_headers["Content-Type"] = "application/json" cached: Optional[HttpResponse] = None if enable_cache: @@ -160,10 +134,13 @@ class HttpClient: continue else: result = resp.body - ret = HttpResponse(url, resp.code, result, resp.headers, err) + ret = HttpResponse( + url, resp.effective_url, resp.code, result, + resp.headers, err + ) break else: - ret = HttpResponse(url, 500, b"", HTTPHeaders(), err) + ret = HttpResponse(url, url, 500, b"", HTTPHeaders(), err) if enable_cache and ret.is_cachable(): logging.debug(f"Caching HTTP Response: {url}") self.response_cache[cache_key] = ret @@ -291,18 +268,70 @@ class HttpClient: return dl.dest_file raise self.server.error(f"Retries exceeded for request: {url}") + def wrap_request(self, default_url: str, **kwargs) -> HttpRequestWrapper: + return HttpRequestWrapper(self, default_url, **kwargs) + def close(self): self.client.close() +class HttpRequestWrapper: + def __init__( + self, client: HttpClient, default_url: str, **kwargs + ) -> None: + self._do_request = client.request + self._last_response: Optional[HttpResponse] = None + self.default_request_args: Dict[str, Any] = { + "method": "GET", + "url": default_url, + } + self.default_request_args.update(kwargs) + self.request_args = copy.deepcopy(self.default_request_args) + self.reset() + + async def send(self, **kwargs) -> HttpResponse: + req_args = copy.deepcopy(self.request_args) + req_args.update(kwargs) + method = req_args.pop("method", self.default_request_args["method"]) + url = req_args.pop("url", self.default_request_args["url"]) + self._last_response = await self._do_request(method, url, **req_args) + return self._last_response + + def set_method(self, method: str) -> None: + self.request_args["method"] = method + + def set_url(self, url: str) -> None: + self.request_args["url"] = url + + def set_body( + self, body: Optional[Union[str, List[Any], Dict[str, Any]]] + ) -> None: + self.request_args["body"] = body + + def add_header(self, name: str, value: str) -> None: + headers = self.request_args.get("headers", {}) + headers[name] = value + self.request_args["headers"] = headers + + def set_headers(self, headers: Dict[str, str]) -> None: + self.request_args["headers"] = headers + + def reset(self) -> None: + self.request_args = copy.deepcopy(self.default_request_args) + + def last_response(self) -> Optional[HttpResponse]: + return self._last_response + class HttpResponse: def __init__(self, url: str, + final_url: str, code: int, result: bytes, response_headers: HTTPHeaders, error: Optional[BaseException] ) -> None: self._url = url + self._final_url = final_url self._code = code self._result: bytes = result self._encoding: str = "utf-8" @@ 
-312,8 +341,8 @@ class HttpResponse: self._last_modified: Optional[str] = response_headers.get( "last-modified", None) - def json(self, **kwargs) -> Union[List[Any], Dict[str, Any]]: - return json.loads(self._result, **kwargs) + def json(self) -> Union[List[Any], Dict[str, Any]]: + return jsonw.loads(self._result) def is_cachable(self) -> bool: return self._last_modified is not None or self._etag is not None @@ -353,6 +382,10 @@ class HttpResponse: def url(self) -> str: return self._url + @property + def final_url(self) -> str: + return self._final_url + @property def status_code(self) -> int: return self._code diff --git a/moonraker/components/job_queue.py b/moonraker/components/job_queue.py index c232ba9..b2304d1 100644 --- a/moonraker/components/job_queue.py +++ b/moonraker/components/job_queue.py @@ -8,6 +8,7 @@ from __future__ import annotations import asyncio import time import logging +from ..common import JobEvent, RequestType # Annotation imports from typing import ( @@ -19,8 +20,8 @@ from typing import ( Union, ) if TYPE_CHECKING: - from confighelper import ConfigHelper - from websockets import WebRequest + from ..confighelper import ConfigHelper + from ..common import WebRequest, UserInfo from .klippy_apis import KlippyAPI from .file_manager.file_manager import FileManager @@ -46,11 +47,8 @@ class JobQueue: self.server.register_event_handler( "server:klippy_shutdown", self._handle_shutdown) self.server.register_event_handler( - "job_state:complete", self._on_job_complete) - self.server.register_event_handler( - "job_state:error", self._on_job_abort) - self.server.register_event_handler( - "job_state:cancelled", self._on_job_abort) + "job_state:state_changed", self._on_job_state_changed + ) self.server.register_notification("job_queue:job_queue_changed") self.server.register_remote_method("pause_job_queue", self.pause_queue) @@ -58,14 +56,21 @@ class JobQueue: self.start_queue) self.server.register_endpoint( - "/server/job_queue/job", ['POST', 'DELETE'], - self._handle_job_request) + "/server/job_queue/job", RequestType.POST | RequestType.DELETE, + self._handle_job_request + ) self.server.register_endpoint( - "/server/job_queue/pause", ['POST'], self._handle_pause_queue) + "/server/job_queue/pause", RequestType.POST, self._handle_pause_queue + ) self.server.register_endpoint( - "/server/job_queue/start", ['POST'], self._handle_start_queue) + "/server/job_queue/start", RequestType.POST, self._handle_start_queue + ) self.server.register_endpoint( - "/server/job_queue/status", ['GET'], self._handle_queue_status) + "/server/job_queue/status", RequestType.GET, self._handle_queue_status + ) + self.server.register_endpoint( + "/server/job_queue/jump", RequestType.POST, self._handle_jump + ) async def _handle_ready(self) -> None: async with self.lock: @@ -83,10 +88,15 @@ class JobQueue: if not self.queued_jobs and self.automatic: self._set_queue_state("ready") - async def _on_job_complete(self, - prev_stats: Dict[str, Any], - new_stats: Dict[str, Any] - ) -> None: + async def _on_job_state_changed(self, job_event: JobEvent, *args) -> None: + if job_event == JobEvent.COMPLETE: + await self._on_job_complete() + elif job_event.aborted: + await self._on_job_abort() + + async def _on_job_complete(self) -> None: + if not self.automatic: + return async with self.lock: # Transition to the next job in the queue if self.queue_state == "ready" and self.queued_jobs: @@ -95,10 +105,7 @@ class JobQueue: self.pop_queue_handle = event_loop.delay_callback( self.job_delay, self._pop_job) - async def 
_on_job_abort(self, - prev_stats: Dict[str, Any], - new_stats: Dict[str, Any] - ) -> None: + async def _on_job_abort(self) -> None: async with self.lock: if self.queued_jobs: self._set_queue_state("paused") @@ -128,7 +135,9 @@ class JobQueue: raise self.server.error( "Queue State Changed during Transition Gcode") self._set_queue_state("starting") - await kapis.start_print(filename) + await kapis.start_print( + filename, wait_klippy_started=True, user=job.user + ) except self.server.error: logging.exception(f"Error Loading print: {filename}") self._set_queue_state("paused") @@ -157,7 +166,9 @@ class JobQueue: async def queue_job(self, filenames: Union[str, List[str]], - check_exists: bool = True + check_exists: bool = True, + reset: bool = False, + user: Optional[UserInfo] = None ) -> None: async with self.lock: # Make sure that the file exists @@ -167,8 +178,10 @@ class JobQueue: # Make sure all files exist before adding them to the queue for fname in filenames: self._check_job_file(fname) + if reset: + self.queued_jobs.clear() for fname in filenames: - queued_job = QueuedJob(fname) + queued_job = QueuedJob(fname, user) self.queued_jobs[queued_job.job_id] = queued_job self._send_queue_event(action="jobs_added") @@ -209,9 +222,12 @@ class JobQueue: self._set_queue_state("loading") event_loop = self.server.get_event_loop() self.pop_queue_handle = event_loop.delay_callback( - 0.01, self._pop_job) + 0.01, self._pop_job, False + ) else: - self._set_queue_state("ready") + qs = "ready" if self.automatic else "paused" + self._set_queue_state(qs) + def _job_map_to_list(self) -> List[Dict[str, Any]]: cur_time = time.time() return [job.as_dict(cur_time) for @@ -241,27 +257,24 @@ class JobQueue: 'queue_state': self.queue_state }) - async def _handle_job_request(self, - web_request: WebRequest - ) -> Dict[str, Any]: - action = web_request.get_action() - if action == "POST": - files: Union[List[str], str] = web_request.get('filenames') - if isinstance(files, str): - files = [f.strip() for f in files.split(',') if f.strip()] + async def _handle_job_request( + self, web_request: WebRequest + ) -> Dict[str, Any]: + req_type = web_request.get_request_type() + if req_type == RequestType.POST: + files = web_request.get_list('filenames') + reset = web_request.get_boolean("reset", False) # Validate that all files exist before queueing - await self.queue_job(files) - elif action == "DELETE": + user = web_request.get_current_user() + await self.queue_job(files, reset=reset, user=user) + elif req_type == RequestType.DELETE: if web_request.get_boolean("all", False): await self.delete_job([], all=True) else: - job_ids: Union[List[str], str] = web_request.get('job_ids') - if isinstance(job_ids, str): - job_ids = [f.strip() for f in job_ids.split(',') - if f.strip()] + job_ids = web_request.get_list('job_ids') await self.delete_job(job_ids) else: - raise self.server.error(f"Invalid action: {action}") + raise self.server.error(f"Invalid request type: {req_type}") return { 'queued_jobs': self._job_map_to_list(), 'queue_state': self.queue_state @@ -293,18 +306,37 @@ class JobQueue: 'queue_state': self.queue_state } + async def _handle_jump(self, web_request: WebRequest) -> Dict[str, Any]: + job_id: str = web_request.get("job_id") + async with self.lock: + job = self.queued_jobs.pop(job_id, None) + if job is None: + raise self.server.error(f"Invalid job id: {job_id}") + new_queue = {job_id: job} + new_queue.update(self.queued_jobs) + self.queued_jobs = new_queue + return { + 'queued_jobs': self._job_map_to_list(), + 
'queue_state': self.queue_state
+        }
+
     async def close(self):
         await self.pause_queue()
 
 
 class QueuedJob:
-    def __init__(self, filename: str) -> None:
+    def __init__(self, filename: str, user: Optional[UserInfo] = None) -> None:
         self.filename = filename
         self.job_id = f"{id(self):016X}"
         self.time_added = time.time()
+        self._user = user
 
     def __str__(self) -> str:
         return self.filename
 
+    @property
+    def user(self) -> Optional[UserInfo]:
+        return self._user
+
     def as_dict(self, cur_time: float) -> Dict[str, Any]:
         return {
             'filename': self.filename,
diff --git a/moonraker/components/job_state.py b/moonraker/components/job_state.py
index 0e313b3..098382f 100644
--- a/moonraker/components/job_state.py
+++ b/moonraker/components/job_state.py
@@ -15,34 +15,45 @@ from typing import (
     Dict,
     List,
 )
+from ..common import JobEvent, KlippyState
 if TYPE_CHECKING:
-    from confighelper import ConfigHelper
+    from ..confighelper import ConfigHelper
     from .klippy_apis import KlippyAPI
 
 
 class JobState:
     def __init__(self, config: ConfigHelper) -> None:
         self.server = config.get_server()
         self.last_print_stats: Dict[str, Any] = {}
+        self.last_event: JobEvent = JobEvent.STANDBY
         self.server.register_event_handler(
-            "server:klippy_started", self._handle_started)
+            "server:klippy_started", self._handle_started
+        )
         self.server.register_event_handler(
-            "server:status_update", self._status_update)
+            "server:klippy_disconnect", self._handle_disconnect
+        )
 
-    async def _handle_started(self, state: str) -> None:
-        if state != "ready":
+    def _handle_disconnect(self):
+        state = self.last_print_stats.get("state", "")
+        if state in ("printing", "paused"):
+            # set error state
+            self.last_print_stats["state"] = "error"
+            self.last_event = JobEvent.ERROR
+
+    async def _handle_started(self, state: KlippyState) -> None:
+        if state != KlippyState.READY:
             return
         kapis: KlippyAPI = self.server.lookup_component('klippy_apis')
         sub: Dict[str, Optional[List[str]]] = {"print_stats": None}
         try:
-            result = await kapis.subscribe_objects(sub)
-        except self.server.error as e:
-            logging.info(f"Error subscribing to print_stats")
+            result = await kapis.subscribe_objects(sub, self._status_update)
+        except self.server.error:
+            logging.info("Error subscribing to print_stats")
         self.last_print_stats = result.get("print_stats", {})
         if "state" in self.last_print_stats:
             state = self.last_print_stats["state"]
             logging.info(f"Job state initialized: {state}")
 
-    async def _status_update(self, data: Dict[str, Any]) -> None:
+    async def _status_update(self, data: Dict[str, Any], _: float) -> None:
         if 'print_stats' not in data:
             return
         ps = data['print_stats']
@@ -67,8 +78,24 @@
                 f"Job State Changed - Prev State: {old_state}, "
                 f"New State: {new_state}"
             )
+        # NOTE: Individual job_state events are DEPRECATED. New modules
+        # should register handlers for "job_state:state_changed" and
+        # match against the JobEvent object provided.
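# A minimal consumer sketch for the aggregated event described in the NOTE
# above. The component is hypothetical; the handler signature and the
# JobEvent usage mirror history.py and job_queue.py earlier in this patch:
#
#     from ..common import JobEvent
#
#     class MyComponent:
#         def __init__(self, config):
#             self.server = config.get_server()
#             self.server.register_event_handler(
#                 "job_state:state_changed", self._on_job_state_changed)
#
#         async def _on_job_state_changed(
#             self, event: JobEvent, prev_ps, new_ps
#         ) -> None:
#             if event == JobEvent.STARTED:
#                 pass  # a new job began
#             elif event.aborted:  # CANCELLED or ERROR
#                 pass  # job ended abnormally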
+ self.server.send_event(f"job_state:{new_state}", prev_ps, new_ps) + self.last_event = JobEvent.from_string(new_state) self.server.send_event( - f"job_state:{new_state}", prev_ps, new_ps) + "job_state:state_changed", + self.last_event, + prev_ps, + new_ps + ) + if "info" in ps: + cur_layer: Optional[int] = ps["info"].get("current_layer") + if cur_layer is not None: + total: int = ps["info"].get("total_layer", 0) + self.server.send_event( + "job_state:layer_changed", cur_layer, total + ) self.last_print_stats.update(ps) def _check_resumed(self, @@ -84,5 +111,8 @@ class JobState: def get_last_stats(self) -> Dict[str, Any]: return dict(self.last_print_stats) + def get_last_job_event(self) -> JobEvent: + return self.last_event + def load_component(config: ConfigHelper) -> JobState: return JobState(config) diff --git a/moonraker/components/klippy_apis.py b/moonraker/components/klippy_apis.py index 59314ba..8d3855b 100644 --- a/moonraker/components/klippy_apis.py +++ b/moonraker/components/klippy_apis.py @@ -5,8 +5,12 @@ # This file may be distributed under the terms of the GNU GPLv3 license. from __future__ import annotations -from utils import SentinelClass -from websockets import WebRequest, Subscribable +import logging +from ..utils import Sentinel +from ..common import WebRequest, APITransport, RequestType +import os +import shutil +import json # Annotation imports from typing import ( @@ -18,12 +22,16 @@ from typing import ( List, TypeVar, Mapping, + Callable, + Coroutine ) if TYPE_CHECKING: - from confighelper import ConfigHelper - from websockets import WebRequest - from klippy_connection import KlippyConnection as Klippy + from ..confighelper import ConfigHelper + from ..common import UserInfo + from .klippy_connection import KlippyConnection as Klippy + from .file_manager.file_manager import FileManager Subscription = Dict[str, Optional[List[Any]]] + SubCallback = Callable[[Dict[str, Dict[str, Any]], float], Optional[Coroutine]] _T = TypeVar("_T") INFO_ENDPOINT = "info" @@ -35,31 +43,55 @@ SUBSCRIPTION_ENDPOINT = "objects/subscribe" STATUS_ENDPOINT = "objects/query" OBJ_LIST_ENDPOINT = "objects/list" REG_METHOD_ENDPOINT = "register_remote_method" -SENTINEL = SentinelClass.get_instance() -class KlippyAPI(Subscribable): +class KlippyAPI(APITransport): def __init__(self, config: ConfigHelper) -> None: self.server = config.get_server() self.klippy: Klippy = self.server.lookup_component("klippy_connection") + self.fm: FileManager = self.server.lookup_component("file_manager") + self.eventloop = self.server.get_event_loop() app_args = self.server.get_app_args() self.version = app_args.get('software_version') # Maintain a subscription for all moonraker requests, as # we do not want to overwrite them self.host_subscription: Subscription = {} + self.subscription_callbacks: List[SubCallback] = [] # Register GCode Aliases self.server.register_endpoint( - "/printer/print/pause", ['POST'], self._gcode_pause) + "/printer/print/pause", RequestType.POST, self._gcode_pause + ) self.server.register_endpoint( - "/printer/print/resume", ['POST'], self._gcode_resume) + "/printer/print/resume", RequestType.POST, self._gcode_resume + ) self.server.register_endpoint( - "/printer/print/cancel", ['POST'], self._gcode_cancel) + "/printer/print/cancel", RequestType.POST, self._gcode_cancel + ) self.server.register_endpoint( - "/printer/print/start", ['POST'], self._gcode_start_print) + "/printer/print/start", RequestType.POST, self._gcode_start_print + ) self.server.register_endpoint( - "/printer/restart", 
['POST'], self._gcode_restart) + "/printer/restart", RequestType.POST, self._gcode_restart + ) self.server.register_endpoint( - "/printer/firmware_restart", ['POST'], self._gcode_firmware_restart) + "/printer/firmware_restart", RequestType.POST, self._gcode_firmware_restart + ) + self.server.register_event_handler( + "server:klippy_disconnect", self._on_klippy_disconnect + ) + self.server.register_endpoint( + "/printer/list_endpoints", RequestType.GET, self.list_endpoints + ) + self.server.register_endpoint( + "/printer/breakheater", RequestType.POST, self.breakheater + ) + self.server.register_endpoint( + "/printer/breakmacro", RequestType.POST, self.breakmacro + ) + + def _on_klippy_disconnect(self) -> None: + self.host_subscription.clear() + self.subscription_callbacks.clear() async def _gcode_pause(self, web_request: WebRequest) -> str: return await self.pause_print() @@ -72,7 +104,8 @@ class KlippyAPI(Subscribable): async def _gcode_start_print(self, web_request: WebRequest) -> str: filename: str = web_request.get_str('filename') - return await self.start_print(filename) + user = web_request.get_current_user() + return await self.start_print(filename, user=user) async def _gcode_restart(self, web_request: WebRequest) -> str: return await self.do_restart("RESTART") @@ -80,32 +113,39 @@ class KlippyAPI(Subscribable): async def _gcode_firmware_restart(self, web_request: WebRequest) -> str: return await self.do_restart("FIRMWARE_RESTART") - async def _send_klippy_request(self, - method: str, - params: Dict[str, Any], - default: Any = SENTINEL - ) -> Any: + async def _send_klippy_request( + self, + method: str, + params: Dict[str, Any], + default: Any = Sentinel.MISSING, + transport: Optional[APITransport] = None + ) -> Any: try: - result = await self.klippy.request( - WebRequest(method, params, conn=self)) + req = WebRequest(method, params, transport=transport or self) + result = await self.klippy.request(req) except self.server.error: - if isinstance(default, SentinelClass): + if default is Sentinel.MISSING: raise result = default return result async def run_gcode(self, script: str, - default: Any = SENTINEL + default: Any = Sentinel.MISSING ) -> str: params = {'script': script} result = await self._send_klippy_request( GCODE_ENDPOINT, params, default) return result - async def start_print(self, filename: str) -> str: + async def start_print( + self, + filename: str, + wait_klippy_started: bool = False, + user: Optional[UserInfo] = None + ) -> str: # WARNING: Do not call this method from within the following - # event handlers: + # event handlers when "wait_klippy_started" is set to True: # klippy_identified, klippy_started, klippy_ready, klippy_disconnect # Doing so will result in "wait_started" blocking for the specifed # timeout (default 20s) and returning False. 
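A minimal sketch of the safe pattern implied by the warning above. The plugin
class, handler names, and gcode filename are hypothetical; register_callback
is the event-loop helper used elsewhere in this patch:

    class MyPlugin:
        def __init__(self, config) -> None:
            self.server = config.get_server()
            self.server.register_event_handler(
                "server:klippy_ready", self._on_klippy_ready)

        async def _on_klippy_ready(self) -> None:
            kapis = self.server.lookup_component("klippy_apis")
            eventloop = self.server.get_event_loop()
            # Defer rather than await: awaiting start_print() with
            # wait_klippy_started=True inside this handler can block for
            # the full wait_started() timeout and return False.
            eventloop.register_callback(self._begin_print, kapis)

        async def _begin_print(self, kapis) -> None:
            await kapis.start_print("example.gcode", wait_klippy_started=True)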
@@ -114,38 +154,78 @@ class KlippyAPI(Subscribable): filename = filename[1:] # Escape existing double quotes in the file name filename = filename.replace("\"", "\\\"") + homedir = os.path.expanduser("~") + if os.path.split(filename)[0].split(os.path.sep)[0] != ".cache": + base_path = os.path.join(homedir, "printer_data/gcodes") + target = os.path.join(".cache", os.path.basename(filename)) + cache_path = os.path.join(base_path, ".cache") + if not os.path.exists(cache_path): + os.makedirs(cache_path) + shutil.rmtree(cache_path) + os.makedirs(cache_path) + metadata = self.fm.gcode_metadata.metadata.get(filename, None) + self.copy_file_to_cache(os.path.join(base_path, filename), os.path.join(base_path, target)) + msg = "// metadata=" + json.dumps(metadata) + self.server.send_event("server:gcode_response", msg) + filename = target script = f'SDCARD_PRINT_FILE FILENAME="{filename}"' - await self.klippy.wait_started() - return await self.run_gcode(script) + if wait_klippy_started: + await self.klippy.wait_started() + logging.info(f"Requesting Job Start, filename = {filename}") + ret = await self.run_gcode(script) + self.server.send_event("klippy_apis:job_start_complete", user) + return ret async def pause_print( - self, default: Union[SentinelClass, _T] = SENTINEL + self, default: Union[Sentinel, _T] = Sentinel.MISSING ) -> Union[_T, str]: self.server.send_event("klippy_apis:pause_requested") + logging.info("Requesting job pause...") return await self._send_klippy_request( "pause_resume/pause", {}, default) async def resume_print( - self, default: Union[SentinelClass, _T] = SENTINEL + self, default: Union[Sentinel, _T] = Sentinel.MISSING ) -> Union[_T, str]: self.server.send_event("klippy_apis:resume_requested") + logging.info("Requesting job resume...") return await self._send_klippy_request( "pause_resume/resume", {}, default) async def cancel_print( - self, default: Union[SentinelClass, _T] = SENTINEL + self, default: Union[Sentinel, _T] = Sentinel.MISSING ) -> Union[_T, str]: self.server.send_event("klippy_apis:cancel_requested") + logging.info("Requesting job cancel...") + await self._send_klippy_request( + "breakmacro", {}, default) + await self._send_klippy_request( + "breakheater", {}, default) return await self._send_klippy_request( "pause_resume/cancel", {}, default) + + async def breakheater( + self, default: Union[Sentinel, _T] = Sentinel.MISSING + ) -> Union[_T, str]: + return await self._send_klippy_request( + "breakheater", {}, default) + + async def breakmacro( + self, default: Union[Sentinel, _T] = Sentinel.MISSING + ) -> Union[_T, str]: + return await self._send_klippy_request( + "breakmacro", {}, default) - async def do_restart(self, gc: str) -> str: + async def do_restart( + self, gc: str, wait_klippy_started: bool = False + ) -> str: # WARNING: Do not call this method from within the following - # event handlers: + # event handlers when "wait_klippy_started" is set to True: # klippy_identified, klippy_started, klippy_ready, klippy_disconnect # Doing so will result in "wait_started" blocking for the specifed # timeout (default 20s) and returning False. 
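# Note: the same caveat applies to do_restart(); only pass
# wait_klippy_started=True from a task scheduled outside the klippy_*
# event handlers, as in the deferred-call sketch after the previous hunk.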
- await self.klippy.wait_started() + if wait_klippy_started: + await self.klippy.wait_started() try: result = await self.run_gcode(gc) except self.server.error as e: @@ -156,7 +236,7 @@ class KlippyAPI(Subscribable): return result async def list_endpoints(self, - default: Union[SentinelClass, _T] = SENTINEL + default: Union[Sentinel, _T] = Sentinel.MISSING ) -> Union[_T, Dict[str, List[str]]]: return await self._send_klippy_request( LIST_EPS_ENDPOINT, {}, default) @@ -166,7 +246,7 @@ class KlippyAPI(Subscribable): async def get_klippy_info(self, send_id: bool = False, - default: Union[SentinelClass, _T] = SENTINEL + default: Union[Sentinel, _T] = Sentinel.MISSING ) -> Union[_T, Dict[str, Any]]: params = {} if send_id: @@ -175,29 +255,36 @@ class KlippyAPI(Subscribable): return await self._send_klippy_request(INFO_ENDPOINT, params, default) async def get_object_list(self, - default: Union[SentinelClass, _T] = SENTINEL + default: Union[Sentinel, _T] = Sentinel.MISSING ) -> Union[_T, List[str]]: result = await self._send_klippy_request( OBJ_LIST_ENDPOINT, {}, default) if isinstance(result, dict) and 'objects' in result: return result['objects'] - return result + if default is not Sentinel.MISSING: + return default + raise self.server.error("Invalid response received from Klippy", 500) async def query_objects(self, objects: Mapping[str, Optional[List[str]]], - default: Union[SentinelClass, _T] = SENTINEL + default: Union[Sentinel, _T] = Sentinel.MISSING ) -> Union[_T, Dict[str, Any]]: params = {'objects': objects} result = await self._send_klippy_request( STATUS_ENDPOINT, params, default) - if isinstance(result, dict) and 'status' in result: - return result['status'] - return result + if isinstance(result, dict) and "status" in result: + return result["status"] + if default is not Sentinel.MISSING: + return default + raise self.server.error("Invalid response received from Klippy", 500) - async def subscribe_objects(self, - objects: Mapping[str, Optional[List[str]]], - default: Union[SentinelClass, _T] = SENTINEL - ) -> Union[_T, Dict[str, Any]]: + async def subscribe_objects( + self, + objects: Mapping[str, Optional[List[str]]], + callback: Optional[SubCallback] = None, + default: Union[Sentinel, _T] = Sentinel.MISSING + ) -> Union[_T, Dict[str, Any]]: + # The host transport shares subscriptions amongst all components for obj, items in objects.items(): if obj in self.host_subscription: prev = self.host_subscription[obj] @@ -208,12 +295,31 @@ class KlippyAPI(Subscribable): self.host_subscription[obj] = uitems else: self.host_subscription[obj] = items - params = {'objects': self.host_subscription} + params = {"objects": dict(self.host_subscription)} + result = await self._send_klippy_request(SUBSCRIPTION_ENDPOINT, params, default) + if isinstance(result, dict) and "status" in result: + if callback is not None: + self.subscription_callbacks.append(callback) + return result["status"] + if default is not Sentinel.MISSING: + return default + raise self.server.error("Invalid response received from Klippy", 500) + + async def subscribe_from_transport( + self, + objects: Mapping[str, Optional[List[str]]], + transport: APITransport, + default: Union[Sentinel, _T] = Sentinel.MISSING, + ) -> Union[_T, Dict[str, Any]]: + params = {"objects": dict(objects)} result = await self._send_klippy_request( - SUBSCRIPTION_ENDPOINT, params, default) - if isinstance(result, dict) and 'status' in result: - return result['status'] - return result + SUBSCRIPTION_ENDPOINT, params, default, transport + ) + if 
isinstance(result, dict) and "status" in result: + return result["status"] + if default is not Sentinel.MISSING: + return default + raise self.server.error("Invalid response received from Klippy", 500) async def subscribe_gcode_output(self) -> str: template = {'response_template': @@ -226,11 +332,23 @@ class KlippyAPI(Subscribable): {'response_template': {"method": method_name}, 'remote_method': method_name}) - def send_status(self, - status: Dict[str, Any], - eventtime: float - ) -> None: + def send_status( + self, status: Dict[str, Any], eventtime: float + ) -> None: + for cb in self.subscription_callbacks: + self.eventloop.register_callback(cb, status, eventtime) self.server.send_event("server:status_update", status) + def copy_file_to_cache(self, origin, target): + stat = os.statvfs("/") + free_space = stat.f_frsize * stat.f_bfree + filesize = os.path.getsize(os.path.join(origin)) + if (filesize < free_space): + shutil.copy(origin, target) + else: + msg = "!! Insufficient disk space, unable to read the file." + self.server.send_event("server:gcode_response", msg) + raise self.server.error("Insufficient disk space, unable to read the file.", 500) + def load_component(config: ConfigHelper) -> KlippyAPI: return KlippyAPI(config) diff --git a/moonraker/components/klippy_connection.py b/moonraker/components/klippy_connection.py new file mode 100644 index 0000000..31b1ef8 --- /dev/null +++ b/moonraker/components/klippy_connection.py @@ -0,0 +1,816 @@ + +# KlippyConnection - manage unix socket connection to Klipper +# +# Copyright (C) 2022 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license + +from __future__ import annotations +import os +import time +import logging +import getpass +import asyncio +import pathlib +from ..utils import ServerError, get_unix_peer_credentials +from ..utils import json_wrapper as jsonw +from ..common import KlippyState, RequestType + +# Annotation imports +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Optional, + Callable, + Coroutine, + Dict, + List, + Set, + Tuple, + Union +) +if TYPE_CHECKING: + from ..common import WebRequest, APITransport, BaseRemoteConnection + from ..confighelper import ConfigHelper + from .klippy_apis import KlippyAPI + from .file_manager.file_manager import FileManager + from .machine import Machine + from .job_state import JobState + from .database import MoonrakerDatabase as Database + FlexCallback = Callable[..., Optional[Coroutine]] + Subscription = Dict[str, Optional[List[str]]] + +# These endpoints are reserved for klippy/moonraker communication only and are +# not exposed via http or the websocket +RESERVED_ENDPOINTS = [ + "list_endpoints", + "gcode/subscribe_output", + "register_remote_method", +] + +# Items to exclude from the subscription cache. They never change and can be +# quite large. +CACHE_EXCLUSIONS = { + "configfile": ["config", "settings"] +} + +INIT_TIME = .25 +LOG_ATTEMPT_INTERVAL = int(2. 
/ INIT_TIME + .5) +MAX_LOG_ATTEMPTS = 10 * LOG_ATTEMPT_INTERVAL +UNIX_BUFFER_LIMIT = 20 * 1024 * 1024 +SVC_INFO_KEY = "klippy_connection.service_info" + +class KlippyConnection: + def __init__(self, config: ConfigHelper) -> None: + self.server = config.get_server() + self.uds_address = config.getpath( + "klippy_uds_address", pathlib.Path("/tmp/klippy_uds") + ) + self.writer: Optional[asyncio.StreamWriter] = None + self.connection_mutex: asyncio.Lock = asyncio.Lock() + self.event_loop = self.server.get_event_loop() + self.log_no_access = True + # Connection State + self.connection_task: Optional[asyncio.Task] = None + self.closing: bool = False + self.subscription_lock = asyncio.Lock() + self._klippy_info: Dict[str, Any] = {} + self._klippy_identified: bool = False + self._klippy_initializing: bool = False + self._klippy_started: bool = False + self._methods_registered: bool = False + self._klipper_version: str = "" + self._missing_reqs: Set[str] = set() + self._peer_cred: Dict[str, int] = {} + self._service_info: Dict[str, Any] = {} + self.init_attempts: int = 0 + self._state: KlippyState = KlippyState.DISCONNECTED + self._state.set_message("Klippy Disconnected") + self.subscriptions: Dict[APITransport, Subscription] = {} + self.subscription_cache: Dict[str, Dict[str, Any]] = {} + # Setup remote methods accessable to Klippy. Note that all + # registered remote methods should be of the notification type, + # they do not return a response to Klippy after execution + self.pending_requests: Dict[int, KlippyRequest] = {} + self.remote_methods: Dict[str, FlexCallback] = {} + self.klippy_reg_methods: List[str] = [] + self.register_remote_method( + 'process_gcode_response', self._process_gcode_response, + need_klippy_reg=False) + self.register_remote_method( + 'process_status_update', self._process_status_update, + need_klippy_reg=False) + + @property + def klippy_apis(self) -> KlippyAPI: + return self.server.lookup_component("klippy_apis") + + @property + def state(self) -> KlippyState: + if self.is_connected() and not self._klippy_started: + return KlippyState.STARTUP + return self._state + + @property + def state_message(self) -> str: + return self._state.message + + @property + def klippy_info(self) -> Dict[str, Any]: + return self._klippy_info + + @property + def missing_requirements(self) -> List[str]: + return list(self._missing_reqs) + + @property + def peer_credentials(self) -> Dict[str, int]: + return dict(self._peer_cred) + + @property + def service_info(self) -> Dict[str, Any]: + return self._service_info + + @property + def unit_name(self) -> str: + svc_info = self._service_info + unit_name = svc_info.get("unit_name", "klipper.service") + return unit_name.split(".", 1)[0] + + async def component_init(self) -> None: + db: Database = self.server.lookup_component('database') + machine: Machine = self.server.lookup_component("machine") + self._service_info = await db.get_item("moonraker", SVC_INFO_KEY, {}) + if self._service_info: + machine.log_service_info(self._service_info) + + async def wait_connected(self) -> bool: + if ( + self.connection_task is None or + self.connection_task.done() + ): + return self.is_connected() + try: + await self.connection_task + except Exception: + pass + return self.is_connected() + + async def wait_started(self, timeout: float = 20.) 
-> bool: + if self.connection_task is None or not self.is_connected(): + return False + if not self.connection_task.done(): + await asyncio.wait_for( + asyncio.shield(self.connection_task), timeout=timeout) + return self.is_connected() + + async def _read_stream(self, reader: asyncio.StreamReader) -> None: + errors_remaining: int = 10 + while not reader.at_eof(): + try: + data = await reader.readuntil(b'\x03') + except (ConnectionError, asyncio.IncompleteReadError): + break + except asyncio.CancelledError: + logging.exception("Klippy Stream Read Cancelled") + raise + except Exception: + logging.exception("Klippy Stream Read Error") + errors_remaining -= 1 + if not errors_remaining or not self.is_connected(): + break + continue + errors_remaining = 10 + try: + decoded_cmd = jsonw.loads(data[:-1]) + self._process_command(decoded_cmd) + except Exception: + logging.exception( + f"Error processing Klippy Host Response: {data.decode()}") + if not self.closing: + logging.debug("Klippy Disconnection From _read_stream()") + await self.close() + + async def _write_request(self, request: KlippyRequest) -> None: + if self.writer is None or self.closing: + request.set_exception(ServerError("Klippy Host not connected", 503)) + return + data = jsonw.dumps(request.to_dict()) + b"\x03" + try: + self.writer.write(data) + await self.writer.drain() + except asyncio.CancelledError: + request.set_exception(ServerError("Klippy Write Request Cancelled", 503)) + raise + except Exception: + request.set_exception(ServerError("Klippy Write Request Error", 503)) + if not self.closing: + logging.debug("Klippy Disconnection From _write_request()") + await self.close() + + def register_remote_method(self, + method_name: str, + cb: FlexCallback, + need_klippy_reg: bool = True + ) -> None: + if method_name in self.remote_methods: + raise self.server.error( + f"Remote method ({method_name}) already registered") + if self.server.is_running(): + raise self.server.error( + f"Failed to register remote method {method_name}, " + "methods must be registered during initialization") + self.remote_methods[method_name] = cb + if need_klippy_reg: + # These methods need to be registered with Klippy + self.klippy_reg_methods.append(method_name) + + def register_method_from_agent( + self, connection: BaseRemoteConnection, method_name: str + ) -> Optional[Awaitable]: + if connection.client_data["type"] != "agent": + raise self.server.error( + "Only connections of the 'agent' type can register methods" + ) + if method_name in self.remote_methods: + raise self.server.error( + f"Remote method ({method_name}) already registered" + ) + + def _on_agent_method_received(**kwargs) -> None: + connection.call_method(method_name, kwargs) + self.remote_methods[method_name] = _on_agent_method_received + self.klippy_reg_methods.append(method_name) + if self._methods_registered and self._state != KlippyState.DISCONNECTED: + coro = self.klippy_apis.register_method(method_name) + return self.event_loop.create_task(coro) + return None + + def unregister_method(self, method_name: str): + self.remote_methods.pop(method_name, None) + try: + self.klippy_reg_methods.remove(method_name) + except ValueError: + pass + + def connect(self) -> Awaitable[bool]: + if ( + self.is_connected() or + not self.server.is_running() or + (self.connection_task is not None and + not self.connection_task.done()) + ): + # already connecting + fut = self.event_loop.create_future() + fut.set_result(self.is_connected()) + return fut + self.connection_task = 
self.event_loop.create_task(self._do_connect()) + return self.connection_task + + async def _do_connect(self) -> bool: + async with self.connection_mutex: + while self.writer is None: + await asyncio.sleep(INIT_TIME) + if self.closing or not self.server.is_running(): + return False + if not self.uds_address.exists(): + continue + if not os.access(str(self.uds_address), os.R_OK | os.W_OK): + if self.log_no_access: + user = getpass.getuser() + logging.info( + f"Cannot connect to Klippy, Linux user '{user}' " + "lacks permission to open Unix Domain Socket: " + f"{self.uds_address}") + self.log_no_access = False + continue + self.log_no_access = True + try: + reader, writer = await self.open_klippy_connection(True) + except asyncio.CancelledError: + raise + except Exception: + continue + logging.info("Klippy Connection Established") + self.writer = writer + if self._get_peer_credentials(writer): + await self._get_service_info(self._peer_cred["process_id"]) + self.event_loop.create_task(self._read_stream(reader)) + return await self._init_klippy_connection() + + async def open_klippy_connection( + self, primary: bool = False + ) -> Tuple[asyncio.StreamReader, asyncio.StreamWriter]: + if not primary and not self.is_connected(): + raise ServerError("Klippy Unix Connection Not Available", 503) + return await asyncio.open_unix_connection( + str(self.uds_address), limit=UNIX_BUFFER_LIMIT) + + def _get_peer_credentials(self, writer: asyncio.StreamWriter) -> bool: + peer_cred = get_unix_peer_credentials(writer, "Klippy") + if not peer_cred: + return False + if peer_cred.get("process_id") == 1: + logging.debug("Klipper Unix Socket created via Systemd Socket Activation") + return False + self._peer_cred = peer_cred + logging.debug( + f"Klippy Connection: Received Peer Credentials: {self._peer_cred}" + ) + return True + + async def _get_service_info(self, process_id: int) -> None: + machine: Machine = self.server.lookup_component("machine") + provider = machine.get_system_provider() + svc_info = await provider.extract_service_info("klipper", process_id) + if svc_info != self._service_info: + db: Database = self.server.lookup_component('database') + db.insert_item("moonraker", SVC_INFO_KEY, svc_info) + self._service_info = svc_info + machine.log_service_info(svc_info) + + async def _init_klippy_connection(self) -> bool: + self._klippy_identified = False + self._klippy_started = False + self._klippy_initializing = True + self._methods_registered = False + self._missing_reqs.clear() + self.init_attempts = 0 + self._state = KlippyState.STARTUP + while self.server.is_running(): + await asyncio.sleep(INIT_TIME) + await self._check_ready() + if not self._klippy_initializing: + logging.debug("Klippy Connection Initialized") + return True + if not self.is_connected(): + self._klippy_initializing = False + break + else: + self.init_attempts += 1 + logging.debug("Klippy Connection Failed to Init") + return False + + async def _request_endpoints(self) -> None: + result = await self.klippy_apis.list_endpoints(default=None) + if result is None: + return + endpoints = result.get('endpoints', []) + for ep in endpoints: + if ep not in RESERVED_ENDPOINTS: + self.server.register_endpoint( + ep, RequestType.GET | RequestType.POST, self.request, + is_remote=True + ) + + async def _request_initial_subscriptions(self) -> None: + try: + await self.klippy_apis.subscribe_objects({'webhooks': None}) + except ServerError: + logging.exception("Unable to subscribe to webhooks object") + else: + logging.info("Webhooks Subscribed") + 
+
+    async def _request_initial_subscriptions(self) -> None:
+        try:
+            await self.klippy_apis.subscribe_objects({'webhooks': None})
+        except ServerError:
+            logging.exception("Unable to subscribe to webhooks object")
+        else:
+            logging.info("Webhooks Subscribed")
+        try:
+            await self.klippy_apis.subscribe_gcode_output()
+        except ServerError:
+            logging.exception(
+                "Unable to register gcode output subscription"
+            )
+        else:
+            logging.info("GCode Output Subscribed")
+
+    async def _check_ready(self) -> None:
+        send_id = not self._klippy_identified
+        result: Dict[str, Any]
+        try:
+            result = await self.klippy_apis.get_klippy_info(send_id)
+        except ServerError as e:
+            if self.init_attempts % LOG_ATTEMPT_INTERVAL == 0 and \
+                    self.init_attempts <= MAX_LOG_ATTEMPTS:
+                logging.info(
+                    f"{e}\nKlippy info request error.  This indicates that\n"
+                    f"Klippy may have experienced an error during startup.\n"
+                    f"Please check klippy.log for more information")
+            return
+        version = result.get("software_version", "")
+        if version != self._klipper_version:
+            self._klipper_version = version
+            msg = f"Klipper Version: {version}"
+            self.server.add_log_rollover_item("klipper_version", msg)
+        klipper_pid: Optional[int] = result.get("process_id")
+        if klipper_pid is not None:
+            cur_pid: Optional[int] = self._peer_cred.get("process_id")
+            if cur_pid is None or klipper_pid != cur_pid:
+                self._peer_cred = dict(
+                    process_id=klipper_pid,
+                    group_id=result.get("group_id", -1),
+                    user_id=result.get("user_id", -1)
+                )
+                await self._get_service_info(klipper_pid)
+        self._klippy_info = dict(result)
+        state_message: str = self._state.message
+        if "state_message" in self._klippy_info:
+            state_message = self._klippy_info["state_message"]
+            self._state.set_message(state_message)
+        if "state" not in result:
+            return
+        if send_id:
+            self._klippy_identified = True
+            await self.server.send_event("server:klippy_identified")
+            # Request initial endpoints to register info, emergency stop APIs
+            await self._request_endpoints()
+        self._state = KlippyState.from_string(result["state"], state_message)
+        if self._state != KlippyState.STARTUP:
+            await self._request_initial_subscriptions()
+            # Register remaining endpoints available
+            await self._request_endpoints()
+            startup_state = self._state
+            await self.server.send_event("server:klippy_started", startup_state)
+            self._klippy_started = True
+            if self._state != KlippyState.READY:
+                logging.info("\n" + self._state.message)
+                if (
+                    self._state == KlippyState.SHUTDOWN and
+                    startup_state != KlippyState.SHUTDOWN
+                ):
+                    # Klippy shutdown during startup event
+                    self.server.send_event("server:klippy_shutdown")
+            else:
+                await self._verify_klippy_requirements()
+                # register methods with klippy
+                for method in self.klippy_reg_methods:
+                    try:
+                        await self.klippy_apis.register_method(method)
+                    except ServerError:
+                        logging.exception(
+                            f"Unable to register method '{method}'")
+                self._methods_registered = True
+                if self._state == KlippyState.READY:
+                    logging.info("Klippy ready")
+                    await self.server.send_event("server:klippy_ready")
+                    if self._state == KlippyState.SHUTDOWN:
+                        # Klippy shutdown during ready event
+                        self.server.send_event("server:klippy_shutdown")
+                else:
+                    logging.info(
+                        "Klippy state transition from ready during init, "
+                        f"new state: {self._state}"
+                    )
+            self._klippy_initializing = False
+
+    async def _verify_klippy_requirements(self) -> None:
+        result = await self.klippy_apis.get_object_list(default=None)
+        if result is None:
+            logging.info("Unable to retrieve Klipper Object List")
+            return
+        req_objs = set(["virtual_sdcard", "display_status", "pause_resume"])
+        self._missing_reqs = req_objs - set(result)
+        if self._missing_reqs:
+            err_str = ", ".join([f"[{o}]" for o in self._missing_reqs])
+            logging.info(
+                f"\nWarning, unable to detect the following printer "
+                f"objects:\n{err_str}\nPlease add the above sections "
+                f"to printer.cfg for full Moonraker functionality.")
+        if "virtual_sdcard" not in self._missing_reqs:
+            # Update the gcode path
+            query_res = await self.klippy_apis.query_objects(
+                {'configfile': None}, default=None)
+            if query_res is None:
+                logging.info("Unable to set SD Card path")
+            else:
+                config = query_res.get('configfile', {}).get('config', {})
+                vsd_config = config.get('virtual_sdcard', {})
+                vsd_path = vsd_config.get('path', None)
+                if vsd_path is not None:
+                    file_manager: FileManager = self.server.lookup_component(
+                        'file_manager')
+                    file_manager.validate_gcode_path(vsd_path)
+                else:
+                    logging.info(
+                        "Configuration for [virtual_sdcard] not found,"
+                        " unable to set SD Card path")
+
+    def _process_command(self, cmd: Dict[str, Any]) -> None:
+        method = cmd.get('method', None)
+        if method is not None:
+            # This is a remote method called from klippy
+            if method in self.remote_methods:
+                params = cmd.get('params', {})
+                self.event_loop.register_callback(
+                    self._execute_method, method, **params)
+            else:
+                logging.info(f"Unknown method received: {method}")
+            return
+        # This is a response to a request, process
+        req_id = cmd.get('id', None)
+        request: Optional[KlippyRequest]
+        request = self.pending_requests.pop(req_id, None)
+        if request is None:
+            logging.info(
+                f"No request matching request ID: {req_id}, "
+                f"response: {cmd}")
+            return
+        if 'result' in cmd:
+            result = cmd['result']
+            if not result:
+                result = "ok"
+            request.set_result(result)
+        else:
+            err: Union[str, Dict[str, str]]
+            err = cmd.get('error', "Malformed Klippy Response")
+            if isinstance(err, dict):
+                err = err.get("message", "Malformed Klippy Response")
+            request.set_exception(ServerError(err, 400))
+
+    async def _execute_method(self, method_name: str, **kwargs) -> None:
+        try:
+            ret = self.remote_methods[method_name](**kwargs)
+            if ret is not None:
+                await ret
+        except Exception:
+            logging.exception(f"Error running remote method: {method_name}")
+
+    def _process_gcode_response(self, response: str) -> None:
+        self.server.send_event("server:gcode_response", response)
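+
+    # Status updates arrive as an (eventtime, status) pair where status maps
+    # printer objects to changed fields; the handler below merges each update
+    # into subscription_cache so late subscribers still receive a complete
+    # snapshot.  Illustrative payload (values hypothetical):
+    #
+    #   eventtime = 123456.789
+    #   status = {"webhooks": {"state": "ready"},
+    #             "print_stats": {"state": "printing"}}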
+ logging.info("Klippy has shutdown") + self.server.send_event("server:klippy_shutdown") + self._state = new_state + for conn, sub in self.subscriptions.items(): + conn_status: Dict[str, Any] = {} + for name, fields in sub.items(): + if name in status: + val: Dict[str, Any] = dict(status[name]) + if fields is not None: + val = {k: v for k, v in val.items() if k in fields} + if val: + conn_status[name] = val + conn.send_status(conn_status, eventtime) + + async def request(self, web_request: WebRequest) -> Any: + if not self.is_connected(): + raise ServerError("Klippy Host not connected", 503) + rpc_method = web_request.get_endpoint() + if rpc_method == "objects/subscribe": + return await self._request_subscripton(web_request) + else: + if rpc_method == "gcode/script": + script = web_request.get_str('script', "") + if script: + self.server.send_event( + "klippy_connection:gcode_received", script) + return await self._request_standard(web_request) + + async def _request_subscripton(self, web_request: WebRequest) -> Dict[str, Any]: + async with self.subscription_lock: + args = web_request.get_args() + conn = web_request.get_subscribable() + if conn is None: + raise self.server.error( + "No connection associated with subscription request" + ) + requested_sub: Subscription = args.get('objects', {}) + all_subs: Subscription = dict(requested_sub) + # Build the subscription request from a superset of all client subscriptions + for sub in self.subscriptions.values(): + for obj, items in sub.items(): + if obj in all_subs: + prev_items = all_subs[obj] + if items is None or prev_items is None: + all_subs[obj] = None + else: + uitems = list(set(prev_items) | set(items)) + all_subs[obj] = uitems + else: + all_subs[obj] = items + args['objects'] = all_subs + args['response_template'] = {'method': "process_status_update"} + + result = await self._request_standard(web_request, 20.0) + + # prune the status response + pruned_status: Dict[str, Dict[str, Any]] = {} + status_diff: Dict[str, Dict[str, Any]] = {} + all_status: Dict[str, Dict[str, Any]] = result['status'] + for obj, fields in all_status.items(): + # Diff the current cache, then update the cache + if obj in self.subscription_cache: + cached_status = self.subscription_cache[obj] + for field_name, value in fields.items(): + if field_name not in cached_status: + continue + if value != cached_status[field_name]: + status_diff.setdefault(obj, {})[field_name] = value + if obj in CACHE_EXCLUSIONS: + # Make a shallow copy so we can pop off fields we want to + # exclude from the cache without modifying the return value + fields_to_cache = dict(fields) + removed: List[str] = [] + for excluded_field in CACHE_EXCLUSIONS[obj]: + if excluded_field in fields_to_cache: + removed.append(excluded_field) + del fields_to_cache[excluded_field] + if removed: + logging.debug( + "Removed excluded fields from subscription cache: " + f"{obj}: {removed}" + ) + self.subscription_cache[obj] = fields_to_cache + else: + self.subscription_cache[obj] = fields + # Prune Response + if obj in requested_sub: + valid_fields = requested_sub[obj] + if valid_fields is None: + pruned_status[obj] = fields + else: + pruned_status[obj] = { + k: v for k, v in fields.items() if k in valid_fields + } + if status_diff: + # The response to the status request contains changed data, so it + # is necessary to manually push the status update to existing + # subscribers + logging.debug( + f"Detected status difference during subscription: {status_diff}" + ) + 
self._process_status_update(result["eventtime"], status_diff) + for obj_name in list(self.subscription_cache.keys()): + # Prune the cache to match the current status response + if obj_name not in all_status: + del self.subscription_cache[obj_name] + result['status'] = pruned_status + self.subscriptions[conn] = requested_sub + return result + + async def _request_standard( + self, web_request: WebRequest, timeout: Optional[float] = None + ) -> Any: + rpc_method = web_request.get_endpoint() + args = web_request.get_args() + # Create a base klippy request + base_request = KlippyRequest(rpc_method, args) + self.pending_requests[base_request.id] = base_request + self.event_loop.register_callback(self._write_request, base_request) + try: + return await base_request.wait(timeout) + finally: + self.pending_requests.pop(base_request.id, None) + + def remove_subscription(self, conn: APITransport) -> None: + self.subscriptions.pop(conn, None) + + def is_connected(self) -> bool: + return self.writer is not None and not self.closing + + def is_ready(self) -> bool: + return self._state == KlippyState.READY + + def is_printing(self) -> bool: + if not self.is_ready(): + return False + job_state: JobState = self.server.lookup_component("job_state") + stats = job_state.get_last_stats() + return stats.get("state", "") == "printing" + + def get_subscription_cache(self) -> Dict[str, Dict[str, Any]]: + return self.subscription_cache + + async def rollover_log(self) -> None: + if "unit_name" not in self._service_info: + raise self.server.error( + "Unable to detect Klipper Service, cannot perform " + "manual rollover" + ) + logfile: Optional[str] = self._klippy_info.get("log_file", None) + if logfile is None: + raise self.server.error( + "Unable to detect path to Klipper log file" + ) + if self.is_printing(): + raise self.server.error("Cannot rollover log while printing") + logpath = pathlib.Path(logfile).expanduser().resolve() + if not logpath.is_file(): + raise self.server.error( + f"No file at {logpath} exists, cannot perform rollover" + ) + machine: Machine = self.server.lookup_component("machine") + await machine.do_service_action("stop", self.unit_name) + suffix = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime()) + new_path = pathlib.Path(f"{logpath}.{suffix}") + + def _do_file_op() -> None: + if new_path.exists(): + new_path.unlink() + logpath.rename(new_path) + + await self.event_loop.run_in_thread(_do_file_op) + await machine.do_service_action("start", self.unit_name) + + async def _on_connection_closed(self) -> None: + self._klippy_identified = False + self._klippy_initializing = False + self._klippy_started = False + self._methods_registered = False + self._state = KlippyState.DISCONNECTED + self._state.set_message("Klippy Disconnected") + for request in self.pending_requests.values(): + request.set_exception(ServerError("Klippy Disconnected", 503)) + self.pending_requests = {} + self.subscriptions = {} + self.subscription_cache.clear() + self._peer_cred = {} + self._missing_reqs.clear() + logging.info("Klippy Connection Removed") + await self.server.send_event("server:klippy_disconnect") + if self.server.is_running(): + # Reconnect if server is running + loop = self.event_loop + self.connection_task = loop.create_task(self._do_connect()) + + async def close(self, wait_closed: bool = False) -> None: + if self.closing: + if wait_closed: + await self.connection_mutex.acquire() + self.connection_mutex.release() + return + self.closing = True + if ( + self.connection_task is not None and + not 
+
+    async def rollover_log(self) -> None:
+        if "unit_name" not in self._service_info:
+            raise self.server.error(
+                "Unable to detect Klipper Service, cannot perform "
+                "manual rollover"
+            )
+        logfile: Optional[str] = self._klippy_info.get("log_file", None)
+        if logfile is None:
+            raise self.server.error(
+                "Unable to detect path to Klipper log file"
+            )
+        if self.is_printing():
+            raise self.server.error("Cannot rollover log while printing")
+        logpath = pathlib.Path(logfile).expanduser().resolve()
+        if not logpath.is_file():
+            raise self.server.error(
+                f"No file at {logpath} exists, cannot perform rollover"
+            )
+        machine: Machine = self.server.lookup_component("machine")
+        await machine.do_service_action("stop", self.unit_name)
+        suffix = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
+        new_path = pathlib.Path(f"{logpath}.{suffix}")
+
+        def _do_file_op() -> None:
+            if new_path.exists():
+                new_path.unlink()
+            logpath.rename(new_path)
+
+        await self.event_loop.run_in_thread(_do_file_op)
+        await machine.do_service_action("start", self.unit_name)
+
+    async def _on_connection_closed(self) -> None:
+        self._klippy_identified = False
+        self._klippy_initializing = False
+        self._klippy_started = False
+        self._methods_registered = False
+        self._state = KlippyState.DISCONNECTED
+        self._state.set_message("Klippy Disconnected")
+        for request in self.pending_requests.values():
+            request.set_exception(ServerError("Klippy Disconnected", 503))
+        self.pending_requests = {}
+        self.subscriptions = {}
+        self.subscription_cache.clear()
+        self._peer_cred = {}
+        self._missing_reqs.clear()
+        logging.info("Klippy Connection Removed")
+        await self.server.send_event("server:klippy_disconnect")
+        if self.server.is_running():
+            # Reconnect if server is running
+            loop = self.event_loop
+            self.connection_task = loop.create_task(self._do_connect())
+
+    async def close(self, wait_closed: bool = False) -> None:
+        if self.closing:
+            if wait_closed:
+                await self.connection_mutex.acquire()
+                self.connection_mutex.release()
+            return
+        self.closing = True
+        if (
+            self.connection_task is not None and
+            not self.connection_task.done()
+        ):
+            self.connection_task.cancel()
+        async with self.connection_mutex:
+            if self.writer is not None:
+                try:
+                    self.writer.close()
+                    await self.writer.wait_closed()
+                except Exception:
+                    logging.exception("Error closing Klippy Unix Socket")
+                self.writer = None
+            await self._on_connection_closed()
+        self.closing = False
+
+# Basic KlippyRequest class, easily converted to dict for json encoding
+class KlippyRequest:
+    def __init__(self, rpc_method: str, params: Dict[str, Any]) -> None:
+        self.id = id(self)
+        self.rpc_method = rpc_method
+        self.params = params
+        self._fut = asyncio.get_running_loop().create_future()
+
+    async def wait(self, timeout: Optional[float] = None) -> Any:
+        start_time = time.time()
+        to = timeout or 60.
+        while True:
+            try:
+                return await asyncio.wait_for(asyncio.shield(self._fut), to)
+            except asyncio.TimeoutError:
+                if timeout is not None:
+                    self._fut.cancel()
+                    raise ServerError("Klippy request timed out", 500) from None
+                pending_time = time.time() - start_time
+                logging.info(
+                    f"Request '{self.rpc_method}' pending: "
+                    f"{pending_time:.2f} seconds"
+                )
+
+    def set_exception(self, exc: Exception) -> None:
+        if not self._fut.done():
+            self._fut.set_exception(exc)
+
+    def set_result(self, result: Any) -> None:
+        if not self._fut.done():
+            self._fut.set_result(result)
+
+    def to_dict(self) -> Dict[str, Any]:
+        return {
+            'id': self.id,
+            'method': self.rpc_method,
+            'params': self.params
+        }
+
+def load_component(config: ConfigHelper) -> KlippyConnection:
+    return KlippyConnection(config)
diff --git a/moonraker/components/ldap.py b/moonraker/components/ldap.py
index 77d72dc..cbc938e 100644
--- a/moonraker/components/ldap.py
+++ b/moonraker/components/ldap.py
@@ -18,7 +18,7 @@ from typing import (
 )
 
 if TYPE_CHECKING:
-    from confighelper import ConfigHelper
+    from ..confighelper import ConfigHelper
     from ldap3.abstract.entry import Entry
 
 class MoonrakerLDAP:
@@ -46,6 +46,15 @@ class MoonrakerLDAP:
                 "required when 'bind_dn' is provided"
             )
             self.bind_password = bind_pass_template.render()
+        self.user_filter: Optional[str] = None
+        user_filter_template = config.gettemplate('user_filter', None)
+        if user_filter_template is not None:
+            self.user_filter = user_filter_template.render()
+            if "USERNAME" not in self.user_filter:
+                raise config.error(
+                    "Section [ldap]: Option 'user_filter' is "
+                    "missing required token USERNAME"
+                )
         self.lock = asyncio.Lock()
 
     async def authenticate_ldap_user(self, username, password) -> None:
@@ -67,6 +76,8 @@ class MoonrakerLDAP:
         }
         attr_name = "sAMAccountName" if self.active_directory else "uid"
         ldfilt = f"(&(objectClass=Person)({attr_name}={username}))"
+        if self.user_filter:
+            ldfilt = self.user_filter.replace("USERNAME", username)
         try:
             with ldap3.Connection(server, **conn_args) as conn:
                 ret = conn.search(
diff --git a/moonraker/components/machine.py b/moonraker/components/machine.py
index 977e3d6..4556b4d 100644
--- a/moonraker/components/machine.py
+++ b/moonraker/components/machine.py
@@ -8,40 +8,54 @@ from __future__ import annotations
 import sys
 import os
 import re
-import json
 import pathlib
 import logging
 import asyncio
 import platform
 import socket
 import ipaddress
+import time
+import shutil
 import distro
+import tempfile
+import getpass
+import configparser
+from ..confighelper import FileSourceWrapper
+from ..utils import source_info, cansocket, sysfs_devs, load_system_module
+from ..utils import json_wrapper as jsonw
+from ..common import RequestType
 
 # Annotation imports
 from typing import (
     TYPE_CHECKING,
     Any,
+    Awaitable,
     Callable,
     Dict,
     List,
     Optional,
-    Tuple
+    Tuple,
+    Union,
+    cast
 )
 
 if TYPE_CHECKING:
-    from confighelper import ConfigHelper
-    from websockets import WebRequest
+    from ..confighelper import ConfigHelper
+    from ..common import WebRequest
+    from .application import MoonrakerApp
+    from .klippy_connection import KlippyConnection
+    from .http_client import HttpClient
     from .shell_command import ShellCommandFactory as SCMDComp
+    from .database import MoonrakerDatabase
+    from .file_manager.file_manager import FileManager
+    from .announcements import Announcements
     from .proc_stats import ProcStats
     from .dbus_manager import DbusManager
-    from dbus_next.aio import ProxyInterface
-    from dbus_next import Variant
+    from dbus_next.aio.proxy_object import ProxyInterface
+    from dbus_next.signature import Variant
+    SudoReturn = Union[Awaitable[Tuple[str, bool]], Tuple[str, bool]]
+    SudoCallback = Callable[[], SudoReturn]
 
-ALLOWED_SERVICES = [
-    "moonraker", "klipper", "webcamd", "MoonCord",
-    "KlipperScreen", "moonraker-telegram-bot",
-    "sonar", "crowsnest"
-]
 CGROUP_PATH = "/proc/1/cgroup"
 SCHED_PATH = "/proc/1/sched"
 SYSTEMD_PATH = "/etc/systemd/system"
@@ -54,63 +68,100 @@ SD_MFGRS = {
 }
 IP_FAMILIES = {'inet': 'ipv4', 'inet6': 'ipv6'}
 NETWORK_UPDATE_SEQUENCE = 10
+SERVICE_PROPERTIES = [
+    "Requires", "After", "SupplementaryGroups", "EnvironmentFiles",
+    "ExecStart", "WorkingDirectory", "FragmentPath", "Description",
+    "User"
+]
+USB_IDS_URL = "http://www.linux-usb.org/usb.ids"
 
 class Machine:
     def __init__(self, config: ConfigHelper) -> None:
         self.server = config.get_server()
+        self._allowed_services: List[str] = []
+        self._init_allowed_services()
         dist_info: Dict[str, Any]
         dist_info = {'name': distro.name(pretty=True)}
         dist_info.update(distro.info())
         dist_info['release_info'] = distro.distro_release_info()
+        dist_info['kernel_version'] = platform.release()
         self.inside_container = False
+        self.moonraker_service_info: Dict[str, Any] = {}
+        self.sudo_req_lock = asyncio.Lock()
+        self.periph_lock = asyncio.Lock()
+        self._sudo_password: Optional[str] = None
+        sudo_template = config.gettemplate("sudo_password", None)
+        if sudo_template is not None:
+            self._sudo_password = sudo_template.render()
+        self._public_ip = ""
         self.system_info: Dict[str, Any] = {
             'python': {
-                "version": sys.version_info,
+                "version": tuple(sys.version_info),
                 "version_string": sys.version.replace("\n", " ")
             },
             'cpu_info': self._get_cpu_info(),
             'sd_info': self._get_sdcard_info(),
             'distribution': dist_info,
-            'virtualization': self._check_inside_container()
+            'virtualization': self._check_inside_container(),
+            'network': {},
+            'canbus': {}
         }
         self._update_log_rollover(log=True)
         providers: Dict[str, type] = {
             "none": BaseProvider,
             "systemd_cli": SystemdCliProvider,
-            "systemd_dbus": SystemdDbusProvider
+            "systemd_dbus": SystemdDbusProvider,
+            "supervisord_cli": SupervisordCliProvider
         }
-        ptype = config.get('provider', 'systemd_dbus')
-        pclass = providers.get(ptype)
+        self.provider_type = config.get('provider', 'systemd_dbus')
+        pclass = providers.get(self.provider_type)
         if pclass is None:
-            raise config.error(f"Invalid Provider: {ptype}")
+            raise config.error(f"Invalid Provider: {self.provider_type}")
         self.sys_provider: BaseProvider = pclass(config)
-        logging.info(f"Using System Provider: {ptype}")
+        self.system_info["provider"] = self.provider_type
+        logging.info(f"Using System Provider: {self.provider_type}")
+        self.validator = InstallValidator(config)
+        self.sudo_requests: List[Tuple[SudoCallback, str]] = []
         self.server.register_endpoint(
-            "/machine/reboot", ['POST'], self._handle_machine_request)
+            "/machine/reboot", RequestType.POST, self._handle_machine_request
+        )
         self.server.register_endpoint(
-            "/machine/shutdown", ['POST'], self._handle_machine_request)
+            "/machine/shutdown", RequestType.POST, self._handle_machine_request
+        )
         self.server.register_endpoint(
-            "/machine/services/restart", ['POST'],
-            self._handle_service_request)
+            "/machine/services/restart", RequestType.POST, self._handle_service_request
+        )
         self.server.register_endpoint(
-            "/machine/services/stop", ['POST'],
-            self._handle_service_request)
+            "/machine/services/stop", RequestType.POST, self._handle_service_request
+        )
         self.server.register_endpoint(
-            "/machine/services/start", ['POST'],
-            self._handle_service_request)
+            "/machine/services/start", RequestType.POST, self._handle_service_request
+        )
         self.server.register_endpoint(
-            "/machine/system_info", ['GET'],
-            self._handle_sysinfo_request)
+            "/machine/system_info", RequestType.GET, self._handle_sysinfo_request
+        )
         self.server.register_endpoint(
-            "/machine/system_info", ['POST'],
-            self._handle_sysinfo_request)
-        # self.server.register_endpoint(
-        #     "/machine/dev_name", ['GET'],
-        #     self._handle_devname_request)
-
+            "/machine/sudo/info", RequestType.GET, self._handle_sudo_info
+        )
+        self.server.register_endpoint(
+            "/machine/sudo/password", RequestType.POST, self._set_sudo_password
+        )
+        self.server.register_endpoint(
+            "/machine/peripherals/serial", RequestType.GET, self._handle_serial_request
+        )
+        self.server.register_endpoint(
+            "/machine/peripherals/usb", RequestType.GET, self._handle_usb_request
+        )
+        self.server.register_endpoint(
+            "/machine/peripherals/canbus", RequestType.GET, self._handle_can_query
+        )
+        self.server.register_endpoint(
+            "/machine/peripherals/video", RequestType.GET, self._handle_video_request
+        )
         self.server.register_notification("machine:service_state_changed")
+        self.server.register_notification("machine:sudo_alert")
         # Register remote methods
         self.server.register_remote_method(
@@ -121,31 +172,110 @@ class Machine:
         # IP network shell commands
         shell_cmd: SCMDComp = self.server.load_component(
             config, 'shell_command')
-        self.addr_cmd = shell_cmd.build_shell_command("ip -json address")
+        self.addr_cmd = shell_cmd.build_shell_command("ip -json -det address")
         iwgetbin = "/sbin/iwgetid"
         if not pathlib.Path(iwgetbin).exists():
             iwgetbin = "iwgetid"
         self.iwgetid_cmd = shell_cmd.build_shell_command(iwgetbin)
         self.init_evt = asyncio.Event()
+        self.libcam = self._try_import_libcamera()
+
+    def _init_allowed_services(self) -> None:
+        app_args = self.server.get_app_args()
+        data_path = app_args["data_path"]
+        fpath = pathlib.Path(data_path).joinpath("moonraker.asvc")
+        fm: FileManager = self.server.lookup_component("file_manager")
+        fm.add_reserved_path("allowed_services", fpath, False)
+        default_svcs = source_info.read_asset("default_allowed_services") or ""
+        try:
+            if not fpath.exists():
+                fpath.write_text(default_svcs)
+            data = fpath.read_text()
+        except Exception:
+            logging.exception("Failed to read moonraker.asvc")
+            data = default_svcs
+        svcs = [svc.strip() for svc in data.split("\n") if svc.strip()]
+        for svc in svcs:
+            if svc.endswith(".service"):
+                svc = svc.rsplit(".", 1)[0]
+            if svc not in self._allowed_services:
+                self._allowed_services.append(svc)
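+
+    # moonraker.asvc is a plain-text allow list parsed above: one unit per
+    # line, with an optional ".service" suffix.  An illustrative file:
+    #
+    #   klipper
+    #   moonraker
+    #   KlipperScreen
+    #   webcamd.service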
 
     def _update_log_rollover(self, log: bool = False) -> None:
         sys_info_msg = "\nSystem Info:"
         for header, info in self.system_info.items():
             sys_info_msg += f"\n\n***{header}***"
             if not isinstance(info, dict):
-                sys_info_msg += f"\n {repr(info)}"
+                sys_info_msg += f"\n   {repr(info)}"
             else:
                 for key, val in info.items():
                     sys_info_msg += f"\n  {key}: {val}"
+        sys_info_msg += "\n\n***Allowed Services***"
+        for svc in self._allowed_services:
+            sys_info_msg += f"\n  {svc}"
         self.server.add_log_rollover_item('system_info', sys_info_msg, log=log)
 
-    async def wait_for_init(self, timeout: float = None) -> None:
+    def _try_import_libcamera(self) -> Any:
+        try:
+            libcam = load_system_module("libcamera")
+            cmgr = libcam.CameraManager.singleton()
+            self.server.add_log_rollover_item(
+                "libcamera",
+                f"Found libcamera Python module, version: {cmgr.version}"
+            )
+            return libcam
+        except Exception:
+            if self.server.is_verbose_enabled():
+                logging.exception("Failed to import libcamera")
+            self.server.add_log_rollover_item(
+                "libcamera", "Module libcamera unavailable, import failed"
+            )
+            return None
+
+    @property
+    def public_ip(self) -> str:
+        return self._public_ip
+
+    @property
+    def unit_name(self) -> str:
+        svc_info = self.moonraker_service_info
+        unit_name = svc_info.get("unit_name", "moonraker.service")
+        return unit_name.split(".", 1)[0]
+
+    def is_service_allowed(self, service: str) -> bool:
+        return (
+            service in self._allowed_services or
+            re.match(r"moonraker[_-]?\d*", service) is not None or
+            re.match(r"klipper[_-]?\d*", service) is not None
+        )
+
+    def validation_enabled(self) -> bool:
+        return self.validator.validation_enabled
+
+    def get_system_provider(self):
+        return self.sys_provider
+
+    def is_inside_container(self):
+        return self.inside_container
+
+    def get_provider_type(self):
+        return self.provider_type
+
+    def get_moonraker_service_info(self):
+        return dict(self.moonraker_service_info)
+
+    async def wait_for_init(
+        self, timeout: Optional[float] = None
+    ) -> None:
         try:
             await asyncio.wait_for(self.init_evt.wait(), timeout)
         except asyncio.TimeoutError:
             pass
 
-    async def component_init(self):
+    async def component_init(self) -> None:
+        eventloop = self.server.get_event_loop()
+        eventloop.create_task(self.update_usb_ids())
+        await self.validator.validation_init()
         await self.sys_provider.initialize()
         if not self.inside_container:
             virt_info = await self.sys_provider.check_virt_status()
@@ -157,12 +287,24 @@ class Machine:
         avail_list = list(available_svcs.keys())
         self.system_info['available_services'] = avail_list
         self.system_info['service_state'] = available_svcs
+        svc_info = await self.sys_provider.extract_service_info(
+            "moonraker", os.getpid()
+        )
+        self.moonraker_service_info = svc_info
+        self.log_service_info(svc_info)
         self.init_evt.set()
 
+    async def validate_installation(self) -> bool:
+        return await self.validator.perform_validation()
+
+    async def on_exit(self) -> None:
+        await self.validator.remove_announcement()
+
     async def _handle_machine_request(self, web_request: WebRequest) -> str:
         ep = web_request.get_endpoint()
         if self.inside_container:
-            virt_id = self.system_info['virtualization'].get('virt_id', "none")
+            virt_id = self.system_info['virtualization'].get(
+                'virt_identifier', "none")
             raise self.server.error(
                 f"Cannot {ep.split('/')[-1]} from within a "
                 f"{virt_id} container")
@@ -180,19 +322,26 @@ class Machine:
     ) -> None:
         await self.sys_provider.do_service_action(action, service_name)
 
+    def restart_moonraker_service(self):
+        async def wrapper():
+            try:
+                await self.do_service_action("restart", self.unit_name)
+            except Exception:
+                pass
+        self.server.get_event_loop().create_task(wrapper())
+
     async def _handle_service_request(self, web_request: WebRequest) -> str:
-        name: str = web_request.get('service')
+        name: str = web_request.get_str('service')
         action = web_request.get_endpoint().split('/')[-1]
-        if name == "moonraker":
+        if name == self.unit_name:
             if action != "restart":
                 raise self.server.error(
                     f"Service action '{action}' not available for moonraker")
-            event_loop = self.server.get_event_loop()
-            event_loop.register_callback(self.do_service_action, action, name)
+            self.restart_moonraker_service()
         elif self.sys_provider.is_service_available(name):
             await self.do_service_action(action, name)
         else:
-            if name in ALLOWED_SERVICES:
+            if name in self._allowed_services:
                 raise self.server.error(f"Service '{name}' not installed")
             raise self.server.error(
                 f"Service '{name}' not allowed")
@@ -201,24 +350,173 @@ class Machine:
 
     async def _handle_sysinfo_request(self,
                                       web_request: WebRequest
                                       ) -> Dict[str, Any]:
-        # with open('../../../../../root/www/dev_info.txt', 'r') as f:
-        dev_name = web_request.get_str('dev_name',default=None)
-        if dev_name !=None:
-            Note=open('dev_info.txt',mode='w')
-            Note.write(dev_name)
-            Note.close()
-        # path=os.path.abspath('.')
-        with open('dev_info.txt', 'r') as f:
-            content = f.read()
-            f.close()
-        self.system_info["machine_name"] = content
-        return {'system_info': self.system_info}
+        kconn: KlippyConnection
+        kconn = self.server.lookup_component("klippy_connection")
+        sys_info = self.system_info.copy()
+        sys_info["instance_ids"] = {
+            "moonraker": self.unit_name,
+            "klipper": kconn.unit_name
+        }
+        # Used by Qidi Slicer for device discovery.  The requested device
+        # name is persisted to /dev_info.txt and reported as "machine_name".
+        dev_name = web_request.get_str('dev_name', default=None)
+        dev_info_file = pathlib.Path('/dev_info.txt')
+        if dev_name is not None:
+            dev_info_file.write_text(dev_name)
+        if dev_info_file.is_file():
+            content = dev_info_file.read_text()
+            self.system_info["machine_name"] = content
+            sys_info["machine_name"] = content
+        return {"system_info": sys_info}
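+
+    # Sketch of the /machine/system_info response shape (fields abbreviated,
+    # values illustrative):
+    #
+    #   {"system_info": {"provider": "systemd_dbus",
+    #                    "instance_ids": {"moonraker": "moonraker",
+    #                                     "klipper": "klipper"},
+    #                    "machine_name": "QIDI-Plus4", ...}}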
+
+    async def _set_sudo_password(
+        self, web_request: WebRequest
+    ) -> Dict[str, Any]:
+        async with self.sudo_req_lock:
+            self._sudo_password = web_request.get_str("password")
+            if not await self.check_sudo_access():
+                self._sudo_password = None
+                raise self.server.error(
+                    "Invalid password, sudo access was denied"
+                )
+            sudo_responses = ["Sudo password successfully set."]
+            restart: bool = False
+            failed: List[Tuple[SudoCallback, str]] = []
+            failed_msgs: List[str] = []
+            if self.sudo_requests:
+                while self.sudo_requests:
+                    cb, msg = self.sudo_requests.pop(0)
+                    try:
+                        ret = cb()
+                        if isinstance(ret, Awaitable):
+                            ret = await ret
+                        msg, need_restart = ret
+                        sudo_responses.append(msg)
+                        restart |= need_restart
+                    except asyncio.CancelledError:
+                        raise
+                    except Exception as e:
+                        failed.append((cb, msg))
+                        failed_msgs.append(str(e))
+                restart = False if len(failed) > 0 else restart
+                self.sudo_requests = failed
+                if not restart and len(sudo_responses) > 1:
+                    # at least one successful response and not restarting
+                    eventloop = self.server.get_event_loop()
+                    eventloop.delay_callback(
+                        .05, self.server.send_event,
+                        "machine:sudo_alert",
+                        {
+                            "sudo_requested": self.sudo_requested,
+                            "request_messages": self.sudo_request_messages
+                        }
+                    )
+                if failed_msgs:
+                    err_msg = "\n".join(failed_msgs)
+                    raise self.server.error(err_msg, 500)
+                if restart:
+                    self.restart_moonraker_service()
+                    sudo_responses.append(
+                        "Moonraker is currently in the process of restarting."
+                    )
+            return {
+                "sudo_responses": sudo_responses,
+                "is_restarting": restart
+            }
+
+    async def _handle_sudo_info(
+        self, web_request: WebRequest
+    ) -> Dict[str, Any]:
+        check_access = web_request.get_boolean("check_access", False)
+        has_sudo: Optional[bool] = None
+        if check_access:
+            has_sudo = await self.check_sudo_access()
+        return {
+            "sudo_access": has_sudo,
+            "linux_user": self.linux_user,
+            "sudo_requested": self.sudo_requested,
+            "request_messages": self.sudo_request_messages
+        }
+
+    async def _handle_serial_request(self, web_request: WebRequest) -> Dict[str, Any]:
+        return {
+            "serial_devices": await self.detect_serial_devices()
+        }
+
+    async def _handle_usb_request(self, web_request: WebRequest) -> Dict[str, Any]:
+        return {
+            "usb_devices": await self.detect_usb_devices()
+        }
+
+    async def _handle_can_query(self, web_request: WebRequest) -> Dict[str, Any]:
+        interface = web_request.get_str("interface", "can0")
+        return {
+            "can_uuids": await self.query_can_uuids(interface)
+        }
+
+    async def _handle_video_request(self, web_request: WebRequest) -> Dict[str, Any]:
+        return await self.detect_video_devices()
 
     def get_system_info(self) -> Dict[str, Any]:
         return self.system_info
 
+    @property
+    def sudo_password(self) -> Optional[str]:
+        return self._sudo_password
+
+    @sudo_password.setter
+    def sudo_password(self, pwd: Optional[str]) -> None:
+        self._sudo_password = pwd
+
+    @property
+    def sudo_requested(self) -> bool:
+        return len(self.sudo_requests) > 0
+
+    @property
+    def linux_user(self) -> str:
+        return getpass.getuser()
+
+    @property
+    def sudo_request_messages(self) -> List[str]:
+        return [req[1] for req in self.sudo_requests]
+
+    def register_sudo_request(
+        self, callback: SudoCallback, message: str
+    ) -> None:
+        self.sudo_requests.append((callback, message))
+        self.server.send_event(
+            "machine:sudo_alert",
+            {
+                "sudo_requested": True,
+                "request_messages": self.sudo_request_messages
+            }
+        )
+
+    async def check_sudo_access(self, cmds: List[str] = []) -> bool:
+        if not cmds:
+            cmds = ["systemctl --version", "ls /root"]
+        shell_cmd: SCMDComp = self.server.lookup_component("shell_command")
+        for cmd in cmds:
+            try:
+                await self.exec_sudo_command(cmd, timeout=10.)
+            except shell_cmd.error:
+                return False
+        return True
+
+    async def exec_sudo_command(
+        self, command: str, tries: int = 1, timeout=2.
+    ) -> str:
+        proc_input = None
+        full_cmd = f"sudo {command}"
+        if self._sudo_password is not None:
+            proc_input = self._sudo_password
+            full_cmd = f"sudo -S {command}"
+        shell_cmd: SCMDComp = self.server.lookup_component("shell_command")
+        return await shell_cmd.exec_cmd(
+            full_cmd, proc_input=proc_input, log_complete=False, attempts=tries,
+            timeout=timeout
+        )
+
     def _get_sdcard_info(self) -> Dict[str, Any]:
         sd_info: Dict[str, Any] = {}
         cid_file = pathlib.Path(SD_CID_PATH)
@@ -339,6 +637,9 @@ class Machine:
                     self.inside_container = True
                     virt_type = "container"
                     virt_id = ct
+                    logging.info(
+                        f"Container detected via cgroup: {ct}"
+                    )
                     break
         except Exception:
             logging.exception(f"Error reading {CGROUP_PATH}")
@@ -359,6 +660,9 @@ class Machine:
                     os.path.exists("/.dockerinit")
                 ):
                     virt_id = "docker"
+                    logging.info(
+                        f"Container detected via sched: {virt_id}"
+                    )
         except Exception:
             logging.exception(f"Error reading {SCHED_PATH}")
         return {
@@ -373,39 +677,55 @@ class Machine:
         if sequence % NETWORK_UPDATE_SEQUENCE:
             return
         network: Dict[str, Any] = {}
+        canbus: Dict[str, Any] = {}
         try:
             # get network interfaces
            resp = await self.addr_cmd.run_with_response(log_complete=False)
-            decoded = json.loads(resp)
+            decoded: List[Dict[str, Any]] = jsonw.loads(resp)
             for interface in decoded:
-                if (
-                    interface['operstate'] != "UP" or
-                    interface['link_type'] != "ether" or
-                    'address' not in interface
-                ):
+                if interface['operstate'] != "UP":
                     continue
-                addresses: List[Dict[str, Any]] = [
-                    {
-                        'family': IP_FAMILIES[addr['family']],
-                        'address': addr['local'],
-                        'is_link_local': addr.get('scope', "") == "link"
+                if interface['link_type'] == "can":
+                    infodata: dict = interface.get(
+                        "linkinfo", {}).get("info_data", {})
+                    canbus[interface['ifname']] = {
+                        'tx_queue_len': interface['txqlen'],
+                        'bitrate': infodata.get("bittiming", {}).get(
+                            "bitrate", -1
+                        ),
+                        'driver': infodata.get("bittiming_const", {}).get(
+                            "name", "unknown"
+                        )
+                    }
+                elif (
+                    interface['link_type'] == "ether" and
+                    'address' in interface
+                ):
+                    addresses: List[Dict[str, Any]] = [
+                        {
+                            'family': IP_FAMILIES[addr['family']],
+                            'address': addr['local'],
+                            'is_link_local': addr.get('scope', "") == "link"
+                        }
+                        for addr in interface.get('addr_info', [])
+                        if 'family' in addr and 'local' in addr
+                    ]
+                    if not addresses:
+                        continue
+                    network[interface['ifname']] = {
+                        'mac_address': interface['address'],
+                        'ip_addresses': addresses
                     }
-                    for addr in interface.get('addr_info', [])
-                    if 'family' in addr and 'local' in addr
-                ]
-                if not addresses:
-                    continue
-                network[interface['ifname']] = {
-                    'mac_address': interface['address'],
-                    'ip_addresses': addresses
-                }
         except Exception:
             logging.exception("Error processing network update")
             return
         prev_network = self.system_info.get('network', {})
-        if notify and network != prev_network:
-            self.server.send_event("machine:net_state_changed", network)
+        if network != prev_network:
+            self._find_public_ip()
+            if notify:
+                self.server.send_event("machine:net_state_changed", network)
         self.system_info['network'] = network
+        self.system_info['canbus'] = canbus
 
     async def get_public_network(self) -> Dict[str, Any]:
         wifis = await self._get_wifi_interfaces()
@@ -429,7 +749,7 @@ class Machine:
                 continue
             fam = addrinfo["family"]
             addr = addrinfo["address"]
-            if fam == "ipv6" and src_ip is None:
+            if fam == "ipv6" and not src_ip:
                 ip = ipaddress.ip_address(addr)
                 if ip.is_global:
                     return {
@@ -449,7 +769,7 @@ class Machine:
             "family": ""
         }
 
-    def _find_public_ip(self) -> Optional[str]:
+    def _find_public_ip(self) -> str:
         # Check for an IPv4 Source IP
         # NOTE: It should also be possible to extract this from
         # the routing table, ie: ip -json route
         # metric. Might also be able to get IPv6 info from this.
         # However, it would be better to use NETLINK for this rather
         # than run another shell command
-        src_ip: Optional[str] = None
-        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-        try:
-            s.settimeout(0)
-            s.connect(('10.255.255.255', 1))
-            src_ip = s.getsockname()[0]
-        except Exception:
-            pass
-        finally:
-            s.close()
+        src_ip: str = ""
+        # First attempt: use "broadcast" to find the local IP
+        addr_info = [
+            ("", 0, socket.AF_INET),
+            ("10.255.255.255", 1, socket.AF_INET),
+            ("2001:db8::1234", 1, socket.AF_INET6),
+        ]
+        for (addr, port, fam) in addr_info:
+            s = socket.socket(fam, socket.SOCK_DGRAM | socket.SOCK_NONBLOCK)
+            try:
+                if addr == "":
+                    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+                s.connect((addr, port))
+                src_ip = s.getsockname()[0]
+            except Exception:
+                continue
+            logging.info(f"Detected Local IP: {src_ip}")
+            break
+        if src_ip != self._public_ip:
+            self._public_ip = src_ip
+            self.server.send_event("machine:public_ip_changed", src_ip)
+        if not src_ip:
+            logging.info("Failed to detect local IP address")
         return src_ip
 
     async def _get_wifi_interfaces(self) -> Dict[str, Any]:
@@ -476,7 +809,7 @@ class Machine:
         try:
             resp = await self.iwgetid_cmd.run_with_response(log_complete=False)
         except shell_cmd.error:
-            logging.exception("Failed to run 'iwgetid' command")
+            logging.info("Failed to run 'iwgetid' command")
             return {}
         if resp:
             for line in resp.split("\n"):
@@ -484,9 +817,139 @@ class Machine:
                 wifi_intfs[parts[0]] = parts[1].split(":")[-1].strip('"')
         return wifi_intfs
 
+    def log_service_info(self, svc_info: Dict[str, Any]) -> None:
+        if not svc_info:
+            return
+        name = svc_info.get("unit_name", "unknown")
+        manager = svc_info.get("manager", "systemd").capitalize()
+        msg = f"\n{manager} unit {name}:"
+        for key, val in svc_info.items():
+            if key == "properties":
+                msg += "\nProperties:"
+                for prop_key, prop in val.items():
+                    msg += f"\n**{prop_key}={prop}"
+            else:
+                msg += f"\n{key}: {val}"
+        self.server.add_log_rollover_item(name, msg)
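+
+    # usb.ids refreshes below use HTTP conditional requests: the stored ETag
+    # and Last-Modified values are replayed as If-None-Match and
+    # If-Modified-Since, and a 304 reply skips the download.  Illustrative
+    # stored record (values hypothetical):
+    #
+    #   {"etag": "\"66f3-5f1b\"",
+    #    "last_modified": "Mon, 01 Jan 2024 00:00:00 GMT"}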
+
+    async def update_usb_ids(self, force: bool = False) -> None:
+        async with self.periph_lock:
+            db: MoonrakerDatabase = self.server.lookup_component("database")
+            client: HttpClient = self.server.lookup_component("http_client")
+            dpath = pathlib.Path(self.server.get_app_arg("data_path"))
+            usb_ids_path = pathlib.Path(dpath).joinpath("misc/usb.ids")
+            if usb_ids_path.is_file() and not force:
+                return
+            usb_id_req_info: Dict[str, str]
+            usb_id_req_info = await db.get_item("moonraker", "usb_id_req_info", {})
+            etag: Optional[str] = usb_id_req_info.pop("etag", None)
+            last_modified: Optional[str] = usb_id_req_info.pop("last_modified", None)
+            headers = {"Accept": "text/plain"}
+            if etag is not None and usb_ids_path.is_file():
+                headers["If-None-Match"] = etag
+            if last_modified is not None and usb_ids_path.is_file():
+                headers["If-Modified-Since"] = last_modified
+            logging.info("Fetching latest usb.ids file...")
+            resp = await client.get(
+                USB_IDS_URL, headers, enable_cache=False
+            )
+            if resp.has_error():
+                logging.info("Failed to retrieve usb.ids file")
+                return
+            if resp.status_code == 304:
+                logging.info("USB IDs file up to date")
+                return
+            # Save etag and modified headers
+            if resp.etag is not None:
+                usb_id_req_info["etag"] = resp.etag
+            if resp.last_modified is not None:
+                usb_id_req_info["last_modified"] = resp.last_modified
+            await db.insert_item("moonraker", "usb_id_req_info", usb_id_req_info)
+            # Write file
+            logging.info("Writing usb.ids file...")
+            eventloop = self.server.get_event_loop()
+            await eventloop.run_in_thread(usb_ids_path.write_bytes, resp.content)
+
+    async def detect_serial_devices(self) -> List[Dict[str, Any]]:
+        async with self.periph_lock:
+            eventloop = self.server.get_event_loop()
+            return await eventloop.run_in_thread(sysfs_devs.find_serial_devices)
+
+    async def detect_usb_devices(self) -> List[Dict[str, Any]]:
+        async with self.periph_lock:
+            eventloop = self.server.get_event_loop()
+            return await eventloop.run_in_thread(self._do_usb_detect)
+
+    def _do_usb_detect(self) -> List[Dict[str, Any]]:
+        data_path = pathlib.Path(self.server.get_app_args()["data_path"])
+        usb_id_path = data_path.joinpath("misc/usb.ids")
+        usb_id_data = sysfs_devs.UsbIdData(usb_id_path)
+        dev_list = sysfs_devs.find_usb_devices()
+        for usb_dev_info in dev_list:
+            cls_ids: List[str] = usb_dev_info.pop("class_ids", None)
+            class_info = usb_id_data.get_class_info(*cls_ids)
+            usb_dev_info.update(class_info)
+            prod_info = usb_id_data.get_product_info(
+                usb_dev_info["vendor_id"], usb_dev_info["product_id"]
+            )
+            for field, desc in prod_info.items():
+                if usb_dev_info.get(field) is None:
+                    usb_dev_info[field] = desc
+        return dev_list
+
+    async def query_can_uuids(self, interface: str) -> List[Dict[str, Any]]:
+        async with self.periph_lock:
+            cansock = cansocket.CanSocket(interface)
+            uuids = await cansocket.query_klipper_uuids(cansock)
+            cansock.close()
+            return uuids
+
+    async def detect_video_devices(self) -> Dict[str, List[Dict[str, Any]]]:
+        async with self.periph_lock:
+            eventloop = self.server.get_event_loop()
+            v4l2_devs = await eventloop.run_in_thread(sysfs_devs.find_video_devices)
+            libcam_devs = await eventloop.run_in_thread(self.get_libcamera_devices)
+            return {
+                "v4l2_devices": v4l2_devs,
+                "libcamera_devices": libcam_devs
+            }
+
+    def get_libcamera_devices(self) -> List[Dict[str, Any]]:
+        libcam = self.libcam
+        libcam_devs: List[Dict[str, Any]] = []
+        if libcam is not None:
+            cm = libcam.CameraManager.singleton()
+            for cam in cm.cameras:
+                device: Dict[str, Any] = {"libcamera_id": cam.id}
+                props_by_name = {cid.name: val for cid, val in cam.properties.items()}
+                device["model"] = props_by_name.get("Model")
+                modes: List[Dict[str, Any]] = []
+                cam_config = cam.generate_configuration([libcam.StreamRole.Raw])
+                for stream_cfg in cam_config:
+                    formats = stream_cfg.formats
+                    for pix_fmt in formats.pixel_formats:
+                        cur_mode: Dict[str, Any] = {"format": str(pix_fmt)}
+                        resolutions: List[str] = []
+                        for size in formats.sizes(pix_fmt):
+                            resolutions.append(str(size))
+                        cur_mode["resolutions"] = resolutions
+                        modes.append(cur_mode)
+                device["modes"] = modes
+                libcam_devs.append(device)
+        return libcam_devs
+
+
 class BaseProvider:
     def __init__(self, config: ConfigHelper) -> None:
         self.server = config.get_server()
+        self.shutdown_action = config.get("shutdown_action", "poweroff")
+        self.shutdown_action = self.shutdown_action.lower()
+        if self.shutdown_action not in ["halt", "poweroff"]:
+            raise config.error(
+                "Section [machine], Option 'shutdown_action':"
+                f"Invalid value '{self.shutdown_action}', must be "
+                "'halt' or 'poweroff'"
+            )
         self.available_services: Dict[str, Dict[str, str]] = {}
         self.shell_cmd: SCMDComp = self.server.load_component(
             config, 'shell_command')
@@ -494,17 +957,21 @@ class BaseProvider:
     async def initialize(self) -> None:
         pass
 
+    async def _exec_sudo_command(self, command: str):
+        machine: Machine = self.server.lookup_component("machine")
+        return await machine.exec_sudo_command(command)
+
     async def shutdown(self) -> None:
-        await self.shell_cmd.exec_cmd(f"sudo shutdown now")
+        await self._exec_sudo_command(f"systemctl {self.shutdown_action}")
 
     async def reboot(self) -> None:
-        await self.shell_cmd.exec_cmd(f"sudo shutdown -r now")
+        await self._exec_sudo_command("systemctl reboot")
 
     async def do_service_action(self,
                                 action: str,
                                 service_name: str
                                 ) -> None:
-        raise self.server.error("Serice Actions Not Available", 503)
+        raise self.server.error("Service Actions Not Available", 503)
 
     async def check_virt_status(self) -> Dict[str, Any]:
         return {
@@ -518,6 +985,15 @@ class BaseProvider:
     def get_available_services(self) -> Dict[str, Dict[str, str]]:
         return self.available_services
 
+    async def extract_service_info(
+        self,
+        service_name: str,
+        pid: int,
+        properties: Optional[List[str]] = None,
+        raw: bool = False
+    ) -> Dict[str, Any]:
+        return {}
+
 class SystemdCliProvider(BaseProvider):
     async def initialize(self) -> None:
         await self._detect_active_services()
@@ -534,8 +1010,7 @@ class SystemdCliProvider(BaseProvider):
                                 action: str,
                                 service_name: str
                                 ) -> None:
-        await self.shell_cmd.exec_cmd(
-            f'sudo systemctl {action} {service_name}')
+        await self._exec_sudo_command(f"systemctl {action} {service_name}")
 
     async def check_virt_status(self) -> Dict[str, Any]:
         # Fallback virtualization check
@@ -573,7 +1048,8 @@ class SystemdCliProvider(BaseProvider):
             'virt_identifier': virt_id
         }
 
-    async def _detect_active_services(self):
+    async def _detect_active_services(self) -> None:
+        machine: Machine = self.server.lookup_component("machine")
         try:
             resp: str = await self.shell_cmd.exec_cmd(
                 "systemctl list-units --all --type=service --plain"
@@ -585,12 +1061,11 @@ class SystemdCliProvider(BaseProvider):
             services = []
         for svc in services:
             sname = svc.rsplit('.', 1)[0]
-            for allowed in ALLOWED_SERVICES:
-                if sname.startswith(allowed):
-                    self.available_services[sname] = {
-                        'active_state': "unknown",
-                        'sub_state': "unknown"
-                    }
+            if machine.is_service_allowed(sname):
+                self.available_services[sname] = {
+                    'active_state': "unknown",
+                    'sub_state': "unknown"
+                }
 
     async def _update_service_status(self,
                                      sequence: int,
@@ -617,6 +1092,73 @@ class SystemdCliProvider(BaseProvider):
         except Exception:
             logging.exception("Error processing service state update")
 
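+    # "systemctl show -p" prints KEY=VALUE lines.  ExecStart is rendered as
+    # a struct, roughly (paths illustrative):
+    #
+    #   ExecStart={ path=/usr/bin/python3 ; argv[]=/usr/bin/python3 -m moonraker ; ... }
+    #
+    # which is why _process_raw_properties() below extracts the argv[] field
+    # with a regex.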
+    async def extract_service_info(
+        self,
+        service_name: str,
+        pid: int,
+        properties: Optional[List[str]] = None,
+        raw: bool = False
+    ) -> Dict[str, Any]:
+        service_info: Dict[str, Any] = {}
+        expected_name = f"{service_name}.service"
+        if properties is None:
+            properties = SERVICE_PROPERTIES
+        try:
+            resp: str = await self.shell_cmd.exec_cmd(
+                f"systemctl status {pid}"
+            )
+            unit_name = resp.split(maxsplit=2)[1]
+            service_info["unit_name"] = unit_name
+            service_info["is_default"] = True
+            service_info["manager"] = "systemd"
+            if unit_name != expected_name:
+                service_info["is_default"] = False
+                logging.info(
+                    f"Detected alternate unit name for {service_name}: "
+                    f"{unit_name}"
+                )
+            prop_args = ",".join(properties)
+            props: str = await self.shell_cmd.exec_cmd(
+                f"systemctl show -p {prop_args} {unit_name}", attempts=5,
+                timeout=10.
+            )
+            raw_props: Dict[str, Any] = {}
+            lines = [p.strip() for p in props.split("\n") if p.strip()]
+            for line in lines:
+                parts = line.split("=", 1)
+                if len(parts) == 2:
+                    key = parts[0].strip()
+                    val = parts[1].strip()
+                    raw_props[key] = val
+            if raw:
+                service_info["properties"] = raw_props
+            else:
+                processed = self._process_raw_properties(raw_props)
+                service_info["properties"] = processed
+        except Exception:
+            logging.exception("Error extracting service info")
+            return {}
+        return service_info
+
+    def _process_raw_properties(
+        self, raw_props: Dict[str, str]
+    ) -> Dict[str, Any]:
+        processed: Dict[str, Any] = {}
+        for key, val in raw_props.items():
+            processed[key] = val
+            if key == "ExecStart":
+                # this is a struct, we need to deconstruct it
+                match = re.search(r"argv\[\]=([^;]+);", val)
+                if match is not None:
+                    processed[key] = match.group(1).strip()
+            elif key == "EnvironmentFiles":
+                if val:
+                    processed[key] = val.split()[0]
+            elif key in ["Requires", "After", "SupplementaryGroups"]:
+                vals = [v.strip() for v in val.split() if v.strip()]
+                processed[key] = vals
+        return processed
+
 class SystemdDbusProvider(BaseProvider):
     def __init__(self, config: ConfigHelper) -> None:
         super().__init__(config)
@@ -642,15 +1184,26 @@ class SystemdDbusProvider(BaseProvider):
             "org.freedesktop.systemd1.manage-units",
             "System Service Management (start, stop, restart) "
             "will be disabled")
-        await self.dbus_mgr.check_permission(
-            "org.freedesktop.login1.power-off",
-            "The shutdown API will be disabled"
-        )
-        await self.dbus_mgr.check_permission(
-            "org.freedesktop.login1.power-off-multiple-sessions",
-            "The shutdown API will be disabled if multiple user "
-            "sessions are open."
-        )
+        if self.shutdown_action == "poweroff":
+            await self.dbus_mgr.check_permission(
+                "org.freedesktop.login1.power-off",
+                "The shutdown API will be disabled"
+            )
+            await self.dbus_mgr.check_permission(
+                "org.freedesktop.login1.power-off-multiple-sessions",
+                "The shutdown API will be disabled if multiple user "
+                "sessions are open."
+            )
+        else:
+            await self.dbus_mgr.check_permission(
+                "org.freedesktop.login1.halt",
+                "The shutdown API will be disabled"
+            )
+            await self.dbus_mgr.check_permission(
+                "org.freedesktop.login1.halt-multiple-sessions",
+                "The shutdown API will be disabled if multiple user "
+                "sessions are open."
+            )
         try:
             # Get the login manaager interface
             self.login_mgr = await self.dbus_mgr.get_interface(
@@ -684,7 +1237,10 @@ class SystemdDbusProvider(BaseProvider):
     async def shutdown(self) -> None:
         if self.login_mgr is None:
             await super().shutdown()
-        await self.login_mgr.call_power_off(False)  # type: ignore
+        if self.shutdown_action == "poweroff":
+            await self.login_mgr.call_power_off(False)  # type: ignore
+        else:
+            await self.login_mgr.call_halt(False)  # type: ignore
 
     async def do_service_action(self,
                                 action: str,
@@ -737,11 +1293,13 @@ class SystemdDbusProvider(BaseProvider):
     async def _detect_active_services(self) -> None:
         # Get loaded service
         mgr = self.systemd_mgr
-        patterns = [f"{svc}*.service" for svc in ALLOWED_SERVICES]
-        units = await mgr.call_list_units_by_patterns(  # type: ignore
-            ["loaded"], patterns)
+        machine: Machine = self.server.lookup_component("machine")
+        units: List[str]
+        units = await mgr.call_list_units_filtered(["loaded"])  # type: ignore
         for unit in units:
             name: str = unit[0].split('.')[0]
+            if not machine.is_service_allowed(name):
+                continue
             state: str = unit[3]
             substate: str = unit[4]
             dbus_path: str = unit[6]
@@ -793,6 +1351,834 @@ class SystemdDbusProvider(BaseProvider):
             self.server.send_event("machine:service_state_changed",
                                    {service_name: dict(svc)})
 
+    async def extract_service_info(
+        self,
+        service_name: str,
+        pid: int,
+        properties: Optional[List[str]] = None,
+        raw: bool = False
+    ) -> Dict[str, Any]:
+        if not hasattr(self, "systemd_mgr"):
+            return {}
+        mgr = self.systemd_mgr
+        service_info: Dict[str, Any] = {}
+        expected_name = f"{service_name}.service"
+        if properties is None:
+            properties = SERVICE_PROPERTIES
+        try:
+            dbus_path: str
+            dbus_path = await mgr.call_get_unit_by_pid(pid)  # type: ignore
+            bus = "org.freedesktop.systemd1"
+            unit_intf, svc_intf = await self.dbus_mgr.get_interfaces(
+                "org.freedesktop.systemd1", dbus_path,
+                [f"{bus}.Unit", f"{bus}.Service"]
+            )
+            unit_name = await unit_intf.get_id()  # type: ignore
+            service_info["unit_name"] = unit_name
+            service_info["is_default"] = True
+            service_info["manager"] = "systemd"
+            if unit_name != expected_name:
+                service_info["is_default"] = False
+                logging.info(
+                    f"Detected alternate unit name for {service_name}: "
+                    f"{unit_name}"
+                )
+            raw_props: Dict[str, Any] = {}
+            for key in properties:
+                snake_key = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", key).lower()
+                func = getattr(unit_intf, f"get_{snake_key}", None)
+                if func is None:
+                    func = getattr(svc_intf, f"get_{snake_key}", None)
+                    if func is None:
+                        continue
+                val = await func()
+                raw_props[key] = val
+            if raw:
+                service_info["properties"] = raw_props
+            else:
+                processed = self._process_raw_properties(raw_props)
+                service_info["properties"] = processed
+        except Exception:
+            logging.exception("Error Extracting Service Info")
+            return {}
+        return service_info
+
+    def _process_raw_properties(
+        self, raw_props: Dict[str, Any]
+    ) -> Dict[str, Any]:
+        processed: Dict[str, Any] = {}
+        for key, val in raw_props.items():
+            if key == "ExecStart":
+                try:
+                    val = " ".join(val[0][1])
+                except Exception:
+                    pass
+            elif key == "EnvironmentFiles":
+                try:
+                    val = val[0][0]
+                except Exception:
+                    pass
+            processed[key] = val
+        return processed
+
+# For the docker klipper-moonraker image (multi-service management).
+# Inside a container all commands are launched by a normal user, so
+# sudo_cmd is not needed.
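+#
+# Supervisord declares each managed service in a [program:<name>] section
+# and reports state via "supervisorctl status", whose output is parsed
+# below and looks roughly like (values illustrative):
+#
+#   klipper      RUNNING   pid 23, uptime 1:42:10
+#   moonraker    RUNNING   pid 24, uptime 1:42:09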
+class SupervisordCliProvider(BaseProvider):
+    def __init__(self, config: ConfigHelper) -> None:
+        super().__init__(config)
+        self.spv_conf: str = config.get("supervisord_config_path", "")
+
+    async def initialize(self) -> None:
+        await self._detect_active_services()
+        keys = ' '.join(list(self.available_services.keys()))
+        if self.spv_conf:
+            cmd = f"supervisorctl -c {self.spv_conf} status {keys}"
+        else:
+            cmd = f"supervisorctl status {keys}"
+        self.svc_cmd = self.shell_cmd.build_shell_command(cmd)
+        await self._update_service_status(0, notify=True)
+        pstats: ProcStats = self.server.lookup_component('proc_stats')
+        pstats.register_stat_callback(self._update_service_status)
+
+    async def do_service_action(
+        self, action: str, service_name: str
+    ) -> None:
+        # slow reaction for supervisord, timeout set to 6.0
+        await self._exec_supervisorctl_command(
+            f"{action} {service_name}", timeout=6.
+        )
+
+    async def _exec_supervisorctl_command(
+        self,
+        args: str,
+        tries: int = 1,
+        timeout: float = 2.,
+        success_codes: Optional[List[int]] = None
+    ) -> str:
+        if self.spv_conf:
+            cmd = f"supervisorctl -c {self.spv_conf} {args}"
+        else:
+            cmd = f"supervisorctl {args}"
+        return await self.shell_cmd.exec_cmd(
+            cmd, proc_input=None, log_complete=False, attempts=tries,
+            timeout=timeout, success_codes=success_codes
+        )
+
+    def _get_active_state(self, sub_state: str) -> str:
+        if sub_state == "stopping":
+            return "deactivating"
+        elif sub_state == "running":
+            return "active"
+        else:
+            return "inactive"
+
+    async def _detect_active_services(self) -> None:
+        machine: Machine = self.server.lookup_component("machine")
+        units: Dict[str, Any] = await self._get_process_info()
+        for unit, info in units.items():
+            if machine.is_service_allowed(unit):
+                self.available_services[unit] = {
+                    'active_state': self._get_active_state(info["state"]),
+                    'sub_state': info["state"]
+                }
+
+    async def _get_process_info(
+        self, process_names: Optional[List[str]] = None
+    ) -> Dict[str, Any]:
+        units: Dict[str, Any] = {}
+        cmd = "status"
+        if process_names is not None:
+            cmd = f"status {' '.join(process_names)}"
+        try:
+            resp = await self._exec_supervisorctl_command(
+                cmd, timeout=6., success_codes=[0, 3]
+            )
+            lines = [line.strip() for line in resp.split("\n") if line.strip()]
+        except Exception:
+            return {}
+        for line in lines:
+            parts = line.split()
+            name: str = parts[0]
+            state: str = parts[1].lower()
+            if state == "running" and len(parts) >= 6:
+                units[name] = {
+                    "state": state,
+                    "pid": int(parts[3].rstrip(",")),
+                    "uptime": parts[5]
+                }
+            else:
+                units[name] = {
+                    "state": parts[1].lower()
+                }
+        return units
+
+    async def _update_service_status(self,
+                                     sequence: int,
+                                     notify: bool = True
+                                     ) -> None:
+        if sequence % 2:
+            # Update every other sequence
+            return
+        svcs = list(self.available_services.keys())
+        try:
+            # slow reaction for supervisord, timeout set to 6.0
+            resp = await self.svc_cmd.run_with_response(
+                log_complete=False, timeout=6., success_codes=[0, 3]
+            )
+            resp_l = resp.strip().split("\n")  # drop legend
+            for svc, state in zip(svcs, resp_l):
+                sub_state = state.split()[1].lower()
+                new_state: Dict[str, str] = {
+                    'active_state': self._get_active_state(sub_state),
+                    'sub_state': sub_state
+                }
+                if self.available_services[svc] != new_state:
+                    self.available_services[svc] = new_state
+                    if notify:
+                        self.server.send_event(
+                            "machine:service_state_changed",
+                            {svc: new_state})
+        except Exception:
+            logging.exception("Error processing service state update")
+
+    async def _find_service_by_pid(
+        self, expected_name: str, pid: int
+    ) -> Dict[str, Any]:
+        service_info: Dict[str, Any] = {}
+        for _ in range(5):
+            proc_info = await self._get_process_info(
+                list(self.available_services.keys())
+            )
+            service_info["unit_name"] = expected_name
+            service_info["is_default"] = True
+            service_info["manager"] = "supervisord"
+            need_retry = False
+            for name, info in proc_info.items():
+                if "pid" not in info:
+                    need_retry |= info["state"] == "starting"
+                elif info["pid"] == pid:
+                    if name != expected_name:
+                        service_info["unit_name"] = name
+                        service_info["is_default"] = False
+                        logging.info(
+                            "Detected alternate unit name for "
+                            f"{expected_name}: {name}"
+                        )
+                    return service_info
+            if need_retry:
+                await asyncio.sleep(1.)
+            else:
+                break
+        return {}
+
+    async def extract_service_info(
+        self,
+        service_name: str,
+        pid: int,
+        properties: Optional[List[str]] = None,
+        raw: bool = False
+    ) -> Dict[str, Any]:
+        service_info = await self._find_service_by_pid(service_name, pid)
+        if not service_info:
+            logging.info(
+                f"Unable to locate service info for {service_name}, pid: {pid}"
+            )
+            return {}
+        # locate supervisord.conf
+        if self.spv_conf:
+            spv_path = pathlib.Path(self.spv_conf)
+            if not spv_path.is_file():
+                logging.info(
+                    f"Invalid supervisord configuration file: {self.spv_conf}"
+                )
+                return service_info
+        else:
+            default_config_locations = [
+                "/etc/supervisord.conf",
+                "/etc/supervisor/supervisord.conf"
+            ]
+            for conf_path in default_config_locations:
+                spv_path = pathlib.Path(conf_path)
+                if spv_path.is_file():
+                    break
+            else:
+                logging.info("Failed to locate supervisord.conf")
+                return service_info
+        spv_config = configparser.ConfigParser(interpolation=None)
+        spv_config.read_string(spv_path.read_text())
+        unit = service_info["unit_name"]
+        section_name = f"program:{unit}"
+        if not spv_config.has_section(section_name):
+            logging.info(
+                f"Unable to locate supervisor section {section_name}"
+            )
+            return service_info
+        service_info["properties"] = dict(spv_config[section_name])
+        return service_info
+
+
+# Install validation
+INSTALL_VERSION = 1
+SERVICE_VERSION = 1
+
+SYSTEMD_UNIT = \
+"""
+# systemd service file for moonraker
+[Unit]
+Description=API Server for Klipper SV%d
+Requires=network-online.target
+After=network-online.target
+
+[Install]
+WantedBy=multi-user.target
+
+[Service]
+Type=simple
+User=%s
+SupplementaryGroups=moonraker-admin
+RemainAfterExit=yes
+EnvironmentFile=%s
+ExecStart=%s $MOONRAKER_ARGS
+Restart=always
+RestartSec=10
+"""  # noqa: E122
+
+TEMPLATE_NAME = "password_request.html"
+
+class ValidationError(Exception):
+    pass
+
+class InstallValidator:
+    def __init__(self, config: ConfigHelper) -> None:
+        self.server = config.get_server()
+        self.config = config
+        self.server.load_component(config, "template")
+        self.force_validation = config.getboolean("force_validation", False)
+        self.sc_enabled = config.getboolean("validate_service", True)
+        self.cc_enabled = config.getboolean("validate_config", True)
+        app_args = self.server.get_app_args()
+        self.data_path = pathlib.Path(app_args["data_path"])
+        self._update_backup_path()
+        self.data_path_valid = True
+        self._sudo_requested = False
+        self.announcement_id = ""
+        self.validation_enabled = False
+
+    def _update_backup_path(self) -> None:
+        str_time = time.strftime("%Y%m%dT%H%M%SZ", time.gmtime())
+        if not hasattr(self, "backup_path"):
+            self.backup_path = self.data_path.joinpath(f"backup/{str_time}")
+        elif not self.backup_path.exists():
+            self.backup_path = self.data_path.joinpath(f"backup/{str_time}")
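+
+    # Validation is gated on a version stamp persisted in the database at
+    # moonraker.validate_install.install_version; bumping INSTALL_VERSION
+    # re-triggers validation on the next startup.  Conceptually:
+    #
+    #   stored = db.get("validate_install.install_version")
+    #   run_validation = stored is not None and stored < INSTALL_VERSION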
+ async def validation_init(self) -> None: + db: MoonrakerDatabase = self.server.lookup_component("database") + install_ver: Optional[int] = await db.get_item( + "moonraker", "validate_install.install_version", None + ) + if install_ver is None: + # skip validation for new installs + await db.insert_item( + "moonraker", "validate_install.install_version", INSTALL_VERSION + ) + install_ver = INSTALL_VERSION + if install_ver < INSTALL_VERSION: + logging.info("Validation version in database out of date") + self.validation_enabled = True + else: + msg = "Installation version in database up to date" + if self.force_validation: + msg += ", force is enabled" + logging.info(msg) + self.validation_enabled = self.force_validation + is_bkp_cfg = self.server.get_app_args().get("is_backup_config", False) + if self.validation_enabled and is_bkp_cfg: + self.server.add_warning( + "Backup configuration loaded, aborting install validation. " + "Please correct the configuration issue and restart moonraker." + ) + self.validation_enabled = False + return + + async def perform_validation(self) -> bool: + db: MoonrakerDatabase = self.server.lookup_component("database") + if not self.validation_enabled: + return False + fm: FileManager = self.server.lookup_component("file_manager") + need_restart: bool = False + has_error: bool = False + name = "service" + try: + need_restart = await self._check_service_file() + name = "config" + need_restart |= await self._check_configuration() + except asyncio.CancelledError: + raise + except ValidationError as ve: + has_error = True + self.server.add_warning(str(ve)) + fm.disable_write_access() + except Exception as e: + has_error = True + msg = f"Failed to validate {name}: {e}" + logging.exception(msg) + self.server.add_warning(msg, log=False) + fm.disable_write_access() + else: + self.validation_enabled = False + await db.insert_item( + "moonraker", "validate_install.install_version", INSTALL_VERSION + ) + if not has_error and need_restart: + machine: Machine = self.server.lookup_component("machine") + machine.restart_moonraker_service() + return True + return False + + async def _check_service_file(self) -> bool: + if not self.sc_enabled: + return False + machine: Machine = self.server.lookup_component("machine") + if machine.is_inside_container(): + raise ValidationError( + "Moonraker instance running inside a container, " + "cannot validate service file." + ) + if machine.get_provider_type() == "none": + raise ValidationError( + "No machine provider configured, cannot validate service file." + ) + logging.info("Performing Service Validation...") + app_args = self.server.get_app_args() + svc_info = machine.get_moonraker_service_info() + if not svc_info: + raise ValidationError( + "Unable to retrieve Moonraker service info. Service file " + "must be updated manually." + ) + props: Dict[str, str] = svc_info.get("properties", {}) + if "FragmentPath" not in props: + raise ValidationError( + "Unable to locate path to Moonraker's service unit. Service " + "file must be updated manually." + ) + desc = props.get("Description", "") + ver_match = re.match(r"API Server for Klipper SV(\d+)", desc) + if ver_match is not None and int(ver_match.group(1)) == SERVICE_VERSION: + logging.info("Service file validated and up to date") + return False + unit: str = svc_info.get("unit_name", "").split(".", 1)[0] + if not unit: + raise ValidationError( + "Unable to retrieve service unit name. Service file " + "must be updated manually." + )
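# Sketch of the unit-name to data-path mapping applied below (unit names
# hypothetical, not from this patch): a custom unit "moonraker-2" matches
# the regex and resolves to ~/printer_2_data, while a unit named
# "myprinter" falls back to ~/myprinter_data.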
+ if unit != "moonraker": + logging.info(f"Custom service file detected: {unit}") + # Not using the default unit name + if app_args["is_default_data_path"] and self.data_path_valid: + # No datapath set, create a new, unique data path + df = f"~/{unit}_data" + match = re.match(r"moonraker[-_]?(\d+)", unit) + if match is not None: + df = f"~/printer_{match.group(1)}_data" + new_dp = pathlib.Path(df).expanduser().resolve() + if new_dp.exists() and not self._check_path_bare(new_dp): + raise ValidationError( + f"Cannot resolve data path for custom unit '{unit}', " + f"data path '{new_dp}' already exists. Service file " + "must be updated manually." + ) + + # If the current path is bare we can remove it + if ( + self.data_path.exists() and + self._check_path_bare(self.data_path) + ): + shutil.rmtree(self.data_path) + self.data_path = new_dp + if not self.data_path.exists(): + logging.info(f"New data path created at {self.data_path}") + self.data_path.mkdir() + # A non-default datapath requires successful update of the + # service + self.data_path_valid = False + user: str = props["User"] + has_sudo = False + if await machine.check_sudo_access(): + has_sudo = True + logging.info("Moonraker has sudo access") + elif user == "pi" and machine.sudo_password is None: + machine.sudo_password = "raspberry" + has_sudo = await machine.check_sudo_access() + if not has_sudo: + self._request_sudo_access() + raise ValidationError( + "Moonraker requires sudo permission to update the system " + "service. Please check your notifications for further " + "instructions." + ) + self._sudo_requested = False + svc_dest = pathlib.Path(props["FragmentPath"]) + tmp_svc = pathlib.Path( + tempfile.gettempdir() + ).joinpath(f"{unit}-tmp.svc") + # Create local environment file + sysd_data = self.data_path.joinpath("systemd") + if not sysd_data.exists(): + sysd_data.mkdir() + env_file = sysd_data.joinpath("moonraker.env") + env_vars: Dict[str, str] = { + "MOONRAKER_DATA_PATH": str(self.data_path) + } + cfg_file = pathlib.Path(app_args["config_file"]) + fm: FileManager = self.server.lookup_component("file_manager") + cfg_path = fm.get_directory("config") + log_path = fm.get_directory("logs") + if not cfg_path or not cfg_file.parent.samefile(cfg_path): + env_vars["MOONRAKER_CONFIG_PATH"] = str(cfg_file) + elif cfg_file.name != "moonraker.conf": + cfg_file = self.data_path.joinpath(f"config/{cfg_file.name}") + env_vars["MOONRAKER_CONFIG_PATH"] = str(cfg_file) + if not app_args["log_file"]: + # No log file configured + env_vars["MOONRAKER_DISABLE_FILE_LOG"] = "y" + else: + # Log file does not exist in log path + log_file = pathlib.Path(app_args["log_file"]) + if not log_path or not log_file.parent.samefile(log_path): + env_vars["MOONRAKER_LOG_PATH"] = str(log_file) + elif log_file.name != "moonraker.log": + log_file = self.data_path.joinpath(f"logs/{log_file.name}") + env_vars["MOONRAKER_LOG_PATH"] = str(log_file) + # backup existing service files + self._update_backup_path() + svc_bkp_path = self.backup_path.joinpath("service") + os.makedirs(str(svc_bkp_path), exist_ok=True) + if env_file.exists(): + env_bkp = svc_bkp_path.joinpath(env_file.name) + shutil.copy2(str(env_file), str(env_bkp)) + service_bkp = svc_bkp_path.joinpath(svc_dest.name) + shutil.copy2(str(svc_dest), str(service_bkp)) + # write temporary service file + src_path = source_info.source_path() + exec_path = pathlib.Path(sys.executable) + py_exec = exec_path.parent.joinpath("python")
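# Sketch of a moonraker.env assembled from env_vars above, with assumed
# paths; the quoting matches the envout join performed below. The
# MOONRAKER_ARGS and PYTHONPATH entries only appear when launching via
# "python -m moonraker" from a source checkout:
#
#   MOONRAKER_DATA_PATH="/home/pi/printer_data"
#   MOONRAKER_ARGS="-m moonraker"
#   PYTHONPATH="/home/pi/moonraker"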
+ if exec_path.name == "python" or py_exec.is_file(): + # Default to loading via the python executable. This + # makes it possible to switch between git repos, pip + # releases and zipped releases without reinstalling the + # service. + exec_path = py_exec + env_vars["MOONRAKER_ARGS"] = "-m moonraker" + if not source_info.is_dist_package(): + # This module isn't in site/dist packages, + # add PYTHONPATH env variable + env_vars["PYTHONPATH"] = str(src_path) + tmp_svc.write_text( + SYSTEMD_UNIT + % (SERVICE_VERSION, user, env_file, exec_path) + ) + try: + # write new environment + envout = "\n".join(f"{key}=\"{val}\"" for key, val in env_vars.items()) + env_file.write_text(envout) + await machine.exec_sudo_command( + f"cp -f {tmp_svc} {svc_dest}", tries=5, timeout=60.) + await machine.exec_sudo_command( + "systemctl daemon-reload", tries=5, timeout=60. + ) + except asyncio.CancelledError: + raise + except Exception: + logging.exception("Failed to update moonraker service unit") + raise ValidationError( + f"Failed to update service unit file '{svc_dest}'. Update must " + "be performed manually." + ) from None + finally: + tmp_svc.unlink() + self.data_path_valid = True + self.sc_enabled = False + return True + + def _check_path_bare(self, path: pathlib.Path) -> bool: + empty: bool = True + if not path.exists(): + return True + for item in path.iterdir(): + if ( + item.is_file() or + item.is_symlink() or + item.name not in ["gcodes", "config", "logs", "certs"] + ): + empty = False + break + if item.is_dir() and next(item.iterdir(), None) is not None: + empty = False + break + return empty + + def _link_data_subfolder( + self, + folder_name: str, + source_dir: Union[str, pathlib.Path], + exist_ok: bool = False + ) -> None: + if isinstance(source_dir, str): + source_dir = pathlib.Path(source_dir).expanduser().resolve() + subfolder = self.data_path.joinpath(folder_name) + if not source_dir.exists(): + logging.info( + f"Source path '{source_dir}' does not exist. Falling " + f"back to default folder {subfolder}" + ) + return + if not source_dir.is_dir(): + raise ValidationError( + f"Failed to link subfolder '{folder_name}' to source path " + f"'{source_dir}'. The requested path is not a valid directory." + ) + if subfolder.is_symlink(): + if not subfolder.samefile(source_dir): + if exist_ok: + logging.info( + f"Folder {subfolder} already linked, aborting link " + f"to {source_dir}" + ) + return + raise ValidationError( + f"Failed to link subfolder '{folder_name}' to " + f"'{source_dir}'. '{folder_name}' already exists and is " + f"linked to {subfolder}. This conflict requires " + "manual resolution." + ) + return + if not subfolder.exists(): + subfolder.symlink_to(source_dir) + return + if subfolder.is_dir() and next(subfolder.iterdir(), None) is None: + subfolder.rmdir() + subfolder.symlink_to(source_dir) + return + if exist_ok: + logging.info( + f"Path at {subfolder} exists, aborting link to {source_dir}" + ) + return + raise ValidationError( + f"Failed to link subfolder '{folder_name}' to '{source_dir}'. " + f"Folder '{folder_name}' already exists. This conflict requires " + "manual resolution." + ) + + def _link_data_file( + self, + data_file: Union[str, pathlib.Path], + target: Union[str, pathlib.Path] + ) -> None: + if isinstance(data_file, str): + data_file = pathlib.Path(data_file) + if isinstance(target, str): + target = pathlib.Path(target) + target = target.expanduser().resolve() + if not target.exists(): + logging.info( + f"Target file {target} does not exist. Aborting symbolic " + f"link to {data_file.name}."
+ ) + return + if not target.is_file(): + raise ValidationError( + f"Failed to link data file {data_file.name}. Target " + f"{target} is not a valid file." + ) + if data_file.is_symlink(): + if not data_file.samefile(target): + raise ValidationError( + f"Failed to link data file {data_file.name}. Link " + f"to {data_file.resolve()} already exists. This conflict " + "must be resolved manually." + ) + return + if not data_file.exists(): + data_file.symlink_to(target) + return + raise ValidationError( + f"Failed to link data file {data_file.name}. File already exists. " + f"This conflict must be resolved manually." + ) + + async def _check_configuration(self) -> bool: + if not self.cc_enabled or not self.data_path_valid: + return False + db: MoonrakerDatabase = self.server.lookup_component("database") + cfg_source = cast(FileSourceWrapper, self.config.get_source()) + cfg_source.backup_source() + try: + # write current configuration to backup path + self._update_backup_path() + cfg_bkp_path = self.backup_path.joinpath("config") + os.makedirs(str(cfg_bkp_path), exist_ok=True) + await cfg_source.write_config(cfg_bkp_path) + # Create symbolic links for configured folders + server_cfg = self.config["server"] + + db_cfg = self.config["database"] + # symlink database path first + db_path = db_cfg.get("database_path", None) + default_db = pathlib.Path("~/.moonraker_database").expanduser() + if db_path is None and default_db.exists(): + self._link_data_subfolder( + "database", default_db, exist_ok=True + ) + elif db_path is not None: + self._link_data_subfolder("database", db_path) + cfg_source.remove_option("database", "database_path") + + fm_cfg = self.config["file_manager"] + cfg_path = fm_cfg.get("config_path", None) + if cfg_path is None: + cfg_path = server_cfg.get("config_path", None) + if cfg_path is not None: + self._link_data_subfolder("config", cfg_path) + cfg_source.remove_option("server", "config_path") + cfg_source.remove_option("file_manager", "config_path") + + log_path = fm_cfg.get("log_path", None) + if log_path is None: + log_path = server_cfg.get("log_path", None) + if log_path is not None: + self._link_data_subfolder("logs", log_path) + cfg_source.remove_option("server", "log_path") + cfg_source.remove_option("file_manager", "log_path") + + gc_path: Optional[str] = await db.get_item( + "moonraker", "file_manager.gcode_path", None + ) + if gc_path is not None: + self._link_data_subfolder("gcodes", gc_path) + db.delete_item("moonraker", "file_manager.gcode_path") + + # Link individual files + secrets_path = self.config["secrets"].get("secrets_path", None) + if secrets_path is not None: + secrets_dest = self.data_path.joinpath("moonraker.secrets") + self._link_data_file(secrets_dest, secrets_path) + cfg_source.remove_option("secrets", "secrets_path") + certs_path = self.data_path.joinpath("certs") + if not certs_path.exists(): + certs_path.mkdir() + ssl_cert = server_cfg.get("ssl_certificate_path", None) + if ssl_cert is not None: + cert_dest = certs_path.joinpath("moonraker.cert") + self._link_data_file(cert_dest, ssl_cert) + cfg_source.remove_option("server", "ssl_certificate_path") + ssl_key = server_cfg.get("ssl_key_path", None) + if ssl_key is not None: + key_dest = certs_path.joinpath("moonraker.key") + self._link_data_file(key_dest, ssl_key) + cfg_source.remove_option("server", "ssl_key_path") + + # Remove deprecated debug options + if server_cfg.has_option("enable_debug_logging"): + cfg_source.remove_option("server", "enable_debug_logging") + um_cfg = 
server_cfg["update_manager"] + if um_cfg.has_option("enable_repo_debug"): + cfg_source.remove_option("update_manager", "enable_repo_debug") + except Exception: + cfg_source.cancel() + raise + finally: + self.cc_enabled = False + return await cfg_source.save() + + def _request_sudo_access(self) -> None: + if self._sudo_requested: + return + self._sudo_requested = True + machine: Machine = self.server.lookup_component("machine") + machine.register_sudo_request( + self._on_password_received, + "Sudo password required to update Moonraker's systemd service." + ) + if not machine.public_ip: + async def wrapper(pub_ip): + if not pub_ip: + return + await self.remove_announcement() + self._announce_sudo_request() + self.server.register_event_handler( + "machine:public_ip_changed", wrapper + ) + self._announce_sudo_request() + + def _announce_sudo_request(self) -> None: + machine: Machine = self.server.lookup_component("machine") + host_info = self.server.get_host_info() + host_addr: str = host_info["address"] + if host_addr.lower() not in ["all", "0.0.0.0", "::"]: + address = host_addr + else: + address = machine.public_ip + if not address: + address = f"{host_info['hostname']}.local" + elif ":" in address: + # ipv6 address + address = f"[{address}]" + app: MoonrakerApp = self.server.lookup_component("application") + scheme = "https" if app.https_enabled() else "http" + host_info = self.server.get_host_info() + port = host_info["port"] + url = f"{scheme}://{address}:{port}/" + ancmp: Announcements = self.server.lookup_component("announcements") + entry = ancmp.add_internal_announcement( + "Sudo Password Required", + "Moonraker requires sudo access to finish updating. " + "Please click on the attached link and follow the " + "instructions.", + url, "high", "machine" + ) + self.announcement_id = entry.get("entry_id", "") + gc_announcement = ( + "!! ATTENTION: Moonraker requires sudo access to complete " + "the update. 
Go to the following URL and provide your linux " + f"password: {url}" + ) + self.server.send_event("server:gcode_response", gc_announcement) + + async def remove_announcement(self) -> None: + if not self.announcement_id: + return + ancmp: Announcements = self.server.lookup_component("announcements") + # remove stale announcement + try: + await ancmp.remove_announcement(self.announcement_id) + except self.server.error: + pass + self.announcement_id = "" + + async def _on_password_received(self) -> Tuple[str, bool]: + name = "Service" + try: + await self._check_service_file() + name = "Config" + await self._check_configuration() + except asyncio.CancelledError: + raise + except Exception: + logging.exception(f"{name} validation failed") + raise self.server.error( + f"{name} validation failed", 500 + ) from None + await self.remove_announcement() + db: MoonrakerDatabase = self.server.lookup_component("database") + await db.insert_item( + "moonraker", "validate_install.install_version", INSTALL_VERSION + ) + self.validation_enabled = False + return "System update complete.", True def load_component(config: ConfigHelper) -> Machine: return Machine(config) diff --git a/moonraker/components/mqtt.py b/moonraker/components/mqtt.py index 31f396b..07aa79e 100644 --- a/moonraker/components/mqtt.py +++ b/moonraker/components/mqtt.py @@ -8,12 +8,19 @@ from __future__ import annotations import socket import asyncio import logging -import json import pathlib import ssl from collections import deque import paho.mqtt.client as paho_mqtt -from websockets import Subscribable, WebRequest, JsonRPC, APITransport +import paho.mqtt +from ..common import ( + TransportType, + RequestType, + WebRequest, + APITransport, + KlippyState +) +from ..utils import json_wrapper as jsonw # Annotation imports from typing import ( @@ -30,12 +37,14 @@ from typing import ( Deque, ) if TYPE_CHECKING: - from app import APIDefinition - from confighelper import ConfigHelper - from klippy_connection import KlippyConnection as Klippy + from ..confighelper import ConfigHelper + from ..common import JsonRPC, APIDefinition + from ..eventloop import FlexTimer + from .klippy_apis import KlippyAPI FlexCallback = Callable[[bytes], Optional[Coroutine]] RPCCallback = Callable[..., Coroutine] +PAHO_MQTT_VERSION = tuple([int(p) for p in paho.mqtt.__version__.split(".")]) DUP_API_REQ_CODE = -10000 MQTT_PROTOCOLS = { 'v3.1': paho_mqtt.MQTTv31, @@ -54,22 +63,38 @@ class ExtPahoClient(paho_mqtt.Client): if self._port <= 0: raise ValueError('Invalid port number.') - self._in_packet = { - "command": 0, - "have_remaining": 0, - "remaining_count": [], - "remaining_mult": 1, - "remaining_length": 0, - "packet": b"", - "to_process": 0, - "pos": 0} + if PAHO_MQTT_VERSION >= (2, 0): + return self._v2_reconnect(sock) + if PAHO_MQTT_VERSION < (1, 6): + # Paho Mqtt Version < 1.6.x + self._in_packet = { + "command": 0, + "have_remaining": 0, + "remaining_count": [], + "remaining_mult": 1, + "remaining_length": 0, + "packet": b"", + "to_process": 0, + "pos": 0 + } + with self._out_packet_mutex: + self._out_packet = deque() # type: ignore - with self._out_packet_mutex: + with self._current_out_packet_mutex: + self._current_out_packet = None + else: + self._in_packet = { + "command": 0, + "have_remaining": 0, + "remaining_count": [], + "remaining_mult": 1, + "remaining_length": 0, + "packet": bytearray(b""), + "to_process": 0, + "pos": 0 + } self._out_packet = deque() # type: ignore - with self._current_out_packet_mutex: - self._current_out_packet = None - with 
self._msgtime_mutex: self._last_msg_in = paho_mqtt.time_func() self._last_msg_out = paho_mqtt.time_func() @@ -120,7 +145,7 @@ class ExtPahoClient(paho_mqtt.Client): sock.do_handshake() if verify_host: - ssl.match_hostname(sock.getpeercert(), self._host) + ssl.match_hostname(sock.getpeercert(), self._host) # type: ignore if self._transport == "websockets": sock.settimeout(self._keepalive) @@ -137,6 +162,65 @@ class ExtPahoClient(paho_mqtt.Client): return self._send_connect(self._keepalive) + def _v2_reconnect(self, sock: Optional[socket.socket] = None): + self._in_packet = { + "command": 0, + "have_remaining": 0, + "remaining_count": [], + "remaining_mult": 1, + "remaining_length": 0, + "packet": bytearray(b""), + "to_process": 0, + "pos": 0, + } + + self._ping_t = 0.0 # type: ignore + self._state = paho_mqtt._ConnectionState.MQTT_CS_CONNECTING + + self._sock_close() + + # Mark all currently outgoing QoS = 0 packets as lost, + # or `wait_for_publish()` could hang forever + for pkt in self._out_packet: + if ( + pkt["command"] & 0xF0 == paho_mqtt.PUBLISH and + pkt["qos"] == 0 and pkt["info"] is not None + ): + pkt["info"].rc = paho_mqtt.MQTT_ERR_CONN_LOST + pkt["info"]._set_as_published() + + self._out_packet.clear() + + with self._msgtime_mutex: + self._last_msg_in = paho_mqtt.time_func() + self._last_msg_out = paho_mqtt.time_func() + + # Put messages in progress in a valid state. + self._messages_reconnect_reset() + + with self._callback_mutex: + on_pre_connect = self.on_pre_connect + + if on_pre_connect: + try: + on_pre_connect(self, self._userdata) + except Exception as err: + self._easy_log( + paho_mqtt.MQTT_LOG_ERR, + 'Caught exception in on_pre_connect: %s', err + ) + if not self.suppress_exceptions: + raise + + self._sock = sock or self._create_socket() + + self._sock.setblocking(False) # type: ignore[attr-defined] + self._registered_write = False + self._call_socket_open(self._sock) + + return self._send_connect(self._keepalive) + + class SubscriptionHandle: def __init__(self, topic: str, callback: FlexCallback) -> None: self.callback = callback @@ -227,13 +311,13 @@ class AIOHelper: logging.info("MQTT Misc Loop Complete") -class MQTTClient(APITransport, Subscribable): +class MQTTClient(APITransport): def __init__(self, config: ConfigHelper) -> None: self.server = config.get_server() - self.event_loop = self.server.get_event_loop() - self.klippy: Klippy = self.server.lookup_component("klippy_connection") + self.eventloop = self.server.get_event_loop() self.address: str = config.get('address') self.port: int = config.getint('port', 1883) + self.tls_enabled: bool = config.getboolean("enable_tls", False) user = config.gettemplate('username', None) self.user_name: Optional[str] = None if user: @@ -266,7 +350,14 @@ class MQTTClient(APITransport, Subscribable): raise config.error( "Option 'default_qos' in section [mqtt] must be " "between 0 and 2") - self.client = ExtPahoClient(protocol=self.protocol) + self.publish_split_status = \ + config.getboolean("publish_split_status", False) + if PAHO_MQTT_VERSION < (2, 0): + self.client = ExtPahoClient(protocol=self.protocol) + else: + self.client = ExtPahoClient( + paho_mqtt.CallbackAPIVersion.VERSION1, protocol=self.protocol + ) self.client.on_connect = self._on_connect self.client.on_message = self._on_message self.client.on_disconnect = self._on_disconnect @@ -280,42 +371,54 @@ class MQTTClient(APITransport, Subscribable): self.pending_responses: List[asyncio.Future] = [] self.pending_acks: Dict[int, asyncio.Future] = {} + # We don't need to 
register these endpoints over the MQTT transport as they + # are redundant. MQTT clients can already publish and subscribe. + ep_transports = TransportType.all() & ~TransportType.MQTT self.server.register_endpoint( - "/server/mqtt/publish", ["POST"], - self._handle_publish_request, - transports=["http", "websocket", "internal"]) + "/server/mqtt/publish", RequestType.POST, self._handle_publish_request, + transports=ep_transports + ) self.server.register_endpoint( - "/server/mqtt/subscribe", ["POST"], + "/server/mqtt/subscribe", RequestType.POST, self._handle_subscription_request, - transports=["http", "websocket", "internal"]) + transports=ep_transports + ) # Subscribe to API requests - self.json_rpc = JsonRPC(transport="MQTT") self.api_request_topic = f"{self.instance_name}/moonraker/api/request" self.api_resp_topic = f"{self.instance_name}/moonraker/api/response" self.klipper_status_topic = f"{self.instance_name}/klipper/status" + self.klipper_state_prefix = f"{self.instance_name}/klipper/state" self.moonraker_status_topic = f"{self.instance_name}/moonraker/status" - status_cfg: Dict[str, Any] = config.getdict("status_objects", {}, - allow_empty_fields=True) - self.status_objs: Dict[str, Any] = {} + status_cfg: Dict[str, str] = config.getdict( + "status_objects", {}, allow_empty_fields=True + ) + self.status_interval = config.getfloat("status_interval", 0, above=.25) + self.status_cache: Dict[str, Dict[str, Any]] = {} + self.status_update_timer: Optional[FlexTimer] = None + self.last_status_time = 0. + self.status_objs: Dict[str, Optional[List[str]]] = {} for key, val in status_cfg.items(): if val is not None: - self.status_objs[key] = [v.strip() for v in val.split(',') - if v.strip()] + self.status_objs[key] = [v.strip() for v in val.split(',') if v.strip()] else: self.status_objs[key] = None if status_cfg: logging.debug(f"MQTT: Status Objects Set: {self.status_objs}") - self.server.register_event_handler("server:klippy_identified", - self._handle_klippy_identified) + self.server.register_event_handler( + "server:klippy_started", self._handle_klippy_started + ) + self.server.register_event_handler( + "server:klippy_disconnect", self._handle_klippy_disconnect + ) + if self.status_interval: + self.status_update_timer = self.eventloop.register_timer( + self._handle_timed_status_update + ) self.timestamp_deque: Deque = deque(maxlen=20) self.api_qos = config.getint('api_qos', self.qos) if config.getboolean("enable_moonraker_api", True): - api_cache = self.server.register_api_transport("mqtt", self) - for api_def in api_cache.values(): - if "mqtt" in api_def.supported_transports: - self.register_api_handler(api_def) self.subscribe_topic(self.api_request_topic, self._process_api_request, self.api_qos) @@ -336,21 +439,31 @@ class MQTTClient(APITransport, Subscribable): if self.user_name is not None: self.client.username_pw_set(self.user_name, self.password) self.client.will_set(self.moonraker_status_topic, - payload=json.dumps({'server': 'offline'}), + payload=jsonw.dumps({'server': 'offline'}), qos=self.qos, retain=True) + if self.tls_enabled: + self.client.tls_set() self.client.connect_async(self.address, self.port) - self.connect_task = self.event_loop.create_task( + self.connect_task = self.eventloop.create_task( self._do_reconnect(first=True) ) - async def _handle_klippy_identified(self) -> None: + async def _handle_klippy_started(self, state: KlippyState) -> None: if self.status_objs: - args = {'objects': self.status_objs} - try: - await self.klippy.request( - 
WebRequest("objects/subscribe", args, conn=self)) - except self.server.error: - pass + kapi: KlippyAPI = self.server.lookup_component("klippy_apis") + await kapi.subscribe_from_transport( + self.status_objs, self, default=None, + ) + if self.status_update_timer is not None: + self.status_update_timer.start(delay=self.status_interval) + + def _handle_klippy_disconnect(self): + if self.status_update_timer is not None: + self.status_update_timer.stop() + if self.status_cache: + payload = self.status_cache + self.status_cache = {} + self._publish_status_update(payload, self.last_status_time) def _on_message(self, client: str, @@ -361,7 +474,7 @@ class MQTTClient(APITransport, Subscribable): if topic in self.subscribed_topics: cb_hdls = self.subscribed_topics[topic][1] for hdl in cb_hdls: - self.event_loop.register_callback( + self.eventloop.register_callback( hdl.callback, message.payload) else: logging.debug( @@ -383,7 +496,7 @@ class MQTTClient(APITransport, Subscribable): if subs: res, msg_id = client.subscribe(subs) if msg_id is not None: - sub_fut: asyncio.Future = asyncio.Future() + sub_fut: asyncio.Future = self.eventloop.create_future() topics = list(self.subscribed_topics.keys()) sub_fut.add_done_callback( BrokerAckLogger(topics, "subscribe")) @@ -457,14 +570,14 @@ class MQTTClient(APITransport, Subscribable): raise first = False try: - sock = await self.event_loop.create_socket_connection( + sock = await self.eventloop.create_socket_connection( (self.address, self.port), timeout=10 ) self.client.reconnect(sock) except asyncio.CancelledError: raise except Exception as e: - if type(last_err) != type(e) or last_err.args != e.args: + if type(last_err) is not type(e) or last_err.args != e.args: logging.exception("MQTT Connection Error") last_err = e continue @@ -505,7 +618,7 @@ class MQTTClient(APITransport, Subscribable): if self.is_connected() and need_sub: res, msg_id = self.client.subscribe(topic, qos) if msg_id is not None: - sub_fut: asyncio.Future = asyncio.Future() + sub_fut: asyncio.Future = self.eventloop.create_future() sub_fut.add_done_callback( BrokerAckLogger([topic], "subscribe")) self.pending_acks[msg_id] = sub_fut @@ -523,7 +636,7 @@ class MQTTClient(APITransport, Subscribable): del self.subscribed_topics[topic] res, msg_id = self.client.unsubscribe(topic) if msg_id is not None: - unsub_fut: asyncio.Future = asyncio.Future() + unsub_fut: asyncio.Future = self.eventloop.create_future() unsub_fut.add_done_callback( BrokerAckLogger([topic], "unsubscribe")) self.pending_acks[msg_id] = unsub_fut @@ -537,11 +650,11 @@ class MQTTClient(APITransport, Subscribable): qos = qos or self.qos if qos > 2 or qos < 0: raise self.server.error("QOS must be between 0 and 2") - pub_fut: asyncio.Future = asyncio.Future() + pub_fut: asyncio.Future = self.eventloop.create_future() if isinstance(payload, (dict, list)): try: - payload = json.dumps(payload) - except json.JSONDecodeError: + payload = jsonw.dumps(payload) + except jsonw.JSONDecodeError: raise self.server.error( "Dict or List is not json encodable") from None elif isinstance(payload, bool): @@ -584,7 +697,7 @@ class MQTTClient(APITransport, Subscribable): qos = qos or self.qos if qos > 2 or qos < 0: raise self.server.error("QOS must be between 0 and 2") - resp_fut: asyncio.Future = asyncio.Future() + resp_fut: asyncio.Future = self.eventloop.create_future() resp_hdl = self.subscribe_topic( response_topic, resp_fut.set_result, qos) self.pending_responses.append(resp_fut) @@ -626,7 +739,7 @@ class MQTTClient(APITransport, Subscribable): 
topic: str = web_request.get_str("topic") qos: int = web_request.get_int("qos", self.qos) timeout: Optional[float] = web_request.get_float('timeout', None) - resp: asyncio.Future = asyncio.Future() + resp: asyncio.Future = self.eventloop.create_future() hdl: Optional[SubscriptionHandle] = None try: hdl = self.subscribe_topic(topic, resp.set_result, qos) @@ -643,8 +756,8 @@ class MQTTClient(APITransport, Subscribable): if hdl is not None: self.unsubscribe(hdl) try: - payload = json.loads(ret) - except json.JSONDecodeError: + payload = jsonw.loads(ret) + except jsonw.JSONDecodeError: payload = ret.decode() return { 'topic': topic, @@ -652,51 +765,19 @@ class MQTTClient(APITransport, Subscribable): } async def _process_api_request(self, payload: bytes) -> None: - response = await self.json_rpc.dispatch(payload.decode()) + rpc: JsonRPC = self.server.lookup_component("jsonrpc") + response = await rpc.dispatch(payload, self) if response is not None: await self.publish_topic(self.api_resp_topic, response, self.api_qos) - def register_api_handler(self, api_def: APIDefinition) -> None: - if api_def.callback is None: - # Remote API, uses RPC to reach out to Klippy - mqtt_method = api_def.jrpc_methods[0] - rpc_cb = self._generate_remote_callback(api_def.endpoint) - self.json_rpc.register_method(mqtt_method, rpc_cb) - else: - # Local API, uses local callback - for mqtt_method, req_method in \ - zip(api_def.jrpc_methods, api_def.request_methods): - rpc_cb = self._generate_local_callback( - api_def.endpoint, req_method, api_def.callback) - self.json_rpc.register_method(mqtt_method, rpc_cb) - logging.info( - "Registering MQTT JSON-RPC methods: " - f"{', '.join(api_def.jrpc_methods)}") + @property + def transport_type(self) -> TransportType: + return TransportType.MQTT - def remove_api_handler(self, api_def: APIDefinition) -> None: - for jrpc_method in api_def.jrpc_methods: - self.json_rpc.remove_method(jrpc_method) - - def _generate_local_callback(self, - endpoint: str, - request_method: str, - callback: Callable[[WebRequest], Coroutine] - ) -> RPCCallback: - async def func(args: Dict[str, Any]) -> Any: - self._check_timestamp(args) - result = await callback(WebRequest(endpoint, args, request_method)) - return result - return func - - def _generate_remote_callback(self, endpoint: str) -> RPCCallback: - async def func(args: Dict[str, Any]) -> Any: - self._check_timestamp(args) - result = await self.klippy.request(WebRequest(endpoint, args)) - return result - return func - - def _check_timestamp(self, args: Dict[str, Any]) -> None: + def screen_rpc_request( + self, api_def: APIDefinition, req_type: RequestType, args: Dict[str, Any] + ) -> None: ts = args.pop("mqtt_timestamp", None) if ts is not None: if ts in self.timestamp_deque: @@ -706,19 +787,43 @@ class MQTTClient(APITransport, Subscribable): else: self.timestamp_deque.append(ts) - def send_status(self, - status: Dict[str, Any], - eventtime: float - ) -> None: + def send_status(self, status: Dict[str, Any], eventtime: float) -> None: if not status or not self.is_connected(): return - payload = {'eventtime': eventtime, 'status': status} - self.publish_topic(self.klipper_status_topic, payload) + if not self.status_interval: + self._publish_status_update(status, eventtime) + else: + for key, val in status.items(): + self.status_cache.setdefault(key, {}).update(val) + self.last_status_time = eventtime + + def _handle_timed_status_update(self, eventtime: float) -> float: + if self.status_cache: + payload = self.status_cache + self.status_cache = {} + 
self._publish_status_update(payload, self.last_status_time) + return eventtime + self.status_interval + + def _publish_status_update(self, status: Dict[str, Any], eventtime: float) -> None: + if self.publish_split_status: + for objkey in status: + objval = status[objkey] + for statekey in objval: + payload = {'eventtime': eventtime, 'value': objval[statekey]} + self.publish_topic( + f"{self.klipper_state_prefix}/{objkey}/{statekey}", + payload, retain=True) + else: + payload = {'eventtime': eventtime, 'status': status} + self.publish_topic(self.klipper_status_topic, payload) + def get_instance_name(self) -> str: return self.instance_name async def close(self) -> None: + if self.status_update_timer is not None: + self.status_update_timer.stop() if self.connect_task is not None: self.connect_task.cancel() self.connect_task = None diff --git a/moonraker/components/notifier.py b/moonraker/components/notifier.py index 3e5ec48..7f01296 100644 --- a/moonraker/components/notifier.py +++ b/moonraker/components/notifier.py @@ -8,114 +8,112 @@ from __future__ import annotations import apprise import logging +import pathlib +import re +from ..common import JobEvent, RequestType # Annotation imports from typing import ( TYPE_CHECKING, - Type, - Optional, Dict, Any, List, - Union, ) if TYPE_CHECKING: - from confighelper import ConfigHelper - from . import klippy_apis - - APIComp = klippy_apis.KlippyAPI - + from ..confighelper import ConfigHelper + from ..common import WebRequest + from .file_manager.file_manager import FileManager + from .klippy_apis import KlippyAPI as APIComp class Notifier: def __init__(self, config: ConfigHelper) -> None: self.server = config.get_server() self.notifiers: Dict[str, NotifierInstance] = {} - self.events: Dict[str, NotifierEvent] = {} + self.events: Dict[str, List[NotifierInstance]] = {} prefix_sections = config.get_prefix_sections("notifier") - - self.register_events(config) - + self.register_remote_actions() for section in prefix_sections: cfg = config[section] try: notifier = NotifierInstance(cfg) - - for event in self.events: - if event in notifier.events or "*" in notifier.events: - self.events[event].register_notifier(notifier) - + for job_event in list(JobEvent): + if job_event == JobEvent.STANDBY: + continue + evt_name = str(job_event) + if "*" in notifier.events or evt_name in notifier.events: + self.events.setdefault(evt_name, []).append(notifier) logging.info(f"Registered notifier: '{notifier.get_name()}'") - except Exception as e: msg = f"Failed to load notifier[{cfg.get_name()}]\n{e}" self.server.add_warning(msg) continue self.notifiers[notifier.get_name()] = notifier - def register_events(self, config: ConfigHelper): + self.register_endpoints(config) + self.server.register_event_handler( + "job_state:state_changed", self._on_job_state_changed + ) - self.events["started"] = NotifierEvent( - "started", - "job_state:started", - config) + def register_remote_actions(self): + self.server.register_remote_method("notify", self.notify_action) - self.events["complete"] = NotifierEvent( - "complete", - "job_state:complete", - config) + async def notify_action(self, name: str, message: str = ""): + if name not in self.notifiers: + raise self.server.error(f"Notifier '{name}' not found", 404) + notifier = self.notifiers[name] + await notifier.notify("remote_action", [], message) - self.events["error"] = NotifierEvent( - "error", - "job_state:error", - config) + async def _on_job_state_changed( + self, + job_event: JobEvent, + prev_stats: Dict[str, Any], + new_stats: 
Dict[str, Any] + ) -> None: + evt_name = str(job_event) + for notifier in self.events.get(evt_name, []): + await notifier.notify(evt_name, [prev_stats, new_stats]) - self.events["cancelled"] = NotifierEvent( - "cancelled", - "job_state:cancelled", - config) + def register_endpoints(self, config: ConfigHelper) -> None: + self.server.register_endpoint( + "/server/notifiers/list", RequestType.GET, self._handle_notifier_list + ) + self.server.register_debug_endpoint( + "/debug/notifiers/test", RequestType.POST, self._handle_notifier_test + ) - self.events["paused"] = NotifierEvent( - "paused", - "job_state:paused", - config) + async def _handle_notifier_list( + self, web_request: WebRequest + ) -> Dict[str, Any]: + return {"notifiers": self._list_notifiers()} - self.events["resumed"] = NotifierEvent( - "resumed", - "job_state:resumed", - config) + def _list_notifiers(self) -> List[Dict[str, Any]]: + return [notifier.as_dict() for notifier in self.notifiers.values()] + async def _handle_notifier_test( + self, web_request: WebRequest + ) -> Dict[str, Any]: -class NotifierEvent: - def __init__(self, identifier: str, event_name: str, config: ConfigHelper): - self.identifier = identifier - self.event_name = event_name - self.server = config.get_server() - self.notifiers: Dict[str, NotifierInstance] = {} - self.config = config + name = web_request.get_str("name") + if name not in self.notifiers: + raise self.server.error(f"Notifier '{name}' not found", 404) + notifier = self.notifiers[name] - self.server.register_event_handler(self.event_name, self._handle) + kapis: APIComp = self.server.lookup_component('klippy_apis') + result: Dict[str, Any] = await kapis.query_objects( + {'print_stats': None}, default={}) + print_stats = result.get('print_stats', {}) + print_stats["filename"] = "notifier_test.gcode" # Mock the filename - def register_notifier(self, notifier: NotifierInstance): - self.notifiers[notifier.get_name()] = notifier - - async def _handle(self, *args) -> None: - logging.info(f"'{self.identifier}' notifier event triggered'") - await self.invoke_notifiers(args) - - async def invoke_notifiers(self, args): - for notifier_name in self.notifiers: - try: - notifier = self.notifiers[notifier_name] - await notifier.notify(self.identifier, args) - except Exception as e: - logging.info(f"Failed to notify [{notifier_name}]\n{e}") - continue + await notifier.notify(notifier.events[0], [print_stats, print_stats]) + return { + "status": "success", + "stats": print_stats + } class NotifierInstance: def __init__(self, config: ConfigHelper) -> None: - self.config = config name_parts = config.get_name().split(maxsplit=1) @@ -123,32 +121,40 @@ class NotifierInstance: self.server = config.get_server() self.name = name_parts[1] self.apprise = apprise.Apprise() - self.warned = False - - self.attach_requires_file_system_check = True - self.attach = config.get("attach", None) - if self.attach is None or \ - (self.attach.startswith("http://") or - self.attach.startswith("https://")): - self.attach_requires_file_system_check = False - - url_template = config.gettemplate('url') + self.attach = config.gettemplate("attach", None) + url_template = config.gettemplate("url") self.url = url_template.render() - if len(self.url) < 2: + if re.match(r"\w+?://", self.url) is None: raise config.error(f"Invalid url for: {config.get_name()}") - self.title = config.gettemplate('title', None) + self.title = config.gettemplate("title", None) self.body = config.gettemplate("body", None) - + upper_body_format = config.get("body_format", 'text').upper()
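# A hypothetical [notifier ...] section exercising the options parsed in
# this __init__; the name, apprise URL, and snapshot address are placeholder
# values, not taken from this patch:
#
#   [notifier print_alerts]
#   url: tgram://{bottoken}/{ChatID}
#   events: complete, error
#   body: Job state changed to {event_name}
#   body_format: text
#   attach: http://127.0.0.1/webcam/?action=snapshot
#
# Such a notifier could presumably also be fired from a Klipper gcode_macro
# via the remote method registered above, e.g.
# {action_call_remote_method("notify", name="print_alerts", message="done")}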
+ if not hasattr(apprise.NotifyFormat, upper_body_format): + raise config.error(f"Invalid body_format for {config.get_name()}") + self.body_format = getattr(apprise.NotifyFormat, upper_body_format) self.events: List[str] = config.getlist("events", separator=",") - self.apprise.add(self.url) - async def notify(self, event_name: str, event_args: List) -> None: + def as_dict(self) -> Dict[str, Any]: + return { + "name": self.name, + "url": self.config.get("url"), + "title": self.config.get("title", None), + "body": self.config.get("body", None), + "body_format": self.config.get("body_format", None), + "events": self.events, + "attach": self.attach + } + + async def notify( + self, event_name: str, event_args: List, message: str = "" + ) -> None: context = { "event_name": event_name, - "event_args": event_args + "event_args": event_args, + "event_message": message } rendered_title = ( @@ -159,22 +165,47 @@ ) # Verify the attachment - if self.attach_requires_file_system_check and self.attach is not None: - fm = self.server.lookup_component("file_manager") - if not fm.can_access_path(self.attach): - if not self.warned: - self.server.add_warning( - f"Attachment of notifier '{self.name}' is not " - "valid. The location of the " - "attachment is not " - "accessible.") - self.warned = True + attachments: List[str] = [] + if self.attach is not None: + fm: FileManager = self.server.lookup_component("file_manager") + try: + rendered = self.attach.render(context) + except self.server.error: + logging.exception(f"notifier {self.name}: Failed to render attachment") + self.server.add_warning( + f"[notifier {self.name}]: The attachment is not valid. The " + "template failed to render.", + f"notifier {self.name}" + ) self.attach = None - + else: + for item in rendered.splitlines(): + item = item.strip() + if not item: + continue + if re.match(r"https?://", item) is not None: + # Attachment is a url, system check not necessary + attachments.append(item) + continue + attach_path = pathlib.Path(item).expanduser().resolve() + if not attach_path.is_file(): + self.server.add_warning( + f"[notifier {self.name}]: Invalid attachment detected, " + f"file does not exist: {attach_path}.", + f"notifier {self.name}" + ) + elif not fm.can_access_path(attach_path): + self.server.add_warning( + f"[notifier {self.name}]: Invalid attachment detected, " + f"no read permission for the file {attach_path}.", + f"notifier {self.name}" + ) + else: + attachments.append(str(attach_path)) await self.apprise.async_notify( - rendered_body.strip(), - rendered_title.strip(), - attach=self.attach + rendered_body.strip(), rendered_title.strip(), + body_format=self.body_format, + attach=None if not attachments else attachments ) def get_name(self) -> str: diff --git a/moonraker/components/octoprint_compat.py b/moonraker/components/octoprint_compat.py index 6b081f4..bd68e0a 100644 --- a/moonraker/components/octoprint_compat.py +++ b/moonraker/components/octoprint_compat.py @@ -6,6 +6,7 @@ from __future__ import annotations import logging +from ..common import RequestType, TransportType, KlippyState # Annotation imports from typing import ( @@ -15,8 +16,9 @@ from typing import ( List, ) if TYPE_CHECKING: - from confighelper import ConfigHelper - from websockets import WebRequest + from .klippy_connection import KlippyConnection + from ..confighelper import ConfigHelper + from ..common import WebRequest + from .klippy_apis import KlippyAPI as APIComp from .file_manager.file_manager import FileManager
from .job_queue import JobQueue @@ -65,22 +67,27 @@ class OctoPrintCompat: # Version & Server information self.server.register_endpoint( - '/api/version', ['GET'], self._get_version, - transports=['http'], wrap_result=False) + '/api/version', RequestType.GET, self._get_version, + transports=TransportType.HTTP, wrap_result=False + ) self.server.register_endpoint( - '/api/server', ['GET'], self._get_server, - transports=['http'], wrap_result=False) + '/api/server', RequestType.GET, self._get_server, + transports=TransportType.HTTP, wrap_result=False + ) # Login, User & Settings self.server.register_endpoint( - '/api/login', ['POST'], self._post_login_user, - transports=['http'], wrap_result=False) + '/api/login', RequestType.POST, self._post_login_user, + transports=TransportType.HTTP, wrap_result=False + ) self.server.register_endpoint( - '/api/currentuser', ['GET'], self._post_login_user, - transports=['http'], wrap_result=False) + '/api/currentuser', RequestType.GET, self._post_login_user, + transports=TransportType.HTTP, wrap_result=False + ) self.server.register_endpoint( - '/api/settings', ['GET'], self._get_settings, - transports=['http'], wrap_result=False) + '/api/settings', RequestType.GET, self._get_settings, + transports=TransportType.HTTP, wrap_result=False + ) # File operations # Note that file upload is handled in file_manager.py @@ -88,30 +95,34 @@ class OctoPrintCompat: # Job operations self.server.register_endpoint( - '/api/job', ['GET'], self._get_job, - transports=['http'], wrap_result=False) + '/api/job', RequestType.GET, self._get_job, + transports=TransportType.HTTP, wrap_result=False + ) # TODO: start/cancel/restart/pause jobs # Printer operations self.server.register_endpoint( - '/api/printer', ['GET'], self._get_printer, - transports=['http'], wrap_result=False) + '/api/printer', RequestType.GET, self._get_printer, + transports=TransportType.HTTP, wrap_result=False) self.server.register_endpoint( - '/api/printer/command', ['POST'], self._post_command, - transports=['http'], wrap_result=False) + '/api/printer/command', RequestType.POST, self._post_command, + transports=TransportType.HTTP, wrap_result=False + ) # TODO: head/tool/bed/chamber specific read/issue # Printer profiles self.server.register_endpoint( - '/api/printerprofiles', ['GET'], self._get_printerprofiles, - transports=['http'], wrap_result=False) + '/api/printerprofiles', RequestType.GET, self._get_printerprofiles, + transports=TransportType.HTTP, wrap_result=False + ) # Upload Handlers self.server.register_upload_handler( "/api/files/local", location_prefix="api/files/moonraker") self.server.register_endpoint( - "/api/files/moonraker/(?P.+)", ['POST'], - self._select_file, transports=['http'], wrap_result=False) + "/api/files/moonraker/(?P.+)", RequestType.POST, + self._select_file, transports=TransportType.HTTP, wrap_result=False + ) # System # TODO: shutdown/reboot/restart operations @@ -143,10 +154,11 @@ class OctoPrintCompat: data.update(status[heater_name]) def printer_state(self) -> str: - klippy_state = self.server.get_klippy_state() - if klippy_state in ["disconnected", "startup"]: + kconn: KlippyConnection = self.server.lookup_component("klippy_connection") + klippy_state = kconn.state + if not klippy_state.startup_complete(): return 'Offline' - elif klippy_state != 'ready': + elif klippy_state != KlippyState.READY: return 'Error' return { 'standby': 'Operational', @@ -192,11 +204,11 @@ class OctoPrintCompat: """ Server status """ - klippy_state = self.server.get_klippy_state() + kconn: 
KlippyConnection = self.server.lookup_component("klippy_connection") + klippy_state = kconn.state return { 'server': OCTO_VERSION, - 'safemode': ( - None if klippy_state == 'ready' else 'settings') + 'safemode': None if klippy_state == KlippyState.READY else 'settings' } async def _post_login_user(self, @@ -355,12 +367,12 @@ class OctoPrintCompat: async def _select_file(self, web_request: WebRequest ) -> None: - command: str = web_request.get('command') - rel_path: str = web_request.get('relative_path') + command: str = web_request.get_str('command') + rel_path: str = web_request.get_str('relative_path') root, filename = rel_path.strip("/").split("/", 1) fmgr: FileManager = self.server.lookup_component('file_manager') if command == "select": - start_print: bool = web_request.get('print', False) + start_print: bool = web_request.get_boolean('print', False) if not start_print: # No-op, selecting a file has no meaning in Moonraker return @@ -376,9 +388,10 @@ class OctoPrintCompat: except self.server.error: pstate = "not_avail" started: bool = False + user = web_request.get_current_user() if pstate not in ["printing", "paused", "not_avail"]: try: - await self.klippy_apis.start_print(filename) + await self.klippy_apis.start_print(filename, user=user) except self.server.error: started = False else: @@ -388,7 +401,7 @@ class OctoPrintCompat: if fmgr.upload_queue_enabled(): job_queue: JobQueue = self.server.lookup_component( 'job_queue') - await job_queue.queue_job(filename, check_exists=False) + await job_queue.queue_job(filename, check_exists=False, user=user) logging.debug(f"Job '{filename}' queued via OctoPrint API") else: raise self.server.error("Conflict", 409) diff --git a/moonraker/components/paneldue.py b/moonraker/components/paneldue.py index fc92231..d28d4c5 100644 --- a/moonraker/components/paneldue.py +++ b/moonraker/components/paneldue.py @@ -8,12 +8,12 @@ from __future__ import annotations import serial import os import time -import json import errno import logging import asyncio from collections import deque -from utils import ServerError +from ..utils import ServerError +from ..utils import json_wrapper as jsonw # Annotation imports from typing import ( @@ -28,11 +28,10 @@ from typing import ( Coroutine, ) if TYPE_CHECKING: - from confighelper import ConfigHelper - from . import klippy_apis - from .file_manager import file_manager - APIComp = klippy_apis.KlippyAPI - FMComp = file_manager.FileManager + from ..confighelper import ConfigHelper + from .klippy_connection import KlippyConnection + from .klippy_apis import KlippyAPI as APIComp + from .file_manager.file_manager import FileManager as FMComp FlexCallback = Callable[..., Optional[Coroutine]] MIN_EST_TIME = 10. @@ -169,10 +168,8 @@ class PanelDue: def __init__(self, config: ConfigHelper) -> None: self.server = config.get_server() self.event_loop = self.server.get_event_loop() - self.file_manager: FMComp = \ - self.server.lookup_component('file_manager') - self.klippy_apis: APIComp = \ - self.server.lookup_component('klippy_apis') + self.file_manager: FMComp = self.server.lookup_component('file_manager') + self.klippy_apis: APIComp = self.server.lookup_component('klippy_apis') self.kinematics: str = "none" self.machine_name = config.get('machine_name', "Klipper") self.firmware_name: str = "Repetier | Klipper" @@ -184,10 +181,8 @@ class PanelDue: self.debug_queue: Deque[str] = deque(maxlen=100) # Initialize tracked state. 
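# (The kconn.get_subscription_cache() call introduced below returns the
# status dict that KlippyConnection maintains for all subscribers; once
# Klippy is ready it holds entries roughly of the form
#   {"print_stats": {"state": "standby", ...}, "toolhead": {...}, ...}
# so PanelDue no longer needs to track its own copy via a
# "server:status_update" handler.)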
- self.printer_state: Dict[str, Dict[str, Any]] = { - 'gcode_move': {}, 'toolhead': {}, 'virtual_sdcard': {}, - 'fan': {}, 'display_status': {}, 'print_stats': {}, - 'idle_timeout': {}, 'gcode_macro PANELDUE_BEEP': {}} + kconn: KlippyConnection = self.server.lookup_component("klippy_connection") + self.printer_state: Dict[str, Dict[str, Any]] = kconn.get_subscription_cache() self.extruder_count: int = 0 self.heaters: List[str] = [] self.is_ready: bool = False @@ -218,26 +213,24 @@ class PanelDue: # command is the value self.confirmed_macros = {m.split()[0]: m for m in conf_macros} self.available_macros.update(self.confirmed_macros) - - self.non_trivial_keys = config.getlist('non_trivial_keys', - ["Klipper state"]) + self.non_trivial_keys = config.getlist('non_trivial_keys', ["Klipper state"]) self.ser_conn = SerialConnection(config, self) logging.info("PanelDue Configured") # Register server events self.server.register_event_handler( - "server:klippy_ready", self._process_klippy_ready) + "server:klippy_ready", self._process_klippy_ready + ) self.server.register_event_handler( - "server:klippy_shutdown", self._process_klippy_shutdown) + "server:klippy_shutdown", self._process_klippy_shutdown + ) self.server.register_event_handler( - "server:klippy_disconnect", self._process_klippy_disconnect) + "server:klippy_disconnect", self._process_klippy_disconnect + ) self.server.register_event_handler( - "server:status_update", self.handle_status_update) - self.server.register_event_handler( - "server:gcode_response", self.handle_gcode_response) - - self.server.register_remote_method( - "paneldue_beep", self.paneldue_beep) + "server:gcode_response", self.handle_gcode_response + ) + self.server.register_remote_method("paneldue_beep", self.paneldue_beep) # These commands are directly executued on the server and do not to # make a request to Klippy @@ -270,12 +263,12 @@ class PanelDue: async def _process_klippy_ready(self) -> None: # Request "info" and "configfile" status retries = 10 - printer_info = cfg_status = {} + printer_info: Dict[str, Any] = {} + cfg_status: Dict[str, Any] = {} while retries: try: printer_info = await self.klippy_apis.get_klippy_info() - cfg_status = await self.klippy_apis.query_objects( - {'configfile': None}) + cfg_status = await self.klippy_apis.query_objects({'configfile': None}) except self.server.error: logging.exception("PanelDue initialization request failed") retries -= 1 @@ -285,10 +278,8 @@ class PanelDue: continue break - self.firmware_name = "Repetier | Klipper " + \ - printer_info['software_version'] - config: Dict[str, Any] = cfg_status.get( - 'configfile', {}).get('config', {}) + self.firmware_name = "Repetier | Klipper " + printer_info['software_version'] + config: Dict[str, Any] = cfg_status.get('configfile', {}).get('config', {}) printer_cfg: Dict[str, Any] = config.get('printer', {}) self.kinematics = printer_cfg.get('kinematics', "none") @@ -298,34 +289,35 @@ class PanelDue: f"Kinematics: {self.kinematics}\n" f"Printer Config: {config}\n") - # Initalize printer state and make subscription request - self.printer_state = { - 'gcode_move': {}, 'toolhead': {}, 'virtual_sdcard': {}, - 'fan': {}, 'display_status': {}, 'print_stats': {}, - 'idle_timeout': {}, 'gcode_macro PANELDUE_BEEP': {}} - sub_args = {k: None for k in self.printer_state.keys()} + # Make subscription request + sub_args: Dict[str, Optional[List[str]]] = { + "motion_report": None, + "gcode_move": None, + "toolhead": None, + "virtual_sdcard": None, + "fan": None, + "display_status": None, + 
"print_stats": None, + "idle_timeout": None, + "gcode_macro PANELDUE_BEEP": None + } self.extruder_count = 0 self.heaters = [] extruders = [] for cfg in config: if cfg.startswith("extruder"): self.extruder_count += 1 - self.printer_state[cfg] = {} extruders.append(cfg) sub_args[cfg] = None elif cfg == "heater_bed": - self.printer_state[cfg] = {} self.heaters.append(cfg) sub_args[cfg] = None extruders.sort() self.heaters.extend(extruders) try: - status: Dict[str, Any] - status = await self.klippy_apis.subscribe_objects(sub_args) + await self.klippy_apis.subscribe_objects(sub_args) except self.server.error: logging.exception("Unable to complete subscription request") - else: - self.printer_state.update(status) self.is_shutdown = False self.is_ready = True @@ -336,15 +328,9 @@ class PanelDue: # Tell the PD that the printer is "off" self.write_response({'status': 'O'}) self.last_printer_state = 'O' + self.is_ready = False self.is_shutdown = self.is_shutdown = False - def handle_status_update(self, status: Dict[str, Any]) -> None: - for obj, items in status.items(): - if obj in self.printer_state: - self.printer_state[obj].update(items) - else: - self.printer_state[obj] = items - def paneldue_beep(self, frequency: int, duration: float) -> None: duration = int(duration * 1000.) self.write_response( @@ -550,8 +536,8 @@ class PanelDue: return def write_response(self, response: Dict[str, Any]) -> None: - byte_resp = json.dumps(response) + "\r\n" - self.ser_conn.send(byte_resp.encode()) + byte_resp = jsonw.dumps(response) + b"\r\n" + self.ser_conn.send(byte_resp) def _get_printer_status(self) -> str: # PanelDue States applicable to Klipper: @@ -561,9 +547,9 @@ class PanelDue: if self.is_shutdown: return 'S' - printer_state = self.printer_state + p_state = self.printer_state sd_state: str - sd_state = printer_state['print_stats'].get('state', "standby") + sd_state = p_state.get("print_stats", {}).get("state", "standby") if sd_state == "printing": if self.last_printer_state == 'A': # Resuming @@ -571,8 +557,9 @@ class PanelDue: # Printing return 'P' elif sd_state == "paused": - p_active = printer_state['idle_timeout'].get( - 'state', 'Idle') == "Printing" + p_active = ( + p_state.get("idle_timeout", {}).get("state", 'Idle') == "Printing" + ) if p_active and self.last_printer_state != 'A': # Pausing return 'D' @@ -618,25 +605,28 @@ class PanelDue: response['axes'] = 3 p_state = self.printer_state + toolhead = p_state.get("toolhead", {}) + gcode_move = p_state.get("gcode_move", {}) self.last_printer_state = self._get_printer_status() response['status'] = self.last_printer_state - response['babystep'] = round(p_state['gcode_move'].get( - 'homing_origin', [0., 0., 0., 0.])[2], 3) + response['babystep'] = round( + gcode_move.get('homing_origin', [0., 0., 0., 0.])[2], 3 + ) # Current position pos: List[float] homed_pos: str sfactor: float - pos = p_state['toolhead'].get('position', [0., 0., 0., 0.]) + pos = p_state.get("motion_report", {}).get('live_position', [0., 0., 0., 0.]) response['pos'] = [round(p, 2) for p in pos[:3]] - homed_pos = p_state['toolhead'].get('homed_axes', "") + homed_pos = toolhead.get('homed_axes', "") response['homed'] = [int(a in homed_pos) for a in "xyz"] - sfactor = round(p_state['gcode_move'].get('speed_factor', 1.) * 100, 2) + sfactor = round(gcode_move.get('speed_factor', 1.) 
* 100, 2) response['sfactor'] = sfactor # Print Progress Tracking - sd_status = p_state['virtual_sdcard'] - print_stats = p_state['print_stats'] + sd_status = p_state.get('virtual_sdcard', {}) + print_stats = p_state.get('print_stats', {}) fname: str = print_stats.get('filename', "") sd_print_state: Optional[str] = print_stats.get('state') if sd_print_state in ['printing', 'paused']: @@ -664,8 +654,9 @@ class PanelDue: obj_height: Optional[float] obj_height = self.file_metadata.get('object_height') if obj_height: - cur_height: float = p_state['gcode_move'].get( - 'gcode_position', [0., 0., 0., 0.])[2] + cur_height: float = gcode_move.get( + 'gcode_position', [0., 0., 0., 0.] + )[2] hpct = min(1., cur_height / obj_height) times_left.append(int(est_time - est_time * hpct)) else: @@ -679,13 +670,13 @@ class PanelDue: self.current_file = "" self.file_metadata = {} - fan_speed: Optional[float] = p_state['fan'].get('speed') + fan_speed: Optional[float] = p_state.get('fan', {}).get('speed') if fan_speed is not None: response['fanPercent'] = [round(fan_speed * 100, 1)] extruder_name: str = "" if self.extruder_count > 0: - extruder_name = p_state['toolhead'].get('extruder', "") + extruder_name = toolhead.get('extruder', "") if extruder_name: tool = 0 if extruder_name != "extruder": @@ -693,12 +684,12 @@ class PanelDue: response['tool'] = tool # Report Heater Status - efactor: float = round(p_state['gcode_move'].get( - 'extrude_factor', 1.) * 100., 2) + efactor: float = round(gcode_move.get('extrude_factor', 1.) * 100., 2) for name in self.heaters: - temp: float = round(p_state[name].get('temperature', 0.0), 1) - target: float = round(p_state[name].get('target', 0.0), 1) + htr_state = p_state.get(name, {}) + temp: float = round(htr_state.get('temperature', 0.0), 1) + target: float = round(htr_state.get('target', 0.0), 1) response.setdefault('heaters', []).append(temp) response.setdefault('active', []).append(target) response.setdefault('standby', []).append(target) @@ -711,7 +702,7 @@ class PanelDue: response.setdefault('hstat', []).append(2 if target else 0) # Display message (via M117) - msg: str = p_state['display_status'].get('message', "") + msg: str = p_state.get('display_status', {}).get('message', "") if msg and msg != self.last_message: response['message'] = msg # reset the message so it only shows once. The paneldue diff --git a/moonraker/components/power.py b/moonraker/components/power.py index 660fe6a..b55edb0 100644 --- a/moonraker/components/power.py +++ b/moonraker/components/power.py @@ -1,16 +1,21 @@ -# Raspberry Pi Power Control +# Power Switch Control # +# Copyright (C) 2024 Eric Callahan # Copyright (C) 2020 Jordan Ruthe # # This file may be distributed under the terms of the GNU GPLv3 license. from __future__ import annotations import logging -import json import struct import socket import asyncio import time +import re +import shutil +from urllib.parse import quote, urlencode +from ..utils import json_wrapper as jsonw +from ..common import RequestType, KlippyState # Annotation imports from typing import ( @@ -26,14 +31,14 @@ from typing import ( ) if TYPE_CHECKING: - from confighelper import ConfigHelper - from websockets import WebRequest + from ..confighelper import ConfigHelper + from ..common import WebRequest from .machine import Machine - from . 
import klippy_apis + from .klippy_apis import KlippyAPI as APIComp from .mqtt import MQTTClient - from .template import JinjaTemplate from .http_client import HttpClient - APIComp = klippy_apis.KlippyAPI + from .klippy_connection import KlippyConnection + from .shell_command import ShellCommandFactory as ShellCommand class PrinterPower: def __init__(self, config: ConfigHelper) -> None: @@ -53,7 +58,9 @@ class PrinterPower: "rf": RFDevice, "mqtt": MQTTDevice, "smartthings": SmartThings, - "hue": HueDevice + "hue": HueDevice, + "http": GenericHTTP, + "uhubctl": UHubCtl } for section in prefix_sections: @@ -72,20 +79,24 @@ class PrinterPower: self.devices[dev.get_name()] = dev self.server.register_endpoint( - "/machine/device_power/devices", ['GET'], - self._handle_list_devices) + "/machine/device_power/devices", RequestType.GET, self._handle_list_devices + ) self.server.register_endpoint( - "/machine/device_power/status", ['GET'], - self._handle_batch_power_request) + "/machine/device_power/status", RequestType.GET, + self._handle_batch_power_request + ) self.server.register_endpoint( - "/machine/device_power/on", ['POST'], - self._handle_batch_power_request) + "/machine/device_power/on", RequestType.POST, + self._handle_batch_power_request + ) self.server.register_endpoint( - "/machine/device_power/off", ['POST'], - self._handle_batch_power_request) + "/machine/device_power/off", RequestType.POST, + self._handle_batch_power_request + ) self.server.register_endpoint( - "/machine/device_power/device", ['GET', 'POST'], - self._handle_single_power_request) + "/machine/device_power/device", RequestType.GET | RequestType.POST, + self._handle_single_power_request + ) self.server.register_remote_method( "set_device_power", self.set_device_power) self.server.register_event_handler( @@ -94,13 +105,6 @@ class PrinterPower: "job_queue:job_queue_changed", self._handle_job_queued) self.server.register_notification("power:power_changed") - async def _check_klippy_printing(self) -> bool: - kapis: APIComp = self.server.lookup_component('klippy_apis') - result: Dict[str, Any] = await kapis.query_objects( - {'print_stats': None}, default={}) - pstate = result.get('print_stats', {}).get('state', "").lower() - return pstate == "printing" - async def component_init(self) -> None: for dev in self.devices.values(): if not dev.initialize(): @@ -122,38 +126,40 @@ class PrinterPower: fname = "unknown" if len(queue): fname = queue[0].get("filename", "unknown") - logging.debug( - f"Job '{fname}' queued, powering on device [{name}]") + logging.info( + f"Power Device {name}: Job '{fname}' queued, powering on" + ) await dev.process_request("on") - async def _handle_list_devices(self, - web_request: WebRequest - ) -> Dict[str, Any]: + async def _handle_list_devices( + self, web_request: WebRequest + ) -> Dict[str, Any]: dev_list = [d.get_device_info() for d in self.devices.values()] output = {"devices": dev_list} return output - async def _handle_single_power_request(self, - web_request: WebRequest - ) -> Dict[str, Any]: + async def _handle_single_power_request( + self, web_request: WebRequest + ) -> Dict[str, Any]: dev_name: str = web_request.get_str('device') - req_action = web_request.get_action() + req_type = web_request.get_request_type() if dev_name not in self.devices: raise self.server.error(f"No valid device named {dev_name}") dev = self.devices[dev_name] - if req_action == 'GET': + if req_type == RequestType.GET: action = "status" - elif req_action == "POST": + elif req_type == RequestType.POST: action = 
web_request.get_str('action').lower() if action not in ["on", "off", "toggle"]: - raise self.server.error( - f"Invalid requested action '{action}'") + raise self.server.error(f"Invalid requested action '{action}'") + else: + raise self.server.error(f"Invalid Request Type: {req_type}") result = await dev.process_request(action) return {dev_name: result} - async def _handle_batch_power_request(self, - web_request: WebRequest - ) -> Dict[str, Any]: + async def _handle_batch_power_request( + self, web_request: WebRequest + ) -> Dict[str, Any]: args = web_request.get_args() ep = web_request.get_endpoint() if not args: @@ -168,7 +174,9 @@ class PrinterPower: result[name] = "device_not_found" return result - def set_device_power(self, device: str, state: Union[bool, str]) -> None: + def set_device_power( + self, device: str, state: Union[bool, str], force: bool = False + ) -> None: request: str = "" if isinstance(state, bool): request = "on" if state else "off" @@ -184,7 +192,8 @@ class PrinterPower: return event_loop = self.server.get_event_loop() event_loop.register_callback( - self.devices[device].process_request, request) + self.devices[device].process_request, request, force=force + ) async def add_device(self, name: str, device: PowerDevice) -> None: if name in self.devices: @@ -236,32 +245,47 @@ class PowerDevice: self.server.register_event_handler( "server:klippy_started", self._schedule_firmware_restart ) - self.bound_service: Optional[str] = config.get('bound_service', None) + self.bound_services: List[str] = [] + bound_services: List[str] = config.getlist('bound_services', []) + if config.has_option('bound_service'): + # The `bound_service` option is deprecated, however this minimal + # change does not require a warning as it can be reliably resolved + bound_services.append(config.get('bound_service')) + for svc in bound_services: + if svc.endswith(".service"): + svc = svc.rsplit(".", 1)[0] + if svc in self.bound_services: + continue + self.bound_services.append(svc) self.need_scheduled_restart = False self.on_when_queued = config.getboolean('on_when_job_queued', False) if config.has_option('on_when_upload_queued'): self.on_when_queued = config.getboolean('on_when_upload_queued', False, deprecate=True) + self.initial_state: Optional[bool] = config.getboolean( + 'initial_state', None + ) - async def _check_klippy_printing(self) -> bool: - kapis: APIComp = self.server.lookup_component('klippy_apis') - result: Dict[str, Any] = await kapis.query_objects( - {'print_stats': None}, default={}) - pstate = result.get('print_stats', {}).get('state', "").lower() - return pstate == "printing" - - def _schedule_firmware_restart(self, state: str = "") -> None: + def _schedule_firmware_restart(self, state: KlippyState) -> None: if not self.need_scheduled_restart: return self.need_scheduled_restart = False - if state == "ready": - logging.info("Klipper reports 'ready', aborting FIRMWARE_RESTART") + if state == KlippyState.READY: + logging.info( + f"Power Device {self.name}: Klipper reports 'ready', " + "aborting FIRMWARE_RESTART" + ) return + logging.info( + f"Power Device {self.name}: Sending FIRMWARE_RESTART command " + "to Klippy" + ) event_loop = self.server.get_event_loop() kapis: APIComp = self.server.lookup_component("klippy_apis") event_loop.delay_callback( self.restart_delay, kapis.do_restart, - "FIRMWARE_RESTART") + "FIRMWARE_RESTART", True + ) def get_name(self) -> str: return self.name @@ -280,20 +304,31 @@ class PowerDevice: async def process_power_changed(self) -> None: 
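#   A minimal illustration of the multi-service binding consumed by
#   process_bound_services() below. The moonraker.conf fragment is
#   hypothetical (device name and pin are placeholders):
#
#       [power printer_plug]
#       type: gpio
#       pin: gpio26
#       bound_services:
#           klipper
#           webcamd
#
#   A deprecated single-value `bound_service` entry, if present, is
#   appended to this list, and any ".service" suffix is stripped.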
         self.notify_power_changed()
-        if self.bound_service is not None:
-            machine_cmp: Machine = self.server.lookup_component("machine")
-            action = "start" if self.state == "on" else "stop"
-            await machine_cmp.do_service_action(action, self.bound_service)
+        if self.bound_services:
+            await self.process_bound_services()
         if self.state == "on" and self.klipper_restart:
             self.need_scheduled_restart = True
-            klippy_state = self.server.get_klippy_state()
-            if klippy_state in ["disconnected", "startup"]:
+            kconn: KlippyConnection = self.server.lookup_component("klippy_connection")
+            klippy_state = kconn.state
+            if not klippy_state.startup_complete():
                 # If klippy is currently disconnected or hasn't proceeded past
                 # the startup state, schedule the restart in the
                 # "klippy_started" event callback.
                 return
             self._schedule_firmware_restart(klippy_state)

+    async def process_bound_services(self) -> None:
+        if not self.bound_services:
+            return
+        machine_cmp: Machine = self.server.lookup_component("machine")
+        action = "start" if self.state == "on" else "stop"
+        for svc in self.bound_services:
+            logging.info(
+                f"Power Device {self.name}: Performing {action} action "
+                f"on bound service {svc}"
+            )
+            await machine_cmp.do_service_action(action, svc)
+
     def process_klippy_shutdown(self) -> None:
         if not self.off_when_shutdown:
             return
@@ -308,7 +343,8 @@ class PowerDevice:
             self.off_when_shutdown_delay, self._power_off_on_shutdown)

     def _power_off_on_shutdown(self) -> None:
-        if self.server.get_klippy_state() != "shutdown":
+        kconn: KlippyConnection = self.server.lookup_component("klippy_connection")
+        if kconn.state != KlippyState.SHUTDOWN:
             return
         logging.info(
             f"Powering off device '{self.name}' due to klippy shutdown")
@@ -316,36 +352,39 @@ class PowerDevice:
         power.set_device_power(self.name, "off")

     def should_turn_on_when_queued(self) -> bool:
-        return self.on_when_queued and self.state == "off"
+        return self.on_when_queued

-    def _setup_bound_service(self) -> None:
-        if self.bound_service is None:
+    def _setup_bound_services(self) -> None:
+        if not self.bound_services:
             return
-        if self.bound_service.startswith("moonraker"):
-            raise self.server.error(
-                f"Cannot bind to '{self.bound_service}' "
-                "service")
         machine_cmp: Machine = self.server.lookup_component("machine")
         sys_info = machine_cmp.get_system_info()
         avail_svcs: List[str] = sys_info.get('available_services', [])
-        if self.bound_service not in avail_svcs:
-            raise self.server.error(
-                f"Bound Service {self.bound_service} is not available")
-        logging.info(f"Power Device '{self.name}' bound to "
-                     f"service '{self.bound_service}'")
+        for svc in self.bound_services:
+            if machine_cmp.unit_name == svc:
+                raise self.server.error(
+                    f"Power Device {self.name}: Cannot bind to Moonraker "
+                    f"service {svc}."
+ ) + if svc not in avail_svcs: + raise self.server.error( + f"Bound Service {svc} is not available" + ) + svcs = ", ".join(self.bound_services) + logging.info(f"Power Device '{self.name}' bound to services: {svcs}") def init_state(self) -> Optional[Coroutine]: return None def initialize(self) -> bool: - self._setup_bound_service() + self._setup_bound_services() ret = self.init_state() if ret is not None: eventloop = self.server.get_event_loop() self.init_task = eventloop.create_task(ret) return self.state != "error" - async def process_request(self, req: str) -> str: + async def process_request(self, req: str, force: bool = False) -> str: if self.state == "init" and self.request_lock.locked(): # return immediately if the device is initializing, # otherwise its possible for this to block indefinitely @@ -365,11 +404,13 @@ class PowerDevice: if base_state != cur_state: self.notify_power_changed() return cur_state - printing = await self._check_klippy_printing() - if self.locked_while_printing and printing: - raise self.server.error( - f"Unable to change power for {self.name} " - "while printing") + if not force: + kconn: KlippyConnection + kconn = self.server.lookup_component("klippy_connection") + if self.locked_while_printing and kconn.is_printing(): + raise self.server.error( + f"Unable to change power for {self.name} " + "while printing") ret = self.set_power(req) if ret is not None: await ret @@ -394,21 +435,27 @@ class PowerDevice: return None class HTTPDevice(PowerDevice): - def __init__(self, - config: ConfigHelper, - default_port: int = -1, - default_user: str = "", - default_password: str = "", - default_protocol: str = "http" - ) -> None: + def __init__( + self, + config: ConfigHelper, + default_port: int = -1, + default_user: str = "", + default_password: str = "", + default_protocol: str = "http", + is_generic: bool = False + ) -> None: super().__init__(config) self.client: HttpClient = self.server.lookup_component("http_client") + if is_generic: + return self.addr: str = config.get("address") self.port = config.getint("port", default_port) self.user = config.load_template("user", default_user).render() self.password = config.load_template( "password", default_password).render() self.protocol = config.get("protocol", default_protocol) + if self.port == -1: + self.port = 443 if self.protocol.lower() == "https" else 80 async def init_state(self) -> None: async with self.request_lock: @@ -419,23 +466,32 @@ class HTTPDevice(PowerDevice): except asyncio.CancelledError: raise except Exception as e: - if type(last_err) != type(e) or last_err.args != e.args: - logging.info(f"Device Init Error: {self.name}\n{e}") + if type(last_err) is not type(e) or last_err.args != e.args: + logging.exception(f"Device Init Error: {self.name}") last_err = e await asyncio.sleep(5.) 
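# Note on the retry loop above (descriptive only): an init failure is
# logged just once per distinct error, then the loop sleeps five
# seconds and tries again. On success the `else` branch below records
# the reported state, corrects a configured `initial_state` that
# disagrees with it via set_power(), and syncs any bound services
# before notifying subscribers.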
continue else: self.init_task = None self.state = state + if ( + self.initial_state is not None and + state in ["on", "off"] + ): + new_state = "on" if self.initial_state else "off" + if new_state != state: + logging.info( + f"Power Device {self.name}: setting initial " + f"state to {new_state}" + ) + await self.set_power(new_state) + await self.process_bound_services() self.notify_power_changed() return - async def _send_http_command(self, - url: str, - command: str, - retries: int = 3 - ) -> Dict[str, Any]: - url = self.client.escape_url(url) + async def _send_http_command( + self, url: str, command: str, retries: int = 3 + ) -> Dict[str, Any]: response = await self.client.get( url, request_timeout=20., attempts=retries, retry_pause_time=1., enable_cache=False) @@ -479,7 +535,6 @@ class GpioDevice(PowerDevice): initial_val: Optional[int] = None ) -> None: super().__init__(config) - self.initial_state = config.getboolean('initial_state', False) self.timer: Optional[float] = config.getfloat('timer', None) if self.timer is not None and self.timer < 0.000001: raise config.error( @@ -487,11 +542,15 @@ class GpioDevice(PowerDevice): "be above 0.0") self.timer_handle: Optional[asyncio.TimerHandle] = None if initial_val is None: - initial_val = int(self.initial_state) + initial_val = int(self.initial_state or 0) self.gpio_out = config.getgpioout('pin', initial_value=initial_val) - def init_state(self) -> None: - self.set_power("on" if self.initial_state else "off") + async def init_state(self) -> None: + if self.initial_state is None: + self.set_power("off") + else: + self.set_power("on" if self.initial_state else "off") + await self.process_bound_services() def refresh_status(self) -> None: pass @@ -510,7 +569,7 @@ class GpioDevice(PowerDevice): self.state = state self._check_timer() - def _check_timer(self): + def _check_timer(self) -> None: if self.state == "on" and self.timer is not None: event_loop = self.server.get_event_loop() power: PrinterPower = self.server.lookup_component("power") @@ -533,15 +592,13 @@ class KlipperDevice(PowerDevice): raise config.error( "Option 'restart_klipper_when_powered' in section " f"[{config.get_name()}] is unsupported for 'klipper_device'") - if ( - self.bound_service is not None and - self.bound_service.startswith("klipper") - ): - # Klipper devices cannot be bound to an instance of klipper or - # klipper_mcu - raise config.error( - f"Option 'bound_service' cannot be set to {self.bound_service}" - f" for 'klipper_device' [{config.get_name()}]") + for svc in self.bound_services: + if svc.startswith("klipper"): + # Klipper devices cannot be bound to an instance of klipper or + # klipper_mcu + raise config.error( + f"Option 'bound_services' must not contain service '{svc}'" + f" for 'klipper_device' [{config.get_name()}]") self.is_shutdown: bool = False self.update_fut: Optional[asyncio.Future] = None self.timer: Optional[float] = config.getfloat( @@ -557,14 +614,12 @@ class KlipperDevice(PowerDevice): "Klipper object must be either 'output_pin' or 'gcode_macro' " f"for option 'object_name' in section [{config.get_name()}]") - self.server.register_event_handler( - "server:status_update", self._status_update) self.server.register_event_handler( "server:klippy_ready", self._handle_ready) self.server.register_event_handler( "server:klippy_disconnect", self._handle_disconnect) - def _status_update(self, data: Dict[str, Any]) -> None: + def _status_update(self, data: Dict[str, Any], _: float) -> None: self._set_state_from_data(data) def get_device_info(self) -> 
Dict[str, Any]: @@ -575,12 +630,24 @@ class KlipperDevice(PowerDevice): async def _handle_ready(self) -> None: kapis: APIComp = self.server.lookup_component('klippy_apis') sub: Dict[str, Optional[List[str]]] = {self.object_name: None} - data = await kapis.subscribe_objects(sub, None) + data = await kapis.subscribe_objects(sub, self._status_update, None) if not self._validate_data(data): self.state == "error" else: assert data is not None self._set_state_from_data(data) + if ( + self.initial_state is not None and + self.state in ["on", "off"] + ): + new_state = "on" if self.initial_state else "off" + if new_state != self.state: + logging.info( + f"Power Device {self.name}: setting initial " + f"state to {new_state}" + ) + await self.set_power(new_state) + self.notify_power_changed() async def _handle_disconnect(self) -> None: self.is_shutdown = False @@ -617,6 +684,7 @@ class KlipperDevice(PowerDevice): kapis: APIComp = self.server.lookup_component('klippy_apis') value = "1" if state == "on" else "0" await kapis.run_gcode(f"{self.gc_cmd} VALUE={value}") + assert self.update_fut is not None await asyncio.wait_for(self.update_fut, 1.) except TimeoutError: self.state = "error" @@ -662,17 +730,17 @@ class KlipperDevice(PowerDevice): in_event = self.update_fut is not None last_state = self.state self.state = state - if last_state != state and not in_event: + if last_state not in [state, "init"] and not in_event: self.notify_power_changed() - def _check_timer(self): + def _check_timer(self) -> None: if self.state == "on" and self.timer is not None: event_loop = self.server.get_event_loop() power: PrinterPower = self.server.lookup_component("power") self.timer_handle = event_loop.delay_callback( self.timer, power.set_device_power, self.name, "off") - def _reset_timer(self): + def _reset_timer(self) -> None: if self.timer_handle is not None: self.timer_handle.cancel() self.timer_handle = None @@ -761,10 +829,9 @@ class TPLinkSmartPlug(PowerDevice): # TPLink device controls multiple devices if self.output_id is not None: sysinfo = await self._send_tplink_command("info") - dev_id = sysinfo["system"]["get_sysinfo"]["deviceId"] - out_cmd["context"] = { - 'child_ids': [f"{dev_id}{self.output_id:02}"] - } + children = sysinfo["system"]["get_sysinfo"]["children"] + child_id = children[self.output_id]["id"] + out_cmd["context"] = {"child_ids": [f"{child_id}"]} elif command == "info": out_cmd = {'system': {'get_sysinfo': {}}} elif command == "clear_rules": @@ -800,14 +867,14 @@ class TPLinkSmartPlug(PowerDevice): finally: writer.close() await writer.wait_closed() - return json.loads(self._decrypt(data)) + return jsonw.loads(self._decrypt(data)) def _encrypt(self, outdata: Dict[str, Any]) -> bytes: - data = json.dumps(outdata) + data = jsonw.dumps(outdata) key = self.START_KEY res = struct.pack(">I", len(data)) for c in data: - val = key ^ ord(c) + val = key ^ c key = val res += bytes([val]) return res @@ -840,14 +907,26 @@ class TPLinkSmartPlug(PowerDevice): except asyncio.CancelledError: raise except Exception as e: - if type(last_err) != type(e) or last_err.args != e.args: - logging.info(f"Device Init Error: {self.name}\n{e}") + if type(last_err) is not type(e) or last_err.args != e.args: + logging.exception(f"Device Init Error: {self.name}") last_err = e await asyncio.sleep(5.) 
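# A minimal sketch of the "autokey" XOR framing implemented by
# _encrypt()/_decrypt() above; the 0xAB seed is the stock Kasa
# protocol value and is assumed here, since START_KEY is defined
# outside this hunk:
#
#   import struct
#
#   def kasa_encrypt(payload: bytes, key: int = 0xAB) -> bytes:
#       out = struct.pack(">I", len(payload))  # 4-byte length prefix
#       for c in payload:
#           key ^= c  # rolling key: the previous ciphertext byte
#           out += bytes([key])
#       return out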
continue else: self.init_task = None self.state = "on" if state else "off" + if ( + self.initial_state is not None and + self.state in ["on", "off"] + ): + new_state = "on" if self.initial_state else "off" + if new_state != self.state: + logging.info( + f"Power Device {self.name}: setting initial " + f"state to {new_state}" + ) + await self.set_power(new_state) + await self.process_bound_services() self.notify_power_changed() return @@ -883,14 +962,11 @@ class TPLinkSmartPlug(PowerDevice): class Tasmota(HTTPDevice): def __init__(self, config: ConfigHelper) -> None: - super().__init__(config, default_password="") + super().__init__(config, default_user="admin", default_password="") self.output_id = config.getint("output_id", 1) self.timer = config.get("timer", "") - async def _send_tasmota_command(self, - command: str, - password: Optional[str] = None - ) -> Dict[str, Any]: + async def _send_tasmota_command(self, command: str) -> Dict[str, Any]: if command in ["on", "off"]: out_cmd = f"Power{self.output_id} {command}" if self.timer != "" and command == "off": @@ -899,9 +975,12 @@ class Tasmota(HTTPDevice): out_cmd = f"Power{self.output_id}" else: raise self.server.error(f"Invalid tasmota command: {command}") - - url = f"http://{self.addr}/cm?user=admin&password=" \ - f"{self.password}&cmnd={out_cmd}" + query = urlencode({ + "user": self.user, + "password": self.password, + "cmnd": out_cmd + }) + url = f"{self.protocol}://{quote(self.addr)}/cm?{query}" return await self._send_http_command(url, command) async def _send_status_request(self) -> str: @@ -910,7 +989,7 @@ class Tasmota(HTTPDevice): state: str = res[f"POWER{self.output_id}"].lower() except KeyError as e: if self.output_id == 1: - state = res[f"POWER"].lower() + state = res["POWER"].lower() else: raise KeyError(e) return state @@ -922,7 +1001,7 @@ class Tasmota(HTTPDevice): state = res[f"POWER{self.output_id}"].lower() except KeyError as e: if self.output_id == 1: - state = res[f"POWER"].lower() + state = res["POWER"].lower() else: raise KeyError(e) return state @@ -935,34 +1014,33 @@ class Shelly(HTTPDevice): self.timer = config.get("timer", "") async def _send_shelly_command(self, command: str) -> Dict[str, Any]: - if command == "on": - out_cmd = f"relay/{self.output_id}?turn={command}" - elif command == "off": - if self.timer != "": - out_cmd = f"relay/{self.output_id}?turn=on&timer={self.timer}" - else: - out_cmd = f"relay/{self.output_id}?turn={command}" - elif command == "info": - out_cmd = f"relay/{self.output_id}" - else: + query_args: Dict[str, Any] = {} + out_cmd = f"relay/{self.output_id}" + if command in ["on", "off"]: + query_args["turn"] = command + if command == "off" and self.timer != "": + query_args["turn"] = "on" + query_args["timer"] = self.timer + elif command != "info": raise self.server.error(f"Invalid shelly command: {command}") if self.password != "": - out_pwd = f"{self.user}:{self.password}@" + out_pwd = f"{quote(self.user)}:{quote(self.password)}@" else: - out_pwd = f"" - url = f"http://{out_pwd}{self.addr}/{out_cmd}" + out_pwd = "" + query = urlencode(query_args) + url = f"{self.protocol}://{out_pwd}{quote(self.addr)}/{out_cmd}?{query}" return await self._send_http_command(url, command) async def _send_status_request(self) -> str: res = await self._send_shelly_command("info") - state: str = res[f"ison"] - timer_remaining = res[f"timer_remaining"] if self.timer != "" else 0 + state: str = res["ison"] + timer_remaining = res["timer_remaining"] if self.timer != "" else 0 return "on" if state and 
timer_remaining == 0 else "off" async def _send_power_request(self, state: str) -> str: res = await self._send_shelly_command(state) - state = res[f"ison"] - timer_remaining = res[f"timer_remaining"] if self.timer != "" else 0 + state = res["ison"] + timer_remaining = res["timer_remaining"] if self.timer != "" else 0 return "on" if state and timer_remaining == 0 else "off" @@ -972,14 +1050,14 @@ class SmartThings(HTTPDevice): self.device: str = config.get("device", "") self.token: str = config.gettemplate("token").render() - async def _send_smartthings_command(self, - command: str - ) -> Dict[str, Any]: + async def _send_smartthings_command(self, command: str) -> Dict[str, Any]: body: Optional[List[Dict[str, Any]]] = None if (command == "on" or command == "off"): method = "POST" - url = (f"{self.protocol}://{self.addr}" - f"/v1/devices/{self.device}/commands") + url = ( + f"{self.protocol}://{quote(self.addr)}" + f"/v1/devices/{quote(self.device)}/commands" + ) body = [ { "component": "main", @@ -989,8 +1067,11 @@ class SmartThings(HTTPDevice): ] elif command == "info": method = "GET" - url = (f"{self.protocol}://{self.addr}/v1/devices/{self.device}/" - "components/main/capabilities/switch/status") + url = ( + f"{self.protocol}://{quote(self.addr)}/v1/devices/" + f"{quote(self.device)}/components/main/capabilities/" + "switch/status" + ) else: raise self.server.error( f"Invalid SmartThings command: {command}") @@ -998,7 +1079,6 @@ class SmartThings(HTTPDevice): headers = { 'Authorization': f'Bearer {self.token}' } - url = self.client.escape_url(url) response = await self.client.request( method, url, body=body, headers=headers, attempts=3, enable_cache=False @@ -1023,26 +1103,32 @@ class HomeSeer(HTTPDevice): super().__init__(config, default_user="admin", default_password="") self.device = config.getint("device") - async def _send_homeseer(self, - request: str, - additional: str = "" - ) -> Dict[str, Any]: - url = (f"http://{self.user}:{self.password}@{self.addr}" - f"/JSON?user={self.user}&pass={self.password}" - f"&request={request}&ref={self.device}&{additional}") + async def _send_homeseer( + self, request: str, state: str = "" + ) -> Dict[str, Any]: + query_args = { + "user": self.user, + "pass": self.password, + "request": request, + "ref": self.device, + } + if state: + query_args["label"] = state + query = urlencode(query_args) + url = ( + f"{self.protocol}://{quote(self.user)}:{quote(self.password)}@" + f"{quote(self.addr)}:{self.port}/JSON?{query}" + ) return await self._send_http_command(url, request) async def _send_status_request(self) -> str: res = await self._send_homeseer("getstatus") - return res[f"Devices"][0]["status"].lower() + return res["Devices"][0]["status"].lower() async def _send_power_request(self, state: str) -> str: - if state == "on": - state_hs = "On" - elif state == "off": - state_hs = "Off" - res = await self._send_homeseer("controldevicebylabel", - f"label={state_hs}") + await self._send_homeseer( + "controldevicebylabel", state.capitalize() + ) return state @@ -1054,26 +1140,23 @@ class HomeAssistant(HTTPDevice): self.domain: str = config.get("domain", "switch") self.status_delay: float = config.getfloat("status_delay", 1.) 
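# For reference, the rewritten _send_homeassistant_command() below
# issues requests of this shape (values substituted from the config
# options above; sketch only):
#
#   POST {protocol}://{addr}:{port}/api/services/{domain}/turn_{on|off}
#   Authorization: Bearer {token}
#   body: {"entity_id": "<device>"}
#
# and reads the device state back from:
#
#   GET {protocol}://{addr}:{port}/api/states/{device}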
- async def _send_homeassistant_command(self, - command: str - ) -> Dict[str, Any]: + async def _send_homeassistant_command(self, command: str) -> Dict[str, Any]: body: Optional[Dict[str, Any]] = None if command in ["on", "off"]: - out_cmd = f"api/services/{self.domain}/turn_{command}" + out_cmd = f"api/services/{quote(self.domain)}/turn_{command}" body = {"entity_id": self.device} method = "POST" elif command == "info": - out_cmd = f"api/states/{self.device}" + out_cmd = f"api/states/{quote(self.device)}" method = "GET" else: raise self.server.error( f"Invalid homeassistant command: {command}") - url = f"{self.protocol}://{self.addr}:{self.port}/{out_cmd}" + url = f"{self.protocol}://{quote(self.addr)}:{self.port}/{out_cmd}" headers = { 'Authorization': f'Bearer {self.token}' } data: Dict[str, Any] = {} - url = self.client.escape_url(url) response = await self.client.request( method, url, body=body, headers=headers, attempts=3, enable_cache=False @@ -1086,7 +1169,7 @@ class HomeAssistant(HTTPDevice): async def _send_status_request(self) -> str: res = await self._send_homeassistant_command("info") - return res[f"state"] + return res["state"] async def _send_power_request(self, state: str) -> str: await self._send_homeassistant_command(state) @@ -1102,26 +1185,26 @@ class Loxonev1(HTTPDevice): async def _send_loxonev1_command(self, command: str) -> Dict[str, Any]: if command in ["on", "off"]: - out_cmd = f"jdev/sps/io/{self.output_id}/{command}" + out_cmd = f"jdev/sps/io/{quote(self.output_id)}/{command}" elif command == "info": - out_cmd = f"jdev/sps/io/{self.output_id}" + out_cmd = f"jdev/sps/io/{quote(self.output_id)}" else: raise self.server.error(f"Invalid loxonev1 command: {command}") if self.password != "": - out_pwd = f"{self.user}:{self.password}@" + out_pwd = f"{quote(self.user)}:{quote(self.password)}@" else: - out_pwd = f"" - url = f"http://{out_pwd}{self.addr}/{out_cmd}" + out_pwd = "" + url = f"http://{out_pwd}{quote(self.addr)}/{out_cmd}" return await self._send_http_command(url, command) async def _send_status_request(self) -> str: res = await self._send_loxonev1_command("info") - state = res[f"LL"][f"value"] + state = res["LL"]["value"] return "on" if int(state) == 1 else "off" async def _send_power_request(self, state: str) -> str: res = await self._send_loxonev1_command(state) - state = res[f"LL"][f"value"] + state = res["LL"]["value"] return "on" if int(state) == 1 else "off" @@ -1131,7 +1214,7 @@ class MQTTDevice(PowerDevice): self.mqtt: MQTTClient = self.server.load_component(config, 'mqtt') self.eventloop = self.server.get_event_loop() self.cmd_topic: str = config.get('command_topic') - self.cmd_payload: JinjaTemplate = config.gettemplate('command_payload') + self.cmd_payload = config.gettemplate('command_payload') self.retain_cmd_state = config.getboolean('retain_command_state', False) self.query_topic: Optional[str] = config.get('query_topic', None) self.query_payload = config.gettemplate('query_payload', None) @@ -1196,6 +1279,7 @@ class MQTTDevice(PowerDevice): while self.mqtt.is_connected(): self.query_response = self.eventloop.create_future() try: + assert self.query_response is not None await self._wait_for_update(self.query_response) except asyncio.TimeoutError: # Only wait once if no query topic is set. 
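# A minimal sketch of the query round-trip driven above, assuming the
# publish helper shown elsewhere in this class:
#
#   self.query_response = self.eventloop.create_future()
#   await self.mqtt.publish_topic(self.query_topic, payload, self.qos)
#   await self._wait_for_update(self.query_response)
#
# The future is resolved from the state-topic subscription, so a device
# that never reports its state times out instead; that is why the loop
# above waits only once when no query topic is configured.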
@@ -1220,6 +1304,20 @@ class MQTTDevice(PowerDevice): else: logging.info( f"MQTT Power Device {self.name} initialized") + if ( + self.initial_state is not None and + self.state in ["on", "off"] + ): + new_state = "on" if self.initial_state else "off" + if new_state != self.state: + logging.info( + f"Power Device {self.name}: setting initial " + f"state to {new_state}" + ) + await self.set_power(new_state) + await self.process_bound_services() + # Don't reset on next connection + self.initial_state = None self.notify_power_changed() async def _on_mqtt_disconnected(self): @@ -1244,6 +1342,7 @@ class MQTTDevice(PowerDevice): "MQTT Not Connected", 503) self.query_response = self.eventloop.create_future() try: + assert self.query_response is not None await self._wait_for_update(self.query_response) except Exception: logging.exception(f"MQTT Power Device {self.name}: " @@ -1270,6 +1369,7 @@ class MQTTDevice(PowerDevice): self.query_response = self.eventloop.create_future() new_state = "error" try: + assert self.query_response is not None payload = self.cmd_payload.render({'command': state}) await self.mqtt.publish_topic( self.cmd_topic, payload, self.qos, @@ -1291,30 +1391,185 @@ class MQTTDevice(PowerDevice): class HueDevice(HTTPDevice): def __init__(self, config: ConfigHelper) -> None: - super().__init__(config) + super().__init__(config, default_port=80) self.device_id = config.get("device_id") + self.device_type = config.get("device_type", "light") + if self.device_type == "group": + self.state_key = "action" + self.on_state = "all_on" + else: + self.state_key = "state" + self.on_state = "on" async def _send_power_request(self, state: str) -> str: new_state = True if state == "on" else False url = ( - f"http://{self.addr}/api/{self.user}/lights/{self.device_id}/state" + f"{self.protocol}://{quote(self.addr)}:{self.port}/api/{quote(self.user)}" + f"/{self.device_type}s/{quote(self.device_id)}" + f"/{quote(self.state_key)}" ) - url = self.client.escape_url(url) ret = await self.client.request("PUT", url, body={"on": new_state}) resp = cast(List[Dict[str, Dict[str, Any]]], ret.json()) + state_url = ( + f"/{self.device_type}s/{self.device_id}/{self.state_key}/on" + ) return ( - "on" if resp[0]["success"][f"/lights/{self.device_id}/state/on"] + "on" if resp[0]["success"][state_url] else "off" ) async def _send_status_request(self) -> str: url = ( - f"http://{self.addr}/api/{self.user}/lights/{self.device_id}" + f"{self.protocol}://{quote(self.addr)}:{self.port}/api/{quote(self.user)}" + f"/{self.device_type}s/{quote(self.device_id)}" ) - url = self.client.escape_url(url) ret = await self.client.request("GET", url) resp = cast(Dict[str, Dict[str, Any]], ret.json()) - return "on" if resp["state"]["on"] else "off" + return "on" if resp["state"][self.on_state] else "off" + +class GenericHTTP(HTTPDevice): + def __init__(self, config: ConfigHelper,) -> None: + super().__init__(config, is_generic=True) + self.urls: Dict[str, str] = { + "on": config.gettemplate("on_url").render(), + "off": config.gettemplate("off_url").render(), + "status": config.gettemplate("status_url").render() + } + self.request_template = config.gettemplate( + "request_template", None, is_async=True + ) + self.response_template = config.gettemplate("response_template", is_async=True) + + async def _send_generic_request(self, command: str) -> str: + request = self.client.wrap_request( + self.urls[command], request_timeout=20., attempts=3, retry_pause_time=1. 
+ ) + context: Dict[str, Any] = { + "command": command, + "http_request": request, + "async_sleep": asyncio.sleep, + "log_debug": logging.debug, + "urls": dict(self.urls) + } + if self.request_template is not None: + await self.request_template.render_async(context) + response = request.last_response() + if response is None: + raise self.server.error("Failed to receive a response") + else: + response = await request.send() + response.raise_for_status() + result = (await self.response_template.render_async(context)).lower() + if result not in ["on", "off"]: + raise self.server.error(f"Invalid result: {result}") + return result + + async def _send_power_request(self, state: str) -> str: + return await self._send_generic_request(state) + + async def _send_status_request(self) -> str: + return await self._send_generic_request("status") + + +HUB_STATE_PATTERN = r""" + (?:Port\s(?P[0-9]+):) + (?:\s(?P[0-9a-f]{4})) + (?:\s(?Ppower|off)) + (?P(?:\s[0-9a-z]+)+)? + (?:\s\[(?P.+)\])? +""" + +class UHubCtl(PowerDevice): + _uhubctrl_regex = re.compile( + r"^\s*" + HUB_STATE_PATTERN + r"\s*$", + re.VERBOSE | re.IGNORECASE + ) + def __init__(self, config: ConfigHelper) -> None: + super().__init__(config) + self.scmd: ShellCommand = self.server.load_component(config, "shell_command") + self.location = config.get("location") + self.port = config.getint("port") + ret = shutil.which("uhubctl") + if ret is None: + raise config.error( + f"[{config.get_name()}]: failed to locate 'uhubctl' binary. " + "Make sure uhubctl is correctly installed on the host machine." + ) + + async def init_state(self) -> None: + async with self.request_lock: + await self.refresh_status() + cur_state = True if self.state == "on" else False + if self.initial_state is not None and cur_state != self.initial_state: + await self.set_power("on" if self.initial_state else "off") + + async def refresh_status(self) -> None: + try: + result = await self._run_uhubctl("info") + except self.server.error as e: + self.state = "error" + output = f"\n{e}" + if isinstance(e, self.scmd.error): + output += f"\nuhubctrl output: {e.stderr.decode(errors='ignore')}" + logging.info(f"Power Device {self.name}: Refresh Error{output}") + return + logging.debug(f"Power Device {self.name}: uhubctl device info: {result}") + self.state = result["state"] + + async def set_power(self, state: str) -> None: + try: + result = await self._run_uhubctl(state) + except self.server.error as e: + self.state = "error" + msg = f"Power Device {self.name}: Error turning device {state}" + output = f"\n{e}" + if isinstance(e, self.scmd.error): + output += f"\nuhubctrl output: {e.stderr.decode(errors='ignore')}" + logging.info(f"{msg}{output}") + raise self.server.error(msg) from None + logging.debug(f"Power Device {self.name}: uhubctl device info: {result}") + self.state = result["state"] + + async def _run_uhubctl(self, action: str) -> Dict[str, Any]: + cmd = f"uhubctl -l {self.location} -p {self.port}" + search_prefix = "Current status" + if action in ["on", "off"]: + cmd += f" -a {action}" + search_prefix = "New status" + resp: str = await self.scmd.exec_cmd(cmd, log_complete=False) + for line in resp.splitlines(): + if search_prefix: + if line.startswith(search_prefix): + search_prefix = "" + continue + match = self._uhubctrl_regex.match(line.strip()) + if match is None: + continue + result = match.groupdict() + try: + port = int(result["port"]) + status_bits = int(result["bits"], 16) + except (TypeError, ValueError): + continue + if port != self.port: + continue + if 
result["pstate"] is None: + continue + state = "on" if result["pstate"] == "power" else "off" + flags: List[str] = [] + if result["flags"] is not None: + flags = result["flags"].strip().split() + return { + "port": port, + "status_bits": status_bits, + "state": state, + "flags": flags, + "desc": result["desc"] + } + raise self.server.error( + f"Failed to receive response for device at location {self.location}, " + f"port {self.port}, " + ) # The power component has multiple configuration sections diff --git a/moonraker/components/proc_stats.py b/moonraker/components/proc_stats.py index c4b541f..fd4ee88 100644 --- a/moonraker/components/proc_stats.py +++ b/moonraker/components/proc_stats.py @@ -6,12 +6,16 @@ from __future__ import annotations import asyncio +import struct +import fcntl import time import re import os import pathlib import logging from collections import deque +from ..utils import ioctl_macros +from ..common import RequestType # Annotation imports from typing import ( @@ -26,12 +30,13 @@ from typing import ( Dict, ) if TYPE_CHECKING: - from confighelper import ConfigHelper - from websockets import WebRequest, WebsocketManager - from . import shell_command + from ..confighelper import ConfigHelper + from ..common import WebRequest + from .websockets import WebsocketManager STAT_CALLBACK = Callable[[int], Optional[Awaitable]] VC_GEN_CMD_FILE = "/usr/bin/vcgencmd" +VCIO_PATH = "/dev/vcio" STATM_FILE_PATH = "/proc/self/smaps_rollup" NET_DEV_PATH = "/proc/net/dev" TEMPERATURE_PATH = "/sys/class/thermal/thermal_zone0/temp" @@ -61,13 +66,10 @@ class ProcStats: self.watchdog = Watchdog(self) self.stat_update_timer = self.event_loop.register_timer( self._handle_stat_update) - self.vcgencmd: Optional[shell_command.ShellCommand] = None - if os.path.exists(VC_GEN_CMD_FILE): + self.vcgencmd: Optional[VCGenCmd] = None + if os.path.exists(VC_GEN_CMD_FILE) and os.path.exists(VCIO_PATH): logging.info("Detected 'vcgencmd', throttle checking enabled") - shell_cmd: shell_command.ShellCommandFactory - shell_cmd = self.server.load_component(config, "shell_command") - self.vcgencmd = shell_cmd.build_shell_command( - "vcgencmd get_throttled") + self.vcgencmd = VCGenCmd() self.server.register_notification("proc_stats:cpu_throttled") else: logging.info("Unable to find 'vcgencmd', throttle checking " @@ -78,9 +80,11 @@ class ProcStats: self.cpu_stats_file = pathlib.Path(CPU_STAT_PATH) self.meminfo_file = pathlib.Path(MEM_AVAIL_PATH) self.server.register_endpoint( - "/machine/proc_stats", ["GET"], self._handle_stat_request) + "/machine/proc_stats", RequestType.GET, self._handle_stat_request + ) self.server.register_event_handler( - "server:klippy_shutdown", self._handle_shutdown) + "server:klippy_shutdown", self._handle_shutdown + ) self.server.register_notification("proc_stats:proc_stat_update") self.proc_stat_queue: Deque[Dict[str, Any]] = deque(maxlen=30) self.last_update_time = time.time() @@ -170,17 +174,19 @@ class ProcStats: 'system_memory': self.memory_usage, 'websocket_connections': websocket_count }) - if not self.update_sequence % THROTTLE_CHECK_INTERVAL: - if self.vcgencmd is not None: - ts = await self._check_throttled_state() - cur_throttled = ts['bits'] - if cur_throttled & ~self.total_throttled: - self.server.add_log_rollover_item( - 'throttled', f"CPU Throttled Flags: {ts['flags']}") - if cur_throttled != self.last_throttled: - self.server.send_event("proc_stats:cpu_throttled", ts) - self.last_throttled = cur_throttled - self.total_throttled |= cur_throttled + if ( + not 
self.update_sequence % THROTTLE_CHECK_INTERVAL + and self.vcgencmd is not None + ): + ts = await self._check_throttled_state() + cur_throttled = ts['bits'] + if cur_throttled & ~self.total_throttled: + self.server.add_log_rollover_item( + 'throttled', f"CPU Throttled Flags: {ts['flags']}") + if cur_throttled != self.last_throttled: + self.server.send_event("proc_stats:cpu_throttled", ts) + self.last_throttled = cur_throttled + self.total_throttled |= cur_throttled for cb in self.stat_callbacks: ret = cb(self.update_sequence) if ret is not None: @@ -191,19 +197,18 @@ class ProcStats: return eventtime + STAT_UPDATE_TIME async def _check_throttled_state(self) -> Dict[str, Any]: - async with self.throttle_check_lock: - assert self.vcgencmd is not None - try: - resp = await self.vcgencmd.run_with_response( - timeout=.5, log_complete=False) - ts = int(resp.strip().split("=")[-1], 16) - except Exception: - return {'bits': 0, 'flags': ["?"]} - flags = [] - for flag, desc in THROTTLED_FLAGS.items(): - if flag & ts: - flags.append(desc) - return {'bits': ts, 'flags': flags} + ret = {'bits': 0, 'flags': ["?"]} + if self.vcgencmd is not None: + async with self.throttle_check_lock: + try: + resp = await self.event_loop.run_in_thread(self.vcgencmd.run) + ret["bits"] = tstate = int(resp.strip().split("=")[-1], 16) + ret["flags"] = [ + desc for flag, desc in THROTTLED_FLAGS.items() if flag & tstate + ] + except Exception: + pass + return ret def _read_system_files(self) -> Tuple: mem, units = self._get_memory_usage() @@ -242,7 +247,13 @@ class ProcStats: parsed_stats = stats.strip().split() net_stats[dev_name] = { 'rx_bytes': int(parsed_stats[0]), - 'tx_bytes': int(parsed_stats[8]) + 'tx_bytes': int(parsed_stats[8]), + 'rx_packets': int(parsed_stats[1]), + 'tx_packets': int(parsed_stats[9]), + 'rx_errs': int(parsed_stats[2]), + 'tx_errs': int(parsed_stats[10]), + 'rx_drop': int(parsed_stats[3]), + 'tx_drop': int(parsed_stats[11]) } return net_stats except Exception: @@ -332,5 +343,52 @@ class Watchdog: def stop(self): self.watchdog_timer.stop() +class VCGenCmd: + """ + This class uses the BCM2835 Mailbox to directly query the throttled + state. This should be less resource intensive than calling "vcgencmd" + in a subprocess. 
+ """ + MAX_STRING_SIZE = 1024 + GET_RESULT_CMD = 0x00030080 + UINT_SIZE = struct.calcsize("@I") + def __init__(self) -> None: + self.cmd_struct = struct.Struct(f"@6I{self.MAX_STRING_SIZE}sI") + self.cmd_buf = bytearray(self.cmd_struct.size) + self.mailbox_req = ioctl_macros.IOWR(100, 0, "c_char_p") + self.err_logged: bool = False + + def run(self, cmd: str = "get_throttled") -> str: + try: + fd = os.open(VCIO_PATH, os.O_RDWR) + self.cmd_struct.pack_into( + self.cmd_buf, 0, + self.cmd_struct.size, + 0x00000000, + self.GET_RESULT_CMD, + self.MAX_STRING_SIZE, + 0, + 0, + cmd.encode("utf-8"), + 0x00000000 + ) + fcntl.ioctl(fd, self.mailbox_req, self.cmd_buf) + except OSError: + if not self.err_logged: + logging.exception("VCIO vcgencmd failed") + self.err_logged = True + return "" + finally: + os.close(fd) + result = self.cmd_struct.unpack_from(self.cmd_buf) + ret: int = result[5] + if ret: + logging.info(f"vcgencmd returned {ret}") + resp: bytes = result[6] + null_index = resp.find(b'\x00') + if null_index <= 0: + return "" + return resp[:null_index].decode() + def load_component(config: ConfigHelper) -> ProcStats: return ProcStats(config) diff --git a/moonraker/components/secrets.py b/moonraker/components/secrets.py index 578f2e1..5ecd7e3 100644 --- a/moonraker/components/secrets.py +++ b/moonraker/components/secrets.py @@ -7,7 +7,7 @@ from __future__ import annotations import pathlib import logging import configparser -import json +from ..utils import json_wrapper as jsonw from typing import ( TYPE_CHECKING, Dict, @@ -15,22 +15,21 @@ from typing import ( Any ) if TYPE_CHECKING: - from confighelper import ConfigHelper + from ..confighelper import ConfigHelper class Secrets: def __init__(self, config: ConfigHelper) -> None: server = config.get_server() - self.secrets_file: Optional[pathlib.Path] = None - path: Optional[str] = config.get('secrets_path', None) + path: Optional[str] = config.get("secrets_path", None, deprecate=True) + app_args = server.get_app_args() + data_path = app_args["data_path"] + fpath = pathlib.Path(data_path).joinpath("moonraker.secrets") + if not fpath.is_file() and path is not None: + fpath = pathlib.Path(path).expanduser().resolve() self.type = "invalid" self.values: Dict[str, Any] = {} - if path is not None: - self.secrets_file = pathlib.Path(path).expanduser().resolve() - if not self.secrets_file.is_file(): - server.add_warning( - "[secrets]: option 'secrets_path', file does not exist: " - f"'{self.secrets_file}'") - return + self.secrets_file = fpath + if fpath.is_file(): data = self.secrets_file.read_text() vals = self._parse_json(data) if vals is not None: @@ -52,10 +51,17 @@ class Secrets: self.type = "ini" logging.debug(f"[secrets]: Loaded {self.type} file: " f"{self.secrets_file}") + elif path is not None: + server.add_warning( + "[secrets]: option 'secrets_path', file does not exist: " + f"'{self.secrets_file}'") else: logging.debug( "[secrets]: Option `secrets_path` not supplied") + def get_secrets_file(self) -> pathlib.Path: + return self.secrets_file + def _parse_ini(self, data: str) -> Optional[Dict[str, Any]]: try: cfg = configparser.ConfigParser(interpolation=None) @@ -66,8 +72,8 @@ class Secrets: def _parse_json(self, data: str) -> Optional[Dict[str, Any]]: try: - return json.loads(data) - except json.JSONDecodeError: + return jsonw.loads(data) + except jsonw.JSONDecodeError: return None def get_type(self) -> str: diff --git a/moonraker/components/sensor.py b/moonraker/components/sensor.py new file mode 100644 index 0000000..4b3bcdb --- /dev/null 
+++ b/moonraker/components/sensor.py @@ -0,0 +1,346 @@ +# Generic sensor support +# +# Copyright (C) 2022 Morton Jonuschat +# +# This file may be distributed under the terms of the GNU GPLv3 license. + +# Component to read additional generic sensor data and make it +# available to clients +from __future__ import annotations + +import logging +from collections import defaultdict, deque +from functools import partial +from ..common import RequestType, HistoryFieldData + +# Annotation imports +from typing import ( + Any, + DefaultDict, + Deque, + Dict, + List, + Optional, + Type, + TYPE_CHECKING, + Union, + Callable +) + +if TYPE_CHECKING: + from ..confighelper import ConfigHelper + from ..common import WebRequest + from .mqtt import MQTTClient + from .history import History + +SENSOR_UPDATE_TIME = 1.0 +SENSOR_EVENT_NAME = "sensors:sensor_update" + +def _set_result( + name: str, value: Union[int, float], store: Dict[str, Union[int, float]] +) -> None: + if not isinstance(value, (int, float)): + store[name] = float(value) + else: + store[name] = value + + +class BaseSensor: + def __init__(self, config: ConfigHelper) -> None: + self.server = config.get_server() + self.error_state: Optional[str] = None + self.id = config.get_name().split(maxsplit=1)[-1] + self.type = config.get("type") + self.name = config.get("name", self.id) + self.last_measurements: Dict[str, Union[int, float]] = {} + self.last_value: Dict[str, Union[int, float]] = {} + store_size = config.getint("sensor_store_size", 1200) + self.values: DefaultDict[str, Deque[Union[int, float]]] = defaultdict( + lambda: deque(maxlen=store_size) + ) + self.param_info: List[Dict[str, str]] = [] + history: History = self.server.lookup_component("history") + self.field_info: Dict[str, List[HistoryFieldData]] = {} + all_opts = list(config.get_options().keys()) + cfg_name = config.get_name() + param_prefix = "parameter_" + hist_field_prefix = "history_field_" + for opt in all_opts: + if opt.startswith(param_prefix): + name = opt[len(param_prefix):] + data = config.getdict(opt) + data["name"] = opt[len(param_prefix):] + self.param_info.append(data) + continue + if not opt.startswith(hist_field_prefix): + continue + name = opt[len(hist_field_prefix):] + field_cfg: Dict[str, str] = config.getdict(opt) + ident: Optional[str] = field_cfg.pop("parameter", None) + if ident is None: + raise config.error( + f"[{cfg_name}]: option '{opt}', key 'parameter' must be" + f"specified" + ) + do_init: str = field_cfg.pop("init_tracker", "false").lower() + reset_cb = self._gen_reset_callback(ident) if do_init == "true" else None + excl_paused: str = field_cfg.pop("exclude_paused", "false").lower() + report_total: str = field_cfg.pop("report_total", "false").lower() + report_max: str = field_cfg.pop("report_maximum", "false").lower() + precision: Optional[str] = field_cfg.pop("precision", None) + try: + fdata = HistoryFieldData( + name, + cfg_name, + field_cfg.pop("desc", f"{ident} tracker"), + field_cfg.pop("strategy", "basic"), + units=field_cfg.pop("units", None), + reset_callback=reset_cb, + exclude_paused=excl_paused == "true", + report_total=report_total == "true", + report_maximum=report_max == "true", + precision=int(precision) if precision is not None else None, + ) + except Exception as e: + raise config.error( + f"[{cfg_name}]: option '{opt}', error encountered during " + f"sensor field configuration: {e}" + ) from e + for key in field_cfg.keys(): + self.server.add_warning( + f"[{cfg_name}]: Option '{opt}' contains invalid key '{key}'" + ) + 
self.field_info.setdefault(ident, []).append(fdata) + history.register_auxiliary_field(fdata) + + def _gen_reset_callback(self, param_name: str) -> Callable[[], float]: + def on_reset() -> float: + return self.last_measurements.get(param_name, 0) + return on_reset + + def _update_sensor_value(self, eventtime: float) -> None: + """ + Append the last updated value to the store. + """ + for key, value in self.last_measurements.items(): + self.values[key].append(value) + + # Copy the last measurements data + self.last_value = {**self.last_measurements} + + async def initialize(self) -> bool: + """ + Sensor initialization executed on Moonraker startup. + """ + logging.info("Registered sensor '%s'", self.name) + return True + + def get_sensor_info(self, extended: bool = False) -> Dict[str, Any]: + ret: Dict[str, Any] = { + "id": self.id, + "friendly_name": self.name, + "type": self.type, + "values": self.last_measurements, + } + if extended: + ret["parameter_info"] = self.param_info + history_fields: List[Dict[str, Any]] = [] + for parameter, field_list in self.field_info.items(): + for field_data in field_list: + field_config = field_data.get_configuration() + field_config["parameter"] = parameter + history_fields.append(field_config) + ret["history_fields"] = history_fields + return ret + + def get_sensor_measurements(self) -> Dict[str, List[Union[int, float]]]: + return {key: list(values) for key, values in self.values.items()} + + def get_name(self) -> str: + return self.name + + def close(self) -> None: + pass + + +class MQTTSensor(BaseSensor): + def __init__(self, config: ConfigHelper) -> None: + super().__init__(config=config) + self.mqtt: MQTTClient = self.server.load_component(config, "mqtt") + self.state_topic: str = config.get("state_topic") + self.state_response = config.gettemplate("state_response_template") + self.qos: Optional[int] = config.getint("qos", None, minval=0, maxval=2) + self.server.register_event_handler( + "mqtt:disconnected", self._on_mqtt_disconnected + ) + + def _on_state_update(self, payload: bytes) -> None: + measurements: Dict[str, Union[int, float]] = {} + context = { + "payload": payload.decode(), + "set_result": partial(_set_result, store=measurements), + "log_debug": logging.debug + } + + try: + self.state_response.render(context) + except Exception as e: + logging.error("Error updating sensor results: %s", e) + self.error_state = str(e) + else: + self.error_state = None + self.last_measurements = measurements + for name, value in measurements.items(): + fdata_list = self.field_info.get(name) + if fdata_list is None: + continue + for fdata in fdata_list: + fdata.tracker.update(value) + + async def _on_mqtt_disconnected(self): + self.error_state = "MQTT Disconnected" + self.last_measurements = {} + + async def initialize(self) -> bool: + await super().initialize() + try: + self.mqtt.subscribe_topic( + self.state_topic, + self._on_state_update, + self.qos, + ) + self.error_state = None + return True + except Exception as e: + self.error_state = str(e) + return False + + +class Sensors: + __sensor_types: Dict[str, Type[BaseSensor]] = {"MQTT": MQTTSensor} + + def __init__(self, config: ConfigHelper) -> None: + self.server = config.get_server() + self.sensors: Dict[str, BaseSensor] = {} + + # Register timer to update sensor values in store + self.sensors_update_timer = self.server.get_event_loop().register_timer( + self._update_sensor_values + ) + + # Register endpoints + self.server.register_endpoint( + "/server/sensors/list", + RequestType.GET, + 
self._handle_sensor_list_request, + ) + self.server.register_endpoint( + "/server/sensors/info", + RequestType.GET, + self._handle_sensor_info_request, + ) + self.server.register_endpoint( + "/server/sensors/measurements", + RequestType.GET, + self._handle_sensor_measurements_request, + ) + + # Register notifications + self.server.register_notification(SENSOR_EVENT_NAME) + prefix_sections = config.get_prefix_sections("sensor ") + for section in prefix_sections: + cfg = config[section] + try: + try: + _, name = cfg.get_name().split(maxsplit=1) + except ValueError: + raise cfg.error(f"Invalid section name: {cfg.get_name()}") + logging.info(f"Configuring sensor: {name}") + sensor_type: str = cfg.get("type") + sensor_class: Optional[Type[BaseSensor]] = self.__sensor_types.get( + sensor_type.upper(), None + ) + if sensor_class is None: + raise config.error(f"Unsupported sensor type: {sensor_type}") + + self.sensors[name] = sensor_class(cfg) + except Exception as e: + # Ensures that configuration errors are shown to the user + self.server.add_warning( + f"Failed to configure sensor [{cfg.get_name()}]\n{e}" + ) + continue + + def _update_sensor_values(self, eventtime: float) -> float: + """ + Iterate through the sensors and store the last updated value. + """ + changed_data: Dict[str, Dict[str, Union[int, float]]] = {} + for sensor_name, sensor in self.sensors.items(): + base_value = sensor.last_value + sensor._update_sensor_value(eventtime=eventtime) + + # Notify if a change in sensor values was detected + if base_value != sensor.last_value: + changed_data[sensor_name] = sensor.last_value + if changed_data: + self.server.send_event(SENSOR_EVENT_NAME, changed_data) + return eventtime + SENSOR_UPDATE_TIME + + async def component_init(self) -> None: + try: + logging.debug("Initializing sensor component") + for sensor in self.sensors.values(): + if not await sensor.initialize(): + self.server.add_warning( + f"Sensor '{sensor.get_name()}' failed to initialize" + ) + + self.sensors_update_timer.start() + except Exception as e: + logging.exception(e) + + async def _handle_sensor_list_request( + self, web_request: WebRequest + ) -> Dict[str, Dict[str, Any]]: + extended = web_request.get_boolean("extended", False) + return { + "sensors": { + key: sensor.get_sensor_info(extended) + for key, sensor in self.sensors.items() + } + } + + async def _handle_sensor_info_request( + self, web_request: WebRequest + ) -> Dict[str, Any]: + sensor_name: str = web_request.get_str("sensor") + extended = web_request.get_boolean("extended", False) + if sensor_name not in self.sensors: + raise self.server.error(f"No valid sensor named {sensor_name}") + sensor = self.sensors[sensor_name] + return sensor.get_sensor_info(extended) + + async def _handle_sensor_measurements_request( + self, web_request: WebRequest + ) -> Dict[str, Dict[str, Any]]: + sensor_name: str = web_request.get_str("sensor", "") + if sensor_name: + sensor = self.sensors.get(sensor_name, None) + if sensor is None: + raise self.server.error(f"No valid sensor named {sensor_name}") + return {sensor_name: sensor.get_sensor_measurements()} + else: + return { + key: sensor.get_sensor_measurements() + for key, sensor in self.sensors.items() + } + + def close(self) -> None: + self.sensors_update_timer.stop() + for sensor in self.sensors.values(): + sensor.close() + + +def load_component(config: ConfigHelper) -> Sensors: + return Sensors(config) diff --git a/moonraker/components/shell_command.py b/moonraker/components/shell_command.py index 43ae920..959ae74 100644 
--- a/moonraker/components/shell_command.py +++ b/moonraker/components/shell_command.py @@ -10,7 +10,7 @@ import shlex import logging import signal import asyncio -from utils import ServerError +from ..utils import ServerError # Annotation imports from typing import ( @@ -22,47 +22,48 @@ from typing import ( Coroutine, Dict, Set, + cast ) if TYPE_CHECKING: - from confighelper import ConfigHelper + from ..confighelper import ConfigHelper OutputCallback = Optional[Callable[[bytes], None]] class ShellCommandError(ServerError): - def __init__(self, - message: str, - return_code: Optional[int], - stdout: Optional[bytes] = b"", - stderr: Optional[bytes] = b"", - status_code: int = 500 - ) -> None: + def __init__( + self, + message: str, + return_code: Optional[int], + stdout: Optional[bytes] = b"", + stderr: Optional[bytes] = b"", + status_code: int = 500 + ) -> None: super().__init__(message, status_code=status_code) self.stdout = stdout or b"" self.stderr = stderr or b"" self.return_code = return_code class ShellCommandProtocol(asyncio.subprocess.SubprocessStreamProtocol): - def __init__(self, - limit: int, - loop: asyncio.events.AbstractEventLoop, - program_name: str = "", - std_out_cb: OutputCallback = None, - std_err_cb: OutputCallback = None, - log_stderr: bool = False - ) -> None: + def __init__( + self, + limit: int, + loop: asyncio.events.AbstractEventLoop, + std_out_cb: OutputCallback = None, + std_err_cb: OutputCallback = None, + log_stderr: bool = False + ) -> None: self._loop = loop self._pipe_fds: List[int] = [] super().__init__(limit, loop) - self.program_name = program_name self.std_out_cb = std_out_cb self.std_err_cb = std_err_cb self.log_stderr = log_stderr self.pending_data: List[bytes] = [b"", b""] - def connection_made(self, - transport: asyncio.transports.BaseTransport - ) -> None: + def connection_made( + self, transport: asyncio.transports.BaseTransport + ) -> None: + transport = cast(asyncio.SubprocessTransport, transport) self._transport = transport - assert isinstance(transport, asyncio.SubprocessTransport) stdout_transport = transport.get_pipe_transport(1) if stdout_transport is not None: self._pipe_fds.append(1) @@ -74,10 +75,11 @@ class ShellCommandProtocol(asyncio.subprocess.SubprocessStreamProtocol): stdin_transport = transport.get_pipe_transport(0) if stdin_transport is not None: self.stdin = asyncio.streams.StreamWriter( - stdin_transport, + stdin_transport, # type: ignore protocol=self, reader=None, - loop=self._loop) + loop=self._loop + ) def pipe_data_received(self, fd: int, data: bytes | str) -> None: cb = None @@ -91,7 +93,7 @@ class ShellCommandProtocol(asyncio.subprocess.SubprocessStreamProtocol): msg = data.decode(errors='ignore') else: msg = data - logging.info(f"{self.program_name}: {msg}") + logging.info(msg) if cb is not None: if isinstance(data, str): data = data.encode() @@ -103,10 +105,9 @@ class ShellCommandProtocol(asyncio.subprocess.SubprocessStreamProtocol): continue cb(line) - def pipe_connection_lost(self, - fd: int, - exc: Exception | None - ) -> None: + def pipe_connection_lost( + self, fd: int, exc: Exception | None + ) -> None: cb = None pending = b"" if fd == 1: @@ -124,15 +125,16 @@ class ShellCommand: IDX_SIGINT = 0 IDX_SIGTERM = 1 IDX_SIGKILL = 2 - def __init__(self, - factory: ShellCommandFactory, - cmd: str, - std_out_callback: OutputCallback, - std_err_callback: OutputCallback, - env: Optional[Dict[str, str]] = None, - log_stderr: bool = False, - cwd: Optional[str] = None - ) -> None: + def __init__( + self, + factory: 
ShellCommandFactory, + cmd: str, + std_out_callback: OutputCallback, + std_err_callback: OutputCallback, + env: Optional[Dict[str, str]] = None, + log_stderr: bool = False, + cwd: Optional[str] = None + ) -> None: self.factory = factory self.name = cmd self.std_out_cb = std_out_callback @@ -178,13 +180,15 @@ class ShellCommand: self.return_code = self.proc = None self.cancelled = False - async def run(self, - timeout: float = 2., - verbose: bool = True, - log_complete: bool = True, - sig_idx: int = 1, - proc_input: Optional[str] = None - ) -> bool: + async def run( + self, + timeout: float = 2., + verbose: bool = True, + log_complete: bool = True, + sig_idx: int = 1, + proc_input: Optional[str] = None, + success_codes: Optional[List[int]] = None + ) -> bool: async with self.run_lock: self.factory.add_running_command(self) self._reset_command_data() @@ -217,22 +221,26 @@ class ShellCommand: else: complete = not self.cancelled self.factory.remove_running_command(self) - return self._check_proc_success(complete, log_complete) + return self._check_proc_success( + complete, log_complete, success_codes + ) - async def run_with_response(self, - timeout: float = 2., - retries: int = 1, - log_complete: bool = True, - sig_idx: int = 1, - proc_input: Optional[str] = None - ) -> str: + async def run_with_response( + self, + timeout: float = 2., + attempts: int = 1, + log_complete: bool = True, + sig_idx: int = 1, + proc_input: Optional[str] = None, + success_codes: Optional[List[int]] = None + ) -> str: async with self.run_lock: self.factory.add_running_command(self) - retries = max(1, retries) + attempts = max(1, attempts) stdin: Optional[bytes] = None if proc_input is not None: stdin = proc_input.encode() - while retries > 0: + while attempts > 0: self._reset_command_data() timed_out = False stdout = stderr = b"" @@ -252,7 +260,9 @@ class ShellCommand: logging.info( f"{self.command[0]}: " f"{stderr.decode(errors='ignore')}") - if self._check_proc_success(complete, log_complete): + if self._check_proc_success( + complete, log_complete, success_codes + ): self.factory.remove_running_command(self) return stdout.decode(errors='ignore').rstrip("\n") if stdout: @@ -261,24 +271,25 @@ class ShellCommand: f"\n{stdout.decode(errors='ignore')}") if self.cancelled and not timed_out: break - retries -= 1 + attempts -= 1 await asyncio.sleep(.5) self.factory.remove_running_command(self) raise ShellCommandError( - f"Error running shell command: '{self.command}'", + f"Error running shell command: '{self.name}'", self.return_code, stdout, stderr) - async def _create_subprocess(self, - use_callbacks: bool = False, - has_input: bool = False - ) -> bool: + async def _create_subprocess( + self, + use_callbacks: bool = False, + has_input: bool = False + ) -> bool: loop = asyncio.get_running_loop() def protocol_factory(): return ShellCommandProtocol( - limit=2**20, loop=loop, program_name=self.command[0], - std_out_cb=self.std_out_cb, std_err_cb=self.std_err_cb, - log_stderr=self.log_stderr) + limit=2**20, loop=loop, std_out_cb=self.std_out_cb, + std_err_cb=self.std_err_cb, log_stderr=self.log_stderr + ) try: stdpipe: Optional[int] = None if has_input: @@ -299,19 +310,25 @@ class ShellCommand: *self.command, stdin=stdpipe, stdout=asyncio.subprocess.PIPE, stderr=errpipe, env=self.env, cwd=self.cwd) + except asyncio.CancelledError: + raise except Exception: logging.exception( f"shell_command: Command ({self.name}) failed") return False return True - def _check_proc_success(self, - complete: bool, - log_complete: bool - ) -> 
bool: + def _check_proc_success( + self, + complete: bool, + log_complete: bool, + success_codes: Optional[List[int]] = None + ) -> bool: assert self.proc is not None + if success_codes is None: + success_codes = [0] self.return_code = self.proc.returncode - success = self.return_code == 0 and complete + success = self.return_code in success_codes and complete if success: msg = f"Command ({self.name}) successfully finished" elif self.cancelled: @@ -339,32 +356,77 @@ class ShellCommandFactory: except KeyError: pass - def build_shell_command(self, - cmd: str, - callback: OutputCallback = None, - std_err_callback: OutputCallback = None, - env: Optional[Dict[str, str]] = None, - log_stderr: bool = False, - cwd: Optional[str] = None - ) -> ShellCommand: - return ShellCommand(self, cmd, callback, std_err_callback, env, - log_stderr, cwd) + def build_shell_command( + self, + cmd: str, + callback: OutputCallback = None, + std_err_callback: OutputCallback = None, + env: Optional[Dict[str, str]] = None, + log_stderr: bool = False, + cwd: Optional[str] = None + ) -> ShellCommand: + return ShellCommand( + self, cmd, callback, std_err_callback, env, log_stderr, cwd + ) - def exec_cmd(self, - cmd: str, - timeout: float = 2., - retries: int = 1, - sig_idx: int = 1, - proc_input: Optional[str] = None, - log_complete: bool = True, - log_stderr: bool = False, - env: Optional[Dict[str, str]] = None, - cwd: Optional[str] = None - ) -> Awaitable: + def run_cmd_async( + self, + cmd: str, + callback: OutputCallback = None, + std_err_callback: OutputCallback = None, + timeout: float = 2., + attempts: int = 1, + verbose: bool = True, + sig_idx: int = 1, + proc_input: Optional[str] = None, + log_complete: bool = True, + log_stderr: bool = False, + env: Optional[Dict[str, str]] = None, + cwd: Optional[str] = None, + success_codes: Optional[List[int]] = None + ) -> Awaitable[None]: + """ + Runs a command and processes responses as they are received. Optional + callbacks may be provided to handle stdout and stderr. + """ + scmd = ShellCommand( + self, cmd, callback, std_err_callback, env, log_stderr, cwd + ) + attempts = max(1, attempts) + async def _wrapper() -> None: + for _ in range(attempts): + if await scmd.run( + timeout, verbose, log_complete, sig_idx, + proc_input, success_codes + ): + break + else: + ret_code = scmd.get_return_code() + raise ShellCommandError(f"Error running command {cmd}", ret_code) + return asyncio.create_task(_wrapper()) + + def exec_cmd( + self, + cmd: str, + timeout: float = 2., + attempts: int = 1, + sig_idx: int = 1, + proc_input: Optional[str] = None, + log_complete: bool = True, + log_stderr: bool = False, + env: Optional[Dict[str, str]] = None, + cwd: Optional[str] = None, + success_codes: Optional[List[int]] = None + ) -> Awaitable[str]: + """ + Executes a command and returns UTF-8 decoded stdout upon completion. 
+ """ scmd = ShellCommand(self, cmd, None, None, env, log_stderr, cwd) - coro = scmd.run_with_response(timeout, retries, log_complete, - sig_idx, proc_input) + coro = scmd.run_with_response( + timeout, attempts, log_complete, sig_idx, + proc_input, success_codes + ) return asyncio.create_task(coro) async def close(self) -> None: diff --git a/moonraker/components/simplyprint.py b/moonraker/components/simplyprint.py new file mode 100644 index 0000000..eba164f --- /dev/null +++ b/moonraker/components/simplyprint.py @@ -0,0 +1,1690 @@ +# SimplyPrint Connection Support +# +# Copyright (C) 2022 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license. + +from __future__ import annotations +import os +import asyncio +import logging +import time +import pathlib +import base64 +import tornado.websocket +from tornado.escape import url_escape +import logging.handlers +import tempfile +from queue import SimpleQueue +from ..loghelper import LocalQueueHandler +from ..common import APITransport, JobEvent, KlippyState, UserInfo +from ..utils import json_wrapper as jsonw + +from typing import ( + TYPE_CHECKING, + Awaitable, + Optional, + Dict, + List, + Union, + Any, + Callable, +) +if TYPE_CHECKING: + from .application import InternalTransport + from ..confighelper import ConfigHelper + from .websockets import WebsocketManager + from ..common import BaseRemoteConnection + from tornado.websocket import WebSocketClientConnection + from .database import MoonrakerDatabase + from .klippy_apis import KlippyAPI + from .job_state import JobState + from .machine import Machine + from .file_manager.file_manager import FileManager + from .http_client import HttpClient + from .power import PrinterPower + from .announcements import Announcements + from .webcam import WebcamManager, WebCam + from .klippy_connection import KlippyConnection + +COMPONENT_VERSION = "0.0.1" +SP_VERSION = "0.1" +TEST_ENDPOINT = f"wss://testws.simplyprint.io/{SP_VERSION}/p" +PROD_ENDPOINT = f"wss://ws.simplyprint.io/{SP_VERSION}/p" +# TODO: Increase this time to something greater, perhaps 30 minutes +CONNECTION_ERROR_LOG_TIME = 60. +PRE_SETUP_EVENTS = [ + "connection", "state_change", "shutdown", "machine_data", "firmware", + "ping" +] + +class SimplyPrint(APITransport): + def __init__(self, config: ConfigHelper) -> None: + self.server = config.get_server() + self._logger = ProtoLogger(config) + self.eventloop = self.server.get_event_loop() + self.job_state: JobState + self.job_state = self.server.lookup_component("job_state") + self.klippy_apis: KlippyAPI + self.klippy_apis = self.server.lookup_component("klippy_apis") + database: MoonrakerDatabase = self.server.lookup_component("database") + database.register_local_namespace("simplyprint", forbidden=True) + self.spdb = database.wrap_namespace("simplyprint") + self.sp_info = self.spdb.as_dict() + self.is_closing = False + self.ws: Optional[WebSocketClientConnection] = None + self.cache = ReportCache() + ambient = self.sp_info.get("ambient_temp", INITIAL_AMBIENT) + self.amb_detect = AmbientDetect(config, self, ambient) + self.layer_detect = LayerDetect() + self.webcam_stream = WebcamStream(config, self) + self.print_handler = PrintHandler(self) + self.last_received_temps: Dict[str, float] = {} + self.last_err_log_time: float = 0. + self.last_cpu_update_time: float = 0. 
+        self.intervals: Dict[str, float] = {
+            "job": 1.,
+            "temps": 1.,
+            "temps_target": .25,
+            "cpu": 10.,
+            "ai": 0.,
+            "ping": 20.,
+        }
+        self.printer_status: Dict[str, Dict[str, Any]] = {}
+        self.heaters: Dict[str, str] = {}
+        self.missed_job_events: List[Dict[str, Any]] = []
+        self.announce_mutex = asyncio.Lock()
+        self.connection_task: Optional[asyncio.Task] = None
+        self.reconnect_delay: float = 1.
+        self.reconnect_token: Optional[str] = None
+        self._last_sp_ping: float = 0.
+        self.ping_sp_timer = self.eventloop.register_timer(self._handle_sp_ping)
+        self.printer_info_timer = self.eventloop.register_timer(
+            self._handle_printer_info_update)
+        self._print_request_event: asyncio.Event = asyncio.Event()
+        self.next_temp_update_time: float = 0.
+        self._last_ping_received: float = 0.
+        self.gcode_terminal_enabled: bool = False
+        self.connected = False
+        self.is_set_up = False
+        self.test = config.getboolean("use_test_endpoint", False)
+        connect_url = config.get("url", None)
+        if connect_url is not None:
+            self.connect_url = connect_url
+            self.is_set_up = True
+        else:
+            self._set_ws_url()
+
+        self.power_id: str = ""
+        power_id: Optional[str] = config.get("power_device", None)
+        if power_id is not None:
+            self.power_id = power_id
+            if self.power_id.startswith("power "):
+                self.power_id = self.power_id[6:]
+            if not config.has_section(f"power {self.power_id}"):
+                self.power_id = ""
+                self.server.add_warning(
+                    "Section [simplyprint], option 'power_device': Unable "
+                    f"to locate configuration for power device {power_id}"
+                )
+        else:
+            power_pfx = config.get_prefix_sections("power ")
+            if len(power_pfx) == 1:
+                name = power_pfx[0][6:]
+                if "printer" in name.lower():
+                    self.power_id = name
+        self.filament_sensor: str = ""
+        fsensor = config.get("filament_sensor", None)
+        fs_prefixes = ["filament_switch_sensor ", "filament_motion_sensor "]
+        if fsensor is not None:
+            for prefix in fs_prefixes:
+                if fsensor.startswith(prefix):
+                    self.filament_sensor = fsensor
+                    break
+            else:
+                self.server.add_warning(
+                    "Section [simplyprint], option 'filament_sensor': Invalid "
+                    f"sensor '{fsensor}', must start with one of the following "
+                    f"prefixes: {fs_prefixes}"
+                )
+
+        # Register State Events
+        self.server.register_event_handler(
+            "server:klippy_started", self._on_klippy_startup)
+        self.server.register_event_handler(
+            "server:klippy_ready", self._on_klippy_ready)
+        self.server.register_event_handler(
+            "server:klippy_shutdown", self._on_klippy_shutdown)
+        self.server.register_event_handler(
+            "server:klippy_disconnect", self._on_klippy_disconnected)
+        self.server.register_event_handler(
+            "job_state:state_changed", self._on_job_state_changed)
+        self.server.register_event_handler(
+            "klippy_apis:pause_requested", self._on_pause_requested)
+        self.server.register_event_handler(
+            "klippy_apis:resume_requested", self._on_resume_requested)
+        self.server.register_event_handler(
+            "klippy_apis:cancel_requested", self._on_cancel_requested)
+        self.server.register_event_handler(
+            "proc_stats:proc_stat_update", self._on_proc_update)
+        self.server.register_event_handler(
+            "proc_stats:cpu_throttled", self._on_cpu_throttled
+        )
+        self.server.register_event_handler(
+            "websockets:client_identified", self._on_websocket_identified)
+        self.server.register_event_handler(
+            "websockets:client_removed", self._on_websocket_removed)
+        self.server.register_event_handler(
+            "server:gcode_response", self._on_gcode_response)
+        self.server.register_event_handler(
+            "klippy_connection:gcode_received", self._on_gcode_received
+        )
+        self.server.register_event_handler(
+            "power:power_changed", self._on_power_changed
+        )
+
+    async def component_init(self) -> None:
+        await self.webcam_stream.initialize_url()
+        await self.webcam_stream.test_connection()
+        self.connection_task = self.eventloop.create_task(self._connect())
+
+    async def _connect(self) -> None:
+        log_connect = True
+        while not self.is_closing:
+            url = self.connect_url
+            if self.reconnect_token is not None:
+                url = f"{self.connect_url}/{self.reconnect_token}"
+            if log_connect:
+                logging.info(f"Connecting To SimplyPrint: {url}")
+                log_connect = False
+            try:
+                self.ws = await tornado.websocket.websocket_connect(
+                    url, connect_timeout=5.,
+                )
+                setattr(self.ws, "on_ping", self._on_ws_ping)
+                cur_time = self.eventloop.get_loop_time()
+                self._last_ping_received = cur_time
+            except asyncio.CancelledError:
+                raise
+            except Exception:
+                curtime = self.eventloop.get_loop_time()
+                timediff = curtime - self.last_err_log_time
+                if timediff > CONNECTION_ERROR_LOG_TIME:
+                    self.last_err_log_time = curtime
+                    logging.exception("Failed to connect to SimplyPrint")
+            else:
+                logging.info("Connected to SimplyPrint Cloud")
+                await self._read_messages()
+                log_connect = True
+            if not self.is_closing:
+                await asyncio.sleep(self.reconnect_delay)
+
+    async def _read_messages(self) -> None:
+        message: Union[str, bytes, None]
+        while self.ws is not None:
+            message = await self.ws.read_message()
+            if isinstance(message, str):
+                self._process_message(message)
+            elif message is None:
+                self.ping_sp_timer.stop()
+                cur_time = self.eventloop.get_loop_time()
+                ping_time: float = cur_time - self._last_ping_received
+                reason = code = None
+                if self.ws is not None:
+                    reason = self.ws.close_reason
+                    code = self.ws.close_code
+                msg = (
+                    f"SimplyPrint Disconnected - Code: {code}, "
+                    f"Reason: {reason}, "
+                    f"Server Ping Time Elapsed: {ping_time}"
+                )
+                logging.info(msg)
+                self.connected = False
+                self.ws = None
+                break
+
+    def _on_ws_ping(self, data: bytes = b"") -> None:
+        self._last_ping_received = self.eventloop.get_loop_time()
+
+    def _process_message(self, msg: str) -> None:
+        self._logger.info(f"received: {msg}")
+        try:
+            packet: Dict[str, Any] = jsonw.loads(msg)
+        except jsonw.JSONDecodeError:
+            logging.debug(f"Invalid message, not JSON: {msg}")
+            return
+        event: str = packet.get("type", "")
+        data: Optional[Dict[str, Any]] = packet.get("data")
+        if event == "connected":
+            logging.info("SimplyPrint Reports Connection Success")
+            self.connected = True
+            self.reconnect_token = None
+            if data is not None:
+                if data.get("in_setup", 0) == 1:
+                    self.is_set_up = False
+                    self.save_item("printer_id", None)
+                    self.save_item("printer_name", None)
+                    if "short_id" in data:
+                        self.eventloop.create_task(
+                            self._announce_setup(data["short_id"])
+                        )
+                interval = data.get("interval")
+                if isinstance(interval, dict):
+                    self._update_intervals(interval)
+                self.reconnect_token = data.get("reconnect_token")
+                name = data.get("name")
+                if name is not None:
+                    self.save_item("printer_name", name)
+            self.reconnect_delay = 1.
+            self._push_initial_state()
+            self.ping_sp_timer.start()
+        elif event == "error":
+            logging.info(f"SimplyPrint Connection Error: {data}")
+            self.reconnect_delay = 30.
+            self.reconnect_token = None
+        elif event == "new_token":
+            if data is None:
+                logging.debug("Invalid message, no data")
+                return
+            if data.get("no_exist", False) is True and self.is_set_up:
+                self.is_set_up = False
+                self.save_item("printer_id", None)
+            token: Optional[str] = data.get("token")
+            if not isinstance(token, str):
+                logging.debug(f"Invalid token received: {token}")
+                token = None
+            else:
+                logging.info("SimplyPrint Token Received")
+            self.save_item("printer_token", token)
+            self._set_ws_url()
+            if "short_id" in data:
+                short_id = data["short_id"]
+                if not isinstance(short_id, str):
+                    self._logger.debug(f"Invalid short_id received: {short_id}")
+                else:
+                    self.eventloop.create_task(
+                        self._announce_setup(data["short_id"])
+                    )
+        elif event == "complete_setup":
+            if data is None:
+                logging.debug("Invalid message, no data")
+                return
+            printer_id = data.get("printer_id")
+            if printer_id is None:
+                logging.debug("Invalid printer id, received null (None) value")
+                return
+            self.save_item("printer_id", str(printer_id))
+            self._set_ws_url()
+            self.save_item("temp_short_setup_id", None)
+            self.eventloop.create_task(self._remove_setup_announcement())
+        elif event == "demand":
+            if data is None:
+                logging.debug("Invalid message, no data")
+                return
+            demand = data.pop("demand", "unknown")
+            self._process_demand(demand, data)
+        elif event == "interval_change":
+            if isinstance(data, dict):
+                self._update_intervals(data)
+        elif event == "pong":
+            diff = self.eventloop.get_loop_time() - self._last_sp_ping
+            self.send_sp("latency", {"ms": int(diff * 1000 + .5)})
+        else:
+            # TODO: It would be good for the backend to send an
+            # event indicating that it is ready to receive printer
+            # status.
+            logging.debug(f"Unknown event: {msg}")
+
+    def _process_demand(self, demand: str, args: Dict[str, Any]) -> None:
+        kconn: KlippyConnection
+        kconn = self.server.lookup_component("klippy_connection")
+        if demand in ["pause", "resume", "cancel"]:
+            if not kconn.is_connected():
+                return
+            self.eventloop.create_task(self._request_print_action(demand))
+        elif demand == "terminal":
+            if "enabled" in args:
+                self.gcode_terminal_enabled = args["enabled"]
+        elif demand == "gcode":
+            if not kconn.is_connected():
+                return
+            script_list: List[str] = args.get("list", [])
+            ident: Optional[str] = args.get("identifier", None)
+            if script_list:
+                script = "\n".join(script_list)
+                self.eventloop.create_task(self._handle_gcode_demand(script, ident))
+        elif demand == "webcam_snapshot":
+            self.eventloop.create_task(self.webcam_stream.post_image(args))
+        elif demand == "file":
+            url: Optional[str] = args.get("url")
+            if not isinstance(url, str):
+                logging.debug("Invalid url in message")
+                return
+            start = bool(args.get("auto_start", 0))
+            self.print_handler.download_file(url, start)
+        elif demand == "start_print":
+            if (
+                kconn.is_connected() and
+                self.cache.state == "operational"
+            ):
+                self.eventloop.create_task(self.print_handler.start_print())
+            else:
+                logging.debug("Failed to start print")
+        elif demand == "system_restart":
+            coro = self._call_internal_api("machine.reboot")
+            self.eventloop.create_task(coro)
+        elif demand == "system_shutdown":
+            coro = self._call_internal_api("machine.shutdown")
+            self.eventloop.create_task(coro)
+        elif demand == "api_restart":
+            self.eventloop.create_task(self._do_service_action("restart"))
+        elif demand == "api_shutdown":
+            self.eventloop.create_task(self._do_service_action("shutdown"))
+        elif demand == "psu_on":
+            self._do_power_action("on")
+        elif demand == "psu_off":
self._do_power_action("off") + elif demand == "test_webcam": + self.eventloop.create_task(self._test_webcam()) + else: + logging.debug(f"Unknown demand: {demand}") + + def save_item(self, name: str, data: Any): + if data is None: + self.sp_info.pop(name, None) + self.spdb.pop(name, None) + else: + self.sp_info[name] = data + self.spdb[name] = data + + async def _handle_gcode_demand( + self, script: str, ident: Optional[str] + ) -> None: + success: bool = True + msg: Optional[str] = None + try: + await self.klippy_apis.run_gcode(script) + except self.server.error as e: + msg = str(e) + success = False + if ident is not None: + self.send_sp( + "gcode_executed", + { + "identifier": ident, + "success": success, + "message": msg + } + ) + + async def _call_internal_api(self, method: str, **kwargs) -> Any: + itransport: InternalTransport + itransport = self.server.lookup_component("internal_transport") + try: + ret = await itransport.call_method(method, **kwargs) + except self.server.error: + return None + return ret + + def _set_ws_url(self) -> None: + token: Optional[str] = self.sp_info.get("printer_token") + printer_id: Optional[str] = self.sp_info.get("printer_id") + ep = TEST_ENDPOINT if self.test else PROD_ENDPOINT + self.connect_url = f"{ep}/0/0" + if token is not None: + if printer_id is None: + self.connect_url = f"{ep}/0/{token}" + else: + self.is_set_up = True + self.connect_url = f"{ep}/{printer_id}/{token}" + + def _update_intervals(self, intervals: Dict[str, Any]) -> None: + for key, val in intervals.items(): + self.intervals[key] = val / 1000. + cur_ai_interval = self.intervals.get("ai", 0.) + if not cur_ai_interval: + self.webcam_stream.stop_ai() + logging.debug(f"Intervals Updated: {self.intervals}") + + async def _announce_setup(self, short_id: str) -> None: + async with self.announce_mutex: + eid: Optional[str] = self.sp_info.get("announcement_id") + if ( + eid is not None and + self.sp_info.get("temp_short_setup_id") == short_id + ): + return + ann: Announcements = self.server.lookup_component("announcements") + if eid is not None: + # remove stale announcement + try: + await ann.remove_announcement(eid) + except self.server.error: + pass + + self.save_item("temp_short_setup_id", short_id) + entry = ann.add_internal_announcement( + "SimplyPrint Setup Request", + "SimplyPrint is ready to complete setup for your printer. 
" + "Please log in to your account and enter the following " + f"setup code:\n\n{short_id}\n\n", + "https://simplyprint.io", "high", "simplyprint" + ) + eid = entry.get("entry_id") + self.save_item("announcement_id", eid) + + async def _remove_setup_announcement(self) -> None: + async with self.announce_mutex: + eid = self.sp_info.get("announcement_id") + if eid is None: + return + self.save_item("announcement_id", None) + ann: Announcements = self.server.lookup_component("announcements") + try: + await ann.remove_announcement(eid) + except self.server.error: + pass + + def _do_power_action(self, state: str) -> None: + if self.power_id: + power: PrinterPower = self.server.lookup_component("power") + power.set_device_power(self.power_id, state) + + async def _do_service_action(self, action: str) -> None: + try: + machine: Machine = self.server.lookup_component("machine") + await machine.do_service_action(action, "moonraker") + except self.server.error: + pass + + async def _request_print_action(self, action: str) -> None: + cur_state = self.cache.state + ret: Optional[str] = "" + self._print_request_event.clear() + if action == "pause": + if cur_state == "printing": + self._update_state("pausing") + ret = await self.klippy_apis.pause_print(None) + elif action == "resume": + if cur_state == "paused": + self._print_request_fut = self.eventloop.create_future() + self._update_state("resuming") + ret = await self.klippy_apis.resume_print(None) + elif action == "cancel": + if cur_state in ["printing", "paused"]: + self._update_state("cancelling") + ret = await self.klippy_apis.cancel_print(None) + if ret is None: + # Wait for the "action" requested event to fire, then reset the + # state + try: + await asyncio.wait_for(self._print_request_event.wait(), 1.) 
+            except Exception:
+                pass
+            self._update_state_from_klippy()
+
+    async def _test_webcam(self) -> None:
+        await self.webcam_stream.test_connection()
+        self.send_sp(
+            "webcam_status", {"connected": self.webcam_stream.connected}
+        )
+
+    async def _on_klippy_ready(self) -> None:
+        last_stats: Dict[str, Any] = self.job_state.get_last_stats()
+        if last_stats["state"] == "printing":
+            self._on_print_started(last_stats, last_stats, False)
+        else:
+            self._update_state("operational")
+        query: Optional[Dict[str, Any]]
+        query = await self.klippy_apis.query_objects({"heaters": None}, None)
+        sub_objs = {
+            "display_status": ["progress"],
+            "bed_mesh": ["mesh_matrix", "mesh_min", "mesh_max"],
+            "toolhead": ["extruder"],
+            "gcode_move": ["gcode_position"]
+        }
+        # Add Heater Subscriptions
+        has_amb_sensor: bool = False
+        cfg_amb_sensor = self.amb_detect.sensor_name
+        if query is not None:
+            heaters: Dict[str, Any] = query.get("heaters", {})
+            avail_htrs: List[str]
+            avail_htrs = sorted(heaters.get("available_heaters", []))
+            logging.debug(f"SimplyPrint: Heaters Detected: {avail_htrs}")
+            for htr in avail_htrs:
+                if htr.startswith("extruder"):
+                    sub_objs[htr] = ["temperature", "target"]
+                    if htr == "extruder":
+                        tool_id = "tool0"
+                    else:
+                        tool_id = "tool" + htr[8:]
+                    self.heaters[htr] = tool_id
+                elif htr == "heater_bed":
+                    sub_objs[htr] = ["temperature", "target"]
+                    self.heaters[htr] = "bed"
+            sensors: List[str] = heaters.get("available_sensors", [])
+            if cfg_amb_sensor:
+                if cfg_amb_sensor in sensors:
+                    has_amb_sensor = True
+                    sub_objs[cfg_amb_sensor] = ["temperature"]
+                else:
+                    logging.info(
+                        f"SimplyPrint: Ambient sensor {cfg_amb_sensor} not "
+                        "configured in Klipper"
+                    )
+        # Add filament sensor subscription
+        if self.filament_sensor:
+            objects: List[str]
+            objects = await self.klippy_apis.get_object_list(default=[])
+            if self.filament_sensor in objects:
+                sub_objs[self.filament_sensor] = ["filament_detected"]
+        if not sub_objs:
+            return
+        # Create our own subscription rather than use the host sub
+        status: Dict[str, Any] = await self.klippy_apis.subscribe_from_transport(
+            sub_objs, self, default={}
+        )
+        if status:
+            logging.debug(f"SimplyPrint: Got Initial Status: {status}")
+            self.printer_status = status
+            self._update_temps(1.)
+            self.next_temp_update_time = 0.
+            if "bed_mesh" in status:
+                self._send_mesh_data()
+            if "toolhead" in status:
+                self._send_active_extruder(status["toolhead"]["extruder"])
+            if "gcode_move" in status:
+                self.layer_detect.update(
+                    status["gcode_move"]["gcode_position"]
+                )
+            if self.filament_sensor and self.filament_sensor in status:
+                detected = status[self.filament_sensor]["filament_detected"]
+                fstate = "loaded" if detected else "runout"
+                self.cache.filament_state = fstate
+                self.send_sp("filament_sensor", {"state": fstate})
+            if has_amb_sensor and cfg_amb_sensor in status:
+                self.amb_detect.update_ambient(status[cfg_amb_sensor])
+        if not has_amb_sensor:
+            self.amb_detect.start()
+        self.printer_info_timer.start(delay=1.)
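+
+    # For reference: on a single-extruder printer with a heated bed, the
+    # subscription assembled in _on_klippy_ready() above would resemble the
+    # following (illustrative; actual contents depend on the Klipper config):
+    #
+    #   sub_objs = {
+    #       "display_status": ["progress"],
+    #       "bed_mesh": ["mesh_matrix", "mesh_min", "mesh_max"],
+    #       "toolhead": ["extruder"],
+    #       "gcode_move": ["gcode_position"],
+    #       "extruder": ["temperature", "target"],    # reported as "tool0"
+    #       "heater_bed": ["temperature", "target"]   # reported as "bed"
+    #   }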
+
+    def _on_power_changed(self, device_info: Dict[str, Any]) -> None:
+        if self.power_id and device_info["device"] == self.power_id:
+            is_on = device_info["status"] == "on"
+            self.send_sp("power_controller", {"on": is_on})
+
+    def _on_websocket_identified(self, ws: BaseRemoteConnection) -> None:
+        if (
+            self.cache.current_wsid is None and
+            ws.client_data.get("type", "") == "web"
+        ):
+            ui_data: Dict[str, Any] = {
+                "ui": ws.client_data["name"],
+                "ui_version": ws.client_data["version"]
+            }
+            self.cache.firmware_info.update(ui_data)
+            self.cache.current_wsid = ws.uid
+            self.send_sp("machine_data", ui_data)
+
+    def _on_websocket_removed(self, ws: BaseRemoteConnection) -> None:
+        if self.cache.current_wsid is None or self.cache.current_wsid != ws.uid:
+            return
+        ui_data = self._get_ui_info()
+        diff = self._get_object_diff(ui_data, self.cache.firmware_info)
+        if diff:
+            self.cache.firmware_info.update(ui_data)
+            self.send_sp("machine_data", ui_data)
+
+    def _on_klippy_startup(self, state: KlippyState) -> None:
+        if state != KlippyState.READY:
+            self._update_state("error")
+            kconn: KlippyConnection
+            kconn = self.server.lookup_component("klippy_connection")
+            self.send_sp("printer_error", {"error": kconn.state.message})
+        self.send_sp("connection", {"new": "connected"})
+        self._send_firmware_data()
+
+    def _on_klippy_shutdown(self) -> None:
+        self._update_state("error")
+        kconn: KlippyConnection
+        kconn = self.server.lookup_component("klippy_connection")
+        self.send_sp("printer_error", {"error": kconn.state.message})
+
+    def _on_klippy_disconnected(self) -> None:
+        self._update_state("offline")
+        self.send_sp("connection", {"new": "disconnected"})
+        self.amb_detect.stop()
+        self.printer_info_timer.stop()
+        self.cache.reset_print_state()
+        self.printer_status = {}
+
+    def _on_job_state_changed(self, job_event: JobEvent, *args) -> None:
+        callback: Optional[Callable] = getattr(self, f"_on_print_{job_event}", None)
+        if callback is not None:
+            callback(*args)
+        else:
+            logging.info(f"No defined callback for Job Event: {job_event}")
+
+    def _on_print_started(
+        self,
+        prev_stats: Dict[str, Any],
+        new_stats: Dict[str, Any],
+        need_start_event: bool = True
+    ) -> None:
+        # includes started and resumed events
+        self._update_state("printing")
+        filename = new_stats["filename"]
+        job_info: Dict[str, Any] = {"filename": filename}
+        fm: FileManager = self.server.lookup_component("file_manager")
+        metadata = fm.get_file_metadata(filename)
+        filament: Optional[float] = metadata.get("filament_total")
+        if filament is not None:
+            job_info["filament"] = round(filament)
+        est_time = metadata.get("estimated_time")
+        if est_time is not None:
+            job_info["time"] = est_time
+        self.cache.metadata = metadata
+        self.cache.job_info.update(job_info)
+        if need_start_event:
+            job_info["started"] = True
+        self.layer_detect.start(metadata)
+        self._send_job_event(job_info)
+        self.webcam_stream.reset_ai_scores()
+        self.webcam_stream.start_ai(120.)
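+
+    # The job_info payload assembled in _on_print_started() above might look
+    # like this for a typical sliced file (values are illustrative only):
+    #
+    #   {"filename": "part.gcode", "filament": 4250, "time": 5400,
+    #    "started": True}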
+ + def _check_job_started( + self, + prev_stats: Dict[str, Any], + new_stats: Dict[str, Any] + ) -> None: + if not self.cache.job_info: + job_info: Dict[str, Any] = { + "filename": new_stats.get("filename", ""), + "started": True + } + self._send_job_event(job_info) + + def _reset_file(self) -> None: + cur_job = self.cache.job_info.get("filename", "") + last_started = self.print_handler.last_started + if last_started and last_started == cur_job: + kapi: KlippyAPI = self.server.lookup_component("klippy_apis") + self.eventloop.create_task( + kapi.run_gcode("SDCARD_RESET_FILE", default=None) + ) + self.print_handler.last_started = "" + + def _on_print_paused(self, *args) -> None: + self.send_sp("job_info", {"paused": True}) + self._update_state("paused") + self.layer_detect.stop() + self.webcam_stream.stop_ai() + + def _on_print_resumed(self, *args) -> None: + self._update_state("printing") + self.layer_detect.resume() + self.webcam_stream.reset_ai_scores() + self.webcam_stream.start_ai(self.intervals["ai"]) + + def _on_print_cancelled(self, *args) -> None: + self._check_job_started(*args) + self._reset_file() + self._send_job_event({"cancelled": True}) + self._update_state_from_klippy() + self.cache.job_info = {} + self.layer_detect.stop() + self.webcam_stream.stop_ai() + + def _on_print_error(self, *args) -> None: + self._check_job_started(*args) + self._reset_file() + payload: Dict[str, Any] = {"failed": True} + new_stats: Dict[str, Any] = args[1] + msg = new_stats.get("message", "Unknown Error") + payload["error"] = msg + self._send_job_event(payload) + self._update_state_from_klippy() + self.cache.job_info = {} + self.layer_detect.stop() + self.webcam_stream.stop_ai() + + def _on_print_complete(self, *args) -> None: + self._check_job_started(*args) + self._reset_file() + self._send_job_event({"finished": True}) + self._update_state_from_klippy() + self.cache.job_info = {} + self.layer_detect.stop() + self.webcam_stream.stop_ai() + + def _on_print_standby(self, *args) -> None: + self._update_state_from_klippy() + self.cache.job_info = {} + self.layer_detect.stop() + self.webcam_stream.stop_ai() + + def _on_pause_requested(self) -> None: + self._print_request_event.set() + if self.cache.state == "printing": + self._update_state("pausing") + + def _on_resume_requested(self) -> None: + self._print_request_event.set() + if self.cache.state == "paused": + self._update_state("resuming") + + def _on_cancel_requested(self) -> None: + self._print_request_event.set() + if self.cache.state in ["printing", "paused", "pausing"]: + self._update_state("cancelling") + + def _on_gcode_response(self, response: str): + if self.gcode_terminal_enabled: + resp = [ + r.strip() for r in response.strip().split("\n") if r.strip() + ] + self.send_sp("term_update", {"response": resp}) + + def _on_gcode_received(self, script: str): + if self.gcode_terminal_enabled: + cmds = [s.strip() for s in script.strip().split() if s.strip()] + self.send_sp("term_update", {"command": cmds}) + + def _on_proc_update(self, proc_stats: Dict[str, Any]) -> None: + cpu = proc_stats["system_cpu_usage"] + if not cpu: + return + curtime = self.eventloop.get_loop_time() + if curtime - self.last_cpu_update_time < self.intervals["cpu"]: + return + self.last_cpu_update_time = curtime + sys_mem = proc_stats["system_memory"] + mem_pct: float = 0. 
+ if sys_mem: + mem_pct = sys_mem["used"] / sys_mem["total"] * 100 + cpu_data = { + "usage": int(cpu["cpu"] + .5), + "memory": int(mem_pct + .5), + "flags": self.cache.throttled_state.get("bits", 0) + } + temp: Optional[float] = proc_stats["cpu_temp"] + if temp is not None: + cpu_data["temp"] = int(temp + .5) + diff = self._get_object_diff(cpu_data, self.cache.cpu_info) + if diff: + self.cache.cpu_info.update(cpu_data) + self.send_sp("cpu", diff) + + def _on_cpu_throttled(self, throttled_state: Dict[str, Any]): + self.cache.throttled_state = throttled_state + + def send_status(self, status: Dict[str, Any], eventtime: float) -> None: + for printer_obj, vals in status.items(): + self.printer_status[printer_obj].update(vals) + if self.amb_detect.sensor_name in status: + self.amb_detect.update_ambient( + status[self.amb_detect.sensor_name], eventtime + ) + self._update_temps(eventtime) + if "bed_mesh" in status: + self._send_mesh_data() + if "toolhead" in status and "extruder" in status["toolhead"]: + self._send_active_extruder(status["toolhead"]["extruder"]) + if "gcode_move" in status: + self.layer_detect.update(status["gcode_move"]["gcode_position"]) + if self.filament_sensor and self.filament_sensor in status: + detected = status[self.filament_sensor]["filament_detected"] + fstate = "loaded" if detected else "runout" + self.cache.filament_state = fstate + self.send_sp("filament_sensor", {"state": fstate}) + + def _handle_printer_info_update(self, eventtime: float) -> float: + # Job Info Timer handler + if self.cache.state == "printing": + self._update_job_progress() + return eventtime + self.intervals["job"] + + def _handle_sp_ping(self, eventtime: float) -> float: + self._last_sp_ping = eventtime + self.send_sp("ping", None) + return eventtime + self.intervals["ping"] + + def _update_job_progress(self) -> None: + job_info: Dict[str, Any] = {} + est_time = self.cache.metadata.get("estimated_time") + last_stats: Dict[str, Any] = self.job_state.get_last_stats() + if est_time is not None: + duration: float = last_stats["print_duration"] + time_left = max(0, int(est_time - duration + .5)) + last_time_left = self.cache.job_info.get("time", time_left + 60.) 
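+            # Debounce: only report a new estimate when less than a minute
+            # remains or the estimate has moved by at least 30 seconds.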
+ time_diff = last_time_left - time_left + if ( + (time_left < 60 or time_diff >= 30) and + time_left != last_time_left + ): + job_info["time"] = time_left + if "display_status" in self.printer_status: + progress = self.printer_status["display_status"]["progress"] + pct_prog = int(progress * 100 + .5) + if pct_prog != self.cache.job_info.get("progress", 0): + job_info["progress"] = int(progress * 100 + .5) + layer: Optional[int] = last_stats.get("info", {}).get("current_layer") + if layer is None: + layer = self.layer_detect.layer + if layer != self.cache.job_info.get("layer", -1): + job_info["layer"] = layer + if job_info: + self.cache.job_info.update(job_info) + self.send_sp("job_info", job_info) + + def _update_temps(self, eventtime: float) -> None: + if eventtime < self.next_temp_update_time: + return + need_rapid_update: bool = False + temp_data: Dict[str, List[int]] = {} + for printer_obj, key in self.heaters.items(): + reported_temp = self.printer_status[printer_obj]["temperature"] + ret = [ + int(reported_temp + .5), + int(self.printer_status[printer_obj]["target"] + .5) + ] + last_temps = self.cache.temps.get(key, [-100., -100.]) + if ret[1] == last_temps[1]: + if ret[1]: + seeking_target = abs(ret[1] - ret[0]) > 5 + else: + seeking_target = ret[0] >= self.amb_detect.ambient + 25 + need_rapid_update |= seeking_target + # The target hasn't changed and not heating, debounce temp + if key in self.last_received_temps and not seeking_target: + last_reported = self.last_received_temps[key] + if abs(reported_temp - last_reported) < .75: + self.last_received_temps.pop(key) + continue + if ret[0] == last_temps[0]: + self.last_received_temps[key] = reported_temp + continue + temp_data[key] = ret[:1] + else: + # target has changed, send full data + temp_data[key] = ret + self.last_received_temps[key] = reported_temp + self.cache.temps[key] = ret + if need_rapid_update: + self.next_temp_update_time = ( + 0. 
if self.intervals["temps_target"] < .2501 else + eventtime + self.intervals["temps_target"] + ) + else: + self.next_temp_update_time = eventtime + self.intervals["temps"] + if not temp_data: + return + self.send_sp("temps", temp_data) + + def _update_state_from_klippy(self) -> None: + kconn: KlippyConnection = self.server.lookup_component("klippy_connection") + klippy_state = kconn.state + if klippy_state == KlippyState.READY: + sp_state = "operational" + elif klippy_state in [KlippyState.ERROR, KlippyState.SHUTDOWN]: + sp_state = "error" + else: + sp_state = "offline" + self._update_state(sp_state) + + def _update_state(self, new_state: str) -> None: + if self.cache.state == new_state: + return + self.cache.state = new_state + self.send_sp("state_change", {"new": new_state}) + if new_state == "operational": + self.print_handler.notify_ready() + + def _send_mesh_data(self) -> None: + mesh = self.printer_status["bed_mesh"] + # TODO: We are probably going to have to reformat the mesh + self.cache.mesh = mesh + self.send_sp("mesh_data", mesh) + + def _send_job_event(self, job_info: Dict[str, Any]) -> None: + if self.connected: + self.send_sp("job_info", job_info) + else: + job_info.update(self.cache.job_info) + job_info["delay"] = self.eventloop.get_loop_time() + self.missed_job_events.append(job_info) + if len(self.missed_job_events) > 10: + self.missed_job_events.pop(0) + + def _get_ui_info(self) -> Dict[str, Any]: + ui_data: Dict[str, Any] = {"ui": None, "ui_version": None} + self.cache.current_wsid = None + websockets: WebsocketManager + websockets = self.server.lookup_component("websockets") + conns = websockets.get_clients_by_type("web") + if conns: + longest = conns[0] + ui_data["ui"] = longest.client_data["name"] + ui_data["ui_version"] = longest.client_data["version"] + self.cache.current_wsid = longest.uid + return ui_data + + async def _send_machine_data(self) -> None: + app_args = self.server.get_app_args() + data = self._get_ui_info() + data["api"] = "Moonraker" + data["api_version"] = app_args["software_version"] + data["sp_version"] = COMPONENT_VERSION + machine: Machine = self.server.lookup_component("machine") + sys_info = machine.get_system_info() + pyver = sys_info["python"]["version"][:3] + data["python_version"] = ".".join([str(part) for part in pyver]) + model: str = sys_info["cpu_info"].get("model", "") + if not model or model.isdigit(): + model = sys_info["cpu_info"].get("cpu_desc", "Unknown") + data["machine"] = model + data["os"] = sys_info["distribution"].get("name", "Unknown") + pub_intf = await machine.get_public_network() + data["is_ethernet"] = int(not pub_intf["is_wifi"]) + data["ssid"] = pub_intf.get("ssid", "") + data["local_ip"] = pub_intf.get("address", "Unknown") + data["hostname"] = pub_intf["hostname"] + data["core_count"] = os.cpu_count() + mem = sys_info["cpu_info"]["total_memory"] + if mem is not None: + data["total_memory"] = mem * 1024 + self.cache.machine_info = data + self.send_sp("machine_data", data) + + def _send_firmware_data(self) -> None: + kinfo = self.server.get_klippy_info() + if "software_version" not in kinfo: + return + firmware_date: str = "" + # Approximate the firmware "date" using the last modified + # time of the Klippy source folder + kpath = pathlib.Path(kinfo["klipper_path"]).joinpath("klippy") + if kpath.is_dir(): + mtime = kpath.stat().st_mtime + firmware_date = time.asctime(time.gmtime(mtime)) + version: str = kinfo["software_version"] + unsafe = version.endswith("-dirty") or version == "?" 
+ if unsafe: + version = version.rsplit("-", 1)[0] + fw_info = { + "firmware": "Klipper", + "firmware_version": version, + "firmware_date": firmware_date, + "firmware_link": "https://github.com/Klipper3d/klipper", + } + diff = self._get_object_diff(fw_info, self.cache.firmware_info) + if diff: + self.cache.firmware_info = fw_info + self.send_sp( + "firmware", {"fw": diff, "raw": False, "unsafe": unsafe} + ) + + def _send_active_extruder(self, new_extruder: str): + tool = "T0" if new_extruder == "extruder" else f"T{new_extruder[8:]}" + if tool == self.cache.active_extruder: + return + self.cache.active_extruder = tool + self.send_sp("tool", {"new": tool}) + + async def _send_webcam_config(self) -> None: + wc_cfg = await self.webcam_stream.get_webcam_config() + wc_data = { + "flipH": wc_cfg.get("flip_horizontal", False), + "flipV": wc_cfg.get("flip_vertical", False), + "rotate90": wc_cfg.get("rotation", 0) == 90 + } + self.send_sp("webcam", wc_data) + + async def _send_power_state(self) -> None: + dev_info: Optional[Dict[str, Any]] + dev_info = await self._call_internal_api( + "machine.device_power.get_device", device=self.power_id + ) + if dev_info is not None: + is_on = dev_info[self.power_id] == "on" + self.send_sp("power_controller", {"on": is_on}) + + def _push_initial_state(self): + self.send_sp("state_change", {"new": self.cache.state}) + if self.cache.temps: + self.send_sp("temps", self.cache.temps) + if self.cache.firmware_info: + self.send_sp( + "firmware", + {"fw": self.cache.firmware_info, "raw": False}) + curtime = self.eventloop.get_loop_time() + for evt in self.missed_job_events: + evt["delay"] = int((curtime - evt["delay"]) + .5) + self.send_sp("job_info", evt) + self.missed_job_events = [] + if self.cache.active_extruder: + self.send_sp("tool", {"new": self.cache.active_extruder}) + if self.cache.cpu_info: + self.send_sp("cpu_info", self.cache.cpu_info) + self.send_sp("ambient", {"new": self.amb_detect.ambient}) + if self.power_id: + self.eventloop.create_task(self._send_power_state()) + if self.cache.filament_state: + self.send_sp( + "filament_sensor", {"state": self.cache.filament_state} + ) + self.send_sp( + "webcam_status", {"connected": self.webcam_stream.connected} + ) + self.eventloop.create_task(self._send_machine_data()) + self.eventloop.create_task(self._send_webcam_config()) + + def _check_setup_event(self, evt_name: str) -> bool: + return self.is_set_up or evt_name in PRE_SETUP_EVENTS + + def send_sp(self, evt_name: str, data: Any) -> Awaitable[bool]: + if ( + not self.connected or + self.ws is None or + self.ws.protocol is None or + not self._check_setup_event(evt_name) + ): + fut = self.eventloop.create_future() + fut.set_result(False) + return fut + packet = {"type": evt_name, "data": data} + return self.eventloop.create_task(self._send_wrapper(packet)) + + async def _send_wrapper(self, packet: Dict[str, Any]) -> bool: + try: + assert self.ws is not None + await self.ws.write_message(jsonw.dumps(packet)) + except Exception: + return False + else: + if packet["type"] != "stream": + self._logger.info(f"sent: {packet}") + else: + self._logger.info("sent: webcam stream") + return True + + def _get_object_diff( + self, new_obj: Dict[str, Any], cached_obj: Dict[str, Any] + ) -> Dict[str, Any]: + if not cached_obj: + return new_obj + diff: Dict[str, Any] = {} + for key, val in new_obj.items(): + if key in cached_obj and val == cached_obj[key]: + continue + diff[key] = val + return diff + + async def close(self): + self.print_handler.cancel() + 
self.webcam_stream.stop_ai() + self.amb_detect.stop() + self.printer_info_timer.stop() + self.ping_sp_timer.stop() + await self.send_sp("shutdown", None) + self._logger.close() + self.is_closing = True + if self.ws is not None: + self.ws.close(1001, "Client Shutdown") + if ( + self.connection_task is not None and + not self.connection_task.done() + ): + try: + await asyncio.wait_for(self.connection_task, 2.) + except asyncio.TimeoutError: + pass + +class ReportCache: + def __init__(self) -> None: + self.state = "offline" + self.temps: Dict[str, Any] = {} + self.metadata: Dict[str, Any] = {} + self.mesh: Dict[str, Any] = {} + self.job_info: Dict[str, Any] = {} + self.active_extruder: str = "" + # Persistent state across connections + self.firmware_info: Dict[str, Any] = {} + self.machine_info: Dict[str, Any] = {} + self.cpu_info: Dict[str, Any] = {} + self.throttled_state: Dict[str, Any] = {} + self.current_wsid: Optional[int] = None + self.filament_state: str = "" + + def reset_print_state(self) -> None: + self.temps = {} + self.mesh = {} + self.job_info = {} + + +INITIAL_AMBIENT = 85 +AMBIENT_CHECK_TIME = 5. * 60. +TARGET_CHECK_TIME = 60. * 60. +SAMPLE_CHECK_TIME = 20. + +class AmbientDetect: + CHECK_INTERVAL = 5 + def __init__( + self, + config: ConfigHelper, + simplyprint: SimplyPrint, + initial_ambient: int + ) -> None: + self.server = config.get_server() + self.simplyprint = simplyprint + self.cache = simplyprint.cache + self._initial_sample: int = -1000 + self._ambient = initial_ambient + self._last_sample_time: float = 0. + self._update_interval = AMBIENT_CHECK_TIME + self.eventloop = self.server.get_event_loop() + self._detect_timer = self.eventloop.register_timer( + self._handle_detect_timer + ) + self._sensor_name: str = config.get("ambient_sensor", "") + + @property + def ambient(self) -> int: + return self._ambient + + @property + def sensor_name(self) -> str: + return self._sensor_name + + def update_ambient( + self, sensor_info: Dict[str, Any], eventtime: float = SAMPLE_CHECK_TIME + ) -> None: + if "temperature" not in sensor_info: + return + if eventtime < self._last_sample_time + SAMPLE_CHECK_TIME: + return + self._last_sample_time = eventtime + new_amb = int(sensor_info["temperature"] + .5) + if abs(new_amb - self._ambient) < 2: + return + self._ambient = new_amb + self._on_ambient_changed(self._ambient) + + def _handle_detect_timer(self, eventtime: float) -> float: + if "tool0" not in self.cache.temps: + self._initial_sample = -1000 + return eventtime + self.CHECK_INTERVAL + temp, target = self.cache.temps["tool0"] + if target: + self._initial_sample = -1000 + self._last_sample_time = eventtime + self._update_interval = TARGET_CHECK_TIME + return eventtime + self.CHECK_INTERVAL + if eventtime - self._last_sample_time < self._update_interval: + return eventtime + self.CHECK_INTERVAL + if self._initial_sample == -1000: + self._initial_sample = temp + self._update_interval = SAMPLE_CHECK_TIME + else: + diff = abs(temp - self._initial_sample) + if diff <= 2: + last_ambient = self._ambient + self._ambient = int((temp + self._initial_sample) / 2 + .5) + self._initial_sample = -1000 + self._last_sample_time = eventtime + self._update_interval = AMBIENT_CHECK_TIME + if last_ambient != self._ambient: + logging.debug(f"SimplyPrint: New Ambient: {self._ambient}") + self._on_ambient_changed(self._ambient) + else: + self._initial_sample = temp + self._update_interval = SAMPLE_CHECK_TIME + return eventtime + self.CHECK_INTERVAL + + def _on_ambient_changed(self, new_ambient: int) -> 
None: + self.simplyprint.save_item("ambient_temp", new_ambient) + self.simplyprint.send_sp("ambient", {"new": new_ambient}) + + def start(self) -> None: + if self._detect_timer.is_running(): + return + if "tool0" in self.cache.temps: + cur_temp = self.cache.temps["tool0"][0] + if cur_temp < self._ambient: + self._ambient = cur_temp + self._on_ambient_changed(self._ambient) + self._detect_timer.start() + + def stop(self) -> None: + self._detect_timer.stop() + self._last_sample_time = 0. + +class LayerDetect: + def __init__(self) -> None: + self._layer: int = 0 + self._layer_z: float = 0. + self._active: bool = False + self._layer_height: float = 0. + self._fl_height: float = 0. + self._layer_count: int = 99999999999 + self._check_next: bool = False + + @property + def layer(self) -> int: + return self._layer + + def update(self, new_pos: List[float]) -> None: + if not self._active or self._layer_z == new_pos[2]: + self._check_next = False + return + if not self._check_next: + # Try to avoid z-hops by skipping the first detected change + self._check_next = True + return + self._check_next = False + layer = 1 + int( + (new_pos[2] - self._fl_height) / self._layer_height + .5 + ) + self._layer = min(layer, self._layer_count) + self._layer_z = new_pos[2] + + def start(self, metadata: Dict[str, Any]) -> None: + self.reset() + lh: float = metadata.get("layer_height", 0) + flh: float = metadata.get("first_layer_height", lh) + if lh > 0.000001 and flh > 0.000001: + self._active = True + self._layer_height = lh + self._fl_height = flh + layer_count: Optional[int] = metadata.get("layer_count") + obj_height: Optional[float] = metadata.get("object_height") + if layer_count is not None: + self._layer_count = layer_count + elif obj_height is not None: + self._layer_count = int((obj_height - flh) / lh + .5) + + def resume(self) -> None: + self._active = True + + def stop(self) -> None: + self._active = False + + def reset(self) -> None: + self._active = False + self._layer = 0 + self._layer_z = 0. + self._layer_height = 0. + self._fl_height = 0. + self._layer_count = 99999999999 + self._check_next = False + + +# TODO: We need to get the URL/Port from settings in the future. 
+# Ideally we will always fetch from the localhost rather than
+# go through the reverse proxy
+FALLBACK_URL = "http://127.0.0.1:8080/?action=snapshot"
+SP_SNAPSHOT_URL = "https://api.simplyprint.io/jobs/ReceiveSnapshot"
+SP_AI_URL = "https://ai.simplyprint.io/api/v2/infer"
+
+class WebcamStream:
+    def __init__(
+        self, config: ConfigHelper, simplyprint: SimplyPrint
+    ) -> None:
+        self.server = config.get_server()
+        self.eventloop = self.server.get_event_loop()
+        self.simplyprint = simplyprint
+        self.webcam_name = config.get("webcam_name", "")
+        self.url = FALLBACK_URL
+        self.client: HttpClient = self.server.lookup_component("http_client")
+        self.cam: Optional[WebCam] = None
+        self._connected = False
+        self.ai_running = False
+        self.ai_task: Optional[asyncio.Task] = None
+        self.ai_scores: List[Any] = []
+        self.failed_ai_attempts = 0
+
+    @property
+    def connected(self) -> bool:
+        return self._connected
+
+    async def initialize_url(self) -> None:
+        wcmgr: WebcamManager = self.server.lookup_component("webcam")
+        cams = wcmgr.get_webcams()
+        if not cams:
+            # no camera configured, try the fallback url
+            return
+        if self.webcam_name and self.webcam_name in cams:
+            cam = cams[self.webcam_name]
+        else:
+            cam = list(cams.values())[0]
+        try:
+            url = await cam.get_snapshot_url(True)
+        except Exception:
+            logging.exception("Failed to retrieve webcam url")
+            return
+        self.cam = cam
+        logging.info(f"SimplyPrint Webcam URL assigned: {url}")
+        self.url = url
+
+    async def test_connection(self):
+        if not self.url.startswith("http"):
+            self._connected = False
+            return
+        headers = {"Accept": "image/jpeg"}
+        resp = await self.client.get(self.url, headers, enable_cache=False)
+        self._connected = not resp.has_error()
+
+    async def get_webcam_config(self) -> Dict[str, Any]:
+        if self.cam is None:
+            return {}
+        return self.cam.as_dict()
+
+    async def extract_image(self) -> str:
+        headers = {"Accept": "image/jpeg"}
+        resp = await self.client.get(self.url, headers, enable_cache=False)
+        resp.raise_for_status()
+        return await self.eventloop.run_in_thread(
+            self._encode_image, resp.content
+        )
+
+    def _encode_image(self, image: bytes) -> str:
+        return base64.b64encode(image).decode()
+
+    async def post_image(self, payload: Dict[str, Any]) -> None:
+        uid: Optional[str] = payload.get("id")
+        timer: Optional[int] = payload.get("timer")
+        try:
+            if uid is not None:
+                url = payload.get("endpoint", SP_SNAPSHOT_URL)
+                img = await self.extract_image()
+                headers = {
+                    "User-Agent": "Mozilla/5.0",
+                    "Content-Type": "application/x-www-form-urlencoded"
+                }
+                body = f"id={url_escape(uid)}&image={url_escape(img)}"
+                resp = await self.client.post(
+                    url, body=body, headers=headers, enable_cache=False
+                )
+                resp.raise_for_status()
+            elif timer is not None:
+                await asyncio.sleep(timer / 1000)
+                img = await self.extract_image()
+                self.simplyprint.send_sp("stream", {"base": img})
+        except asyncio.CancelledError:
+            raise
+        except Exception:
+            if not self.server.is_verbose_enabled():
+                return
+            logging.exception("SimplyPrint WebCam Stream Error")
+
+    async def _send_ai_image(self, base_image: str) -> None:
+        interval = self.simplyprint.intervals["ai"]
+        headers = {"User-Agent": "Mozilla/5.0"}
+        data = {
+            "api_key": self.simplyprint.sp_info["printer_token"],
+            "image_array": base_image,
+            "interval": interval,
+            "printer_id": self.simplyprint.sp_info["printer_id"],
+            "settings": {
+                "buffer_percent": 80,
+                "confidence": 60,
+                "buffer_length": 16
+            },
+            "scores": self.ai_scores
+        }
+        resp = await self.client.post(
headers=headers, enable_cache=False + ) + resp.raise_for_status() + self.failed_ai_attempts = 0 + resp_json = resp.json() + if isinstance(resp_json, dict): + self.ai_scores = resp_json.get("scores", self.ai_scores) + ai_result = resp_json.get("s1", [0, 0, 0]) + self.simplyprint.send_sp("ai_resp", {"ai": ai_result}) + + async def _ai_stream(self, delay: float) -> None: + if delay: + await asyncio.sleep(delay) + while self.ai_running: + interval = self.simplyprint.intervals["ai"] + try: + img = await self.extract_image() + await self._send_ai_image(img) + except asyncio.CancelledError: + raise + except Exception: + self.failed_ai_attempts += 1 + if self.failed_ai_attempts == 1: + logging.exception("SimplyPrint AI Stream Error") + elif not self.failed_ai_attempts % 10: + logging.info( + f"SimplyPrint: {self.failed_ai_attempts} consecutive " + "AI failures" + ) + delay = min(120., self.failed_ai_attempts * 5.0) + interval = self.simplyprint.intervals["ai"] + delay + await asyncio.sleep(interval) + + def reset_ai_scores(self): + self.ai_scores = [] + + def start_ai(self, delay: float = 0) -> None: + if self.ai_running: + self.stop_ai() + self.ai_running = True + self.ai_task = self.eventloop.create_task(self._ai_stream(delay)) + + def stop_ai(self) -> None: + if not self.ai_running: + return + self.ai_running = False + if self.ai_task is not None: + self.ai_task.cancel() + self.ai_task = None + +class PrintHandler: + def __init__(self, simplyprint: SimplyPrint) -> None: + self.simplyprint = simplyprint + self.server = simplyprint.server + self.eventloop = self.server.get_event_loop() + self.cache = simplyprint.cache + self.download_task: Optional[asyncio.Task] = None + self.print_ready_event: asyncio.Event = asyncio.Event() + self.download_progress: int = -1 + self.pending_file: str = "" + self.last_started: str = "" + self.sp_user = UserInfo("SimplyPrint", "") + + def download_file(self, url: str, start: bool): + coro = self._download_sp_file(url, start) + self.download_task = self.eventloop.create_task(coro) + + def cancel(self): + if ( + self.download_task is not None and + not self.download_task.done() + ): + self.download_task.cancel() + self.download_task = None + + def notify_ready(self): + self.print_ready_event.set() + + async def _download_sp_file(self, url: str, start: bool): + client: HttpClient = self.server.lookup_component("http_client") + fm: FileManager = self.server.lookup_component("file_manager") + gc_path = pathlib.Path(fm.get_directory()) + if not gc_path.is_dir(): + logging.debug(f"GCode Path Not Registered: {gc_path}") + self.simplyprint.send_sp( + "file_progress", + {"state": "error", "message": "GCode Path not Registered"} + ) + return + accept = "text/plain,application/octet-stream" + self._on_download_progress(0, 0, 0) + try: + logging.debug(f"Downloading URL: {url}") + tmp_path = await client.download_file( + url, accept, progress_callback=self._on_download_progress, + request_timeout=3600.
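+ # the one hour request timeout accommodates large gcode downloads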
+ ) + except asyncio.TimeoutError: + raise + except Exception: + logging.exception(f"Failed to download file: {url}") + self.simplyprint.send_sp( + "file_progress", + {"state": "error", "message": "Network Error"} + ) + return + finally: + self.download_progress = -1 + logging.debug("Simplyprint: Download Complete") + filename = pathlib.PurePath(tmp_path.name) + fpath = gc_path.joinpath(filename.name) + if self.cache.job_info.get("filename", "") == str(fpath): + # This is an attempt to overwrite a print in progress, make a copy + count = 0 + while fpath.exists(): + name = f"{filename.stem}_copy_{count}{filename.suffix}" + fpath = gc_path.joinpath(name) + count += 1 + args: Dict[str, Any] = { + "filename": fpath.name, + "tmp_file_path": str(tmp_path), + } + state = "pending" + if self.cache.state == "operational": + args["print"] = "true" if start else "false" + try: + ret = await fm.finalize_upload(args) + except self.server.error as e: + logging.exception("GCode Finalization Failed") + self.simplyprint.send_sp( + "file_progress", + {"state": "error", "message": f"GCode Finalization Failed: {e}"} + ) + return + self.pending_file = fpath.name + if ret.get("print_started", False): + state = "started" + self.last_started = self.pending_file + self.pending_file = "" + elif not start and await self._check_can_print(): + state = "ready" + if state == "pending": + self.print_ready_event.clear() + try: + await asyncio.wait_for(self.print_ready_event.wait(), 10.) + except asyncio.TimeoutError: + self.pending_file = "" + self.simplyprint.send_sp( + "file_progress", + {"state": "error", "message": "Pending print timed out"} + ) + return + else: + if start: + await self.start_print() + return + state = "ready" + self.simplyprint.send_sp("file_progress", {"state": state}) + + async def start_print(self) -> None: + if not self.pending_file: + return + pending = self.pending_file + self.pending_file = "" + kapi: KlippyAPI = self.server.lookup_component("klippy_apis") + data = {"state": "started"} + try: + await kapi.start_print(pending, user=self.sp_user) + except Exception: + logging.exception("Print Failed to start") + data["state"] = "error" + data["message"] = "Failed to start print" + else: + self.last_started = pending + self.simplyprint.send_sp("file_progress", data) + + async def _check_can_print(self) -> bool: + kconn: KlippyConnection = self.server.lookup_component("klippy_connection") + if kconn.state != KlippyState.READY: + return False + kapi: KlippyAPI = self.server.lookup_component("klippy_apis") + try: + result = await kapi.query_objects({"print_stats": None}) + except Exception: + # Klippy not connected + return False + if 'print_stats' not in result: + return False + state: str = result['print_stats']['state'] + if state in ["printing", "paused"]: + return False + return True + + def _on_download_progress(self, percent: int, size: int, recd: int) -> None: + if percent == self.download_progress: + return + self.download_progress = percent + self.simplyprint.send_sp( + "file_progress", {"state": "downloading", "percent": percent} + ) + +class ProtoLogger: + def __init__(self, config: ConfigHelper) -> None: + server = config.get_server() + self._logger: Optional[logging.Logger] = None + if not server.is_verbose_enabled(): + return + fm: FileManager = server.lookup_component("file_manager") + log_root = fm.get_directory("logs") + if log_root: + log_parent = pathlib.Path(log_root) + else: + log_parent = pathlib.Path(tempfile.gettempdir()) + log_path = log_parent.joinpath("simplyprint.log") +
queue: SimpleQueue = SimpleQueue() + self.queue_handler = LocalQueueHandler(queue) + self._logger = logging.getLogger("simplyprint") + self._logger.addHandler(self.queue_handler) + self._logger.propagate = False + file_hdlr = logging.handlers.TimedRotatingFileHandler( + log_path, when='midnight', backupCount=2) + formatter = logging.Formatter( + '%(asctime)s [%(funcName)s()] - %(message)s') + file_hdlr.setFormatter(formatter) + self.qlistner = logging.handlers.QueueListener(queue, file_hdlr) + self.qlistner.start() + + def info(self, msg: str) -> None: + if self._logger is None: + return + self._logger.info(msg) + + def debug(self, msg: str) -> None: + if self._logger is None: + return + self._logger.debug(msg) + + def warning(self, msg: str) -> None: + if self._logger is None: + return + self._logger.warning(msg) + + def exception(self, msg: str) -> None: + if self._logger is None: + return + self._logger.exception(msg) + + def close(self): + if self._logger is None: + return + self._logger.removeHandler(self.queue_handler) + self.qlistner.stop() + self._logger = None + +def load_component(config: ConfigHelper) -> SimplyPrint: + return SimplyPrint(config) diff --git a/moonraker/components/spoolman.py b/moonraker/components/spoolman.py new file mode 100644 index 0000000..d8a3e1d --- /dev/null +++ b/moonraker/components/spoolman.py @@ -0,0 +1,424 @@ +# Integration with Spoolman +# +# Copyright (C) 2023 Daniel Hultgren +# +# This file may be distributed under the terms of the GNU GPLv3 license. + +from __future__ import annotations +import asyncio +import logging +import re +import contextlib +import tornado.websocket as tornado_ws +from ..common import RequestType, HistoryFieldData +from ..utils import json_wrapper as jsonw +from typing import ( + TYPE_CHECKING, + List, + Dict, + Any, + Optional, + Union, + cast +) + +if TYPE_CHECKING: + from ..confighelper import ConfigHelper + from ..common import WebRequest + from .http_client import HttpClient, HttpResponse + from .database import MoonrakerDatabase + from .announcements import Announcements + from .klippy_apis import KlippyAPI as APIComp + from .history import History + from tornado.websocket import WebSocketClientConnection + +DB_NAMESPACE = "moonraker" +ACTIVE_SPOOL_KEY = "spoolman.spool_id" + +class SpoolManager: + def __init__(self, config: ConfigHelper): + self.server = config.get_server() + self.eventloop = self.server.get_event_loop() + self._get_spoolman_urls(config) + self.sync_rate_seconds = config.getint("sync_rate", default=5, minval=1) + self.report_timer = self.eventloop.register_timer(self.report_extrusion) + self.pending_reports: Dict[int, float] = {} + self.spoolman_ws: Optional[WebSocketClientConnection] = None + self.connection_task: Optional[asyncio.Task] = None + self.spool_check_task: Optional[asyncio.Task] = None + self.ws_connected: bool = False + self.reconnect_delay: float = 2. 
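+ # remaining state holds defaults; the active spool id is restored from the database in component_init()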
+ self.is_closing: bool = False + self.spool_id: Optional[int] = None + self._error_logged: bool = False + self._highest_epos: float = 0 + self._current_extruder: str = "extruder" + self.spool_history = HistoryFieldData( + "spool_ids", "spoolman", "Spool IDs used", "collect", + reset_callback=self._on_history_reset + ) + history: History = self.server.lookup_component("history") + history.register_auxiliary_field(self.spool_history) + self.klippy_apis: APIComp = self.server.lookup_component("klippy_apis") + self.http_client: HttpClient = self.server.lookup_component("http_client") + self.database: MoonrakerDatabase = self.server.lookup_component("database") + announcements: Announcements = self.server.lookup_component("announcements") + announcements.register_feed("spoolman") + self._register_notifications() + self._register_listeners() + self._register_endpoints() + self.server.register_remote_method( + "spoolman_set_active_spool", self.set_active_spool + ) + + def _get_spoolman_urls(self, config: ConfigHelper) -> None: + orig_url = config.get('server') + url_match = re.match(r"(?i:(?P<scheme>https?)://)?(?P<host>.+)", orig_url) + if url_match is None: + raise config.error( + f"Section [spoolman], Option server: {orig_url}: Invalid URL format" + ) + scheme = url_match["scheme"] or "http" + host = url_match["host"].rstrip("/") + ws_scheme = "wss" if scheme == "https" else "ws" + self.spoolman_url = f"{scheme}://{host}/api" + self.ws_url = f"{ws_scheme}://{host}/api/v1/spool" + + def _register_notifications(self): + self.server.register_notification("spoolman:active_spool_set") + self.server.register_notification("spoolman:spoolman_status_changed") + + def _register_listeners(self): + self.server.register_event_handler( + "server:klippy_ready", self._handle_klippy_ready + ) + + def _register_endpoints(self): + self.server.register_endpoint( + "/server/spoolman/spool_id", + RequestType.GET | RequestType.POST, + self._handle_spool_id_request, + ) + self.server.register_endpoint( + "/server/spoolman/proxy", + RequestType.POST, + self._proxy_spoolman_request, + ) + self.server.register_endpoint( + "/server/spoolman/status", + RequestType.GET, + self._handle_status_request, + ) + + def _on_history_reset(self) -> List[int]: + if self.spool_id is None: + return [] + return [self.spool_id] + + async def component_init(self) -> None: + self.spool_id = await self.database.get_item( + DB_NAMESPACE, ACTIVE_SPOOL_KEY, None + ) + self.connection_task = self.eventloop.create_task(self._connect_websocket()) + + async def _connect_websocket(self) -> None: + log_connect: bool = True + err_list: List[Exception] = [] + while not self.is_closing: + if log_connect: + logging.info(f"Connecting To Spoolman: {self.ws_url}") + log_connect = False + try: + self.spoolman_ws = await tornado_ws.websocket_connect( + self.ws_url, + connect_timeout=5., + ping_interval=20., + ping_timeout=60. + ) + setattr(self.spoolman_ws, "on_ping", self._on_ws_ping) + cur_time = self.eventloop.get_loop_time() + self._last_ping_received = cur_time + except asyncio.CancelledError: + raise + except Exception as e: + if len(err_list) < 10: + # Allow up to 10 unique errors.
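+ # log each distinct connection error once; repeats of the same error are skipped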
+ for err in err_list: + if type(err) is type(e) and err.args == e.args: + break + else: + err_list.append(e) + verbose = self.server.is_verbose_enabled() + if verbose: + logging.exception("Failed to connect to Spoolman") + self.server.add_log_rollover_item( + "spoolman_connect", f"Failed to Connect to spoolman: {e}", + not verbose + ) + else: + err_list = [] + self.ws_connected = True + self._error_logged = False + self.report_timer.start() + self.server.add_log_rollover_item( + "spoolman_connect", "Connected to Spoolman Spool Manager" + ) + if self.spool_id is not None: + self._cancel_spool_check_task() + coro = self._check_spool_deleted() + self.spool_check_task = self.eventloop.create_task(coro) + self._send_status_notification() + await self._read_messages() + log_connect = True + if not self.is_closing: + await asyncio.sleep(self.reconnect_delay) + + async def _read_messages(self) -> None: + message: Union[str, bytes, None] + while self.spoolman_ws is not None: + message = await self.spoolman_ws.read_message() + if isinstance(message, str): + self._decode_message(message) + elif message is None: + self.report_timer.stop() + self.ws_connected = False + cur_time = self.eventloop.get_loop_time() + ping_time: float = cur_time - self._last_ping_received + reason = code = None + if self.spoolman_ws is not None: + reason = self.spoolman_ws.close_reason + code = self.spoolman_ws.close_code + logging.info( + f"Spoolman Disconnected - Code: {code}, Reason: {reason}, " + f"Server Ping Time Elapsed: {ping_time}" + ) + self.spoolman_ws = None + if not self.is_closing: + self._send_status_notification() + break + + def _decode_message(self, message: str) -> None: + event: Dict[str, Any] = jsonw.loads(message) + if event.get("resource") != "spool": + return + if self.spool_id is not None and event.get("type") == "deleted": + payload: Dict[str, Any] = event.get("payload", {}) + if payload.get("id") == self.spool_id: + self.pending_reports.pop(self.spool_id, None) + self.set_active_spool(None) + + def _cancel_spool_check_task(self) -> None: + if self.spool_check_task is None or self.spool_check_task.done(): + return + self.spool_check_task.cancel() + + async def _check_spool_deleted(self) -> None: + if self.spool_id is not None: + response = await self.http_client.get( + f"{self.spoolman_url}/v1/spool/{self.spool_id}", + connect_timeout=1., request_timeout=2. 
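+ # short timeouts keep this one-shot spool existence check responsive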
+ ) + if response.status_code == 404: + logging.info(f"Spool ID {self.spool_id} not found, setting to None") + self.pending_reports.pop(self.spool_id, None) + self.set_active_spool(None) + elif response.has_error(): + err_msg = self._get_response_error(response) + logging.info(f"Attempt to check spool status failed: {err_msg}") + else: + logging.info(f"Found Spool ID {self.spool_id} on spoolman instance") + self.spool_check_task = None + + def connected(self) -> bool: + return self.ws_connected + + def _on_ws_ping(self, data: bytes = b"") -> None: + self._last_ping_received = self.eventloop.get_loop_time() + + async def _handle_klippy_ready(self) -> None: + result: Dict[str, Dict[str, Any]] + result = await self.klippy_apis.subscribe_objects( + {"toolhead": ["position", "extruder"]}, self._handle_status_update, {} + ) + toolhead = result.get("toolhead", {}) + self._current_extruder = toolhead.get("extruder", "extruder") + initial_e_pos = toolhead.get("position", [None]*4)[3] + logging.debug(f"Initial epos: {initial_e_pos}") + if initial_e_pos is not None: + self._highest_epos = initial_e_pos + else: + logging.error("Spoolman integration unable to subscribe to epos") + raise self.server.error("Unable to subscribe to e position") + + def _get_response_error(self, response: HttpResponse) -> str: + err_msg = f"HTTP error: {response.status_code} {response.error}" + with contextlib.suppress(Exception): + msg: Optional[str] = cast(dict, response.json())["message"] + err_msg += f", Spoolman message: {msg}" + return err_msg + + def _handle_status_update(self, status: Dict[str, Any], _: float) -> None: + toolhead: Optional[Dict[str, Any]] = status.get("toolhead") + if toolhead is None: + return + epos: float = toolhead.get("position", [0, 0, 0, self._highest_epos])[3] + extr = toolhead.get("extruder", self._current_extruder) + if extr != self._current_extruder: + self._highest_epos = epos + self._current_extruder = extr + elif epos > self._highest_epos: + if self.spool_id is not None: + self._add_extrusion(self.spool_id, epos - self._highest_epos) + self._highest_epos = epos + + def _add_extrusion(self, spool_id: int, used_length: float) -> None: + if spool_id in self.pending_reports: + self.pending_reports[spool_id] += used_length + else: + self.pending_reports[spool_id] = used_length + + def set_active_spool(self, spool_id: Union[int, None]) -> None: + assert spool_id is None or isinstance(spool_id, int) + if self.spool_id == spool_id: + logging.info(f"Spool ID already set to: {spool_id}") + return + self.spool_history.tracker.update(spool_id) + self.spool_id = spool_id + self.database.insert_item(DB_NAMESPACE, ACTIVE_SPOOL_KEY, spool_id) + self.server.send_event( + "spoolman:active_spool_set", {"spool_id": spool_id} + ) + logging.info(f"Setting active spool to: {spool_id}") + + async def report_extrusion(self, eventtime: float) -> float: + if not self.ws_connected: + return eventtime + self.sync_rate_seconds + pending_reports = self.pending_reports + self.pending_reports = {} + for spool_id, used_length in pending_reports.items(): + if not self.ws_connected: + self._add_extrusion(spool_id, used_length) + continue + logging.debug( + f"Sending spool usage: ID: {spool_id}, Length: {used_length:.3f}mm" + ) + response = await self.http_client.request( + method="PUT", + url=f"{self.spoolman_url}/v1/spool/{spool_id}/use", + body={"use_length": used_length} + ) + if response.has_error(): + if response.status_code == 404: + # Since the spool is deleted we can remove any pending reports + # added while 
waiting for the request + self.pending_reports.pop(spool_id, None) + if spool_id == self.spool_id: + logging.info(f"Spool ID {spool_id} not found, setting to None") + self.set_active_spool(None) + else: + if not self._error_logged: + error_msg = self._get_response_error(response) + self._error_logged = True + logging.info( + f"Failed to update extrusion for spool id {spool_id}, " + f"received {error_msg}" + ) + # Add missed reports back to pending reports for the next cycle + self._add_extrusion(spool_id, used_length) + continue + self._error_logged = False + return self.eventloop.get_loop_time() + self.sync_rate_seconds + + async def _handle_spool_id_request(self, web_request: WebRequest): + if web_request.get_request_type() == RequestType.POST: + spool_id = web_request.get_int("spool_id", None) + self.set_active_spool(spool_id) + # For GET requests we will simply return the spool_id + return {"spool_id": self.spool_id} + + async def _proxy_spoolman_request(self, web_request: WebRequest): + method = web_request.get_str("request_method") + path = web_request.get_str("path") + query = web_request.get_str("query", None) + body = web_request.get("body", None) + use_v2_response = web_request.get_boolean("use_v2_response", False) + if method not in {"GET", "POST", "PUT", "PATCH", "DELETE"}: + raise self.server.error(f"Invalid HTTP method: {method}") + if body is not None and method == "GET": + raise self.server.error("GET requests cannot have a body") + if len(path) < 4 or path[:4] != "/v1/": + raise self.server.error( + "Invalid path, must start with the API version, e.g. /v1" + ) + query = f"?{query}" if query is not None else "" + full_url = f"{self.spoolman_url}{path}{query}" + if not self.ws_connected: + if not use_v2_response: + raise self.server.error("Spoolman server not available", 503) + return { + "response": None, + "error": { + "status_code": 503, + "message": "Spoolman server not available" + } + } + logging.debug(f"Proxying {method} request to {full_url}") + response = await self.http_client.request( + method=method, + url=full_url, + body=body, + ) + if not use_v2_response: + response.raise_for_status() + return response.json() + if response.has_error(): + msg: str = str(response.error or "") + with contextlib.suppress(Exception): + spoolman_msg = cast(dict, response.json()).get("message", msg) + msg = spoolman_msg + return { + "response": None, + "error": { + "status_code": response.status_code, + "message": msg + } + } + else: + return { + "response": response.json(), + "response_headers": dict(response.headers.items()), + "error": None + } + + async def _handle_status_request(self, web_request: WebRequest) -> Dict[str, Any]: + pending: List[Dict[str, Any]] = [ + {"spool_id": sid, "filament_used": used} for sid, used in + self.pending_reports.items() + ] + return { + "spoolman_connected": self.ws_connected, + "pending_reports": pending, + "spool_id": self.spool_id + } + + def _send_status_notification(self) -> None: + self.server.send_event( + "spoolman:spoolman_status_changed", + {"spoolman_connected": self.ws_connected} + ) + + async def close(self): + self.is_closing = True + self.report_timer.stop() + if self.spoolman_ws is not None: + self.spoolman_ws.close(1001, "Moonraker Shutdown") + self._cancel_spool_check_task() + if self.connection_task is None or self.connection_task.done(): + return + try: + await asyncio.wait_for(self.connection_task, 2.) 
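+ # give the connect task two seconds to unwind before shutdown continues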
+ except asyncio.TimeoutError: + pass + +def load_component(config: ConfigHelper) -> SpoolManager: + return SpoolManager(config) diff --git a/moonraker/components/template.py b/moonraker/components/template.py index 796abbf..df602eb 100644 --- a/moonraker/components/template.py +++ b/moonraker/components/template.py @@ -5,8 +5,10 @@ # This file may be distributed under the terms of the GNU GPLv3 license. from __future__ import annotations import logging +import asyncio import jinja2 -import json +from ..utils import json_wrapper as jsonw +from ..common import RenderableTemplate # Annotation imports from typing import ( @@ -16,8 +18,8 @@ from typing import ( ) if TYPE_CHECKING: - from moonraker import Server - from confighelper import ConfigHelper + from ..server import Server + from ..confighelper import ConfigHelper from .secrets import Secrets class TemplateFactory: @@ -25,12 +27,16 @@ class TemplateFactory: self.server = config.get_server() secrets: Secrets = self.server.load_component(config, 'secrets') self.jenv = jinja2.Environment('{%', '%}', '{', '}') - self.async_env = jinja2.Environment('{%', '%}', '{', '}', - enable_async=True) + self.async_env = jinja2.Environment( + '{%', '%}', '{', '}', enable_async=True + ) + self.ui_env = jinja2.Environment(enable_async=True) self.jenv.add_extension("jinja2.ext.do") - self.jenv.filters['fromjson'] = json.loads + self.jenv.filters['fromjson'] = jsonw.loads self.async_env.add_extension("jinja2.ext.do") - self.async_env.filters['fromjson'] = json.loads + self.async_env.filters['fromjson'] = jsonw.loads + self.ui_env.add_extension("jinja2.ext.do") + self.ui_env.filters['fromjson'] = jsonw.loads self.add_environment_global('raise_error', self._raise_error) self.add_environment_global('secrets', secrets) @@ -56,8 +62,16 @@ class TemplateFactory: raise return JinjaTemplate(source, self.server, template, is_async) + def create_ui_template(self, source: str) -> JinjaTemplate: + try: + template = self.ui_env.from_string(source) + except Exception: + logging.exception(f"Error creating template from source:\n{source}") + raise + return JinjaTemplate(source, self.server, template, True) -class JinjaTemplate: + +class JinjaTemplate(RenderableTemplate): def __init__(self, source: str, server: Server, @@ -77,10 +91,21 @@ class JinjaTemplate: raise self.server.error( "Cannot render async templates with the render() method" ", use render_async()") - return self.template.render(context).strip() + try: + return self.template.render(context).strip() + except Exception as e: + msg = "Error rendering Jinja2 Template" + if self.server.is_configured(): + raise self.server.error(msg, 500) from e + raise self.server.config_error(msg) from e async def render_async(self, context: Dict[str, Any] = {}) -> str: - ret = await self.template.render_async(context) + try: + ret = await self.template.render_async(context) + except asyncio.CancelledError: + raise + except Exception as e: + raise self.server.error("Error rendering Jinja2 Template", 500) from e return ret.strip() def load_component(config: ConfigHelper) -> TemplateFactory: diff --git a/moonraker/components/update_manager/__init__.py b/moonraker/components/update_manager/__init__.py index f322adc..0dd86ed 100644 --- a/moonraker/components/update_manager/__init__.py +++ b/moonraker/components/update_manager/__init__.py @@ -9,7 +9,7 @@ from .
import update_manager as um from typing import TYPE_CHECKING if TYPE_CHECKING: - from confighelper import ConfigHelper + from ...confighelper import ConfigHelper def load_component(config: ConfigHelper) -> um.UpdateManager: return um.load_component(config) diff --git a/moonraker/components/update_manager/app_deploy.py b/moonraker/components/update_manager/app_deploy.py index 334ed2a..acb8d33 100644 --- a/moonraker/components/update_manager/app_deploy.py +++ b/moonraker/components/update_manager/app_deploy.py @@ -5,11 +5,18 @@ # This file may be distributed under the terms of the GNU GPLv3 license. from __future__ import annotations +import os import pathlib -import shutil import hashlib import logging +import re +import distro +import asyncio +import importlib +from .common import AppType, Channel from .base_deploy import BaseDeploy +from ...utils import pip_utils +from ...utils import json_wrapper as jsonw # Annotation imports from typing import ( @@ -19,67 +26,145 @@ from typing import ( Union, Dict, List, + Tuple ) if TYPE_CHECKING: - from confighelper import ConfigHelper + from ...confighelper import ConfigHelper + from ..klippy_connection import KlippyConnection as Klippy from .update_manager import CommandHelper from ..machine import Machine + from ..file_manager.file_manager import FileManager + +MIN_PIP_VERSION = (23, 3, 2) SUPPORTED_CHANNELS = { - "zip": ["stable", "beta"], - "git_repo": ["dev", "beta"] + AppType.WEB: [Channel.STABLE, Channel.BETA], + AppType.ZIP: [Channel.STABLE, Channel.BETA], + AppType.GIT_REPO: list(Channel) } TYPE_TO_CHANNEL = { - "zip": "stable", - "zip_beta": "beta", - "git_repo": "dev" + AppType.WEB: Channel.STABLE, + AppType.ZIP: Channel.STABLE, + AppType.GIT_REPO: Channel.DEV } -class AppDeploy(BaseDeploy): - def __init__(self, config: ConfigHelper, cmd_helper: CommandHelper) -> None: - super().__init__(config, cmd_helper, prefix="Application") - self.config = config - self.debug = self.cmd_helper.is_debug_enabled() - type_choices = list(TYPE_TO_CHANNEL.keys()) - self.type = config.get('type').lower() - if self.type not in type_choices: - raise config.error( - f"Config Error: Section [{config.get_name()}], Option " - f"'type: {self.type}': value must be one " - f"of the following choices: {type_choices}" - ) - self.channel = config.get( - "channel", TYPE_TO_CHANNEL[self.type] - ) - if self.type == "zip_beta": - self.server.add_warning( - f"Config Section [{config.get_name()}], Option 'type: " - "zip_beta', value 'zip_beta' is deprecated. 
Set 'type' " - "to zip and 'channel' to 'beta'") - self.type = "zip" - self.path = pathlib.Path( - config.get('path')).expanduser().resolve() - executable = config.get('env', None) - if self.channel not in SUPPORTED_CHANNELS[self.type]: - raise config.error( - f"Invalid Channel '{self.channel}' for config " - f"section [{config.get_name()}], type: {self.type}") - self._verify_path(config, 'path', self.path) - self.executable: Optional[pathlib.Path] = None - self.pip_exe: Optional[pathlib.Path] = None - self.venv_args: Optional[str] = None - if executable is not None: - self.executable = pathlib.Path(executable).expanduser() - self.pip_exe = self.executable.parent.joinpath("pip") - if not self.pip_exe.exists(): - self.server.add_warning( - f"Update Manger {self.name}: Unable to locate pip " - "executable") - self._verify_path(config, 'env', self.executable) - self.venv_args = config.get('venv_args', None) +DISTRO_ALIASES = [distro.id()] +DISTRO_ALIASES.extend(distro.like().split()) +class AppDeploy(BaseDeploy): + def __init__( + self, config: ConfigHelper, cmd_helper: CommandHelper, prefix: str + ) -> None: + super().__init__(config, cmd_helper, prefix=prefix) + self.config = config + type_choices = list(TYPE_TO_CHANNEL.keys()) + self.type = AppType.from_string(config.get('type')) + if self.type not in type_choices: + str_types = [str(t) for t in type_choices] + raise config.error( + f"Section [{config.get_name()}], Option 'type: {self.type}': " + f"value must be one of the following choices: {str_types}" + ) + self.channel = Channel.from_string( + config.get("channel", str(TYPE_TO_CHANNEL[self.type])) + ) + self.channel_invalid: bool = False + if self.channel not in SUPPORTED_CHANNELS[self.type]: + str_channels = [str(c) for c in SUPPORTED_CHANNELS[self.type]] + self.channel_invalid = True + invalid_channel = self.channel + self.channel = TYPE_TO_CHANNEL[self.type] + self.server.add_warning( + f"[{config.get_name()}]: Invalid value '{invalid_channel}' for " + f"option 'channel'. Type '{self.type}' supports the following " + f"channels: {str_channels}. Falling back to channel '{self.channel}'" + ) + self._is_valid: bool = False + self.virtualenv: Optional[pathlib.Path] = None + self.py_exec: Optional[pathlib.Path] = None + self.pip_cmd: Optional[str] = None + self.pip_version: Tuple[int, ...] 
= tuple() + self.venv_args: Optional[str] = None + self.npm_pkg_json: Optional[pathlib.Path] = None + self.python_reqs: Optional[pathlib.Path] = None + self.install_script: Optional[pathlib.Path] = None + self.system_deps_json: Optional[pathlib.Path] = None self.info_tags: List[str] = config.getlist("info_tags", []) self.managed_services: List[str] = [] + + def _configure_path(self, config: ConfigHelper, reserve: bool = True) -> None: + self.path = pathlib.Path(config.get('path')).expanduser().resolve() + self._verify_path(config, 'path', self.path, check_file=False) + if ( + reserve and self.name not in ["moonraker", "klipper"] + and not self.path.joinpath(".writeable").is_file() + ): + fm: FileManager = self.server.lookup_component("file_manager") + fm.add_reserved_path(f"update_manager {self.name}", self.path) + + def _configure_virtualenv(self, config: ConfigHelper) -> None: + venv_path: Optional[pathlib.Path] = None + if config.has_option("virtualenv"): + venv_path = pathlib.Path(config.get("virtualenv")).expanduser() + if not venv_path.is_absolute(): + venv_path = self.path.joinpath(venv_path) + self._verify_path(config, 'virtualenv', venv_path, check_file=False) + elif config.has_option("env"): + # Deprecated + if self.name != "klipper": + self.log_info("Option 'env' is deprecated, use 'virtualenv' instead.") + py_exec = pathlib.Path(config.get("env")).expanduser() + self._verify_path(config, 'env', py_exec, check_exe=True) + venv_path = py_exec.expanduser().parent.parent.resolve() + if venv_path is not None: + act_path = venv_path.joinpath("bin/activate") + if not act_path.is_file(): + raise config.error( + f"[{config.get_name()}]: Invalid virtualenv at path {venv_path}. " + f"Verify that the 'virtualenv' option is set to a valid " + "virtualenv path." + ) + self.py_exec = venv_path.joinpath("bin/python") + if not (self.py_exec.is_file() and os.access(self.py_exec, os.X_OK)): + raise config.error( + f"[{config.get_name()}]: Invalid python executable at " + f"{self.py_exec}. Verify that the 'virtualenv' option is set " + "to a valid virtualenv path." 
+ ) + self.log_info(f"Detected virtualenv: {venv_path}") + self.virtualenv = venv_path + pip_exe = self.virtualenv.joinpath("bin/pip") + if pip_exe.is_file(): + self.pip_cmd = f"{self.py_exec} -m pip" + else: + self.log_info("Unable to locate pip executable") + self.venv_args = config.get('venv_args', None) + self.pip_env_vars = config.getdict("pip_environment_variables", None) + + def _configure_dependencies( + self, config: ConfigHelper, node_only: bool = False + ) -> None: + if config.getboolean("enable_node_updates", False): + self.npm_pkg_json = self.path.joinpath("package-lock.json") + self._verify_path(config, 'enable_node_updates', self.npm_pkg_json) + if node_only: + return + if self.py_exec is not None: + self.python_reqs = self.path.joinpath(config.get("requirements")) + self._verify_path(config, 'requirements', self.python_reqs) + deps = config.get("system_dependencies", None) + if deps is not None: + self.system_deps_json = self.path.joinpath(deps).resolve() + self._verify_path(config, 'system_dependencies', self.system_deps_json) + else: + # Fall back on deprecated "install_script" option if dependencies file + # not present + install_script = config.get('install_script', None) + if install_script is not None: + self.install_script = self.path.joinpath(install_script).resolve() + self._verify_path(config, 'install_script', self.install_script) + + def _configure_managed_services(self, config: ConfigHelper) -> None: svc_default = [] if config.getboolean("is_system_service", True): svc_default.append(self.name) @@ -87,11 +172,25 @@ class AppDeploy(BaseDeploy): services: List[str] = config.getlist( "managed_services", svc_default, separator=None ) + if self.name in services: + machine: Machine = self.server.lookup_component("machine") + data_path: str = self.server.get_app_args()["data_path"] + asvc = pathlib.Path(data_path).joinpath("moonraker.asvc") + if not machine.is_service_allowed(self.name): + self.server.add_warning( + f"[{config.get_name()}]: Moonraker is not permitted to " + f"restart service '{self.name}'. To enable management " + f"of this service add {self.name} to the bottom of the " + f"file {asvc}. To disable management for this service " + "set 'is_system_service: False' in the configuration " + "for this section." + ) + services.clear() for svc in services: if svc not in svc_choices: raw = " ".join(services) self.server.add_warning( - f"[{config.get_name()}]: Option 'restart_action: {raw}' " + f"[{config.get_name()}]: Option 'managed_services: {raw}' " f"contains an invalid value '{svc}'. All values must be " f"one of the following choices: {svc_choices}" ) @@ -99,56 +198,38 @@ class AppDeploy(BaseDeploy): for svc in svc_choices: if svc in services and svc not in self.managed_services: self.managed_services.append(svc) - logging.debug( - f"Extension {self.name} managed services: {self.managed_services}" + self.log_debug( + f"Managed services: {self.managed_services}" ) - # We need to fetch all potential options for an Application. Not - # all options apply to each subtype, however we can't limit the - # options in children if we want to switch between channels and - # satisfy the confighelper's requirements. 
- self.moved_origin: Optional[str] = config.get('moved_origin', None) - self.origin: str = config.get('origin') - self.primary_branch = config.get("primary_branch", "master") - self.npm_pkg_json: Optional[pathlib.Path] = None - if config.getboolean("enable_node_updates", False): - self.npm_pkg_json = self.path.joinpath("package-lock.json") - self._verify_path(config, 'enable_node_updates', self.npm_pkg_json) - self.python_reqs: Optional[pathlib.Path] = None - if self.executable is not None: - self.python_reqs = self.path.joinpath(config.get("requirements")) - self._verify_path(config, 'requirements', self.python_reqs) - self.install_script: Optional[pathlib.Path] = None - install_script = config.get('install_script', None) - if install_script is not None: - self.install_script = self.path.joinpath(install_script).resolve() - self._verify_path(config, 'install_script', self.install_script) - @staticmethod - def _is_git_repo(app_path: Union[str, pathlib.Path]) -> bool: - if isinstance(app_path, str): - app_path = pathlib.Path(app_path).expanduser() - return app_path.joinpath('.git').exists() + def _verify_path( + self, + config: ConfigHelper, + option: str, + path: pathlib.Path, + check_file: bool = True, + check_exe: bool = False + ) -> None: + base_msg = ( + f"Invalid path for option `{option}` in section " + f"[{config.get_name()}]: Path `{path}`" + ) + if not path.exists(): + raise config.error(f"{base_msg} does not exist") + if check_file and not path.is_file(): + raise config.error(f"{base_msg} is not a file") + if check_exe and not os.access(path, os.X_OK): + raise config.error(f"{base_msg} is not executable") async def initialize(self) -> Dict[str, Any]: storage = await super().initialize() - self.need_channel_update = storage.get('need_channel_upate', False) - self._is_valid = storage.get('is_valid', False) + self.pip_version = tuple(storage.get("pip_version", [])) + if self.pip_version: + ver_str = ".".join([str(part) for part in self.pip_version]) + self.log_info(f"Stored pip version: {ver_str}") return storage - def _verify_path(self, - config: ConfigHelper, - option: str, - file_path: pathlib.Path - ) -> None: - if not file_path.exists(): - raise config.error( - f"Invalid path for option `{option}` in section " - f"[{config.get_name()}]: Path `{file_path}` does not exist") - - def check_need_channel_swap(self) -> bool: - return self.need_channel_update - - def get_configured_type(self) -> str: + def get_configured_type(self) -> AppType: return self.type def check_same_paths(self, @@ -161,11 +242,13 @@ class AppDeploy(BaseDeploy): executable = pathlib.Path(executable) app_path = app_path.expanduser() executable = executable.expanduser() - if self.executable is None: + if self.py_exec is None: return False try: - return self.path.samefile(app_path) and \ - self.executable.samefile(executable) + return ( + self.path.samefile(app_path) and + self.py_exec.samefile(executable) + ) except Exception: return False @@ -175,12 +258,10 @@ class AppDeploy(BaseDeploy): ) -> None: raise NotImplementedError - async def reinstall(self): - raise NotImplementedError - - async def restart_service(self): + async def restart_service(self) -> None: if not self.managed_services: return + machine: Machine = self.server.lookup_component("machine") is_full = self.cmd_helper.is_full_update() for svc in self.managed_services: if is_full and svc != self.name: @@ -192,36 +273,89 @@ class AppDeploy(BaseDeploy): if svc == "moonraker": # Launch restart async so the request can return # before the server restarts 
- event_loop = self.server.get_event_loop() - event_loop.delay_callback(.1, self._do_restart, svc) + machine.restart_moonraker_service() else: - await self._do_restart(svc) + if svc == "klipper": + kconn: Klippy = self.server.lookup_component("klippy_connection") + svc = kconn.unit_name + await machine.do_service_action("restart", svc) - async def _do_restart(self, svc_name: str) -> None: - machine: Machine = self.server.lookup_component("machine") + async def _read_system_dependencies(self) -> List[str]: + eventloop = self.server.get_event_loop() + if self.system_deps_json is not None: + deps_json = self.system_deps_json + try: + ret = await eventloop.run_in_thread(deps_json.read_bytes) + dep_info: Dict[str, List[str]] = jsonw.loads(ret) + except asyncio.CancelledError: + raise + except Exception: + logging.exception(f"Error reading system deps: {deps_json}") + return [] + for distro_id in DISTRO_ALIASES: + if distro_id in dep_info: + if not dep_info[distro_id]: + self.log_info( + f"Dependency file '{deps_json.name}' contains an empty " + f"package definition for linux distro '{distro_id}'" + ) + return dep_info[distro_id] + else: + self.log_info( + f"Dependency file '{deps_json.name}' has no package definition " + f"for linux distro '{DISTRO_ALIASES[0]}'" + ) + return [] + # Fall back on install script if configured + if self.install_script is None: + return [] + # Open the install script and read it + inst_path: pathlib.Path = self.install_script + if not inst_path.is_file(): + self.log_info(f"Failed to open install script: {inst_path}") + return [] try: - await machine.do_service_action("restart", svc_name) + data = await eventloop.run_in_thread(inst_path.read_text) + except asyncio.CancelledError: + raise except Exception: - if svc_name == "moonraker": - # We will always get an error when restarting moonraker - # from within the child process, so ignore it - return - raise self.log_exc("Error restarting service") + logging.exception(f"Error reading install script: {inst_path}") + return [] + plines: List[str] = re.findall(r'PKGLIST="(.*)"', data) + plines = [p.lstrip("${PKGLIST}").strip() for p in plines] + packages: List[str] = [] + for line in plines: + packages.extend(line.split()) + if not packages: + self.log_info(f"No packages found in script: {inst_path}") + return packages + + async def _read_python_reqs(self) -> List[str]: + if self.python_reqs is None: + return [] + pyreqs = self.python_reqs + if not pyreqs.is_file(): + self.log_info(f"Failed to open python requirements file: {pyreqs}") + return [] + eventloop = self.server.get_event_loop() + return await eventloop.run_in_thread( + pip_utils.read_requirements_file, self.python_reqs + ) def get_update_status(self) -> Dict[str, Any]: return { - 'channel': self.channel, - 'debug_enabled': self.debug, - 'need_channel_update': self.need_channel_update, + 'channel': str(self.channel), + 'debug_enabled': self.server.is_debug_enabled(), + 'channel_invalid': self.channel_invalid, 'is_valid': self._is_valid, - 'configured_type': self.type, + 'configured_type': str(self.type), 'info_tags': self.info_tags } def get_persistent_data(self) -> Dict[str, Any]: storage = super().get_persistent_data() storage['is_valid'] = self._is_valid - storage['need_channel_update'] = self.need_channel_update + storage['pip_version'] = list(self.pip_version) return storage async def _get_file_hash(self, @@ -257,45 +391,88 @@ class AppDeploy(BaseDeploy): self.log_exc("Error updating packages") return - async def _update_virtualenv(self, - requirements:
Union[pathlib.Path, List[str]] - ) -> None: - if self.pip_exe is None: + async def _update_python_requirements( + self, requirements: Union[pathlib.Path, List[str]] + ) -> None: + if self.pip_cmd is None: return - # Update python dependencies - if isinstance(requirements, pathlib.Path): - if not requirements.is_file(): - self.log_info( - f"Invalid path to requirements_file '{requirements}'") - return - args = f"-r {requirements}" - else: - args = " ".join(requirements) + if self.name == "moonraker": + importlib.reload(pip_utils) + pip_exec = pip_utils.AsyncPipExecutor( + self.pip_cmd, self.server, self.cmd_helper.notify_update_response + ) + # Check the current pip version + self.notify_status("Checking pip version...") + try: + pip_ver = await pip_exec.get_pip_version() + if pip_utils.check_pip_needs_update(pip_ver): + cur_ver = pip_ver.pip_version_string + update_ver = ".".join([str(part) for part in pip_utils.MIN_PIP_VERSION]) + self.notify_status( + f"Updating pip from version {cur_ver} to {update_ver}..." + ) + await pip_exec.update_pip() + self.pip_version = pip_utils.MIN_PIP_VERSION + except asyncio.CancelledError: + raise + except Exception as e: + self.notify_status(f"Pip Version Check Error: {e}") + self.log_exc("Pip Version Check Error") self.notify_status("Updating python packages...") try: - # First attempt to update pip - # await self.cmd_helper.run_cmd( - # f"{self.pip_exe} install -U pip", timeout=1200., notify=True, - # retries=3) - await self.cmd_helper.run_cmd( - f"{self.pip_exe} install {args}", timeout=1200., notify=True, - retries=3) + await pip_exec.install_packages(requirements, self.pip_env_vars) + except asyncio.CancelledError: + raise except Exception: self.log_exc("Error updating python requirements") - async def _build_virtualenv(self) -> None: - if self.pip_exe is None or self.venv_args is None: - return - bin_dir = self.pip_exe.parent - env_path = bin_dir.parent.resolve() - self.notify_status(f"Creating virtualenv at: {env_path}...") - if env_path.exists(): - shutil.rmtree(env_path) - try: - await self.cmd_helper.run_cmd( - f"virtualenv {self.venv_args} {env_path}", timeout=300.) 
- except Exception: - self.log_exc(f"Error creating virtualenv") - return - if not self.pip_exe.exists(): - raise self.log_exc("Failed to create new virtualenv", False) + async def _collect_dependency_info(self) -> Dict[str, Any]: + pkg_deps = await self._read_system_dependencies() + pyreqs = await self._read_python_reqs() + npm_hash = await self._get_file_hash(self.npm_pkg_json) + logging.debug( + f"\nApplication {self.name}: Pre-update dependencies:\n" + f"Packages: {pkg_deps}\n" + f"Python Requirements: {pyreqs}" + ) + return { + "system_packages": pkg_deps, + "python_modules": pyreqs, + "npm_hash": npm_hash + } + + async def _update_dependencies( + self, dep_info: Dict[str, Any], force: bool = False + ) -> None: + packages = await self._read_system_dependencies() + modules = await self._read_python_reqs() + logging.debug( + f"\nApplication {self.name}: Post-update dependencies:\n" + f"Packages: {packages}\n" + f"Python Requirements: {modules}" + ) + if not force: + packages = list(set(packages) - set(dep_info["system_packages"])) + modules = list(set(modules) - set(dep_info["python_modules"])) + logging.debug( + f"\nApplication {self.name}: Dependencies to install:\n" + f"Packages: {packages}\n" + f"Python Requirements: {modules}\n" + f"Force All: {force}" + ) + if packages: + await self._install_packages(packages) + if modules: + await self._update_python_requirements(self.python_reqs or modules) + npm_hash: Optional[str] = dep_info["npm_hash"] + ret = await self._check_need_update(npm_hash, self.npm_pkg_json) + if force or ret: + if self.npm_pkg_json is not None: + self.notify_status("Updating Node Packages...") + try: + await self.cmd_helper.run_cmd( + "npm ci --only=prod", notify=True, timeout=600., + cwd=str(self.path) + ) + except Exception: + self.notify_status("Node Package Update failed") diff --git a/moonraker/components/update_manager/base_deploy.py b/moonraker/components/update_manager/base_deploy.py index 1611125..a424d58 100644 --- a/moonraker/components/update_manager/base_deploy.py +++ b/moonraker/components/update_manager/base_deploy.py @@ -7,11 +7,12 @@ from __future__ import annotations import logging import time +from ...utils import pretty_print_time -from typing import TYPE_CHECKING, Dict, Any, Optional +from typing import TYPE_CHECKING, Dict, Any, Optional, Coroutine if TYPE_CHECKING: - from confighelper import ConfigHelper - from utils import ServerError + from ...confighelper import ConfigHelper + from ...utils import ServerError from .update_manager import CommandHelper class BaseDeploy: @@ -23,7 +24,7 @@ class BaseDeploy: cfg_hash: Optional[str] = None ) -> None: if name is None: - name = config.get_name().split()[-1] + name = self.parse_name(config) self.name = name if prefix: prefix = f"{prefix} {self.name}: " @@ -38,6 +39,14 @@ class BaseDeploy: cfg_hash = config.get_hash().hexdigest() self.cfg_hash = cfg_hash + @staticmethod + def parse_name(config: ConfigHelper) -> str: + name = config.get_name().split(maxsplit=1)[-1] + if name.startswith("client "): + # allow deprecated [update_manager client app] style names + name = name[7:] + return name + async def initialize(self) -> Dict[str, Any]: umdb = self.cmd_helper.get_umdb() storage: Dict[str, Any] = await umdb.get(self.name, {}) @@ -45,12 +54,14 @@ class BaseDeploy: self.last_cfg_hash: str = storage.get('last_config_hash', "") return storage - def needs_refresh(self) -> bool: + def needs_refresh(self, log_remaining_time: bool = False) -> bool: next_refresh_time = self.last_refresh_time + 
self.refresh_interval - return ( - self.cfg_hash != self.last_cfg_hash or - time.time() > next_refresh_time - ) + remaining_time = int(next_refresh_time - time.time() + .5) + if self.cfg_hash != self.last_cfg_hash or remaining_time <= 0: + return True + if log_remaining_time: + self.log_info(f"Next refresh in: {pretty_print_time(remaining_time)}") + return False def get_last_refresh_time(self) -> float: return self.last_refresh_time @@ -61,6 +72,9 @@ class BaseDeploy: async def update(self) -> bool: return False + async def rollback(self) -> bool: + raise self.server.error(f"Rollback not available for {self.name}") + def get_update_status(self) -> Dict[str, Any]: return {} @@ -88,7 +102,14 @@ class BaseDeploy: log_msg = f"{self.prefix}{msg}" logging.info(log_msg) + def log_debug(self, msg: str) -> None: + log_msg = f"{self.prefix}{msg}" + logging.debug(log_msg) + def notify_status(self, msg: str, is_complete: bool = False) -> None: log_msg = f"{self.prefix}{msg}" logging.debug(log_msg) self.cmd_helper.notify_update_response(log_msg, is_complete) + + def close(self) -> Optional[Coroutine]: + return None diff --git a/moonraker/components/update_manager/common.py b/moonraker/components/update_manager/common.py new file mode 100644 index 0000000..4498ef1 --- /dev/null +++ b/moonraker/components/update_manager/common.py @@ -0,0 +1,97 @@ +# Moonraker/Klipper update configuration +# +# Copyright (C) 2022 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license. + +from __future__ import annotations +import os +import sys +import copy +import pathlib +from ...common import ExtendedEnum +from ...utils import source_info +from typing import ( + TYPE_CHECKING, + Dict, + Union +) + +if TYPE_CHECKING: + from ...confighelper import ConfigHelper + from ..database import MoonrakerDatabase + +KLIPPER_DEFAULT_PATH = os.path.expanduser("~/klipper") +KLIPPER_DEFAULT_EXEC = os.path.expanduser("~/klippy-env/bin/python") + +BASE_CONFIG: Dict[str, Dict[str, str]] = { + "moonraker": { + "origin": "https://github.com/arksine/moonraker.git", + "requirements": "scripts/moonraker-requirements.txt", + "venv_args": "-p python3", + "system_dependencies": "scripts/system-dependencies.json", + "host_repo": "arksine/moonraker", + "virtualenv": sys.exec_prefix, + "pip_environment_variables": "SKIP_CYTHON=Y", + "path": str(source_info.source_path()), + "managed_services": "moonraker" + }, + "klipper": { + "moved_origin": "https://github.com/kevinoconnor/klipper.git", + "origin": "https://github.com/Klipper3d/klipper.git", + "requirements": "scripts/klippy-requirements.txt", + "venv_args": "-p python2", + "install_script": "scripts/install-octopi.sh", + "host_repo": "arksine/moonraker", + "managed_services": "klipper" + } +} + +class AppType(ExtendedEnum): + NONE = 1 + WEB = 2 + GIT_REPO = 3 + ZIP = 4 + +class Channel(ExtendedEnum): + STABLE = 1 + BETA = 2 + DEV = 3 + +def get_app_type(app_path: Union[str, pathlib.Path]) -> AppType: + if isinstance(app_path, str): + app_path = pathlib.Path(app_path).expanduser() + # None type will perform checks on Moonraker + if source_info.is_git_repo(app_path): + return AppType.GIT_REPO + else: + return AppType.NONE + +def get_base_configuration(config: ConfigHelper) -> ConfigHelper: + server = config.get_server() + base_cfg = copy.deepcopy(BASE_CONFIG) + base_cfg["moonraker"]["type"] = str(get_app_type(source_info.source_path())) + db: MoonrakerDatabase = server.lookup_component('database') + base_cfg["klipper"]["path"] = db.get_item( + "moonraker", 
"update_manager.klipper_path", KLIPPER_DEFAULT_PATH + ).result() + base_cfg["klipper"]["env"] = db.get_item( + "moonraker", "update_manager.klipper_exec", KLIPPER_DEFAULT_EXEC + ).result() + base_cfg["klipper"]["type"] = str(get_app_type(base_cfg["klipper"]["path"])) + channel = config.get("channel", "dev") + base_cfg["moonraker"]["channel"] = channel + base_cfg["klipper"]["channel"] = channel + if config.has_section("update_manager moonraker"): + mcfg = config["update_manager moonraker"] + base_cfg["moonraker"]["channel"] = mcfg.get("channel", channel) + commit = mcfg.get("pinned_commit", None) + if commit is not None: + base_cfg["moonraker"]["pinned_commit"] = commit + if config.has_section("update_manager klipper"): + kcfg = config["update_manager klipper"] + base_cfg["klipper"]["channel"] = kcfg.get("channel", channel) + commit = kcfg.get("pinned_commit", None) + if commit is not None: + base_cfg["klipper"]["pinned_commit"] = commit + return config.read_supplemental_dict(base_cfg) diff --git a/moonraker/components/update_manager/git_deploy.py b/moonraker/components/update_manager/git_deploy.py index e145f1e..f44a767 100644 --- a/moonraker/components/update_manager/git_deploy.py +++ b/moonraker/components/update_manager/git_deploy.py @@ -12,70 +12,76 @@ import shutil import re import logging from .app_deploy import AppDeploy +from .common import Channel +from ...utils.versions import GitVersion # Annotation imports from typing import ( TYPE_CHECKING, Any, - Tuple, Optional, Dict, List, ) if TYPE_CHECKING: - from confighelper import ConfigHelper - from components import shell_command + from ...confighelper import ConfigHelper + from ..shell_command import ShellCommand from .update_manager import CommandHelper + from ..http_client import HttpClient class GitDeploy(AppDeploy): def __init__(self, config: ConfigHelper, cmd_helper: CommandHelper) -> None: - super().__init__(config, cmd_helper) + super().__init__(config, cmd_helper, "Git Repo") + self._configure_path(config) + self._configure_virtualenv(config) + self._configure_dependencies(config) + self._configure_managed_services(config) + self.origin: str = config.get('origin') + self.moved_origin: Optional[str] = config.get('moved_origin', None) + self.primary_branch = config.get("primary_branch", "master") + pinned_commit = config.get("pinned_commit", None) + if pinned_commit is not None: + pinned_commit = pinned_commit.lower() + # validate the hash length + if len(pinned_commit) < 8: + raise config.error( + f"[{config.get_name()}]: Value for option 'commit' must be " + "a minimum of 8 characters." 
+ ) self.repo = GitRepo( - cmd_helper, self.path, self.name, self.origin, - self.moved_origin, self.channel + cmd_helper, self.path, self.name, self.origin, self.moved_origin, + self.primary_branch, self.channel, pinned_commit ) - if self.type != 'git_repo': - self.need_channel_update = True - - @staticmethod - async def from_application(app: AppDeploy) -> GitDeploy: - new_app = GitDeploy(app.config, app.cmd_helper) - await new_app.reinstall() - return new_app async def initialize(self) -> Dict[str, Any]: storage = await super().initialize() - self.repo.restore_state(storage) + await self.repo.restore_state(storage) + self._is_valid = storage.get("is_valid", self.repo.is_valid()) + if not self.needs_refresh(): + self.repo.log_repo_info() return storage async def refresh(self) -> None: - try: - await self._update_repo_state() - except Exception: - logging.exception("Error Refreshing git state") + await self._update_repo_state(raise_exc=False) - async def _update_repo_state(self, need_fetch: bool = True) -> None: + async def _update_repo_state( + self, need_fetch: bool = True, raise_exc: bool = True + ) -> None: self._is_valid = False - await self.repo.initialize(need_fetch=need_fetch) - self.log_info( - f"Channel: {self.channel}, " - f"Need Channel Update: {self.need_channel_update}" - ) - invalids = self.repo.report_invalids(self.primary_branch) - if invalids: - msgs = '\n'.join(invalids) - self.log_info( - f"Repo validation checks failed:\n{msgs}") - if self.debug: - self._is_valid = True - self.log_info( - "Repo debug enabled, overriding validity checks") - else: - self.log_info("Updates on repo disabled") + try: + await self.repo.refresh_repo_state(need_fetch=need_fetch) + except Exception as e: + if raise_exc or isinstance(e, asyncio.CancelledError): + raise else: - self._is_valid = True - self.log_info("Validity check for git repo passed") - self._save_state() + self._is_valid = self.repo.is_valid() + finally: + self.log_info(f"Channel: {self.channel}") + if not self._is_valid: + self.log_info("Repo validation check failed, updates disabled") + else: + self.log_info("Validity check for git repo passed") + self._save_state() async def update(self) -> bool: await self.repo.wait_for_init() @@ -89,12 +95,10 @@ class GitDeploy(AppDeploy): return False self.cmd_helper.notify_update_response( f"Updating Application {self.name}...") - inst_hash = await self._get_file_hash(self.install_script) - pyreqs_hash = await self._get_file_hash(self.python_reqs) - npm_hash = await self._get_file_hash(self.npm_pkg_json) + dep_info = await self._collect_dependency_info() await self._pull_repo() # Check Semantic Versions - await self._update_dependencies(inst_hash, pyreqs_hash, npm_hash) + await self._update_dependencies(dep_info) # Refresh local repo state await self._update_repo_state(need_fetch=False) await self.restart_service() @@ -106,34 +110,44 @@ class GitDeploy(AppDeploy): force_dep_update: bool = False ) -> None: self.notify_status("Attempting Repo Recovery...") - inst_hash = await self._get_file_hash(self.install_script) - pyreqs_hash = await self._get_file_hash(self.python_reqs) - npm_hash = await self._get_file_hash(self.npm_pkg_json) - + dep_info = await self._collect_dependency_info() if hard: await self.repo.clone() await self._update_repo_state() else: self.notify_status("Resetting Git Repo...") - await self.repo.reset() + reset_ref = await self.repo.get_recovery_ref() + if self.repo.is_dirty(): + # Try to restore modified files. 
If the attempt fails we + # can still try the reset + try: + await self.repo.checkout("-- .") + except self.server.error: + pass + await self.repo.checkout(self.primary_branch) + await self.repo.reset(reset_ref) await self._update_repo_state() + self.repo.set_rollback_state(None) if self.repo.is_dirty() or not self._is_valid: raise self.server.error( "Recovery attempt failed, repo state not pristine", 500) - await self._update_dependencies(inst_hash, pyreqs_hash, npm_hash, - force=force_dep_update) + await self._update_dependencies(dep_info, force=force_dep_update) await self.restart_service() self.notify_status("Reinstall Complete", is_complete=True) - async def reinstall(self): - # Clear the persistent storage prior to a channel swap. - # After the next update is complete new data will be - # restored. - umdb = self.cmd_helper.get_umdb() - await umdb.pop(self.name, None) - await self.initialize() - await self.recover(True, True) + async def rollback(self) -> bool: + dep_info = await self._collect_dependency_info() + ret = await self.repo.rollback() + if ret: + await self._update_dependencies(dep_info) + await self._update_repo_state(need_fetch=False) + await self.restart_service() + msg = "Rollback Complete" + else: + msg = "Rollback not performed" + self.notify_status(msg, is_complete=True) + return ret def get_update_status(self) -> Dict[str, Any]: status = super().get_update_status() @@ -147,6 +161,7 @@ class GitDeploy(AppDeploy): async def _pull_repo(self) -> None: self.notify_status("Updating Repo...") + rb_state = self.repo.capture_state_for_rollback() try: await self.repo.fetch() if self.repo.is_detached(): @@ -158,55 +173,17 @@ class GitDeploy(AppDeploy): await self.repo.reset() else: await self.repo.pull() - except Exception: - raise self.log_exc("Error updating git repo") - - async def _update_dependencies(self, - inst_hash: Optional[str], - pyreqs_hash: Optional[str], - npm_hash: Optional[str], - force: bool = False - ) -> None: - ret = await self._check_need_update(inst_hash, self.install_script) - if force or ret: - package_list = await self._parse_install_script() - if package_list is not None: - await self._install_packages(package_list) - ret = await self._check_need_update(pyreqs_hash, self.python_reqs) - if force or ret: - if self.python_reqs is not None: - await self._update_virtualenv(self.python_reqs) - ret = await self._check_need_update(npm_hash, self.npm_pkg_json) - if force or ret: - if self.npm_pkg_json is not None: - self.notify_status("Updating Node Packages...") - try: - await self.cmd_helper.run_cmd( - "npm ci --only=prod", notify=True, timeout=600., - cwd=str(self.path)) - except Exception: - self.notify_status("Node Package Update failed") - - async def _parse_install_script(self) -> Optional[List[str]]: - if self.install_script is None: - return None - # Open install file file and read - inst_path: pathlib.Path = self.install_script - if not inst_path.is_file(): - self.log_info(f"Unable to open install script: {inst_path}") - return None - event_loop = self.server.get_event_loop() - data = await event_loop.run_in_thread(inst_path.read_text) - plines: List[str] = re.findall(r'PKGLIST="(.*)"', data) - plines = [p.lstrip("${PKGLIST}").strip() for p in plines] - packages: List[str] = [] - for line in plines: - packages.extend(line.split()) - if not packages: - self.log_info(f"No packages found in script: {inst_path}") - return None - logging.debug(f"Repo {self.name}: Detected Packages: {repr(packages)}") - return packages + except Exception as e: + if 
self.repo.repo_corrupt: + self._is_valid = False + self._save_state() + event_loop = self.server.get_event_loop() + event_loop.delay_callback( + .2, self.cmd_helper.notify_update_refreshed + ) + raise self.log_exc(str(e)) + else: + self.repo.set_rollback_state(rb_state) GIT_ASYNC_TIMEOUT = 300. @@ -218,31 +195,37 @@ GIT_MAX_LOG_CNT = 100 GIT_LOG_FMT = ( "\"sha:%H%x1Dauthor:%an%x1Ddate:%ct%x1Dsubject:%s%x1Dmessage:%b%x1E\"" ) -GIT_OBJ_ERR = "fatal: loose object" GIT_REF_FMT = ( - "'%(if)%(*objecttype)%(then)%(*objecttype) (*objectname)" + "'%(if)%(*objecttype)%(then)%(*objecttype) %(*objectname)" "%(else)%(objecttype) %(objectname)%(end) %(refname)'" ) +SRC_EXTS = (".py", ".c", ".cpp") class GitRepo: - tag_r = re.compile(r"(v?\d+\.\d+\.\d+(-(alpha|beta)(\.\d+)?)?)(-\d+)?") - def __init__(self, - cmd_helper: CommandHelper, - git_path: pathlib.Path, - alias: str, - origin_url: str, - moved_origin_url: Optional[str], - channel: str - ) -> None: + def __init__( + self, + cmd_helper: CommandHelper, + src_path: pathlib.Path, + alias: str, + origin_url: str, + moved_origin_url: Optional[str], + primary_branch: str, + channel: Channel, + pinned_commit: Optional[str] + ) -> None: self.server = cmd_helper.get_server() self.cmd_helper = cmd_helper self.alias = alias - self.git_path = git_path - git_dir = git_path.parent - git_base = git_path.name + self.src_path = src_path + git_dir = src_path.parent + git_base = src_path.name self.backup_path = git_dir.joinpath(f".{git_base}_repo_backup") + self.git_folder_path = src_path.joinpath(".git") self.origin_url = origin_url + if not self.origin_url.endswith(".git"): + self.origin_url += ".git" self.moved_origin_url = moved_origin_url + self.primary_branch = primary_branch self.recovery_message = \ f""" Manually restore via SSH with the following commands: @@ -253,37 +236,53 @@ class GitRepo: sudo service {self.alias} start """ + self.repo_warnings: List[str] = [] + self.repo_anomalies: List[str] = [] self.init_evt: Optional[asyncio.Event] = None self.initialized: bool = False self.git_operation_lock = asyncio.Lock() self.fetch_timeout_handle: Optional[asyncio.Handle] = None self.fetch_input_recd: bool = False - self.is_beta = channel == "beta" - self.bound_repo = None - if self.is_beta and self.alias == "klipper": - # Bind Klipper Updates Moonraker - self.bound_repo = "moonraker" + self.channel = channel + self.pinned_commit = pinned_commit + self.is_shallow = False - def restore_state(self, storage: Dict[str, Any]) -> None: + async def restore_state(self, storage: Dict[str, Any]) -> None: self.valid_git_repo: bool = storage.get('repo_valid', False) self.git_owner: str = storage.get('git_owner', "?") self.git_repo_name: str = storage.get('git_repo_name', "?") self.git_remote: str = storage.get('git_remote', "?") self.git_branch: str = storage.get('git_branch', "?") - self.current_version: str = storage.get('current_version', "?") - self.upstream_version: str = storage.get('upstream_version', "?") + if "full_version_string" in storage: + self.current_version = GitVersion(storage["full_version_string"]) + else: + self.current_version = GitVersion(storage.get('current_version', "?")) + self.upstream_version = GitVersion(storage.get('upstream_version', "?")) self.current_commit: str = storage.get('current_commit', "?") self.upstream_commit: str = storage.get('upstream_commit', "?") self.upstream_url: str = storage.get('upstream_url', "?") - self.full_version_string: str = storage.get('full_version_string', "?") + self.recovery_url: str = storage.get( + 
'recovery_url', + self.upstream_url if self.git_remote == "origin" else "?" + ) self.branches: List[str] = storage.get('branches', []) - self.dirty: bool = storage.get('dirty', False) self.head_detached: bool = storage.get('head_detached', False) self.git_messages: List[str] = storage.get('git_messages', []) - self.commits_behind: List[Dict[str, Any]] = storage.get( - 'commits_behind', []) - self.tag_data: Dict[str, Any] = storage.get('tag_data', {}) + self.commits_behind: List[Dict[str, Any]] = storage.get('commits_behind', []) + self.commits_behind_count: int = storage.get('cbh_count', 0) self.diverged: bool = storage.get("diverged", False) + self.repo_corrupt: bool = storage.get('corrupt', False) + self.modified_files: List[str] = storage.get("modified_files", []) + self.untracked_files: List[str] = storage.get("untracked_files", []) + def_rbs = self.capture_state_for_rollback() + self.rollback_commit: str = storage.get('rollback_commit', self.current_commit) + self.rollback_branch: str = storage.get('rollback_branch', def_rbs["branch"]) + rbv = storage.get('rollback_version', self.current_version) + self.rollback_version = GitVersion(str(rbv)) + self.pinned_commit_valid: bool = storage.get('pinned_commit_valid', True) + if not await self._detect_git_dir(): + self.valid_git_repo = False + self._check_warnings() def get_persistent_data(self) -> Dict[str, Any]: return { @@ -292,72 +291,66 @@ class GitRepo: 'git_repo_name': self.git_repo_name, 'git_remote': self.git_remote, 'git_branch': self.git_branch, - 'current_version': self.current_version, - 'upstream_version': self.upstream_version, + 'current_version': self.current_version.full_version, + 'upstream_version': self.upstream_version.full_version, 'current_commit': self.current_commit, 'upstream_commit': self.upstream_commit, + 'rollback_commit': self.rollback_commit, + 'rollback_branch': self.rollback_branch, + 'rollback_version': self.rollback_version.full_version, 'upstream_url': self.upstream_url, - 'full_version_string': self.full_version_string, + 'recovery_url': self.recovery_url, 'branches': self.branches, - 'dirty': self.dirty, 'head_detached': self.head_detached, 'git_messages': self.git_messages, 'commits_behind': self.commits_behind, - 'tag_data': self.tag_data, - 'diverged': self.diverged + 'cbh_count': self.commits_behind_count, + 'diverged': self.diverged, + 'corrupt': self.repo_corrupt, + 'modified_files': self.modified_files, + 'untracked_files': self.untracked_files, + 'pinned_commit_valid': self.pinned_commit_valid } - async def initialize(self, need_fetch: bool = True) -> None: + async def refresh_repo_state(self, need_fetch: bool = True) -> None: if self.init_evt is not None: # No need to initialize multiple requests await self.init_evt.wait() if self.initialized: return self.initialized = False + self.pinned_commit_valid = True self.init_evt = asyncio.Event() self.git_messages.clear() try: - await self.update_repo_status() + await self._check_repo_status() self._verify_repo() - if not self.head_detached: - # lookup remote via git config - self.git_remote = await self.get_config_item( - f"branch.{self.git_branch}.remote") + await self._find_current_branch() # Fetch the upstream url. 
If the repo has been moved, # set the new url - self.upstream_url = await self.remote(f"get-url {self.git_remote}") - if self.moved_origin_url is not None: - origin = self.upstream_url.lower().strip() - if not origin.endswith(".git"): - origin += ".git" - moved_origin = self.moved_origin_url.lower().strip() - if not moved_origin.endswith(".git"): - moved_origin += ".git" - if origin == moved_origin: + self.upstream_url = await self.remote(f"get-url {self.git_remote}", True) + if await self._check_moved_origin(): + need_fetch = True + if self.git_remote == "origin": + self.recovery_url = self.upstream_url + else: + remote_list = (await self.remote()).splitlines() + logging.debug( + f"Git Repo {self.alias}: Detected Remotes - {remote_list}" + ) + if "origin" in remote_list: + self.recovery_url = await self.remote("get-url origin") + else: logging.info( - f"Git Repo {self.alias}: Moved Repo Detected, Moving " - f"from {self.upstream_url} to {self.origin_url}") - need_fetch = True - await self.remote( - f"set-url {self.git_remote} {self.origin_url}") - self.upstream_url = self.origin_url - + f"Git Repo {self.alias}: Unable to detect recovery URL, " + "Hard Recovery not available" + ) + self.recovery_url = "?" if need_fetch: await self.fetch() self.diverged = await self.check_diverged() - # Populate list of current branches - blist = await self.list_branches() - self.branches = [] - for branch in blist: - branch = branch.strip() - if branch[0] == "*": - branch = branch[2:] - if branch[0] == "(": - continue - self.branches.append(branch) - # Parse GitHub Owner from URL owner_match = re.match(r"https?://[^/]+/([^/]+)", self.upstream_url) self.git_owner = "?" @@ -370,147 +363,274 @@ class GitRepo: if repo_match is not None: self.git_repo_name = repo_match.group(1) self.current_commit = await self.rev_parse("HEAD") - git_desc = await self.describe( - "--always --tags --long --dirty") - self.full_version_string = git_desc.strip() - self.dirty = git_desc.endswith("dirty") - self.tag_data = {} - if self.is_beta and self.bound_repo is None: - await self._get_beta_versions(git_desc) - else: - await self._get_dev_versions(git_desc) + git_desc = await self.describe("--always --tags --long --dirty --abbrev=8") + cur_ver = GitVersion(git_desc.strip()) + upstream_ver = await self._get_upstream_version() + await self._set_versions(cur_ver, upstream_ver) # Get Commits Behind self.commits_behind = [] - cbh = await self.get_commits_behind() - if cbh: + if self.commits_behind_count > 0: + cbh = await self.get_commits_behind() tagged_commits = await self.get_tagged_commits() - debug_msg = '\n'.join([f"{k}: {v}" for k, v in - tagged_commits.items()]) - logging.debug(f"Git Repo {self.alias}: Tagged Commits\n" - f"{debug_msg}") + debug_msg = '\n'.join([f"{k}: {v}" for k, v in tagged_commits.items()]) + logging.debug(f"Git Repo {self.alias}: Tagged Commits\n{debug_msg}") for i, commit in enumerate(cbh): tag = tagged_commits.get(commit['sha'], None) if i < 30 or tag is not None: commit['tag'] = tag self.commits_behind.append(commit) - - self.log_repo_info() + self._check_warnings() except Exception: logging.exception(f"Git Repo {self.alias}: Initialization failure") + self._check_warnings() raise else: self.initialized = True + # If no exception was raised assume the repo is not corrupt + self.repo_corrupt = False + if self.rollback_commit == "?" 
or self.rollback_branch == "?": + # Reset Rollback State + self.set_rollback_state(None) + self.log_repo_info() finally: self.init_evt.set() self.init_evt = None - async def _get_dev_versions(self, current_version: str) -> None: - self.upstream_commit = await self.rev_parse( - f"{self.git_remote}/{self.git_branch}") - upstream_version = await self.describe( - f"{self.git_remote}/{self.git_branch} " - "--always --tags --long") - # Get the latest tag as a fallback for shallow clones - commit, tag = await self._parse_latest_tag() - # Parse Version Info - versions: List[str] = [] - for ver in [current_version, upstream_version]: - tag_version = "?" - ver_match = self.tag_r.match(ver) - if ver_match: - tag_version = ver_match.group() - elif tag != "?": - if len(versions) == 0: - count = await self.rev_list(f"{tag}..HEAD --count") - full_ver = f"{tag}-{count}-g{ver}-shallow" - self.full_version_string = full_ver - else: - count = await self.rev_list( - f"{tag}..{self.upstream_commit} --count") - tag_version = f"{tag}-{count}" - versions.append(tag_version) - self.current_version, self.upstream_version = versions - if self.bound_repo is not None: - await self._get_bound_versions(self.current_version) - - async def _get_beta_versions(self, current_version: str) -> None: - upstream_commit, upstream_tag = await self._parse_latest_tag() - ver_match = self.tag_r.match(current_version) - current_tag = "?" - if ver_match: - current_tag = ver_match.group(1) - elif upstream_tag != "?": - count = await self.rev_list(f"{upstream_tag}..HEAD --count") - full_ver = f"{upstream_tag}-{count}-g{current_version}-shallow" - self.full_version_string = full_ver - current_tag = upstream_tag - self.upstream_commit = upstream_commit - if current_tag == upstream_tag: - self.upstream_commit = self.current_commit - self.current_version = current_tag - self.upstream_version = upstream_tag - # Check the tag for annotations - self.tag_data = await self.get_tag_data(upstream_tag) - if self.tag_data: - # TODO: need to force a repo update by resetting its refresh time? 
- logging.debug( - f"Git Repo {self.alias}: Found Tag Annotation: {self.tag_data}" - ) - - async def _get_bound_versions(self, current_version: str) -> None: - if self.bound_repo is None: - return - umdb = self.cmd_helper.get_umdb() - key = f"{self.bound_repo}.tag_data" - tag_data: Dict[str, Any] = await umdb.get(key, {}) - if tag_data.get("repo", "") != self.alias: - logging.info( - f"Git Repo {self.alias}: Invalid bound tag data: " - f"{tag_data}" - ) - return - if tag_data["branch"] != self.git_branch: - logging.info(f"Git Repo {self.alias}: Repo not on bound branch") - return - bound_vlist: List[int] = tag_data["version_as_list"] - current_vlist = self._convert_semver(current_version) - if self.full_version_string.endswith("shallow"): - # We need to recalculate the commit count for shallow clones - if current_vlist[:4] == bound_vlist[:4]: - commit = tag_data["commit"] - tag = current_version.split("-")[0] + async def _check_repo_status(self) -> bool: + async with self.git_operation_lock: + self.valid_git_repo = False + if not await self._detect_git_dir(): + logging.info( + f"Git Repo {self.alias}: path '{self.src_path}'" + " is not a valid git repo") + return False + await self._wait_for_lock_release() + attempts = 3 + resp: Optional[str] = None + while attempts: + self.git_messages.clear() try: - resp = await self.rev_list(f"{tag}..{commit} --count") - count = int(resp) + cmd = "status --porcelain -b" + resp = await self._run_git_cmd( + cmd, attempts=1, corrupt_hdr="fatal:" + ) except Exception: - count = 0 - bound_vlist[4] == count - if current_vlist < bound_vlist: - bound_ver_match = self.tag_r.match(tag_data["version"]) - if bound_ver_match is not None: - self.upstream_commit = tag_data["commit"] - self.upstream_version = bound_ver_match.group() - else: - # The repo is currently ahead of the bound tag/commmit, - # so pin the version - self.upstream_commit = self.current_commit - self.upstream_version = self.current_version + attempts -= 1 + resp = None + # Attempt to recover from "loose object" error + if attempts and self.repo_corrupt: + if not await self._repair_loose_objects(): + # Since we are unable to recover, immediately + # return + return False + else: + break + if resp is None: + return False + self.modified_files.clear() + self.untracked_files.clear() + for line in resp.splitlines(): + parts = line.strip().split(maxsplit=1) + if len(parts) != 2: + continue + prefix, fname = [p.strip() for p in parts] + if prefix == "M": + # modified file + self.modified_files.append(fname) + elif prefix == "??": + # untracked file + ext = pathlib.Path(fname).suffix + if ext in SRC_EXTS: + self.untracked_files.append(fname) + self.valid_git_repo = True + return True - async def _parse_latest_tag(self) -> Tuple[str, str]: - commit = tag = "?" 
- try:
- commit = await self.rev_list("--tags --max-count=1")
- tag = await self.describe(f"--tags {commit}")
- except Exception:
- pass
- else:
- tag_match = self.tag_r.match(tag)
- if tag_match is not None:
- tag = tag_match.group(1)
+ async def _detect_git_dir(self) -> bool:
+ if self.git_folder_path.is_file():
+ # Submodules have a file that contains the path to
+ # the .git folder
+ eventloop = self.server.get_event_loop()
+ data = await eventloop.run_in_thread(self.git_folder_path.read_text)
+ ident, _, gitdir = data.partition(":")
+ if ident.strip() != "gitdir" or not gitdir.strip():
+ return False
+ self.git_folder_path = pathlib.Path(gitdir).expanduser().resolve()
+ if self.git_folder_path.is_dir():
+ self.is_shallow = self.git_folder_path.joinpath("shallow").is_file()
+ return True
+ return False
+
+ async def _find_current_branch(self) -> None:
+ # Populate list of current branches
+ blist = await self.list_branches()
+ current_branch = ""
+ self.branches = []
+ for branch in blist:
+ branch = branch.strip()
+ if not branch:
+ continue
+ if branch[0] == "*":
+ branch = branch[2:].strip()
+ current_branch = branch
+ if branch[0] == "(":
+ continue
+ self.branches.append(branch)
+ if current_branch.startswith("(HEAD detached"):
+ self.head_detached = True
+ ref_name = current_branch.split()[-1][:-1]
+ remote_list = (await self.remote()).splitlines()
+ for remote in remote_list:
+ remote = remote.strip()
+ if not remote:
+ continue
+ if ref_name.startswith(remote):
+ self.git_branch = ref_name[len(remote)+1:]
+ self.git_remote = remote
+ break else:
- tag = "?"
- return commit, tag
+ if self.git_remote == "?":
+ msg = "Resolve by manually checking out a branch via SSH."
+ else:
+ prev = f"{self.git_remote}/{self.git_branch}"
+ msg = f"Defaulting to previously tracked {prev}."
+ logging.info(f"Git Repo {self.alias}: {current_branch} {msg}")
+ else:
+ self.head_detached = False
+ self.git_branch = current_branch
+ rkey = f"branch.{self.git_branch}.remote"
+ self.git_remote = (await self.config_get(rkey)) or "?"
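
For illustration, the branch detection added in _find_current_branch() above can be exercised in isolation. The sketch below is not part of the patch: the helper name parse_branch_list and the sample data are hypothetical, and the input is assumed to be the line-split output of "git branch --list --no-color".

    from typing import List, Optional, Tuple

    def parse_branch_list(blist: List[str]) -> Tuple[Optional[str], List[str]]:
        # Mirrors the parsing above: the active branch is prefixed with "* ",
        # and a detached HEAD renders as "* (HEAD detached at <ref>)", which
        # is captured as the current branch but excluded from the branch list.
        current: Optional[str] = None
        branches: List[str] = []
        for branch in blist:
            branch = branch.strip()
            if not branch:
                continue
            if branch[0] == "*":
                branch = branch[2:].strip()
                current = branch
            if branch[0] == "(":
                continue
            branches.append(branch)
        return current, branches

    # Hypothetical sample: a checkout with HEAD detached at origin/master
    sample = ["  devel", "* (HEAD detached at origin/master)", "  master"]
    print(parse_branch_list(sample))
    # -> ('(HEAD detached at origin/master)', ['devel', 'master'])
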
+
+ async def _check_moved_origin(self) -> bool:
+ detected_origin = self.upstream_url.lower().strip()
+ if not detected_origin.endswith(".git"):
+ detected_origin += ".git"
+ if (
+ self.server.is_debug_enabled() or
+ not detected_origin.startswith("http") or
+ detected_origin == self.origin_url.lower()
+ ):
+ # Skip the moved origin check if:
+ # Repo Debug is enabled
+ # The detected origin url is not http(s)
+ # The detected origin matches the expected origin url
+ return False
+ moved = False
+ client: HttpClient = self.server.lookup_component("http_client")
+ check_url = detected_origin[:-4]
+ logging.info(
+ f"Git repo {self.alias}: Performing moved origin check - "
+ f"{check_url}"
+ )
+ resp = await client.get(check_url, enable_cache=False)
+ if not resp.has_error():
+ final_url = resp.final_url.lower()
+ if not final_url.endswith(".git"):
+ final_url += ".git"
+ logging.debug(f"Git repo {self.alias}: Resolved url - {final_url}")
+ if final_url == self.origin_url.lower():
+ logging.info(
+ f"Git Repo {self.alias}: Moved Repo Detected, Moving "
+ f"from {self.upstream_url} to {self.origin_url}")
+ moved = True
+ await self.remote(
+ f"set-url {self.git_remote} {self.origin_url}", True
+ )
+ self.upstream_url = self.origin_url
+ if self.moved_origin_url is not None:
+ moved_origin = self.moved_origin_url.lower().strip()
+ if not moved_origin.endswith(".git"):
+ moved_origin += ".git"
+ if moved_origin != detected_origin:
+ self.server.add_warning(
+ f"Git Repo {self.alias}: Origin URL does not "
+ "match the configured 'moved_origin' option. "
+ f"Expected: {detected_origin}"
+ )
+ else:
+ logging.debug(f"Move Request Failed: {resp.error}")
+ return moved
+
+ async def _get_upstream_version(self) -> GitVersion:
+ self.commits_behind_count = 0
+ if self.pinned_commit is not None:
+ self.upstream_commit = self.current_commit
+ if not self.current_commit.lower().startswith(self.pinned_commit):
+ if not await self.check_commit_exists(self.pinned_commit):
+ self.pinned_commit_valid = False
+ elif await self.is_ancestor(self.current_commit, self.pinned_commit):
+ self.upstream_commit = self.pinned_commit
+ upstream_ver_str = await self.describe(
+ f"{self.upstream_commit} --always --tags --long --abbrev=8",
+ )
+ elif self.channel == Channel.DEV:
+ self.upstream_commit = await self.rev_parse(
+ f"{self.git_remote}/{self.git_branch}"
+ )
+ upstream_ver_str = await self.describe(
+ f"{self.git_remote}/{self.git_branch} --always --tags --long --abbrev=8"
+ )
+ else:
+ tagged_commits = await self.get_tagged_commits()
+ upstream_commit = upstream_ver_str = "?"
+ for sha, tag in tagged_commits.items():
+ ver = GitVersion(tag)
+ if not ver.is_valid_version():
+ continue
+ if (
+ (self.channel == Channel.STABLE and ver.is_final_release()) or
+ (self.channel == Channel.BETA and not ver.is_alpha_release())
+ ):
+ upstream_commit = sha
+ upstream_ver_str = tag
+ break
+ self.upstream_commit = upstream_commit
+ if self.upstream_commit != "?":
+ rl_args = f"HEAD..{self.upstream_commit} --count"
+ self.commits_behind_count = int(await self.rev_list(rl_args))
+ return GitVersion(upstream_ver_str)
+
+ async def _set_versions(
+ self, current_version: GitVersion, upstream_version: GitVersion
+ ) -> None:
+ if not current_version.is_valid_version():
+ log_msg = (
+ f"Git repo {self.alias}: Failed to detect current version, got "
+ f"'{current_version}'. 
" + ) + tag = upstream_version.infer_last_tag() + count = await self.rev_list("HEAD --count") + sha_part = "" + if current_version.is_fallback(): + sha_part = f"-g{current_version}" + elif self.current_commit not in ("?", ""): + sha_part = f"-g{self.current_commit[:8]}" + current_version = GitVersion(f"{tag}-{count}{sha_part}-inferred") + log_msg += f"Falling back to inferred version: {current_version}" + logging.info(log_msg) + if self.channel == Channel.DEV: + if not upstream_version.is_valid_version(): + log_msg = ( + f"Git repo {self.alias}: Failed to detect upstream version, got " + f"'{upstream_version}'. " + ) + tag = current_version.tag + if current_version.inferred: + count = await self.rev_list(f"{self.upstream_commit} --count") + else: + log_msg += "\nRemote has diverged, approximating dev count. " + count = str(self.commits_behind_count + current_version.dev_count) + upstream_version = GitVersion(f"{tag}-{count}-inferred") + log_msg += f"Falling back to inferred version: {upstream_version}" + logging.info(log_msg) + else: + if not upstream_version.is_valid_version(): + self.upstream_commit = self.current_commit + upstream_version = current_version + elif upstream_version <= current_version: + self.upstream_commit = self.current_commit + self.current_version = current_version + self.upstream_version = upstream_version async def wait_for_init(self) -> None: if self.init_evt is not None: @@ -519,110 +639,119 @@ class GitRepo: raise self.server.error( f"Git Repo {self.alias}: Initialization failure") - async def update_repo_status(self) -> bool: + async def is_ancestor( + self, ancestor_ref: str, descendent_ref: str, attempts: int = 3 + ) -> bool: + self._verify_repo() + cmd = f"merge-base --is-ancestor {ancestor_ref} {descendent_ref}" async with self.git_operation_lock: - if not self.git_path.joinpath(".git").is_dir(): - logging.info( - f"Git Repo {self.alias}: path '{self.git_path}'" - " is not a valid git repo") - return False - await self._wait_for_lock_release() - self.valid_git_repo = False - retries = 3 - while retries: - self.git_messages.clear() + for _ in range(attempts): try: - resp: Optional[str] = await self._run_git_cmd( - "status -u no", retries=1) - except Exception: - retries -= 1 - resp = None - # Attempt to recover from "loose object" error - if retries and GIT_OBJ_ERR in "\n".join(self.git_messages): - ret = await self._repair_loose_objects() - if not ret: - # Since we are unable to recover, immediately - # return - return False + await self._run_git_cmd(cmd, attempts=1, corrupt_hdr="error: ") + except self.cmd_helper.get_shell_command().error as err: + if err.return_code == 1: + return False + if self.repo_corrupt: + raise else: break - if resp is None: - return False - resp = resp.strip().split('\n', 1)[0] - self.head_detached = resp.startswith("HEAD detached") - branch_info = resp.split()[-1] - if self.head_detached: - bparts = branch_info.split("/", 1) - if len(bparts) == 2: - self.git_remote, self.git_branch = bparts - else: - if self.git_remote == "?": - msg = "Resolve by manually checking out" \ - " a branch via SSH." - else: - msg = "Defaulting to previously tracked " \ - f"{self.git_remote}/{self.git_branch}." - logging.info( - f"Git Repo {self.alias}: HEAD detached on untracked " - f"commit {branch_info}. 
{msg}") - else: - self.git_branch = branch_info - self.valid_git_repo = True + await asyncio.sleep(.2) return True async def check_diverged(self) -> bool: self._verify_repo(check_remote=True) - async with self.git_operation_lock: - if self.head_detached: - return False - cmd = ( - "merge-base --is-ancestor HEAD " - f"{self.git_remote}/{self.git_branch}" - ) - try: - await self._run_git_cmd(cmd, retries=1) - except self.cmd_helper.scmd_error: - return True + if self.head_detached: return False + descendent = f"{self.git_remote}/{self.git_branch}" + return not (await self.is_ancestor("HEAD", descendent)) def log_repo_info(self) -> None: + warnings = self._generate_warn_msg() + if warnings: + warnings = "\nRepo Warnings:\n" + warnings logging.info( f"Git Repo {self.alias} Detected:\n" f"Owner: {self.git_owner}\n" f"Repository Name: {self.git_repo_name}\n" - f"Path: {self.git_path}\n" + f"Path: {self.src_path}\n" f"Remote: {self.git_remote}\n" f"Branch: {self.git_branch}\n" f"Remote URL: {self.upstream_url}\n" + f"Recovery URL: {self.recovery_url}\n" f"Current Commit SHA: {self.current_commit}\n" f"Upstream Commit SHA: {self.upstream_commit}\n" f"Current Version: {self.current_version}\n" f"Upstream Version: {self.upstream_version}\n" - f"Is Dirty: {self.dirty}\n" + f"Rollback Commit: {self.rollback_commit}\n" + f"Rollback Branch: {self.rollback_branch}\n" + f"Rollback Version: {self.rollback_version}\n" + f"Is Dirty: {self.current_version.dirty}\n" f"Is Detached: {self.head_detached}\n" - f"Commits Behind: {len(self.commits_behind)}\n" - f"Tag Data: {self.tag_data}\n" - f"Bound Repo: {self.bound_repo}\n" - f"Diverged: {self.diverged}" + f"Is Shallow: {self.is_shallow}\n" + f"Commits Behind Count: {self.commits_behind_count}\n" + f"Diverged: {self.diverged}\n" + f"Pinned Commit: {self.pinned_commit}" + f"{warnings}" ) - def report_invalids(self, primary_branch: str) -> List[str]: - invalids: List[str] = [] + def _check_warnings(self) -> None: + self.repo_warnings.clear() + self.repo_anomalies.clear() + if self.pinned_commit is not None and not self.pinned_commit_valid: + self.repo_anomalies.append( + f"Pinned Commit {self.pinned_commit} does not exist" + ) + if self.repo_corrupt: + self.repo_warnings.append("Repo is corrupt") + if self.git_branch == "?": + self.repo_warnings.append("Failed to detect git branch") + elif self.git_remote == "?": + self.repo_warnings.append( + f"Failed to detect tracking remote for branch {self.git_branch}" + ) + if self.upstream_url == "?": + self.repo_warnings.append("Failed to detect repo url") + return upstream_url = self.upstream_url.lower() if upstream_url[-4:] != ".git": upstream_url += ".git" if upstream_url != self.origin_url.lower(): - invalids.append(f"Unofficial remote url: {self.upstream_url}") - if self.git_branch != primary_branch or self.git_remote != "origin": - invalids.append( - "Repo not on valid remote branch, expected: " - f"origin/{primary_branch}, detected: " + self.repo_anomalies.append(f"Unofficial remote url: {self.upstream_url}") + if self.git_branch != self.primary_branch or self.git_remote != "origin": + self.repo_anomalies.append( + "Repo not on offical remote/branch, expected: " + f"origin/{self.primary_branch}, detected: " f"{self.git_remote}/{self.git_branch}") - if self.head_detached: - invalids.append("Detached HEAD detected") + if self.untracked_files: + self.repo_anomalies.append( + f"Repo has untracked source files: {self.untracked_files}" + ) if self.diverged: - invalids.append("Repo has diverged from remote") - return 
invalids
+ self.repo_anomalies.append("Repo has diverged from remote")
+ if self.head_detached:
+ msg = "Detached HEAD detected"
+ if self.server.is_debug_enabled():
+ self.repo_anomalies.append(msg)
+ else:
+ self.repo_warnings.append(msg)
+ if self.is_dirty():
+ self.repo_warnings.append(
+ "Repo is dirty. Detected the following modified files: "
+ f"{self.modified_files}"
+ )
+ self._generate_warn_msg()
+
+ def _generate_warn_msg(self) -> str:
+ ro_msg = f"Git Repo {self.alias}: No warnings detected"
+ warn_msg = ""
+ if self.repo_warnings or self.repo_anomalies:
+ ro_msg = f"Git Repo {self.alias}: Warnings detected:\n"
+ warn_msg = "\n".join(
+ [f" {warn}" for warn in self.repo_warnings + self.repo_anomalies]
+ )
+ ro_msg += warn_msg
+ self.server.add_log_rollover_item(f"umgr_{self.alias}_warn", ro_msg, log=False)
+ return warn_msg
 def _verify_repo(self, check_remote: bool = False) -> None:
 if not self.valid_git_repo:
@@ -633,14 +762,17 @@ class GitRepo:
 raise self.server.error(
 f"Git Repo {self.alias}: No valid git remote detected")
- async def reset(self) -> None:
- if self.git_remote == "?" or self.git_branch == "?":
- raise self.server.error("Cannot reset, unknown remote/branch")
+ async def reset(self, ref: Optional[str] = None) -> None:
 async with self.git_operation_lock:
- reset_cmd = f"reset --hard {self.git_remote}/{self.git_branch}"
- if self.is_beta:
- reset_cmd = f"reset --hard {self.upstream_commit}"
- await self._run_git_cmd(reset_cmd, retries=2)
+ if ref is None:
+ if self.channel != Channel.DEV or self.pinned_commit is not None:
+ ref = self.upstream_commit
+ else:
+ if self.git_remote == "?" or self.git_branch == "?":
+ raise self.server.error("Cannot reset, unknown remote/branch")
+ ref = f"{self.git_remote}/{self.git_branch}"
+ await self._run_git_cmd(f"reset --hard {ref}", attempts=2)
+ self.repo_corrupt = False
 async def fetch(self) -> None:
 self._verify_repo(check_remote=True)
@@ -651,7 +783,7 @@ class GitRepo:
 async def clean(self) -> None:
 self._verify_repo()
 async with self.git_operation_lock:
- await self._run_git_cmd("clean -d -f", retries=2)
+ await self._run_git_cmd("clean -d -f", attempts=2)
 async def pull(self) -> None:
 self._verify_repo()
@@ -660,9 +792,9 @@ class GitRepo:
 f"Git Repo {self.alias}: Cannot perform pull on a "
 "detached HEAD")
 cmd = "pull --progress"
- if self.cmd_helper.is_debug_enabled():
+ if self.server.is_debug_enabled():
 cmd = f"{cmd} --rebase"
- if self.is_beta:
+ if self.channel != Channel.DEV or self.pinned_commit is not None:
 cmd = f"{cmd} {self.git_remote} {self.upstream_commit}"
 async with self.git_operation_lock:
 await self._run_git_cmd_async(cmd)
@@ -670,11 +802,23 @@ class GitRepo:
 async def list_branches(self) -> List[str]:
 self._verify_repo()
 async with self.git_operation_lock:
- resp = await self._run_git_cmd("branch --list")
+ resp = await self._run_git_cmd("branch --list --no-color")
 return resp.strip().split("\n")
- async def remote(self, command: str) -> str:
- self._verify_repo(check_remote=True)
+ async def check_commit_exists(self, commit: str) -> bool:
+ self._verify_repo()
+ async with self.git_operation_lock:
+ shell_cmd = self.cmd_helper.get_shell_command()
+ try:
+ await self._run_git_cmd(
+ f"cat-file -e {commit}^{{commit}}", attempts=1
+ )
+ except shell_cmd.error:
+ return False
+ return True
+
+ async def remote(self, command: str = "", validate: bool = False) -> str:
+ self._verify_repo(check_remote=validate)
 async with self.git_operation_lock:
 resp = await self._run_git_cmd(
 f"remote {command}")
@@ -698,54 
+842,145 @@ class GitRepo:
 resp = await self._run_git_cmd(f"rev-list {args}".strip())
 return resp.strip()
- async def get_config_item(self, item: str) -> str:
+ async def config_get(
+ self,
+ key: str,
+ pattern: str = "",
+ get_all: bool = False,
+ local_only: bool = False
+ ) -> Optional[str]:
+ local = "--local " if local_only else ""
+ cmd = f"{local}--get-all" if get_all else f"{local}--get"
+ args = f"{cmd} {key} '{pattern}'" if pattern else f"{cmd} {key}"
+ try:
+ return await self.config_cmd(args)
+ except self.cmd_helper.get_shell_command().error as e:
+ if e.return_code == 1:
+ return None
+ raise
+
+ async def config_set(self, key: str, value: str) -> None:
+ await self.config_cmd(f"{key} '{value}'")
+
+ async def config_add(self, key: str, value: str) -> None:
+ await self.config_cmd(f"--add {key} '{value}'")
+
+ async def config_unset(
+ self, key: str, pattern: str = "", unset_all: bool = False
+ ) -> None:
+ cmd = "--unset-all" if unset_all else "--unset"
+ args = f"{cmd} {key} '{pattern}'" if pattern else f"{cmd} {key}"
+ await self.config_cmd(args)
+
+ async def config_cmd(self, args: str) -> str:
 self._verify_repo()
+ verbose = self.server.is_verbose_enabled()
 async with self.git_operation_lock:
- resp = await self._run_git_cmd(f"config --get {item}")
- return resp.strip()
+ for attempt in range(3):
+ try:
+ return await self._run_git_cmd(
+ f"config {args}", attempts=1, log_complete=verbose
+ )
+ except self.cmd_helper.get_shell_command().error as e:
+ if 1 <= (e.return_code or 10) <= 6 or attempt == 2:
+ raise
+ raise self.server.error("Failed to run git-config")
+
 async def checkout(self, branch: Optional[str] = None) -> None:
 self._verify_repo()
+ reset_commit: Optional[str] = None
 async with self.git_operation_lock:
 if branch is None:
- if self.is_beta:
- branch = self.upstream_commit
- else:
- branch = f"{self.git_remote}/{self.git_branch}"
+ # No branch is specified so we are checking out detached
+ if self.channel != Channel.DEV or self.pinned_commit is not None:
+ reset_commit = self.upstream_commit
+ branch = f"{self.git_remote}/{self.git_branch}"
 await self._run_git_cmd(f"checkout -q {branch}")
+ if reset_commit is not None:
+ await self.reset(reset_commit)
 async def run_fsck(self) -> None:
 async with self.git_operation_lock:
- await self._run_git_cmd("fsck --full", timeout=300., retries=1)
+ await self._run_git_cmd("fsck --full", timeout=300., attempts=1)
 async def clone(self) -> None:
+ if self.is_submodule_or_worktree():
+ raise self.server.error(
+ f"Cannot clone git repo {self.alias}, it is a {self.get_repo_type()} "
+ "of another git repo." 
+ ) async with self.git_operation_lock: + if self.recovery_url == "?": + raise self.server.error( + "Recovery url has not been detected, clone aborted" + ) self.cmd_helper.notify_update_response( f"Git Repo {self.alias}: Starting Clone Recovery...") event_loop = self.server.get_event_loop() if self.backup_path.exists(): await event_loop.run_in_thread(shutil.rmtree, self.backup_path) await self._check_lock_file_exists(remove=True) - git_cmd = f"clone {self.origin_url} {self.backup_path}" + cmd = ( + f"clone --branch {self.primary_branch} --filter=blob:none " + f"{self.recovery_url} {self.backup_path}" + ) try: - await self._run_git_cmd_async(git_cmd, 1, False, False) + await self._run_git_cmd_async(cmd, 1, False, False) except Exception as e: self.cmd_helper.notify_update_response( f"Git Repo {self.alias}: Git Clone Failed") raise self.server.error("Git Clone Error") from e - if self.git_path.exists(): - await event_loop.run_in_thread(shutil.rmtree, self.git_path) + if self.src_path.exists(): + await event_loop.run_in_thread(shutil.rmtree, self.src_path) await event_loop.run_in_thread( - shutil.move, str(self.backup_path), str(self.git_path)) + shutil.move, str(self.backup_path), str(self.src_path)) + self.repo_corrupt = False + self.valid_git_repo = True self.cmd_helper.notify_update_response( f"Git Repo {self.alias}: Git Clone Complete") + reset_commit = await self.get_recovery_ref("HEAD") + if reset_commit != "HEAD": + self.cmd_helper.notify_update_response( + f"Git Repo {self.alias}: Moving HEAD to previous " + f"commit {self.current_commit}" + ) + await self.reset(reset_commit) + + async def rollback(self) -> bool: + if self.rollback_commit == "?" or self.rollback_branch == "?": + raise self.server.error("Incomplete rollback data stored, cannot rollback") + if self.rollback_branch != self.git_branch: + await self.checkout(self.rollback_branch) + elif self.rollback_commit == self.current_commit: + return False + await self.reset(self.rollback_commit) + return True + + def capture_state_for_rollback(self) -> Dict[str, Any]: + branch = self.git_branch + if self.head_detached: + valid = "?" not in (self.git_remote, self.git_branch) + branch = f"{self.git_remote}/{self.git_branch}" if valid else "?" 
+ return { + "commit": self.current_commit, + "branch": branch, + "version": self.current_version + } + + def set_rollback_state(self, rb_state: Optional[Dict[str, Any]]) -> None: + if rb_state is None: + rb_state = self.capture_state_for_rollback() + self.rollback_commit = rb_state["commit"] + self.rollback_branch = rb_state["branch"] + self.rollback_version = rb_state["version"] async def get_commits_behind(self) -> List[Dict[str, Any]]: self._verify_repo() if self.is_current(): return [] async with self.git_operation_lock: - if self.is_beta: + if self.channel != Channel.DEV or self.pinned_commit is not None: ref = self.upstream_commit else: ref = f"{self.git_remote}/{self.git_branch}" @@ -763,13 +998,16 @@ class GitRepo: commits_behind.append(dict(cbh)) # type: ignore return commits_behind - async def get_tagged_commits(self) -> Dict[str, Any]: - self._verify_repo() + async def get_tagged_commits(self, count: int = 100) -> Dict[str, str]: + self._verify_repo(check_remote=True) + tip = f"{self.git_remote}/{self.git_branch}" + cnt_arg = f"--count={count} " if count > 0 else "" async with self.git_operation_lock: resp = await self._run_git_cmd( - "for-each-ref --count=10 --sort='-creatordate' " - f"--format={GIT_REF_FMT} 'refs/tags'") - tagged_commits: Dict[str, Any] = {} + f"for-each-ref {cnt_arg}--sort='-creatordate' --contains=HEAD " + f"--merged={tip} --format={GIT_REF_FMT} 'refs/tags'" + ) + tagged_commits: Dict[str, str] = {} for line in resp.split('\n'): parts = line.strip().split() if len(parts) != 3 or parts[0] != "commit": @@ -780,78 +1018,109 @@ class GitRepo: # Return tagged commits as SHA keys mapped to tag values return tagged_commits - async def get_tag_data(self, tag: str) -> Dict[str, Any]: - self._verify_repo() - async with self.git_operation_lock: - cmd = f"tag -l --format='%(contents)' {tag}" - resp = (await self._run_git_cmd(cmd)).strip() - req_fields = ["repo", "branch", "version", "commit"] - tag_data: Dict[str, Any] = {} - for line in resp.split("\n"): - parts = line.strip().split(":", 1) - if len(parts) != 2: - continue - field, value = parts - field = field.strip() - if field not in req_fields: - continue - tag_data[field] = value.strip() - if len(tag_data) != len(req_fields): - return {} - vlist = self._convert_semver(tag_data["version"]) - tag_data["version_as_list"] = vlist - return tag_data - def get_repo_status(self) -> Dict[str, Any]: + no_untrk_src = len(self.untracked_files) == 0 return { 'detected_type': "git_repo", 'remote_alias': self.git_remote, 'branch': self.git_branch, 'owner': self.git_owner, 'repo_name': self.git_repo_name, - 'version': self.current_version, - 'remote_version': self.upstream_version, + 'remote_url': self.upstream_url, + 'recovery_url': self.recovery_url, + 'version': self.current_version.short_version, + 'remote_version': self.upstream_version.short_version, + 'rollback_version': self.rollback_version.short_version, 'current_hash': self.current_commit, 'remote_hash': self.upstream_commit, - 'is_dirty': self.dirty, + 'is_dirty': self.current_version.dirty, 'detached': self.head_detached, 'commits_behind': self.commits_behind, + 'commits_behind_count': self.commits_behind_count, 'git_messages': self.git_messages, - 'full_version_string': self.full_version_string, - 'pristine': not self.dirty + 'full_version_string': self.current_version.full_version, + 'pristine': no_untrk_src and not self.current_version.dirty, + 'corrupt': self.repo_corrupt, + 'warnings': self.repo_warnings, + 'anomalies': self.repo_anomalies } - def 
get_version(self, upstream: bool = False) -> Tuple[Any, ...]: - version = self.upstream_version if upstream else self.current_version - return tuple(re.findall(r"\d+", version)) + def get_version(self, upstream: bool = False) -> GitVersion: + return self.upstream_version if upstream else self.current_version def is_detached(self) -> bool: return self.head_detached def is_dirty(self) -> bool: - return self.dirty + return self.current_version.dirty def is_current(self) -> bool: return self.current_commit == self.upstream_commit - def _convert_semver(self, version: str) -> List[int]: - ver_match = self.tag_r.match(version) - if ver_match is None: - return [] - try: - tag = ver_match.group(1) - core = tag.split("-")[0] - if core[0] == "v": - core = core[1:] - base_ver = [int(part) for part in core.split(".")] - base_ver.append({"alpha": 0, "beta": 1}.get(ver_match.group(3), 2)) - base_ver.append(int(ver_match.group(5)[1:])) - except Exception: - return [] - return base_ver + def is_submodule_or_worktree(self): + return ( + self.src_path.joinpath(".git").is_file() and + self.git_folder_path.parent.name in ("modules", "worktrees") + ) + + def is_valid(self) -> bool: + return ( + not self.is_damaged() and + not self.has_recoverable_errors() + ) + + def is_damaged(self) -> bool: + # A damaged repo requires a clone to recover + return not self.valid_git_repo or self.repo_corrupt + + def has_recoverable_errors(self) -> bool: + # These errors should be recoverable using a git reset + detached_err = False if self.server.is_debug_enabled() else self.head_detached + return ( + self.diverged or + self.is_dirty() or + detached_err + ) + + def get_repo_type(self) -> str: + type_name = self.git_folder_path.parent.name + if type_name == "modules": + return "submodule" + elif type_name == "worktrees": + return "worktree" + return "repo" + + async def get_recovery_ref(self, upstream_ref: Optional[str] = None) -> str: + """ Fetch the best reference for a 'reset' recovery attempt + + Returns the ref to reset to for "soft" recovery requests. The + preference is to reset to the current commit, however that is + only possible if the commit is known and if it is an ancestor of + the primary branch. + """ + if upstream_ref is None: + remote = await self.config_get(f"branch.{self.primary_branch}.remote") + if remote is None: + raise self.server.error( + f"Failed to find remote for primary branch '{self.primary_branch}'" + ) + upstream_ref = f"{remote}/{self.primary_branch}" + reset_commits: List[str] = [] + if self.pinned_commit is not None: + reset_commits.append(self.pinned_commit) + if self.current_commit != "?": + reset_commits.append(self.current_commit) + for commit in reset_commits: + try: + is_ancs = await self.is_ancestor(commit, upstream_ref, attempts=1) + except self.server.error: + is_ancs = False + if is_ancs: + return commit + return upstream_ref async def _check_lock_file_exists(self, remove: bool = False) -> bool: - lock_path = self.git_path.joinpath(".git/index.lock") + lock_path = self.git_folder_path.joinpath("index.lock") if lock_path.is_file(): if remove: logging.info(f"Git Repo {self.alias}: Git lock file found " @@ -877,22 +1146,36 @@ class GitRepo: return await self._check_lock_file_exists(remove=True) - async def _repair_loose_objects(self) -> bool: + async def _repair_loose_objects(self, notify: bool = False) -> bool: + if notify: + self.cmd_helper.notify_update_response( + "Attempting to repair loose objects..." 
+ ) try: - await self.cmd_helper.run_cmd_with_response( + shell_cmd = self.cmd_helper.get_shell_command() + await shell_cmd.exec_cmd( "find .git/objects/ -type f -empty | xargs rm", - timeout=10., retries=1, cwd=str(self.git_path)) + timeout=10., attempts=1, cwd=str(self.src_path)) await self._run_git_cmd_async( - "fetch --all -p", retries=1, fix_loose=False) - await self._run_git_cmd("fsck --full", timeout=300., retries=1) + "fetch --all -p", attempts=1, fix_loose=False) + await self._run_git_cmd("fsck --full", timeout=300., attempts=1) except Exception: - logging.exception("Attempt to repair loose objects failed") + msg = ( + "Attempt to repair loose objects failed, " + "hard recovery is required" + ) + logging.exception(msg) + if notify: + self.cmd_helper.notify_update_response(msg) return False + if notify: + self.cmd_helper.notify_update_response("Loose objects repaired") + self.repo_corrupt = False return True async def _run_git_cmd_async(self, cmd: str, - retries: int = 5, + attempts: int = 5, need_git_path: bool = True, fix_loose: bool = True ) -> None: @@ -904,13 +1187,14 @@ class GitRepo: env = os.environ.copy() env.update(GIT_ENV_VARS) if need_git_path: - git_cmd = f"git -C {self.git_path} {cmd}" + git_cmd = f"git -C {self.src_path} {cmd}" else: git_cmd = f"git {cmd}" - scmd = self.cmd_helper.build_shell_command( + shell_cmd = self.cmd_helper.get_shell_command() + scmd = shell_cmd.build_shell_command( git_cmd, callback=self._handle_process_output, env=env) - while retries: + while attempts: self.git_messages.clear() self.fetch_input_recd = False self.fetch_timeout_handle = event_loop.delay_callback( @@ -920,21 +1204,25 @@ class GitRepo: await scmd.run(timeout=0) except Exception: pass - self.fetch_timeout_handle.cancel() + if self.fetch_timeout_handle is not None: + self.fetch_timeout_handle.cancel() ret = scmd.get_return_code() if ret == 0: self.git_messages.clear() return - elif fix_loose: - if GIT_OBJ_ERR in "\n".join(self.git_messages): - ret = await self._repair_loose_objects() - if ret: - break - # since the attept to repair failed, bypass retries + elif self.repo_corrupt and fix_loose: + if await self._repair_loose_objects(notify=True): + # Only attempt to repair loose objects once. Re-run + # the command once. 
+ fix_loose = False
+ attempts = 2
+ else:
+ # since the attempt to repair failed, bypass attempts
 # and immediately raise an exception
 raise self.server.error(
- f"Unable to repair loose objects, use hard recovery")
- retries -= 1
+ "Unable to repair loose objects, use hard recovery"
+ )
+ attempts -= 1
 await asyncio.sleep(.5)
 await self._check_lock_file_exists(remove=True)
 raise self.server.error(f"Git Command '{cmd}' failed")
@@ -943,15 +1231,16 @@
 self.fetch_input_recd = True
 out = output.decode().strip()
 if out:
+ if out.startswith("fatal: ") and "corrupt" in out:
+ self.repo_corrupt = True
 self.git_messages.append(out)
 self.cmd_helper.notify_update_response(out)
 logging.debug(
 f"Git Repo {self.alias}: {out}")
- async def _check_process_active(self,
- scmd: shell_command.ShellCommand,
- cmd_name: str
- ) -> None:
+ async def _check_process_active(
+ self, scmd: ShellCommand, cmd_name: str
+ ) -> None:
 ret = scmd.get_return_code()
 if ret is not None:
 logging.debug(f"Git Repo {self.alias}: {cmd_name} returned")
@@ -971,21 +1260,39 @@
 # Cancel with SIGKILL
 await scmd.cancel(2)
- async def _run_git_cmd(self,
- git_args: str,
- timeout: float = 20.,
- retries: int = 5,
- env: Optional[Dict[str, str]] = None
- ) -> str:
+ async def _run_git_cmd(
+ self,
+ git_args: str,
+ timeout: float = 20.,
+ attempts: int = 5,
+ env: Optional[Dict[str, str]] = None,
+ corrupt_hdr: Optional[str] = None,
+ log_complete: bool = True
+ ) -> str:
+ shell_cmd = self.cmd_helper.get_shell_command()
 try:
- return await self.cmd_helper.run_cmd_with_response(
- f"git -C {self.git_path} {git_args}",
- timeout=timeout, retries=retries, env=env, sig_idx=2)
- except self.cmd_helper.scmd_error as e:
+ return await shell_cmd.exec_cmd(
+ f"git -C {self.src_path} {git_args}",
+ timeout=timeout,
+ attempts=attempts,
+ env=env,
+ sig_idx=2,
+ log_complete=log_complete
+ )
+ except shell_cmd.error as e:
 stdout = e.stdout.decode().strip()
 stderr = e.stderr.decode().strip()
+ msg_lines: List[str] = []
 if stdout:
+ msg_lines.extend(stdout.split("\n"))
 self.git_messages.append(stdout)
 if stderr:
+ msg_lines.extend(stderr.split("\n"))
 self.git_messages.append(stderr)
+ if corrupt_hdr is not None:
+ for line in msg_lines:
+ line = line.strip().lower()
+ if line.startswith(corrupt_hdr) and "corrupt" in line:
+ self.repo_corrupt = True
+ break
 raise
diff --git a/moonraker/components/update_manager/system_deploy.py b/moonraker/components/update_manager/system_deploy.py
new file mode 100644
index 0000000..1bf907b
--- /dev/null
+++ b/moonraker/components/update_manager/system_deploy.py
@@ -0,0 +1,556 @@
+# Provides System Package Updates
+#
+# Copyright (C) 2023 Eric Callahan
+#
+# This file may be distributed under the terms of the GNU GPLv3 license.
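
The module below prefers PackageKit and falls back to the APT CLI only when the apt binary can be found. A rough standalone sketch of that probe follows; it is illustrative only (the function name is an assumption, and the patch itself runs the check through Moonraker's shell_command component rather than raw asyncio):

    import asyncio

    async def apt_available() -> bool:
        # "command -v apt" exits 0 and prints the binary path when apt
        # is installed; any other exit status means no APT CLI fallback.
        proc = await asyncio.create_subprocess_shell(
            "sh -c 'command -v apt'",
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.DEVNULL,
        )
        out, _ = await proc.communicate()
        return proc.returncode == 0 and bool(out.strip())

    if __name__ == "__main__":
        print(asyncio.run(apt_available()))
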
+
+from __future__ import annotations
+import asyncio
+import logging
+import time
+import re
+from ...thirdparty.packagekit import enums as PkEnum
+from .base_deploy import BaseDeploy
+
+# Annotation imports
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Awaitable,
+ Optional,
+ Union,
+ Dict,
+ List,
+)
+
+if TYPE_CHECKING:
+ from ...confighelper import ConfigHelper
+ from ..shell_command import ShellCommandFactory as SCMDComp
+ from ..dbus_manager import DbusManager
+ from ..machine import Machine
+ from .update_manager import CommandHelper
+ from dbus_next import Variant
+ from dbus_next.aio import ProxyInterface
+ JsonType = Union[List[Any], Dict[str, Any]]
+
+
+class PackageDeploy(BaseDeploy):
+ def __init__(self,
+ config: ConfigHelper,
+ cmd_helper: CommandHelper
+ ) -> None:
+ super().__init__(config, cmd_helper, "system", "", "")
+ cmd_helper.set_package_updater(self)
+ self.use_packagekit = config.getboolean("enable_packagekit", True)
+ self.available_packages: List[str] = []
+
+ async def initialize(self) -> Dict[str, Any]:
+ storage = await super().initialize()
+ self.available_packages = storage.get('packages', [])
+ provider: BasePackageProvider
+ try_fallback = True
+ if self.use_packagekit:
+ try:
+ provider = PackageKitProvider(self.cmd_helper)
+ await provider.initialize()
+ except Exception:
+ pass
+ else:
+ self.log_info("PackageDeploy: PackageKit Provider Configured")
+ self.prefix = "PackageKit: "
+ try_fallback = False
+ if try_fallback:
+ # Check to see if the apt command is available
+ fallback = await self._get_fallback_provider()
+ if fallback is None:
+ provider = BasePackageProvider(self.cmd_helper)
+ machine: Machine = self.server.lookup_component("machine")
+ dist_info = machine.get_system_info()['distribution']
+ dist_id: str = dist_info['id'].lower()
+ self.server.add_warning(
+ "Unable to initialize System Update Provider for "
+ f"distribution: {dist_id}")
+ else:
+ self.log_info("PackageDeploy: Using APT CLI Provider")
+ self.prefix = "Package Manager APT: "
+ provider = fallback
+ self.provider = provider
+ return storage
+
+ async def _get_fallback_provider(self) -> Optional[BasePackageProvider]:
+ # Currently only the APT fallback provider is available
+ shell_cmd: SCMDComp
+ shell_cmd = self.server.lookup_component("shell_command")
+ cmd = shell_cmd.build_shell_command("sh -c 'command -v apt'")
+ try:
+ ret = await cmd.run_with_response()
+ except shell_cmd.error:
+ return None
+ # APT command found, should be available
+ self.log_debug(f"APT package manager detected: {ret}")
+ provider = AptCliProvider(self.cmd_helper)
+ try:
+ await provider.initialize()
+ except Exception:
+ return None
+ return provider
+
+ async def refresh(self) -> None:
+ try:
+ # Do not force a refresh until the server has started
+ if self.server.is_running():
+ await self._update_package_cache(force=True)
+ self.available_packages = await self.provider.get_packages()
+ pkg_msg = "\n".join(self.available_packages)
+ self.log_info(
+ f"Detected {len(self.available_packages)} package updates:"
+ f"\n{pkg_msg}"
+ )
+ except Exception:
+ self.log_exc("Error Refreshing System Packages")
+ # Update Persistent Storage
+ self._save_state()
+
+ def get_persistent_data(self) -> Dict[str, Any]:
+ storage = super().get_persistent_data()
+ storage['packages'] = self.available_packages
+ return storage
+
+ async def update(self) -> bool:
+ if not self.available_packages:
+ return False
+ self.cmd_helper.notify_update_response("Updating packages...")
+ try:
+ await 
self._update_package_cache(force=True, notify=True) + await self.provider.upgrade_system() + except Exception: + raise self.server.error("Error updating system packages") + self.available_packages = [] + self._save_state() + self.cmd_helper.notify_update_response( + "Package update finished...", is_complete=True) + return True + + async def _update_package_cache(self, + force: bool = False, + notify: bool = False + ) -> None: + curtime = time.time() + if force or curtime > self.last_refresh_time + 3600.: + # Don't update if a request was done within the last hour + await self.provider.refresh_packages(notify) + + async def install_packages(self, + package_list: List[str], + **kwargs + ) -> None: + await self.provider.install_packages(package_list, **kwargs) + + def get_update_status(self) -> Dict[str, Any]: + return { + 'package_count': len(self.available_packages), + 'package_list': self.available_packages + } + +class BasePackageProvider: + def __init__(self, cmd_helper: CommandHelper) -> None: + self.server = cmd_helper.get_server() + self.cmd_helper = cmd_helper + + async def initialize(self) -> None: + pass + + async def refresh_packages(self, notify: bool = False) -> None: + raise self.server.error("Cannot refresh packages, no provider set") + + async def get_packages(self) -> List[str]: + raise self.server.error("Cannot retrieve packages, no provider set") + + async def install_packages(self, + package_list: List[str], + **kwargs + ) -> None: + raise self.server.error("Cannot install packages, no provider set") + + async def upgrade_system(self) -> None: + raise self.server.error("Cannot upgrade packages, no provider set") + +class AptCliProvider(BasePackageProvider): + APT_CMD = "sudo DEBIAN_FRONTEND=noninteractive apt-get" + + async def refresh_packages(self, notify: bool = False) -> None: + await self.cmd_helper.run_cmd( + f"{self.APT_CMD} update", timeout=600., notify=notify) + + async def get_packages(self) -> List[str]: + shell_cmd = self.cmd_helper.get_shell_command() + res = await shell_cmd.exec_cmd("apt list --upgradable", timeout=60.) + pkg_list = [p.strip() for p in res.split("\n") if p.strip()] + if pkg_list: + pkg_list = pkg_list[2:] + return [p.split("/", maxsplit=1)[0] for p in pkg_list] + return [] + + async def resolve_packages(self, package_list: List[str]) -> List[str]: + self.cmd_helper.notify_update_response("Resolving packages...") + search_regex = "|".join([f"^{pkg}$" for pkg in package_list]) + cmd = f"apt-cache search --names-only \"{search_regex}\"" + shell_cmd = self.cmd_helper.get_shell_command() + ret = await shell_cmd.exec_cmd(cmd, timeout=600.) + resolved = [ + pkg.strip().split()[0] for pkg in ret.split("\n") if pkg.strip() + ] + return [avail for avail in package_list if avail in resolved] + + async def install_packages(self, + package_list: List[str], + **kwargs + ) -> None: + timeout: float = kwargs.get('timeout', 300.) 
+        retries: int = kwargs.get('retries', 3)
+        notify: bool = kwargs.get('notify', False)
+        await self.refresh_packages(notify=notify)
+        resolved = await self.resolve_packages(package_list)
+        if not resolved:
+            self.cmd_helper.notify_update_response("No packages detected")
+            return
+        logging.debug(f"Resolved packages: {resolved}")
+        pkgs = " ".join(resolved)
+        await self.cmd_helper.run_cmd(
+            f"{self.APT_CMD} install --yes {pkgs}", timeout=timeout,
+            attempts=retries, notify=notify)
+
+    async def upgrade_system(self) -> None:
+        await self.cmd_helper.run_cmd(
+            f"{self.APT_CMD} upgrade --yes", timeout=3600.,
+            notify=True)
+
+class PackageKitProvider(BasePackageProvider):
+    def __init__(self, cmd_helper: CommandHelper) -> None:
+        super().__init__(cmd_helper)
+        dbus_mgr: DbusManager = self.server.lookup_component("dbus_manager")
+        self.dbus_mgr = dbus_mgr
+        self.pkgkit: Optional[ProxyInterface] = None
+
+    async def initialize(self) -> None:
+        if not self.dbus_mgr.is_connected():
+            raise self.server.error("DBus Connection Not available")
+        # Check for PolicyKit permissions
+        await self.dbus_mgr.check_permission(
+            "org.freedesktop.packagekit.system-sources-refresh",
+            "The Update Manager will fail to fetch package updates")
+        await self.dbus_mgr.check_permission(
+            "org.freedesktop.packagekit.package-install",
+            "The Update Manager will fail to install packages")
+        await self.dbus_mgr.check_permission(
+            "org.freedesktop.packagekit.system-update",
+            "The Update Manager will fail to update packages"
+        )
+        # Fetch the PackageKit DBus Interface
+        self.pkgkit = await self.dbus_mgr.get_interface(
+            "org.freedesktop.PackageKit",
+            "/org/freedesktop/PackageKit",
+            "org.freedesktop.PackageKit")
+
+    async def refresh_packages(self, notify: bool = False) -> None:
+        await self.run_transaction("refresh_cache", False, notify=notify)
+
+    async def get_packages(self) -> List[str]:
+        flags = PkEnum.Filter.NONE
+        pkgs = await self.run_transaction("get_updates", flags.value)
+        pkg_ids = [info['package_id'] for info in pkgs if 'package_id' in info]
+        return [pkg_id.split(";")[0] for pkg_id in pkg_ids]
+
+    async def install_packages(self,
+                               package_list: List[str],
+                               **kwargs
+                               ) -> None:
+        notify: bool = kwargs.get('notify', False)
+        await self.refresh_packages(notify=notify)
+        flags = (
+            PkEnum.Filter.NEWEST | PkEnum.Filter.NOT_INSTALLED |
+            PkEnum.Filter.BASENAME | PkEnum.Filter.ARCH
+        )
+        pkgs = await self.run_transaction("resolve", flags.value, package_list)
+        pkg_ids = [info['package_id'] for info in pkgs if 'package_id' in info]
+        if pkg_ids:
+            logging.debug(f"Installing Packages: {pkg_ids}")
+            tflag = PkEnum.TransactionFlag.ONLY_TRUSTED
+            await self.run_transaction("install_packages", tflag.value,
+                                       pkg_ids, notify=notify)
+
+    async def upgrade_system(self) -> None:
+        # Get Updates, Install Packages
+        flags = PkEnum.Filter.NONE
+        pkgs = await self.run_transaction("get_updates", flags.value)
+        pkg_ids = [info['package_id'] for info in pkgs if 'package_id' in info]
+        if pkg_ids:
+            logging.debug(f"Upgrading Packages: {pkg_ids}")
+            tflag = PkEnum.TransactionFlag.ONLY_TRUSTED
+            await self.run_transaction("update_packages", tflag.value,
+                                       pkg_ids, notify=True)
+
+    def create_transaction(self) -> PackageKitTransaction:
+        if self.pkgkit is None:
+            raise self.server.error("PackageKit Interface Not Available")
+        return PackageKitTransaction(self.dbus_mgr, self.pkgkit,
+                                     self.cmd_helper)
+
+    async def run_transaction(self,
+                              method: str,
+                              *args,
+                              notify: bool = False
+                              ) -> Any:
+        transaction = 
self.create_transaction() + return await transaction.run(method, *args, notify=notify) + +class PackageKitTransaction: + GET_PKG_ROLES = ( + PkEnum.Role.RESOLVE | PkEnum.Role.GET_PACKAGES | + PkEnum.Role.GET_UPDATES + ) + QUERY_ROLES = GET_PKG_ROLES | PkEnum.Role.GET_REPO_LIST + PROGRESS_STATUS = ( + PkEnum.Status.RUNNING | PkEnum.Status.INSTALL | + PkEnum.Status.UPDATE + ) + + def __init__(self, + dbus_mgr: DbusManager, + pkgkit: ProxyInterface, + cmd_helper: CommandHelper + ) -> None: + self.server = cmd_helper.get_server() + self.eventloop = self.server.get_event_loop() + self.cmd_helper = cmd_helper + self.dbus_mgr = dbus_mgr + self.pkgkit = pkgkit + # Transaction Properties + self.notify = False + self._status = PkEnum.Status.UNKNOWN + self._role = PkEnum.Role.UNKNOWN + self._tflags = PkEnum.TransactionFlag.NONE + self._percentage = 101 + self._dl_remaining = 0 + self.speed = 0 + self.elapsed_time = 0 + self.remaining_time = 0 + self.caller_active = False + self.allow_cancel = True + self.uid = 0 + # Transaction data tracking + self.tfut: Optional[asyncio.Future] = None + self.last_progress_notify_time: float = 0. + self.result: List[Dict[str, Any]] = [] + self.err_msg: str = "" + + def run(self, + method: str, + *args, + notify: bool = False + ) -> Awaitable: + if self.tfut is not None: + raise self.server.error( + "PackageKit transaction can only be used once") + self.notify = notify + self.tfut = self.eventloop.create_future() + coro = self._start_transaction(method, *args) + self.eventloop.create_task(coro) + return self.tfut + + async def _start_transaction(self, + method: str, + *args + ) -> None: + assert self.tfut is not None + try: + # Create Transaction + tid = await self.pkgkit.call_create_transaction() # type: ignore + transaction, props = await self.dbus_mgr.get_interfaces( + "org.freedesktop.PackageKit", tid, + ["org.freedesktop.PackageKit.Transaction", + "org.freedesktop.DBus.Properties"]) + # Set interface callbacks + transaction.on_package(self._on_package_signal) # type: ignore + transaction.on_repo_detail( # type: ignore + self._on_repo_detail_signal) + transaction.on_item_progress( # type: ignore + self._on_item_progress_signal) + transaction.on_error_code(self._on_error_signal) # type: ignore + transaction.on_finished(self._on_finished_signal) # type: ignore + props.on_properties_changed( # type: ignore + self._on_properties_changed) + # Run method + logging.debug(f"PackageKit: Running transaction call_{method}") + func = getattr(transaction, f"call_{method}") + await func(*args) + except Exception as e: + self.tfut.set_exception(e) + + def _on_package_signal(self, + info_code: int, + package_id: str, + summary: str + ) -> None: + info = PkEnum.Info.from_index(info_code) + if self._role in self.GET_PKG_ROLES: + pkg_data = { + 'package_id': package_id, + 'info': info.desc, + 'summary': summary + } + self.result.append(pkg_data) + else: + self._notify_package(info, package_id) + + def _on_repo_detail_signal(self, + repo_id: str, + description: str, + enabled: bool + ) -> None: + if self._role == PkEnum.Role.GET_REPO_LIST: + repo_data = { + "repo_id": repo_id, + "description": description, + "enabled": enabled + } + self.result.append(repo_data) + else: + self._notify_repo(repo_id, description) + + def _on_item_progress_signal(self, + item_id: str, + status_code: int, + percent_complete: int + ) -> None: + status = PkEnum.Status.from_index(status_code) # noqa: F841 + # NOTE: This signal doesn't seem to fire predictably, + # nor does it seem to provide a consistent 
"percent complete" + # parameter. + # logging.debug( + # f"Role {self._role.name}: Item Progress Signal Received\n" + # f"Item ID: {item_id}\n" + # f"Percent Complete: {percent_complete}\n" + # f"Status: {status.desc}") + + def _on_error_signal(self, + error_code: int, + details: str + ) -> None: + err = PkEnum.Error.from_index(error_code) + self.err_msg = f"{err.name}: {details}" + + def _on_finished_signal(self, exit_code: int, run_time: int) -> None: + if self.tfut is None: + return + ext = PkEnum.Exit.from_index(exit_code) + secs = run_time / 1000. + if ext == PkEnum.Exit.SUCCESS: + self.tfut.set_result(self.result) + else: + err = self.err_msg or ext.desc + server = self.cmd_helper.get_server() + self.tfut.set_exception(server.error(err)) + msg = f"Transaction {self._role.desc}: Exit {ext.desc}, " \ + f"Run time: {secs:.2f} seconds" + if self.notify: + self.cmd_helper.notify_update_response(msg) + logging.debug(msg) + + def _on_properties_changed(self, + iface_name: str, + changed_props: Dict[str, Variant], + invalid_props: Dict[str, Variant] + ) -> None: + for name, var in changed_props.items(): + formatted = re.sub(r"(\w)([A-Z])", r"\g<1>_\g<2>", name).lower() + setattr(self, formatted, var.value) + + def _notify_package(self, info: PkEnum.Info, package_id: str) -> None: + if self.notify: + if info == PkEnum.Info.FINISHED: + return + pkg_parts = package_id.split(";") + msg = f"{info.desc}: {pkg_parts[0]} ({pkg_parts[1]})" + self.cmd_helper.notify_update_response(msg) + + def _notify_repo(self, repo_id: str, description: str) -> None: + if self.notify: + if not repo_id.strip(): + repo_id = description + # TODO: May want to eliminate dups + msg = f"GET: {repo_id}" + self.cmd_helper.notify_update_response(msg) + + def _notify_progress(self) -> None: + if self.notify and self._percentage <= 100: + msg = f"{self._status.desc}...{self._percentage}%" + if self._status == PkEnum.Status.DOWNLOAD and self._dl_remaining: + if self._dl_remaining < 1024: + msg += f", Remaining: {self._dl_remaining} B" + elif self._dl_remaining < 1048576: + msg += f", Remaining: {self._dl_remaining // 1024} KiB" + else: + msg += f", Remaining: {self._dl_remaining // 1048576} MiB" + if self.speed: + speed = self.speed // 8 + if speed < 1024: + msg += f", Speed: {speed} B/s" + elif speed < 1048576: + msg += f", Speed: {speed // 1024} KiB/s" + else: + msg += f", Speed: {speed // 1048576} MiB/s" + self.cmd_helper.notify_update_response(msg) + + @property + def role(self) -> PkEnum.Role: + return self._role + + @role.setter + def role(self, role_code: int) -> None: + self._role = PkEnum.Role.from_index(role_code) + if self._role in self.QUERY_ROLES: + # Never Notify Queries + self.notify = False + if self.notify: + msg = f"Transaction {self._role.desc} started..." 
+ self.cmd_helper.notify_update_response(msg) + logging.debug(f"PackageKit: Current Role: {self._role.desc}") + + @property + def status(self) -> PkEnum.Status: + return self._status + + @status.setter + def status(self, status_code: int) -> None: + self._status = PkEnum.Status.from_index(status_code) + self._percentage = 101 + self.speed = 0 + logging.debug(f"PackageKit: Current Status: {self._status.desc}") + + @property + def transaction_flags(self) -> PkEnum.TransactionFlag: + return self._tflags + + @transaction_flags.setter + def transaction_flags(self, bits: int) -> None: + self._tflags = PkEnum.TransactionFlag(bits) + + @property + def percentage(self) -> int: + return self._percentage + + @percentage.setter + def percentage(self, percent: int) -> None: + self._percentage = percent + if self._status in self.PROGRESS_STATUS: + self._notify_progress() + + @property + def download_size_remaining(self) -> int: + return self._dl_remaining + + @download_size_remaining.setter + def download_size_remaining(self, bytes_remaining: int) -> None: + self._dl_remaining = bytes_remaining + self._notify_progress() diff --git a/moonraker/components/update_manager/update_manager.py b/moonraker/components/update_manager/update_manager.py index 4e21ada..4c8fc66 100644 --- a/moonraker/components/update_manager/update_manager.py +++ b/moonraker/components/update_manager/update_manager.py @@ -7,28 +7,27 @@ from __future__ import annotations import asyncio import os -import pathlib import logging -import shutil -import zipfile import time import tempfile -import re -from thirdparty.packagekit import enums as PkEnum -from . import base_config +import pathlib +from .common import AppType, get_base_configuration, get_app_type from .base_deploy import BaseDeploy from .app_deploy import AppDeploy from .git_deploy import GitDeploy from .zip_deploy import ZipDeploy +from .system_deploy import PackageDeploy +from ...common import RequestType +from ...utils.filelock import AsyncExclusiveFileLock, LockTimeout # Annotation imports from typing import ( TYPE_CHECKING, + TypeVar, Any, - Awaitable, + Callable, Optional, Set, - Tuple, Type, Union, Dict, @@ -36,59 +35,69 @@ from typing import ( cast ) if TYPE_CHECKING: - from moonraker import Server - from confighelper import ConfigHelper - from websockets import WebRequest - from components.klippy_apis import KlippyAPI as APIComp - from components.shell_command import ShellCommandFactory as SCMDComp - from components.database import MoonrakerDatabase as DBComp - from components.database import NamespaceWrapper - from components.dbus_manager import DbusManager - from components.machine import Machine - from components.http_client import HttpClient - from eventloop import FlexTimer - from dbus_next import Variant - from dbus_next.aio import ProxyInterface + from ...server import Server + from ...confighelper import ConfigHelper + from ...common import WebRequest + from ..klippy_connection import KlippyConnection + from ..shell_command import ShellCommandFactory as SCMDComp + from ..database import MoonrakerDatabase as DBComp + from ..database import NamespaceWrapper + from ..machine import Machine + from ..http_client import HttpClient + from ...eventloop import FlexTimer JsonType = Union[List[Any], Dict[str, Any]] + _T = TypeVar("_T") # Check To see if Updates are necessary each hour UPDATE_REFRESH_INTERVAL = 3600. 
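+# Auto refresh may only run within the user configured 'refresh_window'
+# (default 0-5, i.e. between midnight and 5AM local time); eligibility is
+# re-checked once per refresh interval in _is_within_refresh_window().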
-# Perform auto refresh no later than 4am -MAX_UPDATE_HOUR = 4 -def get_deploy_class(app_path: str) -> Type: - if AppDeploy._is_git_repo(app_path): - return GitDeploy - else: - return ZipDeploy +def get_deploy_class( + app_type: Union[AppType, str], default: _T +) -> Union[Type[BaseDeploy], _T]: + key = AppType.from_string(app_type) if isinstance(app_type, str) else app_type + _deployers = { + AppType.WEB: ZipDeploy, + AppType.GIT_REPO: GitDeploy, + AppType.ZIP: ZipDeploy + } + return _deployers.get(key, default) class UpdateManager: def __init__(self, config: ConfigHelper) -> None: self.server = config.get_server() self.event_loop = self.server.get_event_loop() - self.channel = config.get('channel', "dev") - if self.channel not in ["dev", "beta"]: - raise config.error( - f"Unsupported channel '{self.channel}' in section" - " [update_manager]") - self.app_config = base_config.get_base_configuration( - config, self.channel - ) + self.instance_tracker = InstanceTracker(self.server) + self.kconn: KlippyConnection + self.kconn = self.server.lookup_component("klippy_connection") + self.app_config = get_base_configuration(config) + auto_refresh_enabled = config.getboolean('enable_auto_refresh', False) - self.cmd_helper = CommandHelper(config) + self.refresh_window = config.getintlist('refresh_window', [0, 5], + separator='-', count=2) + if ( + not (0 <= self.refresh_window[0] <= 23) or + not (0 <= self.refresh_window[1] <= 23) + ): + raise config.error("The hours specified in 'refresh_window'" + " must be between 0 and 23.") + if self.refresh_window[0] == self.refresh_window[1]: + raise config.error("The start and end hours specified" + " in 'refresh_window' cannot be the same.") + + self.cmd_helper = CommandHelper(config, self.get_updaters) self.updaters: Dict[str, BaseDeploy] = {} if config.getboolean('enable_system_updates', True): self.updaters['system'] = PackageDeploy(config, self.cmd_helper) mcfg = self.app_config["moonraker"] kcfg = self.app_config["klipper"] - mclass = get_deploy_class(mcfg.get("path")) + mclass = get_deploy_class(mcfg.get("type"), BaseDeploy) self.updaters['moonraker'] = mclass(mcfg, self.cmd_helper) kclass = BaseDeploy if ( os.path.exists(kcfg.get("path")) and os.path.exists(kcfg.get("env")) ): - kclass = get_deploy_class(kcfg.get("path")) + kclass = get_deploy_class(kcfg.get("type"), BaseDeploy) self.updaters['klipper'] = kclass(kcfg, self.cmd_helper) # TODO: The below check may be removed when invalid config options @@ -104,29 +113,28 @@ class UpdateManager: client_sections = config.get_prefix_sections("update_manager ") for section in client_sections: cfg = config[section] - name = section.split()[-1] + name = BaseDeploy.parse_name(cfg) if name in self.updaters: - self.server.add_warning( - f"[update_manager]: Extension {name} already added" - ) + if name not in ["klipper", "moonraker"]: + self.server.add_warning( + f"[update_manager]: Extension {name} already added" + ) continue try: client_type = cfg.get("type") - if client_type in ["web", "web_beta"]: - self.updaters[name] = WebClientDeploy(cfg, self.cmd_helper) - elif client_type in ["git_repo", "zip", "zip_beta"]: - path = os.path.expanduser(cfg.get('path')) - dclass = get_deploy_class(path) - self.updaters[name] = dclass(cfg, self.cmd_helper) - else: + deployer = get_deploy_class(client_type, None) + if deployer is None: self.server.add_warning( f"Invalid type '{client_type}' for section [{section}]") + else: + self.updaters[name] = deployer(cfg, self.cmd_helper) except Exception as e: 
self.server.add_warning( f"[update_manager]: Failed to load extension {name}: {e}" ) self.cmd_request_lock = asyncio.Lock() + self.initial_refresh_complete: bool = False self.klippy_identified_evt: Optional[asyncio.Event] = None # Auto Status Refresh @@ -136,26 +144,32 @@ class UpdateManager: self._handle_auto_refresh) self.server.register_endpoint( - "/machine/update/moonraker", ["POST"], - self._handle_update_request) + "/machine/update/moonraker", RequestType.POST, self._handle_update_request + ) self.server.register_endpoint( - "/machine/update/klipper", ["POST"], - self._handle_update_request) + "/machine/update/klipper", RequestType.POST, self._handle_update_request + ) self.server.register_endpoint( - "/machine/update/system", ["POST"], - self._handle_update_request) + "/machine/update/system", RequestType.POST, self._handle_update_request + ) self.server.register_endpoint( - "/machine/update/client", ["POST"], - self._handle_update_request) + "/machine/update/client", RequestType.POST, self._handle_update_request + ) self.server.register_endpoint( - "/machine/update/full", ["POST"], - self._handle_full_update_request) + "/machine/update/full", RequestType.POST, self._handle_full_update_request + ) self.server.register_endpoint( - "/machine/update/status", ["GET"], - self._handle_status_request) + "/machine/update/status", RequestType.GET, self._handle_status_request + ) self.server.register_endpoint( - "/machine/update/recover", ["POST"], - self._handle_repo_recovery) + "/machine/update/refresh", RequestType.POST, self._handle_refresh_request + ) + self.server.register_endpoint( + "/machine/update/recover", RequestType.POST, self._handle_repo_recovery + ) + self.server.register_endpoint( + "/machine/update/rollback", RequestType.POST, self._handle_rollback + ) self.server.register_notification("update_manager:update_response") self.server.register_notification("update_manager:update_refreshed") @@ -163,7 +177,11 @@ class UpdateManager: self.server.register_event_handler( "server:klippy_identified", self._set_klipper_repo) + def get_updaters(self) -> Dict[str, BaseDeploy]: + return self.updaters + async def component_init(self) -> None: + await self.instance_tracker.set_instance_id() # Prune stale data from the database umdb = self.cmd_helper.get_umdb() db_keys = await umdb.keys() @@ -171,16 +189,16 @@ class UpdateManager: if key not in self.updaters: logging.info(f"Removing stale update_manager data: {key}") await umdb.pop(key, None) - - async with self.cmd_request_lock: - for updater in list(self.updaters.values()): - await updater.initialize() - if updater.needs_refresh(): - await updater.refresh() + for updater in list(self.updaters.values()): + await updater.initialize() if self.refresh_timer is not None: - self.refresh_timer.start(delay=UPDATE_REFRESH_INTERVAL) + self.refresh_timer.start() + else: + self.event_loop.register_callback( + self._handle_auto_refresh, self.event_loop.get_loop_time() + ) - async def _set_klipper_repo(self) -> None: + def _set_klipper_repo(self) -> None: if self.klippy_identified_evt is not None: self.klippy_identified_evt.set() kinfo = self.server.get_klippy_info() @@ -190,11 +208,13 @@ class UpdateManager: kpath: str = kinfo['klipper_path'] executable: str = kinfo['python_path'] kupdater = self.updaters.get('klipper') + app_type = get_app_type(kpath) if ( - isinstance(kupdater, AppDeploy) and - kupdater.check_same_paths(kpath, executable) + (isinstance(kupdater, AppDeploy) and + kupdater.check_same_paths(kpath, executable)) or + (app_type == 
AppType.NONE and type(kupdater) is BaseDeploy)
+        ):
-            # Current Klipper Updater is valid
+            # Current Klipper Updater is valid or unnecessary
             return
         # Update paths in the database
         db: DBComp = self.server.lookup_component('database')
@@ -203,66 +223,73 @@ class UpdateManager:
         kcfg = self.app_config["klipper"]
         kcfg.set_option("path", kpath)
         kcfg.set_option("env", executable)
-        need_notification = not isinstance(kupdater, AppDeploy)
-        kclass = get_deploy_class(kpath)
-        self.updaters['klipper'] = kclass(kcfg, self.cmd_helper)
+        kcfg.set_option("type", str(app_type))
+        notify = not isinstance(kupdater, AppDeploy)
+        kclass = get_deploy_class(app_type, BaseDeploy)
+        coro = self._update_klipper_repo(kclass(kcfg, self.cmd_helper), notify)
+        self.event_loop.create_task(coro)
+
+    async def _update_klipper_repo(self, updater: BaseDeploy, notify: bool) -> None:
         async with self.cmd_request_lock:
+            self.updaters['klipper'] = updater
             umdb = self.cmd_helper.get_umdb()
             await umdb.pop('klipper', None)
-            await self.updaters['klipper'].initialize()
-            await self.updaters['klipper'].refresh()
-        if need_notification:
-            vinfo: Dict[str, Any] = {}
-            for name, updater in self.updaters.items():
-                vinfo[name] = updater.get_update_status()
-            uinfo = self.cmd_helper.get_rate_limit_stats()
-            uinfo['version_info'] = vinfo
-            uinfo['busy'] = self.cmd_helper.is_update_busy()
-            self.server.send_event("update_manager:update_refreshed", uinfo)
+            await updater.initialize()
+            await updater.refresh()
+        if notify:
+            self.cmd_helper.notify_update_refreshed()
 
-    async def _check_klippy_printing(self) -> bool:
-        kapi: APIComp = self.server.lookup_component('klippy_apis')
-        result: Dict[str, Any] = await kapi.query_objects(
-            {'print_stats': None}, default={})
-        pstate: str = result.get('print_stats', {}).get('state', "")
-        return pstate.lower() == "printing"
+    def _is_within_refresh_window(self) -> bool:
+        cur_hour = time.localtime(time.time()).tm_hour
+        if self.refresh_window[0] < self.refresh_window[1]:
+            return self.refresh_window[0] <= cur_hour < self.refresh_window[1]
+        return cur_hour >= self.refresh_window[0] or cur_hour < self.refresh_window[1]
 
     async def _handle_auto_refresh(self, eventtime: float) -> float:
-        cur_hour = time.localtime(time.time()).tm_hour
-        # Update when the local time is between 12AM and 5AM
-        if cur_hour >= MAX_UPDATE_HOUR:
-            return eventtime + UPDATE_REFRESH_INTERVAL
-        if await self._check_klippy_printing():
-            # Don't Refresh during a print
-            logging.info("Klippy is printing, auto refresh aborted")
-            return eventtime + UPDATE_REFRESH_INTERVAL
-        vinfo: Dict[str, Any] = {}
+        log_remaining_time = True
+        if self.initial_refresh_complete:
+            log_remaining_time = False
+            # Update only if within the refresh window
+            if not self._is_within_refresh_window():
+                logging.debug("update_manager: current time is outside of"
+                              " the refresh window, auto refresh rescheduled")
+                return eventtime + UPDATE_REFRESH_INTERVAL
+        if self.kconn.is_printing():
+            # Don't Refresh during a print
+            logging.info("Klippy is printing, auto refresh aborted")
+            return eventtime + UPDATE_REFRESH_INTERVAL
         need_notify = False
+        machine: Machine = self.server.lookup_component("machine")
+        if machine.validation_enabled():
+            logging.info(
+                "update_manager: Install validation pending, bypassing "
+                "initial refresh"
+            )
+            self.initial_refresh_complete = True
+            return eventtime + UPDATE_REFRESH_INTERVAL
         async with self.cmd_request_lock:
             try:
                 for name, updater in list(self.updaters.items()):
-                    if updater.needs_refresh():
+                    if 
updater.needs_refresh(log_remaining_time): await updater.refresh() need_notify = True - vinfo[name] = updater.get_update_status() except Exception: logging.exception("Unable to Refresh Status") return eventtime + UPDATE_REFRESH_INTERVAL + finally: + self.initial_refresh_complete = True if need_notify: - uinfo = self.cmd_helper.get_rate_limit_stats() - uinfo['version_info'] = vinfo - uinfo['busy'] = self.cmd_helper.is_update_busy() - self.server.send_event("update_manager:update_refreshed", uinfo) + self.cmd_helper.notify_update_refreshed() return eventtime + UPDATE_REFRESH_INTERVAL async def _handle_update_request(self, web_request: WebRequest ) -> str: - if await self._check_klippy_printing(): + if self.kconn.is_printing(): raise self.server.error("Update Refused: Klippy is printing") app: str = web_request.get_endpoint().split("/")[-1] if app == "client": - app = web_request.get('name') + app = web_request.get_str('name') if self.cmd_helper.is_app_updating(app): return f"Object {app} is currently being updated" updater = self.updaters.get(app, None) @@ -271,13 +298,10 @@ class UpdateManager: async with self.cmd_request_lock: self.cmd_helper.set_update_info(app, id(web_request)) try: - if not await self._check_need_reinstall(app): - await updater.update() + await updater.update() except Exception as e: self.cmd_helper.notify_update_response( - f"Error updating {app}") - self.cmd_helper.notify_update_response( - str(e), is_complete=True) + f"Error updating {app}: {e}", is_complete=True) raise finally: self.cmd_helper.clear_update_info() @@ -302,17 +326,14 @@ class UpdateManager: if name in ['klipper', 'moonraker', 'system']: continue app_name = name - if not await self._check_need_reinstall(app_name): - await updater.update() + await updater.update() # Update Klipper app_name = 'klipper' kupdater = self.updaters.get('klipper') if isinstance(kupdater, AppDeploy): self.klippy_identified_evt = asyncio.Event() - check_restart = True - if not await self._check_need_reinstall(app_name): - check_restart = await kupdater.update() + check_restart = await kupdater.update() if self.cmd_helper.needs_service_restart(app_name): await kupdater.restart_service() check_restart = True @@ -328,51 +349,26 @@ class UpdateManager: "Klippy reconnect timed out...") else: self.cmd_helper.notify_update_response( - f"Klippy Reconnected") + "Klippy Reconnected") self.klippy_identified_evt = None # Update Moonraker app_name = 'moonraker' moon_updater = cast(AppDeploy, self.updaters["moonraker"]) - if not await self._check_need_reinstall(app_name): - await moon_updater.update() + await moon_updater.update() if self.cmd_helper.needs_service_restart(app_name): await moon_updater.restart_service() self.cmd_helper.set_full_complete(True) self.cmd_helper.notify_update_response( "Full Update Complete", is_complete=True) except Exception as e: - self.cmd_helper.notify_update_response( - f"Error updating {app_name}") self.cmd_helper.set_full_complete(True) self.cmd_helper.notify_update_response( - str(e), is_complete=True) + f"Error updating {app_name}: {e}", is_complete=True) finally: self.cmd_helper.clear_update_info() return "ok" - async def _check_need_reinstall(self, name: str) -> bool: - if name not in self.updaters: - return False - updater = self.updaters[name] - if not isinstance(updater, AppDeploy): - return False - if not updater.check_need_channel_swap(): - return False - app_type = updater.get_configured_type() - if app_type == "git_repo": - deploy_class: Type = GitDeploy - else: - deploy_class = ZipDeploy - if 
isinstance(updater, deploy_class): - # Here the channel swap can be done without instantiating a new - # class, as it will automatically be done when the user updates. - return False - # Instantiate the new updater. This will perform a reinstallation - new_updater = await deploy_class.from_application(updater) - self.updaters[name] = new_updater - return True - async def _handle_status_request(self, web_request: WebRequest ) -> Dict[str, Any]: @@ -380,10 +376,16 @@ class UpdateManager: # Override a request to refresh if: # - An update is in progress # - Klippy is printing + # - Validation is pending + machine: Machine = self.server.lookup_component("machine") if ( + machine.validation_enabled() or self.cmd_helper.is_update_busy() or - await self._check_klippy_printing() + self.kconn.is_printing() or + not self.initial_refresh_complete ): + if check_refresh: + logging.info("update_manager: bypassing refresh request") check_refresh = False if check_refresh: @@ -394,6 +396,7 @@ class UpdateManager: lrt = max([upd.get_last_refresh_time() for upd in self.updaters.values()]) if time.time() < lrt + 60.: + logging.debug("update_manager: refresh bypassed due to spam") check_refresh = False self.cmd_request_lock.release() vinfo: Dict[str, Any] = {} @@ -413,14 +416,43 @@ class UpdateManager: if check_refresh: event_loop = self.server.get_event_loop() event_loop.delay_callback( - .2, self.server.send_event, - "update_manager:update_refreshed", ret) + .2, self.cmd_helper.notify_update_refreshed + ) return ret - async def _handle_repo_recovery(self, - web_request: WebRequest - ) -> str: - if await self._check_klippy_printing(): + async def _handle_refresh_request( + self, web_request: WebRequest + ) -> Dict[str, Any]: + name: Optional[str] = web_request.get_str("name", None) + if name is not None and name not in self.updaters: + raise self.server.error(f"No updater registered for '{name}'") + machine: Machine = self.server.lookup_component("machine") + if ( + machine.validation_enabled() or + self.cmd_helper.is_update_busy() or + self.kconn.is_printing() or + not self.initial_refresh_complete + ): + raise self.server.error( + "Server is busy, cannot perform refresh", 503 + ) + async with self.cmd_request_lock: + vinfo: Dict[str, Any] = {} + for updater_name, updater in list(self.updaters.items()): + if name is None or updater_name == name: + await updater.refresh() + vinfo[updater_name] = updater.get_update_status() + ret = self.cmd_helper.get_rate_limit_stats() + ret['version_info'] = vinfo + ret['busy'] = self.cmd_helper.is_update_busy() + event_loop = self.server.get_event_loop() + event_loop.delay_callback( + .2, self.cmd_helper.notify_update_refreshed + ) + return ret + + async def _handle_repo_recovery(self, web_request: WebRequest) -> str: + if self.kconn.is_printing(): raise self.server.error( "Recovery Attempt Refused: Klippy is printing") app: str = web_request.get_str('name') @@ -445,21 +477,47 @@ class UpdateManager: self.cmd_helper.clear_update_info() return "ok" - def close(self) -> None: + async def _handle_rollback(self, web_request: WebRequest) -> str: + if self.kconn.is_printing(): + raise self.server.error("Rollback Attempt Refused: Klippy is printing") + app: str = web_request.get_str('name') + updater = self.updaters.get(app, None) + if updater is None: + raise self.server.error(f"Updater {app} not available", 404) + async with self.cmd_request_lock: + self.cmd_helper.set_update_info(f"rollback_{app}", id(web_request)) + try: + await updater.rollback() + except Exception as e: + 
self.cmd_helper.notify_update_response(f"Error Rolling Back {app}") + self.cmd_helper.notify_update_response(str(e), is_complete=True) + raise + finally: + self.cmd_helper.clear_update_info() + return "ok" + + async def close(self) -> None: if self.refresh_timer is not None: self.refresh_timer.stop() + await self.instance_tracker.close() + for updater in self.updaters.values(): + ret = updater.close() + if ret is not None: + await ret class CommandHelper: - def __init__(self, config: ConfigHelper) -> None: + def __init__( + self, + config: ConfigHelper, + get_updater_cb: Callable[[], Dict[str, BaseDeploy]] + ) -> None: self.server = config.get_server() + self.get_updaters = get_updater_cb self.http_client: HttpClient self.http_client = self.server.lookup_component("http_client") - self.debug_enabled = config.getboolean('enable_repo_debug', False) - if self.debug_enabled: + config.getboolean('enable_repo_debug', False, deprecate=True) + if self.server.is_debug_enabled(): logging.warning("UPDATE MANAGER: REPO DEBUG ENABLED") - shell_cmd: SCMDComp = self.server.lookup_component('shell_command') - self.scmd_error = shell_cmd.error - self.build_shell_command = shell_cmd.build_shell_command self.pkg_updater: Optional[PackageDeploy] = None # database management @@ -487,6 +545,9 @@ class CommandHelper: def get_server(self) -> Server: return self.server + def get_shell_command(self) -> SCMDComp: + return self.server.lookup_component("shell_command") + def get_http_client(self) -> HttpClient: return self.http_client @@ -496,9 +557,6 @@ class CommandHelper: def get_umdb(self) -> NamespaceWrapper: return self.umdb - def is_debug_enabled(self) -> bool: - return self.debug_enabled - def set_update_info(self, app: str, uid: int) -> None: self.cur_update_app = app self.cur_update_id = uid @@ -537,40 +595,36 @@ class CommandHelper: def set_package_updater(self, updater: PackageDeploy) -> None: self.pkg_updater = updater - async def run_cmd(self, - cmd: str, - timeout: float = 20., - notify: bool = False, - retries: int = 1, - env: Optional[Dict[str, str]] = None, - cwd: Optional[str] = None, - sig_idx: int = 1 - ) -> None: + async def run_cmd( + self, + cmd: str, + timeout: float = 20., + notify: bool = False, + attempts: int = 1, + env: Optional[Dict[str, str]] = None, + cwd: Optional[str] = None, + sig_idx: int = 1, + log_stderr: bool = False + ) -> None: cb = self.notify_update_response if notify else None - scmd = self.build_shell_command(cmd, callback=cb, env=env, cwd=cwd) - for _ in range(retries): - if await scmd.run(timeout=timeout, sig_idx=sig_idx): - break - else: - raise self.server.error("Shell Command Error") + log_stderr |= self.server.is_verbose_enabled() + await self.get_shell_command().run_cmd_async( + cmd, cb, timeout=timeout, attempts=attempts, + env=env, cwd=cwd, sig_idx=sig_idx, log_stderr=log_stderr + ) - async def run_cmd_with_response(self, - cmd: str, - timeout: float = 20., - retries: int = 5, - env: Optional[Dict[str, str]] = None, - cwd: Optional[str] = None, - sig_idx: int = 1 - ) -> str: - scmd = self.build_shell_command(cmd, None, env=env, cwd=cwd) - result = await scmd.run_with_response(timeout, retries, - sig_idx=sig_idx) - return result + def notify_update_refreshed(self) -> None: + vinfo: Dict[str, Any] = {} + for name, updater in self.get_updaters().items(): + vinfo[name] = updater.get_update_status() + uinfo = self.get_rate_limit_stats() + uinfo['version_info'] = vinfo + uinfo['busy'] = self.is_update_busy() + self.server.send_event("update_manager:update_refreshed", 
uinfo) - def notify_update_response(self, - resp: Union[str, bytes], - is_complete: bool = False - ) -> None: + def notify_update_response( + self, resp: Union[str, bytes], is_complete: bool = False + ) -> None: if self.cur_update_app is None: return resp = resp.strip() @@ -587,726 +641,96 @@ class CommandHelper: self.server.send_event( "update_manager:update_response", notification) - async def install_packages(self, - package_list: List[str], - **kwargs - ) -> None: + async def install_packages( + self, package_list: List[str], **kwargs + ) -> None: if self.pkg_updater is None: return await self.pkg_updater.install_packages(package_list, **kwargs) - def get_rate_limit_stats(self): + def get_rate_limit_stats(self) -> Dict[str, Any]: return self.http_client.github_api_stats() - def on_download_progress(self, - progress: int, - download_size: int, - downloaded: int - ) -> None: + def on_download_progress( + self, progress: int, download_size: int, downloaded: int + ) -> None: totals = ( f"{downloaded // 1024} KiB / " - f"{download_size// 1024} KiB" + f"{download_size // 1024} KiB" ) self.notify_update_response( f"Downloading {self.cur_update_app}: {totals} [{progress}%]") - async def create_tempdir(self, suffix=None, prefix=None): + async def create_tempdir( + self, suffix: Optional[str] = None, prefix: Optional[str] = None + ) -> tempfile.TemporaryDirectory[str]: def _createdir(sfx, pfx): return tempfile.TemporaryDirectory(suffix=sfx, prefix=pfx) eventloop = self.server.get_event_loop() return await eventloop.run_in_thread(_createdir, suffix, prefix) -class PackageDeploy(BaseDeploy): - def __init__(self, - config: ConfigHelper, - cmd_helper: CommandHelper - ) -> None: - super().__init__(config, cmd_helper, "system", "", "") - cmd_helper.set_package_updater(self) - self.use_packagekit = config.getboolean("enable_packagekit", True) - self.available_packages: List[str] = [] +class InstanceTracker: + def __init__(self, server: Server) -> None: + self.server = server + self.inst_id = "" + tmpdir = pathlib.Path(tempfile.gettempdir()) + self.inst_file_path = tmpdir.joinpath("moonraker_instance_ids") - async def initialize(self) -> Dict[str, Any]: - storage = await super().initialize() - self.available_packages = storage.get('packages', []) - provider: BasePackageProvider - try_fallback = True - if self.use_packagekit: - try: - provider = PackageKitProvider(self.cmd_helper) - await provider.initialize() - except Exception: - pass - else: - logging.info("PackageDeploy: Using PackageKit Provider") - try_fallback = False - if try_fallback: - # Check to see of the apt command is available - fallback = await self._get_fallback_provider() - if fallback is None: - provider = BasePackageProvider(self.cmd_helper) - machine: Machine = self.server.lookup_component("machine") - dist_info = machine.get_system_info()['distribution'] - dist_id: str = dist_info['id'].lower() - self.server.add_warning( - "Unable to initialize System Update Provider for " - f"distribution: {dist_id}") - else: - logging.info("PackageDeploy: Using APT CLI Provider") - provider = fallback - self.provider = provider - return storage + def get_instance_id(self) -> str: + machine: Machine = self.server.lookup_component("machine") + cur_name = "".join(machine.unit_name.split()) + cur_uuid: str = self.server.get_app_args()["instance_uuid"] + pid = os.getpid() + return f"{cur_name}:{cur_uuid}:{pid}" - async def _get_fallback_provider(self) -> Optional[BasePackageProvider]: - # Currently only the API Fallback provider is available - 
shell_cmd: SCMDComp - shell_cmd = self.server.lookup_component("shell_command") - cmd = shell_cmd.build_shell_command("sh -c 'command -v apt'") + async def _read_instance_ids(self) -> List[str]: + if not self.inst_file_path.exists(): + return [] + eventloop = self.server.get_event_loop() + id_data = await eventloop.run_in_thread(self.inst_file_path.read_text) + return [iid.strip() for iid in id_data.strip().splitlines() if iid.strip()] + + async def set_instance_id(self) -> None: try: - ret = await cmd.run_with_response() - except shell_cmd.error: - return None - # APT Command found should be available - logging.debug(f"APT package manager detected: {ret.encode()}") - provider = AptCliProvider(self.cmd_helper) - try: - await provider.initialize() + async with AsyncExclusiveFileLock(self.inst_file_path, 2.): + self.inst_id = self.get_instance_id() + iids = await self._read_instance_ids() + if self.inst_id not in iids: + iids.append(self.inst_id) + iid_string = "\n".join(iids) + if len(iids) > 1: + self.server.add_log_rollover_item( + "um_multi_instance_msg", + "Multiple instances of Moonraker have the update " + f"manager enabled.\n{iid_string}" + ) + eventloop = self.server.get_event_loop() + await eventloop.run_in_thread( + self.inst_file_path.write_text, iid_string + ) + except LockTimeout as e: + logging.info(str(e)) except Exception: - return None - return provider + logging.exception("Failed to set instance id") - async def refresh(self) -> None: + async def close(self) -> None: try: - # Do not force a refresh until the server has started - if self.server.is_running(): - await self._update_package_cache(force=True) - self.available_packages = await self.provider.get_packages() - pkg_msg = "\n".join(self.available_packages) - logging.info( - f"Detected {len(self.available_packages)} package updates:" - f"\n{pkg_msg}") + async with AsyncExclusiveFileLock(self.inst_file_path, 2.): + # Remove current id + iids = await self._read_instance_ids() + if self.inst_id in iids: + iids.remove(self.inst_id) + iid_string = "\n".join(iids) + eventloop = self.server.get_event_loop() + await eventloop.run_in_thread( + self.inst_file_path.write_text, iid_string + ) + except LockTimeout as e: + logging.info(str(e)) except Exception: - logging.exception("Error Refreshing System Packages") - # Update Persistent Storage - self._save_state() + logging.exception("Failed to remove instance id") - def get_persistent_data(self) -> Dict[str, Any]: - storage = super().get_persistent_data() - storage['packages'] = self.available_packages - return storage - - async def update(self) -> bool: - if not self.available_packages: - return False - self.cmd_helper.notify_update_response("Updating packages...") - try: - await self._update_package_cache(force=True, notify=True) - await self.provider.upgrade_system() - except Exception: - raise self.server.error("Error updating system packages") - self.available_packages = [] - self._save_state() - self.cmd_helper.notify_update_response( - "Package update finished...", is_complete=True) - return True - - async def _update_package_cache(self, - force: bool = False, - notify: bool = False - ) -> None: - curtime = time.time() - if force or curtime > self.last_refresh_time + 3600.: - # Don't update if a request was done within the last hour - await self.provider.refresh_packages(notify) - - async def install_packages(self, - package_list: List[str], - **kwargs - ) -> None: - await self.provider.install_packages(package_list, **kwargs) - - def get_update_status(self) -> Dict[str, 
Any]: - return { - 'package_count': len(self.available_packages), - 'package_list': self.available_packages - } - -class BasePackageProvider: - def __init__(self, cmd_helper: CommandHelper) -> None: - self.server = cmd_helper.get_server() - self.cmd_helper = cmd_helper - - async def initialize(self) -> None: - pass - - async def refresh_packages(self, notify: bool = False) -> None: - raise self.server.error("Cannot refresh packages, no provider set") - - async def get_packages(self) -> List[str]: - raise self.server.error("Cannot retrieve packages, no provider set") - - async def install_packages(self, - package_list: List[str], - **kwargs - ) -> None: - raise self.server.error("Cannot install packages, no provider set") - - async def upgrade_system(self) -> None: - raise self.server.error("Cannot upgrade packages, no provider set") - -class AptCliProvider(BasePackageProvider): - APT_CMD = "sudo DEBIAN_FRONTEND=noninteractive apt-get" - - async def refresh_packages(self, notify: bool = False) -> None: - await self.cmd_helper.run_cmd( - f"{self.APT_CMD} update", timeout=600., notify=notify) - - async def get_packages(self) -> List[str]: - res = await self.cmd_helper.run_cmd_with_response( - "apt list --upgradable", timeout=60.) - pkg_list = [p.strip() for p in res.split("\n") if p.strip()] - if pkg_list: - pkg_list = pkg_list[2:] - return [p.split("/", maxsplit=1)[0] for p in pkg_list] - return [] - - async def install_packages(self, - package_list: List[str], - **kwargs - ) -> None: - timeout: float = kwargs.get('timeout', 300.) - retries: int = kwargs.get('retries', 3) - notify: bool = kwargs.get('notify', False) - pkgs = " ".join(package_list) - await self.refresh_packages(notify=notify) - await self.cmd_helper.run_cmd( - f"{self.APT_CMD} install --yes {pkgs}", timeout=timeout, - retries=retries, notify=notify) - - async def upgrade_system(self) -> None: - await self.cmd_helper.run_cmd( - f"{self.APT_CMD} upgrade --yes", timeout=3600., - notify=True) - -class PackageKitProvider(BasePackageProvider): - def __init__(self, cmd_helper: CommandHelper) -> None: - super().__init__(cmd_helper) - dbus_mgr: DbusManager = self.server.lookup_component("dbus_manager") - self.dbus_mgr = dbus_mgr - self.pkgkit: Optional[ProxyInterface] = None - - async def initialize(self) -> None: - if not self.dbus_mgr.is_connected(): - raise self.server.error("DBus Connection Not available") - # Check for PolicyKit permissions - await self.dbus_mgr.check_permission( - "org.freedesktop.packagekit.system-sources-refresh", - "The Update Manager will fail to fetch package updates") - await self.dbus_mgr.check_permission( - "org.freedesktop.packagekit.package-install", - "The Update Manager will fail to install packages") - await self.dbus_mgr.check_permission( - "org.freedesktop.packagekit.system-update", - "The Update Manager will fail to update packages" - ) - # Fetch the PackageKit DBus Inteface - self.pkgkit = await self.dbus_mgr.get_interface( - "org.freedesktop.PackageKit", - "/org/freedesktop/PackageKit", - "org.freedesktop.PackageKit") - - async def refresh_packages(self, notify: bool = False) -> None: - await self.run_transaction("refresh_cache", False, notify=notify) - - async def get_packages(self) -> List[str]: - flags = PkEnum.Filter.NONE - pkgs = await self.run_transaction("get_updates", flags.value) - pkg_ids = [info['package_id'] for info in pkgs if 'package_id' in info] - return [pkg_id.split(";")[0] for pkg_id in pkg_ids] - - async def install_packages(self, - package_list: List[str], - **kwargs - ) -> 
None: - notify: bool = kwargs.get('notify', False) - await self.refresh_packages(notify=notify) - flags = PkEnum.Filter.NEWEST | PkEnum.Filter.NOT_INSTALLED | \ - PkEnum.Filter.BASENAME - pkgs = await self.run_transaction("resolve", flags.value, package_list) - pkg_ids = [info['package_id'] for info in pkgs if 'package_id' in info] - if pkg_ids: - tflag = PkEnum.TransactionFlag.ONLY_TRUSTED - await self.run_transaction("install_packages", tflag.value, - pkg_ids, notify=notify) - - async def upgrade_system(self) -> None: - # Get Updates, Install Packages - flags = PkEnum.Filter.NONE - pkgs = await self.run_transaction("get_updates", flags.value) - pkg_ids = [info['package_id'] for info in pkgs if 'package_id' in info] - if pkg_ids: - tflag = PkEnum.TransactionFlag.ONLY_TRUSTED - await self.run_transaction("update_packages", tflag.value, - pkg_ids, notify=True) - - def create_transaction(self) -> PackageKitTransaction: - if self.pkgkit is None: - raise self.server.error("PackageKit Interface Not Available") - return PackageKitTransaction(self.dbus_mgr, self.pkgkit, - self.cmd_helper) - - async def run_transaction(self, - method: str, - *args, - notify: bool = False - ) -> Any: - transaction = self.create_transaction() - return await transaction.run(method, *args, notify=notify) - -class PackageKitTransaction: - GET_PKG_ROLES = ( - PkEnum.Role.RESOLVE | PkEnum.Role.GET_PACKAGES | - PkEnum.Role.GET_UPDATES - ) - QUERY_ROLES = GET_PKG_ROLES | PkEnum.Role.GET_REPO_LIST - PROGRESS_STATUS = ( - PkEnum.Status.RUNNING | PkEnum.Status.INSTALL | - PkEnum.Status.UPDATE - ) - - def __init__(self, - dbus_mgr: DbusManager, - pkgkit: ProxyInterface, - cmd_helper: CommandHelper - ) -> None: - self.server = cmd_helper.get_server() - self.eventloop = self.server.get_event_loop() - self.cmd_helper = cmd_helper - self.dbus_mgr = dbus_mgr - self.pkgkit = pkgkit - # Transaction Properties - self.notify = False - self._status = PkEnum.Status.UNKNOWN - self._role = PkEnum.Role.UNKNOWN - self._tflags = PkEnum.TransactionFlag.NONE - self._percentage = 101 - self._dl_remaining = 0 - self.speed = 0 - self.elapsed_time = 0 - self.remaining_time = 0 - self.caller_active = False - self.allow_cancel = True - self.uid = 0 - # Transaction data tracking - self.tfut: Optional[asyncio.Future] = None - self.last_progress_notify_time: float = 0. 
- self.result: List[Dict[str, Any]] = [] - self.err_msg: str = "" - - def run(self, - method: str, - *args, - notify: bool = False - ) -> Awaitable: - if self.tfut is not None: - raise self.server.error( - "PackageKit transaction can only be used once") - self.notify = notify - self.tfut = self.eventloop.create_future() - coro = self._start_transaction(method, *args) - self.eventloop.create_task(coro) - return self.tfut - - async def _start_transaction(self, - method: str, - *args - ) -> None: - assert self.tfut is not None - try: - # Create Transaction - tid = await self.pkgkit.call_create_transaction() # type: ignore - transaction, props = await self.dbus_mgr.get_interfaces( - "org.freedesktop.PackageKit", tid, - ["org.freedesktop.PackageKit.Transaction", - "org.freedesktop.DBus.Properties"]) - # Set interface callbacks - transaction.on_package(self._on_package_signal) # type: ignore - transaction.on_repo_detail( # type: ignore - self._on_repo_detail_signal) - transaction.on_item_progress( # type: ignore - self._on_item_progress_signal) - transaction.on_error_code(self._on_error_signal) # type: ignore - transaction.on_finished(self._on_finished_signal) # type: ignore - props.on_properties_changed( # type: ignore - self._on_properties_changed) - # Run method - logging.debug(f"PackageKit: Running transaction call_{method}") - func = getattr(transaction, f"call_{method}") - await func(*args) - except Exception as e: - self.tfut.set_exception(e) - - def _on_package_signal(self, - info_code: int, - package_id: str, - summary: str - ) -> None: - info = PkEnum.Info.from_index(info_code) - if self._role in self.GET_PKG_ROLES: - pkg_data = { - 'package_id': package_id, - 'info': info.desc, - 'summary': summary - } - self.result.append(pkg_data) - else: - self._notify_package(info, package_id) - - def _on_repo_detail_signal(self, - repo_id: str, - description: str, - enabled: bool - ) -> None: - if self._role == PkEnum.Role.GET_REPO_LIST: - repo_data = { - "repo_id": repo_id, - "description": description, - "enabled": enabled - } - self.result.append(repo_data) - else: - self._notify_repo(repo_id, description) - - def _on_item_progress_signal(self, - item_id: str, - status_code: int, - percent_complete: int - ) -> None: - status = PkEnum.Status.from_index(status_code) - # NOTE: This signal doesn't seem to fire predictably, - # nor does it seem to provide a consistent "percent complete" - # parameter. - # logging.debug( - # f"Role {self._role.name}: Item Progress Signal Received\n" - # f"Item ID: {item_id}\n" - # f"Percent Complete: {percent_complete}\n" - # f"Status: {status.desc}") - - def _on_error_signal(self, - error_code: int, - details: str - ) -> None: - err = PkEnum.Error.from_index(error_code) - self.err_msg = f"{err.name}: {details}" - - def _on_finished_signal(self, exit_code: int, run_time: int) -> None: - if self.tfut is None: - return - ext = PkEnum.Exit.from_index(exit_code) - secs = run_time / 1000. 
- if ext == PkEnum.Exit.SUCCESS: - self.tfut.set_result(self.result) - else: - err = self.err_msg or ext.desc - server = self.cmd_helper.get_server() - self.tfut.set_exception(server.error(err)) - msg = f"Transaction {self._role.desc}: Exit {ext.desc}, " \ - f"Run time: {secs:.2f} seconds" - if self.notify: - self.cmd_helper.notify_update_response(msg) - logging.debug(msg) - - def _on_properties_changed(self, - iface_name: str, - changed_props: Dict[str, Variant], - invalid_props: Dict[str, Variant] - ) -> None: - for name, var in changed_props.items(): - formatted = re.sub(r"(\w)([A-Z])", r"\g<1>_\g<2>", name).lower() - setattr(self, formatted, var.value) - - def _notify_package(self, info: PkEnum.Info, package_id: str) -> None: - if self.notify: - if info == PkEnum.Info.FINISHED: - return - pkg_parts = package_id.split(";") - msg = f"{info.desc}: {pkg_parts[0]} ({pkg_parts[1]})" - self.cmd_helper.notify_update_response(msg) - - def _notify_repo(self, repo_id: str, description: str) -> None: - if self.notify: - if not repo_id.strip(): - repo_id = description - # TODO: May want to eliminate dups - msg = f"GET: {repo_id}" - self.cmd_helper.notify_update_response(msg) - - def _notify_progress(self) -> None: - if self.notify and self._percentage <= 100: - msg = f"{self._status.desc}...{self._percentage}%" - if self._status == PkEnum.Status.DOWNLOAD and self._dl_remaining: - if self._dl_remaining < 1024: - msg += f", Remaining: {self._dl_remaining} B" - elif self._dl_remaining < 1048576: - msg += f", Remaining: {self._dl_remaining // 1024} KiB" - else: - msg += f", Remaining: {self._dl_remaining // 1048576} MiB" - if self.speed: - speed = self.speed // 8 - if speed < 1024: - msg += f", Speed: {speed} B/s" - elif speed < 1048576: - msg += f", Speed: {speed // 1024} KiB/s" - else: - msg += f", Speed: {speed // 1048576} MiB/s" - self.cmd_helper.notify_update_response(msg) - - @property - def role(self) -> PkEnum.Role: - return self._role - - @role.setter - def role(self, role_code: int) -> None: - self._role = PkEnum.Role.from_index(role_code) - if self._role in self.QUERY_ROLES: - # Never Notify Queries - self.notify = False - if self.notify: - msg = f"Transaction {self._role.desc} started..." 
- self.cmd_helper.notify_update_response(msg) - logging.debug(f"PackageKit: Current Role: {self._role.desc}") - - @property - def status(self) -> PkEnum.Status: - return self._status - - @status.setter - def status(self, status_code: int) -> None: - self._status = PkEnum.Status.from_index(status_code) - self._percentage = 101 - self.speed = 0 - logging.debug(f"PackageKit: Current Status: {self._status.desc}") - - @property - def transaction_flags(self) -> PkEnum.TransactionFlag: - return self._tflags - - @transaction_flags.setter - def transaction_flags(self, bits: int) -> None: - self._tflags = PkEnum.TransactionFlag(bits) - - @property - def percentage(self) -> int: - return self._percentage - - @percentage.setter - def percentage(self, percent: int) -> None: - self._percentage = percent - if self._status in self.PROGRESS_STATUS: - self._notify_progress() - - @property - def download_size_remaining(self) -> int: - return self._dl_remaining - - @download_size_remaining.setter - def download_size_remaining(self, bytes_remaining: int) -> None: - self._dl_remaining = bytes_remaining - self._notify_progress() - -class WebClientDeploy(BaseDeploy): - def __init__(self, - config: ConfigHelper, - cmd_helper: CommandHelper - ) -> None: - super().__init__(config, cmd_helper, prefix="Web Client") - self.repo = config.get('repo').strip().strip("/") - self.owner = self.repo.split("/", 1)[0] - self.path = pathlib.Path(config.get("path")).expanduser().resolve() - self.type = config.get('type') - def_channel = "stable" - if self.type == "web_beta": - def_channel = "beta" - self.server.add_warning( - f"Config Section [{config.get_name()}], option 'type': " - "web_beta', value 'web_beta' is deprecated. Set 'type' to " - "web and 'channel' to 'beta'") - self.type = "zip" - self.channel = config.get("channel", def_channel) - if self.channel not in ["stable", "beta"]: - raise config.error( - f"Invalid Channel '{self.channel}' for config " - f"section [{config.get_name()}], type: {self.type}. " - f"Must be one of the following: stable, beta") - self.info_tags: List[str] = config.getlist("info_tags", []) - self.persistent_files: List[str] = [] - pfiles = config.getlist('persistent_files', None) - if pfiles is not None: - self.persistent_files = [pf.strip("/") for pf in pfiles] - if ".version" in self.persistent_files: - raise config.error( - "Invalid value for option 'persistent_files': " - "'.version' can not be persistent") - - async def initialize(self) -> Dict[str, Any]: - storage = await super().initialize() - self.version: str = storage.get('version', "?") - self.remote_version: str = storage.get('remote_version', "?") - dl_info: List[Any] = storage.get('dl_info', ["?", "?", 0]) - self.dl_info: Tuple[str, str, int] = cast( - Tuple[str, str, int], tuple(dl_info)) - logging.info(f"\nInitializing Client Updater: '{self.name}'," - f"\nChannel: {self.channel}" - f"\npath: {self.path}") - return storage - - async def _get_local_version(self) -> None: - version_path = self.path.joinpath(".version") - if version_path.is_file(): - event_loop = self.server.get_event_loop() - version = await event_loop.run_in_thread(version_path.read_text) - self.version = version.strip() - else: - self.version = "?" 
- - async def refresh(self) -> None: - try: - await self._get_local_version() - await self._get_remote_version() - except Exception: - logging.exception("Error Refreshing Client") - self._save_state() - - async def _get_remote_version(self) -> None: - # Remote state - if self.channel == "stable": - resource = f"repos/{self.repo}/releases/latest" - else: - resource = f"repos/{self.repo}/releases?per_page=1" - client = self.cmd_helper.get_http_client() - resp = await client.github_api_request(resource, attempts=3) - release: Union[List[Any], Dict[str, Any]] = {} - if resp.status_code == 304: - if self.remote_version == "?" and resp.content: - # Not modified, however we need to restore state from - # cached content - release = resp.json() - else: - # Either not necessary or not possible to restore from cache - return - elif resp.has_error(): - logging.info( - f"Client {self.repo}: Github Request Error - {resp.error}") - else: - release = resp.json() - result: Dict[str, Any] = {} - if isinstance(release, list): - if release: - result = release[0] - else: - result = release - self.remote_version = result.get('name', "?") - release_asset: Dict[str, Any] = result.get('assets', [{}])[0] - dl_url: str = release_asset.get('browser_download_url', "?") - content_type: str = release_asset.get('content_type', "?") - size: int = release_asset.get('size', 0) - self.dl_info = (dl_url, content_type, size) - logging.info( - f"Github client Info Received:\nRepo: {self.name}\n" - f"Local Version: {self.version}\n" - f"Remote Version: {self.remote_version}\n" - f"Pre-release: {result.get('prerelease', '?')}\n" - f"url: {dl_url}\n" - f"size: {size}\n" - f"Content Type: {content_type}") - - def get_persistent_data(self) -> Dict[str, Any]: - storage = super().get_persistent_data() - storage['version'] = self.version - storage['remote_version'] = self.remote_version - storage['dl_info'] = list(self.dl_info) - return storage - - async def update(self) -> bool: - if self.remote_version == "?": - await self._get_remote_version() - if self.remote_version == "?": - raise self.server.error( - f"Client {self.repo}: Unable to locate update") - dl_url, content_type, size = self.dl_info - if dl_url == "?": - raise self.server.error( - f"Client {self.repo}: Invalid download url") - if self.version == self.remote_version: - # Already up to date - return False - event_loop = self.server.get_event_loop() - self.cmd_helper.notify_update_response( - f"Updating Web Client {self.name}...") - self.cmd_helper.notify_update_response( - f"Downloading Client: {self.name}") - td = await self.cmd_helper.create_tempdir(self.name, "client") - try: - tempdir = pathlib.Path(td.name) - temp_download_file = tempdir.joinpath(f"{self.name}.zip") - temp_persist_dir = tempdir.joinpath(self.name) - client = self.cmd_helper.get_http_client() - await client.download_file( - dl_url, content_type, temp_download_file, size, - self.cmd_helper.on_download_progress) - self.cmd_helper.notify_update_response( - f"Download Complete, extracting release to '{self.path}'") - await event_loop.run_in_thread( - self._extract_release, temp_persist_dir, - temp_download_file) - finally: - await event_loop.run_in_thread(td.cleanup) - self.version = self.remote_version - version_path = self.path.joinpath(".version") - if not version_path.exists(): - await event_loop.run_in_thread( - version_path.write_text, self.version) - self.cmd_helper.notify_update_response( - f"Client Update Finished: {self.name}", is_complete=True) - self._save_state() - return True - - def 
_extract_release(self, - persist_dir: pathlib.Path, - release_file: pathlib.Path - ) -> None: - if not persist_dir.exists(): - os.mkdir(persist_dir) - if self.path.is_dir(): - # find and move persistent files - for fname in os.listdir(self.path): - src_path = self.path.joinpath(fname) - if fname in self.persistent_files: - dest_dir = persist_dir.joinpath(fname).parent - os.makedirs(dest_dir, exist_ok=True) - shutil.move(str(src_path), str(dest_dir)) - shutil.rmtree(self.path) - os.mkdir(self.path) - with zipfile.ZipFile(release_file) as zf: - zf.extractall(self.path) - # Move temporary files back into - for fname in os.listdir(persist_dir): - src_path = persist_dir.joinpath(fname) - dest_dir = self.path.joinpath(fname).parent - os.makedirs(dest_dir, exist_ok=True) - shutil.move(str(src_path), str(dest_dir)) - - def get_update_status(self) -> Dict[str, Any]: - return { - 'name': self.name, - 'owner': self.owner, - 'version': self.version, - 'remote_version': self.remote_version, - 'configured_type': self.type, - 'channel': self.channel, - 'info_tags': self.info_tags - } def load_component(config: ConfigHelper) -> UpdateManager: return UpdateManager(config) diff --git a/moonraker/components/update_manager/zip_deploy.py b/moonraker/components/update_manager/zip_deploy.py index 2960c0f..87c8ccd 100644 --- a/moonraker/components/update_manager/zip_deploy.py +++ b/moonraker/components/update_manager/zip_deploy.py @@ -1,19 +1,18 @@ # Zip Application Deployment implementation # -# Copyright (C) 2021 Eric Callahan +# Copyright (C) 2024 Eric Callahan # # This file may be distributed under the terms of the GNU GPLv3 license. from __future__ import annotations -import os import pathlib -import json import shutil -import re -import time import zipfile +import logging from .app_deploy import AppDeploy -from utils import verify_source +from .common import Channel, AppType +from ...utils import source_info +from ...utils import json_wrapper as jsonw # Annotation imports from typing import ( @@ -23,409 +22,402 @@ from typing import ( Optional, Dict, List, + Union, + cast ) if TYPE_CHECKING: - from confighelper import ConfigHelper + from ...confighelper import ConfigHelper from .update_manager import CommandHelper - -RINFO_KEYS = [ - "git_version", "long_version", "commit_hash", "source_checksum", - "ignored_exts", "ignored_dirs", "build_date", "channel", - "owner_repo", "host_repo", "release_tag" -] + from ..file_manager.file_manager import FileManager class ZipDeploy(AppDeploy): - def __init__(self, config: ConfigHelper, cmd_helper: CommandHelper) -> None: - super().__init__(config, cmd_helper) - self.need_channel_update = self.type != "zip" - self.official_repo: str = "?" - self.owner: str = "?" - # Extract repo from origin for validation - match = re.match(r"https?://(?:www\.)?github.com/([^/]+/[^.]+)", - self.origin) - if match is not None: - self.official_repo = match.group(1) - self.owner = self.official_repo.split('/')[0] - else: - raise config.error( - "Invalid url set for 'origin' option in section " - f"[{config.get_name()}]. 
Unable to extract owner/repo.") - self.host_repo: str = config.get('host_repo', self.official_repo) - self.package_list: List[str] = [] - self.python_pkg_list: List[str] = [] - self.release_download_info: Tuple[str, str, int] = ("?", "?", 0) + def __init__( + self, + config: ConfigHelper, + cmd_helper: CommandHelper + ) -> None: + super().__init__(config, cmd_helper, "Zip Application") + self._configure_path(config, False) + if self.type == AppType.ZIP: + self._configure_virtualenv(config) + self._configure_dependencies(config) + self._configure_managed_services(config) + elif self.type == AppType.WEB: + self.prefix = f"Web Client {self.name}: " + self.repo = config.get('repo').strip().strip("/") + self.owner, self.project_name = self.repo.split("/", 1) + self.persistent_files: List[str] = [] + self.warnings: List[str] = [] + self.anomalies: List[str] = [] + self.version: str = "?" + self.remote_version: str = "?" + self.rollback_version: str = "?" + self.rollback_repo: str = "?" + self.last_error: str = "?" + self._dl_info: Tuple[str, str, int] = ("?", "?", 0) + self._is_fallback: bool = False + self._is_prerelease: bool = False + self._path_writable: bool = False + self._configure_persistent_files(config) - @staticmethod - async def from_application(app: AppDeploy) -> ZipDeploy: - new_app = ZipDeploy(app.config, app.cmd_helper) - await new_app.reinstall() - return new_app + def _configure_persistent_files(self, config: ConfigHelper) -> None: + pfiles = config.getlist('persistent_files', None) + if pfiles is not None: + self.persistent_files = [pf.strip("/") for pf in pfiles] + for fname in (".version", "release_info.json"): + if fname in self.persistent_files: + raise config.error( + "Invalid value for option 'persistent_files': " + f"'{fname}' can not be persistent." + ) + if ( + self.type == AppType.ZIP and + self.virtualenv is not None and + self.virtualenv in self.path.parents + ): + rel_path = str(self.virtualenv.relative_to(self.path)) + if rel_path not in self.persistent_files: + self.persistent_files.append(rel_path) + if self.persistent_files: + self.log_info(f"Configured persistent files: {self.persistent_files}") + + async def _validate_release_info(self) -> None: + self._is_valid = False + self._is_fallback = False + eventloop = self.server.get_event_loop() + self.warnings.clear() + repo_parent = source_info.find_git_repo(self.path) + homedir = pathlib.Path("~").expanduser() + if not self._path_writable: + self.warnings.append( + f"Location at option 'path: {self.path}' is not writable." + ) + elif not self.path.is_dir(): + self.warnings.append( + f"Location at option 'path: {self.path}' is not a directory." + ) + elif repo_parent is not None and repo_parent != homedir: + self.warnings.append( + f"Location at option 'path: {self.path}' is within a git repo. Found " + f".git folder at '{repo_parent.joinpath('.git')}'" + ) + else: + rinfo = self.path.joinpath("release_info.json") + if rinfo.is_file(): + try: + data = await eventloop.run_in_thread(rinfo.read_text) + uinfo: Dict[str, str] = jsonw.loads(data) + project_name = uinfo["project_name"] + owner = uinfo["project_owner"] + self.version = uinfo["version"] + except Exception: + logging.exception("Failed to load release_info.json.") + else: + self._is_valid = True + detected_repo = f"{owner}/{project_name}" + if self.repo.lower() != detected_repo.lower(): + self.anomalies.append( + f"Value at option 'repo: {self.repo}' does not match " + f"detected repo '{detected_repo}', falling back to " + "detected version." 
+ ) + self.repo = detected_repo + self.owner = owner + self.project_name = project_name + elif self.type == AppType.WEB: + version_path = self.path.joinpath(".version") + if version_path.is_file(): + version = await eventloop.run_in_thread(version_path.read_text) + self.version = version.strip() + self._is_valid = await self._detect_fallback() + if not self._is_valid: + self.warnings.append("Failed to validate installation") + if self.server.is_debug_enabled(): + self.log_info("Debug Enabled, overriding validity checks") + + async def _detect_fallback(self) -> bool: + # Only used by "web" app types to fallback on the previous version info + fallback_defs = { + "mainsail": "mainsail-crew", + "fluidd": "fluidd-core" + } + for fname in ("manifest.json", "manifest.webmanifest"): + manifest = self.path.joinpath(fname) + eventloop = self.server.get_event_loop() + if manifest.is_file(): + try: + mtext = await eventloop.run_in_thread(manifest.read_text) + mdata: Dict[str, Any] = jsonw.loads(mtext) + proj_name: str = mdata["name"].lower() + except Exception: + self.log_exc(f"Failed to load json from {manifest}") + continue + if proj_name in fallback_defs: + owner = fallback_defs[proj_name] + detected_repo = f"{owner}/{proj_name}" + if detected_repo != self.repo.lower(): + self.anomalies.append( + f"Value at option 'repo: {self.repo}' does not match " + f"detected repo '{detected_repo}', falling back to " + "detected version." + ) + self.repo = detected_repo + self.owner = owner + self.project_name = proj_name + self._is_fallback = True + return True + return False async def initialize(self) -> Dict[str, Any]: storage = await super().initialize() - self.source_checksum: str = storage.get("source_checksum", "?") - self.pristine = storage.get('pristine', False) - self.verified = storage.get('verified', False) - self.build_date: int = storage.get('build_date', 0) - self.full_version: str = storage.get('full_version', "?") - self.short_version: str = storage.get('short_version', "?") - self.commit_hash: str = storage.get('commit_hash', "?") - self.lastest_hash: str = storage.get('latest_hash', "?") - self.latest_version: str = storage.get('latest_version', "?") - self.latest_checksum: str = storage.get('latest_checksum', "?") - self.latest_build_date: int = storage.get('latest_build_date', 0) - self.errors: List[str] = storage.get('errors', []) - self.commit_log: List[Dict[str, Any]] = storage.get('commit_log', []) + fm: FileManager = self.server.lookup_component("file_manager") + self._path_writable = not fm.check_reserved_path( + self.path, need_write=True, raise_error=False + ) + if self._path_writable and not self.path.joinpath(".writeable").is_file(): + fm.add_reserved_path(f"update_manager {self.name}", self.path) + await self._validate_release_info() + if self.version == "?": + self.version = storage.get("version", "?") + self.remote_version = storage.get('remote_version', "?") + self.rollback_version = storage.get('rollback_version', self.version) + self.rollback_repo = storage.get( + 'rollback_repo', self.repo if self._is_valid else "?" 
+ ) + self.last_error = storage.get('last_error', "") + dl_info: List[Any] = storage.get('dl_info', ["?", "?", 0]) + self.dl_info = cast(Tuple[str, str, int], tuple(dl_info)) + if not self.needs_refresh(): + self._log_zipapp_info() return storage def get_persistent_data(self) -> Dict[str, Any]: storage = super().get_persistent_data() storage.update({ - 'source_checksum': self.source_checksum, - 'pristine': self.pristine, - 'verified': self.verified, - 'build_date': self.build_date, - 'full_version': self.full_version, - 'short_version': self.short_version, - 'commit_hash': self.commit_hash, - 'latest_hash': self.lastest_hash, - 'latest_version': self.latest_version, - 'latest_checksum': self.latest_checksum, - 'latest_build_date': self.latest_build_date, - 'commit_log': self.commit_log, - 'errors': self.errors + "version": self.version, + "remote_version": self.remote_version, + "rollback_version": self.rollback_version, + "rollback_repo": self.rollback_repo, + "dl_info": list(self.dl_info), + "last_error": self.last_error }) return storage - async def _parse_info_file(self, file_name: str) -> Dict[str, Any]: - info_file = self.path.joinpath(file_name) - if not info_file.exists(): - self.log_info(f"Unable to locate file '{info_file}'") - return {} - try: - event_loop = self.server.get_event_loop() - info_bytes = await event_loop.run_in_thread(info_file.read_text) - info: Dict[str, Any] = json.loads(info_bytes) - except Exception: - self.log_exc(f"Unable to parse info file {file_name}") - info = {} - return info - - def _get_tag_version(self, version_string: str) -> str: - tag_version: str = "?" - ver_match = re.match(r"v\d+\.\d+\.\d-\d+", version_string) - if ver_match: - tag_version = ver_match.group() - return tag_version - async def refresh(self) -> None: try: - await self._update_repo_state() + await self._validate_release_info() + await self._get_remote_version() except Exception: - self.verified = False - self.log_exc("Error refreshing application state") - - async def _update_repo_state(self) -> None: - self.errors = [] - self._is_valid = False - self.verified = False - release_info = await self._parse_info_file(".release_info") - dep_info = await self._parse_info_file(".dependencies") - for key in RINFO_KEYS: - if key not in release_info: - self._add_error(f"Missing release info item: {key}") - if 'channel' in release_info: - local_channel = release_info['channel'] - if self.channel == "stable" and local_channel == "beta": - self.need_channel_update = True - self.full_version = release_info.get('long_version', "?") - self.short_version = self._get_tag_version( - release_info.get('git_version', "")) - self.commit_hash = release_info.get('commit_hash', "?") - self.build_date = release_info.get('build_date', 0) - owner_repo = release_info.get('owner_repo', "?") - if self.official_repo != owner_repo: - self._add_error( - f"Owner repo mismatch. 
Received {owner_repo}, " - f"official: {self.official_repo}") - # validate the local source code - event_loop = self.server.get_event_loop() - res = await event_loop.run_in_thread(verify_source, self.path) - if res is not None: - self.source_checksum, self.pristine = res - if self.name in ["moonraker", "klipper"]: - self.server.add_log_rollover_item( - f"{self.name}_validation", - f"{self.name} checksum: {self.source_checksum}, " - f"pristine: {self.pristine}") - else: - self._add_error("Unable to validate source checksum") - self.source_checksum = "" - self.pristine = False - self.package_list = sorted(dep_info.get( - 'debian', {}).get('packages', [])) - self.python_pkg_list = sorted(dep_info.get('python', [])) - # Retrieve version info from github to check for updates and - # validate local release info - host_repo = release_info.get('host_repo', "?") - release_tag = release_info.get('release_tag', "?") - if host_repo != self.host_repo: - self._add_error( - f"Host repo mismatch, received: {host_repo}, " - f"expected: {self.host_repo}. This could result in " - " a failed update.") - resource = f"repos/{self.host_repo}/releases" - current_release, latest_release = await self._fetch_github_releases( - resource, release_tag) - await self._validate_current_release(release_info, current_release) - if not self.errors: - self.verified = True - await self._process_latest_release(latest_release) - self._save_state() + logging.exception("Error Refreshing Client") self._log_zipapp_info() + self._save_state() - async def _fetch_github_releases(self, - resource: str, - current_tag: Optional[str] = None - ) -> Tuple[Dict[str, Any], Dict[str, Any]]: - try: - client = self.cmd_helper.get_http_client() - resp = await client.github_api_request(resource, attempts=3) - resp.raise_for_status() - releases = resp.json() - assert isinstance(releases, list) - except Exception: - self.log_exc("Error fetching releases from GitHub") - return {}, {} - release: Dict[str, Any] - latest_release: Dict[str, Any] = {} - current_release: Dict[str, Any] = {} - for release in releases: - if not latest_release: - if self.channel != "stable": - # Allow the beta channel to update regardless - latest_release = release - elif not release['prerelease']: - # This is a stable release on the stable channle - latest_release = release - if current_tag is not None: - if not current_release and release['tag_name'] == current_tag: - current_release = release - if latest_release and current_release: - break - elif latest_release: - break - return current_release, latest_release - - async def _validate_current_release(self, - release_info: Dict[str, Any], - release: Dict[str, Any] - ) -> None: - if not release: - self._add_error("Unable to find current release on GitHub") - return - asset_info = self._get_asset_urls(release, ["RELEASE_INFO"]) - if "RELEASE_INFO" not in asset_info: - self._add_error( - "RELEASE_INFO not found in current release assets") - info_url, content_type, size = asset_info['RELEASE_INFO'] + async def _fetch_github_version( + self, repo: Optional[str] = None, tag: Optional[str] = None + ) -> Dict[str, Any]: + if repo is None: + if not self._is_valid: + self.log_info("Invalid Installation, aborting remote refresh") + return {} + repo = self.repo + if tag is not None: + resource = f"repos/{repo}/releases/tags/{tag}" + elif self.channel == Channel.STABLE: + resource = f"repos/{repo}/releases/latest" + else: + resource = f"repos/{repo}/releases?per_page=1" client = self.cmd_helper.get_http_client() - rinfo_bytes = await 
client.get_file(info_url, content_type) - github_rinfo: Dict[str, Any] = json.loads(rinfo_bytes) - if github_rinfo.get(self.name, {}) != release_info: - self._add_error( - "Local release info does not match the remote") + resp = await client.github_api_request( + resource, attempts=3, retry_pause_time=.5 + ) + release: Union[List[Any], Dict[str, Any]] = {} + if resp.status_code == 304: + if resp.content: + # Not modified, however we need to restore state from + # cached content + release = resp.json() + else: + # Either not necessary or not possible to restore from cache + return {} + elif resp.has_error(): + self.log_info(f"Github Request Error - {resp.error}") + self.last_error = str(resp.error) + return {} else: - self.log_info("Current Release Info Validated") + release = resp.json() + result: Dict[str, Any] = {} + if isinstance(release, list): + if release: + result = release[0] + else: + result = release + self.last_error = "" + return result - async def _process_latest_release(self, release: Dict[str, Any]): - if not release: - self._add_error("Unable to find latest release on GitHub") + async def _get_remote_version(self) -> None: + result = await self._fetch_github_version() + if not result: return - zip_file_name = f"{self.name}.zip" - asset_names = ["RELEASE_INFO", "COMMIT_LOG", zip_file_name] - asset_info = self._get_asset_urls(release, asset_names) - if "RELEASE_INFO" in asset_info: - asset_url, content_type, size = asset_info['RELEASE_INFO'] - client = self.cmd_helper.get_http_client() - rinfo_bytes = await client.get_file(asset_url, content_type) - update_release_info: Dict[str, Any] = json.loads(rinfo_bytes) - update_info = update_release_info.get(self.name, {}) - self.lastest_hash = update_info.get('commit_hash', "?") - self.latest_checksum = update_info.get('source_checksum', "?") - self.latest_version = self._get_tag_version( - update_info.get('git_version', "?")) - self.latest_build_date = update_info.get('build_date', 0) - else: - self._add_error( - "RELEASE_INFO not found in latest release assets") - self.commit_log = [] - if self.short_version != self.latest_version: - # Only report commit log if versions change - if "COMMIT_LOG" in asset_info: - asset_url, content_type, size = asset_info['COMMIT_LOG'] - client = self.cmd_helper.get_http_client() - commit_bytes = await client.get_file(asset_url, content_type) - commit_info: Dict[str, Any] = json.loads(commit_bytes) - self.commit_log = commit_info.get(self.name, []) - if zip_file_name in asset_info: - self.release_download_info = asset_info[zip_file_name] - self._is_valid = True - else: - self.release_download_info = ("?", "?", 0) - self._add_error(f"Release asset {zip_file_name} not found") - - def _get_asset_urls(self, - release: Dict[str, Any], - filenames: List[str] - ) -> Dict[str, Tuple[str, str, int]]: - asset_info: Dict[str, Tuple[str, str, int]] = {} - asset: Dict[str, Any] - for asset in release.get('assets', []): - name = asset['name'] - if name in filenames: - rinfo_url = asset['browser_download_url'] - content_type = asset['content_type'] - size = asset['size'] - asset_info[name] = (rinfo_url, content_type, size) - filenames.remove(name) - if not filenames: - break - return asset_info - - def _add_error(self, warning: str): - self.log_info(warning) - self.errors.append(warning) + self.remote_version = result.get('name', "?") + release_asset: Dict[str, Any] = result.get('assets', [{}])[0] + dl_url: str = release_asset.get('browser_download_url', "?") + content_type: str = release_asset.get('content_type', 
"?") + size: int = release_asset.get('size', 0) + self.dl_info = (dl_url, content_type, size) + self._is_prerelease = result.get('prerelease', False) def _log_zipapp_info(self): + warn_str = "" + if self.warnings or self.anomalies: + warn_str = "\nWarnings:\n" + warn_str += "\n".join( + [f" {item}" for item in self.warnings + self.anomalies] + ) + dl_url, content_type, size = self.dl_info self.log_info( - "\nZip Application Distribution Detected\n" - f" Valid: {self._is_valid}\n" - f" Verified: {self.verified}\n" - f" Channel: {self.channel}\n" - f" Repo: {self.official_repo}\n" - f" Path: {self.path}\n" - f" Pristine: {self.pristine}\n" - f" Need Channel Update: {self.need_channel_update}\n" - f" Commits Behind: {len(self.commit_log)}\n" - f"Current Release Info:\n" - f" Source Checksum: {self.source_checksum}\n" - f" Commit SHA: {self.commit_hash}\n" - f" Long Version: {self.full_version}\n" - f" Short Version: {self.short_version}\n" - f" Build Date: {time.ctime(self.build_date)}\n" - f"Latest Available Release Info:\n" - f" Source Checksum: {self.latest_checksum}\n" - f" Commit SHA: {self.lastest_hash}\n" - f" Version: {self.latest_version}\n" - f" Build Date: {time.ctime(self.latest_build_date)}\n" - f" Download URL: {self.release_download_info[0]}\n" - f" Content Type: {self.release_download_info[1]}\n" - f" Download Size: {self.release_download_info[2]}" + f"Detected\n" + f"Repo: {self.repo}\n" + f"Channel: {self.channel}\n" + f"Path: {self.path}\n" + f"Local Version: {self.version}\n" + f"Remote Version: {self.remote_version}\n" + f"Valid: {self._is_valid}\n" + f"Fallback Detected: {self._is_fallback}\n" + f"Pre-release: {self._is_prerelease}\n" + f"Download Url: {dl_url}\n" + f"Download Size: {size}\n" + f"Content Type: {content_type}\n" + f"Rollback Version: {self.rollback_version}\n" + f"Rollback Repo: {self.rollback_repo}" + f"{warn_str}" ) - async def _update_dependencies(self, - npm_hash, - force: bool = False - ) -> None: - new_deps = await self._parse_info_file('.dependencies') - system_pkgs = sorted( - new_deps.get('debian', {}).get('packages', [])) - python_pkgs = sorted(new_deps.get('python', [])) - if system_pkgs: - if force or system_pkgs != self.package_list: - await self._install_packages(system_pkgs) - if python_pkgs: - if force or python_pkgs != self.python_pkg_list: - await self._update_virtualenv(python_pkgs) - ret = await self._check_need_update(npm_hash, self.npm_pkg_json) - if force or ret: - if self.npm_pkg_json is not None: - self.notify_status("Updating Node Packages...") - try: - await self.cmd_helper.run_cmd( - "npm ci --only=prod", notify=True, timeout=600., - cwd=str(self.path)) - except Exception: - self.notify_status("Node Package Update failed") - - def _extract_release(self, release_zip: pathlib.Path) -> None: + def _extract_release( + self, persist_dir: pathlib.Path, release_file: pathlib.Path + ) -> None: + if not persist_dir.exists(): + persist_dir.mkdir() if self.path.is_dir(): + # find and move persistent files + for src_path in self.path.iterdir(): + fname = src_path.name + if fname in self.persistent_files: + dest_path = persist_dir.joinpath(fname) + dest_dir = dest_path.parent + dest_dir.mkdir(parents=True, exist_ok=True) + shutil.move(str(src_path), str(dest_path)) shutil.rmtree(self.path) - os.mkdir(self.path) - with zipfile.ZipFile(release_zip) as zf: - zf.extractall(self.path) + self.path.mkdir() + with zipfile.ZipFile(release_file) as zf: + for zip_entry in zf.filelist: + dest = pathlib.Path(zf.extract(zip_entry, str(self.path))) + 
dest.chmod((zip_entry.external_attr >> 16) & 0o777) + # Move temporary files back into + for src_path in persist_dir.iterdir(): + dest_path = self.path.joinpath(src_path.name) + dest_dir = dest_path.parent + dest_dir.mkdir(parents=True, exist_ok=True) + shutil.move(str(src_path), str(dest_path)) - async def update(self, force_dep_update: bool = False) -> bool: + async def update( + self, + rollback_info: Optional[Tuple[str, str, int]] = None, + is_recover: bool = False, + force_dep_update: bool = False + ) -> bool: if not self._is_valid: - raise self.log_exc("Update aborted, repo not valid", False) - if self.short_version == self.latest_version: - # already up to date - return False - self.cmd_helper.notify_update_response( - f"Updating Application {self.name}...") - npm_hash = await self._get_file_hash(self.npm_pkg_json) - dl_url, content_type, size = self.release_download_info - self.notify_status("Starting Download...") + raise self.server.error( + f"{self.prefix}Invalid install detected, aborting update" + ) + if rollback_info is not None: + dl_url, content_type, size = rollback_info + start_msg = "Rolling Back..." if not is_recover else "Recovering..." + else: + if self.remote_version == "?": + await self._get_remote_version() + if self.remote_version == "?": + raise self.server.error( + f"{self.prefix}Unable to locate update" + ) + dl_url, content_type, size = self.dl_info + if self.version == self.remote_version: + # Already up to date + return False + start_msg = "Updating..." + if dl_url == "?": + raise self.server.error(f"{self.prefix}Invalid download url") + current_version = self.version + event_loop = self.server.get_event_loop() + self.notify_status(start_msg) + self.notify_status("Downloading Release...") + dep_info: Optional[Dict[str, Any]] = None + if self.type == AppType.ZIP: + dep_info = await self._collect_dependency_info() td = await self.cmd_helper.create_tempdir(self.name, "app") try: tempdir = pathlib.Path(td.name) temp_download_file = tempdir.joinpath(f"{self.name}.zip") + temp_persist_dir = tempdir.joinpath(self.name) client = self.cmd_helper.get_http_client() await client.download_file( dl_url, content_type, temp_download_file, size, - self.cmd_helper.on_download_progress) + self.cmd_helper.on_download_progress + ) self.notify_status( - f"Download Complete, extracting release to '{self.path}'") - event_loop = self.server.get_event_loop() + f"Download Complete, extracting release to '{self.path}'" + ) await event_loop.run_in_thread( - self._extract_release, temp_download_file) + self._extract_release, temp_persist_dir, temp_download_file + ) finally: await event_loop.run_in_thread(td.cleanup) - await self._update_dependencies(npm_hash, force=force_dep_update) - await self._update_repo_state() + if dep_info is not None: + await self._update_dependencies(dep_info, force_dep_update) + self.version = self.remote_version + await self._validate_release_info() + if self._is_valid and rollback_info is None: + self.rollback_version = current_version + self.rollback_repo = self.repo + self._log_zipapp_info() + self._save_state() await self.restart_service() - self.notify_status("Update Finished...", is_complete=True) + msg = "Update Finished..." 
if rollback_info is None else "Rollback Complete" + self.notify_status(msg, is_complete=True) return True - async def recover(self, - hard: bool = False, - force_dep_update: bool = False - ) -> None: - res = f"repos/{self.host_repo}/releases" - releases = await self._fetch_github_releases(res) - await self._process_latest_release(releases[1]) - await self.update(force_dep_update=force_dep_update) + async def recover( + self, hard: bool = False, force_dep_update: bool = False + ) -> None: + await self.update(self.dl_info, True, force_dep_update) - async def reinstall(self) -> None: - # Clear the persistent storage prior to a channel swap. - # After the next update is complete new data will be - # restored. - umdb = self.cmd_helper.get_umdb() - await umdb.pop(self.name, None) - await self.initialize() - await self.recover(force_dep_update=True) + async def rollback(self) -> bool: + if self.rollback_version == "?" or self.rollback_repo == "?": + raise self.server.error("Incomplete Rollback Data", False) + if self.rollback_version == self.version: + return False + result = await self._fetch_github_version( + self.rollback_repo, self.rollback_version + ) + if not result: + raise self.server.error("Failed to retrieve release asset data") + release_asset: Dict[str, Any] = result.get('assets', [{}])[0] + dl_url: str = release_asset.get('browser_download_url', "?") + content_type: str = release_asset.get('content_type', "?") + size: int = release_asset.get('size', 0) + dl_info = (dl_url, content_type, size) + return await self.update(dl_info) def get_update_status(self) -> Dict[str, Any]: status = super().get_update_status() - # XXX - Currently this reports status matching - # that of the git repo so as to not break existing - # client functionality. In the future it would be - # good to report values that are specifc status.update({ - 'detected_type': "zip", - 'remote_alias': "origin", - 'branch': "master", + 'name': self.name, + 'repo_name': self.project_name, 'owner': self.owner, - 'version': self.short_version, - 'remote_version': self.latest_version, - 'current_hash': self.commit_hash, - 'remote_hash': self.lastest_hash, - 'is_dirty': False, - 'detached': not self.verified, - 'commits_behind': self.commit_log, - 'git_messages': self.errors, - 'full_version_string': self.full_version, - 'pristine': self.pristine, + 'version': self.version, + 'remote_version': self.remote_version, + 'rollback_version': self.rollback_version, + 'last_error': self.last_error, + 'warnings': self.warnings, + 'anomalies': self.anomalies }) return status diff --git a/moonraker/components/webcam.py b/moonraker/components/webcam.py index 14834d9..6c33b88 100644 --- a/moonraker/components/webcam.py +++ b/moonraker/components/webcam.py @@ -10,19 +10,20 @@ import ipaddress import socket import uuid import logging +from ..common import RequestType from typing import ( TYPE_CHECKING, Optional, Dict, List, Any, - Tuple ) if TYPE_CHECKING: - from moonraker import Server - from confighelper import ConfigHelper - from websockets import WebRequest + from asyncio import Future + from ..server import Server + from ..confighelper import ConfigHelper + from ..common import WebRequest from .database import MoonrakerDatabase from .machine import Machine from .shell_command import ShellCommandFactory @@ -33,7 +34,9 @@ if TYPE_CHECKING: CAM_FIELDS = { "name": "name", "service": "service", "target_fps": "targetFps", "stream_url": "urlStream", "snapshot_url": "urlSnapshot", - "flip_horizontal": "flipX", "flip_vertical": "flipY" + 
"flip_horizontal": "flipX", "flip_vertical": "flipY", + "enabled": "enabled", "target_fps_idle": "targetFpsIdle", + "aspect_ratio": "aspectRatio", "icon": "icon" } class WebcamManager: @@ -48,36 +51,68 @@ class WebcamManager: self.webcams[webcam.name] = webcam self.server.register_endpoint( - "/server/webcams/list", ["GET"], self._handle_webcam_list + "/server/webcams/list", RequestType.GET, self._handle_webcam_list ) self.server.register_endpoint( - "/server/webcams/item", ["GET", "POST", "DELETE"], + "/server/webcams/item", RequestType.all(), self._handle_webcam_request ) self.server.register_endpoint( - "/server/webcams/test", ["POST"], self._handle_webcam_test + "/server/webcams/test", RequestType.POST, self._handle_webcam_test ) self.server.register_notification("webcam:webcams_changed") + self.server.register_event_handler( + "machine:public_ip_changed", self._set_default_host_ip + ) async def component_init(self) -> None: machine: Machine = self.server.lookup_component("machine") - pubnet = await machine.get_public_network() - ip: Optional[str] = pubnet.get("address") - default_host = f"http://{pubnet['hostname']}" - if ip is not None: - default_host = f"http://{ip}" - WebCam.set_default_host(default_host) + if machine.public_ip: + self._set_default_host_ip(machine.public_ip) + all_uids = [wc.uid for wc in self.webcams.values()] db: MoonrakerDatabase = self.server.lookup_component("database") - saved_cams: Dict[str, Any] = await db.get_item("webcams", default={}) - for cam_data in saved_cams.values(): + db_cams: Dict[str, Dict[str, Any]] = await db.get_item("webcams", default={}) + ro_info: List[str] = [] + # Process configured cams + for uid, cam_data in db_cams.items(): try: + cam_data["uid"] = uid webcam = WebCam.from_database(self.server, cam_data) + if uid in all_uids: + # Unlikely but possible collision between random UUID4 + # and UUID5 generated from a configured webcam. + await db.delete_item("webcams", uid) + webcam.uid = self._get_guaranteed_uuid() + await self._save_cam(webcam, False) + ro_info.append(f"Detected webcam UID collision: {uid}") + all_uids.append(webcam.uid) if webcam.name in self.webcams: + ro_info.append( + f"Detected webcam name collision: {webcam.name}, uuid: " + f"{uid}. This camera will be ignored." 
+ ) continue self.webcams[webcam.name] = webcam except Exception: logging.exception("Failed to process webcam from db") continue + if ro_info: + self.server.add_log_rollover_item("webcam", "\n".join(ro_info)) + + def _set_default_host_ip(self, ip: str) -> None: + default_host = "http://127.0.0.1" + if ip: + try: + addr = ipaddress.ip_address(ip) + except Exception: + logging.debug(f"Invalid IP Recd: {ip}") + else: + if addr.version == 6: + default_host = f"http://[{addr}]" + else: + default_host = f"http://{addr}" + WebCam.set_default_host(default_host) + logging.info(f"Default public webcam address set: {default_host}") def get_webcams(self) -> Dict[str, WebCam]: return self.webcams @@ -85,103 +120,113 @@ class WebcamManager: def _list_webcams(self) -> List[Dict[str, Any]]: return [wc.as_dict() for wc in self.webcams.values()] - async def _find_dbcam_by_uuid( - self, name: str - ) -> Tuple[str, Dict[str, Any]]: - db: MoonrakerDatabase = self.server.lookup_component("database") - saved_cams: Dict[str, Dict[str, Any]] - saved_cams = await db.get_item("webcams", default={}) - for uid, cam_data in saved_cams.items(): - if name == cam_data["name"]: - return uid, cam_data - return "", {} - - async def _save_cam(self, webcam: WebCam) -> None: - uid, cam_data = await self._find_dbcam_by_uuid(webcam.name) - if not uid: - uid = str(uuid.uuid4()) + def _save_cam(self, webcam: WebCam, save_local: bool = True) -> Future: + if save_local: + self.webcams[webcam.name] = webcam + cam_data: Dict[str, Any] = {} for mfield, dbfield in CAM_FIELDS.items(): cam_data[dbfield] = getattr(webcam, mfield) cam_data["location"] = webcam.location cam_data["rotation"] = webcam.rotation - if "icon" not in cam_data: - cam_data["icon"] = "mdi-webcam" + cam_data["extra_data"] = webcam.extra_data db: MoonrakerDatabase = self.server.lookup_component("database") - db.insert_item("webcams", uid, cam_data) + return db.insert_item("webcams", webcam.uid, cam_data) - async def _delete_cam(self, webcam: WebCam) -> None: - uid, cam = await self._find_dbcam_by_uuid(webcam.name) - if not uid: - return + def _delete_cam(self, webcam: WebCam) -> Future: db: MoonrakerDatabase = self.server.lookup_component("database") - db.delete_item("webcams", uid) + self.webcams.pop(webcam.name, None) + return db.delete_item("webcams", webcam.uid) - async def _handle_webcam_request( - self, web_request: WebRequest - ) -> Dict[str, Any]: - action = web_request.get_action() + def _get_guaranteed_uuid(self) -> str: + cur_uids = [wc.uid for wc in self.webcams.values()] + while True: + uid = str(uuid.uuid4()) + if uid not in cur_uids: + break + return uid + + def get_cam_by_uid(self, uid: str) -> WebCam: + for cam in self.webcams.values(): + if cam.uid == uid: + return cam + raise self.server.error(f"Webcam with UID {uid} not found", 404) + + def _lookup_camera( + self, web_request: WebRequest, required: bool = True + ) -> Optional[WebCam]: + args = web_request.get_args() + if "uid" in args: + return self.get_cam_by_uid(web_request.get_str("uid")) name = web_request.get_str("name") + webcam = self.webcams.get(name, None) + if required and webcam is None: + raise self.server.error(f"Webcam {name} not found", 404) + return webcam + + async def _handle_webcam_request(self, web_request: WebRequest) -> Dict[str, Any]: + req_type = web_request.get_request_type() + webcam = self._lookup_camera(web_request, req_type != RequestType.POST) webcam_data: Dict[str, Any] = {} - if action == "GET": - if name not in self.webcams: - raise self.server.error(f"Webcam {name} 
not found", 404) - webcam_data = self.webcams[name].as_dict() - elif action == "POST": - if ( - name in self.webcams and - self.webcams[name].source == "config" - ): - raise self.server.error( - f"Cannot overwrite webcam '{name}' sourced from " - "Moonraker configuration" - ) - webcam = WebCam.from_web_request(self.server, web_request) - self.webcams[name] = webcam + if req_type == RequestType.GET: + assert webcam is not None webcam_data = webcam.as_dict() + elif req_type == RequestType.POST: + if webcam is not None: + if webcam.source == "config": + raise self.server.error( + f"Cannot overwrite webcam '{webcam.name}' sourced from " + "Moonraker configuration" + ) + new_name = web_request.get_str("name", None) + if new_name is not None and webcam.name != new_name: + if new_name in self.webcams: + raise self.server.error( + f"Cannot rename webcam from '{webcam.name}' to " + f"'{new_name}'. Webcam with requested name '{new_name}' " + "already exists." + ) + self.webcams.pop(webcam.name, None) + webcam.update(web_request) + else: + uid = self._get_guaranteed_uuid() + webcam = WebCam.from_web_request(self.server, web_request, uid) await self._save_cam(webcam) - elif action == "DELETE": - if name not in self.webcams: - raise self.server.error(f"Webcam {name} not found", 404) - elif self.webcams[name].source == "config": + webcam_data = webcam.as_dict() + elif req_type == RequestType.DELETE: + assert webcam is not None + if webcam.source == "config": raise self.server.error( - f"Cannot delete webcam '{name}' sourced from " + f"Cannot delete webcam '{webcam.name}' sourced from " "Moonraker configuration" ) - webcam = self.webcams.pop(name) webcam_data = webcam.as_dict() - await self._delete_cam(webcam) - if action != "GET": + self._delete_cam(webcam) + if req_type != RequestType.GET: self.server.send_event( "webcam:webcams_changed", {"webcams": self._list_webcams()} ) return {"webcam": webcam_data} - async def _handle_webcam_list( - self, web_request: WebRequest - ) -> Dict[str, Any]: + async def _handle_webcam_list(self, web_request: WebRequest) -> Dict[str, Any]: return {"webcams": self._list_webcams()} - async def _handle_webcam_test( - self, web_request: WebRequest - ) -> Dict[str, Any]: - name = web_request.get_str("name") - if name not in self.webcams: - raise self.server.error(f"Webcam '{name}' not found", 404) + async def _handle_webcam_test(self, web_request: WebRequest) -> Dict[str, Any]: client: HttpClient = self.server.lookup_component("http_client") - cam = self.webcams[name] + webcam = self._lookup_camera(web_request) + assert webcam is not None result: Dict[str, Any] = { - "name": name, + "name": webcam.name, "snapshot_reachable": False } for img_type in ["snapshot", "stream"]: try: - func = getattr(cam, f"get_{img_type}_url") + func = getattr(webcam, f"get_{img_type}_url") result[f"{img_type}_url"] = await func(True) except Exception: logging.exception(f"Error Processing {img_type} url") result[f"{img_type}_url"] = "" - if result.get("snapshot_url", "").startswith("http"): - url = client.escape_url(result["snapshot_url"]) + url: str = result["snapshot_url"] + if url.startswith("http"): ret = await client.get(url, connect_timeout=1., request_timeout=1.) 
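The webcam test handler here reduces to a short-timeout HTTP probe of the resolved snapshot URL. A rough standalone equivalent, assuming `aiohttp` in place of Moonraker's `http_client` component and a placeholder camera address:

    import asyncio
    import aiohttp

    async def snapshot_reachable(url: str) -> bool:
        # Mirror the probe above: aggressive connect/total timeouts so an
        # unreachable camera cannot stall the test endpoint.
        timeout = aiohttp.ClientTimeout(connect=1.0, total=1.0)
        try:
            async with aiohttp.ClientSession(timeout=timeout) as session:
                async with session.get(url) as resp:
                    return resp.status < 400
        except Exception:
            return False

    # Placeholder URL for illustration only
    print(asyncio.run(snapshot_reachable("http://127.0.0.1/webcam/?action=snapshot")))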
result["snapshot_reachable"] = not ret.has_error() return result @@ -189,18 +234,32 @@ class WebcamManager: class WebCam: _default_host: str = "http://127.0.0.1" + _protected_fields: List[str] = ["source", "uid"] def __init__(self, server: Server, **kwargs) -> None: self._server = server self.name: str = kwargs["name"] + self.enabled: bool = kwargs["enabled"] + self.icon: str = kwargs["icon"] + self.aspect_ratio: str = kwargs["aspect_ratio"] + self.target_fps: int = kwargs["target_fps"] + self.target_fps_idle: int = kwargs["target_fps_idle"] self.location: str = kwargs["location"] self.service: str = kwargs["service"] - self.target_fps: int = kwargs["target_fps"] self.stream_url: str = kwargs["stream_url"] self.snapshot_url: str = kwargs["snapshot_url"] self.flip_horizontal: bool = kwargs["flip_horizontal"] self.flip_vertical: bool = kwargs["flip_vertical"] self.rotation: int = kwargs["rotation"] self.source: str = kwargs["source"] + self.extra_data: Dict[str, Any] = kwargs.get("extra_data", {}) + self.uid: str = kwargs["uid"] + if self.rotation not in [0, 90, 180, 270]: + raise server.error(f"Invalid value for 'rotation': {self.rotation}") + prefix, sep, postfix = self.aspect_ratio.partition(":") + if not (prefix.isdigit() and sep == ":" and postfix.isdigit()): + raise server.error( + f"Invalid value for 'aspect_ratio': {self.aspect_ratio}" + ) def as_dict(self): return {k: v for k, v in self.__dict__.items() if k[0] != "_"} @@ -301,59 +360,107 @@ class WebCam: pass return url + def update(self, web_request: WebRequest) -> None: + valid_fields = [ + f for f in self.__dict__.keys() if f[0] != "_" + and f not in self._protected_fields + ] + for field in web_request.get_args().keys(): + if field not in valid_fields: + continue + try: + attr_type = type(getattr(self, field)) + except AttributeError: + continue + if attr_type is bool: + val: Any = web_request.get_boolean(field) + elif attr_type is int: + val = web_request.get_int(field) + elif attr_type is float: + val = web_request.get_float(field) + elif attr_type is str: + val = web_request.get_str(field) + else: + val = web_request.get(field) + setattr(self, field, val) + @staticmethod def set_default_host(host: str) -> None: WebCam._default_host = host @classmethod def from_config(cls, config: ConfigHelper) -> WebCam: - webcam: Dict[str, Any] = {} - webcam["name"] = config.get_name().split(maxsplit=1)[-1] - webcam["location"] = config.get("location", "printer") - webcam["service"] = config.get("service", "mjpegstreamer") - webcam["target_fps"] = config.getint("target_fps", 15) - webcam["stream_url"] = config.get("stream_url") - webcam["snapshot_url"] = config.get("snapshot_url") - webcam["flip_horizontal"] = config.getboolean("flip_horizontal", False) - webcam["flip_vertical"] = config.getboolean("flip_vertical", False) - webcam["rotation"] = config.getint("rotation", 0) - if webcam["rotation"] not in [0, 90, 180, 270]: - raise config.error("Invalid value for option 'rotation'") - webcam["source"] = "config" - return cls(config.get_server(), **webcam) + server = config.get_server() + name = config.get_name().split(maxsplit=1)[-1] + ns = uuid.UUID(server.get_app_args()["instance_uuid"]) + try: + return cls( + server, + name=name, + enabled=config.getboolean("enabled", True), + icon=config.get("icon", "mdiWebcam"), + aspect_ratio=config.get("aspect_ratio", "4:3"), + target_fps=config.getint("target_fps", 15), + target_fps_idle=config.getint("target_fps_idle", 5), + location=config.get("location", "printer"), + service=config.get("service", 
"mjpegstreamer"), + stream_url=config.get("stream_url"), + snapshot_url=config.get("snapshot_url", ""), + flip_horizontal=config.getboolean("flip_horizontal", False), + flip_vertical=config.getboolean("flip_vertical", False), + rotation=config.getint("rotation", 0), + source="config", + uid=str(uuid.uuid5(ns, f"moonraker.webcam.{name}")) + ) + except server.error as err: + raise config.error(str(err)) from err @classmethod def from_web_request( - cls, server: Server, web_request: WebRequest + cls, server: Server, web_request: WebRequest, uid: str ) -> WebCam: - webcam: Dict[str, Any] = {} - webcam["name"] = web_request.get_str("name") - webcam["location"] = web_request.get_str("location", "printer") - webcam["service"] = web_request.get_str("service", "mjpegstreamer") - webcam["target_fps"] = web_request.get_int("target_fps", 15) - webcam["stream_url"] = web_request.get_str("stream_url") - webcam["snapshot_url"] = web_request.get_str("snapshot_url") - webcam["flip_horizontal"] = web_request.get_boolean( - "flip_horizontal", False + name = web_request.get_str("name") + return cls( + server, + name=name, + enabled=web_request.get_boolean("enabled", True), + icon=web_request.get_str("icon", "mdiWebcam"), + aspect_ratio=web_request.get_str("aspect_ratio", "4:3"), + target_fps=web_request.get_int("target_fps", 15), + target_fps_idle=web_request.get_int("target_fps_idle", 5), + location=web_request.get_str("location", "printer"), + service=web_request.get_str("service", "mjpegstreamer"), + stream_url=web_request.get_str("stream_url"), + snapshot_url=web_request.get_str("snapshot_url", ""), + flip_horizontal=web_request.get_boolean("flip_horizontal", False), + flip_vertical=web_request.get_boolean("flip_vertical", False), + rotation=web_request.get_int("rotation", 0), + source="database", + extra_data=web_request.get("extra_data", {}), + uid=uid ) - webcam["flip_vertical"] = web_request.get_boolean( - "flip_vertical", False - ) - webcam["rotation"] = web_request.get_str("rotation", 0) - if webcam["rotation"] not in [0, 90, 180, 270]: - raise server.error("Invalid value for parameter 'rotate'") - webcam["source"] = "database" - return cls(server, **webcam) @classmethod def from_database(cls, server: Server, cam_data: Dict[str, Any]) -> WebCam: - webcam: Dict[str, Any] = {} - for mfield, dbfield in CAM_FIELDS.items(): - webcam[mfield] = cam_data[dbfield] - webcam["location"] = webcam.get("location", "printer") - webcam["rotation"] = webcam.get("rotation", 0) - webcam["source"] = "database" - return cls(server, **webcam) + return cls( + server, + name=str(cam_data["name"]), + enabled=bool(cam_data.get("enabled", True)), + icon=str(cam_data.get("icon", "mdiWebcam")), + aspect_ratio=str(cam_data.get("aspectRatio", "4:3")), + target_fps=int(cam_data.get("targetFps", 15)), + target_fps_idle=int(cam_data.get("targetFpsIdle", 5)), + location=str(cam_data.get("location", "printer")), + service=str(cam_data.get("service", "mjpegstreamer")), + stream_url=str(cam_data.get("urlStream", "")), + snapshot_url=str(cam_data.get("urlSnapshot", "")), + flip_horizontal=bool(cam_data.get("flipX", False)), + flip_vertical=bool(cam_data.get("flipY", False)), + rotation=int(cam_data.get("rotation", cam_data.get("rotate", 0))), + source="database", + extra_data=cam_data.get("extra_data", {}), + uid=cam_data["uid"] + ) def load_component(config: ConfigHelper) -> WebcamManager: return WebcamManager(config) diff --git a/moonraker/components/websockets.py b/moonraker/components/websockets.py new file mode 100644 index 
0000000..7d0c85b --- /dev/null +++ b/moonraker/components/websockets.py @@ -0,0 +1,498 @@ +# Websocket Request/Response Handler +# +# Copyright (C) 2020 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license + +from __future__ import annotations +import logging +import asyncio +from tornado.websocket import WebSocketHandler, WebSocketClosedError +from tornado.web import HTTPError +from ..common import ( + RequestType, + WebRequest, + BaseRemoteConnection, + TransportType, +) +from ..utils import ServerError, parse_ip_address + +# Annotation imports +from typing import ( + TYPE_CHECKING, + Any, + Optional, + Callable, + Coroutine, + Tuple, + Union, + Dict, + List, +) + +if TYPE_CHECKING: + from ..server import Server + from .klippy_connection import KlippyConnection as Klippy + from ..confighelper import ConfigHelper + from .application import MoonrakerApp + from .extensions import ExtensionManager + from .authorization import Authorization + from ..utils import IPAddress + ConvType = Union[str, bool, float, int] + ArgVal = Union[None, int, float, bool, str] + RPCCallback = Callable[..., Coroutine] + AuthComp = Optional[Authorization] + +CLIENT_TYPES = ["web", "mobile", "desktop", "display", "bot", "agent", "other"] + +class WebsocketManager: + def __init__(self, config: ConfigHelper) -> None: + self.server = config.get_server() + self.clients: Dict[int, BaseRemoteConnection] = {} + self.bridge_connections: Dict[int, BridgeSocket] = {} + self.closed_event: Optional[asyncio.Event] = None + app: MoonrakerApp = self.server.lookup_component("application") + app.register_websocket_handler("/websocket", WebSocket) + app.register_websocket_handler("/klippysocket", BridgeSocket) + self.server.register_endpoint( + "/server/websocket/id", RequestType.GET, self._handle_id_request, + TransportType.WEBSOCKET + ) + self.server.register_endpoint( + "/server/connection/identify", RequestType.POST, self._handle_identify, + TransportType.WEBSOCKET, auth_required=False + ) + + def register_notification( + self, + event_name: str, + notify_name: Optional[str] = None, + event_type: Optional[str] = None + ) -> None: + if notify_name is None: + notify_name = event_name.split(':')[-1] + if event_type == "logout": + def notify_handler(*args): + self.notify_clients(notify_name, args) + self._process_logout(*args) + else: + def notify_handler(*args): + self.notify_clients(notify_name, args) + self.server.register_event_handler(event_name, notify_handler) + + async def _handle_id_request(self, web_request: WebRequest) -> Dict[str, int]: + sc = web_request.get_client_connection() + assert sc is not None + return {'websocket_id': sc.uid} + + async def _handle_identify(self, web_request: WebRequest) -> Dict[str, int]: + sc = web_request.get_client_connection() + assert sc is not None + if sc.identified: + raise self.server.error( + f"Connection already identified: {sc.client_data}" + ) + name = web_request.get_str("client_name") + version = web_request.get_str("version") + client_type: str = web_request.get_str("type").lower() + url = web_request.get_str("url") + sc.authenticate( + token=web_request.get_str("access_token", None), + api_key=web_request.get_str("api_key", None) + ) + + if client_type not in CLIENT_TYPES: + raise self.server.error(f"Invalid Client Type: {client_type}") + sc.client_data = { + "name": name, + "version": version, + "type": client_type, + "url": url + } + if client_type == "agent": + extensions: ExtensionManager + extensions = 
self.server.lookup_component("extensions") + try: + extensions.register_agent(sc) + except ServerError: + sc.client_data["type"] = "" + raise + logging.info( + f"Websocket {sc.uid} Client Identified - " + f"Name: {name}, Version: {version}, Type: {client_type}" + ) + self.server.send_event("websockets:client_identified", sc) + return {'connection_id': sc.uid} + + def _process_logout(self, user: Dict[str, Any]) -> None: + if "username" not in user: + return + name = user["username"] + for sc in self.clients.values(): + sc.on_user_logout(name) + + def has_socket(self, ws_id: int) -> bool: + return ws_id in self.clients + + def get_client(self, uid: int) -> Optional[BaseRemoteConnection]: + return self.clients.get(uid, None) + + def get_client_ws(self, ws_id: int) -> Optional[WebSocket]: + sc = self.clients.get(ws_id, None) + if sc is None or not isinstance(sc, WebSocket): + return None + return sc + + def get_clients_by_type( + self, client_type: str + ) -> List[BaseRemoteConnection]: + if not client_type: + return [] + ret: List[BaseRemoteConnection] = [] + for sc in self.clients.values(): + if sc.client_data.get("type", "") == client_type.lower(): + ret.append(sc) + return ret + + def get_clients_by_name(self, name: str) -> List[BaseRemoteConnection]: + if not name: + return [] + ret: List[BaseRemoteConnection] = [] + for sc in self.clients.values(): + if sc.client_data.get("name", "").lower() == name.lower(): + ret.append(sc) + return ret + + def get_unidentified_clients(self) -> List[BaseRemoteConnection]: + ret: List[BaseRemoteConnection] = [] + for sc in self.clients.values(): + if not sc.client_data: + ret.append(sc) + return ret + + def add_client(self, sc: BaseRemoteConnection) -> None: + self.clients[sc.uid] = sc + self.server.send_event("websockets:client_added", sc) + logging.debug(f"New Websocket Added: {sc.uid}") + + def remove_client(self, sc: BaseRemoteConnection) -> None: + old_sc = self.clients.pop(sc.uid, None) + if old_sc is not None: + self.server.send_event("websockets:client_removed", sc) + logging.debug(f"Websocket Removed: {sc.uid}") + self._check_closed_event() + + def add_bridge_connection(self, bc: BridgeSocket) -> None: + self.bridge_connections[bc.uid] = bc + logging.debug(f"New Bridge Connection Added: {bc.uid}") + + def remove_bridge_connection(self, bc: BridgeSocket) -> None: + old_bc = self.bridge_connections.pop(bc.uid, None) + if old_bc is not None: + logging.debug(f"Bridge Connection Removed: {bc.uid}") + self._check_closed_event() + + def _check_closed_event(self) -> None: + if ( + self.closed_event is not None and + not self.clients and + not self.bridge_connections + ): + self.closed_event.set() + + def notify_clients( + self, + name: str, + data: Union[List, Tuple] = [], + mask: List[int] = [] + ) -> None: + msg: Dict[str, Any] = {'jsonrpc': "2.0", 'method': "notify_" + name} + if data: + msg['params'] = data + for sc in list(self.clients.values()): + if sc.uid in mask or sc.need_auth: + continue + sc.queue_message(msg) + + def get_count(self) -> int: + return len(self.clients) + + async def close(self) -> None: + if not self.clients: + return + self.closed_event = asyncio.Event() + for bc in list(self.bridge_connections.values()): + bc.close_socket(1001, "Server Shutdown") + for sc in list(self.clients.values()): + sc.close_socket(1001, "Server Shutdown") + try: + await asyncio.wait_for(self.closed_event.wait(), 2.) 
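The messages queued by `notify_clients` above are plain JSON-RPC 2.0 notification objects: a `method` with optional `params` and no `id`, so clients are expected not to reply. A tiny sketch of the envelope, using the stdlib `json` module rather than Moonraker's `json_wrapper`:

    import json

    def build_notification(name: str, params: list) -> str:
        # JSON-RPC 2.0 notification: omitting "id" marks the message as
        # fire-and-forget. Moonraker prefixes the method with "notify_".
        msg = {"jsonrpc": "2.0", "method": "notify_" + name}
        if params:
            msg["params"] = params
        return json.dumps(msg)

    print(build_notification("proc_stat_update", [{"cpu": 12.5}]))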
+ except asyncio.TimeoutError: + pass + self.closed_event = None + +class WebSocket(WebSocketHandler, BaseRemoteConnection): + connection_count: int = 0 + + def initialize(self) -> None: + self.on_create(self.settings['server']) + self._ip_addr = parse_ip_address(self.request.remote_ip or "") + self.last_pong_time: float = self.eventloop.get_loop_time() + self.cors_allowed: bool = False + + @property + def ip_addr(self) -> Optional[IPAddress]: + return self._ip_addr + + @property + def hostname(self) -> str: + return self.request.host_name + + def get_current_user(self) -> Any: + return self._user_info + + def open(self, *args, **kwargs) -> None: + self.__class__.connection_count += 1 + self.set_nodelay(True) + self._connected_time = self.eventloop.get_loop_time() + agent = self.request.headers.get("User-Agent", "") + is_proxy = False + if ( + "X-Forwarded-For" in self.request.headers or + "X-Real-Ip" in self.request.headers + ): + is_proxy = True + logging.info(f"Websocket Opened: ID: {self.uid}, " + f"Proxied: {is_proxy}, " + f"User Agent: {agent}, " + f"Host Name: {self.hostname}") + self.wsm.add_client(self) + + def on_message(self, message: Union[bytes, str]) -> None: + self.eventloop.register_callback(self._process_message, message) + + def on_pong(self, data: bytes) -> None: + self.last_pong_time = self.eventloop.get_loop_time() + + def on_close(self) -> None: + self.is_closed = True + self.__class__.connection_count -= 1 + kconn: Klippy = self.server.lookup_component("klippy_connection") + kconn.remove_subscription(self) + self.message_buf = [] + now = self.eventloop.get_loop_time() + pong_elapsed = now - self.last_pong_time + for resp in self.pending_responses.values(): + resp.set_exception(ServerError("Client Socket Disconnected", 500)) + self.pending_responses = {} + logging.info(f"Websocket Closed: ID: {self.uid} " + f"Close Code: {self.close_code}, " + f"Close Reason: {self.close_reason}, " + f"Pong Time Elapsed: {pong_elapsed:.2f}") + if self._client_data["type"] == "agent": + extensions: ExtensionManager + extensions = self.server.lookup_component("extensions") + extensions.remove_agent(self) + self.wsm.remove_client(self) + + async def write_to_socket(self, message: Union[bytes, str]) -> None: + try: + await self.write_message(message) + except WebSocketClosedError: + self.is_closed = True + self.message_buf.clear() + logging.info( + f"Websocket closed while writing: {self.uid}") + except Exception: + logging.exception( + f"Error sending data over websocket: {self.uid}") + + def check_origin(self, origin: str) -> bool: + if not super(WebSocket, self).check_origin(origin): + return self.cors_allowed + return True + + def on_user_logout(self, user: str) -> bool: + if super().on_user_logout(user): + self._need_auth = True + return True + return False + + # Check Authorized User + async def prepare(self) -> None: + max_conns = self.settings["max_websocket_connections"] + if self.__class__.connection_count >= max_conns: + raise self.server.error( + "Maximum Number of Websocket Connections Reached" + ) + auth: AuthComp = self.server.lookup_component('authorization', None) + if auth is not None: + try: + self._user_info = await auth.authenticate_request(self.request) + except Exception as e: + logging.info(f"Websocket Failed Authentication: {e}") + self._user_info = None + self._need_auth = True + if "Origin" in self.request.headers: + origin = self.request.headers.get("Origin") + else: + origin = self.request.headers.get("Sec-Websocket-Origin", None) + self.cors_allowed = await 
auth.check_cors(origin) + + def close_socket(self, code: int, reason: str) -> None: + self.close(code, reason) + +class BridgeSocket(WebSocketHandler): + def initialize(self) -> None: + self.server: Server = self.settings['server'] + self.wsm: WebsocketManager = self.server.lookup_component("websockets") + self.eventloop = self.server.get_event_loop() + self.uid = id(self) + self._ip_addr = parse_ip_address(self.request.remote_ip or "") + self.last_pong_time: float = self.eventloop.get_loop_time() + self.is_closed = False + self.klippy_writer: Optional[asyncio.StreamWriter] = None + self.klippy_write_buf: List[bytes] = [] + self.klippy_queue_busy: bool = False + self.cors_allowed: bool = False + + @property + def ip_addr(self) -> Optional[IPAddress]: + return self._ip_addr + + @property + def hostname(self) -> str: + return self.request.host_name + + def open(self, *args, **kwargs) -> None: + WebSocket.connection_count += 1 + self.set_nodelay(True) + self._connected_time = self.eventloop.get_loop_time() + agent = self.request.headers.get("User-Agent", "") + is_proxy = False + if ( + "X-Forwarded-For" in self.request.headers or + "X-Real-Ip" in self.request.headers + ): + is_proxy = True + logging.info(f"Bridge Socket Opened: ID: {self.uid}, " + f"Proxied: {is_proxy}, " + f"User Agent: {agent}, " + f"Host Name: {self.hostname}") + self.wsm.add_bridge_connection(self) + + def on_message(self, message: Union[bytes, str]) -> None: + if isinstance(message, str): + message = message.encode(encoding="utf-8") + self.klippy_write_buf.append(message) + if self.klippy_queue_busy: + return + self.klippy_queue_busy = True + self.eventloop.register_callback(self._write_klippy_messages) + + async def _write_klippy_messages(self) -> None: + while self.klippy_write_buf: + if self.klippy_writer is None or self.is_closed: + break + msg = self.klippy_write_buf.pop(0) + try: + self.klippy_writer.write(msg + b"\x03") + await self.klippy_writer.drain() + except asyncio.CancelledError: + raise + except Exception: + if not self.is_closed: + logging.debug("Klippy Disconnection From _write_request()") + self.close(1001, "Klippy Disconnected") + break + self.klippy_queue_busy = False + + def on_pong(self, data: bytes) -> None: + self.last_pong_time = self.eventloop.get_loop_time() + + def on_close(self) -> None: + WebSocket.connection_count -= 1 + self.is_closed = True + self.klippy_write_buf.clear() + if self.klippy_writer is not None: + self.klippy_writer.close() + self.klippy_writer = None + now = self.eventloop.get_loop_time() + pong_elapsed = now - self.last_pong_time + logging.info(f"Bridge Socket Closed: ID: {self.uid} " + f"Close Code: {self.close_code}, " + f"Close Reason: {self.close_reason}, " + f"Pong Time Elapsed: {pong_elapsed:.2f}") + self.wsm.remove_bridge_connection(self) + + async def _read_unix_stream(self, reader: asyncio.StreamReader) -> None: + errors_remaining: int = 10 + while not reader.at_eof(): + try: + data = memoryview(await reader.readuntil(b'\x03')) + except (ConnectionError, asyncio.IncompleteReadError): + break + except asyncio.CancelledError: + logging.exception("Klippy Stream Read Cancelled") + raise + except Exception: + logging.exception("Klippy Stream Read Error") + errors_remaining -= 1 + if not errors_remaining or self.is_closed: + break + continue + try: + await self.write_message(data[:-1].tobytes()) + except WebSocketClosedError: + logging.info( + f"Bridge closed while writing: {self.uid}") + break + except asyncio.CancelledError: + raise + except Exception: + 
logging.exception( + f"Error sending data over Bridge: {self.uid}") + errors_remaining -= 1 + if not errors_remaining or self.is_closed: + break + continue + errors_remaining = 10 + if not self.is_closed: + logging.debug("Bridge Disconnection From _read_unix_stream()") + self.close_socket(1001, "Klippy Disconnected") + + def check_origin(self, origin: str) -> bool: + if not super().check_origin(origin): + return self.cors_allowed + return True + + # Check Authorized User + async def prepare(self) -> None: + max_conns = self.settings["max_websocket_connections"] + if WebSocket.connection_count >= max_conns: + raise self.server.error( + "Maximum Number of Bridge Connections Reached" + ) + auth: AuthComp = self.server.lookup_component("authorization", None) + if auth is not None: + self.current_user = await auth.authenticate_request(self.request) + if "Origin" in self.request.headers: + origin = self.request.headers.get("Origin") + else: + origin = self.request.headers.get("Sec-Websocket-Origin", None) + self.cors_allowed = await auth.check_cors(origin) + kconn: Klippy = self.server.lookup_component("klippy_connection") + try: + reader, writer = await kconn.open_klippy_connection() + except ServerError as err: + raise HTTPError(err.status_code, str(err)) from None + except Exception as e: + raise HTTPError(503, "Failed to open connection to Klippy") from e + self.klippy_writer = writer + self.eventloop.register_callback(self._read_unix_stream, reader) + + def close_socket(self, code: int, reason: str) -> None: + self.close(code, reason) + +def load_component(config: ConfigHelper) -> WebsocketManager: + return WebsocketManager(config) diff --git a/moonraker/components/wled.py b/moonraker/components/wled.py index b3d6baa..ea758e0 100644 --- a/moonraker/components/wled.py +++ b/moonraker/components/wled.py @@ -11,11 +11,12 @@ from __future__ import annotations from enum import Enum import logging -import json import asyncio import serial_asyncio from tornado.httpclient import AsyncHTTPClient from tornado.httpclient import HTTPRequest +from ..utils import json_wrapper as jsonw +from ..common import RequestType # Annotation imports from typing import ( @@ -28,10 +29,8 @@ from typing import ( ) if TYPE_CHECKING: - from confighelper import ConfigHelper - from websockets import WebRequest - from . 
import klippy_apis - APIComp = klippy_apis.KlippyAPI + from ..confighelper import ConfigHelper + from ..common import WebRequest class OnOff(str, Enum): on: str = "on" @@ -295,7 +294,7 @@ class StripHttp(Strip): request = HTTPRequest(url=self.url, method="POST", headers=headers, - body=json.dumps(state), + body=jsonw.dumps(state), connect_timeout=self.timeout, request_timeout=self.timeout) for i in range(retries): @@ -331,7 +330,7 @@ class StripSerial(Strip): logging.debug(f"WLED: serial:{self.serialport} json:{state}") - self.ser.write(json.dumps(state).encode()) + self.ser.write(jsonw.dumps(state)) def close(self: StripSerial): if hasattr(self, 'ser'): @@ -390,23 +389,24 @@ class WLED: # As moonraker is about making things a web api, let's try it # Yes, this is largely a cut-n-paste from power.py self.server.register_endpoint( - "/machine/wled/strips", ["GET"], - self._handle_list_strips) + "/machine/wled/strips", RequestType.GET, self._handle_list_strips + ) self.server.register_endpoint( - "/machine/wled/status", ["GET"], - self._handle_batch_wled_request) + "/machine/wled/status", RequestType.GET, self._handle_batch_wled_request + ) self.server.register_endpoint( - "/machine/wled/on", ["POST"], - self._handle_batch_wled_request) + "/machine/wled/on", RequestType.POST, self._handle_batch_wled_request + ) self.server.register_endpoint( - "/machine/wled/off", ["POST"], - self._handle_batch_wled_request) + "/machine/wled/off", RequestType.POST, self._handle_batch_wled_request + ) self.server.register_endpoint( - "/machine/wled/toggle", ["POST"], - self._handle_batch_wled_request) + "/machine/wled/toggle", RequestType.POST, self._handle_batch_wled_request + ) self.server.register_endpoint( - "/machine/wled/strip", ["GET", "POST"], - self._handle_single_wled_request) + "/machine/wled/strip", RequestType.GET | RequestType.POST, + self._handle_single_wled_request + ) async def component_init(self) -> None: try: @@ -447,9 +447,15 @@ class WLED: # Full control of wled # state: True, False, "on", "off" # preset: wled preset (int) to use (ignored if state False or "Off") - async def set_wled_state(self: WLED, strip: str, state: str = None, - preset: int = -1, brightness: int = -1, - intensity: int = -1, speed: int = -1) -> None: + async def set_wled_state( + self: WLED, + strip: str, + state: Optional[str] = None, + preset: int = -1, + brightness: int = -1, + intensity: int = -1, + speed: int = -1 + ) -> None: status = None if isinstance(state, bool): @@ -462,7 +468,8 @@ class WLED: if status is None and preset == -1 and brightness == -1 and \ intensity == -1 and speed == -1: logging.info( - f"Invalid state received but no control or preset data passed") + "Invalid state received but no control or preset data passed" + ) return if strip not in self.strips: @@ -516,19 +523,19 @@ class WLED: intensity: int = web_request.get_int('intensity', -1) speed: int = web_request.get_int('speed', -1) - req_action = web_request.get_action() + req_type = web_request.get_request_type() if strip_name not in self.strips: raise self.server.error(f"No valid strip named {strip_name}") strip = self.strips[strip_name] - if req_action == 'GET': + if req_type == RequestType.GET: return {strip_name: strip.get_strip_info()} - elif req_action == "POST": + elif req_type == RequestType.POST: action = web_request.get_str('action').lower() if action not in ["on", "off", "toggle", "control"]: - raise self.server.error( - f"Invalid requested action '{action}'") - result = await self._process_request(strip, action, preset, - 
brightness, intensity, speed) + raise self.server.error(f"Invalid requested action '{action}'") + result = await self._process_request( + strip, action, preset, brightness, intensity, speed + ) return {strip_name: result} async def _handle_batch_wled_request(self: WLED, diff --git a/moonraker/components/zeroconf.py b/moonraker/components/zeroconf.py index 254bcc2..3844f55 100644 --- a/moonraker/components/zeroconf.py +++ b/moonraker/components/zeroconf.py @@ -7,15 +7,32 @@ from __future__ import annotations import socket import asyncio import logging +import ipaddress +import random +import uuid +from itertools import cycle +from email.utils import formatdate from zeroconf import IPVersion from zeroconf.asyncio import AsyncServiceInfo, AsyncZeroconf +from ..common import RequestType, TransportType -from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterator, + List, + Optional, + Tuple +) if TYPE_CHECKING: - from confighelper import ConfigHelper + from ..confighelper import ConfigHelper + from ..common import WebRequest + from .application import MoonrakerApp from .machine import Machine +ZC_SERVICE_TYPE = "_moonraker._tcp.local." class AsyncRunner: def __init__(self, ip_version: IPVersion) -> None: @@ -48,54 +65,355 @@ class AsyncRunner: class ZeroconfRegistrar: def __init__(self, config: ConfigHelper) -> None: self.server = config.get_server() - self.runner = AsyncRunner(IPVersion.All) hi = self.server.get_host_info() - addresses: Optional[List[bytes]] = [socket.inet_aton(hi["address"])] - self.bound_all = hi["address"] == "0.0.0.0" - self.service_info = self._build_service_info(addresses) + self.mdns_name = config.get("mdns_hostname", hi["hostname"]) + addr: str = hi["address"] + self.ip_version = IPVersion.All + if addr.lower() == "all": + addr = "::" + else: + addr_obj = ipaddress.ip_address(addr) + self.ip_version = ( + IPVersion.V4Only if addr_obj.version == 4 else IPVersion.V6Only + ) + self.runner = AsyncRunner(self.ip_version) + self.cfg_addr = addr + self.bound_all = addr in ["0.0.0.0", "::"] if self.bound_all: self.server.register_event_handler( "machine:net_state_changed", self._update_service) + self.ssdp_server: Optional[SSDPServer] = None + if config.getboolean("enable_ssdp", False): + self.ssdp_server = SSDPServer(config) async def component_init(self) -> None: logging.info("Starting Zeroconf services") + app: MoonrakerApp = self.server.lookup_component("application") + machine: Machine = self.server.lookup_component("machine") + app_args = self.server.get_app_args() + instance_uuid: str = app_args["instance_uuid"] + if ( + machine.get_provider_type().startswith("systemd") and + "unit_name" in machine.get_moonraker_service_info() + ): + # Use the name of the systemd service unit to identify service + instance_name = machine.unit_name.capitalize() + else: + # Use the UUID. 
First 8 hex digits should be unique enough
+            instance_name = f"Moonraker-{instance_uuid[:8]}"
+        hi = self.server.get_host_info()
+        host = self.mdns_name
+        zc_service_props = {
+            "uuid": instance_uuid,
+            "https_port": hi["ssl_port"] if app.https_enabled() else "",
+            "version": app_args["software_version"],
+            "route_prefix": app.route_prefix
+        }
         if self.bound_all:
-            machine: Machine = self.server.lookup_component("machine")
+            if not host:
+                host = machine.public_ip
             network = machine.get_system_info()["network"]
-            addresses = [x for x in self._extract_ip_addresses(network)]
-            self.service_info = self._build_service_info(addresses)
+            addresses: List[bytes] = [x for x in self._extract_ip_addresses(network)]
+        else:
+            if not host:
+                host = self.cfg_addr
+            host_addr = ipaddress.ip_address(self.cfg_addr)
+            addresses = [host_addr.packed]
+        zc_service_name = f"{instance_name} @ {host}.{ZC_SERVICE_TYPE}"
+        server_name = self.mdns_name or instance_name.lower()
+        self.service_info = AsyncServiceInfo(
+            ZC_SERVICE_TYPE,
+            zc_service_name,
+            addresses=addresses,
+            port=hi["port"],
+            properties=zc_service_props,
+            server=f"{server_name}.local.",
+        )
         await self.runner.register_services([self.service_info])
+        if self.ssdp_server is not None:
+            addr = self.cfg_addr if not self.bound_all else machine.public_ip
+            if not addr:
+                addr = f"{self.mdns_name}.local"
+            name = f"{instance_name} ({host})"
+            if len(name) > 64:
+                name = instance_name
+            await self.ssdp_server.start()
+            self.ssdp_server.register_service(name, addr, hi["port"])
 
     async def close(self) -> None:
         await self.runner.unregister_services([self.service_info])
+        if self.ssdp_server is not None:
+            await self.ssdp_server.stop()
 
     async def _update_service(self, network: Dict[str, Any]) -> None:
         if self.bound_all:
             addresses = [x for x in self._extract_ip_addresses(network)]
-            self.service_info = self._build_service_info(addresses)
+            self.service_info.addresses = addresses
             await self.runner.update_services([self.service_info])
 
-    def _build_service_info(self,
-                            addresses: Optional[List[bytes]] = None
-                            ) -> AsyncServiceInfo:
-        hi = self.server.get_host_info()
-        return AsyncServiceInfo(
-            "_moonraker._tcp.local.",
-            f"Moonraker Instance on {hi['hostname']}._moonraker._tcp.local.",
-            addresses=addresses,
-            port=hi["port"],
-            properties={"path": "/"},
-            server=f"{hi['hostname']}.local.",
-        )
-
     def _extract_ip_addresses(self, network: Dict[str, Any]) -> Iterator[bytes]:
         for ifname, ifinfo in network.items():
             for addr_info in ifinfo["ip_addresses"]:
                 if addr_info["is_link_local"]:
                     continue
-                is_ipv6 = addr_info['family'] == "ipv6"
-                family = socket.AF_INET6 if is_ipv6 else socket.AF_INET
-                yield socket.inet_pton(family, addr_info["address"])
+                addr_obj = ipaddress.ip_address(addr_info["address"])
+                ver = addr_obj.version
+                if (
+                    (self.ip_version == IPVersion.V4Only and ver == 6) or
+                    (self.ip_version == IPVersion.V6Only and ver == 4)
+                ):
+                    continue
+                yield addr_obj.packed
+
+
+SSDP_ADDR = ("239.255.255.250", 1900)
+SSDP_SERVER_ID = "Moonraker SSDP/UPNP Server"
+SSDP_MAX_AGE = 1800
+SSDP_DEVICE_TYPE = "urn:arksine.github.io:device:Moonraker:1"
+SSDP_DEVICE_XML = """
+<?xml version="1.0"?>
+<root xmlns="urn:schemas-upnp-org:device-1-0" configId="{config_id}">
+    <specVersion>
+        <major>2</major>
+        <minor>0</minor>
+    </specVersion>
+    <device>
+        <deviceType>{device_type}</deviceType>
+        <friendlyName>{friendly_name}</friendlyName>
+        <manufacturer>Arksine</manufacturer>
+        <manufacturerURL>https://github.com/Arksine/moonraker</manufacturerURL>
+        <modelDescription>API Server for Klipper</modelDescription>
+        <modelName>Moonraker</modelName>
+        <modelNumber>{model_number}</modelNumber>
+        <modelURL>https://github.com/Arksine/moonraker</modelURL>
+        <serialNumber>{serial_number}</serialNumber>
+        <UDN>uuid:{device_uuid}</UDN>
+        <presentationURL>{presentation_url}</presentationURL>
+    </device>
+</root>
+""".strip()
+
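+# The SSDP server below answers multicast M-SEARCH discovery requests and
+# serves the device description rendered from SSDP_DEVICE_XML at
+# /server/zeroconf/ssdp.  As an illustrative sketch (not part of Moonraker
+# itself; standard library only), a client can discover an instance by
+# multicasting an M-SEARCH and reading the LOCATION header of the response:
+#
+#     import socket
+#     msearch = "\r\n".join([
+#         "M-SEARCH * HTTP/1.1",
+#         "HOST: 239.255.255.250:1900",
+#         'MAN: "ssdp:discover"',
+#         "MX: 2",
+#         "ST: upnp:rootdevice",
+#         "", ""
+#     ]).encode()
+#     sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+#     sock.settimeout(3.0)
+#     sock.sendto(msearch, ("239.255.255.250", 1900))
+#     data, _ = sock.recvfrom(4096)
+#     for line in data.decode(errors="replace").splitlines():
+#         if line.upper().startswith("LOCATION:"):
+#             # e.g. http://<host>:<port>/server/zeroconf/ssdp
+#             print(line.split(":", 1)[1].strip())
+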
+class SSDPServer(asyncio.protocols.DatagramProtocol):
+    def __init__(self, config: ConfigHelper) -> None:
+        self.server = config.get_server()
+        self.unique_id = uuid.UUID(self.server.get_app_args()["instance_uuid"])
+        self.name: str = "Moonraker"
+        self.base_url: str = ""
+        self.response_headers: List[str] = []
+        self.registered: bool = False
+        self.running: bool = False
+        self.close_fut: Optional[asyncio.Future] = None
+        self.response_handle: Optional[asyncio.TimerHandle] = None
+        eventloop = self.server.get_event_loop()
+        self.boot_id = int(eventloop.get_loop_time())
+        self.config_id = 1
+        self.ad_timer = eventloop.register_timer(self._advertise_presence)
+        self.server.register_endpoint(
+            "/server/zeroconf/ssdp",
+            RequestType.GET,
+            self._handle_xml_request,
+            transports=TransportType.HTTP,
+            wrap_result=False,
+            content_type="application/xml",
+            auth_required=False
+        )
+
+    def _create_ssdp_socket(
+        self,
+        source_addr: Tuple[str, int] = ("0.0.0.0", 0),
+        target_addr: Tuple[str, int] = SSDP_ADDR
+    ) -> socket.socket:
+        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        try:
+            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+        except AttributeError:
+            pass
+        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+        source_ip = socket.inet_aton(source_addr[0])
+        target_ip = socket.inet_aton(target_addr[0])
+        ip_combo = target_ip + source_ip
+        sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, source_ip)
+        sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
+        sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, ip_combo)
+        return sock
+
+    async def start(self) -> None:
+        if self.running:
+            return
+        try:
+            sock = self._create_ssdp_socket()
+            sock.settimeout(0)
+            sock.setblocking(False)
+            sock.bind(("", SSDP_ADDR[1]))
+            _loop = asyncio.get_running_loop()
+            ret = await _loop.create_datagram_endpoint(lambda: self, sock=sock)
+            self.transport, _ = ret
+        except (socket.error, OSError):
+            return
+        self.running = True
+
+    async def stop(self) -> None:
+        if not self.running:
+            return
+        self.running = False
+        self.ad_timer.stop()
+        if self.response_handle is not None:
+            self.response_handle.cancel()
+            self.response_handle = None
+        if self.transport.is_closing():
+            logging.info("Transport already closing")
+            return
+        for notification in self._build_notifications("ssdp:byebye"):
+            self.transport.sendto(notification, SSDP_ADDR)
+        self.close_fut = self.server.get_event_loop().create_future()
+        self.transport.close()
+        try:
+            await asyncio.wait_for(self.close_fut, 2.)
+ except asyncio.TimeoutError: + pass + self.close_fut = None + + def register_service( + self, name: str, host_name_or_ip: str, port: int + ) -> None: + if len(name) > 64: + name = name[:64] + self.name = name + app: MoonrakerApp = self.server.lookup_component("application") + self.base_url = f"http://{host_name_or_ip}:{port}{app.route_prefix}" + self.response_headers = [ + f"USN: uuid:{self.unique_id}::upnp:rootdevice", + f"LOCATION: {self.base_url}/server/zeroconf/ssdp", + "ST: upnp:rootdevice", + "EXT:", + f"SERVER: {SSDP_SERVER_ID}", + f"CACHE-CONTROL: max-age={SSDP_MAX_AGE}", + f"BOOTID.UPNP.ORG: {self.boot_id}", + f"CONFIGID.UPNP.ORG: {self.config_id}", + ] + self.registered = True + advertisements = self._build_notifications("ssdp:alive") + if self.running: + for ad in advertisements: + self.transport.sendto(ad, SSDP_ADDR) + self.advertisements = cycle(advertisements) + self.ad_timer.start() + + async def _handle_xml_request(self, web_request: WebRequest) -> str: + if not self.registered: + raise self.server.error("Moonraker SSDP Device not registered", 404) + app_args = self.server.get_app_args() + return SSDP_DEVICE_XML.format( + device_type=SSDP_DEVICE_TYPE, + config_id=str(self.config_id), + friendly_name=self.name, + model_number=app_args["software_version"], + serial_number=self.unique_id.hex, + device_uuid=str(self.unique_id), + presentation_url=self.base_url + ) + + def _advertise_presence(self, eventtime: float) -> float: + if self.running and self.registered: + cur_ad = next(self.advertisements) + self.transport.sendto(cur_ad, SSDP_ADDR) + delay = random.uniform(SSDP_MAX_AGE / 6., SSDP_MAX_AGE / 3.) + return eventtime + delay + + def connection_made( + self, transport: asyncio.transports.BaseTransport + ) -> None: + logging.debug("SSDP Server Connected") + + def connection_lost(self, exc: Exception | None) -> None: + logging.debug("SSDP Server Disconnected") + if self.close_fut is not None: + self.close_fut.set_result(None) + + def pause_writing(self) -> None: + logging.debug("SSDP Pause Writing Requested") + + def resume_writing(self) -> None: + logging.debug("SSDP Resume Writing Requested") + + def datagram_received(self, data: bytes, addr: tuple[str | Any, int]) -> None: + if not self.registered: + return + try: + parts = data.decode().split("\r\n\r\n", maxsplit=1) + header = parts[0] + except ValueError: + logging.exception("Data Decode Error") + return + hlines = header.splitlines() + ssdp_command = hlines[0].strip() + headers = {} + for line in hlines[1:]: + parts = line.strip().split(":", maxsplit=1) + if len(parts) < 2: + continue + headers[parts[0].upper()] = parts[1].strip() + if ( + ssdp_command != "M-SEARCH * HTTP/1.1" or + headers.get("MAN") != '"ssdp:discover"' + ): + # Not a discovery request + return + if headers.get("ST") not in ["upnp:rootdevice", "ssdp:all"]: + # Service Type doesn't apply + return + if self.response_handle is not None: + # response in progress + return + if "MX" in headers: + delay_time = random.uniform(0, float(headers["MX"])) + eventloop = self.server.get_event_loop() + self.response_handle = eventloop.delay_callback( + delay_time, self._respond_to_discovery, addr + ) + else: + self._respond_to_discovery(addr) + + def _respond_to_discovery(self, addr: tuple[str | Any, int]) -> None: + if not self.running: + return + self.response_handle = None + response: List[str] = ["HTTP/1.1 200 OK"] + response.extend(self.response_headers) + response.append(f"DATE: {formatdate(usegmt=True)}") + response.extend(["", ""]) + 
self.transport.sendto("\r\n".join(response).encode(), addr) + + def _build_notifications(self, nts: str) -> List[bytes]: + notifications: List[bytes] = [] + notify_types = [ + ("upnp:rootdevice", f"uuid:{self.unique_id}::upnp:rootdevice"), + (f"uuid:{self.unique_id}", f"uuid:{self.unique_id}"), + (SSDP_DEVICE_TYPE, f"uuid:{self.unique_id}::{SSDP_DEVICE_TYPE}") + ] + for (nt, usn) in notify_types: + notifications.append( + "\r\n".join([ + "NOTIFY * HTTP/1.1", + f"HOST: {SSDP_ADDR[0]}:{SSDP_ADDR[1]}", + f"NTS: {nts}", + f"NT: {nt}", + f"USN: {usn}", + f"LOCATION: {self.base_url}/server/zeroconf/ssdp", + "EXT:", + f"SERVER: {SSDP_SERVER_ID}", + f"CACHE-CONTROL: max-age={SSDP_MAX_AGE}", + f"BOOTID.UPNP.ORG: {self.boot_id}", + f"CONFIGID.UPNP.ORG: {self.config_id}", + "", + "" + ]).encode() + ) + return notifications + + def error_received(self, exc: Exception) -> None: + logging.info(f"SSDP Server Error: {exc}") def load_component(config: ConfigHelper) -> ZeroconfRegistrar: diff --git a/moonraker/confighelper.py b/moonraker/confighelper.py index e8a6a24..aa802d6 100644 --- a/moonraker/confighelper.py +++ b/moonraker/confighelper.py @@ -10,13 +10,18 @@ import os import hashlib import pathlib import re +import threading +import copy import logging -from utils import SentinelClass +from io import StringIO +from .utils import Sentinel +from .common import RenderableTemplate # Annotation imports from typing import ( TYPE_CHECKING, Any, + Awaitable, Callable, IO, Optional, @@ -27,17 +32,22 @@ from typing import ( Dict, List, Type, + TextIO ) if TYPE_CHECKING: - from moonraker import Server - from components.gpio import GpioFactory, GpioOutputPin - from components.template import TemplateFactory, JinjaTemplate - from io import TextIOWrapper + from .server import Server + from .components.gpio import ( + GpioFactory, + GpioOutputPin, + GpioEvent, + GpioEventCallback + ) + from .components.template import TemplateFactory _T = TypeVar("_T") ConfigVal = Union[None, int, float, bool, str, dict, list] -SENTINEL = SentinelClass.get_instance() DOCS_URL = "https://moonraker.readthedocs.io/en/latest" +CFG_ERROR_KEY = "__CONFIG_ERROR__" class ConfigError(Exception): pass @@ -47,26 +57,28 @@ class ConfigHelper: error = ConfigError def __init__(self, server: Server, - config: configparser.ConfigParser, + config_source: ConfigSourceWrapper, section: str, parsed: Dict[str, Dict[str, ConfigVal]], - file_section_map: Dict[str, List[str]], fallback_section: Optional[str] = None ) -> None: self.server = server - self.config = config + self.source = config_source + self.config = config_source.get_parser() self.section = section self.fallback_section: Optional[str] = fallback_section self.parsed = parsed if self.section not in self.parsed: self.parsed[self.section] = {} - self.file_section_map = file_section_map - self.sections = config.sections - self.has_section = config.has_section + self.sections = self.config.sections + self.has_section = self.config.has_section def get_server(self) -> Server: return self.server + def get_source(self) -> ConfigSourceWrapper: + return self.source + def __getitem__(self, key: str) -> ConfigHelper: return self.getsection(key) @@ -77,16 +89,13 @@ class ConfigHelper: return self.config.has_option(self.section, option) def set_option(self, option: str, value: str) -> None: - self.config[self.section][option] = value + self.source.set_option(self.section, option, value) def get_name(self) -> str: return self.section def get_file(self) -> Optional[pathlib.Path]: - for fname in 
reversed(self.file_section_map.keys()): - if self.section in self.file_section_map[fname]: - return pathlib.Path(fname) - return None + return self.source.find_config_file(self.section) def get_options(self) -> Dict[str, str]: if self.section not in self.config: @@ -110,20 +119,19 @@ class ConfigHelper: self, section: str, fallback: Optional[str] = None ) -> ConfigHelper: return ConfigHelper( - self.server, self.config, section, self.parsed, - self.file_section_map, fallback + self.server, self.source, section, self.parsed, fallback ) def _get_option(self, func: Callable[..., Any], option: str, - default: Union[SentinelClass, _T], + default: Union[Sentinel, _T], above: Optional[Union[int, float]] = None, below: Optional[Union[int, float]] = None, minval: Optional[Union[int, float]] = None, maxval: Optional[Union[int, float]] = None, deprecate: bool = False - ) -> _T: + ) -> Any: section = self.section warn_fallback = False if ( @@ -135,14 +143,17 @@ class ConfigHelper: try: val = func(section, option) except (configparser.NoOptionError, configparser.NoSectionError) as e: - if isinstance(default, SentinelClass): + if default is Sentinel.MISSING: + self.parsed[self.section][CFG_ERROR_KEY] = True raise ConfigError(str(e)) from None val = default section = self.section - except Exception: + except Exception as e: + self.parsed[self.section][CFG_ERROR_KEY] = True raise ConfigError( - f"Error parsing option ({option}) from " - f"section [{self.section}]") + f"[{self.section}]: Option '{option}' encountered the following " + f"error while parsing: {e}" + ) from e else: if deprecate: self.server.add_warning( @@ -156,15 +167,17 @@ class ConfigHelper: f"to section [{self.section}]. Please correct your " f"configuration, see {help} for detailed documentation." 
) - self._check_option(option, val, above, below, minval, maxval) - if ( - val is None or - isinstance(val, (int, float, bool, str, dict, list)) - ): - self.parsed[section][option] = val - else: - # If the item cannot be encoded to json serialize to a string - self.parsed[section][option] = str(val) + if isinstance(val, (int, float)): + self._check_option(option, val, above, below, minval, maxval) + if option not in self.parsed[section]: + if ( + val is None or + isinstance(val, (int, float, bool, str, dict, list)) + ): + self.parsed[section][option] = copy.deepcopy(val) + else: + # If the item cannot be encoded to json serialize to a string + self.parsed[section][option] = str(val) return val def _check_option(self, @@ -194,7 +207,7 @@ class ConfigHelper: def get(self, option: str, - default: Union[SentinelClass, _T] = SENTINEL, + default: Union[Sentinel, _T] = Sentinel.MISSING, deprecate: bool = False ) -> Union[str, _T]: return self._get_option( @@ -203,7 +216,7 @@ class ConfigHelper: def getint(self, option: str, - default: Union[SentinelClass, _T] = SENTINEL, + default: Union[Sentinel, _T] = Sentinel.MISSING, above: Optional[int] = None, below: Optional[int] = None, minval: Optional[int] = None, @@ -216,7 +229,7 @@ class ConfigHelper: def getboolean(self, option: str, - default: Union[SentinelClass, _T] = SENTINEL, + default: Union[Sentinel, _T] = Sentinel.MISSING, deprecate: bool = False ) -> Union[bool, _T]: return self._get_option( @@ -225,7 +238,7 @@ class ConfigHelper: def getfloat(self, option: str, - default: Union[SentinelClass, _T] = SENTINEL, + default: Union[Sentinel, _T] = Sentinel.MISSING, above: Optional[float] = None, below: Optional[float] = None, minval: Optional[float] = None, @@ -238,7 +251,7 @@ class ConfigHelper: def getlists(self, option: str, - default: Union[SentinelClass, _T] = SENTINEL, + default: Union[Sentinel, _T] = Sentinel.MISSING, list_type: Type = str, separators: Tuple[Optional[str], ...] 
= ('\n',), count: Optional[Tuple[Optional[int], ...]] = None, @@ -249,7 +262,7 @@ class ConfigHelper: f"Option '{option}' in section " f"[{self.section}]: length of 'count' argument must ", "match length of 'separators' argument") - else: + elif count is None: count = tuple(None for _ in range(len(separators))) def list_parser(value: str, @@ -288,7 +301,7 @@ class ConfigHelper: def getlist(self, option: str, - default: Union[SentinelClass, _T] = SENTINEL, + default: Union[Sentinel, _T] = Sentinel.MISSING, separator: Optional[str] = '\n', count: Optional[int] = None, deprecate: bool = False @@ -298,7 +311,7 @@ class ConfigHelper: def getintlist(self, option: str, - default: Union[SentinelClass, _T] = SENTINEL, + default: Union[Sentinel, _T] = Sentinel.MISSING, separator: Optional[str] = '\n', count: Optional[int] = None, deprecate: bool = False @@ -308,7 +321,7 @@ class ConfigHelper: def getfloatlist(self, option: str, - default: Union[SentinelClass, _T] = SENTINEL, + default: Union[Sentinel, _T] = Sentinel.MISSING, separator: Optional[str] = '\n', count: Optional[int] = None, deprecate: bool = False @@ -318,7 +331,7 @@ class ConfigHelper: def getdict(self, option: str, - default: Union[SentinelClass, _T] = SENTINEL, + default: Union[Sentinel, _T] = Sentinel.MISSING, separators: Tuple[Optional[str], Optional[str]] = ('\n', '='), dict_type: Type = str, allow_empty_fields: bool = False, @@ -352,12 +365,12 @@ class ConfigHelper: def getgpioout(self, option: str, - default: Union[SentinelClass, _T] = SENTINEL, + default: Union[Sentinel, _T] = Sentinel.MISSING, initial_value: int = 0, deprecate: bool = False ) -> Union[GpioOutputPin, _T]: try: - gpio: GpioFactory = self.server.load_component(self, 'gpio') + gpio: GpioFactory = self.server.load_component(self, "gpio") except Exception: raise ConfigError( f"Section [{self.section}], option '{option}', " @@ -369,33 +382,54 @@ class ConfigHelper: return self._get_option(getgpio_wrapper, option, default, deprecate=deprecate) - def gettemplate(self, - option: str, - default: Union[SentinelClass, _T] = SENTINEL, - is_async: bool = False, - deprecate: bool = False - ) -> Union[JinjaTemplate, _T]: + def getgpioevent( + self, + option: str, + event_callback: GpioEventCallback, + default: Union[Sentinel, _T] = Sentinel.MISSING, + deprecate: bool = False + ) -> Union[GpioEvent, _T]: try: - template: TemplateFactory - template = self.server.load_component(self, 'template') + gpio: GpioFactory = self.server.load_component(self, "gpio") except Exception: raise ConfigError( f"Section [{self.section}], option '{option}', " - "Template Component not available") + "GPIO Component not available" + ) - def gettemplate_wrapper(sec: str, opt: str) -> JinjaTemplate: + def getgpioevent_wrapper(sec: str, opt: str) -> GpioEvent: + val = self.config.get(sec, opt) + return gpio.register_gpio_event(val, event_callback) + return self._get_option( + getgpioevent_wrapper, option, default, deprecate=deprecate + ) + + def gettemplate(self, + option: str, + default: Union[Sentinel, _T] = Sentinel.MISSING, + is_async: bool = False, + deprecate: bool = False + ) -> Union[RenderableTemplate, _T]: + try: + template: TemplateFactory = self.server.load_component(self, 'template') + except Exception: + raise ConfigError( + f"Section [{self.section}], option '{option}': " + "Failed to load 'template' component." 
+            )
+
+        def gettemplate_wrapper(sec: str, opt: str) -> RenderableTemplate:
             val = self.config.get(sec, opt)
             return template.create_template(val.strip(), is_async)
-
         return self._get_option(gettemplate_wrapper, option, default,
                                 deprecate=deprecate)
 
     def load_template(self,
                       option: str,
-                      default: Union[SentinelClass, str] = SENTINEL,
+                      default: Union[Sentinel, str] = Sentinel.MISSING,
                       is_async: bool = False,
                       deprecate: bool = False
-                      ) -> JinjaTemplate:
+                      ) -> RenderableTemplate:
         val = self.gettemplate(option, default, is_async, deprecate)
         if isinstance(val, str):
             template: TemplateFactory
@@ -403,29 +437,32 @@
             return template.create_template(val.strip(), is_async)
         return val
 
+    def getpath(self,
+                option: str,
+                default: Union[Sentinel, _T] = Sentinel.MISSING,
+                deprecate: bool = False
+                ) -> Union[pathlib.Path, _T]:
+        val = self.gettemplate(option, default, deprecate=deprecate)
+        if isinstance(val, RenderableTemplate):
+            ctx = {"data_path": self.server.get_app_args()["data_path"]}
+            strpath = val.render(ctx)
+            return pathlib.Path(strpath).expanduser().resolve()
+        return val
+
     def read_supplemental_dict(self, obj: Dict[str, Any]) -> ConfigHelper:
         if not obj:
-            raise ConfigError(f"Cannot ready Empty Dict")
-        try:
-            sup_cfg = configparser.ConfigParser(interpolation=None)
-            sup_cfg.read_dict(obj)
-        except Exception:
-            raise ConfigError("Error Reading Object")
-        sections = sup_cfg.sections()
-        return ConfigHelper(self.server, sup_cfg, sections[0], {}, {})
+            raise ConfigError("Cannot read Empty Dict")
+        source = DictSourceWrapper()
+        source.read_dict(obj)
+        sections = source.config.sections()
+        return ConfigHelper(self.server, source, sections[0], {})
 
     def read_supplemental_config(self, file_name: str) -> ConfigHelper:
-        cfg_file_path = os.path.normpath(os.path.expanduser(file_name))
-        if not os.path.isfile(cfg_file_path):
-            raise ConfigError(
-                f"Configuration File Not Found: '{cfg_file_path}''")
-        try:
-            sup_cfg = configparser.ConfigParser(interpolation=None)
-            sup_cfg.read_file(open(cfg_file_path))
-        except Exception:
-            raise ConfigError(f"Error Reading Config: '{cfg_file_path}'")
-        sections = sup_cfg.sections()
-        return ConfigHelper(self.server, sup_cfg, sections[0], {}, {})
+        fpath = pathlib.Path(file_name).expanduser().resolve()
+        source = FileSourceWrapper(self.server)
+        source.read_file(fpath)
+        sections = source.config.sections()
+        return ConfigHelper(self.server, source, sections[0], {})
 
     def write_config(self, file_obj: IO[str]) -> None:
         self.config.write(file_obj)
@@ -434,15 +471,13 @@
         return dict(self.parsed)
 
     def get_orig_config(self) -> Dict[str, Dict[str, str]]:
-        return {
-            key: dict(val) for key, val in self.config.items()
-        }
+        return self.source.as_dict()
 
     def get_file_sections(self) -> Dict[str, List[str]]:
-        return dict(self.file_section_map)
+        return self.source.get_file_sections()
 
     def get_config_files(self) -> List[str]:
-        return list(self.file_section_map.keys())
+        return [str(f) for f in self.source.get_files()]
 
     def validate_config(self) -> None:
         for sect in self.config.sections():
@@ -451,9 +486,14 @@
                     f"Unparsed config section [{sect}] detected. This "
                     "may be the result of a component that failed to "
                     "load. In the future this will result in a startup "
-                    "error.")
+                    "error."
+                )
                 continue
             parsed_opts = self.parsed[sect]
+            if CFG_ERROR_KEY in parsed_opts:
+                # Skip validation for sections that have encountered an error,
+                # as this will always result in unparsed options.
+ continue for opt, val in self.config.items(sect): if opt not in parsed_opts: self.server.add_warning( @@ -461,18 +501,18 @@ class ConfigHelper: f"section [{sect}]. This may be an option no longer " "available or could be the result of a module that " "failed to load. In the future this will result " - "in a startup error.") + "in a startup error." + ) - def create_backup(self): + def create_backup(self) -> None: cfg_path = self.server.get_app_args()["config_file"] cfg = pathlib.Path(cfg_path).expanduser().resolve() backup = cfg.parent.joinpath(f".{cfg.name}.bkp") - backup_fp: Optional[TextIOWrapper] = None + backup_fp: Optional[TextIO] = None try: if backup.exists(): cfg_mtime: int = 0 - for cfg_fname in set(self.file_section_map.keys()): - cfg = pathlib.Path(cfg_fname) + for cfg in self.source.get_files(): cfg_mtime = max(cfg_mtime, cfg.stat().st_mtime_ns) backup_mtime = backup.stat().st_mtime_ns if backup_mtime >= cfg_mtime: @@ -487,67 +527,568 @@ class ConfigHelper: if backup_fp is not None: backup_fp.close() +class ConfigSourceWrapper: + def __init__(self): + self.config = configparser.ConfigParser(interpolation=None) + + def get_parser(self): + return self.config + + def as_dict(self) -> Dict[str, Dict[str, str]]: + return {key: dict(val) for key, val in self.config.items()} + + def write_to_string(self) -> str: + sio = StringIO() + self.config.write(sio) + val = sio.getvalue() + sio.close() + return val + + def get_files(self) -> List[pathlib.Path]: + return [] + + def set_option(self, section: str, option: str, value: str) -> None: + self.config.set(section, option, value) + + def remove_option(self, section: str, option: str) -> None: + self.config.remove_option(section, option) + + def add_section(self, section: str) -> None: + self.config.add_section(section) + + def remove_section(self, section: str) -> None: + self.config.remove_section(section) + + def get_file_sections(self) -> Dict[str, List[str]]: + return {} + + def find_config_file( + self, section: str, option: Optional[str] = None + ) -> Optional[pathlib.Path]: + return None + +class DictSourceWrapper(ConfigSourceWrapper): + def __init__(self): + super().__init__() + + def read_dict(self, cfg: Dict[str, Any]) -> None: + try: + self.config.read_dict(cfg) + except Exception as e: + raise ConfigError("Error Reading config as dict") from e + +class FileSourceWrapper(ConfigSourceWrapper): + section_r = re.compile(r"\s*\[([^]]+)\]") + + def __init__(self, server: Server) -> None: + super().__init__() + self.server = server + self.files: List[pathlib.Path] = [] + self.raw_config_data: List[str] = [] + self.updates_pending: Set[int] = set() + self.file_section_map: Dict[str, List[int]] = {} + self.file_option_map: Dict[Tuple[str, str], List[int]] = {} + self.save_lock = threading.Lock() + self.backup: Dict[str, Any] = {} + + def get_files(self) -> List[pathlib.Path]: + return self.files + + def is_in_transaction(self) -> bool: + return ( + len(self.updates_pending) > 0 or + self.save_lock.locked() + ) + + def backup_source(self) -> None: + self.backup = { + "raw_data": list(self.raw_config_data), + "section_map": copy.deepcopy(self.file_section_map), + "option_map": copy.deepcopy(self.file_option_map), + "config": self.write_to_string() + } + + def _acquire_save_lock(self) -> None: + if not self.files: + raise ConfigError( + "Can only modify file backed configurations" + ) + if not self.save_lock.acquire(blocking=False): + raise ConfigError("Configuration locked, cannot modify") + + def set_option(self, section: str, option: 
str, value: str) -> None: + self._acquire_save_lock() + try: + value = value.strip() + try: + if (self.config.get(section, option).strip() == value): + return + except (configparser.NoSectionError, configparser.NoOptionError): + pass + file_idx: int = 0 + has_sec = has_opt = False + if (section, option) in self.file_option_map: + file_idx = self.file_option_map[(section, option)][0] + has_sec = has_opt = True + elif section in self.file_section_map: + file_idx = self.file_section_map[section][0] + has_sec = True + buf = self.raw_config_data[file_idx].splitlines() + new_opt_list = [f"{option}: {value}"] + if "\n" in value: + vals = [f" {v}" for v in value.split("\n")] + new_opt_list = [f"{option}:"] + vals + sec_info = self._find_section_info(section, buf, raise_error=False) + if sec_info: + options: Dict[str, Any] = sec_info["options"] + indent: int = sec_info["indent"] + opt_start: int = sec_info["end"] + opt_end: int = sec_info["end"] + opt_info: Optional[Dict[str, Any]] = options.get(option) + if opt_info is not None: + indent = opt_info["indent"] + opt_start = opt_info["start"] + opt_end = opt_info["end"] + elif options: + # match indentation of last option in section + last_opt = list(options.values())[-1] + indent = last_opt["indent"] + if indent: + padding = " " * indent + new_opt_list = [f"{padding}{v}" for v in new_opt_list] + buf[opt_start:] = new_opt_list + buf[opt_end:] + else: + # Append new section to the end of the file + new_opt_list.insert(0, f"[{section}]") + if buf and buf[-1].strip() != "": + new_opt_list.insert(0, "") + buf.extend(new_opt_list) + buf.append("") + updated_cfg = "\n".join(buf) + # test changes to the configuration + test_parser = configparser.ConfigParser(interpolation=None) + try: + test_parser.read_string(updated_cfg) + if not test_parser.has_option(section, option): + raise ConfigError("Option not added") + except Exception as e: + raise ConfigError( + f"Failed to set option '{option}' in section " + f"[{section}], file: {self.files[file_idx]}" + ) from e + # Update local configuration/tracking + self.raw_config_data[file_idx] = updated_cfg + self.updates_pending.add(file_idx) + if not has_sec: + self.file_section_map[section] = [file_idx] + if not has_opt: + self.file_option_map[(section, option)] = [file_idx] + if not self.config.has_section(section): + self.config.add_section(section) + self.config.set(section, option, value) + finally: + self.save_lock.release() + + def remove_option(self, section: str, option: str) -> None: + self._acquire_save_lock() + try: + key = (section, option) + if key not in self.file_option_map: + return + pending: List[Tuple[int, str]] = [] + file_indices = self.file_option_map[key] + for idx in file_indices: + buf = self.raw_config_data[idx].splitlines() + try: + sec_info = self._find_section_info(section, buf) + opt_info = sec_info["options"][option] + start = opt_info["start"] + end = opt_info["end"] + if ( + end < len(buf) and + not buf[start-1].strip() + and not buf[end].strip() + ): + end += 1 + buf[start:] = buf[end:] + buf.append("") + updated_cfg = "\n".join(buf) + test_parser = configparser.ConfigParser(interpolation=None) + test_parser.read_string(updated_cfg) + if test_parser.has_option(section, option): + raise ConfigError("Option still exists") + pending.append((idx, updated_cfg)) + except Exception as e: + raise ConfigError( + f"Failed to remove option '{option}' from section " + f"[{section}], file: {self.files[idx]}" + ) from e + # Update configuration/tracking + for (idx, data) in pending: + 
self.updates_pending.add(idx) + self.raw_config_data[idx] = data + del self.file_option_map[key] + self.config.remove_option(section, option) + finally: + self.save_lock.release() + + def add_section(self, section: str) -> None: + self._acquire_save_lock() + try: + if section in self.file_section_map: + return + # add section to end of primary file + buf = self.raw_config_data[0].splitlines() + if buf and buf[-1].strip() != "": + buf.append("") + buf.extend([f"[{section}]", ""]) + updated_cfg = "\n".join(buf) + try: + test_parser = configparser.ConfigParser(interpolation=None) + test_parser.read_string(updated_cfg) + if not test_parser.has_section(section): + raise ConfigError("Section not added") + except Exception as e: + raise ConfigError( + f"Failed to add section [{section}], file: {self.files[0]}" + ) from e + self.updates_pending.add(0) + self.file_section_map[section] = [0] + self.raw_config_data[0] = updated_cfg + self.config.add_section(section) + finally: + self.save_lock.release() + + def remove_section(self, section: str) -> None: + self._acquire_save_lock() + try: + if section not in self.file_section_map: + return + pending: List[Tuple[int, str]] = [] + file_indices = self.file_section_map[section] + for idx in file_indices: + buf = self.raw_config_data[idx].splitlines() + try: + sec_info = self._find_section_info(section, buf) + start = sec_info["start"] + end = sec_info["end"] + if ( + end < len(buf) and + not buf[start-1].strip() + and not buf[end].strip() + ): + end += 1 + buf[start:] = buf[end:] + buf.append("") + updated_cfg = "\n".join(buf) + test_parser = configparser.ConfigParser(interpolation=None) + test_parser.read_string(updated_cfg) + if test_parser.has_section(section): + raise ConfigError("Section still exists") + pending.append((idx, updated_cfg)) + except Exception as e: + raise ConfigError( + f"Failed to remove section [{section}], " + f"file: {self.files[0]}" + ) from e + for (idx, data) in pending: + self.updates_pending.add(idx) + self.raw_config_data[idx] = data + del self.file_section_map[section] + self.config.remove_section(section) + finally: + self.save_lock.release() + + def save(self) -> Awaitable[bool]: + eventloop = self.server.get_event_loop() + if self.server.is_running(): + fut = eventloop.run_in_thread(self._do_save) + else: + fut = eventloop.create_future() + fut.set_result(self._do_save()) + return fut + + def _do_save(self) -> bool: + with self.save_lock: + self.backup.clear() + if not self.updates_pending: + return False + for idx in self.updates_pending: + fpath = self.files[idx] + fpath.write_text( + self.raw_config_data[idx], encoding="utf-8" + ) + self.updates_pending.clear() + return True + + def cancel(self): + self._acquire_save_lock() + try: + if not self.backup or not self.updates_pending: + self.backup.clear() + return + self.raw_config_data = self.backup["raw_data"] + self.file_option_map = self.backup["option_map"] + self.file_section_map = self.backup["section_map"] + self.config.clear() + self.config.read_string(self.backup["config"]) + self.updates_pending.clear() + self.backup.clear() + finally: + self.save_lock.release() + + def revert(self) -> Awaitable[bool]: + eventloop = self.server.get_event_loop() + if self.server.is_running(): + fut = eventloop.run_in_thread(self._do_revert) + else: + fut = eventloop.create_future() + fut.set_result(self._do_revert()) + return fut + + def _do_revert(self) -> bool: + with self.save_lock: + if not self.updates_pending: + return False + self.backup.clear() + entry = self.files[0] + 
self.read_file(entry) + return True + + def write_config( + self, dest_folder: Union[str, pathlib.Path] + ) -> Awaitable[None]: + eventloop = self.server.get_event_loop() + if self.server.is_running(): + fut = eventloop.run_in_thread(self._do_write, dest_folder) + else: + self._do_write(dest_folder) + fut = eventloop.create_future() + fut.set_result(None) + return fut + + def _do_write(self, dest_folder: Union[str, pathlib.Path]) -> None: + with self.save_lock: + if isinstance(dest_folder, str): + dest_folder = pathlib.Path(dest_folder) + dest_folder = dest_folder.expanduser().resolve() + cfg_parent = self.files[0].parent + for i, path in enumerate(self.files): + try: + rel_path = path.relative_to(cfg_parent) + dest_file = dest_folder.joinpath(rel_path) + except ValueError: + dest_file = dest_folder.joinpath( + f"{path.parent.name}-{path.name}" + ) + os.makedirs(str(dest_file.parent), exist_ok=True) + dest_file.write_text(self.raw_config_data[i]) + + def _find_section_info( + self, section: str, file_data: List[str], raise_error: bool = True + ) -> Dict[str, Any]: + options: Dict[str, Dict[str, Any]] = {} + result: Dict[str, Any] = { + "indent": -1, + "start": -1, + "end": -1, + "options": options + } + last_option: str = "" + opt_indent = -1 + for idx, line in enumerate(file_data): + if not line.strip() or line.lstrip()[0] in "#;": + # skip empty lines, whitespace, and comments + continue + line = line.expandtabs() + line_indent = len(line) - len(line.strip()) + if opt_indent != -1 and line_indent > opt_indent: + if last_option: + options[last_option]["end"] = idx + 1 + # Continuation of an option + if result["start"] != -1: + result["end"] = idx + 1 + continue + sec_match = self.section_r.match(line) + if sec_match is not None: + opt_indent = -1 + if result["start"] != -1: + break + cursec = sec_match.group(1) + if section == cursec: + result["indent"] = line_indent + result["start"] = idx + result["end"] = idx + 1 + else: + # This is an option + opt_indent = line_indent + if result["start"] != -1: + result["end"] = idx + 1 + last_option = re.split(r"[:=]", line, 1)[0].strip() + options[last_option] = { + "indent": line_indent, + "start": idx, + "end": idx + 1 + } + if result["start"] != -1: + return result + if raise_error: + raise ConfigError(f"Unable to find section [{section}]") + return {} + + def get_file_sections(self) -> Dict[str, List[str]]: + sections_by_file: Dict[str, List[str]] = { + str(fname): [] for fname in self.files + } + for section, idx_list in self.file_section_map.items(): + for idx in idx_list: + fname = str(self.files[idx]) + sections_by_file[fname].append(section) + return sections_by_file + + def find_config_file( + self, section: str, option: Optional[str] = None + ) -> Optional[pathlib.Path]: + idx: int = -1 + if option is not None: + key = (section, option) + if key in self.file_option_map: + idx = self.file_option_map[key][0] + elif section in self.file_section_map: + idx = self.file_section_map[section][0] + if idx == -1: + return None + return self.files[idx] + + def _write_buffer(self, buffer: List[str], fpath: pathlib.Path) -> None: + if not buffer: + return + self.config.read_string("\n".join(buffer), fpath.name) + + def _parse_file( + self, file_path: pathlib.Path, visited: List[Tuple[int, int]] + ) -> None: + buffer: List[str] = [] + try: + stat = file_path.stat() + cur_stat = (stat.st_dev, stat.st_ino) + if cur_stat in visited: + raise ConfigError( + f"Recursive include directive detected, {file_path}" + ) + visited.append(cur_stat) + 
self.files.append(file_path) + file_index = len(self.files) - 1 + cfg_data = file_path.read_text(encoding="utf-8") + self.raw_config_data.append(cfg_data) + lines = cfg_data.splitlines() + last_section = "" + opt_indent = -1 + for line in lines: + if not line.strip() or line.lstrip()[0] in "#;": + # ignore lines that contain only whitespace/comments + continue + line = line.expandtabs(tabsize=4) + # Search for and remove inline comments + cmt_match = re.search(r" +[#;]", line) + if cmt_match is not None: + line = line[:cmt_match.start()] + # Unescape prefix chars that are preceded by whitespace + line = re.sub(r" \\(#|;)", r" \1", line) + line_indent = len(line) - len(line.lstrip()) + if opt_indent != -1 and line_indent > opt_indent: + # Multi-line value, append to buffer and resume parsing + buffer.append(line) + continue + sect_match = self.section_r.match(line) + if sect_match is not None: + # Section detected + opt_indent = -1 + section = sect_match.group(1) + if section.startswith("include "): + inc_path = section[8:].strip() + if not inc_path: + raise ConfigError( + f"Invalid include directive: [{section}]" + ) + if inc_path[0] == "/": + new_path = pathlib.Path(inc_path).resolve() + paths = sorted(new_path.parent.glob(new_path.name)) + else: + paths = sorted(file_path.parent.glob(inc_path)) + if not paths: + raise ConfigError( + "No files matching include directive " + f"[{section}]" + ) + # Write out buffered data to the config before parsing + # included files + self._write_buffer(buffer, file_path) + buffer.clear() + for p in paths: + self._parse_file(p, visited) + # Don't add included sections to the configparser + continue + else: + last_section = section + if section not in self.file_section_map: + self.file_section_map[section] = [] + elif file_index in self.file_section_map[section]: + raise ConfigError( + f"Duplicate section [{section}] in file " + f"{file_path}" + ) + self.file_section_map[section].insert(0, file_index) + else: + # This line must specify an option + opt_indent = line_indent + option = re.split(r"[:=]", line, 1)[0].strip() + key = (last_section, option) + if key not in self.file_option_map: + self.file_option_map[key] = [] + elif file_index in self.file_option_map[key]: + raise ConfigError( + f"Duplicate option '{option}' in section " + f"[{last_section}], file {file_path} " + ) + self.file_option_map[key].insert(0, file_index) + buffer.append(line) + self._write_buffer(buffer, file_path) + except ConfigError: + raise + except Exception as e: + if not file_path.is_file(): + raise ConfigError( + f"Configuration File Not Found: '{file_path}''") from e + if not os.access(file_path, os.R_OK): + raise ConfigError( + "Moonraker does not have Read/Write permission for " + f"config file at path '{file_path}'") from e + raise ConfigError(f"Error Reading Config: '{file_path}'") from e + + def read_file(self, main_conf: pathlib.Path) -> None: + self.config.clear() + self.files.clear() + self.raw_config_data.clear() + self.updates_pending.clear() + self.file_section_map.clear() + self.file_option_map.clear() + self._parse_file(main_conf, []) + size = sum([len(rawcfg) for rawcfg in self.raw_config_data]) + logging.info( + f"Configuration File '{main_conf}' parsed, total size: {size} B" + ) + + def get_configuration( server: Server, app_args: Dict[str, Any] ) -> ConfigHelper: - config = configparser.ConfigParser(interpolation=None) - section_map = parse_config_file(config, app_args) - if not config.has_section('server'): - raise ConfigError("No section [server] in 
config") - return ConfigHelper(server, config, 'server', {}, section_map) - -def parse_config_file( - config: configparser.ConfigParser, app_args: Dict[str, Any] -) -> Dict[str, List[str]]: start_path = pathlib.Path(app_args['config_file']).expanduser().resolve() - config_files: List[pathlib.Path] = [start_path] - visited_files: Set[Tuple[int, int]] = set() - file_sections: Dict[str, List[str]] = {} - while config_files: - config_path = config_files.pop(0) - try: - stat = config_path.stat() - visited = (stat.st_dev, stat.st_ino) - if visited in visited_files: - raise ConfigError("Recursive include directive detected") - visited_files.add(visited) - data = config_path.read_text() - config.read_string(data) - except Exception as e: - if not config_path.is_file(): - raise ConfigError( - f"Configuration File Not Found: '{config_path}''") from e - if not os.access(config_path, os.R_OK): - raise ConfigError( - "Moonraker does not have Read/Write permission for " - f"config file at path '{config_path}'") from e - raise ConfigError(f"Error Reading Config: '{config_path}'") from e - all_sections: List[str] = re.findall( - r"^\[([^]]+)\]\s*$", data, flags=re.MULTILINE - ) - file_sections[str(config_path)] = [ - sec for sec in all_sections if not sec.startswith("include") - ] - for sec in config.sections(): - if not sec.startswith("include"): - continue - str_path = sec[8:].strip() - if not str_path: - raise ConfigError( - f"Invalid include directive: [{sec}]" - ) - config.remove_section(sec) - if str_path[0] == "/": - path = pathlib.Path(str_path) - paths = sorted(path.parent.glob(path.name)) - else: - paths = sorted(config_path.parent.glob(str_path)) - if not paths: - raise ConfigError( - f"No files matching include directive [{sec}]" - ) - config_files.extend(paths) - return file_sections + source = FileSourceWrapper(server) + source.read_file(start_path) + if not source.config.has_section('server'): + raise ConfigError("No section [server] in config") + return ConfigHelper(server, source, 'server', {}) def find_config_backup(cfg_path: str) -> Optional[str]: cfg = pathlib.Path(cfg_path).expanduser().resolve() diff --git a/moonraker/eventloop.py b/moonraker/eventloop.py index 22b710b..6ee83b9 100644 --- a/moonraker/eventloop.py +++ b/moonraker/eventloop.py @@ -5,6 +5,8 @@ # This file may be distributed under the terms of the GNU GPLv3 license from __future__ import annotations +import os +import contextlib import asyncio import inspect import functools @@ -15,23 +17,36 @@ from typing import ( TYPE_CHECKING, Awaitable, Callable, - Coroutine, Optional, Tuple, TypeVar, Union ) +_uvl_var = os.getenv("MOONRAKER_ENABLE_UVLOOP", "y").lower() +_uvl_enabled = False +if _uvl_var in ["y", "yes", "true"]: + with contextlib.suppress(ImportError): + import uvloop + asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) + _uvl_enabled = True + if TYPE_CHECKING: + from asyncio import AbstractEventLoop _T = TypeVar("_T") FlexCallback = Callable[..., Optional[Awaitable]] TimerCallback = Callable[[float], Union[float, Awaitable[float]]] class EventLoop: + UVLOOP_ENABLED = _uvl_enabled TimeoutError = asyncio.TimeoutError def __init__(self) -> None: self.reset() + @property + def asyncio_loop(self) -> AbstractEventLoop: + return self.aioloop + def reset(self) -> None: self.aioloop = self._create_new_loop() self.add_signal_handler = self.aioloop.add_signal_handler @@ -67,11 +82,16 @@ class EventLoop: *args, **kwargs ) -> None: - if inspect.iscoroutinefunction(callback): - self.aioloop.create_task(callback(*args, 
**kwargs)) # type: ignore - else: - self.aioloop.call_soon( - functools.partial(callback, *args, **kwargs)) + async def _wrapper(): + try: + ret = callback(*args, **kwargs) + if inspect.isawaitable(ret): + await ret + except asyncio.CancelledError: + raise + except Exception: + logging.exception("Error Running Callback") + self.aioloop.create_task(_wrapper()) def delay_callback(self, delay: float, @@ -79,23 +99,14 @@ class EventLoop: *args, **kwargs ) -> asyncio.TimerHandle: - if inspect.iscoroutinefunction(callback): - return self.aioloop.call_later( - delay, self._async_callback, - functools.partial(callback, *args, **kwargs)) - else: - return self.aioloop.call_later( - delay, functools.partial(callback, *args, **kwargs)) + return self.aioloop.call_later( + delay, self.register_callback, + functools.partial(callback, *args, **kwargs) + ) def register_timer(self, callback: TimerCallback): return FlexTimer(self, callback) - def _async_callback(self, callback: Callable[[], Coroutine]) -> None: - # This wrapper delays creation of the coroutine object. In the - # event that a callback is cancelled this prevents "coroutine - # was never awaited" warnings in asyncio - self.aioloop.create_task(callback()) - def run_in_thread(self, callback: Callable[..., _T], *args @@ -158,12 +169,18 @@ class FlexTimer: self.eventloop = eventloop self.callback = callback self.timer_handle: Optional[asyncio.TimerHandle] = None + self.timer_task: Optional[asyncio.Task] = None self.running: bool = False + def in_callback(self) -> bool: + return self.timer_task is not None and not self.timer_task.done() + def start(self, delay: float = 0.): if self.running: return self.running = True + if self.in_callback(): + return call_time = self.eventloop.get_loop_time() + delay self.timer_handle = self.eventloop.call_at( call_time, self._schedule_task) @@ -176,9 +193,14 @@ class FlexTimer: self.timer_handle.cancel() self.timer_handle = None + async def wait_timer_done(self) -> None: + if self.timer_task is None: + return + await self.timer_task + def _schedule_task(self): self.timer_handle = None - self.eventloop.create_task(self._call_wrapper()) + self.timer_task = self.eventloop.create_task(self._call_wrapper()) def is_running(self) -> bool: return self.running @@ -186,8 +208,14 @@ class FlexTimer: async def _call_wrapper(self): if not self.running: return - ret = self.callback(self.eventloop.get_loop_time()) - if isinstance(ret, Awaitable): - ret = await ret + try: + ret = self.callback(self.eventloop.get_loop_time()) + if isinstance(ret, Awaitable): + ret = await ret + except Exception: + self.running = False + raise + finally: + self.timer_task = None if self.running: self.timer_handle = self.eventloop.call_at(ret, self._schedule_task) diff --git a/moonraker/loghelper.py b/moonraker/loghelper.py new file mode 100644 index 0000000..8245fb7 --- /dev/null +++ b/moonraker/loghelper.py @@ -0,0 +1,165 @@ +# Log Management +# +# Copyright (C) 2023 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license + +from __future__ import annotations +import logging +import logging.handlers +import time +import os +import sys +import asyncio +import platform +from queue import SimpleQueue as Queue +from .common import RequestType + +# Annotation imports +from typing import ( + TYPE_CHECKING, + Optional, + Awaitable, + Dict, + List, + Any, +) + +if TYPE_CHECKING: + from .server import Server + from .common import WebRequest + from .components.klippy_connection import KlippyConnection + +# Coroutine friendly 
diff --git a/moonraker/loghelper.py b/moonraker/loghelper.py
new file mode 100644
index 0000000..8245fb7
--- /dev/null
+++ b/moonraker/loghelper.py
@@ -0,0 +1,165 @@
+# Log Management
+#
+# Copyright (C) 2023 Eric Callahan
+#
+# This file may be distributed under the terms of the GNU GPLv3 license
+
+from __future__ import annotations
+import logging
+import logging.handlers
+import time
+import os
+import sys
+import asyncio
+import platform
+from queue import SimpleQueue as Queue
+from .common import RequestType
+
+# Annotation imports
+from typing import (
+    TYPE_CHECKING,
+    Optional,
+    Awaitable,
+    Dict,
+    List,
+    Any,
+)
+
+if TYPE_CHECKING:
+    from .server import Server
+    from .common import WebRequest
+    from .components.klippy_connection import KlippyConnection
+
+# Coroutine friendly QueueHandler courtesy of Martijn Pieters:
+# https://www.zopatista.com/python/2019/05/11/asyncio-logging/
+class LocalQueueHandler(logging.handlers.QueueHandler):
+    def emit(self, record: logging.LogRecord) -> None:
+        # Removed the call to self.prepare(), handle task cancellation
+        try:
+            self.enqueue(record)
+        except asyncio.CancelledError:
+            raise
+        except Exception:
+            self.handleError(record)
+
+# Timed Rotating File Handler, based on Klipper's implementation
+class MoonrakerLoggingHandler(logging.handlers.TimedRotatingFileHandler):
+    def __init__(self, app_args: Dict[str, Any], **kwargs) -> None:
+        super().__init__(app_args['log_file'], **kwargs)
+        self.app_args = app_args
+        self.rollover_info: Dict[str, str] = {}
+
+    def set_rollover_info(self, name: str, item: str) -> None:
+        self.rollover_info[name] = item
+
+    def doRollover(self) -> None:
+        super().doRollover()
+        self.write_header()
+
+    def write_header(self) -> None:
+        if self.stream is None:
+            return
+        strtime = time.asctime(time.gmtime())
+        header = f"{'-'*20} Log Start | {strtime} {'-'*20}\n"
+        self.stream.write(header)
+        self.stream.write(f"platform: {platform.platform(terse=True)}\n")
+        app_section = "\n".join([f"{k}: {v}" for k, v in self.app_args.items()])
+        self.stream.write(app_section + "\n")
+        if self.rollover_info:
+            lines = [line for line in self.rollover_info.values() if line]
+            self.stream.write("\n".join(lines) + "\n")
+
+class LogManager:
+    def __init__(
+        self, app_args: Dict[str, Any], startup_warnings: List[str]
+    ) -> None:
+        root_logger = logging.getLogger()
+        while root_logger.hasHandlers():
+            root_logger.removeHandler(root_logger.handlers[0])
+        queue: Queue = Queue()
+        queue_handler = LocalQueueHandler(queue)
+        root_logger.addHandler(queue_handler)
+        root_logger.setLevel(logging.INFO)
+        stdout_hdlr = logging.StreamHandler(sys.stdout)
+        stdout_fmt = logging.Formatter(
+            '[%(filename)s:%(funcName)s()] - %(message)s')
+        stdout_hdlr.setFormatter(stdout_fmt)
+        app_args_str = f"platform: {platform.platform(terse=True)}\n"
+        app_args_str += "\n".join([f"{k}: {v}" for k, v in app_args.items()])
+        sys.stdout.write(f"\nApplication Info:\n{app_args_str}\n")
+        self.file_hdlr: Optional[MoonrakerLoggingHandler] = None
+        self.listener: Optional[logging.handlers.QueueListener] = None
+        log_file: str = app_args.get('log_file', "")
+        if log_file:
+            try:
+                self.file_hdlr = MoonrakerLoggingHandler(
+                    app_args, when='midnight', backupCount=2)
+                formatter = logging.Formatter(
+                    '%(asctime)s [%(filename)s:%(funcName)s()] - %(message)s')
+                self.file_hdlr.setFormatter(formatter)
+                self.listener = logging.handlers.QueueListener(
+                    queue, self.file_hdlr, stdout_hdlr)
+                self.file_hdlr.write_header()
+            except Exception:
+                log_file = os.path.normpath(log_file)
+                dir_name = os.path.dirname(log_file)
+                startup_warnings.append(
+                    f"Unable to create log file at '{log_file}'. "
+                    f"Make sure that the folder '{dir_name}' exists "
+                    "and Moonraker has Read/Write access to the folder. 
" + ) + if self.listener is None: + self.listener = logging.handlers.QueueListener( + queue, stdout_hdlr) + self.listener.start() + + def set_server(self, server: Server) -> None: + self.server = server + self.server.register_endpoint( + "/server/logs/rollover", RequestType.POST, self._handle_log_rollover + ) + + def set_rollover_info(self, name: str, item: str) -> None: + if self.file_hdlr is not None: + self.file_hdlr.set_rollover_info(name, item) + + def rollover_log(self) -> Awaitable[None]: + if self.file_hdlr is None: + raise self.server.error("File Logging Disabled") + eventloop = self.server.get_event_loop() + return eventloop.run_in_thread(self.file_hdlr.doRollover) + + def stop_logging(self): + self.listener.stop() + + async def _handle_log_rollover( + self, web_request: WebRequest + ) -> Dict[str, Any]: + log_apps = ["moonraker", "klipper"] + app = web_request.get_str("application", None) + result: Dict[str, Any] = {"rolled_over": [], "failed": {}} + if app is not None: + if app not in log_apps: + raise self.server.error(f"Unknown application {app}") + log_apps = [app] + if "moonraker" in log_apps: + try: + ret = self.rollover_log() + if ret is not None: + await ret + except asyncio.CancelledError: + raise + except Exception as e: + result["failed"]["moonraker"] = str(e) + else: + result["rolled_over"].append("moonraker") + if "klipper" in log_apps: + kconn: KlippyConnection + kconn = self.server.lookup_component("klippy_connection") + try: + await kconn.rollover_log() + except self.server.error as e: + result["failed"]["klipper"] = str(e) + else: + result["rolled_over"].append("klipper") + return result diff --git a/moonraker/moonraker.py b/moonraker/moonraker.py index 2e3a4ac..0948717 100644 --- a/moonraker/moonraker.py +++ b/moonraker/moonraker.py @@ -1,517 +1,17 @@ #!/usr/bin/env python3 -# Moonraker - HTTP/Websocket API Server for Klipper +# Legacy entry point for Moonraker # -# Copyright (C) 2020 Eric Callahan +# Copyright (C) 2022 Eric Callahan # # This file may be distributed under the terms of the GNU GPLv3 license -from __future__ import annotations -import pathlib -import sys -import argparse -import importlib -import os -import io -import time -import socket -import logging -import signal -import confighelper -import utils -import asyncio -from eventloop import EventLoop -from app import MoonrakerApp -from klippy_connection import KlippyConnection -from utils import ServerError, SentinelClass -# Annotation imports -from typing import ( - TYPE_CHECKING, - Any, - Optional, - Callable, - Coroutine, - Dict, - List, - Tuple, - Union, - TypeVar, -) -if TYPE_CHECKING: - from websockets import WebRequest, WebsocketManager - from components.file_manager.file_manager import FileManager - FlexCallback = Callable[..., Optional[Coroutine]] - _T = TypeVar("_T") - -API_VERSION = (1, 0, 5) - -CORE_COMPONENTS = [ - 'dbus_manager', 'database', 'file_manager', 'klippy_apis', - 'machine', 'data_store', 'shell_command', 'proc_stats', - 'job_state', 'job_queue', 'http_client', 'announcements', - 'webcam', 'extensions', -] - -SENTINEL = SentinelClass.get_instance() - -class Server: - error = ServerError - def __init__(self, - args: Dict[str, Any], - file_logger: Optional[utils.MoonrakerLoggingHandler], - event_loop: EventLoop - ) -> None: - self.event_loop = event_loop - self.file_logger = file_logger - self.app_args = args - self.config = config = self._parse_config() - self.host: str = config.get('host', "0.0.0.0") - self.port: int = config.getint('port', 7125) - self.ssl_port: int = 
config.getint('ssl_port', 7130) - self.exit_reason: str = "" - self.server_running: bool = False - - # Configure Debug Logging - self.debug = config.getboolean('enable_debug_logging', False) - asyncio_debug = config.getboolean('enable_asyncio_debug', False) - log_level = logging.DEBUG if self.debug else logging.INFO - logging.getLogger().setLevel(log_level) - self.event_loop.set_debug(asyncio_debug) - - # Event initialization - self.events: Dict[str, List[FlexCallback]] = {} - self.components: Dict[str, Any] = {} - self.failed_components: List[str] = [] - self.warnings: List[str] = [] - self.klippy_connection = KlippyConnection(config) - - # Tornado Application/Server - self.moonraker_app = app = MoonrakerApp(config) - self.register_endpoint = app.register_local_handler - self.register_static_file_handler = app.register_static_file_handler - self.register_upload_handler = app.register_upload_handler - self.register_api_transport = app.register_api_transport - - log_warn = args.get('log_warning', "") - if log_warn: - self.add_warning(log_warn) - cfg_warn = args.get("config_warning", "") - if cfg_warn: - self.add_warning(cfg_warn) - - self.register_endpoint( - "/server/info", ['GET'], self._handle_info_request) - self.register_endpoint( - "/server/config", ['GET'], self._handle_config_request) - self.register_endpoint( - "/server/restart", ['POST'], self._handle_server_restart) - self.register_notification("server:klippy_ready") - self.register_notification("server:klippy_shutdown") - self.register_notification("server:klippy_disconnect", - "klippy_disconnected") - self.register_notification("server:gcode_response") - - def get_app_args(self) -> Dict[str, Any]: - return dict(self.app_args) - - def get_event_loop(self) -> EventLoop: - return self.event_loop - - def get_api_version(self) -> Tuple[int, int, int]: - return API_VERSION - - def get_warnings(self) -> List[str]: - return self.warnings - - def is_running(self) -> bool: - return self.server_running - - def is_debug_enabled(self) -> bool: - return self.debug - - def _parse_config(self) -> confighelper.ConfigHelper: - config = confighelper.get_configuration(self, self.app_args) - # log config file - cfg_files = "\n".join(config.get_config_files()) - strio = io.StringIO() - config.write_config(strio) - cfg_item = f"\n{'#'*20} Moonraker Configuration {'#'*20}\n\n" - cfg_item += strio.getvalue() - cfg_item += "#"*65 - cfg_item += f"\nAll Configuration Files:\n{cfg_files}\n" - cfg_item += "#"*65 - strio.close() - self.add_log_rollover_item('config', cfg_item) - return config - - async def server_init(self, start_server: bool = True) -> None: - self.event_loop.add_signal_handler( - signal.SIGTERM, self._handle_term_signal) - - # Perform asynchronous init after the event loop starts - optional_comps: List[Coroutine] = [] - for name, component in self.components.items(): - if not hasattr(component, "component_init"): - continue - if name in CORE_COMPONENTS: - # Process core components in order synchronously - await self._initialize_component(name, component) - else: - optional_comps.append( - self._initialize_component(name, component)) - - # Asynchronous Optional Component Initialization - if optional_comps: - await asyncio.gather(*optional_comps) - - if not self.warnings: - await self.event_loop.run_in_thread(self.config.create_backup) - - if start_server: - await self.start_server() - - async def start_server(self, connect_to_klippy: bool = True) -> None: - # Start HTTP Server - logging.info( - f"Starting Moonraker on ({self.host}, 
{self.port}), " - f"Hostname: {socket.gethostname()}") - self.moonraker_app.listen(self.host, self.port, self.ssl_port) - self.server_running = True - if connect_to_klippy: - self.klippy_connection.connect() - - def add_log_rollover_item(self, name: str, item: str, - log: bool = True) -> None: - if self.file_logger is not None: - self.file_logger.set_rollover_info(name, item) - if log and item is not None: - logging.info(item) - - def add_warning(self, warning: str, log: bool = True) -> None: - self.warnings.append(warning) - if log: - logging.warning(warning) - - # ***** Component Management ***** - async def _initialize_component(self, name: str, component: Any) -> None: - logging.info(f"Performing Component Post Init: [{name}]") - try: - ret = component.component_init() - if ret is not None: - await ret - except Exception as e: - logging.exception(f"Component [{name}] failed post init") - self.add_warning(f"Component '{name}' failed to load with " - f"error: {e}") - self.set_failed_component(name) - - def load_components(self) -> None: - config = self.config - cfg_sections = [s.split()[0] for s in config.sections()] - cfg_sections.remove('server') - - # load core components - for component in CORE_COMPONENTS: - self.load_component(config, component) - if component in cfg_sections: - cfg_sections.remove(component) - - # load remaining optional components - for section in cfg_sections: - self.load_component(config, section, None) - - config.validate_config() - - def load_component(self, - config: confighelper.ConfigHelper, - component_name: str, - default: Union[SentinelClass, _T] = SENTINEL - ) -> Union[_T, Any]: - if component_name in self.components: - return self.components[component_name] - try: - module = importlib.import_module("components." 
+ component_name) - is_core = component_name in CORE_COMPONENTS - fallback: Optional[str] = "server" if is_core else None - config = config.getsection(component_name, fallback) - load_func = getattr(module, "load_component") - component = load_func(config) - except Exception: - msg = f"Unable to load component: ({component_name})" - logging.exception(msg) - if component_name not in self.failed_components: - self.failed_components.append(component_name) - if isinstance(default, SentinelClass): - raise ServerError(msg) - return default - self.components[component_name] = component - logging.info(f"Component ({component_name}) loaded") - return component - - def lookup_component(self, - component_name: str, - default: Union[SentinelClass, _T] = SENTINEL - ) -> Union[_T, Any]: - component = self.components.get(component_name, default) - if isinstance(component, SentinelClass): - raise ServerError(f"Component ({component_name}) not found") - return component - - def set_failed_component(self, component_name: str) -> None: - if component_name not in self.failed_components: - self.failed_components.append(component_name) - - def register_component(self, component_name: str, component: Any) -> None: - if component_name in self.components: - raise self.error( - f"Component '{component_name}' already registered") - self.components[component_name] = component - - def register_notification(self, - event_name: str, - notify_name: Optional[str] = None - ) -> None: - wsm: WebsocketManager = self.lookup_component("websockets") - wsm.register_notification(event_name, notify_name) - - def register_event_handler(self, - event: str, - callback: FlexCallback - ) -> None: - self.events.setdefault(event, []).append(callback) - - def send_event(self, event: str, *args) -> asyncio.Future: - fut = self.event_loop.create_future() - self.event_loop.register_callback( - self._process_event, fut, event, *args) - return fut - - async def _process_event(self, - fut: asyncio.Future, - event: str, - *args - ) -> None: - events = self.events.get(event, []) - coroutines: List[Coroutine] = [] - try: - for func in events: - ret = func(*args) - if ret is not None: - coroutines.append(ret) - if coroutines: - await asyncio.gather(*coroutines) - except ServerError as e: - logging.exception(f"Error Processing Event: {fut}") - if not fut.done(): - fut.set_result(None) - - def register_remote_method(self, - method_name: str, - cb: FlexCallback - ) -> None: - self.klippy_connection.register_remote_method(method_name, cb) - - def get_host_info(self) -> Dict[str, Any]: - return { - 'hostname': socket.gethostname(), - 'address': self.host, - 'port': self.port, - 'ssl_port': self.ssl_port - } - - def get_klippy_info(self) -> Dict[str, Any]: - return self.klippy_connection.klippy_info - - def get_klippy_state(self) -> str: - return self.klippy_connection.state - - def _handle_term_signal(self) -> None: - logging.info(f"Exiting with signal SIGTERM") - self.event_loop.register_callback(self._stop_server, "terminate") - - async def _stop_server(self, exit_reason: str = "restart") -> None: - self.server_running = False - # Call each component's "on_exit" method - for name, component in self.components.items(): - if hasattr(component, "on_exit"): - func: FlexCallback = getattr(component, "on_exit") - try: - ret = func() - if ret is not None: - await ret - except Exception: - logging.exception( - f"Error executing 'on_exit()' for component: {name}") - - # Sleep for 100ms to allow connected websockets to write out - # remaining data - await 
asyncio.sleep(.1) - try: - await self.moonraker_app.close() - except Exception: - logging.exception("Error Closing App") - - # Disconnect from Klippy - try: - await asyncio.wait_for( - asyncio.shield(self.klippy_connection.close( - wait_closed=True)), 2.) - except Exception: - logging.exception("Klippy Disconnect Error") - - # Close all components - for name, component in self.components.items(): - if name in ["application", "websockets", "klippy_connection"]: - # These components have already been closed - continue - if hasattr(component, "close"): - func = getattr(component, "close") - try: - ret = func() - if ret is not None: - await ret - except Exception: - logging.exception( - f"Error executing 'close()' for component: {name}") - # Allow cancelled tasks a chance to run in the eventloop - await asyncio.sleep(.001) - - self.exit_reason = exit_reason - self.event_loop.remove_signal_handler(signal.SIGTERM) - self.event_loop.stop() - - async def _handle_server_restart(self, web_request: WebRequest) -> str: - self.event_loop.register_callback(self._stop_server) - return "ok" - - async def _handle_info_request(self, - web_request: WebRequest - ) -> Dict[str, Any]: - file_manager: Optional[FileManager] = self.lookup_component( - 'file_manager', None) - reg_dirs = [] - if file_manager is not None: - reg_dirs = file_manager.get_registered_dirs() - wsm: WebsocketManager = self.lookup_component('websockets') - mreqs = self.klippy_connection.missing_requirements - return { - 'klippy_connected': self.klippy_connection.is_connected(), - 'klippy_state': self.klippy_connection.state, - 'components': list(self.components.keys()), - 'failed_components': self.failed_components, - 'registered_directories': reg_dirs, - 'warnings': self.warnings, - 'websocket_count': wsm.get_count(), - 'moonraker_version': self.app_args['software_version'], - 'missing_klippy_requirements': mreqs, - 'api_version': API_VERSION, - 'api_version_string': ".".join([str(v) for v in API_VERSION]) - } - - async def _handle_config_request(self, - web_request: WebRequest - ) -> Dict[str, Any]: - cfg_file_list: List[Dict[str, Any]] = [] - cfg_parent = pathlib.Path( - self.app_args["config_file"] - ).expanduser().resolve().parent - for fname, sections in self.config.get_file_sections().items(): - path = pathlib.Path(fname) - try: - rel_path = str(path.relative_to(str(cfg_parent))) - except ValueError: - rel_path = fname - cfg_file_list.append({"filename": rel_path, "sections": sections}) - return { - 'config': self.config.get_parsed_config(), - 'orig': self.config.get_orig_config(), - 'files': cfg_file_list - } - -def main(cmd_line_args: argparse.Namespace) -> None: - cfg_file = cmd_line_args.configfile - app_args = {'config_file': cfg_file} - - # Setup Logging - version = utils.get_software_version() - if cmd_line_args.nologfile: - app_args['log_file'] = "" - else: - app_args['log_file'] = os.path.normpath( - os.path.expanduser(cmd_line_args.logfile)) - app_args['software_version'] = version - app_args['python_version'] = sys.version.replace("\n", " ") - ql, file_logger, warning = utils.setup_logging(app_args) - if warning is not None: - app_args['log_warning'] = warning - - # Start asyncio event loop and server - event_loop = EventLoop() - alt_config_loaded = False - estatus = 0 - while True: - try: - server = Server(app_args, file_logger, event_loop) - server.load_components() - except confighelper.ConfigError as e: - backup_cfg = confighelper.find_config_backup(cfg_file) - logging.exception("Server Config Error") - if 
alt_config_loaded or backup_cfg is None: - estatus = 1 - break - app_args['config_file'] = backup_cfg - app_args['config_warning'] = ( - f"Server configuration error: {e}\n" - f"Loaded server from most recent working configuration:" - f" '{app_args['config_file']}'\n" - f"Please fix the issue in moonraker.conf and restart " - f"the server." - ) - alt_config_loaded = True - continue - except Exception: - logging.exception("Moonraker Error") - estatus = 1 - break - try: - event_loop.register_callback(server.server_init) - event_loop.start() - except Exception: - logging.exception("Server Running Error") - estatus = 1 - break - if server.exit_reason == "terminate": - break - # Restore the original config and clear the warning - # before the server restarts - if alt_config_loaded: - app_args['config_file'] = cfg_file - app_args.pop('config_warning', None) - alt_config_loaded = False - event_loop.close() - # Since we are running outside of the the server - # it is ok to use a blocking sleep here - time.sleep(.5) - logging.info("Attempting Server Restart...") - event_loop.reset() - event_loop.close() - logging.info("Server Shutdown") - ql.stop() - exit(estatus) - - -if __name__ == '__main__': - # Parse start arguments - parser = argparse.ArgumentParser( - description="Moonraker - Klipper API Server") - parser.add_argument( - "-c", "--configfile", default="~/moonraker.conf", - metavar='', - help="Location of moonraker configuration file") - parser.add_argument( - "-l", "--logfile", default="/tmp/moonraker.log", metavar='', - help="log file name and location") - parser.add_argument( - "-n", "--nologfile", action='store_true', - help="disable logging to a file") - main(parser.parse_args()) +if __name__ == "__main__": + import sys + import importlib + import pathlib + pkg_parent = pathlib.Path(__file__).parent.parent + sys.path.pop(0) + sys.path.insert(0, str(pkg_parent)) + svr = importlib.import_module(".server", "moonraker") + svr.main(False) # type: ignore diff --git a/moonraker/server.py b/moonraker/server.py new file mode 100644 index 0000000..ccd1c39 --- /dev/null +++ b/moonraker/server.py @@ -0,0 +1,712 @@ +#!/usr/bin/env python3 +# Moonraker - HTTP/Websocket API Server for Klipper +# +# Copyright (C) 2020 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license + +from __future__ import annotations +import pathlib +import sys +import argparse +import importlib +import os +import io +import time +import socket +import logging +import signal +import asyncio +import uuid +import traceback +from . 
import confighelper +from .eventloop import EventLoop +from .utils import ( + ServerError, + Sentinel, + get_software_info, + json_wrapper, + pip_utils, + source_info +) +from .loghelper import LogManager +from .common import RequestType + +# Annotation imports +from typing import ( + TYPE_CHECKING, + Any, + Optional, + Callable, + Coroutine, + Dict, + List, + Tuple, + Union, + TypeVar, +) +if TYPE_CHECKING: + from .common import WebRequest + from .components.application import MoonrakerApp + from .components.websockets import WebsocketManager + from .components.klippy_connection import KlippyConnection + from .components.file_manager.file_manager import FileManager + from .components.machine import Machine + from .components.extensions import ExtensionManager + FlexCallback = Callable[..., Optional[Coroutine]] + _T = TypeVar("_T", Sentinel, Any) + +API_VERSION = (1, 4, 0) +SERVER_COMPONENTS = ['application', 'websockets', 'klippy_connection'] +CORE_COMPONENTS = [ + 'dbus_manager', 'database', 'file_manager', 'authorization', + 'klippy_apis', 'machine', 'data_store', 'shell_command', + 'proc_stats', 'job_state', 'job_queue', 'history', + 'http_client', 'announcements', 'webcam', 'extensions' +] + + +class Server: + error = ServerError + config_error = confighelper.ConfigError + def __init__(self, + args: Dict[str, Any], + log_manager: LogManager, + event_loop: EventLoop + ) -> None: + self.event_loop = event_loop + self.log_manager = log_manager + self.app_args = args + self.events: Dict[str, List[FlexCallback]] = {} + self.components: Dict[str, Any] = {} + self.failed_components: List[str] = [] + self.warnings: Dict[str, str] = {} + self._is_configured: bool = False + + self.config = config = self._parse_config() + self.host: str = config.get('host', "0.0.0.0") + self.port: int = config.getint('port', 7125) + self.ssl_port: int = config.getint('ssl_port', 7130) + self.exit_reason: str = "" + self.server_running: bool = False + self.pip_recovery_attempted: bool = False + + # Configure Debug Logging + config.getboolean('enable_debug_logging', False, deprecate=True) + self.debug = args["debug"] + log_level = logging.DEBUG if args["verbose"] else logging.INFO + logging.getLogger().setLevel(log_level) + self.event_loop.set_debug(args["asyncio_debug"]) + self.klippy_connection: KlippyConnection + self.klippy_connection = self.load_component(config, "klippy_connection") + + # Tornado Application/Server + self.moonraker_app: MoonrakerApp = self.load_component(config, "application") + app = self.moonraker_app + self.register_endpoint = app.register_endpoint + self.register_debug_endpoint = app.register_debug_endpoint + self.register_static_file_handler = app.register_static_file_handler + self.register_upload_handler = app.register_upload_handler + self.log_manager.set_server(self) + self.websocket_manager: WebsocketManager + self.websocket_manager = self.load_component(config, "websockets") + + for warning in args.get("startup_warnings", []): + self.add_warning(warning) + + self.register_endpoint( + "/server/info", RequestType.GET, self._handle_info_request + ) + self.register_endpoint( + "/server/config", RequestType.GET, self._handle_config_request + ) + self.register_endpoint( + "/server/restart", RequestType.POST, self._handle_server_restart + ) + self.register_notification("server:klippy_ready") + self.register_notification("server:klippy_shutdown") + self.register_notification("server:klippy_disconnect", + "klippy_disconnected") + self.register_notification("server:gcode_response") + + def 
get_app_args(self) -> Dict[str, Any]: + return dict(self.app_args) + + def get_app_arg(self, key: str, default=Sentinel.MISSING) -> Any: + val = self.app_args.get(key, default) + if val is Sentinel.MISSING: + raise KeyError(f"No key '{key}' in Application Arguments") + return val + + def get_event_loop(self) -> EventLoop: + return self.event_loop + + def get_api_version(self) -> Tuple[int, int, int]: + return API_VERSION + + def get_warnings(self) -> List[str]: + return list(self.warnings.values()) + + def is_running(self) -> bool: + return self.server_running + + def is_configured(self) -> bool: + return self._is_configured + + def is_debug_enabled(self) -> bool: + return self.debug + + def is_verbose_enabled(self) -> bool: + return self.app_args["verbose"] + + def _parse_config(self) -> confighelper.ConfigHelper: + config = confighelper.get_configuration(self, self.app_args) + # log config file + cfg_files = "\n".join(config.get_config_files()) + strio = io.StringIO() + config.write_config(strio) + cfg_item = f"\n{'#'*20} Moonraker Configuration {'#'*20}\n\n" + cfg_item += strio.getvalue() + cfg_item += "#"*65 + cfg_item += f"\nAll Configuration Files:\n{cfg_files}\n" + cfg_item += "#"*65 + strio.close() + self.add_log_rollover_item('config', cfg_item) + return config + + async def server_init(self, start_server: bool = True) -> None: + self.event_loop.add_signal_handler( + signal.SIGTERM, self._handle_term_signal) + + # Perform asynchronous init after the event loop starts + optional_comps: List[Coroutine] = [] + for name, component in self.components.items(): + if not hasattr(component, "component_init"): + continue + if name in CORE_COMPONENTS: + # Process core components in order synchronously + await self._initialize_component(name, component) + else: + optional_comps.append( + self._initialize_component(name, component)) + + # Asynchronous Optional Component Initialization + if optional_comps: + await asyncio.gather(*optional_comps) + + if not self.warnings: + await self.event_loop.run_in_thread(self.config.create_backup) + + machine: Machine = self.lookup_component("machine") + if await machine.validate_installation(): + return + + if start_server: + await self.start_server() + + async def start_server(self, connect_to_klippy: bool = True) -> None: + # Open Unix Socket Server + extm: ExtensionManager = self.lookup_component("extensions") + await extm.start_unix_server() + + # Start HTTP Server + logging.info( + f"Starting Moonraker on ({self.host}, {self.port}), " + f"Hostname: {socket.gethostname()}") + self.moonraker_app.listen(self.host, self.port, self.ssl_port) + self.server_running = True + if connect_to_klippy: + self.klippy_connection.connect() + + def add_log_rollover_item( + self, name: str, item: str, log: bool = True + ) -> None: + self.log_manager.set_rollover_info(name, item) + if log and item is not None: + logging.info(item) + + def add_warning( + self, warning: str, warn_id: Optional[str] = None, log: bool = True + ) -> str: + if warn_id is None: + warn_id = str(id(warning)) + self.warnings[warn_id] = warning + if log: + logging.warning(warning) + return warn_id + + def remove_warning(self, warn_id: str) -> None: + self.warnings.pop(warn_id, None) + + # ***** Component Management ***** + async def _initialize_component(self, name: str, component: Any) -> None: + logging.info(f"Performing Component Post Init: [{name}]") + try: + ret = component.component_init() + if ret is not None: + await ret + except Exception as e: + logging.exception(f"Component [{name}] 
failed post init") + self.add_warning(f"Component '{name}' failed to load with " + f"error: {e}") + self.set_failed_component(name) + + def load_components(self) -> None: + config = self.config + cfg_sections = set([s.split()[0] for s in config.sections()]) + cfg_sections.remove('server') + + # load core components + for component in CORE_COMPONENTS: + self.load_component(config, component) + if component in cfg_sections: + cfg_sections.remove(component) + + # load remaining optional components + for section in cfg_sections: + self.load_component(config, section, None) + + config.validate_config() + self._is_configured = True + + def load_component( + self, + config: confighelper.ConfigHelper, + component_name: str, + default: _T = Sentinel.MISSING + ) -> Union[_T, Any]: + if component_name in self.components: + return self.components[component_name] + if self.is_configured(): + raise self.error( + "Cannot load components after configuration", 500 + ) + if component_name in self.failed_components: + raise self.error( + f"Component {component_name} previously failed to load", 500 + ) + try: + full_name = f"moonraker.components.{component_name}" + module = importlib.import_module(full_name) + # Server components use the [server] section for configuration + if component_name not in SERVER_COMPONENTS: + is_core = component_name in CORE_COMPONENTS + fallback: Optional[str] = "server" if is_core else None + config = config.getsection(component_name, fallback) + load_func = getattr(module, "load_component") + component = load_func(config) + except Exception as e: + ucomps: List[str] = self.app_args.get("unofficial_components", []) + if isinstance(e, ModuleNotFoundError) and component_name not in ucomps: + if self.try_pip_recovery(e.name or "unknown"): + return self.load_component(config, component_name, default) + msg = f"Unable to load component: ({component_name})" + logging.exception(msg) + if component_name not in self.failed_components: + self.failed_components.append(component_name) + if default is Sentinel.MISSING: + raise + return default + self.components[component_name] = component + logging.info(f"Component ({component_name}) loaded") + return component + + def try_pip_recovery(self, missing_module: str) -> bool: + if self.pip_recovery_attempted: + return False + self.pip_recovery_attempted = True + src_dir = source_info.source_path() + req_file = src_dir.joinpath("scripts/moonraker-requirements.txt") + if not req_file.is_file(): + return False + pip_cmd = f"{sys.executable} -m pip" + pip_exec = pip_utils.PipExecutor(pip_cmd, logging.info) + logging.info(f"Module '{missing_module}' not found. 
Attempting Pip Update...") + logging.info("Checking Pip Version...") + try: + pipver = pip_exec.get_pip_version() + if pip_utils.check_pip_needs_update(pipver): + cur_ver = pipver.pip_version_string + new_ver = ".".join([str(part) for part in pip_utils.MIN_PIP_VERSION]) + logging.info(f"Updating Pip from {cur_ver} to {new_ver}...") + pip_exec.update_pip() + except Exception: + logging.exception("Pip version check failed") + return False + logging.info("Installing Moonraker python dependencies...") + try: + pip_exec.install_packages(req_file, {"SKIP_CYTHON": "Y"}) + except Exception: + logging.exception("Failed to install python packages") + return False + return True + + def lookup_component( + self, component_name: str, default: _T = Sentinel.MISSING + ) -> Union[_T, Any]: + component = self.components.get(component_name, default) + if component is Sentinel.MISSING: + raise ServerError(f"Component ({component_name}) not found") + return component + + def set_failed_component(self, component_name: str) -> None: + if component_name not in self.failed_components: + self.failed_components.append(component_name) + + def register_component(self, component_name: str, component: Any) -> None: + if component_name in self.components: + raise self.error( + f"Component '{component_name}' already registered") + self.components[component_name] = component + + def register_notification( + self, event_name: str, notify_name: Optional[str] = None + ) -> None: + self.websocket_manager.register_notification(event_name, notify_name) + + def register_event_handler( + self, event: str, callback: FlexCallback + ) -> None: + self.events.setdefault(event, []).append(callback) + + def send_event(self, event: str, *args) -> asyncio.Future: + fut = self.event_loop.create_future() + self.event_loop.register_callback( + self._process_event, fut, event, *args) + return fut + + async def _process_event( + self, fut: asyncio.Future, event: str, *args + ) -> None: + events = self.events.get(event, []) + coroutines: List[Coroutine] = [] + for func in events: + try: + ret = func(*args) + except Exception: + logging.exception(f"Error processing callback in event {event}") + else: + if ret is not None: + coroutines.append(ret) + if coroutines: + results = await asyncio.gather(*coroutines, return_exceptions=True) + for val in results: + if isinstance(val, Exception): + if sys.version_info < (3, 10): + exc_info = "".join(traceback.format_exception( + type(val), val, val.__traceback__ + )) + else: + exc_info = "".join(traceback.format_exception(val)) + logging.info( + f"\nError processing callback in event {event}\n{exc_info}" + ) + if not fut.done(): + fut.set_result(None) + + def register_remote_method( + self, method_name: str, cb: FlexCallback + ) -> None: + self.klippy_connection.register_remote_method(method_name, cb) + + def get_host_info(self) -> Dict[str, Any]: + return { + 'hostname': socket.gethostname(), + 'address': self.host, + 'port': self.port, + 'ssl_port': self.ssl_port + } + + def get_klippy_info(self) -> Dict[str, Any]: + return self.klippy_connection.klippy_info + + def _handle_term_signal(self) -> None: + logging.info("Exiting with signal SIGTERM") + self.event_loop.register_callback(self._stop_server, "terminate") + + def restart(self, delay: Optional[float] = None) -> None: + if delay is None: + self.event_loop.register_callback(self._stop_server) + else: + self.event_loop.delay_callback(delay, self._stop_server) + + async def _stop_server(self, exit_reason: str = "restart") -> None: + self.server_running 
= False
+        # Call each component's "on_exit" method
+        for name, component in self.components.items():
+            if hasattr(component, "on_exit"):
+                func: FlexCallback = getattr(component, "on_exit")
+                try:
+                    ret = func()
+                    if ret is not None:
+                        await ret
+                except Exception:
+                    logging.exception(
+                        f"Error executing 'on_exit()' for component: {name}")
+
+        # Sleep for 100ms to allow connected websockets to write out
+        # remaining data
+        await asyncio.sleep(.1)
+        try:
+            await self.moonraker_app.close()
+            await self.websocket_manager.close()
+        except Exception:
+            logging.exception("Error Closing App")
+
+        # Disconnect from Klippy
+        try:
+            await asyncio.wait_for(
+                asyncio.shield(self.klippy_connection.close(
+                    wait_closed=True)), 2.)
+        except Exception:
+            logging.exception("Klippy Disconnect Error")
+
+        # Close all components
+        for name, component in self.components.items():
+            if name in ["application", "websockets", "klippy_connection"]:
+                # These components have already been closed
+                continue
+            if hasattr(component, "close"):
+                func = getattr(component, "close")
+                try:
+                    ret = func()
+                    if ret is not None:
+                        await ret
+                except Exception:
+                    logging.exception(
+                        f"Error executing 'close()' for component: {name}")
+        # Allow cancelled tasks a chance to run in the eventloop
+        await asyncio.sleep(.001)
+
+        self.exit_reason = exit_reason
+        self.event_loop.remove_signal_handler(signal.SIGTERM)
+        self.event_loop.stop()
+
+    async def _handle_server_restart(self, web_request: WebRequest) -> str:
+        self.event_loop.register_callback(self._stop_server)
+        return "ok"
+
+    async def _handle_info_request(self, web_request: WebRequest) -> Dict[str, Any]:
+        raw = web_request.get_boolean("raw", False)
+        file_manager: Optional[FileManager] = self.lookup_component(
+            'file_manager', None)
+        reg_dirs = []
+        if file_manager is not None:
+            reg_dirs = file_manager.get_registered_dirs()
+        mreqs = self.klippy_connection.missing_requirements
+        if raw:
+            warnings = list(self.warnings.values())
+        else:
+            warnings = [
+                w.replace("\n", "<br/>
") for w in self.warnings.values() + ] + return { + 'klippy_connected': self.klippy_connection.is_connected(), + 'klippy_state': str(self.klippy_connection.state), + 'components': list(self.components.keys()), + 'failed_components': self.failed_components, + 'registered_directories': reg_dirs, + 'warnings': warnings, + 'websocket_count': self.websocket_manager.get_count(), + 'moonraker_version': self.app_args['software_version'], + 'missing_klippy_requirements': mreqs, + 'api_version': API_VERSION, + 'api_version_string': ".".join([str(v) for v in API_VERSION]) + } + + async def _handle_config_request(self, web_request: WebRequest) -> Dict[str, Any]: + cfg_file_list: List[Dict[str, Any]] = [] + cfg_parent = pathlib.Path( + self.app_args["config_file"] + ).expanduser().resolve().parent + for fname, sections in self.config.get_file_sections().items(): + path = pathlib.Path(fname) + try: + rel_path = str(path.relative_to(str(cfg_parent))) + except ValueError: + rel_path = fname + cfg_file_list.append({"filename": rel_path, "sections": sections}) + return { + 'config': self.config.get_parsed_config(), + 'orig': self.config.get_orig_config(), + 'files': cfg_file_list + } + +def main(from_package: bool = True) -> None: + def get_env_bool(key: str) -> bool: + return os.getenv(key, "").lower() in ["y", "yes", "true"] + + # Parse start arguments + parser = argparse.ArgumentParser( + description="Moonraker - Klipper API Server") + parser.add_argument( + "-d", "--datapath", + default=os.getenv("MOONRAKER_DATA_PATH"), + metavar='', + help="Location of Moonraker Data File Path" + ) + parser.add_argument( + "-c", "--configfile", + default=os.getenv("MOONRAKER_CONFIG_PATH"), + metavar='', + help="Path to Moonraker's configuration file" + ) + parser.add_argument( + "-l", "--logfile", + default=os.getenv("MOONRAKER_LOG_PATH"), + metavar='', + help="Path to Moonraker's log file" + ) + parser.add_argument( + "-u", "--unixsocket", + default=os.getenv("MOONRAKER_UDS_PATH"), + metavar="", + help="Path to Moonraker's unix domain socket" + ) + parser.add_argument( + "-n", "--nologfile", + action='store_const', + const=True, + default=get_env_bool("MOONRAKER_DISABLE_FILE_LOG"), + help="disable logging to a file" + ) + parser.add_argument( + "-v", "--verbose", + action='store_const', + const=True, + default=get_env_bool("MOONRAKER_VERBOSE_LOGGING"), + help="Enable verbose logging" + ) + parser.add_argument( + "-g", "--debug", + action='store_const', + const=True, + default=get_env_bool("MOONRAKER_ENABLE_DEBUG"), + help="Enable Moonraker debug features" + ) + parser.add_argument( + "-o", "--asyncio-debug", + action='store_const', + const=True, + default=get_env_bool("MOONRAKER_ASYNCIO_DEBUG"), + help="Enable asyncio debug flag" + ) + cmd_line_args = parser.parse_args() + + startup_warnings: List[str] = [] + dp: str = cmd_line_args.datapath or "~/printer_data" + data_path = pathlib.Path(dp).expanduser().resolve() + if not data_path.exists(): + try: + data_path.mkdir() + except Exception: + startup_warnings.append( + f"Unable to create data path folder at {data_path}" + ) + uuid_path = data_path.joinpath(".moonraker.uuid") + if not uuid_path.is_file(): + instance_uuid = uuid.uuid4().hex + uuid_path.write_text(instance_uuid) + else: + instance_uuid = uuid_path.read_text().strip() + if cmd_line_args.configfile is not None: + cfg_file: str = cmd_line_args.configfile + else: + cfg_file = str(data_path.joinpath("config/moonraker.conf")) + if cmd_line_args.unixsocket is not None: + unix_sock: str = cmd_line_args.unixsocket 
+ else: + comms_dir = data_path.joinpath("comms") + if not comms_dir.exists(): + comms_dir.mkdir() + unix_sock = str(comms_dir.joinpath("moonraker.sock")) + misc_dir = data_path.joinpath("misc") + if not misc_dir.exists(): + misc_dir.mkdir() + app_args = { + "data_path": str(data_path), + "is_default_data_path": cmd_line_args.datapath is None, + "config_file": cfg_file, + "startup_warnings": startup_warnings, + "verbose": cmd_line_args.verbose, + "debug": cmd_line_args.debug, + "asyncio_debug": cmd_line_args.asyncio_debug, + "is_backup_config": False, + "is_python_package": from_package, + "instance_uuid": instance_uuid, + "unix_socket_path": unix_sock + } + + # Setup Logging + app_args.update(get_software_info()) + if cmd_line_args.nologfile: + app_args["log_file"] = "" + elif cmd_line_args.logfile: + app_args["log_file"] = os.path.normpath( + os.path.expanduser(cmd_line_args.logfile)) + else: + app_args["log_file"] = str(data_path.joinpath("logs/moonraker.log")) + app_args["python_version"] = sys.version.replace("\n", " ") + app_args["launch_args"] = " ".join([sys.executable] + sys.argv).strip() + app_args["msgspec_enabled"] = json_wrapper.MSGSPEC_ENABLED + app_args["uvloop_enabled"] = EventLoop.UVLOOP_ENABLED + log_manager = LogManager(app_args, startup_warnings) + + # Start asyncio event loop and server + event_loop = EventLoop() + alt_config_loaded = False + estatus = 0 + while True: + try: + server = Server(app_args, log_manager, event_loop) + server.load_components() + except confighelper.ConfigError as e: + backup_cfg = confighelper.find_config_backup(cfg_file) + logging.exception("Server Config Error") + if alt_config_loaded or backup_cfg is None: + estatus = 1 + break + app_args["config_file"] = backup_cfg + app_args["is_backup_config"] = True + warn_list = list(startup_warnings) + app_args["startup_warnings"] = warn_list + warn_list.append( + f"Server configuration error: {e}\n" + f"Loaded server from most recent working configuration:" + f" '{app_args['config_file']}'\n" + f"Please fix the issue in moonraker.conf and restart " + f"the server." 
+        )
+            alt_config_loaded = True
+            continue
+        except Exception:
+            logging.exception("Moonraker Error")
+            estatus = 1
+            break
+        try:
+            event_loop.register_callback(server.server_init)
+            event_loop.start()
+        except Exception:
+            logging.exception("Server Running Error")
+            estatus = 1
+            break
+        if server.exit_reason == "terminate":
+            break
+        # Restore the original config and clear the warning
+        # before the server restarts
+        if alt_config_loaded:
+            app_args["config_file"] = cfg_file
+            app_args["startup_warnings"] = startup_warnings
+            app_args["is_backup_config"] = False
+            alt_config_loaded = False
+        event_loop.close()
+        # Since we are running outside of the server
+        # it is ok to use a blocking sleep here
+        time.sleep(.5)
+        logging.info("Attempting Server Restart...")
+        del server
+        event_loop.reset()
+    event_loop.close()
+    logging.info("Server Shutdown")
+    log_manager.stop_logging()
+    exit(estatus)
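In `load_component`/`lookup_component` above, `Sentinel.MISSING` distinguishes "caller supplied no default" from a legitimate `None` default. A self-contained toy sketch of the pattern (illustration only, not Moonraker code):

```python
# Toy illustration of the Sentinel.MISSING default pattern used by
# Server.load_component/lookup_component in this patch.
import enum
from typing import Any, Dict

class Sentinel(enum.Enum):
    MISSING = object()

class Registry:
    def __init__(self) -> None:
        self.components: Dict[str, Any] = {"mqtt": object()}

    def lookup_component(self, name: str, default: Any = Sentinel.MISSING) -> Any:
        component = self.components.get(name, default)
        if component is Sentinel.MISSING:
            # No default supplied and the component is absent -> error
            raise KeyError(f"Component ({name}) not found")
        return component

reg = Registry()
assert reg.lookup_component("ldap", None) is None  # explicit default returned
try:
    reg.lookup_component("power")
except KeyError as e:
    print(e)  # no default given, so a missing component is an error
```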
diff --git a/moonraker/utils/__init__.py b/moonraker/utils/__init__.py
new file mode 100644
index 0000000..5a216a3
--- /dev/null
+++ b/moonraker/utils/__init__.py
@@ -0,0 +1,281 @@
+# General Server Utilities
+#
+# Copyright (C) 2020 Eric Callahan
+#
+# This file may be distributed under the terms of the GNU GPLv3 license
+
+from __future__ import annotations
+import logging
+import os
+import glob
+import importlib
+import pathlib
+import sys
+import subprocess
+import asyncio
+import hashlib
+import shlex
+import re
+import struct
+import socket
+import enum
+import ipaddress
+import platform
+from . import source_info
+from . import json_wrapper
+
+# Annotation imports
+from typing import (
+    TYPE_CHECKING,
+    List,
+    Optional,
+    Any,
+    Tuple,
+    Dict,
+    Union
+)
+
+if TYPE_CHECKING:
+    from types import ModuleType
+    from asyncio.trsock import TransportSocket
+
+SYS_MOD_PATHS = glob.glob("/usr/lib/python3*/dist-packages")
+SYS_MOD_PATHS += glob.glob("/usr/lib/python3*/site-packages")
+SYS_MOD_PATHS += glob.glob("/usr/lib/*-linux-gnu/python3*/site-packages")
+IPAddress = Union[ipaddress.IPv4Address, ipaddress.IPv6Address]
+
+try:
+    KERNEL_VERSION = tuple([int(part) for part in platform.release().split(".")[:2]])
+except Exception:
+    KERNEL_VERSION = (0, 0)
+
+class ServerError(Exception):
+    def __init__(self, message: str, status_code: int = 400) -> None:
+        Exception.__init__(self, message)
+        self.status_code = status_code
+
+
+class Sentinel(enum.Enum):
+    MISSING = object()
+
+def _run_git_command(cmd: str) -> str:
+    prog = shlex.split(cmd)
+    process = subprocess.Popen(prog, stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE)
+    ret, err = process.communicate()
+    retcode = process.wait()
+    if retcode == 0:
+        return ret.strip().decode()
+    raise Exception(
+        f"Failed to run git command '{cmd}': {err.decode(errors='ignore')}"
+    )
+
+def _retrieve_git_tag(source_path: str) -> str:
+    cmd = f"git -C {source_path} rev-list --tags --max-count=1"
+    hash = _run_git_command(cmd)
+    cmd = f"git -C {source_path} describe --tags {hash}"
+    tag = _run_git_command(cmd)
+    cmd = f"git -C {source_path} rev-list {tag}..HEAD --count"
+    count = _run_git_command(cmd)
+    return f"{tag}-{count}"
+
+# Parse the git version from the command line. This code
+# is borrowed from Klipper.
+def retrieve_git_version(source_path: str) -> str:
+    # Obtain version info from "git" program
+    cmd = f"git -C {source_path} describe --always --tags --long --dirty"
+    ver = _run_git_command(cmd)
+    tag_match = re.match(r"v\d+\.\d+\.\d+", ver)
+    if tag_match is not None:
+        return ver
+    # This is likely a shallow clone. Resolve the tag and manually create
+    # the version string
+    tag = _retrieve_git_tag(source_path)
+    return f"t{tag}-g{ver}-shallow"
+
+def get_repo_info(source_path: str) -> Dict[str, Any]:
+    repo_info: Dict[str, Any] = {
+        "software_version": "?",
+        "git_branch": "?",
+        "git_remote": "?",
+        "git_repo_url": "?",
+        "modified_files": [],
+        "unofficial_components": []
+    }
+    try:
+        repo_info["software_version"] = retrieve_git_version(source_path)
+        cmd = f"git -C {source_path} branch --no-color"
+        branch_list = _run_git_command(cmd)
+        for line in branch_list.split("\n"):
+            if line[0] == "*":
+                repo_info["git_branch"] = line[1:].strip()
+                break
+        else:
+            return repo_info
+        if repo_info["git_branch"].startswith("(HEAD detached"):
+            parts = repo_info["git_branch"].strip("()").split()[-1]
+            remote, _, _ = parts.partition("/")
+            if not remote:
+                return repo_info
+            repo_info["git_remote"] = remote
+        else:
+            branch = repo_info["git_branch"]
+            cmd = f"git -C {source_path} config --get branch.{branch}.remote"
+            repo_info["git_remote"] = _run_git_command(cmd)
+        cmd = f"git -C {source_path} remote get-url {repo_info['git_remote']}"
+        repo_info["git_repo_url"] = _run_git_command(cmd)
+        cmd = f"git -C {source_path} status --porcelain --ignored"
+        status = _run_git_command(cmd)
+        for line in status.split("\n"):
+            parts = line.strip().split(maxsplit=1)
+            if len(parts) != 2:
+                continue
+            if parts[0] == "M":
+                repo_info["modified_files"].append(parts[1])
+            elif (
+                parts[0] in ("??", "!!")
+                and parts[1].endswith(".py")
+                and parts[1].startswith("components")
+            ):
+                comp = parts[1].split("/", maxsplit=1)[-1]
+                repo_info["unofficial_components"].append(comp)
+    except Exception:
+        logging.exception("Error Retrieving Git Repo Info")
+    return repo_info
+
+def get_software_info() -> Dict[str, Any]:
+    src_path = source_info.source_path()
+    if source_info.is_git_repo():
+        return get_repo_info(str(src_path))
+    pkg_ver = source_info.package_version()
+    if pkg_ver is not None:
+        return {"software_version": pkg_ver}
+    version: str = "?"
+    vfile = src_path.joinpath("moonraker/.version")
+    if vfile.exists():
+        try:
+            version = vfile.read_text().strip()
+        except Exception:
+            logging.exception("Unable to extract version from file")
+            version = "?"
+    return {"software_version": version}
+
+def hash_directory(
+    dir_path: Union[str, pathlib.Path],
+    ignore_exts: List[str],
+    ignore_dirs: List[str]
+) -> str:
+    if isinstance(dir_path, str):
+        dir_path = pathlib.Path(dir_path)
+    checksum = hashlib.blake2s()
+    if not dir_path.exists():
+        return ""
+    for dpath, dnames, fnames in os.walk(dir_path):
+        valid_dirs: List[str] = []
+        for dname in sorted(dnames):
+            if dname[0] == '.' or dname in ignore_dirs:
+                continue
+            valid_dirs.append(dname)
+        dnames[:] = valid_dirs
+        for fname in sorted(fnames):
+            ext = os.path.splitext(fname)[-1].lower()
+            if fname[0] == '.' or ext in ignore_exts:
+                continue
+            fpath = pathlib.Path(os.path.join(dpath, fname))
+            try:
+                checksum.update(fpath.read_bytes())
+            except Exception:
+                pass
+    return checksum.hexdigest()
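`hash_directory` walks a tree deterministically (sorted entries, hidden files and configured extensions skipped) and folds file contents into a single blake2s digest. A hedged usage sketch; the path and filters are examples and assume the package layout introduced by this patch:

```python
# Hypothetical usage of hash_directory defined above; the path is an example.
from moonraker.utils import hash_directory

digest = hash_directory(
    "/home/pi/moonraker/moonraker",
    ignore_exts=[".pyc", ".pyo"],
    ignore_dirs=["__pycache__"],
)
print(digest or "path does not exist")  # blake2s hexdigest over the tree
```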
+def verify_source(
+    path: Optional[Union[str, pathlib.Path]] = None
+) -> Optional[Tuple[str, bool]]:
+    if path is None:
+        path = source_info.source_path()
+    elif isinstance(path, str):
+        path = pathlib.Path(path)
+    rfile = path.joinpath(".release_info")
+    if not rfile.exists():
+        return None
+    try:
+        rinfo = json_wrapper.loads(rfile.read_text())
+    except Exception:
+        return None
+    orig_chksum = rinfo['source_checksum']
+    ign_dirs = rinfo['ignored_dirs']
+    ign_exts = rinfo['ignored_exts']
+    checksum = hash_directory(path, ign_exts, ign_dirs)
+    return checksum, checksum == orig_chksum
+
+def load_system_module(name: str) -> ModuleType:
+    if not SYS_MOD_PATHS:
+        # no dist path detected, fall back to direct import attempt
+        try:
+            return importlib.import_module(name)
+        except ImportError as e:
+            raise ServerError(f"Unable to import module {name}") from e
+    for module_path in SYS_MOD_PATHS:
+        sys.path.insert(0, module_path)
+        try:
+            module = importlib.import_module(name)
+        except ImportError as e:
+            if not isinstance(e, ModuleNotFoundError):
+                logging.exception(f"Failed to load {name} module")
+        else:
+            break
+        finally:
+            sys.path.pop(0)
+    else:
+        raise ServerError(f"Unable to import module {name}")
+    return module
+
+def get_unix_peer_credentials(
+    writer: asyncio.StreamWriter, name: str
+) -> Dict[str, int]:
+    sock: TransportSocket
+    sock = writer.get_extra_info("socket", None)
+    if sock is None:
+        logging.debug(
+            f"Unable to get underlying Unix Socket for {name}, "
+            "can't fetch peer credentials"
+        )
+        return {}
+    data: bytes = b""
+    try:
+        size = struct.calcsize("3I")
+        data = sock.getsockopt(socket.SOL_SOCKET, socket.SO_PEERCRED, size)
+        pid, uid, gid = struct.unpack("3I", data)
+    except asyncio.CancelledError:
+        raise
+    except Exception:
+        logging.exception(
+            f"Failed to get Unix Socket Peer Credentials for {name}"
+            f", raw: 0x{data.hex()}"
+        )
+        return {}
+    return {
+        "process_id": pid,
+        "user_id": uid,
+        "group_id": gid
+    }
+
+def pretty_print_time(seconds: int) -> str:
+    if seconds == 0:
+        return "0 Seconds"
+    fmt_list: List[str] = []
+    times: Dict[str, int] = {}
+    times["Day"], seconds = divmod(seconds, 86400)
+    times["Hour"], seconds = divmod(seconds, 3600)
+    times["Minute"], times["Second"] = divmod(seconds, 60)
+    for ident, val in times.items():
+        if val == 0:
+            continue
+        fmt_list.append(f"{val} {ident}" if val == 1 else f"{val} {ident}s")
+    return ", ".join(fmt_list)
+
+def parse_ip_address(address: str) -> Optional[IPAddress]:
+    try:
+        return ipaddress.ip_address(address)
+    except Exception:
+        return None
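Worked examples for the two helpers above, with outputs checked against the implementations:

```python
# pretty_print_time skips zero-valued units and pluralizes the rest;
# parse_ip_address returns an IPv4Address/IPv6Address or None.
from moonraker.utils import parse_ip_address, pretty_print_time

assert pretty_print_time(0) == "0 Seconds"
assert pretty_print_time(3725) == "1 Hour, 2 Minutes, 5 Seconds"
assert parse_ip_address("192.168.1.5") is not None   # IPv4Address
assert parse_ip_address("::1") is not None           # IPv6Address
assert parse_ip_address("not-an-ip") is None         # invalid input -> None
```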
diff --git a/moonraker/utils/cansocket.py b/moonraker/utils/cansocket.py
new file mode 100644
index 0000000..ac22033
--- /dev/null
+++ b/moonraker/utils/cansocket.py
@@ -0,0 +1,199 @@
+# Async CAN Socket utility
+#
+# Copyright (C) 2023 Eric Callahan
+#
+# This file may be distributed under the terms of the GNU GPLv3 license.
+
+from __future__ import annotations
+import socket
+import asyncio
+import errno
+import struct
+import logging
+from . import ServerError
+from typing import List, Dict, Optional, Union
+
+CAN_FMT = "<IB3x8s"
+CAN_READER_LIMIT = 1024 * 1024
+
+# Klipper Admin Defs
+KLIPPER_ADMIN_ID = 0x3f0
+KLIPPER_SET_NODE_CMD = 0x01
+KATAPULT_SET_NODE_CMD = 0x11
+CMD_QUERY_UNASSIGNED = 0x00
+CANBUS_RESP_NEED_NODEID = 0x20
+
+class CanNode:
+    def __init__(self, node_id: int, cansocket: CanSocket) -> None:
+        self.node_id = node_id
+        self._reader = asyncio.StreamReader(CAN_READER_LIMIT)
+        self._cansocket = cansocket
+
+    async def read(
+        self, n: int = -1, timeout: Optional[float] = 2
+    ) -> bytes:
+        return await asyncio.wait_for(self._reader.read(n), timeout)
+
+    async def readexactly(
+        self, n: int, timeout: Optional[float] = 2
+    ) -> bytes:
+        return await asyncio.wait_for(self._reader.readexactly(n), timeout)
+
+    async def readuntil(
+        self, sep: bytes = b"\x03", timeout: Optional[float] = 2
+    ) -> bytes:
+        return await asyncio.wait_for(self._reader.readuntil(sep), timeout)
+
+    def write(self, payload: Union[bytes, bytearray]) -> None:
+        if isinstance(payload, bytearray):
+            payload = bytes(payload)
+        self._cansocket.send(self.node_id, payload)
+
+    async def write_with_response(
+        self,
+        payload: Union[bytearray, bytes],
+        resp_length: int,
+        timeout: Optional[float] = 2.
+    ) -> bytes:
+        self.write(payload)
+        return await self.readexactly(resp_length, timeout)
+
+    def feed_data(self, data: bytes) -> None:
+        self._reader.feed_data(data)
+
+    def close(self) -> None:
+        self._reader.feed_eof()
+
+class CanSocket:
+    def __init__(self, interface: str):
+        self._loop = asyncio.get_running_loop()
+        self.nodes: Dict[int, CanNode] = {}
+        self.cansock = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
+        self.input_buffer = b""
+        self.output_packets: List[bytes] = []
+        self.input_busy = False
+        self.output_busy = False
+        self.closed = True
+        try:
+            self.cansock.bind((interface,))
+        except Exception:
+            raise ServerError(f"Unable to bind socket to interface '{interface}'", 500)
+        self.closed = False
+        self.cansock.setblocking(False)
+        self._loop.add_reader(self.cansock.fileno(), self._handle_can_response)
+
+    def register_node(self, node_id: int) -> CanNode:
+        if node_id in self.nodes:
+            return self.nodes[node_id]
+        node = CanNode(node_id, self)
+        self.nodes[node_id + 1] = node
+        return node
+
+    def remove_node(self, node_id: int) -> None:
+        node = self.nodes.pop(node_id + 1, None)
+        if node is not None:
+            node.close()
+
+    def _handle_can_response(self) -> None:
+        try:
+            data = self.cansock.recv(4096)
+        except socket.error as e:
+            # If bad file descriptor allow connection to be
+            # closed by the data check
+            if e.errno == errno.EBADF:
+                logging.exception("Can Socket Read Error, closing")
+                data = b''
+            else:
+                return
+        if not data:
+            # socket closed
+            self.close()
+            return
+        self.input_buffer += data
+        if self.input_busy:
+            return
+        self.input_busy = True
+        while len(self.input_buffer) >= 16:
+            packet = self.input_buffer[:16]
+            self._process_packet(packet)
+            self.input_buffer = self.input_buffer[16:]
+        self.input_busy = False
+
+    def _process_packet(self, packet: bytes) -> None:
+        can_id, length, data = struct.unpack(CAN_FMT, packet)
+        can_id &= socket.CAN_EFF_MASK
+        payload = data[:length]
+        node = self.nodes.get(can_id)
+        if node is not None:
+            node.feed_data(payload)
+
+    def send(self, can_id: int, payload: bytes = b"") -> None:
+        if can_id > 0x7FF:
+            can_id |= socket.CAN_EFF_FLAG
+        if not payload:
+            packet = struct.pack(CAN_FMT, can_id, 0, b"")
+            self.output_packets.append(packet)
+        else:
+            while payload:
+                length = min(len(payload), 8)
+                pkt_data = payload[:length]
+                payload = payload[length:]
+                packet = struct.pack(
+                    CAN_FMT, can_id, length, pkt_data)
+                self.output_packets.append(packet)
+        if self.output_busy:
+            return
+        self.output_busy = True
+        asyncio.create_task(self._do_can_send())
+
+    async def _do_can_send(self):
+        while self.output_packets:
+            packet = self.output_packets.pop(0)
+            try:
+                await self._loop.sock_sendall(self.cansock, packet)
+            except socket.error:
+                logging.info("Socket Write Error, closing")
+                self.close()
+                break
+        self.output_busy = False
+
+    def close(self):
+        if self.closed:
+            return
+        self.closed = True
+        for node in self.nodes.values():
+            node.close()
+        self._loop.remove_reader(self.cansock.fileno())
+        self.cansock.close()
+
+async def query_klipper_uuids(can_socket: CanSocket) -> List[Dict[str, str]]:
+    loop = asyncio.get_running_loop()
+    admin_node = can_socket.register_node(KLIPPER_ADMIN_ID)
+    payload = bytes([CMD_QUERY_UNASSIGNED])
+    admin_node.write(payload)
+    curtime = loop.time()
+    endtime = curtime + 2.
+    uuids: List[Dict[str, str]] = []
+    while curtime < endtime:
+        timeout = max(.1, endtime - curtime)
+        try:
+            resp = await admin_node.read(8, timeout)
+        except asyncio.TimeoutError:
+            continue
+        finally:
+            curtime = loop.time()
+        if len(resp) < 7 or resp[0] != CANBUS_RESP_NEED_NODEID:
+            continue
+        app_names = {
+            KLIPPER_SET_NODE_CMD: "Klipper",
+            KATAPULT_SET_NODE_CMD: "Katapult"
+        }
+        app = "Unknown"
+        if len(resp) > 7:
+            app = app_names.get(resp[7], "Unknown")
+        data = resp[1:7]
+        uuids.append(
+            {
+                "uuid": data.hex(),
+                "application": app
+            }
+        )
+    return uuids
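A hedged usage sketch for the CAN utilities above. The interface name is an example; it assumes an up `can0` SocketCAN interface, and `CanSocket` must be constructed while the event loop is running because it calls `asyncio.get_running_loop()`:

```python
# Hypothetical usage of CanSocket/query_klipper_uuids defined above.
import asyncio
from moonraker.utils.cansocket import CanSocket, query_klipper_uuids

async def main() -> None:
    can_sock = CanSocket("can0")  # example interface name
    try:
        # Broadcast the "query unassigned" admin command and collect
        # responses for ~2 seconds
        for node in await query_klipper_uuids(can_sock):
            print(f"{node['application']}: uuid {node['uuid']}")
    finally:
        can_sock.close()

asyncio.run(main())
```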
diff --git a/moonraker/utils/filelock.py b/moonraker/utils/filelock.py
new file mode 100644
index 0000000..32fd391
--- /dev/null
+++ b/moonraker/utils/filelock.py
@@ -0,0 +1,111 @@
+# Async file locking using flock
+#
+# Copyright (C) 2024 Eric Callahan
+#
+# This file may be distributed under the terms of the GNU GPLv3 license
+
+from __future__ import annotations
+import os
+import fcntl
+import errno
+import logging
+import pathlib
+import contextlib
+import asyncio
+from . import ServerError
+from typing import Optional, Type, Union
+from types import TracebackType
+
+class LockTimeout(ServerError):
+    pass
+
+class AsyncExclusiveFileLock(contextlib.AbstractAsyncContextManager):
+    def __init__(
+        self, file_path: pathlib.Path, timeout: Union[int, float] = 0
+    ) -> None:
+        self.lock_path = file_path.parent.joinpath(f".{file_path.name}.lock")
+        self.timeout = timeout
+        self.fd: int = -1
+        self.locked: bool = False
+        self.required_wait: bool = False
+
+    async def __aenter__(self) -> AsyncExclusiveFileLock:
+        await self.acquire()
+        return self
+
+    async def __aexit__(
+        self,
+        __exc_type: Optional[Type[BaseException]],
+        __exc_value: Optional[BaseException],
+        __traceback: Optional[TracebackType]
+    ) -> None:
+        await self.release()
+
+    def _get_lock(self) -> bool:
+        flags = os.O_RDWR | os.O_CREAT | os.O_TRUNC
+        fd = os.open(str(self.lock_path), flags, 0o644)
+        with contextlib.suppress(PermissionError):
+            os.chmod(fd, 0o644)
+        try:
+            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+        except OSError as err:
+            os.close(fd)
+            if err.errno == errno.ENOSYS:
+                raise
+            return False
+        stat = os.fstat(fd)
+        if stat.st_nlink == 0:
+            # File was deleted after opening and before the lock
+            # was acquired, create a new one
+            os.close(fd)
+            return False
+        self.fd = fd
+        return True
+
+    async def acquire(self) -> None:
+        self.required_wait = False
+        if self.timeout < 0:
+            return
+        loop = asyncio.get_running_loop()
+        endtime = loop.time() + self.timeout
+        logged: bool = False
+        while True:
+            try:
+                self.locked = await loop.run_in_executor(None, self._get_lock)
+            except OSError as err:
+                logging.info(
+                    "Failed to acquire advisory lock, allowing unlocked entry. "
+                    f"Error: {err}"
+                )
+                self.locked = False
+                return
+            if self.locked:
+                return
+            self.required_wait = True
+            await asyncio.sleep(.25)
+            if not logged:
+                logged = True
+                logging.info(
+                    f"File lock {self.lock_path} is currently acquired by another "
+                    "process, waiting for release."
+                )
+            if self.timeout > 0 and endtime <= loop.time():
+                raise LockTimeout(
+                    f"Attempt to acquire lock '{self.lock_path}' timed out"
+                )
+
+    def _release_file(self) -> None:
+        with contextlib.suppress(OSError, PermissionError):
+            if self.lock_path.is_file():
+                self.lock_path.unlink()
+        with contextlib.suppress(OSError, PermissionError):
+            fcntl.flock(self.fd, fcntl.LOCK_UN)
+        with contextlib.suppress(OSError, PermissionError):
+            os.close(self.fd)
+
+    async def release(self) -> None:
+        if not self.locked:
+            return
+        loop = asyncio.get_running_loop()
+        await loop.run_in_executor(None, self._release_file)
+        self.locked = False
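A minimal usage sketch of `AsyncExclusiveFileLock` (hypothetical path). Note the timeout semantics above: a negative timeout skips locking entirely, `0` waits indefinitely, and a positive value raises `LockTimeout` once the deadline passes:

```python
# Hypothetical usage of AsyncExclusiveFileLock defined above.
import asyncio
import pathlib
from moonraker.utils.filelock import AsyncExclusiveFileLock, LockTimeout

async def write_exclusive(path: pathlib.Path) -> None:
    try:
        # timeout=5: retry every 250ms for ~5 seconds before giving up
        async with AsyncExclusiveFileLock(path, timeout=5):
            path.write_text("guarded by the sibling .demo.txt.lock file")
    except LockTimeout:
        print("another process holds the lock")

asyncio.run(write_exclusive(pathlib.Path("/tmp/demo.txt")))
```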
+ f"Error: {err}" + ) + self.locked = False + return + if self.locked: + return + self.required_wait = True + await asyncio.sleep(.25) + if not logged: + logged = True + logging.info( + f"File lock {self.lock_path} is currently acquired by another " + "process, waiting for release." + ) + if self.timeout > 0 and endtime >= loop.time(): + raise LockTimeout( + f"Attempt to acquire lock '{self.lock_path}' timed out" + ) + + def _release_file(self) -> None: + with contextlib.suppress(OSError, PermissionError): + if self.lock_path.is_file(): + self.lock_path.unlink() + with contextlib.suppress(OSError, PermissionError): + fcntl.flock(self.fd, fcntl.LOCK_UN) + with contextlib.suppress(OSError, PermissionError): + os.close(self.fd) + + async def release(self) -> None: + if not self.locked: + return + loop = asyncio.get_running_loop() + await loop.run_in_executor(None, self._release_file) + self.locked = False diff --git a/moonraker/utils/ioctl_macros.py b/moonraker/utils/ioctl_macros.py new file mode 100644 index 0000000..08ccbb7 --- /dev/null +++ b/moonraker/utils/ioctl_macros.py @@ -0,0 +1,77 @@ +# Methods to create IOCTL requests +# +# Copyright (C) 2023 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license + +from __future__ import annotations +import ctypes +from typing import Union, Type, TYPE_CHECKING + +""" +This module contains of Python port of the macros avaialble in +"/include/uapi/asm-generic/ioctl.h" from the linux kernel. +""" + +if TYPE_CHECKING: + IOCParamSize = Union[int, str, Type[ctypes._CData]] + +_IOC_NRBITS = 8 +_IOC_TYPEBITS = 8 + +# NOTE: The following could be platform specific. +_IOC_SIZEBITS = 14 +_IOC_DIRBITS = 2 + +_IOC_NRMASK = (1 << _IOC_NRBITS) - 1 +_IOC_TYPEMASK = (1 << _IOC_TYPEBITS) - 1 +_IOC_SIZEMASK = (1 << _IOC_SIZEBITS) - 1 +_IOC_DIRMASK = (1 << _IOC_DIRBITS) - 1 + +_IOC_NRSHIFT = 0 +_IOC_TYPESHIFT = _IOC_NRSHIFT + _IOC_NRBITS +_IOC_SIZESHIFT = _IOC_TYPESHIFT + _IOC_TYPEBITS +_IOC_DIRSHIFT = _IOC_SIZESHIFT + _IOC_SIZEBITS + +# The constants below may also be platform specific +IOC_NONE = 0 +IOC_WRITE = 1 +IOC_READ = 2 + +def _check_value(val: int, name: str, maximum: int): + if val > maximum: + raise ValueError(f"Value '{val}' for '{name}' exceeds max of {maximum}") + +def _IOC_TYPECHECK(param_size: IOCParamSize) -> int: + if isinstance(param_size, int): + return param_size + elif isinstance(param_size, bytearray): + return len(param_size) + elif isinstance(param_size, str): + ctcls = getattr(ctypes, param_size) + return ctypes.sizeof(ctcls) + return ctypes.sizeof(param_size) + +def IOC(direction: int, cmd_type: int, cmd_number: int, param_size: int) -> int: + _check_value(direction, "direction", _IOC_DIRMASK) + _check_value(cmd_type, "cmd_type", _IOC_TYPEMASK) + _check_value(cmd_number, "cmd_number", _IOC_NRMASK) + _check_value(param_size, "ioc_size", _IOC_SIZEMASK) + return ( + (direction << _IOC_DIRSHIFT) | + (param_size << _IOC_SIZESHIFT) | + (cmd_type << _IOC_TYPESHIFT) | + (cmd_number << _IOC_NRSHIFT) + ) + +def IO(cmd_type: int, cmd_number: int) -> int: + return IOC(IOC_NONE, cmd_type, cmd_number, 0) + +def IOR(cmd_type: int, cmd_number: int, param_size: IOCParamSize) -> int: + return IOC(IOC_READ, cmd_type, cmd_number, _IOC_TYPECHECK(param_size)) + +def IOW(cmd_type: int, cmd_number: int, param_size: IOCParamSize) -> int: + return IOC(IOC_WRITE, cmd_type, cmd_number, _IOC_TYPECHECK(param_size)) + +def IOWR(cmd_type: int, cmd_number: int, param_size: IOCParamSize) -> int: + return IOC(IOC_READ | IOC_WRITE, 
cmd_type, cmd_number, _IOC_TYPECHECK(param_size)) diff --git a/moonraker/utils/json_wrapper.py b/moonraker/utils/json_wrapper.py new file mode 100644 index 0000000..5ff0dec --- /dev/null +++ b/moonraker/utils/json_wrapper.py @@ -0,0 +1,33 @@ +# Wrapper for msgspec with stdlib fallback +# +# Copyright (C) 2023 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license + +from __future__ import annotations +import os +import contextlib +from typing import Any, Union, TYPE_CHECKING + +if TYPE_CHECKING: + def dumps(obj: Any) -> bytes: ... # type: ignore # noqa: E704 + def loads(data: Union[str, bytes, bytearray]) -> Any: ... # noqa: E704 + +MSGSPEC_ENABLED = False +_msgspc_var = os.getenv("MOONRAKER_ENABLE_MSGSPEC", "y").lower() +if _msgspc_var in ["y", "yes", "true"]: + with contextlib.suppress(ImportError): + import msgspec + from msgspec import DecodeError as JSONDecodeError + encoder = msgspec.json.Encoder() + decoder = msgspec.json.Decoder() + dumps = encoder.encode # noqa: F811 + loads = decoder.decode # noqa: F811 + MSGSPEC_ENABLED = True +if not MSGSPEC_ENABLED: + import json + from json import JSONDecodeError # type: ignore # noqa: F401,F811 + loads = json.loads # type: ignore + + def dumps(obj) -> bytes: # type: ignore # noqa: F811 + return json.dumps(obj).encode("utf-8") diff --git a/moonraker/utils/pip_utils.py b/moonraker/utils/pip_utils.py new file mode 100644 index 0000000..f220f77 --- /dev/null +++ b/moonraker/utils/pip_utils.py @@ -0,0 +1,247 @@ +# Utilities for managing python packages using Pip +# +# Copyright (C) 2024 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license + +from __future__ import annotations +import os +import re +import shlex +import subprocess +import pathlib +import shutil +import threading +from dataclasses import dataclass + +# Annotation imports +from typing import ( + TYPE_CHECKING, + Any, + Optional, + Union, + Dict, + List, + Tuple, + Callable, + IO +) + +if TYPE_CHECKING: + from ..server import Server + from ..components.shell_command import ShellCommandFactory + +MIN_PIP_VERSION = (23, 3, 2) +MIN_PYTHON_VERSION = (3, 7) + +# Synchronous Subprocess Helpers +def _run_subprocess_with_response( + cmd: str, + timeout: Optional[float] = None, + env: Optional[Dict[str, str]] = None +) -> str: + prog = shlex.split(cmd) + proc = subprocess.run( + prog, capture_output=True, timeout=timeout, env=env, + check=True, text=True, errors="ignore", encoding="utf-8" + ) + if proc.returncode == 0: + return proc.stdout.strip() + err = proc.stderr + raise Exception(f"Failed to run pip command '{cmd}': {err}") + +def _process_subproc_output( + stdout: IO[str], + callback: Callable[[str], None] +) -> None: + for line in stdout: + callback(line.rstrip("\n")) + +def _run_subprocess( + cmd: str, + timeout: Optional[float] = None, + env: Optional[Dict[str, str]] = None, + response_cb: Optional[Callable[[str], None]] = None +) -> None: + prog = shlex.split(cmd) + params: Dict[str, Any] = {"errors": "ignore", "encoding": "utf-8"} + if response_cb is not None: + params = {"stdout": subprocess.PIPE, "stderr": subprocess.STDOUT} + with subprocess.Popen(prog, text=True, env=env, **params) as process: + if process.stdout is not None and response_cb is not None: + reader_thread = threading.Thread( + target=_process_subproc_output, args=(process.stdout, response_cb) + ) + reader_thread.start() + reader_thread.join(timeout) + if reader_thread.is_alive(): + process.kill() + elif timeout is not None: + 
process.wait(timeout) + ret = process.poll() + if ret != 0: + raise Exception(f"Failed to run pip command '{cmd}'") + +@ dataclass(frozen=True) +class PipVersionInfo: + pip_version_string: str + python_version_string: str + + @property + def pip_version(self) -> Tuple[int, ...]: + return tuple(int(part) for part in self.pip_version_string.split(".")) + + @property + def python_version(self) -> Tuple[int, ...]: + return tuple(int(part) for part in self.python_version_string.split(".")) + +class PipExecutor: + def __init__( + self, pip_cmd: str, response_handler: Optional[Callable[[str], None]] = None + ) -> None: + self.pip_cmd = pip_cmd + self.response_hdlr = response_handler + + def call_pip_with_response( + self, + args: str, + timeout: Optional[float] = None, + env: Optional[Dict[str, str]] = None + ) -> str: + return _run_subprocess_with_response(f"{self.pip_cmd} {args}", timeout, env) + + def call_pip( + self, + args: str, + timeout: Optional[float] = None, + env: Optional[Dict[str, str]] = None + ) -> None: + _run_subprocess(f"{self.pip_cmd} {args}", timeout, env, self.response_hdlr) + + def get_pip_version(self) -> PipVersionInfo: + resp = self.call_pip_with_response("--version", 10.) + return parse_pip_version(resp) + + def update_pip(self) -> None: + pip_ver = ".".join([str(part) for part in MIN_PIP_VERSION]) + self.call_pip(f"install pip=={pip_ver}", 120.) + + def install_packages( + self, + packages: Union[pathlib.Path, List[str]], + sys_env_vars: Optional[Dict[str, Any]] = None + ) -> None: + args = prepare_install_args(packages) + env: Optional[Dict[str, str]] = None + if sys_env_vars is not None: + env = dict(os.environ) + env.update(sys_env_vars) + self.call_pip(f"install {args}", timeout=1200., env=env) + + def build_virtualenv(self, py_exec: pathlib.Path, args: str) -> None: + bin_dir = py_exec.parent + env_path = bin_dir.parent.resolve() + if env_path.exists(): + shutil.rmtree(env_path) + _run_subprocess( + f"virtualenv {args} {env_path}", + timeout=600., + response_cb=self.response_hdlr + ) + if not py_exec.exists(): + raise Exception("Failed to create new virtualenv", 500) + +class AsyncPipExecutor: + def __init__( + self, + pip_cmd: str, + server: Server, + notify_callback: Optional[Callable[[bytes], None]] = None + ) -> None: + self.pip_cmd = pip_cmd + self.server = server + self.notify_callback = notify_callback + + def get_shell_cmd(self) -> ShellCommandFactory: + return self.server.lookup_component("shell_command") + + async def get_pip_version(self) -> PipVersionInfo: + resp: str = await self.get_shell_cmd().exec_cmd( + f"{self.pip_cmd} --version", timeout=30., attempts=3, log_stderr=True + ) + return parse_pip_version(resp) + + async def update_pip(self) -> None: + pip_ver = ".".join([str(part) for part in MIN_PIP_VERSION]) + shell_cmd = self.get_shell_cmd() + await shell_cmd.run_cmd_async( + f"{self.pip_cmd} install pip=={pip_ver}", + self.notify_callback, timeout=1200., attempts=3, log_stderr=True + ) + + async def install_packages( + self, + packages: Union[pathlib.Path, List[str]], + sys_env_vars: Optional[Dict[str, Any]] = None + ) -> None: + # Update python dependencies + args = prepare_install_args(packages) + env: Optional[Dict[str, str]] = None + if sys_env_vars is not None: + env = dict(os.environ) + env.update(sys_env_vars) + shell_cmd = self.get_shell_cmd() + await shell_cmd.run_cmd_async( + f"{self.pip_cmd} install {args}", self.notify_callback, + timeout=1200., attempts=3, env=env, log_stderr=True + ) + + async def build_virtualenv(self, py_exec: 
pathlib.Path, args: str) -> None: + bin_dir = py_exec.parent + env_path = bin_dir.parent.resolve() + if env_path.exists(): + shutil.rmtree(env_path) + shell_cmd = self.get_shell_cmd() + await shell_cmd.exec_cmd(f"virtualenv {args} {env_path}", timeout=600.) + if not py_exec.exists(): + raise self.server.error("Failed to create new virtualenv", 500) + +def read_requirements_file(requirements_path: pathlib.Path) -> List[str]: + if not requirements_path.is_file(): + raise FileNotFoundError(f"Requirements file {requirements_path} not found") + data = requirements_path.read_text() + modules: List[str] = [] + for line in data.split("\n"): + line = line.strip() + if not line or line[0] in "#-": + continue + match = re.search(r"\s#", line) + if match is not None: + line = line[:match.start()].strip() + modules.append(line) + return modules + +def parse_pip_version(pip_response: str) -> PipVersionInfo: + match = re.match( + r"^pip ([0-9.]+) from .+? \(python ([0-9.]+)\)$", pip_response.strip() + ) + if match is None: + raise ValueError("Unable to parse pip version from response") + pipver_str: str = match.group(1).strip() + pyver_str: str = match.group(2).strip() + return PipVersionInfo(pipver_str, pyver_str) + +def check_pip_needs_update(version_info: PipVersionInfo) -> bool: + if version_info.python_version < MIN_PYTHON_VERSION: + return False + return version_info.pip_version < MIN_PIP_VERSION + +def prepare_install_args(packages: Union[pathlib.Path, List[str]]) -> str: + if isinstance(packages, pathlib.Path): + if not packages.is_file(): + raise FileNotFoundError( + f"Invalid path to requirements_file '{packages}'" + ) + return f"-r {packages}" + reqs = [req.replace("\"", "'") for req in packages] + return " ".join([f"\"{req}\"" for req in reqs]) diff --git a/moonraker/utils/source_info.py b/moonraker/utils/source_info.py new file mode 100644 index 0000000..d845199 --- /dev/null +++ b/moonraker/utils/source_info.py @@ -0,0 +1,88 @@ +# General Server Utilities +# +# Copyright (C) 2023 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license + +from __future__ import annotations +import importlib.resources as ilr +import pathlib +import sys +import site + +# Annotation imports +from typing import ( + Optional, +) + +def package_path() -> pathlib.Path: + return pathlib.Path(__file__).parent.parent + +def source_path() -> pathlib.Path: + return package_path().parent + +def is_git_repo(src_path: Optional[pathlib.Path] = None) -> bool: + if src_path is None: + src_path = source_path() + return src_path.joinpath(".git").is_dir() + +def find_git_repo(src_path: Optional[pathlib.Path] = None) -> Optional[pathlib.Path]: + if src_path is None: + src_path = source_path() + if src_path.joinpath(".git").is_dir(): + return src_path + for parent in src_path.parents: + if parent.joinpath(".git").is_dir(): + return parent + return None + +def is_dist_package(src_path: Optional[pathlib.Path] = None) -> bool: + if src_path is None: + # Check Moonraker's source path + src_path = source_path() + if hasattr(site, "getsitepackages"): + # The site module is present, get site packages for Moonraker's venv. + # This is more "correct" than the fallback method. + site_dirs = site.getsitepackages() + return str(src_path) in site_dirs + # Make an assumption based on the source path. 
If its name is + # site-packages or dist-packages then presumably it is an + # installed package + return src_path.name in ["dist-packages", "site-packages"] + +def package_version() -> Optional[str]: + try: + import moonraker.__version__ as ver # type: ignore + version = ver.__version__ + except Exception: + pass + else: + if version: + return version + return None + +def read_asset(asset_name: str) -> Optional[str]: + if sys.version_info < (3, 10): + with ilr.path("moonraker.assets", asset_name) as p: + if not p.is_file(): + return None + return p.read_text() + else: + files = ilr.files("moonraker.assets") + with ilr.as_file(files.joinpath(asset_name)) as p: + if not p.is_file(): + return None + return p.read_text() + +def get_asset_path() -> Optional[pathlib.Path]: + if sys.version_info < (3, 10): + with ilr.path("moonraker.assets", "__init__.py") as p: + asset_path = p.parent + else: + files = ilr.files("moonraker.assets") + with ilr.as_file(files.joinpath("__init__.py")) as p: + asset_path = p.parent + if not asset_path.is_dir(): + # Somehow running in a zipapp. This is an error. + return None + return asset_path diff --git a/moonraker/utils/sysfs_devs.py b/moonraker/utils/sysfs_devs.py new file mode 100644 index 0000000..4f906cb --- /dev/null +++ b/moonraker/utils/sysfs_devs.py @@ -0,0 +1,467 @@ +# Utilities for enumerating devices using sysfs +# +# Copyright (C) 2024 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license +from __future__ import annotations +import os +import fcntl +import ctypes +import pathlib +import enum +from ..common import ExtendedFlag +from . import ioctl_macros +from typing import ( + Dict, + List, + Any, + Union, + Optional +) + +DEFAULT_USB_IDS_PATH = "/usr/share/misc/usb.ids" +USB_DEVICE_PATH = "/sys/bus/usb/devices" +TTY_PATH = "/sys/class/tty" +SER_BYPTH_PATH = "/dev/serial/by-path" +SER_BYID_PATH = "/dev/serial/by-id" +V4L_DEVICE_PATH = "/sys/class/video4linux" +V4L_BYPTH_PATH = "/dev/v4l/by-path" +V4L_BYID_PATH = "/dev/v4l/by-id" + +OPTIONAL_USB_INFO = ["manufacturer", "product", "serial"] +NULL_DESCRIPTIONS = [ + "?", "none", "undefined", "reserved/undefined", "unused", "no subclass" +] + +def read_item(parent: pathlib.Path, filename: str) -> str: + return parent.joinpath(filename).read_text().strip() + +def find_usb_folder(usb_path: pathlib.Path) -> Optional[str]: + # Find the sysfs usb folder from a child folder + while usb_path.is_dir() and usb_path.name: + dnum_file = usb_path.joinpath("devnum") + bnum_file = usb_path.joinpath("busnum") + if not dnum_file.is_file() or not bnum_file.is_file(): + usb_path = usb_path.parent + continue + devnum = int(dnum_file.read_text().strip()) + busnum = int(bnum_file.read_text().strip()) + return f"{busnum}:{devnum}" + return None + +class UsbIdData: + _usb_info_cache: Dict[str, str] = { + "DI:1d50": "OpenMoko, Inc", + "DI:1d50:614e": "Klipper 3d-Printer Firmware", + "DI:1d50:6177": "Katapult Bootloader (CDC_ACM)" + } + + def __init__(self, usb_id_path: Union[str, pathlib.Path]) -> None: + if isinstance(usb_id_path, str): + usb_id_path = pathlib.Path(usb_id_path) + self.usb_id_path = usb_id_path.expanduser().resolve() + self.parsed: bool = False + self.usb_info: Dict[str, str] = {} + + def _is_hex(self, item: str) -> bool: + try: + int(item, 16) + except ValueError: + return False + return True + + def get_item(self, key: str, check_null: bool = False) -> Optional[str]: + item = self.usb_info.get(key, self._usb_info_cache.get(key)) + if item is None: + if self.parsed: + return 
None + self.parse_usb_ids() + item = self.usb_info.get(key) + if item is None: + return None + self._usb_info_cache[key] = item + if check_null and item.lower() in NULL_DESCRIPTIONS: + return None + return item + + def parse_usb_ids(self) -> None: + self.parsed = True + if not self.usb_id_path.is_file(): + return + top_key: str = "" + sub_key: str = "" + with self.usb_id_path.open(encoding="latin-1") as f: + while True: + line = f.readline() + if not line: + break + stripped_line = line.strip() + if not stripped_line or stripped_line[0] == "#": + continue + if line[:2] == "\t\t": + if not sub_key: + continue + tertiary_id, desc = stripped_line.split(maxsplit=1) + self.usb_info[f"{sub_key}:{tertiary_id.lower()}"] = desc + elif line[0] == "\t": + if not top_key: + continue + sub_id, desc = stripped_line.split(maxsplit=1) + sub_key = f"{top_key}:{sub_id.lower()}" + self.usb_info[sub_key] = desc + else: + id_type, data = line.rstrip().split(maxsplit=1) + if len(id_type) == 4 and self._is_hex(id_type): + # This is a vendor ID + top_key = f"DI:{id_type.lower()}" + self.usb_info[top_key] = data + elif id_type: + # This is a subtype + num_id, desc = data.split(maxsplit=1) + top_key = f"{id_type}:{num_id.lower()}" + self.usb_info[top_key] = desc + else: + break + + def get_product_info(self, vendor_id: str, product_id: str) -> Dict[str, Any]: + vendor_name = self.get_item(f"DI:{vendor_id}") + if vendor_name is None: + return { + "description": None, + "manufacturer": None, + "product": None, + } + product_name = self.get_item(f"DI:{vendor_id}:{product_id}") + return { + "description": f"{vendor_name} {product_name or ''}".strip(), + "manufacturer": vendor_name, + "product": product_name, + } + + def get_class_info( + self, cls_id: str, subcls_id: str, proto_id: str + ) -> Dict[str, Any]: + cls_desc = self.get_item(f"C:{cls_id}") + if cls_desc is None or cls_id == "00": + return { + "class": None, + "subclass": None, + "protocol": None + } + return { + "class": cls_desc, + "subclass": self.get_item(f"C:{cls_id}:{subcls_id}", True), + "protocol": self.get_item(f"C:{cls_id}:{subcls_id}:{proto_id}", True) + } + +def find_usb_devices() -> List[Dict[str, Any]]: + dev_folder = pathlib.Path(USB_DEVICE_PATH) + if not dev_folder.is_dir(): + return [] + usb_devs: List[Dict[str, Any]] = [] + # Find sysfs usb device descriptors + for dev_cfg_path in dev_folder.glob("*/bDeviceClass"): + dev_folder = dev_cfg_path.parent + device_info: Dict[str, Any] = {} + try: + device_info["device_num"] = int(read_item(dev_folder, "devnum")) + device_info["bus_num"] = int(read_item(dev_folder, "busnum")) + device_info["vendor_id"] = read_item(dev_folder, "idVendor").lower() + device_info["product_id"] = read_item(dev_folder, "idProduct").lower() + usb_location = f"{device_info['bus_num']}:{device_info['device_num']}" + device_info["usb_location"] = usb_location + dev_cls = read_item(dev_folder, "bDeviceClass").lower() + dev_subcls = read_item(dev_folder, "bDeviceSubClass").lower() + dev_proto = read_item(dev_folder, "bDeviceProtocol").lower() + device_info["class_ids"] = [dev_cls, dev_subcls, dev_proto] + for field in OPTIONAL_USB_INFO: + if dev_folder.joinpath(field).is_file(): + device_info[field] = read_item(dev_folder, field) + elif field not in device_info: + device_info[field] = None + except Exception: + continue + usb_devs.append(device_info) + return usb_devs + +def find_serial_devices() -> List[Dict[str, Any]]: + serial_devs: List[Dict[str, Any]] = [] + devs_by_path: Dict[str, str] = {} + devs_by_id: Dict[str, str] = 
{} + by_path_dir = pathlib.Path(SER_BYPTH_PATH) + by_id_dir = pathlib.Path(SER_BYID_PATH) + dev_root_folder = pathlib.Path("/dev") + if by_path_dir.is_dir(): + devs_by_path = { + dev.resolve().name: str(dev) for dev in by_path_dir.iterdir() + } + if by_id_dir.is_dir(): + devs_by_id = { + dev.resolve().name: str(dev) for dev in by_id_dir.iterdir() + } + tty_dir = pathlib.Path(TTY_PATH) + for tty_path in tty_dir.iterdir(): + device_folder = tty_path.joinpath("device") + if not device_folder.is_dir(): + continue + uartclk_file = tty_path.joinpath("uartclk") + port_file = tty_path.joinpath("port") + device_name = tty_path.name + driver_name = device_folder.joinpath("driver").resolve().name + device_info: Dict[str, Any] = { + "device_type": "unknown", + "device_path": str(dev_root_folder.joinpath(device_name)), + "device_name": device_name, + "driver_name": driver_name, + "path_by_hardware": devs_by_path.get(device_name), + "path_by_id": devs_by_id.get(device_name), + "usb_location": None + } + if uartclk_file.is_file() and port_file.is_file(): + # This is a potential hardware uart. Need to + # validate that "serial8250" devices have a port + # number of zero + if driver_name == "serial8250": + portnum = int(port_file.read_text().strip(), 16) + if portnum != 0: + # Not a usable UART + continue + device_info["device_type"] = "hardware_uart" + else: + usb_path = device_folder.resolve() + usb_location: Optional[str] = find_usb_folder(usb_path) + if usb_location is not None: + device_info["device_type"] = "usb" + device_info["usb_location"] = usb_location + serial_devs.append(device_info) + return serial_devs + +class struct_v4l2_capability(ctypes.Structure): + _fields_ = [ + ("driver", ctypes.c_char * 16), + ("card", ctypes.c_char * 32), + ("bus_info", ctypes.c_char * 32), + ("version", ctypes.c_uint32), + ("capabilities", ctypes.c_uint32), + ("device_caps", ctypes.c_uint32), + ("reserved", ctypes.c_uint32 * 3), + ] + +class struct_v4l2_fmtdesc(ctypes.Structure): + _fields_ = [ + ("index", ctypes.c_uint32), + ("type", ctypes.c_uint32), + ("flags", ctypes.c_uint32), + ("description", ctypes.c_char * 32), + ("pixelformat", ctypes.c_uint32), + ("reserved", ctypes.c_uint32 * 4) + ] + +class struct_v4l2_frmsize_discrete(ctypes.Structure): + _fields_ = [ + ("width", ctypes.c_uint32), + ("height", ctypes.c_uint32), + ] + + +class struct_v4l2_frmsize_stepwise(ctypes.Structure): + _fields_ = [ + ("min_width", ctypes.c_uint32), + ("max_width", ctypes.c_uint32), + ("step_width", ctypes.c_uint32), + ("min_height", ctypes.c_uint32), + ("max_height", ctypes.c_uint32), + ("step_height", ctypes.c_uint32), + ] + +class struct_v4l2_frmsize_union(ctypes.Union): + _fields_ = [ + ("discrete", struct_v4l2_frmsize_discrete), + ("stepwise", struct_v4l2_frmsize_stepwise) + ] + +class struct_v4l2_frmsizeenum(ctypes.Structure): + _anonymous_ = ("size",) + _fields_ = [ + ("index", ctypes.c_uint32), + ("pixel_format", ctypes.c_uint32), + ("type", ctypes.c_uint32), + ("size", struct_v4l2_frmsize_union), + ("reserved", ctypes.c_uint32 * 2) + ] + +class V4L2Capability(ExtendedFlag): + VIDEO_CAPTURE = 0x00000001 # noqa: E221 + VIDEO_OUTPUT = 0x00000002 # noqa: E221 + VIDEO_OVERLAY = 0x00000004 # noqa: E221 + VBI_CAPTURE = 0x00000010 # noqa: E221 + VBI_OUTPUT = 0x00000020 # noqa: E221 + SLICED_VBI_CAPTURE = 0x00000040 # noqa: E221 + SLICED_VBI_OUTPUT = 0x00000080 # noqa: E221 + RDS_CAPTURE = 0x00000100 # noqa: E221 + VIDEO_OUTPUT_OVERLAY = 0x00000200 + HW_FREQ_SEEK = 0x00000400 # noqa: E221 + RDS_OUTPUT = 0x00000800 # noqa: E221 + 
VIDEO_CAPTURE_MPLANE = 0x00001000 + VIDEO_OUTPUT_MPLANE = 0x00002000 # noqa: E221 + VIDEO_M2M_MPLANE = 0x00004000 # noqa: E221 + VIDEO_M2M = 0x00008000 # noqa: E221 + TUNER = 0x00010000 # noqa: E221 + AUDIO = 0x00020000 # noqa: E221 + RADIO = 0x00040000 # noqa: E221 + MODULATOR = 0x00080000 # noqa: E221 + SDR_CAPTURE = 0x00100000 # noqa: E221 + EXT_PIX_FORMAT = 0x00200000 # noqa: E221 + SDR_OUTPUT = 0x00400000 # noqa: E221 + META_CAPTURE = 0x00800000 # noqa: E221 + READWRITE = 0x01000000 # noqa: E221 + STREAMING = 0x04000000 # noqa: E221 + META_OUTPUT = 0x08000000 # noqa: E221 + TOUCH = 0x10000000 # noqa: E221 + IO_MC = 0x20000000 # noqa: E221 + SET_DEVICE_CAPS = 0x80000000 # noqa: E221 + +class V4L2FrameSizeTypes(enum.IntEnum): + DISCRETE = 1 + CONTINUOUS = 2 + STEPWISE = 3 + +class V4L2FormatFlags(ExtendedFlag): + COMPRESSED = 0x0001 + EMULATED = 0x0002 + + +V4L2_BUF_TYPE_VIDEO_CAPTURE = 1 +V4L2_QUERYCAP = ioctl_macros.IOR(ord("V"), 0, struct_v4l2_capability) +V4L2_ENUM_FMT = ioctl_macros.IOWR(ord("V"), 2, struct_v4l2_fmtdesc) +V4L2_ENUM_FRAMESIZES = ioctl_macros.IOWR(ord("V"), 74, struct_v4l2_frmsizeenum) + +def v4l2_fourcc_from_fmt(pixelformat: int) -> str: + fmt = bytes([((pixelformat >> (8 * i)) & 0xFF) for i in range(4)]) + return fmt.decode(encoding="ascii", errors="ignore") + +def v4l2_fourcc(format: str) -> int: + assert len(format) == 4 + result: int = 0 + for idx, val in enumerate(format.encode()): + result |= (val << (8 * idx)) & 0xFF + return result + +def _get_resolutions(fd: int, pixel_format: int) -> List[str]: + res_info = struct_v4l2_frmsizeenum() + result: List[str] = [] + for idx in range(128): + res_info.index = idx + res_info.pixel_format = pixel_format + try: + fcntl.ioctl(fd, V4L2_ENUM_FRAMESIZES, res_info) + except OSError: + break + if res_info.type != V4L2FrameSizeTypes.DISCRETE: + break + width = res_info.discrete.width + height = res_info.discrete.height + result.append(f"{width}x{height}") + return result + +def _get_modes(fd: int) -> List[Dict[str, Any]]: + pix_info = struct_v4l2_fmtdesc() + result: List[Dict[str, Any]] = [] + for idx in range(128): + pix_info.index = idx + pix_info.type = V4L2_BUF_TYPE_VIDEO_CAPTURE + try: + fcntl.ioctl(fd, V4L2_ENUM_FMT, pix_info) + except OSError: + break + desc: str = pix_info.description.decode() + pixel_format: int = pix_info.pixelformat + flags = V4L2FormatFlags(pix_info.flags) + resolutions = _get_resolutions(fd, pixel_format) + if not resolutions: + continue + result.append( + { + "format": v4l2_fourcc_from_fmt(pixel_format), + "description": desc, + "flags": [f.name for f in flags], + "resolutions": resolutions + } + ) + return result + +def find_video_devices() -> List[Dict[str, Any]]: + v4lpath = pathlib.Path(V4L_DEVICE_PATH) + if not v4lpath.is_dir(): + return [] + v4l_by_path_dir = pathlib.Path(V4L_BYPTH_PATH) + v4l_by_id_dir = pathlib.Path(V4L_BYID_PATH) + dev_root_folder = pathlib.Path("/dev") + v4l_devs_by_path: Dict[str, str] = {} + v4l_devs_by_id: Dict[str, str] = {} + if v4l_by_path_dir.is_dir(): + v4l_devs_by_path = { + dev.resolve().name: str(dev) for dev in v4l_by_path_dir.iterdir() + } + if v4l_by_id_dir.is_dir(): + v4l_devs_by_id = { + dev.resolve().name: str(dev) for dev in v4l_by_id_dir.iterdir() + } + v4l_devices: List[Dict[str, Any]] = [] + for v4ldev_path in v4lpath.iterdir(): + devfs_name = v4ldev_path.name + devfs_path = dev_root_folder.joinpath(devfs_name) + # The video4linux sysfs implmentation provides limited device + # info. 
Use the VIDEOC_QUERYCAPS ioctl to retreive extended + # information about the v4l2 device. + fd: int = -1 + try: + fd = os.open(str(devfs_path), os.O_RDONLY | os.O_NONBLOCK) + cap_info = struct_v4l2_capability() + fcntl.ioctl(fd, V4L2_QUERYCAP, cap_info) + capabilities = V4L2Capability(cap_info.device_caps) + if not capabilities & V4L2Capability.VIDEO_CAPTURE: + # Skip devices that do not capture video + continue + modes = _get_modes(fd) + except Exception: + continue + finally: + if fd != -1: + os.close(fd) + ver_tuple = tuple( + [str((cap_info.version >> (i)) & 0xFF) for i in range(16, -1, -8)] + ) + video_device: Dict[str, Any] = { + "device_name": devfs_name, + "device_path": str(devfs_path), + "camera_name": cap_info.card.decode(), + "driver_name": cap_info.driver.decode(), + "hardware_bus": cap_info.bus_info.decode(), + "capabilities": [cap.name for cap in capabilities], + "version": ".".join(ver_tuple), + "path_by_hardware": v4l_devs_by_path.get(devfs_name), + "path_by_id": v4l_devs_by_id.get(devfs_name), + "alt_name": None, + "usb_location": None, + "modes": modes + } + name_file = v4ldev_path.joinpath("name") + if name_file.is_file(): + video_device["alt_name"] = read_item(v4ldev_path, "name") + device_path = v4ldev_path.joinpath("device") + if device_path.is_dir(): + usb_location = find_usb_folder(device_path.resolve()) + if usb_location is not None: + video_device["usb_location"] = usb_location + v4l_devices.append(video_device) + + def idx_sorter(item: Dict[str, Any]) -> int: + try: + return int(item["device_name"][5:]) + except ValueError: + return -1 + # Sort by string first, then index + v4l_devices.sort(key=lambda item: item["device_name"]) + v4l_devices.sort(key=idx_sorter) + return v4l_devices diff --git a/moonraker/utils/versions.py b/moonraker/utils/versions.py new file mode 100644 index 0000000..51d8f4f --- /dev/null +++ b/moonraker/utils/versions.py @@ -0,0 +1,383 @@ +# Semantic Version Parsing and Comparison +# +# Copyright (C) 2023 Eric Callahan +# +# This file may be distributed under the terms of the GNU GPLv3 license. +from __future__ import annotations +import re +from enum import Flag, auto +from typing import Tuple, Optional, Dict, List + +# Python regex for parsing version strings from PEP 440 +# https://peps.python.org/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions +VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+GIT_VERSION_PATTERN = r"""
+    (?P<tag>
+        v?
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+    )
+    (?:
+        (?:-(?P<dev_n>[0-9]+))                            # dev count
+        (?:-g(?P<hash>[a-fA-F0-9]+))?                     # abbrev hash
+    )?
+    (?P<dirty>-dirty)?
+    (?P<inferred>-(?:inferred|shallow))?
+"""
+
+_py_version_regex = re.compile(
+    r"^\s*" + VERSION_PATTERN + r"\s*$",
+    re.VERBOSE | re.IGNORECASE,
+)
+
+_git_version_regex = re.compile(
+    r"^\s*" + GIT_VERSION_PATTERN + r"\s*$",
+    re.VERBOSE | re.IGNORECASE,
+)
+
+class ReleaseType(Flag):
+    FINAL = auto()
+    ALPHA = auto()
+    BETA = auto()
+    RELEASE_CANDIDATE = auto()
+    POST = auto()
+    DEV = auto()
+
+class BaseVersion:
+    def __init__(self, version: str) -> None:
+        self._release: str = "?"
+        self._release_type = ReleaseType(0)
+        self._tag: str = "?"
+        self._orig: str = version.strip()
+        self._release_tup: Tuple[int, ...] = tuple()
+        self._extra_tup: Tuple[int, ...] = tuple()
+        self._has_dev_part: bool = False
+        self._dev_count: int = 0
+        self._valid_version: bool = False
+
+    @property
+    def full_version(self) -> str:
+        return self._orig
+
+    @property
+    def release(self) -> str:
+        return self._release
+
+    @property
+    def tag(self) -> str:
+        return self._tag
+
+    @property
+    def release_type(self) -> ReleaseType:
+        return self._release_type
+
+    @property
+    def dev_count(self) -> int:
+        return self._dev_count
+
+    def is_pre_release(self) -> bool:
+        for pr_idx in (1, 2, 3):
+            if ReleaseType(1 << pr_idx) in self._release_type:
+                return True
+        return False
+
+    def is_post_release(self) -> bool:
+        return ReleaseType.POST in self._release_type
+
+    def is_dev_release(self) -> bool:
+        return ReleaseType.DEV in self._release_type
+
+    def is_alpha_release(self) -> bool:
+        return ReleaseType.ALPHA in self._release_type
+
+    def is_beta_release(self) -> bool:
+        return ReleaseType.BETA in self._release_type
+
+    def is_release_candidate(self) -> bool:
+        return ReleaseType.RELEASE_CANDIDATE in self._release_type
+
+    def is_final_release(self) -> bool:
+        return ReleaseType.FINAL in self._release_type
+
+    def is_valid_version(self) -> bool:
+        return self._valid_version
+
+    def __str__(self) -> str:
+        return self._orig
+
+    def _validate(self, other: BaseVersion) -> None:
+        if not self._valid_version:
+            raise ValueError(
+                f"Version {self._orig} is not a valid version string "
+                f"for type {type(self).__name__}"
+            )
+        if not other._valid_version:
+            raise ValueError(
+                f"Version {other._orig} is not a valid version string "
+                f"for type {type(self).__name__}"
+            )
+
+    def __eq__(self, __value: object) -> bool:
+        if not isinstance(__value, type(self)):
+            raise ValueError("Invalid type for comparison")
+        self._validate(__value)
+        if self._release_tup != __value._release_tup:
+            return False
+        if self._extra_tup != __value._extra_tup:
+            return False
+        if self._has_dev_part != __value._has_dev_part:
+            return False
+        if self._dev_count != __value._dev_count:
+            return False
+        return True
+
+    def __lt__(self, __value: object) -> bool:
+        if not isinstance(__value, type(self)):
+            raise ValueError("Invalid type for comparison")
+        self._validate(__value)
+        if self._release_tup != __value._release_tup:
+            return self._release_tup < __value._release_tup
+        if self._extra_tup != __value._extra_tup:
+            return self._extra_tup < __value._extra_tup
+        if self._has_dev_part != __value._has_dev_part:
+            return self._has_dev_part
+        return self._dev_count < __value._dev_count
+
+    def __le__(self, __value: object) -> bool:
+        # Delegate to __lt__/__eq__.  Checking extra_tup when the
+        # release tuples already differ would give a wrong result.
+        return self.__lt__(__value) or self.__eq__(__value)
+
+    def __ne__(self, __value: object) -> bool:
+        if not isinstance(__value, type(self)):
+            raise ValueError("Invalid type for comparison")
+        self._validate(__value)
+        if self._release_tup != __value._release_tup:
+            return True
+        if self._extra_tup != __value._extra_tup:
+            return True
+        if self._has_dev_part != __value._has_dev_part:
+            return True
+        if self._dev_count != __value._dev_count:
+            return True
+        return False
+
+    def __gt__(self, __value: object) -> bool:
+        if not isinstance(__value, type(self)):
+            raise ValueError("Invalid type for comparison")
+        self._validate(__value)
+        if self._release_tup != __value._release_tup:
+            return self._release_tup > __value._release_tup
+        if self._extra_tup != __value._extra_tup:
+            return self._extra_tup > __value._extra_tup
+        if self._has_dev_part != __value._has_dev_part:
+            return __value._has_dev_part
+        return self._dev_count > __value._dev_count
+
+    def __ge__(self, __value: object) -> bool:
+        # Delegate to __gt__/__eq__ for the same reason as __le__
+        return self.__gt__(__value) or self.__eq__(__value)
+
+
+class PyVersion(BaseVersion):
+    def __init__(self, version: str) -> None:
+        super().__init__(version)
+        ver_match = _py_version_regex.match(version)
+        if ver_match is None:
+            return
+        version_info = ver_match.groupdict()
+        release: Optional[str] = version_info["release"]
+        if release is None:
+            return
+        self._valid_version = True
+        self._release = release
+        self._tag = f"v{release}" if self._orig[0].lower() == "v" else release
+        self._release_tup = tuple(int(part) for part in release.split("."))
+        self._extra_tup = (1, 0, 0)
+        if version_info["pre"] is not None:
+            pre_conv = dict([("a", 1), ("b", 2), ("c", 3), ("r", 3), ("p", 3)])
+            lbl = version_info["pre_l"][0].lower()
+            self._extra_tup = (0, pre_conv.get(lbl, 0), int(version_info["pre_n"] or 0))
+            self._tag += version_info["pre"]
+            self._release_type |= ReleaseType(1 << pre_conv.get(lbl, 1))
+            if version_info["post"] is not None:
+                # strange combination of a "post" pre-release.
+                num = version_info["post_n1"] or version_info["post_n2"]
+                self._extra_tup += (int(num or 0),)
+                self._tag += version_info["post"]
+                self._release_type |= ReleaseType.POST
+        elif version_info["post"] is not None:
+            num = version_info["post_n1"] or version_info["post_n2"]
+            self._extra_tup = (2, int(num or 0), 0)
+            self._tag += version_info["post"]
+            self._release_type |= ReleaseType.POST
+        self._has_dev_part = version_info["dev"] is not None
+        if self._has_dev_part:
+            self._release_type |= ReleaseType.DEV
+        elif self._release_type.value == 0:
+            self._release_type = ReleaseType.FINAL
+        elif self._release_type.value == ReleaseType.POST.value:
+            self._release_type |= ReleaseType.FINAL
+        self._dev_count = int(version_info["dev_n"] or 0)
+        self.local: Optional[str] = version_info["local"]
+
+    def convert_to_git(self, version_info: Dict[str, Optional[str]]) -> GitVersion:
+        git_version: Optional[str] = version_info["release"]
+        if git_version is None:
+            raise ValueError("Invalid version string")
+        if self._orig[0].lower() == "v":
+            git_version = f"v{git_version}"
+        local: str = version_info["local"] or ""
+        # Assume semantic versioning, convert the version string.
+        if version_info["dev_n"] is not None:
+            major, _, minor = git_version.rpartition(".")
+            if major:
+                git_version = f"v{major}.{max(int(minor) - 1, 0)}"
+        if version_info["pre"] is not None:
+            git_version = f"{git_version}{version_info['pre']}"
+        dev_num = version_info["dev_n"] or 0
+        git_version = f"{git_version}-{dev_num}"
+        local_parts = local.split(".", 1)
+        if local_parts[0]:
+            git_version = f"{git_version}-{local_parts[0]}"
+        if len(local_parts) > 1:
+            git_version = f"{git_version}-dirty"
+        return GitVersion(git_version)
+
+
+class GitVersion(BaseVersion):
+    def __init__(self, version: str) -> None:
+        super().__init__(version)
+        self._is_dirty: bool = False
+        self._is_inferred: bool = False
+        ver_match = _git_version_regex.match(version)
+        if ver_match is None:
+            # Check Fallback
+            fb_match = re.match(r"(?P<hash>[a-fA-F0-9]+)(?P<dirty>-dirty)?", self._orig)
+            if fb_match is None:
+                return
+            self._tag = ""
+            self._release = fb_match["hash"]
+            self._is_dirty = fb_match["dirty"] is not None
+            self._is_inferred = True
+            return
+        version_info = ver_match.groupdict()
+        release: Optional[str] = version_info["release"]
+        if release is None:
+            return
+        self._valid_version = True
+        self._release = release
+        self._tag = version_info["tag"] or "?"
+        self._release_tup = tuple(int(part) for part in release.split("."))
+        self._extra_tup = (1, 0, 0)
+        if version_info["pre"] is not None:
+            pre_conv = dict([("a", 1), ("b", 2), ("c", 3), ("r", 3), ("p", 3)])
+            lbl = version_info["pre_l"][0].lower()
+            self._extra_tup = (0, pre_conv.get(lbl, 0), int(version_info["pre_n"] or 0))
+            self._release_type = ReleaseType(1 << pre_conv.get(lbl, 1))
+        # All git versions are considered to have a dev part.  Contrary to python
+        # versioning, a version with a dev number is greater than the same version
+        # without one.
+        self._has_dev_part = True
+        self._dev_count = int(version_info["dev_n"] or 0)
+        if self._dev_count > 0:
+            self._release_type |= ReleaseType.DEV
+        if self._release_type.value == 0:
+            self._release_type = ReleaseType.FINAL
+        self._is_inferred = version_info["inferred"] is not None
+        self._is_dirty = version_info["dirty"] is not None
+
+    @property
+    def short_version(self) -> str:
+        if not self._valid_version:
+            return "?"
+        return f"{self._tag}-{self._dev_count}"
+
+    @property
+    def dirty(self) -> bool:
+        return self._is_dirty
+
+    @property
+    def inferred(self) -> bool:
+        return self._is_inferred
+
+    def is_fallback(self) -> bool:
+        return self._is_inferred and not self._valid_version
+
+    def infer_last_tag(self) -> str:
+        if self._valid_version:
+            if self._is_inferred:
+                # We can't infer a previous release from another inferred release
+                return self._tag
+            type_choices = dict([(1, "a"), (2, "b"), (3, "rc")])
+            if self.is_pre_release() and self._extra_tup > (0, 1, 0):
+                type_idx = self._extra_tup[1]
+                type_count = self._extra_tup[2]
+                if type_count == 0:
+                    type_idx -= 1
+                else:
+                    type_count -= 1
+                pretype = type_choices.get(type_idx, "rc")
+                return f"{self._release}.{pretype}{type_count}"
+            else:
+                parts = [int(ver) for ver in self._release.split(".")]
+                new_ver: List[str] = []
+                need_decrement = True
+                for part in reversed(parts):
+                    if part > 0 and need_decrement:
+                        need_decrement = False
+                        part -= 1
+                    new_ver.insert(0, str(part))
+                return "v" + ".".join(new_ver)
+        return "v0.0.0"
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..9238379
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,71 @@
+[project]
+name = "moonraker"
+dynamic = ["version"]
+description = "API Server for Klipper"
+authors = [
+    {name = "Eric Callahan", email = "arksine.code@gmail.com"},
+]
+dependencies = [
+    "tornado==6.2.0 ; python_version=='3.7'",
+    "tornado==6.4.0 ; python_version>='3.8'",
+    "pyserial==3.4",
+    "pyserial-asyncio==0.6",
+    "pillow==9.5.0 ; python_version=='3.7'",
+    "pillow==10.3.0 ; python_version>='3.8'",
+    "streaming-form-data==1.11.0 ; python_version=='3.7'",
+    "streaming-form-data==1.15.0 ; python_version>='3.8'",
+    "distro==1.9.0",
+    "inotify-simple==1.3.5",
+    "libnacl==2.1.0",
+    "paho-mqtt==1.6.1",
+    "zeroconf==0.131.0",
+    "preprocess-cancellation==0.2.1",
+    "jinja2==3.1.4",
+    "dbus-next==0.2.3",
+    "apprise==1.8.0",
+    "ldap3==2.9.1",
+    "python-periphery==2.4.1"
+]
+requires-python = ">=3.7"
+readme = "README.md"
+license = {text = "GPL-3.0-only"}
+keywords = ["klipper", "3D printing", "server", "moonraker"]
+classifiers = [
+    "Development Status :: 4 - Beta",
+    "Topic :: Internet :: WWW/HTTP :: HTTP Servers",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3.7",
+    "Programming Language :: Python :: 3.8",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+]
+
+[project.urls]
+homepage = "https://github.com/Arksine/moonraker"
+repository = "https://github.com/Arksine/moonraker"
+documentation = "https://moonraker.readthedocs.io"
+changelog = "https://moonraker.readthedocs.io/en/latest/changelog/"
+
+[project.optional-dependencies]
+msgspec=["msgspec>=0.18.4 ; python_version>='3.8'"]
+uvloop=["uvloop>=0.17.0"]
+speedups = ["moonraker[msgspec,uvloop]"]
+
+[tool.pdm.version]
+source = "scm"
+write_to = "moonraker/__version__.py"
+write_template = "__version__ = '{}'\n"
+
+[tool.pdm.build]
+excludes = ["./**/.git", "moonraker/moonraker.py"]
+includes = ["moonraker"]
+editable-backend = "path"
+custom-hook = "scripts/pdm_build_dist.py"
+
+[project.scripts]
+moonraker = "moonraker.server:main"
+
+[build-system]
+requires = ["pdm-backend"]
+build-backend = "pdm.backend"
diff --git a/scripts/backup-database.sh b/scripts/backup-database.sh
new file mode 100644
index 0000000..22c3aa5
--- /dev/null
+++ b/scripts/backup-database.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+# LMDB Database backup utility
+
+DATABASE_PATH="${HOME}/printer_data/database"
+MOONRAKER_ENV="${HOME}/moonraker-env"
+OUPUT_FILE="${HOME}/database.backup"
+
+print_help()
+{
+    echo "Moonraker Database Backup Utility"
+    echo
+    echo "usage: backup-database.sh [-h] [-e ] [-d ] [-o ]"
+    echo
+    echo "optional arguments:"
+    echo "  -h                  show this message"
+    echo "  -e        Moonraker Python Environment"
+    echo "  -d   Moonraker LMDB database to backup"
+    echo "  -o     backup file to save to"
+    exit 0
+}
+
+# Parse command line arguments
+while getopts "he:d:o:" arg; do
+    case $arg in
+        h) print_help;;
+        e) MOONRAKER_ENV=$OPTARG;;
+        d) DATABASE_PATH=$OPTARG;;
+        o) OUTPUT_FILE=$OPTARG;;
+    esac
+done
+
+PYTHON_BIN="${MOONRAKER_ENV}/bin/python"
+DB_TOOL="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/dbtool.py"
+
+if [ ! -f "$PYTHON_BIN" ]; then
+    echo "No Python binary found at '${PYTHON_BIN}'"
+    exit 1
+fi
+
+if [ ! -f "$DATABASE_PATH/data.mdb" ]; then
+    echo "No Moonraker database found at '${DATABASE_PATH}'"
+    exit 1
+fi
+
+if [ ! -f "$DB_TOOL" ]; then
+    echo "Unable to locate dbtool.py at '${DB_TOOL}'"
+    exit 1
+fi
+
+"${PYTHON_BIN}" "${DB_TOOL}" backup "${DATABASE_PATH}" "${OUTPUT_FILE}"
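
The script boils down to a single dbtool.py invocation; a Python
equivalent using the default paths (assuming a stock ~/moonraker
checkout and single-instance install) would be:

    # Python equivalent of the command above; paths are illustrative
    # and assume a default single-instance install.
    import subprocess
    from pathlib import Path

    home = Path.home()
    subprocess.run(
        [
            str(home / "moonraker-env/bin/python"),
            str(home / "moonraker/scripts/dbtool.py"),
            "backup",
            str(home / "printer_data/database"),
            str(home / "database.backup"),
        ],
        check=True,
    )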
diff --git a/scripts/data-path-fix.sh b/scripts/data-path-fix.sh
new file mode 100644
index 0000000..e80c7cd
--- /dev/null
+++ b/scripts/data-path-fix.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+# Data Path Fix for legacy MainsailOS and FluiddPi installations running
+# a single instance of Moonraker with a default configuration
+
+DATA_PATH="${HOME}/printer_data"
+DATA_PATH_BKP="${HOME}/.broken_printer_data"
+DB_PATH="${HOME}/.moonraker_database"
+CONFIG_PATH="${HOME}/klipper_config"
+LOG_PATH="${HOME}/klipper_logs"
+GCODE_PATH="${HOME}/gcode_files"
+MOONRAKER_CONF="${CONFIG_PATH}/moonraker.conf"
+MOONRAKER_LOG="${LOG_PATH}/moonraker.log"
+ALIAS="moonraker"
+
+# Parse command line arguments
+while getopts "c:l:d:a:m:g:" arg; do
+    case $arg in
+        c)
+            MOONRAKER_CONF=$OPTARG
+            CONFIG_PATH="$( dirname $OPTARG )"
+            ;;
+        l)
+            MOONRAKER_LOG=$OPTARG
+            LOG_PATH="$( dirname $OPTARG )"
+            ;;
+        d)
+            DATA_PATH=$OPTARG
+            dpbase="$( basename $OPTARG )"
+            DATA_PATH_BKP="${HOME}/.broken_${dpbase}"
+            ;;
+        a)
+            ALIAS=$OPTARG
+            ;;
+        m)
+            DB_PATH=$OPTARG
+            [ ! -f "${DB_PATH}/data.mdb" ] && echo "No valid database found at ${DB_PATH}" && exit 1
+            ;;
+        g)
+            GCODE_PATH=$OPTARG
+            [ ! -d "${GCODE_PATH}" ] && echo "No GCode Path found at ${GCODE_PATH}" && exit 1
+            ;;
+    esac
+done
+
+[ ! -f "${MOONRAKER_CONF}" ] && echo "Error: unable to find config: ${MOONRAKER_CONF}" && exit 1
+[ ! -d "${LOG_PATH}" ] && echo "Error: unable to find log path: ${LOG_PATH}" && exit 1
+
+sudo systemctl stop ${ALIAS}
+
+[ -d "${DATA_PATH_BKP}" ] && rm -rf ${DATA_PATH_BKP}
+[ -d "${DATA_PATH}" ] && echo "Moving broken datapath to ${DATA_PATH_BKP}" && mv ${DATA_PATH} ${DATA_PATH_BKP}
+
+mkdir ${DATA_PATH}
+
+echo "Creating symbolic links..."
+[ -f "${DB_PATH}/data.mdb" ] && ln -s ${DB_PATH} "$DATA_PATH/database"
+[ -d "${GCODE_PATH}" ] && ln -s ${GCODE_PATH} "$DATA_PATH/gcodes"
+ln -s ${LOG_PATH} "$DATA_PATH/logs"
+ln -s ${CONFIG_PATH} "$DATA_PATH/config"
+
+[ -f "${DB_PATH}/data.mdb" ] && ~/moonraker-env/bin/python -mlmdb -e ${DB_PATH} -d moonraker edit --delete=validate_install
+
+echo "Running Moonraker install script..."
+
+~/moonraker/scripts/install-moonraker.sh -f -a ${ALIAS} -d ${DATA_PATH} -c ${MOONRAKER_CONF} -l ${MOONRAKER_LOG}
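
For a default legacy install (no overriding options passed), the
resulting data path is a set of symlinks back to the old locations;
sketched in Python with the script's default paths:

    # Illustration of the symlink layout created above; paths mirror
    # the script's defaults and vary with the options given.
    import os

    data_path = os.path.expanduser("~/printer_data")
    os.makedirs(data_path, exist_ok=True)
    links = {
        "database": "~/.moonraker_database",
        "gcodes": "~/gcode_files",
        "logs": "~/klipper_logs",
        "config": "~/klipper_config",
    }
    for name, target in links.items():
        os.symlink(os.path.expanduser(target), os.path.join(data_path, name))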
diff --git a/scripts/dbtool.py b/scripts/dbtool.py
index 929c821..e4671c5 100644
--- a/scripts/dbtool.py
+++ b/scripts/dbtool.py
@@ -9,6 +9,7 @@ import pathlib
 import base64
 import tempfile
 import re
+import time
 from typing import Any, Dict, Optional, TextIO, Tuple
 import lmdb
 
@@ -16,7 +17,9 @@ MAX_NAMESPACES = 100
 MAX_DB_SIZE = 200 * 2**20
 HEADER_KEY = b"MOONRAKER_DATABASE_START"
 
-LINE_MATCH = re.compile(r"\+(\d+),(\d+):(.+?)->(.+)")
+LINE_MATCH = re.compile(
+    r"^\+(\d+),(\d+):([A-Za-z0-9+/]+={0,2})->([A-Za-z0-9+/]+={0,2})$"
+)
 
 class DBToolError(Exception):
     pass
@@ -157,10 +160,13 @@ def restore(args: Dict[str, Any]):
     print(f"Restoring backup from '{input_db}' to '{dest_path}'...")
     bkp_dir: Optional[pathlib.Path] = None
     if dest_path.joinpath("data.mdb").exists():
-        tmp_dir = pathlib.Path(tempfile.gettempdir())
-        bkp_dir = tmp_dir.joinpath("moonrakerdb_backup")
+        bkp_dir = dest_path.parent.joinpath("backup")
+        if not bkp_dir.exists():
+            bkp_dir = pathlib.Path(tempfile.gettempdir())
+        str_time = time.strftime("%Y%m%dT%H%M%SZ", time.gmtime())
+        bkp_dir = bkp_dir.joinpath(f"{str_time}/database")
         if not bkp_dir.is_dir():
-            bkp_dir.mkdir()
+            bkp_dir.mkdir(parents=True)
         print(f"Warning: database file at found in '{dest_path}', "
               "all data will be overwritten.  Copying existing DB "
               f"to '{bkp_dir}'")
diff --git a/scripts/fetch-apikey.sh b/scripts/fetch-apikey.sh
index d022366..1bd0654 100644
--- a/scripts/fetch-apikey.sh
+++ b/scripts/fetch-apikey.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 # Helper Script for fetching the API Key from a moonraker database
-DATABASE_PATH="${HOME}/.moonraker_database"
+DATABASE_PATH="${HOME}/printer_data/database"
 MOONRAKER_ENV="${HOME}/moonraker-env"
 DB_ARGS="--read=READ --db=authorized_users get _API_KEY_USER_"
 API_REGEX='(?<="api_key": ")([^"]+)'
diff --git a/scripts/finish-upgrade.sh b/scripts/finish-upgrade.sh
new file mode 100644
index 0000000..44c5482
--- /dev/null
+++ b/scripts/finish-upgrade.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+# Helper script for completing privileged service upgrades via SSH
+
+ADDRESS="localhost"
+PORT="7125"
+API_KEY=""
+
+# Python Helper Scripts
+check_sudo_request=$( cat << EOF
+import sys
+import json
+try:
+  ret = json.load(sys.stdin)
+except Exception:
+  exit(0)
+entries = ret.get('result', {}).get('entries', [])
+for item in entries:
+   if item['dismissed'] is False and item['title'] == 'Sudo Password Required':
+     sys.stdout.write('true')
+     exit(0)
+sys.stdout.write('false')
+EOF
+)
+
+check_pw_response=$( cat << EOF
+import sys
+import json
+try:
+  ret = json.load(sys.stdin)
+except Exception:
+  exit(0)
+responses = ret.get('result', {}).get('sudo_responses', [])
+if responses:
+  sys.stdout.write('\n'.join(responses))
+EOF
+)
+
+print_help_message()
+{
+    echo "Utility to complete privileged upgrades for Moonraker"
+    echo
+    echo "usage: finish-upgrade.sh [-h] [-a 
] [-p ] [-k ]" + echo + echo "optional arguments:" + echo " -h show this message" + echo " -a
address for Moonraker instance" + echo " -p port for Moonraker instance" + echo " -k API Key for authorization" +} + +while getopts "a:p:k:h" arg; do + case $arg in + a) ADDRESS=${OPTARG};; + b) PORT=${OPTARG};; + k) API_KEY=${OPTARG};; + h) + print_help_message + exit 0 + ;; + esac +done + +base_url="http://${ADDRESS}:${PORT}" + +echo "Completing Upgrade for Moonraker at ${base_url}" +echo "Requesting Announcements..." +ann_url="${base_url}/server/announcements/list" +curl_cmd=(curl -f -s -S "${ann_url}") +[ -n "${API_KEY}" ] && curl_cmd+=(-H "X-Api-Key: ${API_KEY}") +result="$( "${curl_cmd[@]}" 2>&1 )" +if [ $? -ne 0 ]; then + echo "Moonraker announcement request failed with error: ${result}" + echo "Make sure the address and port are correct. If authorization" + echo "is required supply the API Key with the -k option." + exit -1 +fi +has_req="$( echo "$result" | python3 -c "${check_sudo_request}" )" +if [ "$has_req" != "true" ]; then + echo "No sudo request detected, aborting" + exit -1 +fi + +# Request Password, send to Moonraker +echo "Sudo request announcement found, please enter your password" +read -sp "Password: " passvar +echo -e "\n" +sudo_url="${base_url}/machine/sudo/password" +curl_cmd=(curl -f -s -S -X POST "${sudo_url}") +curl_cmd+=(-d "{\"password\": \"${passvar}\"}") +curl_cmd+=(-H "Content-Type: application/json") +[ -n "$API_KEY" ] && curl_cmd+=(-H "X-Api-Key: ${API_KEY}") + +result="$( "${curl_cmd[@]}" 2>&1)" +if [ $? -ne 0 ]; then + echo "Moonraker password request failed with error: ${result}" + echo "Make sure you entered the correct password." + exit -1 +fi +response="$( echo "$result" | python3 -c "${check_pw_response}" )" +if [ -n "${response}" ]; then + echo "${response}" +else + echo "Invalid response received from Moonraker. Raw result: ${result}" +fi diff --git a/scripts/install-moonraker.sh b/scripts/install-moonraker.sh index de11ab2..c34d990 100644 --- a/scripts/install-moonraker.sh +++ b/scripts/install-moonraker.sh @@ -7,8 +7,24 @@ SYSTEMDDIR="/etc/systemd/system" REBUILD_ENV="${MOONRAKER_REBUILD_ENV:-n}" FORCE_DEFAULTS="${MOONRAKER_FORCE_DEFAULTS:-n}" DISABLE_SYSTEMCTL="${MOONRAKER_DISABLE_SYSTEMCTL:-n}" -CONFIG_PATH="${MOONRAKER_CONFIG_PATH:-${HOME}/moonraker.conf}" -LOG_PATH="${MOONRAKER_LOG_PATH:-/tmp/moonraker.log}" +SKIP_POLKIT="${MOONRAKER_SKIP_POLKIT:-n}" +CONFIG_PATH="${MOONRAKER_CONFIG_PATH}" +LOG_PATH="${MOONRAKER_LOG_PATH}" +DATA_PATH="${MOONRAKER_DATA_PATH}" +INSTANCE_ALIAS="${MOONRAKER_ALIAS:-moonraker}" +SPEEDUPS="${MOONRAKER_SPEEDUPS:-n}" +SERVICE_VERSION="1" + +package_decode_script=$( cat << EOF +import sys +import json +try: + ret = json.load(sys.stdin) +except Exception: + exit(0) +sys.stdout.write(' '.join(ret['debian'])) +EOF +) # Step 2: Clean up legacy installation cleanup_legacy() { @@ -25,17 +41,30 @@ cleanup_legacy() { # Step 3: Install packages install_packages() { - PKGLIST="python3-virtualenv python3-dev libopenjp2-7 python3-libgpiod" - PKGLIST="${PKGLIST} curl libcurl4-openssl-dev libssl-dev liblmdb-dev" - PKGLIST="${PKGLIST} libsodium-dev zlib1g-dev libjpeg-dev packagekit" - # Update system package info report_status "Running apt-get update..." sudo apt-get update --allow-releaseinfo-change + system_deps="${SRCDIR}/scripts/system-dependencies.json" + if [ -f "${system_deps}" ]; then + if [ ! -x "$(command -v python3)" ]; then + report_status "Installing python3 base package..." 
diff --git a/scripts/install-moonraker.sh b/scripts/install-moonraker.sh
index de11ab2..c34d990 100644
--- a/scripts/install-moonraker.sh
+++ b/scripts/install-moonraker.sh
@@ -7,8 +7,24 @@ SYSTEMDDIR="/etc/systemd/system"
 REBUILD_ENV="${MOONRAKER_REBUILD_ENV:-n}"
 FORCE_DEFAULTS="${MOONRAKER_FORCE_DEFAULTS:-n}"
 DISABLE_SYSTEMCTL="${MOONRAKER_DISABLE_SYSTEMCTL:-n}"
-CONFIG_PATH="${MOONRAKER_CONFIG_PATH:-${HOME}/moonraker.conf}"
-LOG_PATH="${MOONRAKER_LOG_PATH:-/tmp/moonraker.log}"
+SKIP_POLKIT="${MOONRAKER_SKIP_POLKIT:-n}"
+CONFIG_PATH="${MOONRAKER_CONFIG_PATH}"
+LOG_PATH="${MOONRAKER_LOG_PATH}"
+DATA_PATH="${MOONRAKER_DATA_PATH}"
+INSTANCE_ALIAS="${MOONRAKER_ALIAS:-moonraker}"
+SPEEDUPS="${MOONRAKER_SPEEDUPS:-n}"
+SERVICE_VERSION="1"
+
+package_decode_script=$( cat << EOF
+import sys
+import json
+try:
+    ret = json.load(sys.stdin)
+except Exception:
+    exit(0)
+sys.stdout.write(' '.join(ret['debian']))
+EOF
+)
 
 # Step 2: Clean up legacy installation
 cleanup_legacy() {
@@ -25,17 +41,30 @@ cleanup_legacy() {
 # Step 3: Install packages
 install_packages()
 {
-    PKGLIST="python3-virtualenv python3-dev libopenjp2-7 python3-libgpiod"
-    PKGLIST="${PKGLIST} curl libcurl4-openssl-dev libssl-dev liblmdb-dev"
-    PKGLIST="${PKGLIST} libsodium-dev zlib1g-dev libjpeg-dev packagekit"
-
     # Update system package info
     report_status "Running apt-get update..."
     sudo apt-get update --allow-releaseinfo-change
 
+    system_deps="${SRCDIR}/scripts/system-dependencies.json"
+    if [ -f "${system_deps}" ]; then
+        if [ ! -x "$(command -v python3)" ]; then
+            report_status "Installing python3 base package..."
+            sudo apt-get install --yes python3
+        fi
+        PKGS="$( cat ${system_deps} | python3 -c "${package_decode_script}" )"
+
+    else
+        echo "Error: system-dependencies.json not found, falling back to legacy package list"
+        PKGLIST="${PKGLIST} python3-virtualenv python3-dev"
+        PKGLIST="${PKGLIST} libopenjp2-7 libsodium-dev zlib1g-dev libjpeg-dev"
+        PKGLIST="${PKGLIST} packagekit wireless-tools curl"
+        PKGS=${PKGLIST}
+    fi
+
     # Install desired packages
-    report_status "Installing packages..."
-    sudo apt-get install --yes ${PKGLIST}
+    report_status "Installing Moonraker Dependencies:"
+    report_status "${PKGS}"
+    sudo apt-get install --yes ${PKGS}
 }
 
 # Step 4: Create python virtual environment
@@ -50,29 +79,84 @@ create_virtualenv()
     fi
 
     if [ ! -d ${PYTHONDIR} ]; then
-        GET_PIP="${HOME}/get-pip.py"
-        virtualenv --no-pip -p /usr/bin/python3 ${PYTHONDIR}
-        curl https://bootstrap.pypa.io/pip/3.6/get-pip.py -o ${GET_PIP}
-        ${PYTHONDIR}/bin/python ${GET_PIP}
-        rm ${GET_PIP}
+        virtualenv -p /usr/bin/python3 ${PYTHONDIR}
+        #GET_PIP="${HOME}/get-pip.py"
+        #curl https://bootstrap.pypa.io/pip/3.6/get-pip.py -o ${GET_PIP}
+        #${PYTHONDIR}/bin/python ${GET_PIP}
+        #rm ${GET_PIP}
     fi
 
     # Install/update dependencies
+    export SKIP_CYTHON=1
     ${PYTHONDIR}/bin/pip install -r ${SRCDIR}/scripts/moonraker-requirements.txt
+
+    if [ ${SPEEDUPS} = "y" ]; then
+        report_status "Installing Speedups..."
+        ${PYTHONDIR}/bin/pip install -r ${SRCDIR}/scripts/moonraker-speedups.txt
+    fi
 }
 
-# Step 5: Install startup script
+# Step 5: Initialize data folder
+init_data_path()
+{
+    report_status "Initializing Moonraker Data Path at ${DATA_PATH}"
+    config_dir="${DATA_PATH}/config"
+    logs_dir="${DATA_PATH}/logs"
+    env_dir="${DATA_PATH}/systemd"
+    config_file="${DATA_PATH}/config/moonraker.conf"
+    [ ! -e "${DATA_PATH}" ] && mkdir ${DATA_PATH}
+    [ ! -e "${config_dir}" ] && mkdir ${config_dir}
+    [ ! -e "${logs_dir}" ] && mkdir ${logs_dir}
+    [ ! -e "${env_dir}" ] && mkdir ${env_dir}
+    [ -n "${CONFIG_PATH}" ] && config_file=${CONFIG_PATH}
+    # Write initial configuration for first time installs
+    if [ ! -f $SERVICE_FILE ] && [ ! -e "${config_file}" ]; then
+        # detect machine provider
+        if [ "$( systemctl is-active dbus )" = "active" ]; then
+            provider="systemd_dbus"
+        else
+            provider="systemd_cli"
+        fi
+        report_status "Writing Config File ${config_file}:\n"
+        /bin/sh -c "cat > ${config_file}" << EOF
+# Moonraker Configuration File
+
+[server]
+host: 0.0.0.0
+port: 7125
+# Make sure the klippy_uds_address is correct.  It is initialized
+# to the default address.
+klippy_uds_address: /tmp/klippy_uds
+
+[machine]
+provider: ${provider}
+
+EOF
+        cat ${config_file}
+    fi
+}
+
+# Step 6: Install startup script
 install_script()
 {
     # Create systemd service file
-    SERVICE_FILE="${SYSTEMDDIR}/moonraker.service"
+    ENV_FILE="${DATA_PATH}/systemd/moonraker.env"
+    if [ ! -f $ENV_FILE ] || [ $FORCE_DEFAULTS = "y" ]; then
+        rm -f $ENV_FILE
+        env_vars="MOONRAKER_DATA_PATH=\"${DATA_PATH}\""
+        [ -n "${CONFIG_PATH}" ] && env_vars="${env_vars}\nMOONRAKER_CONFIG_PATH=\"${CONFIG_PATH}\""
+        [ -n "${LOG_PATH}" ] && env_vars="${env_vars}\nMOONRAKER_LOG_PATH=\"${LOG_PATH}\""
+        env_vars="${env_vars}\nMOONRAKER_ARGS=\"-m moonraker\""
+        env_vars="${env_vars}\nPYTHONPATH=\"${SRCDIR}\"\n"
+        echo -e $env_vars > $ENV_FILE
+    fi
     [ -f $SERVICE_FILE ] && [ $FORCE_DEFAULTS = "n" ] && return
     report_status "Installing system start script..."
     sudo groupadd -f moonraker-admin
     sudo /bin/sh -c "cat > ${SERVICE_FILE}" << EOF
-#Systemd service file for moonraker
+# systemd service file for moonraker
 [Unit]
-Description=API Server for Klipper
+Description=API Server for Klipper SV${SERVICE_VERSION}
 Requires=network-online.target
 After=network-online.target
@@ -84,50 +168,57 @@ Type=simple
 User=$USER
 SupplementaryGroups=moonraker-admin
 RemainAfterExit=yes
-WorkingDirectory=${SRCDIR}
-ExecStart=${LAUNCH_CMD} -c ${CONFIG_PATH} -l ${LOG_PATH}
+EnvironmentFile=${ENV_FILE}
+ExecStart=${PYTHONDIR}/bin/python \$MOONRAKER_ARGS
 Restart=always
 RestartSec=10
 EOF
 # Use systemctl to enable the klipper systemd service script
     if [ $DISABLE_SYSTEMCTL = "n" ]; then
-        sudo systemctl enable moonraker.service
+        sudo systemctl enable "${INSTANCE_ALIAS}.service"
         sudo systemctl daemon-reload
     fi
 }
 
+# Step 7: Validate/Install polkit rules
 check_polkit_rules()
 {
-    if [ ! -x "$(command -v pkaction)" ]; then
+    if [ ! -x "$(command -v pkaction || true)" ]; then
         return
     fi
-    POLKIT_VERSION="$( pkaction --version | grep -Po "(\d?\.\d+)" )"
+    POLKIT_VERSION="$( pkaction --version | grep -Po "(\d+\.?\d*)" )"
+    NEED_POLKIT_INSTALL="n"
     if [ "$POLKIT_VERSION" = "0.105" ]; then
         POLKIT_LEGACY_FILE="/etc/polkit-1/localauthority/50-local.d/10-moonraker.pkla"
         # legacy policykit rules don't give users other than root read access
         if sudo [ ! -f $POLKIT_LEGACY_FILE ]; then
-            echo -e "\n*** No PolicyKit Rules detected, run 'set-policykit-rules.sh'"
-            echo "*** if you wish to grant Moonraker authorization to manage"
-            echo "*** system services, reboot/shutdown the system, and update"
-            echo "*** packages."
+            NEED_POLKIT_INSTALL="y"
        fi
     else
         POLKIT_FILE="/etc/polkit-1/rules.d/moonraker.rules"
         POLKIT_USR_FILE="/usr/share/polkit-1/rules.d/moonraker.rules"
         if [ ! -f $POLKIT_FILE ] && [ ! -f $POLKIT_USR_FILE ]; then
+            NEED_POLKIT_INSTALL="y"
+        fi
+    fi
+    if [ "${NEED_POLKIT_INSTALL}" = "y" ]; then
+        if [ "${SKIP_POLKIT}" = "y" ]; then
             echo -e "\n*** No PolicyKit Rules detected, run 'set-policykit-rules.sh'"
             echo "*** if you wish to grant Moonraker authorization to manage"
             echo "*** system services, reboot/shutdown the system, and update"
             echo "*** packages."
+        else
+            report_status "Installing PolKit Rules"
+            ${SRCDIR}/scripts/set-policykit-rules.sh -z
         fi
     fi
 }
 
-# Step 6: Start server
+# Step 8: Start server
 start_software()
 {
     report_status "Launching Moonraker API Server..."
-    sudo systemctl restart moonraker
+    sudo systemctl restart ${INSTANCE_ALIAS}
 }
 
 # Helper functions
@@ -149,24 +240,43 @@ set -e
 
 # Find SRCDIR from the pathname of this script
 SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
-LAUNCH_CMD="${PYTHONDIR}/bin/python ${SRCDIR}/moonraker/moonraker.py"
 
 # Parse command line arguments
-while getopts "rfzc:l:" arg; do
+while getopts "rfzxsc:l:d:a:" arg; do
     case $arg in
         r) REBUILD_ENV="y";;
         f) FORCE_DEFAULTS="y";;
         z) DISABLE_SYSTEMCTL="y";;
+        x) SKIP_POLKIT="y";;
+        s) SPEEDUPS="y";;
         c) CONFIG_PATH=$OPTARG;;
         l) LOG_PATH=$OPTARG;;
+        d) DATA_PATH=$OPTARG;;
+        a) INSTANCE_ALIAS=$OPTARG;;
     esac
 done
 
+if [ -z "${DATA_PATH}" ]; then
+    if [ "${INSTANCE_ALIAS}" = "moonraker" ]; then
+        DATA_PATH="${HOME}/printer_data"
+    else
+        num="$( echo ${INSTANCE_ALIAS} | grep -Po "moonraker[-_]?\K\d+" || true )"
+        if [ -n "${num}" ]; then
+            DATA_PATH="${HOME}/printer_${num}_data"
+        else
+            DATA_PATH="${HOME}/${INSTANCE_ALIAS}_data"
+        fi
+    fi
+fi
+
+SERVICE_FILE="${SYSTEMDDIR}/${INSTANCE_ALIAS}.service"
+
 # Run installation steps defined above
 verify_ready
 cleanup_legacy
 install_packages
 create_virtualenv
+init_data_path
 install_script
 check_polkit_rules
 if [ $DISABLE_SYSTEMCTL = "n" ]; then
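A usage sketch for the installer above; the flags are those parsed by its getopts loop, and "moonraker-2" is a hypothetical alias for a second instance:

    # Default single-instance install, with speedups (-s) and
    # skipping polkit rule installation (-x).
    ./scripts/install-moonraker.sh -s -x
    # Hypothetical second instance: the alias yields a moonraker-2.service
    # unit and derives ~/printer_2_data unless -d overrides the data path.
    ./scripts/install-moonraker.sh -a moonraker-2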
diff --git a/scripts/make_sysdeps.py b/scripts/make_sysdeps.py
new file mode 100644
index 0000000..d052d60
--- /dev/null
+++ b/scripts/make_sysdeps.py
@@ -0,0 +1,57 @@
+#! /usr/bin/python3
+# Create system dependencies json file from the install script
+#
+# Copyright (C) 2023 Eric Callahan
+#
+# This file may be distributed under the terms of the GNU GPLv3 license
+from __future__ import annotations
+import argparse
+import pathlib
+import json
+import re
+from typing import List, Dict
+
+def make_sysdeps(input: str, output: str, distro: str, truncate: bool) -> None:
+    sysdeps: Dict[str, List[str]] = {}
+    outpath = pathlib.Path(output).expanduser().resolve()
+    if outpath.is_file() and not truncate:
+        sysdeps = json.loads(outpath.read_bytes())
+    inst_path: pathlib.Path = pathlib.Path(input).expanduser().resolve()
+    if not inst_path.is_file():
+        raise Exception(f"Unable to locate install script: {inst_path}")
+    data = inst_path.read_text()
+    plines: List[str] = re.findall(r'PKGLIST="(.*)"', data)
+    plines = [p.lstrip("${PKGLIST}").strip() for p in plines]
+    packages: List[str] = []
+    for line in plines:
+        packages.extend(line.split())
+    sysdeps[distro] = packages
+    outpath.write_text(json.dumps(sysdeps, indent=4))
+
+
+if __name__ == "__main__":
+    def_path = pathlib.Path(__file__).parent
+    desc = (
+        "make_sysdeps - generate system dependency json file from an install script"
+    )
+    parser = argparse.ArgumentParser(description=desc)
+    parser.add_argument(
+        "-i", "--input", metavar="<input>",
+        help="path of the install script to read",
+        default=f"{def_path}/install-moonraker.sh"
+    )
+    parser.add_argument(
+        "-o", "--output", metavar="<output>",
+        help="path of the system dependency file to write",
+        default=f"{def_path}/system-dependencies.json"
+    )
+    parser.add_argument(
+        "-d", "--distro", metavar="<distro>",
+        help="linux distro for dependencies", default="debian"
+    )
+    parser.add_argument(
+        "-t", "--truncate", action="store_true",
+        help="truncate output file"
+    )
+    args = parser.parse_args()
+    make_sysdeps(args.input, args.output, args.distro, args.truncate)
diff --git a/scripts/moonraker-requirements.txt b/scripts/moonraker-requirements.txt
index 53fec9a..2cd3809 100644
--- a/scripts/moonraker-requirements.txt
+++ b/scripts/moonraker-requirements.txt
@@ -1,18 +1,21 @@
 # Python dependencies for Moonraker
-tornado==6.1.0
+--find-links=python_wheels
+tornado==6.2.0 ; python_version=='3.7'
+tornado==6.4.0 ; python_version>='3.8'
 pyserial==3.4
 pyserial-asyncio==0.6
-pillow==9.0.1
-lmdb==1.2.1
-streaming-form-data==1.8.1
-distro==1.5.0
+pillow==9.5.0 ; python_version=='3.7'
+pillow==10.3.0 ; python_version>='3.8'
+streaming-form-data==1.11.0 ; python_version=='3.7'
+streaming-form-data==1.15.0 ; python_version>='3.8'
+distro==1.9.0
 inotify-simple==1.3.5
-libnacl==1.7.2
-paho-mqtt==1.5.1
-pycurl==7.44.1
-zeroconf==0.37.0
-preprocess-cancellation==0.2.0
-jinja2==3.0.3
+libnacl==2.1.0
+paho-mqtt==1.6.1
+zeroconf==0.131.0
+preprocess-cancellation==0.2.1
+jinja2==3.1.4
 dbus-next==0.2.3
-apprise==0.9.7
+apprise==1.8.0
 ldap3==2.9.1
+python-periphery==2.4.1
diff --git a/scripts/moonraker-speedups.txt b/scripts/moonraker-speedups.txt
new file mode 100644
index 0000000..14abd81
--- /dev/null
+++ b/scripts/moonraker-speedups.txt
@@ -0,0 +1,2 @@
+msgspec>=0.18.4 ; python_version>='3.8'
+uvloop>=0.17.0
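Two related usage sketches: regenerating the dependency manifest consumed by install-moonraker.sh, and installing the pinned requirement sets; ~/moonraker-env assumes the installer's default virtualenv location:

    # Rebuild system-dependencies.json from the script's PKGLIST lines.
    python3 scripts/make_sysdeps.py -d debian -t
    # pip selects the python_version-marked pins matching the interpreter
    # and resolves the bundled zeroconf wheel via --find-links=python_wheels.
    ~/moonraker-env/bin/pip install -r scripts/moonraker-requirements.txt
    # Optional performance extras (msgspec, uvloop).
    ~/moonraker-env/bin/pip install -r scripts/moonraker-speedups.txt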
diff --git a/scripts/pdm_build_dist.py b/scripts/pdm_build_dist.py
new file mode 100644
index 0000000..d3279e1
--- /dev/null
+++ b/scripts/pdm_build_dist.py
@@ -0,0 +1,80 @@
+# Wheel Setup Script for generating metadata
+#
+# Copyright (C) 2023 Eric Callahan
+#
+# This file may be distributed under the terms of the GNU GPLv3 license
+
+from __future__ import annotations
+import pathlib
+import subprocess
+import shlex
+import json
+import shutil
+from datetime import datetime, timezone
+from typing import Dict, Any, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from pdm.backend.hooks.base import Context
+
+__package_name__ = "moonraker"
+__dependencies__ = "scripts/system-dependencies.json"
+
+def _run_git_command(cmd: str) -> str:
+    prog = shlex.split(cmd)
+    process = subprocess.Popen(
+        prog, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+    )
+    ret, err = process.communicate()
+    retcode = process.wait()
+    if retcode == 0:
+        return ret.strip().decode()
+    return ""
+
+def get_commit_sha(source_path: pathlib.Path) -> str:
+    cmd = f"git -C {source_path} rev-parse HEAD"
+    return _run_git_command(cmd)
+
+def retrieve_git_version(source_path: pathlib.Path) -> str:
+    cmd = f"git -C {source_path} describe --always --tags --long --dirty"
+    return _run_git_command(cmd)
+
+def pdm_build_initialize(context: Context) -> None:
+    context.ensure_build_dir()
+    build_ver: str = context.config.metadata['version']
+    proj_name: str = context.config.metadata['name']
+    urls: Dict[str, str] = context.config.metadata['urls']
+    build_dir = pathlib.Path(context.build_dir)
+    rel_dpath = f"{__package_name__}-{build_ver}.data/data/share/{proj_name}"
+    data_path = build_dir.joinpath(rel_dpath)
+    pkg_path = build_dir.joinpath(__package_name__)
+    build_time = datetime.now(timezone.utc)
+    release_info: Dict[str, Any] = {
+        "project_name": proj_name,
+        "package_name": __package_name__,
+        "urls": {key.lower(): val for key, val in urls.items()},
+        "package_version": build_ver,
+        "git_version": retrieve_git_version(context.root),
+        "commit_sha": get_commit_sha(context.root),
+        "build_time": datetime.isoformat(build_time, timespec="seconds")
+    }
+    if __dependencies__:
+        deps = pathlib.Path(context.root).joinpath(__dependencies__)
+        if deps.is_file():
+            dep_info: Dict[str, Any] = json.loads(deps.read_bytes())
+            release_info["system_dependencies"] = dep_info
+    # Write the release info to both the package and the data path
+    rinfo_data = json.dumps(release_info, indent=4)
+    data_path.mkdir(parents=True, exist_ok=True)
+    pkg_path.mkdir(parents=True, exist_ok=True)
+    data_path.joinpath("release_info").write_text(rinfo_data)
+    pkg_path.joinpath("release_info").write_text(rinfo_data)
+    scripts_path = context.root.joinpath("scripts")
+    scripts_dest = data_path.joinpath("scripts")
+    scripts_dest.mkdir()
+    for item in scripts_path.iterdir():
+        if item.name == "__pycache__":
+            continue
+        if item.is_dir():
+            shutil.copytree(str(item), str(scripts_dest.joinpath(item.name)))
+        else:
+            shutil.copy2(str(item), str(scripts_dest))
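pdm_build_initialize() above is a pdm-backend build hook, presumably registered through the pyproject.toml this patch adds. A sketch of a build that would exercise it, assuming PDM is installed and the command is run from the repository root:

    # Produce sdist and wheel; the hook embeds release_info (git describe
    # output, commit sha, build time) and bundles scripts/ into the dist.
    pip install pdm
    pdm build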
diff --git a/scripts/python_wheels/zeroconf-0.131.0-py3-none-any.whl b/scripts/python_wheels/zeroconf-0.131.0-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..0b73489796e94c2977cf90b2f42797168c749225
GIT binary patch
literal 115593
[115593 bytes of base85-encoded binary wheel data omitted; the patch is truncated within this blob]
zq8Zs{X*7YB2a=hh?Mma^61y6)rJWq`eiC}a8HxAV*V)^4?-Qpq=_zd)?MrYYt83Bl zy#@ucqv{1sOMsl<{6_DZNB;MIuj68wT%a0ErJI~kN2k4&rFa#X92cc@8FbMv7OF3B zA(PqLt@KuVX*~$^wK(Y>!V^q|*s9S-kDcN+49P(GMiWLM*v$_s?Y*y9NuI)!U+L>i zW!B=4#c7YXzSZ;%t$Fh%`JH$fIy4L<#G&sPgO1ro@FJ(rP_*1G{G@(VUmTjVw*YQbZ;sPQ z^mtMZ;V*(pWdc?3#4?V-52%64kME#?IxenBL*JhX2f-gY6g_B5;LtJ2$IwXiFk3mq zV~pJF$s(km(H*CMm)b}ghnYV2VrkUsv>hYNSjXDQf5`2xFA{Jp{9le?{0r)`g$rh2 zW2?$!Nx4*^+kKnR;gP5s^tvc=U*Q6=Zrj();UQ5=%fzV>EjI@=-rdp%YV1{97)F)T z10KFhYU%r*`6TaW$1xnp6<@Oi4-O`*!v>|Aor9QuhoVwQo(aaLe!u}gXtUO!3RTyj zEpk!tRID*1Qo+$bm{E+`AkoQLMM+ra4|P~0duE?;W4S6V4u3UGm;W-n&vvW$2lcXN zgUp+DbX>l{p@PE>t3ZSMLV!q*S0^z%6_G>q-1J&a=Pj!OUAVTR^MaN_=Kc}gV7tLa z4dZ)ehL<@}rdgn$9VQD=07Pb9c)&Jh6(;mke87(V<;Vp3k!4(hk-vuyer4P*_;m%d z3Zb$k(9;m)vTG_wUAw`82nXiV-jhD!V=lI%z&%co()0^_3?t~l30rGQL2%Y9g1$$G z2$V^fFxh#RzEK+ftesv_i`kaIb|t$H)*n9#&l`_;X#9JuLLE}p7cfmHaa?aoANZuU zn0mmRWH6uvQK7VoT(IZo3Zn?ec(0rX(03eMv@TFvFnS?JuU$=1l#b9zwV9H2nY#(8 z_q6z9_RRhs%(o*wVuFqQMcBYoye2vc6XF*g?u2ILzFfHVU>2cq(E=@0**||KYrH)^KHbIASgnYhi?5 zobnpXNn4#$=t(0>O(b8jtf5)Jy{sr0U?Zzc*+cS87jt+*^DsR9Tft@!@Vp5iua23$ z4BZFNEzfoIS4SNZWqiC~bAs4Y91Yb#wIT>sN}ad?o$|g8Z&kUwaF;&=25sqahtgT3 zD7|!?aA45jZ_Pd}v~jcB^C>kLFhB_z{uyltVTLd-r6t;5+0!^U+QB?8uK~7O3YHXIk>iW@X z_%DQ5xxfUinr}s=6;$OB12as+zDORih^wT=YZHC2xX3z zI6HMkopILU!Nxf#`Jap`*i6 z*9pv$)lA=H166}|y6HWi$~1kd&k;CnPX-?NCps8c$q74dFL@-iUekS2wLHF3as$p5 zs7WP3mT{?8N8@XWz4#87LAhJX2EaVSg>A%BhEl;B#*Obbh>hu{-|y`6kN@b5GBZ>h zSsDd29?X|6*a#a{3M1C{p`@7_NzfGSKt(HYUz(4f4$pR4((5Xh;q){vL)eyb14(z^ z&Bkb*g1wZB%gz8;py)EnbMC2PkwNKbuIBD_-kOX@x(D{wwhVR{Ry_Og?))R}!r#43 z@$Eb9AV3G)oR|XnURC3cyRymSwQrv2EB^t!%j0PoyjR41B3{RT6p>xp85A|25Yj!s zA=3hi#F=918}OKX>a#K)ZKUPKK~t4w7c0;l6mvr{j={{C6QjlB(c{n}e|If2Ch$Y! zomEl4c@r3VAWaTfbGC~N@AUv$Q_0B!=d>gT*<COPsLuIklW$wjWm3=}`$dmlnJ0SZ9-gTnRz zWDTdUBGF|BWGkFgEob35c`vqBu25%yt(;YEd-RvJAcbJ_+m76Y4I#-R<1dSZ;%>Hd zTVm@PUIaITQUIkKHKY2qGm(O_3=ue|GzcQ=jCY;yXA7ptcaKXO=wQeUR?@ijs6H`@PyVr+0)h<%u8d|v{L4Ts(=ei6 zD02Xr(LX4rR_lo-bXNbiL07Iu8{)yt*Dwu60Yj74^%boK43_l9^2PzZnxRv*9s-zF zgw>_(8=|?B;xj5N*m+7YO#+&?R`4LG)o8OnMQx_NV|M_|DK_iYtFHGSP(eAkmE%R+ z?1Ps0raj8fg&?!=^S*KpPQ`9DG2^#V+_{!eUkKmiZJE5l8~EJVgV6x$hO?UE|Efa9 zu_l+S=Yj*2#`7%^92*d+H>0R+E)oR}nNHP>uAZF5yxap!fG1|&oD8FMe4-gV-b6-D zk2nnuZa+k+~W*ZSD z84$UF@NTH(bSq3dI&(t*sZv-Nw_<->Xhzzsm#El)jEH)w)?oYAKL+;B6|Vlf50e=* z@a?)H^-RQMJlOwU5g(pPfs+f&A3J)jmJdZ(eqJ=OryVNy2vlX=WZDWtNMf&FEI!)08{%rIaPwmFu8mvq>av6 z2x2G}iDEuk96M}O(L-ke$dnOfpQdlsU3XziXBXG_py2c<@mXgQo0mqm9E7W$>M=Zk zH7tf~lR|(5*JM02wM#(3Fqcmva)*zH>If67sD&MM=VV1Vc{m_$Bl>erm)rbpVEcVh zd{_RYv~1Vl!&HiH!j|$!mqJ%wP&dAQm2s)Da+D=bb-AFWi6X2Z1p_h_XuRtxj0I*4 z05T9!ZoQz8{>^0N1L)P4ia_Ds+@>Gn2+q4kJ* zkeTZO)@TF&9T?-?#}0!BB^h?lAxbrsBq~CTq`KDH?8Z2{>TNF{@nCVyUsvcx(~s!M zJw;`^nXYCo*M_+kyt$*s0a~%4gVOZg@^Lj7SKY03i~N^UY5nYYd;$=6_%HWZ6>OT6 zV2i&ORtY{}MH3YmcCe$F)&3&1y`9Ka3rvF}gWlP@3z2EIe$~(ZX}$ii0~oGvSU}#45?BJBO%$|d{rTkpFvpnl*r34k1?>9>5^g!$ z(U;GV{7%cXOkIV#%{Ff!u<{=(QbnGxifbqm~lf+!iYNu$G|)ddSm ze?$E3B=QdgkMCl#(Lp)Upvzj)f+LfHbcw$VnptA{t8ye}H_`h+W|irWa7y~o=q})f zP;biL7HPd*tiXbv4xUvPT~^hNM(`#TR!Qmy0#sr$@0#GObvE%;Q;ZcuAZ$CroXt^uIf4(ES3#k1Kcj` zf3xsf{!CD5wX69Ud-dSw#~8Z1+AbNYZ~Nz0NIa-!^s|CZxc07{SnwY}r0{$Q*vF$l zhAH~_b{(iY+ussvJbgrAa^WkFlf!o+sdRq?c$!6qb_ZA7E~Qh;j&hi_wZ0+5uK(-I26C1~bZ`})T7Zd}8ZK-Xv`t#l!J|YeEOLx@ z(m9khm4jUgDH_kUX>!yvRb5}beR%(|y?A|x@(%VsiIKmb=sR!D4kKD4h6jx~$J;$3 z;n`V^hlTCJ&ghQ|Np`fvIjd-e^rBx2J*N8@Pl~*dw-<9?SgWiU|GDdB_oaEI{k|JPC%5}D*P9e)lQS4JHs}J>LzNLX7!Q$bERN}} zDmOErG*7~)sM15_KK6ZIz*p;(%fLuSP?(@rZy>h)%E1Pn7RiLC__&&A^hP0`;4Xt* zetHdlLEcb)Mcwe+cg^lQ#2H?X=>9~x2CdNQQ~()=I_5S6!^MLaj)Q9|VRLw%iN@p@ 
z?3Xos)ZotU4sE-#uk4X1zK?Nf880L>ZWU!m>c?U}6;NT@v~@pA)WmgIEX zWvhVEvb0QkiI!TPfB7$DSW6g|Iwle~|M$&b;N2V?8m7H0~eW-)KsC6n zEFb|GGi8UdWce=|j~yG`h?<+&z>tYMz@QRuK2Gz8$#Gwq(qd$}vB*8%e`uL55Y>d< zS72}x7}{>&z$y8z{mDzF<6l}I&{)EB7R@CqBDlc*vQWEO@474MYz0J3i6Js9a5wrS z$8sH~E8?a~%YA=-^OJ_`)w#osFs1f)MW?G+y1_GM^K%mSRFkc+&0636K7yCKTBmEE zO6n;kdQ);+>9ZES0(S54{T~?&;Pofls)h(?Izqy_cJqI%wiBL`W))qFmS{MZGH)W6 zWAjACp`P3&4`dFL71J1S-Uc>rL`YmAlg#R)!1zyRIyLnHGAnUk`5inB!CjbWu8nyr zujEK0M{2%}a6YP08}TtzUC62Pr|dL27e*YU7vlv@n}#P!9a~$&Pwu}<;d3oa+!|Kp z|7u2nOS2>@CUpiJHL<&9nN2g;ydloe8P4@^+YBY&hT(TTjPBgSO>?|8>Gtyj;z1biVHm zoPR43>8eIo*3}8Na7YV-v5;WTf+B#@IF5O`+$ktB5+Y=Fri<%Xm;!OMP7G|KmjKZ~ zt@7`%kyfvwBBnJCcDz%w80kiUyC9MGI>rAREE0Db9Z$5GHAFee|2VnTXlIZVHI`TZ z*f?3?A1hxn7g_>QP+fx5**^?cKmFu)vz%5b3%kq(N>kuUB za(GKO;=j`fr97CU)w`G&3C4(@ir$Y~T3vFZv7s*;jTQaJ|o zDd72Sr`q|rF)HM25eeh#C{bw43;J*jnSuwAW=GhXkcAF_&$$W;Xw2Z^1n0Ev4L00t zO8`X(YiW=obK%-iAQdJa#jlsY&uZ*u1dp98 z7uiW9Ubfxt9qXpG^0)yLGyVV$;NHg$G;r)AkVqwNKyc07%^7<}v)M|Vp1+Q3+SKW> zgu4cMQTEACHPk)hfvY#F9tE{MpzB`*qk}@~Go)+K z8t^KB#wfiQPa__+W5L=ql2rU);%3~Z?grznw0F&CK`B8ic;dR7d6buceA2hPhxZLy z6(Bmy&qZJTO{`Msl%$_WnS=zHptuzo7XX4MSSFF-1PNiFkq33K1_kRo4C9YY%}^(n zJ*|ho0x691wH1oE)LxTwjJR^QrJK}U2Wx`EAyMM5Q9FPl+GRAdBmG1codqnr8L(28VGo+a%l5*(3ji+^|INEA*tIP%4A!9U@;S>O-7KTw!qjd*L zRx`{^b2PO>5cpVNyIu#)v?24I{!cry{l$H=r)?L zINv`@$c%%CIGhDqc%luyxXke> zFIF0udUlfirVsyo8+!(Ng}e)lh$yhYT@@ei)A#G*4p!I##pb^=VgGM@G}T5=Pab~R zQ6fZa(P^U#c&lGfHGFE%?Xl^FLH#$|QAw2vG(}~$wLLXe&<7D{ zJk8(2by1Y0m)+6cQdZa0a`B&p`+KM!HdM^yS}TYuW}FsOajMe3bDTy^VRWd*0kZg8 z?p*R{2`a^mH)wk(WGgtip$imU;%(*}yYx<|xXY>Qo5{(lkUE%`!gacL?xIH=^99JZ zm(k)6>jDE&aj%-Mz?QytT(OG!T)6e8P1eI5&An%m-BzOfBXNA(!J^zowvb`;QgWs z^JxSe!D`b%2k8RHR;3Bt)`yPbj|kByJS1#3k5ADdSohP-l!zi(pT*koB$OA+6lyxH zHR{r#-8xo?%+LVGyCtJP{wtVpcpIx98KBM)++A-R>1LgGh!8LSvCjpx+-3>s)B->WEjWv4IfpA zH-Q2dYgu;8A1-HHsNm9$au*Ijcoa;6Jp%4{Dl5$T*AW${VF%=HnOHIdm&!I62IvS@ zFbAdl_rhzZ!;FvFA;6_be9aCJV{^nEH71q%p?UvhG?9otj-+gGXgU6_5J0PL(nG$i zDMYMx=n|<1Kc)r4R;{DT>ZN(kAjYL78|f9^0UQUZr1XMbqn{W?p{?v{vzN+HGXV3u zdrZosfIWt2eTegbkHzXAxeJDS_d-G(*{ga4)>R=O7HsJc>zy^pGGV&GG=|t7tgUps z!GyHH-&(V$!PuI@rQi{$K$+&#HH^Aki3 zH*Cx-P()pm(g%I)8L@V!>%x=S@#SppNb`lJI_dn?4)c(@MlUO~SjIw?ZZ#BKSMUR$ zY!ZoQ^he!rm~@{ULZe^|s|(vnl_{&+CAV-|hxUXd1TE4A;3BF`%;c4799bjd@~ev& z0Qknq#Z;U@N?;K2ixWrmB5q;@^jchwJDWn7-hl->F%5h<0fuMJf#ce;X<7IitrIxG zwg^Gq&AK{ED4J_2*m59uq2@K0Nz z9Wv=W`3)n5GYcXT>Ko{@W#McvUS0DF9fP#zwlVG=fk}=!PHHC8wigqPPm$1S&rsJQ znlKh+@#;O@S;kWD!cIMGJPJ9H=tv%hlpPcU$D5qG5-3Yl? 
z;C+{kQq~S^Ze%(dUSk-INDh?=5qCet<{3H!q%5Kc`CfkMiR!6on^ffX*}&S4T?J~7 z+uGnlO>8VxL`M;_+moPy8aT9WtM=z(YvNT^&(NjkfUTF5I^F$*NE zURjfH24X_;&5<6fs{LSm;fXOod3q%0%0W5|^7-E(S7l5kYo(-KMEO&MfSVRsHm~Rv z;XprIKhUA3(u-xKJNBkUN9W}o@fvYX0Dm~sA3Qan|_MVj&LljJz1^?zgoQz)dw3v?R5 z`)Lv|Te`mGc?XFU*oP&SP9FkH5=xY%2U`?8-IEYD8M0|2MhnG?#VFpVt^Njs$ll*R z*F+vj%be?f#TDT{VqT%X(S--FINn9j;sSBejARcniV;Ho=oo4;x(7Qkq)Mz0;q^qi z`aioIA5~VKF4p+}R!+M(8vnwbSo*!_`T)+^j)QddoGz-!Ik^Z22Nbj$?N=brY`<{G zl(ds4sNI*|yNeg5N`2+1Q>91|8M(lswl4siVkHb1=zI?xWr`f!{_p zqJp<)oPCT&$JB8uq5nu8f6hKvBIk6pworz5d0Yc0m7MN1~nF?Tj61?cA$WG;OgsP<&2Qag25O(w9mk*ne%&A_+6Pv6X5abxSgSQ_cP|_>@C##7=hha)LJd&t zJ}9E8n=0}%4>C!00M~;jy%uZ8ke$WUiMbh$B*W{!a$~@pI&u1-FCdT(Z5Vnrac9e% zbK=P0ofKn;pSCQQRM0Kx$Xi#PQVE<(Xw~blp>uGoesr-CJY5sQNH>`S5DesKG5QRxxK3j)X-!44<%MwuB}16jBrUa6_hQpQX8#E0uR2rq!*@_k5A~Mp>|cu#eH0{e#GIJRC$w`0 zauhK8^;jRBRV(f;T9G8zU!|fwVZ2=Mz-N|^N{6e`?`xl(z(oCa{AJuNrNKrH8$yS^ za{4*55fP}Dknb;h&-DHQt?mA1Q!~^%FG|}RcIwG!mKyH0cDv-4ixbz64>z$=x7>h1 z>i*6o#ZcJ3y^EMkCby;!q53EB7Szn};vuD^6XE6f!-jcqMquwjy zZxn?;3NtOQn`2b6h@(ine?UwTNnJa~DeJ!&!i^LE)=|kU=#=ea#;n6}Q@9ZO%HG?4 zY>9~(w#zV#viB@^&nzHPolVgPiR-DCb^cc$s0#N-5{`NF*dD-mb3x346oC)_q~Wlw zW)x23`T{F&Lsb#2zVuX zfB;7~TT2j7(Cb@jHxBlHXVHO0h6l#u<5A8uHVoM=7p*cT<~1vC|BK|RA$rqV_-l3! z2>}3r_8$hDP7eAuj&`;VPQNc?l9IOV`LDt~QH^q=5NC$&a=igYr7FVPj8q~IP*Tnz zOpuCD9{Fbl&iZlTdIe5HQp)_{v-IdZdoEd*}8-f=(e|5)hc_09vvjM=?L}vaF*2B=mE&g{{=5VwSPf) z7+LzFi-5c_k!?7gN}|sai5z*tDxRT&(8sLKU;CO+Yz;V?uz2hAfhj$6i)}}HVrq*~ z=qMXQrAW^=)=CxiV>@pmh$jrD*&ul07U(95fI-6A|5=mCGuzc#LRp0Jgyse=y99Vn zUm#a^UNehXVD$uGN&^9YbriOd@`11zP6sIFCb}(?lYrlq!MxBqA%L+JE!cZH-LO0K z*ICvt*|>7yByOE&ZYz$5R1K9ah=*^?lB&S5Qf{R&rL19_i`deG*tpH4fs)S7{ST-giYh+uvpQ=fi>{I|PUfAF?hG#7xAzRaLwG=UYtRR5iH(rnIgnqr#rnls1JrY(N?O*$Po)!a%Xji@x_iubMyHD!3y!cdEv_# z*#x|_zkR*GyL9sj{h#tj$$j6>0v-_e1cAOi- zXg8|?+@j9mJ^w~vIME%p$hjc#P=8uG@wK@I4t%hoSX z4tMeO{x6_Ql3#XZ&97Et{MBl|RA~SYV+UJ9TN@KP-T(K`x)-Tz*c`C_capssIt5g3 zVcJ3m*X(!Tj8qVA_!@vu0~MHD8V2!S8y7+!Ph3LUR<@g9)(^3(#HMb?nP~$~9olZ> zg~*5vaNX0%wTC8TgmAjS(U1Q!kdUnPblR0F09?N8ZrfUB@~eJT8i4OGCtbALUtF!{ zj~u;eD98zMt`$#msqAFHYIxp(gx?e!=1;yQ-npS_S#9iDYb=gKfh*xL8U}HQu~*Gh zMbAbN4%CDI*$LpgR50o}XtCpHx4YN*f=Il9CYwy>&PvzV`mR3mM@i3<;fcOlkqf#(nmg1>c zkWPS6SZ(+xHYptF2!7&B#yO6qBm$#m+@OY^IJXg6&laRPe!({Q*&WURoL*TTqn8etdw8)zm?1fJ zSClyQQ`Ev5sC9XHZY~t@kq$i2$M&|2?>2mSLjJ#>qVPG)R>l$m{`)5TT90g5)`O0* z*Os$OR{qQ7rIUW?N-Ia05sN+|>wS}%SKACiv7k;4WnA@-CK>Pv?lIm+z4O=?`3fhs6&9g-qL3=_^7$Po93-7At3}*Bl@2ft zQc>eL9}`JbYc(9(^sQpZQKjjvs|5ze{HmTz{t)tbRYnRe{ntA$jBizfS-yTShhVZ} zNF|VECmn!K5clx=b^=(u4T*1&v+N_rGXgX`5xz!*-WD?mJgj?OkX5A*eGx4=MS5#=TL!s7`!iq~FL$N{H)cX06^i6!jJyn@SkbobN)QKDlQA ze|$Ao<2l;|-!2Oo#lIPI_-sK@%obZyGL8q*ih)@Cwbdy+5c2h~Tdh4Idl?xkI~3r} zk^{vw$T$JD%pGgC)e9w)K0|&THWM;kmxiY~`Z)A5{j=(vBzuB&G9&9SNcee`P@u0W z3sgnbZR^AyL}#H7m#OL;UotyjKPjwczcp+S?qM4aq-kU%^X>)j(XwuixH3EYrW^Sg z9W+uYw}M{o-hFoEjFOy#N9!_ zfK#z(2m1LV#ijP9!k5{o+gaUpHG;Q`V5XM0bYl0+m>+MX|3roo*C7faD4 zVpd~eA|3~Xk_w9x>x1FynI+E=MA2K?PVhW)@g@wJ8J*>UPlepQHlXO%)Hr-s>L{jv z9x3ol4gyxP$Z30A=mc_J==46c4jJenVTm4%OM5;%hHRrXZtnHOy<;Ff^Pt6a-Y8gb z9ygJ?_>LuENlKT!(Qs)=X`fyyp(0Mog)}f}542yhAO(NyrsS(~&K{QS;kN!R3e)fE zjr6m$zrXR|)U}wGAJW7v?K2cPCqPo1XK%UK*qVi?ew~OcWZ=+ysZ6hb*QRsmZQ_Df zta0mrMqq=_kzzL_=0nq&%HPB+JW;BbYTcsk?i5k8QTl3Ny}G%$z#*%k(n}UHl&~dHrex4| z-+TeWyUQP1fwGO6R#-TnOlS>A6f5`TDczIEwt<1A8Bld6pRa9OC*x2~37CukYDgwc z4{%2+BJy~(b%iplV9j*f^`TPfDbPf4*PR=JP$BRx$jxhZ9a2!Usyw9k zPI6F)hR*6;G$u%Z1g2qYREv+kPg30*lSS`qn4 zU1XcP^IVoeBNju7iJ4%jYym z8SsXmsq>F3L+)Bafg>P~s@Dm-Kk&l`+Vwq2$%AOO6(%=|Nu=!&u|wF4CzRVw35 
z{Ib%Q_mpS~x9l^wzQ`r79nm)*{?VuFP|mxLJ$8|y|Nc&DPGXY+v$nHyVI=i=)Rc8>JTl3rx_;qHv>2ai;e zcfu!g@6|0h=G?z;RKxGUXQgsVuA)HQM8I_~j+RjvC!P_8Jz6I6K^vAFc@s2|Q?UQq z&JF+pJA{-o4O0c6GKR_W?yalfas6FGU{urlFbyxW%!Q7=mhb5x@coEIjZa(F$Nz&6 z^3+(I2-aFN#oB&tF_%V6o$%az143h9etu>zmYQciM~+X0d=Uw7e|fd1-gF`V~O~849K!7KHxiGaH!Q4Y?SL49=|(vt201 zdrr=jV#D-+hcO6u!Xz?^^U)=Mr2v0kS=}K1%%#=p8YM*H@6uXCP_R|g3ziM;k!UMq z))3;{Oc%(%Ep9hKjWB_M#|E1pV#BJ5jc`t}svAw^@H^CI@+uCwBn0*qiyhp;+)C)_ ziD>ED04Q4@j5XT~=d93CT<|lkm1Qi)4;xzTVMmnADEO-nFfUK`o~_(#NMF?b5CL z=qXKnL;BC3TeLsj`+O;@?V+l6JY60BZ!38`X^I`s`!?WNR;1XQ2KrN5ltPwZHqL6! zUe*-DS!B#vsTy!DLuA{$JNt*y`z1;E&>ukyIroT9h#Ms1NA2i++&g5gHuA)mPnTRD z4Zh-sT$s{a%E(4`jdIM5`VnXvFkw0>q)C={U|X!<86Ou8?d*(KE*JzFZ=D;Gf3Q{BfY3Z?mX3f)lHXVkO`3O|-NS<<0RiUIIDpc7;kJMlrtJ=i8 z>!%T+Hp(uZ+mg$9PNX>;CYy2vuOjCu4dD4;esf)patVIz*d3`QY%?_9 zEXbI2ee~652u{^Ev#D7h5)x8TpQEVpiD;{+tq+u(w6HZ4tbx#`aJv_`Ue3xorlLCC z9P6>XZk@HNriI3~gc2EBUEuwvFvQTdGnxO*c;$Zk_W#MZ`JeD@Zl`Z#NacwQvBzN@at$ef# zlJo1eW+mVHd&5RI@Yk7kk9oQ`7Yw`C)0d5}xMY{)b~PY7n5(>Uut`M3amaPMIN#kY`!ii96q`^1>^(pntvh=H^iWR3ZFaF$Bv zno%c|s6+N#x`B_tk$79OIOuSPoiI<@5QWXvW2T$@MBP3kAu?JTbJi-iH#vi?faeV0 zpZ`1zHT)Zmt>3PD^skEz&VQBx{#zygABW+7^H1%o9_tM~!&q>BDVwK&1loM8w2Rs>%N# zk|8Qh<2JRAVWm!sURjV56-JUNfiOU%Wx=zdA#+4=2oxH?SB#MkwI>U6^~yzeAQlZX z%Fujr+CA@H9dpm?2G;9XefSJ#`T$;L=}g8%Ye0)sWEfx7p%1y(U$Fc%lcuQMv3n{? zAP;lY$bIIm$n|OkH6JE;kW$P|42#az*scTpz;@ zSc$kt@`zf`i<{=khydyMfI(`iDS8x+S4XQ4a{+6Gm$46JN>5FS=?~Wv6O4-9tCEn7 z=~EO5Jx$he8LWvGC~yf9uG~NoJHW90*2f4A%XFVjiK=I%m8jqCFZ)TIJhn|4mf;bJ zF#>TpJqEu{h8>VLb1Q9xqtZ z1)MyZ3q*A-z+KZT+Z0FI&>{j#(cp4=@NMk$6#4v-59XKaXPMD|rsrVK_-Ekk2;!ao zGKMs;prYX$B1E-d@94EKu&@Ek0{)z}RaI_7n<*vCtRLnmts0 zijH0wios?-r;sV3bB|67Mp&Wu&+`97A9k0EkOUJe3rG3LOq{2tawY$mZeqCMCNHrV-=gl*yf_P znY{L$ZmYRa9Yhs~4~TB{ELONu%TQZ2`C>^8iaWO4n6d3ob}a9AfSv|cF(gjF2Sh8q zi=S_w-YOsOpFLi}gNnUd2j;$*0!bwtoC1*j`Lc6=yEq9qSBR?>#7{&4HkbhXX>!3k zHbo@ulIX#W=o`D88)88x(rD6U=MItDe;SC55Gx>K9YkqS<|;ENf+y6+6w88CkLQq* zb0FxO2=mL-l0(5L!-B|!H=E^L z7ZwteJj9c$eCJ+Hhqbe%f^YK8%+<_}5yKY|GQ1y+9wM6RUmN~O2l2@Aj zNXzw}aqPf>TiDWUUkToEzW#$kBQBoFCnJ zties!c(}WCb?R#OTycCA+mrq2zPbwWCz_rj3py!eoOJ4VUw>ISMi=I~chxbn?Z8I& z_HOaHV~o*WVPoSJiFzLOsQ11fpQ8jX82|bhv+97SlI|H=Ye_uH3+sG~UjJdj|1@;8dqfKN#*a*ccvTMMd%MM`*7D*POb3T?O3X3ZlA11l{S&BfP^BUG7yPcQVV`=b6a<9G^!n-DyBpq2|8wRJPS21tP1n zjg01QSuN~MUU=B6pps2?ju9ep?NJ%SL}pmK>DDju0VgO6=pU_qrWdX+~?hp71^?I?#`F!h$hqQ zz*G*4?uEVtyX+jG9MnyCE4CA<>Uu3lCogl294O-D#WS{i$o~wNtOslLdF2EZqxmUY zuXj0538$&a*FYa>`5D`VUYS@U_@H-0VhTmZ4LCV+qt_~R{f%(T<5 zJi+&_xx>?K(MLvAiMBqBS{>Zi^PgeC_uCo0dktP;W zyuFZefRmVfo|Lm`zEa+es7axmUi$^|GUit7ab zv)7#z!%C?Y=-s0&KIz`>cl&FZK}8&vD2}V-4apz`3q5{=U&rbOC2#OLEfUc@6-T7u z8S^JFyw4|jsCv)8WY5y})7Xw9#O4R8+a+|s#@FlVkE~(PE^W_2vTCFF0fc&ic|3_n zcsn-Q3E98_o%q7nBG*N9&1lfVcB?NVR^OQ(gm&vG+(_5KM8h=#Eo~@z;9Pi&%n9fs z+M>2yO>S|iI)t#(I7_5K60luxw@RrIcQ8lC!RbS+m}@CvAVT14@O2a@7>;_hu%+IP z5Obig8Tz$xP+s9sa{+8lQ-k&?N#0GrK6Xsqp=Q)H20`G0S;(ZUAweN@PSCv5V<&Xg zmrKV{6TF7?z^TJx)zZb2hBfIBkcm2y1%bi;!`3+mXA*VmKDIrvZQD*Jb~3Tv*fuA& zZQHhOys>9uPMn)_ZrwWdow{A!e|GJvuIk!*t@S+5PvRNAf+STNrO{#bvRA(}nfm4A z_j&g0Kk!c!Cv;J|O+_1C{nGBV2Noj5$jfg~Xi_KlCVMA@L1$4uUNrQ;H`C9^W$PSr zEcnxdn;U-qWk?y|J}=ZY2?+W;_Pja%doU&QH32?g_H{qu;KGlSdt@;OHMZSk$ba!P zs}T00LljDA0D(O;;NcKG=$_+w5ujSN^4Stp-(iSM`x-+;(E2ZDGkH#U!x)3OnqL~F zr1WJKRGtC*^UtCM#$P1)c`648lMF}b?zrY1P!GsIXxlMw>CrZ5_hb(_ZT$#O%$aBy z716Or2Cwg2QDg|X3LLCahZOIb`;*a&^lK2(u-Nz7fa25SU$o!SLP5PL?dPX$U@%@5 zl2|QU#H?SmYA#RSNRW}*Vq~G0A>2AxRRh;6LZ)xi1P{6+{S{3&GbnbY34qH_u>6SZ z`G#cP=yjJktMoyWUKH!?GgTRN(=mOul5PSq+Zc8hD@QGj2-&)dPlBX}62vNR|EFAlSf%m+DYDfel$ 
z17F~eRNh!$vTU{qQJY^{iWS|Q4T;9Kv-AfL`*2LaXh(?nEk|d4wS@YL1?}o%(t-Ne z4oWvh%5~e~+a!KYgHmVugIdH;*ggq^d0r@q*$LuVC#%8D_)`dt3h^X(8FFN{7+=$w z)SBn?`w{J7k`s1|h>+82&68y@|xfb^f@q+Y4l!yG7E@Yt4hsU9q zA`r@-2UpCWS%1U!Z{LfYF)IB_yNuTmK@?kb)~Z<5aSP#Q8QrrE^0fj1nPH!+i6pWJ zjRO%)6~8*(G*XCRD08f!^(QBNd=?%N8p zbVZ4Ise15figcMMosJPnECjnminvKU`3}kwKMS+A!jX&`>M3OLNxC8@kCVM@ENlfm z72j-X|7;FWMmgxu!Ln&NPZ#qf<-{r?GAhIFZrUjje_HxdA9@)$9(TtT_(HK3DYp@AC|%qstw&5O3$6-~aB+ zAK;e4^B;F!-oEK*332XmL^H26fAi?xba*7tbOmf^m-kyn#w#%4vV?r29V9>(Yy?-I z^3w6}Eo^I!)Fvjhr8I_Jq4~j1Q9oy6>Kr&%XTo+JG_p}Ds7H-eZ#)y>UF4cl<7N0$p}sRku1EoL!U*%J748UZF=E>+KFi1l75s9wUp|Sq=LG zh>@r5M~dD?;Lk+)A283YsyZ#0{*)I@_v_|9mz!zcy7=uPq9+Rqq*OaHDO+qOKc<3)pO!IMXkJA)?aOz4aEiIMi= z<#MhglZ+jK7Cn=`yDzfByFXdB{Hw#`&sf$23#l~Rj0=|q8{-etZw>o4gCc4~w@@#I z8gwBYIqnw^;f~6nb&2r1-d}Vo1)?u$jrmoB)SJDg8g(frvarhd_qtxjGHyEcRg>#g zsvOe()L|n!Gu@D-q7{~Cz%k(!_K!%kq3(Y%?Y*PdTsa4%k2F!nUY;6yS$LfK05~Xf zg@UgNlqdpZ-xKZH_J|RiAV5egR~6oU^#;Q9y$m7lQ~P#{sO9RsXX?CetU%1Tbn4cf zd4$A-fCub~xYfq5v||q?pI(}{aZhdb5RBB)xDB4%zhW!*=o1@lk3QmFuQt54wT*-I z6DBp3+T~4JrsoGT;A}AN}9{hn?Af z$&Y6E0M3W)DZ3wZ^;drr)8`)+2+Im1nv~ztS4Ug*QZH&5)1tof7DKHfO<@+t6i)h1 zd!Pd#s3)boE8RGjQl-I=uX{mi>*|`8SQ&@O6CwHd@fuL)u17+e#7LB@D-T9;-Qp$I6Z)woZQ$q$7`kVh&k zsepovjv#c70aZ%0AU;L5d|M=xz)k3QoGyx;xyf}Z9Og`4g6KX3rm<@Orx@Q$1HFg2 z(Oti=g?y5mH|5KCf)xWoh4?|j8FDQsv1Lxr#VJyYh-Fmin4Q46UeR6aue zdO^NxgJ@{zG^!7HH$P8DL3{#8Vi!BDn)J=ltH_mdjwlRn-eU=J&9dsEE6Mu8#(ZuGP4KYUHlt>rtd5AKs);XvN@|z7s zATzye?-+43DhM|g8;Bo%8Q`jEu7cj-43KAgX=871YvaGUNhzFG^tt)^`nrkB%s(Rn zBGXascj}p#37;Ub1V9SOz+=tvIsFDp9UT|
#n@&Bqnpb?u|Y495O_1vK&M{p@FHF zBa7XOTO*?+y!-K76LiTovMgBsDF_5LT#@JT13O&T9R7uuS+_#LJO@oi%MM?=vir*T z-l6PoZ}(2g$#uzXEyYFKpNf2+R=FbcH$RTZTBjKgE#wW|hs>Iy+*&IZ7syQh} z58As)KzXYQ`C_GLaHX{+l24?i{aY)*b#Br$2##d`DtS(e+SH>D+bZS!z#Vx@f1qTH z^K;g2iCshPxSRSuWQ4y!gb*06{tp3T&yn{S3FrB$pp}*zMj12+Qo^kdI^ETa;K5RI z`?lbdV74%nST^|%`s2{S!Hs=Ktce4}gWmJ7Bc{R+Or#oyjd`*b9>`_wnHk21gFVtv z5IDtX6sr+}>vY%%dgbOnR1i2mo&c+cK`y1OB)nIu3`H9f?*Oy&{h9DjWQrvR;thHJ zb@kN^LzIns$faFywMUj9N>e5Ppequ-}ypU}XU(nqzu#F838O8aI;Gl0C z_Y+g&;gVRt`!h9!+U=dT)tblKOOImq2u1$#?pu-#vmgb+M=jUWhfEU}KXyXdD6$~? zkTud&rQB1vS@q;J%u1iG8}Sl6*<}+HE$QGFFw~}W!25BkS;ot}(n}yyXj!I>oN@;+73Z&oM^%Y07c(To zDEr5x_5`~gb;uQP<7lRvbC4{aA6F4Q;W-v1W*2k5_v5x-a7g@$q1B86b>oyPcWO`x zQeb*R5*ZlA195l!GhI1N%?D=)CcF(OXv#$kL}uZ>rqm!c3X(%9SOr!EVI0&p)R?gs z`)YD^dOBWpNlbuFi66Id`Ft1N!MNRGnEDk+Q|H=?zAX;B)FLAU4E&3rQv}Ob=p|)a zIZlc#Uw<3ik6^6rpC;nL2#aA&-$N56LVr_WN=u#tacy((Op-0f6}Kqj#lDT3`dA|B zC-dQFMvHoMe6!+99QlAkv}BGQ&*+LOv@$i=^~V^bby?Siq887)7TI0vID52-444APdNg2u@eU9s`Hn5{R>YN10}mNeVZcChCSiXquRdL{>(LNpTAqRKT; z4jWyU7J>rWPu_(nI2-4^#vNKR$LPu`OPpeS>Dzj&W51dQK zQKohY+C^U~W&ww*C3!r@TU)#*JYj{xpjypS_JA87L7wfv7vbls|}_`FPaos1{rEG&(Lo4PBt1C3+_9=9Yr`Qkpx5` zF%T#v=WFr&7ee{2y#I|39(--Cl-%vEgTj_UR!o&4K3m zr;xfM48mq$&HC>cB2gaQk`oiYt57#HSP&_rg0q z2DOW&Qz|Oi#!{BZD_-`a)Ki+yy}?gYqWQIu*fEu|+v%;0x~@t4vU&zHJ=s%F7s8}6l@T~D5a3H1H3%2a3?7ZZ#^#x5y_JVw|0pHV)IP_dh z8*Wm+72?AO#y#2y{FEQA)hKZQ*o8AI_aRacd?=k%Bqc!!C?FL|69e0oM{P5^YG9C3 zsnM*F;CG8f8)sXJR8pfasE$m(eu250yAz+l=p2C_y|_kF)z;8Zo&J3@SRoU7T>mDwX}>rsD2|6Xn!*wK~HV1m2QkPJWZrYx>L(G3S;ZXt80?U;Mb5qlQC| z8?E8%W{ND_CAlnDALQFwHzPGW6%I2HQG$kn=otIF`K9b(46ms8IfxTz;ireGzfJyp z7-x>hC+m~oR3ak(01F@D)hVxEW`~?7bwDbx601xupA5EpUyXb!@Loc9-Oo&}+P@*{* zFn3SzPoB>opB>QpR&O^QXa6epAs_JOA|KwRV$?bYHj;Kx2cyvWcJ6eFu(aU_^s`8~t$AD4oFmGh9!Fuy zIq_SBd`Tdy5-5zWfRz@ZdtAe8fDuMrw!bM>kIj0ZB>oPVPjkJ9v4N+$aX_GM*J^;L zvT0*mM~0|nJRn01Q7#}zrgL45i<2k`?aTJ!^C7Lq(4B96qdR5k=Xeh%N7PB(Eba2^ z@aSe`6qxtX-sI{DlgZNrMU}|~7S*Y{Jc#(qe6WwX_fdF3Yo94$r z#QDjB2T>tEsIF~=+gs<5qD=7u$P;sT5O#tnIJ8jQ6VQl2*$@$ghC0ts44-hiVkRIq zJyADjUT5_Jhg#`O(2*>!wx5{}fPoZ-bR3d=y-z7MqJy`*U2!mt#8@v_efLkP#4{4U zvU1G|DBrxr+3vVecP<17*ReY+}3pd1x{xRo>sW8FWXZW45~)_TLRcrn{c~a+-QFK}Jjq14$9VM+4b+3P zkQ;@`n_M=OZkIab$9Zr06$mNNkfnLDG$!rHX81P-nVApW=3PFakrMz>SZqCzWHHUs zjw}rtk~_wS!<(}Ba&bN1G$Ab-WI%DLCD zC?e;gKM;Y1WqI;WCM3a+7}+(XRP> z!cRs+AAzaaKa%Uny(b2fVmlc@=eaYVvy0fqVKmmHp>?jx%kAw6PlViw-+tRr-&&n`ZtF zQS-RHTlP8WFeBfhDdp89eo8y7fZ9^g>WNoh)FhgF2E|%)c0@1RqvM*lGnuu+#Trht zT~|%WH7AR~THEmPPWlBq*EzjSvxGu9@A5yLSayb-FuM&%O*mCb@}*ML&am6hoh+3n zOpVKPvDVp{*X0tChJ&~ym_gAL(k>?(q^9WIM$f9ZRjc9D+k-!l&a!2d7GQuZJ14vvlU$aq?v9aJMR%k*O#zo0Z@l-8vuveZ!b7U&7a)fs+mQv@v@;G z)(Y`zc-J)u4ki}X7NQlsU8jfAaB%?7p9aU#Hye*rN_ML{q5*2q7OOj=5mQYiAEE9j z8*Uh_6owq4b(SkjlBm6EX)U7en@Us8`j~<<(9UZgHL1zPE6b^20(e3*Wt#vpbdV93S zDL}wJuBHHn46~vc9OQDUd}R-P_If`^vpjc;XAKHC(Vhq8&`EPzio$tc_egs2`>K|N zXAYgkpS$yw~4xqL@l{g;#EP?k|z=BKLk1k=w+8Fgt!23pw}x(KF;>7OxkGt(0j^sDnn z2SMe5H-~a}hf%6Y_xCE19QmEZXqkE-|G6SGFtqOITPkh$Ed=@hP5b`uiry7-fwL7CjkITP3f>W3g~B!qnT> zPB@m`C`4^hU2ZuB0@QR1IGJ-rT-DHhsYymQnG;3pLPa2|;M**q>hc#j97_8Q8LQch zfAK-=2UNVPWd#Voi!VG6=^U@$Ia<=5|9BB{U!vg8Z*g%CheJ$au3A&MiT@Nf#ayt! 
zYW-YJO&{un%^*{xUge9k@O9PWx{G(J)Yzut&$W5@jEc?OhomrpX8@A}y*elD7*(+NNuF0hr z2RRM-55D=-%LPkU%(oS-xs&6Ou4}jYNmUi##4a@!(hLorTd|fZJ&7;dD)yI0s^Qy; z#*KLE->Pbb%Uze5xD(R@rW~+Xi|kEinVkLEwE%kBv*C+FNBFq!Eovm+BWooo2|}if z@+@s-`|40rb6=2zanM8_0X^ZF4Uun_>ejh!SGGhl*5bG)E^*#MTU;vm8E{h!#E0K& zVJY9546ml`OK5cm3(lLDG=LHY!)tW}ot%@zi`1*y>ivmMxG!4OP{<|p4TZqc_c2!k zIEm5?80>_V86(~#Rv;2u)RzCUy#c571s@)@KdRHg8H9T~iHJ|bbnY%zx5B{YCADr% z==+<4pAlXr5~OyM0S@~CC4)Z(&7|{_u1pi!sORbE&TurX*HVJ-Sny>ZKv44%EQ|+L zR$znS?g###KL#-gW3n+K2nZD&2*|hC`+rPQ|0D4}*3oqawqt%vy!$i$w!8k`=zZ=Xlytim5FcFU8y|v6F}$92^*TJNn<= z-Yyy^)o`6F*2Ak<@S^T`Vj$n*C)+)+j>Bf661Q!Z3&+8qd~gVRJX`cfn^|`Z8|7Nga!l+q4#>oQ_YgGY`$euLo2$kMFW147&FS zWh)QjcoPtA5FpM31bTSs?bFC=R678Ow07&G!Nc8-kz|&k6|^$ZwvW@2t^-8s<;*NkCp7bjOwCtf~SL5rc7 zEm&4*?K$NOR;iHE?5L4t8SG%s5S^+0rm6Li{#g0?PadCAm_eX(9mE>G5N%-m>rf7 zxc5HGJD-{zG_b7Isxt^PUalW+ycyf5!vTU!I!98>%^YK`SUYJ70c!HeOEL>yG@jeoc9N-WebpU*q~d`Ds+@KFOo zfFo}Nmig7yUFzL915#gu^y zbJC&SFjROD!oLwjNHIe?a02V+Fy{;v_okghW7CHi-3Gu8R4HY(78^Z6RL^5@C-R&O zA>(~xPD6;{H7G7C8-lnU;f4tzLAkg)H;CCDTHWi}JQDXb--9s%+c7-BrlVLPrOuR} zrHAqFKtm0F8D8naS@sGCRzCx$P!VG zk6QpzD}P%4G-!fDGgl5dh$2a|l9S zz84ib{eo;BaE&sAomw-d$?46xg=sQVPH3)Vqgfdi=~;)T@y38C{hFv)iB2z8zY zoL4Hp12Gff5zh*VOmqzfq(V)=q#xH|f%9PuXMpy6{~d{QAYB0frk%s`bMUw}2BD@V zhZqMo+S@_CZB7I_pJ*8`O;p`XVY#c^*K?wLB(1%MaCt&tG`xwaA%7q|{3OUC*YiG0 zRkzqe&`4;`>t^}Vd^(cUyBv9xrDajY8n(vOMNORT)m36!e{F9HwbfeUOzE($xj?c^ zAvmMVQ$j zRPB&%wlk@28hdqnoLGy*RpO%&Opw1UHiWr#U!#HApHV~a0R}8rTTLLa8rQ?Nfi&a~ zE=HNs*R9{P7yc8Pq9dk(LTp2d%1Yf$K_awF#7_sQ$jQ5Pv$ar3QKH$RmRTof^@JAL~NTAL(;Jc6L_3a{7BC$%z5jTc-NMLp&r|(*o0sKZ+;J}+9 zN(mbfO>jZgcYNA5n8iH_G(~uga!fdt@x;+J&0yt45C=9okLaX{`h9OLIsDHhaG8EX z&YBcf*d zJi8Yc^bze=Wn-`!;+PxV@!)H}3qDgh>jvHxt(>ng2E{y^BY)1=ATLp4;Qi?uwM6I3 zejld?hk_+2vm=$>NO0i5ii$c;jQtrZFN{?j)5A&f?z(py;D#fwOPa$<8=C-g2LWRv zD}+pZvxc|L-&gRlw@*cDXLWaz{F_N@cV9~Q#b)VW`v8&eI6MY;AqrGU*7@z3y}Vt( zM~O|RV6qcIWnLn{_~K^=WfRAFFp|rXh69AqVvpUq#$x7!!2#TkQkvuJJ1QjxmTQuC&`|n+I)N#Tbb{a!qHZAYuTJVMo~UghRP}d$(akLVDk< zI@At|xVkal-Q}#7z!>8_GTzNjVW43z-(SvB90MU1|12WDA!p}EzL^N`N~&hrGRbK0 zxq*{YH+3ND6e|4ZOWl=@oD@iTunnFe75;2g;4bVvtNC#n4RHk$!Vr>r2t$dqJEEJB zHUcGsEZDuY1q5tPi#I2wx+7w9Qsdq2tZOu0zPmLN{V%^TDY`V?=1i#0iD=iM96q`{ zc}6SmYBBsL9Slx6gfZz73^hn=?i=@ccX>TeCrVCH0#jv4tPqA^IV;u-KRMA3Hj44( z@&m;}4K#h+l6scHVE5Q~#3b?+IMIK?TchMFNt{(BzvzPPDx2Hc#m2LbD?37G_WJ>$ zwj2j_&|3#JYnfuyYcu4ap`wv93I!mVyqJO`ML(7EREg@Ta+NN8QJ=vTsbNGo1~U%O zV&&2lpMUokv#}tjGfNWVXB>elz`9UZH7$#8Ny6Laj+>*biDrzgj%>#mL z+hE+dg$?CUwtHq2e>xt=y{gI+}Gj%RD~7Z-XxIEtvgy7=YthLMhl8|)g41fh!G zc%Acgatkq`45(B%>-I#F3SI1Hxcq{&0!lM8gjh3;4}yFZ!2FkbLex|kX+tag+Hl>^ z?b9&&7+k6x24`)jeLz!v1h}Q2hqV2|sg+>5AlhV!zuu8Uo`j-~{rN{Y)Ng|pT?)GJ zlxEA>f^)RTPYGQ)%CWRsGdc~`|8~X(1y=DfE89e%7A#(hzF!lzwI5m|(B*IXqi^7g( zG%0Uov0rl04krs7w+|_!xBL4>H$7Z91O%5)$5&$TRP*m8p+UiR-ScY=N@l{!x@O&W zTFMHNgwp>GnGrkYpH3+NwEDu_Y+u$Uv)ZSX%P;^8p|RG4jb^KN=zye4pp`RZaQA~R z_B`*X`fRpz_D^uvAp6}}_RH-(2$l1z4|nY#_pUR9wl-UcM@*e6^FO1g<65>AG6K`B zZn&2#^Y48LS5M2V?v^{Qn6k9C`ay2L0$T^R-^s9!u$R|!0+AN@A4)7rD>OR1k=>8* z5qEdd@_aoSAiCR{2;CkWfRy<^%S(HTNH5G~tRMJVJmiKmBa6Q-3+l_K{rz8RhC6MS z2fd_-XZX(dV+k*#Sb?rAmm&1T82) zWfy^*84*1eb5qVcnjP)!hEb#why7_hcuWK*4h>P@v!e|NjRe@xSY|=m;XXMfwyo!` zZ$q4f#|&XYUabPZpFMM$MC#l}==-G8(M>&fv@wjk1e-;!kvC4cWsLoYum*b2 z)ElpOQ*Dw$B?dP%OuM*`5?|=iKEtlrZVPQrt!LpTw6Q+$^B2{|xqZ#qqHL|~J1r;^ zpJkpMAp0bPoGF{Aci)2i4a2Jq1M^r1hO|yd{b-$L53*kX9vQGDpw9Rl1ca${oTn-WSAk`E&#+CAW#mZ7&-jL_{r@O{XzI0|k2l`W!hTH{ksKkS0-sc=F|Th$ps`2vudY& zXBPtLQfXke@rCBpJZ@ol>`Jz+!~GeTc7$8BN<=KQJZFlVzi(=9b|NpR5*??m`J zH|UGNp*}+?CN7ry=15uv!cV$}Mw}F1^Fbgr3r=_qvS0xk`TQkskyx-dZVi(I_7*sT 
zD%aFK^F%61bG#83q2Z-4Vb`RM>jJb{hR(|CC6^8ZhfLxQx|2yXE1bDv%RoODHR%!Q z+dY4%^YSgRJhrTfii*;J2b~i~s{6ZZiq(D7z2XHvg(n=rwM-_GI*AXPP?OrGvpG66 z?T83;wtx@*2z`Vm8{MgovGIIeo);qg!zK0lk2E-GJex7qLbFHT*VR5NFNckMrBks=RY&4^^Y=HnxaCJ@13mp|M+HsKwu*37d>1w=dvSF z8H8sEnBvBlIfn8)3bbM#-lmfo$3Q%!)8_K%=(;oE{+sBF)VAa3UVFJEOkOPpw|p<0 zcH-rV9*LIF%Ie*8$^N%eoN;W1o4BK?(dJm>#78V9$JFn4WvwI?hba>0@&eWj^@ggb zYk_%uob0b8fI)AvuOfm`srjE0?KnuZ&@gwi`(k_NhdH+>!7|4H8B}u|;NCWzQ2)*G z^+s+}@(|SI*AE6LBcf?#@zw*xGLb+!c9lTJk|%UxBZhKq{1KW`a`+7SF zlmE)Q%WmgCAWE4DE|`=ER$!EBZvtpr^txuYn64C8dX2nDAQE%%sXzHcV*?7|b8bu5 znORFyJFJP+N#Q>kqr~7WI<&4OTE{MuF0>K*vaJI9pe*sp6F>AFP{H%|w2GxwT(3m+ zfp+LE43?q)r0%xt@D?_T&>%_R*)rpg5j0YwY!x9R-$KHeqYF=%YnE|GU57WE_tEt- z62QC=ADF@)`zZ}L@p4rbuHTFI)Iarry3?uZ(S7i*6nL$)t&MMGP_MI3C!;D9>9jI$ z2MpQJbH2^YRBAinYqW|Ia1i?uVqNajFOwx5Pg9qV9HvyxmjkzckRUTJFizKxX_Xc< zhS0*P!06R+s2umvSZ#A5+Rq)HuOHBq$!3fMMFq_o?#@1(MGZ*RSwe^cIB&=N0%1G)@Y7Ct)QixG3XM$m7H6R_KBlS2gu z?~@X4!@quWtgP{S6wr5oP1A z390ro(D=P##KH)g7A_N^N$^lo>h`ysmyeRBqgMOcvAqHrS*_?X(EP~O)|!K`~ zJt6v^H8ne%nnCi|9~&N)2nPr?bStGOCYMT*h3UbD(PBWQFf<2RX0pT8-?KIm37%@@ zyuvt^hW@{RZWGI}5*(7o*)EasuL44;F^AoTDDEE1Jakv+VPQY1Y&6Gha)k)z4+1Jo z*m0c2ZcotW{6kEnyS~ciToPIV(pa79yNu)sC|edNTXaqUdZ7ejCN0R|Pb15%d<@|< z?WNU&To-;00J6BZ-J(rl1f{|fpc&tTaR$h+%MBF=5e6lLb&+6lkZ+IttAPFWb_O7_Je&^j>$!|mp= zvT6;TO^ekC-^v0s{x!8Bg&RhVWln`>zT2ldoztq_mp6O+-88)ur%$a0(oT5$SIeI( zSXR;>HE8Fqc73jXZ})khTDW46z-dsGLu5?f`&l(WtYDU>he>BEj}(<<>pl3BJx*7S zeci_y@cgGZV>AC@6h{-{uvyTv{7TYF(@lZ}eZ{H$e6z>0aiCXi&CE%dSGwyh)MFCk zj()A3XR2*yDN>fuou_BL087#7C!RlQ?lTL+s%LMCdsPu}q8uuuhv=^!k=8#^)xib7?Nb!lwyBe>h#SLdD(a%G*7ZFaeV8U{u}d4thMa-V z_){bMwe;l~B_M~VR9>E`d1l+rR#Ajnz-)l$ql6tyLWBxFa?`V?ulvJMg-7ak&FdG= zu{gci0CXGK^k$wm-jnVZ)PJ5xz7HWF-@}vtzW(Dx;^4{nUva0V;Qwpf!YHHXUP1x_ z!fyZqLi+vM|BK#YU}j`tV_{@wH2n??X8_ol+cSwNDrw6oNTulMI^%IQ`su&JCF_%_ zFUVhP&+y3PT1GT#)5+2@B4gM+wM0EOTO6NR3n82XUFw@ zc?EkvW?J=Y_ig4{8ZqW=BPwlMdN~^15$f&B_@(n|;nkI%**8!9xqDq(+0D#S%LI)$<+659Nk0zFPs)=gXit%8ECMG^FE=kIlCdzVV9dUxpCgeuvlCGkaW|i z9u2ejeAC5E#W1+9u8#_%oLik))ju`Uj=6Yiv}1l>8s5y?>G6GA%4+lf_*2so7ZuU4 zM5ReEIk-{X>aBU%Sn60|E89itWusEeIrF!4GS&8wKIYfrCf_@Yb4$bVCa3Nz>zqby zdCt!%^)6>fW#Pk1t1OJH>h<;Nfd`3=b@I6>i?rC7mpo`6=`Z`%+1jQ`JDX#twB_da zB-Y3>_ova7ma2>Jx2%R1HhGoc-d4-sA>!ucz?I0bl$5XEa}>H7hPS?5b&+k{XfOwcPc991I;~NWe`Z)K8gT zIks^7rb#{}27*-3-qgU!>OcHTPT_sax`c}I<{Aqm?9D^}pf~jv)MS48Jm|jc!D&)- z*o|yGAG3&Pu-8hPH6Tu?i3;)QPxm(Bpre)h-G_x;2m`hknawM9Q3%lDMl}9_L`-hB zx{y9Xu^z} zdeLKhS5W|V7n67ICQ70l+cq&&Z3^1U=~$+eAYsf4hMXHru2v_C`iuC}O8m|nNI*oL z4=AFoAb%Lq>?&+oWq^@Qr3kYl&|novA?Wlr_+wxuefS$bODrD+ zj|YXRl2W(DQ)OY(@HlZLv^BzhJMWF3#skRlQ#^9?^gcs8+r7bkLeo;}dQ0jV-9u8> zQ618@grjGD-crZUG{5sLQkp_?yM>9s)|dRXL6{;26ufOB>9;0%W`tZ2dzJ?qY5Uy z48>t|G+xt4RrpjwMul^tiW^p~6`C=4%=!{hlUb_8Ps-H<#a};s@AC0D%Zw``$O$^> zmTY_j?1ISF9-#kOGy+e_8+Lg%#_5Vn&;gB`^Y}J{7n=+8-O_TC-4kl0GWx8U;ahN7 zJfA5Jk)Co}yYV;1LdHx-_r1}FbUGO`;c0yeCu-@$u}}vv{*+l0MoPm<4cUSgNB!Pe zun&or{0WbRu>DAzz8&ve;$}I=#_Dh0KNf-r0P?UrI`mRP*V7IpS=AF@Mw<1Ih2Tc3 z%weWk8pL2{`B5=O@Vo^EP^9O!FVpbl;|tkcI@Bbjy;yf^(^IWlwC=JOp_mc2xl>%O zhfi)&;X0*#oA+(l+)3&n3Jwr~zld+&3F4^NOgGiuO$;8pXEbyLKXqpNZ4<0>TX{$KD!5C$!%3 zio(h4$8o?lK&%n=Y$`pS(HuQJeAkbJbvAq4f>YRk3I@_+h$FA|V*a4*MN$bc|ti;?6~PFvSHqI~~hFwjd<8%;5wXWhZtQ zgEQ->vHyfn+TU8X#(D%%X+sFCo3j9J55$9XoG#Gxr=UTd&JfJ~4Q3H$NDvNi1j8RC z4O(UrigC}}f}o(_Wp3@HbQ+#n##JbTNKG-sGF6G=iDIFvZf9pOf4v-71Vh<>9I-93tqk~*%qA^i5q_y!sU&-6LTjJ9HBUy=6E3c!V%MWJk9 z9z@Zz7yofgVMq=D3riv$XT>>BUC}fqXKA0zMZ>cB;MR(s^$`%~_uYT6IXu?yP$QcK zLg}YKqLM<*&ug2h=iP98Q`@T|XPYO{km+QqQv&?TjiILAB;50f6+bU12OFShU-f|q zzF-j}vNz)7*f=hfRe3!a==9sP2@b=>6cJ!yO~d8kx2-4Po#0DHb&9f8Jc$}a(sbG7 
zu0+1;IlQd)$AnJ^Rd_fYi5(9S^mGztSXR?+3mk?c4cT57(y`1w>RH8izv(LEVik0b zC)y6kk=iECTyaI^eCYEcW-GCMy(GggU8lr8)Tn%K0Giq@R_-@AA?I-j^sq>yLiadd;s5l|>|#07!&qz>6rZW|&GwnGK?#XqXq9I>_vO+12$LJAuZl)5a$HISMArEIORk`(6|}AZALlPt7MVeWsjZ z#$t+)6)*GU-XZ=9=}jhJ6G#V!czZiNY=je`4L)3eQZT{evmO90H~d&^XAJRXIW|^I z2}7xtmw`DkTjdEYs9GrAk|Ir#`q-d9)lUM~2wqM*I4W;gOP5Fi;x4)m35t-ZhA!n& z>#cd^yf%|TX4f*sDgfg4pk~7#-)xfDUDSo}LZ1jc9C~^X<~x#pQ>({8E8LqvaN#x5 zUxU8;>_W{eCSyzaJ>yfjBp1mD{&@IPquGD+CMV%Ky(THUTcc~k?+wG@%4T#q%$0GF z+3Iybw-FU1U?fH`DwwX-qmXasHllZ#^2tv=qS3mFN7SC2f)$!e8jU4$oSahKmZX1e zZVD-J6*j;tJuN!$3rk9bl6aUmyPi6VJ;@^@5CjsP-3&v~GE$c1P4+_r)|XX^p+xA1xy|bPecl8_f)IKuP3o#4Lk)Ewd^4Bmg>WoJSKKnp)c(F^hxsHdZG2q9M=E%n+@UCj|EM#H6PZGgO*fy+DxUrA}0@5t(v9 z4|I#!&~5x!;!;C_5U&S~qhrQ(vv}$xGgrDuK_k52(%g!_xiRF*yonDM?l;02WeVQ` zt|4$*DHIP^N3fo3{YXz*Ofx_8(ilw~f7f6IwH=_C0CGb$nRCRCC+_QZz?_hnMDD~! zEA_nMaOZ5E%#M*6$>{1JLg0{}BAtIDs&dc8zsd2aBJ%!T8Lq`*r77*KnKP*zqgQAVQ$kmsJ$LaBkfqponitpr(JY>f(IpkYJG4^?viZIz5XaRql zNxJD8ALO*{^27X){pt4jPqu5Xd6h#E=i=9R=ZLnM{={>!(L1&QTS14ehlPrbJ;V6g zsqIX7_Oh$Y87hJVOBrcBbXHes(~>l$yB~j@aNxLpC9_VoST1>6V!|OJj-_ZAzI5)t zgLZEAjDB#r(pEK4@q;%Cu9ZWS)8+)|KO=WJjxRr*q!3m&OC@_k!MQWH0S^-KSe()P z0mb4_xm>B*GlDPK5ponmQf$|{u#1RvwoLd>$uHaqCIgM}{HRpF8^fHvT2+;bVwaeF zauWtG6EK-@=R_s01zu4ixF~W!A^vut;b(DRo5U^&I7`S?VUnUDkL^6B#9TOUGh;i1`**{&+>*Rd3^I$f-$C@*Ev*i`rI-x4S| zYpii52}bZzzoLWT`7lSuR=02Po=u`{p~S`X{OMZRnWdg<$~O=oDDj&0@pp@`G+dna ztiY!<0YMi;L^0vqf|~j_t@dgr_VA88*JD6L1j@PGu2E@sG59~}vEg-QeAu+Q;!l*s z&bP|~Nd?Ii5riV9kqI5L)X?DBL`%2sttt%@0}{;($CmY<|H`l$))9W_q``WvMMRu5 zcuH<)y6W(D3#s%OE!(pLQyVT7#s#4Axg14z;7cN&C#-;6J%23~_XxV?XL>j$5}Un= zy;ZEiw>EgjwxK$sHYK#)HHUtohoBmUfOTT$1;%GwgiNjBt|2S+YYFP>yI>MeEINb? zP^2@H(X$aCx^z%5;{B!f;tJE~Z5io&WfQp9NMHnSmdkS_DcmG#iR`Vg%*NB#rPw zeYYfxNVcjPJx2`g71TQBS+_xG@3SSoU$Y- z9;$G1L3N<>o#+%Ua8) z4b_Vp%x0)%i%gU*Zy*?*m(Ar!pwjfzo^;*Zzx>mMUm52MA3P#`4k4CjOCd3^?KiLw`(ZoyvW~9uh1UdD{5kmB~kzquTqvc z(<#GWyl~ao>44zm$fZQ#Gm9rLwSMaD3QyAttt`BJe)593-nS2mJ zzU+_P0kJIR-CLQ7PMXb2-bi`^vOo)Y5ok3ae4oLgKXc=|-^#0x-^lO_0I~s;SCN31$OMLYF{6ws-u-k;yQ92R$Gmf0{KO0tKJ% zyRwFKst1W~jU~lYJ>|v$6iMPR&UFtwCa-NDMO!)#OTGQ8;Q*c@l+c-7!<)7iLN=uD z0u>~VYH2>Xcm}VCt74Rvb?r*DZ>zkX3{zq^+xwgGwaX8HA(u)@V#=;CqY4<^XbZLp3`W@9t(_ z-{uiYql$5x_l5?z+0KCt3w5$p@Yt+jU;Hox{Ug1?J!JBRj z14I_G!^@XDDk8GXG>QolTx|T=hOpa4fLLjfd1cXCr53ewBL_gY&fK~1xQkWu?rqux z+G`YEx?=?)ZgT=)#2T7i)yGp?7nr*7&DI@j88w&(Dss6VN5+?D{LF+4P&rZw_feO| zsWSzp?_(Z<3MCh3o!qrsiUXP%0_Kl;z%GK&)WtSB#fqElm@zCN!f{)yB|kb|lYaD` zr`hF{!eM8grpBKiy=I(4k&kiOaXd_8Qk0C1@(JIZ!&8@v#-7Yut8mxCPlhU7lYD^aU2X`#0T9#qy`n$x6lD;tXw z#qH@VC;iP~iU?fAZHn7D6ZZTV%L^4s_wsFbdiQhdv`^7WV@sNnc!q7g#tH%Gvpu0< zcQ*rO4PoR_mmqtRP*$NR0Yu7iCAtHeB$Iqa7t>x8sAD`%nQzW?Gg( zmNtza8CdeY--Yxaxnr}vXaLhq7vh8>PP&pm_?1qTT}Mc zle>h0bG{;L(fqW@sXE7WgeBXfykP;kCNH(#{f)RshxR}5orz9M4 zH9JL68PRWjcMwxM$MNRQtAYvQL^Z~2Q|jIjnzs7;diRN6Ar}FLjvdV5?EUxhcn#ES z)1r=iGu46 z?;;I@gCbcixGy_?uD#aJ?-+k4qPpxv+mC8UfZMAqmjT)axDnH?C zJNU$aYS@UlZnvDfOLA<7Bh}qRxBANDTU+yvSHl!@b&kot!j6&VC?e7uh(Rvs@c9^5 zc5O9FQ)y4xtqh~E0L>+AS0(!yG)WXEv@sk{?(G$3JS{ z+;S`&aSiOMa>-+YOs0C%ar}=w3R+y`P@KnD+G|iqD z^Ux>{k{VD;gSD7HGCY<;qU*XB6sjrHUmw7Pl_h=@@9Q{aEFqAC@X#=CmUsK{BW#K# zHk6Y+3TdHG1m}aCHWqXBv#V4fZt(JS&LK@P1YGIg7O4(sZDikHC`PtwEhCL~w_aDf#ZM$+2f4VRf5Y~3Nw`s^CYKRN>^>8Y)*A1OWTjv719eNBQC?@}tQj8jwb6}k4%*~B`6^7}u?u)g9Kk@qJ2loD+j6XM9V9=dZ;U96A@bZf zV%%MhlyGLqqY_+AMo-RNE4b49%sYB@mTis%$hVl%KbOzDY-AV`Q&S}0v8z#ZI9}BJ zwvs(wD=dXb5oR?c>gZY9 z-yIx!%2z>Z@q1^nD3NYO+tDK_7;2 zBt@vH(P(T-5nW>w{gth$!PQt_6<5#ZE-puuyLDDoB4v2gb4TB56`GoxE>|;i>x+0t z8=q4!&7*>HnNdEkor4o6wptt-)B~>`6*1q8q%xQbq=e~Qc(mt6EYyN+L+Z+ckyC#B 
zeY%=o7jnp_$Vevt3xUHH1lp9{&2=cOuRP+RTjrKzaP=yLzR1?oy+ow9Fy|?N_N3}G zJ8A0JQ1utnJv*^As0B#WnOxIt(G3e*-DbqxaQUvov{#9z1lZi?9sGE5?ykP#wd zm>{*mT4Qa!@04ZTAeg^)onn-gk7gTeusQj?QN`5DJ4=FodY3G8j`1z=<3lnLEveK- z9n6K&OD2@;f%sOdc!{zr*7YOQhxzlg6RB~-1|w3!uV|@`(GF=sik{6h*4EW_+qk6nvxa z?xRT^eMz`<6sxD30WmsZCN`7*3TlYVTTs+y)r%8@zRefU^Wa;C4%wE$Hy;+C|Q9Qkei1FJQnj{3@(^WDj1-G#xJgqx7v7aQBkk@YQXK8!w? zWa)mu5H}+ezWlPbL_2s3uu7z?kEF3)>+Kecoo zq1EX-P}-Qa+fyAOjU@s zR)8ia$^a9QEv2L56IWZCwrt!yD6|FY`juDhbw~jLwf@r$7YrJTqeZlQ!h(zdl>$l@ zS*>N;B7Yu_&|Pc89t&h-`uxY;?-yr=m%LewQ2CTkwY|B3lOnYM%?Xg!9NN4QjO(TwUV5D-6EhF6C|<^zkti? zdi{-y1$`l~P`iDjLhxVg3Jpk3z9(tPikne7oKy_I@GNSJB(?#gi?`~26!x-KTQ z!v3wTjZ76dvJ*(fDmQY|jHQ9xS0EZ`X6GN@%db{7WPi0G+|la+gSOb>H6r<7qPdS~ zV(D`Z4*)q~T6y37pFJB;wZF-+A zJ0dyL(973vt(oOUsdW1tN_c+380q%&M$_spNri~0Su#I1V46p;`N+4^NLWs$rlwV1 zSzAn-`gKrtGp~Hnh*udcSv@U@g)=!zZ;6%cLbfM>V!Z1ur4vEsN1Y&woDQwG))r6? zk1F%kxDpiucY4;dw-qscH~A<5;tWlc%|FJ>76is13!XZ1DuY^tS8QuFqV4Ku?y z8Vn7^;zEN6RKw6AN%83uNt{G#94wVV}yQ-Xq4P`#MmLsE3WBe@)Jd`t^%@0vyehi% z_32ORuw)JnTXjs1w@B}p&~eJHxR*1pyPvd1s?5v($`#I1xp` zpFb&M1l>^Mn}0Meib4pGE56zn6hanDl5&_#7 z{QZi`?2P%twRUjsW$V<#PGEk*8eXCqN}=1O#ChokYmPW&5~&RJf`azIV`&=~Fb{lT z*;r2>FA5F+_kf5*elr@iu=WEAx`~TBiY+hO2-qc^4=YmB5f{*3%s=7JH4)MEwo>2X z)OE~0hKzZmL%0^R!?&TCWSdK9*2VL?G>8k?;K4e~D?YN}P)KO{<) z4?A-B>%NoD=a@T$Y>qB&tmv zagLzlUC&-$cdd;xn*u4q9vwDy@Y+W$7c;RTD5*7n$S+I(u{j>1u&6U42!#w;1Gzf? z>1G|UV65Juv~YViLc4y-Smfo-JIw&u^cSPUMj@`l zTaa_2+zB%;r-ZpE#1rukUNOs+qX~_Oz82y^<4aRf)$C9bKw}=RH};DUhhd5=aiS;9 zNzaFunPLxC7$!>o(3?3PG%n)PMdlW|W8#<@HJIXe&+2{vq2YSIbQ^ zzz&5a@}8!;GuTX=B%A9ass>#{lrol`D*5XqS{+JQ4kR>|V%XGOuLC2!bruHq^}J>t`O3Zy@S?*Z7O7$Mk3+eIKz(K{!zlvQ5c;4ZHGj07x=}u6ukxxy zOr2OY)Gt1XHkcAp@V%3T;8Q}GV_Z@tys05Q;w;M}!DJfAJ5~T(wdwKC{ zn8FT!G!fa!LLlwc1l8DLMs_111cE?yOC=t`!IefC$W$%L%qOmTp-fCv@agg{l+cQh zI!w7;R7-xM2)n?vdiT)0WK$R4V<< zjzv9P#*qscB+_H=^*408W=9K%lP1t%MpM1qk8E9oFf9EQg;3A-4E%@CLLwPMn8O-J zAz3+_(^{96{kx)#3(>sUl7oKPk-Tr44;bg+lR6zSYWeM2mv;=`mW z?qH6LuC&@*ByNsh1V2mAEi?*HG?NI@gmn0JY!71YYQ~1bo}l(c(F8hE0%;n_!v;*vy5wHzysUuGxGp2VfkF@j1vdxI-G2?T{en0;35mVTRC-qET64+-{_Q**yZGHlH%NyTSYQzP5Oc8 ztBzVyJEHxH+bj!dXXpba5~hde*L4pEK?9HnLb}B|7CslvYk%EhFrkX19MpraE|l5~ z6ZcsVkLIQ1+DpzGxDanIeL0Ul@v%hT9X@@nq^AsT2?=;{IR3V!u#TXx$MV}_Xr0~Q zn%a4?6a4J)#ST(mV<4DT}uY?M14- zY-`R#F0Y##HQH-K9yiF%Ui6K86y{b!3pF#L6EpLurV6P&)^r#0hiYYApJydOcP)JWdG zOfllUEioAbrq~N&ljB)pj6TBr!m%161c{0>v4ev31~FVjr|F>;poD`P0It6-m<}|;%S+s7A{G7>}2yD zdOvK=+ah*$0)~>X!am2B%_D2NmGD}fs*bywD(ynB4?k@sB|W>|iH8HXlBmEOJ;29? z|Lu8UB{30sCDBi*{lobhsKHm?>0`V3=siH8;IY5**&rvd^n6WN+g&IwahOl?HFK4z zIVIL4+UbenqT1vqny1SJ>i&I_QAS96uS;hfHOHH= zo;eKqBP`(hsRaS^WDY_thSqk-q=8#FbFY2X_h_`cn04r-1d5pX8j&mB`IK2I+^|~pTVR5PRJgS)W7iTLK zP~1*Ig~OBAdslGvQU-Ggszh#&v!2QX=iRyXG(_M87^BYunGSxGvreq@O-` zs>IaIBeq@kwpOP&vi!tgSPsp-wI^eMW!spk)eD69gGf@jyU4J(ym&%EN zQ)N&GMj~bPl0aqarHVECXV54*sSOZjvC(Th8r$$ncft2-Tn9@BAIQQ==yQfRZ6}DS z5#@*-$0PZx$!N9?FutR*SVuGukH7~Tb4R_B#{CEq zjNArj1-yw*bVzJ9>bKdq=*S-CsG9wSz{{x?v*pZPFPr! 
[remainder of base85-encoded binary payload for scripts/python_wheels/zeroconf-0.131.0-py3-none-any.whl omitted]

literal 0
HcmV?d00001

diff --git a/scripts/restore-database.sh b/scripts/restore-database.sh
new file mode 100644
index 0000000..32987aa
--- /dev/null
+++ b/scripts/restore-database.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+# LMDB Database restore utility
+
+DATABASE_PATH="${HOME}/printer_data/database"
+MOONRAKER_ENV="${HOME}/moonraker-env"
+INPUT_FILE="${HOME}/database.backup"
+
+print_help()
+{
+    echo "Moonraker Database Restore Utility"
+    echo
+    echo "usage: restore-database.sh [-h] [-e <python env path>] [-d <database path>] [-i <input file>]"
+    echo
+    echo "optional arguments:"
+    echo "  -h    show this message"
+    echo "  -e    Moonraker Python Environment"
+    echo "  -d    Moonraker LMDB database path to restore to"
+    echo "  -i    backup file to restore from"
+    exit 0
+}
+
+# Parse command line arguments
+while getopts "he:d:i:" arg; do
+    case $arg in
+        h) print_help;;
+        e) MOONRAKER_ENV=$OPTARG;;
+        d) DATABASE_PATH=$OPTARG;;
+        i) INPUT_FILE=$OPTARG;;
+    esac
+done
+
+PYTHON_BIN="${MOONRAKER_ENV}/bin/python"
+DB_TOOL="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/dbtool.py"
+
+if [ ! -f $PYTHON_BIN ]; then
+    echo "No Python binary found at '${PYTHON_BIN}'"
+    exit -1
+fi
+
+if [ ! -d $DATABASE_PATH ]; then
+    echo "No database folder found at '${DATABASE_PATH}'"
+    exit -1
+fi
+
+if [ ! -f $INPUT_FILE ]; then
+    echo "No Database Backup File found at '${INPUT_FILE}'"
+    exit -1
+fi
+
+if [ ! -f $DB_TOOL ]; then
+    echo "Unable to locate dbtool.py at '${DB_TOOL}'"
+    exit -1
+fi
+
+${PYTHON_BIN} ${DB_TOOL} restore ${DATABASE_PATH} ${INPUT_FILE}
diff --git a/scripts/set-policykit-rules.sh b/scripts/set-policykit-rules.sh
index 9ec6afe..d52a088 100644
--- a/scripts/set-policykit-rules.sh
+++ b/scripts/set-policykit-rules.sh
@@ -30,6 +30,8 @@ add_polkit_legacy_rules()
     ACTIONS="${ACTIONS};org.freedesktop.login1.power-off-multiple-sessions"
     ACTIONS="${ACTIONS};org.freedesktop.login1.reboot"
     ACTIONS="${ACTIONS};org.freedesktop.login1.reboot-multiple-sessions"
+    ACTIONS="${ACTIONS};org.freedesktop.login1.halt"
+    ACTIONS="${ACTIONS};org.freedesktop.login1.halt-multiple-sessions"
     ACTIONS="${ACTIONS};org.freedesktop.packagekit.*"
     sudo /bin/sh -c "cat > ${RULE_FILE}" << EOF
 [moonraker permissions]
@@ -72,6 +74,8 @@ polkit.addRule(function(action, subject) {
         action.id == "org.freedesktop.login1.power-off-multiple-sessions" ||
         action.id == "org.freedesktop.login1.reboot" ||
         action.id == "org.freedesktop.login1.reboot-multiple-sessions" ||
+        action.id == "org.freedesktop.login1.halt" ||
+        action.id == "org.freedesktop.login1.halt-multiple-sessions" ||
         action.id.startsWith("org.freedesktop.packagekit.")) &&
        subject.user == "$USER") {
        // Only allow processes with the "moonraker-admin" supplementary group
diff --git a/scripts/system-dependencies.json b/scripts/system-dependencies.json
new file mode 100644
index 0000000..4426e53
--- /dev/null
+++ b/scripts/system-dependencies.json
@@ -0,0 +1,13 @@
+{
+    "debian": [
+        "python3-virtualenv",
+        "python3-dev",
+        "libopenjp2-7",
+        "libsodium-dev",
+        "zlib1g-dev",
+        "libjpeg-dev",
+        "packagekit",
+        "wireless-tools",
+        "curl"
+    ]
+}
\ No newline at end of file
diff --git a/tests/conftest.py b/tests/conftest.py
index 742a81a..4d8e19b 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,9 +10,9 @@
 import shlex
 import tempfile
 import subprocess
 from typing import Iterator, Dict, AsyncIterator, Any
-from moonraker import Server
-from eventloop import EventLoop
-import utils
+from moonraker.server import Server
+from moonraker.eventloop import EventLoop
+from moonraker import utils
 import dbtool
 from fixtures import KlippyProcess, HttpClient, WebsocketClient
diff --git a/tests/test_config.py b/tests/test_config.py
index cc31db8..4496517 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -5,10 +5,10 @@ import hashlib
 import confighelper
 import shutil
 import time
-from confighelper import ConfigError
-from moonraker import Server
-from utils import ServerError
-from components import gpio
+from moonraker.confighelper import ConfigError
+from moonraker.server import Server
+from moonraker.utils import ServerError
+from moonraker.components import gpio
 from mocks import MockGpiod
 from typing import TYPE_CHECKING, Dict
 if TYPE_CHECKING:
diff --git a/tests/test_database.py b/tests/test_database.py
index 50b8d85..70520ca 100644
--- a/tests/test_database.py
+++ b/tests/test_database.py
@@ -5,8 +5,8 @@
 import pytest_asyncio
 import asyncio
 import copy
 from inspect import isawaitable
-from moonraker import Server
-from utils import ServerError
+from moonraker.server import Server
+from moonraker.utils import ServerError
 from typing import TYPE_CHECKING, AsyncIterator, Dict, Any, Iterator
 if TYPE_CHECKING:
diff --git a/tests/test_klippy_connection.py b/tests/test_klippy_connection.py
index 4b6122f..0a9af62 100644
--- a/tests/test_klippy_connection.py
+++ b/tests/test_klippy_connection.py
@@ -3,12 +3,12 @@ import pytest
 import asyncio
 import pathlib
 from typing import TYPE_CHECKING, Dict
-from moonraker import ServerError
-from klippy_connection import KlippyRequest
+from moonraker.server import ServerError
+from moonraker.klippy_connection import KlippyRequest
 from mocks import MockReader, MockWriter
 
 if TYPE_CHECKING:
-    from moonraker import Server
+    from moonraker.server import Server
     from conftest import KlippyProcess
 
 @pytest.mark.usefixtures("klippy")
diff --git a/tests/test_server.py b/tests/test_server.py
index 7b30daa..b32b68a 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -6,12 +6,12 @@
 import socket
 import pathlib
 from collections import namedtuple
 
-from moonraker import CORE_COMPONENTS, Server, API_VERSION
-from moonraker import main as servermain
-from eventloop import EventLoop
-from utils import ServerError
-from confighelper import ConfigError
-from components.klippy_apis import KlippyAPI
+from moonraker.server import CORE_COMPONENTS, Server, API_VERSION
+from moonraker.server import main as servermain
+from moonraker.eventloop import EventLoop
+from moonraker.utils import ServerError
+from moonraker.confighelper import ConfigError
+from moonraker.components.klippy_apis import KlippyAPI
 from mocks import MockComponent, MockWebsocket
 from typing import (