Mirror of https://github.com/QIDITECH/moonraker.git (synced 2026-01-30 16:18:44 +03:00)

Commit: QIDI moonraker
13  .editorconfig  Normal file
@@ -0,0 +1,13 @@
# Editorconfig file for moonraker repo, courtesy of @trevjonez

root = true

[*]
indent_style = space
indent_size = 4
insert_final_newline = true
trim_trailing_whitespace = true
charset = utf-8

[*.py]
max_line_length = 80
2  .gitattributes  vendored  Normal file
@@ -0,0 +1,2 @@
# Auto detect text files and perform LF normalization
* text=auto
7  .gitignore  vendored  Normal file
@@ -0,0 +1,7 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
.devel
.venv
10  .readthedocs.yaml  Normal file
@@ -0,0 +1,10 @@
version: 2

mkdocs:
  configuration: mkdocs.yml
  fail_on_warning: false

python:
  version: 3.8
  install:
    - requirements: docs/doc-requirements.txt
376  docs/api_changes.md  Normal file
@@ -0,0 +1,376 @@
## API Changes

This document keeps a record of all changes to Moonraker's web APIs.

### March 4th 2022
- Moonraker API Version 1.0.1
- The `server.websocket.id` endpoint has been deprecated. It is
  recommended to use the `server.connection.identify` method to identify
  your client. This method returns a `connection_id` which is
  the websocket's unique id. See
  [the documentation](web_api.md#identify-connection) for details.
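  For reference, a sketch of an identify request over the websocket
  (the parameter set follows the linked documentation; the values here
  are illustrative):
  ```json
  {
      "jsonrpc": "2.0",
      "method": "server.connection.identify",
      "params": {
          "client_name": "my_client",
          "version": "0.0.1",
          "type": "web",
          "url": "https://github.com/my_org/my_client"
      },
      "id": 4656
  }
  ```
  The result contains the `connection_id` used to identify the websocket.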

### May 8th 2021
- The `file_manager` has been refactored to support file system
  events through `inotify`. Only mutable `roots` are monitored
  (currently `gcodes` and `config`). Subfolders within these
  roots are also monitored, however hidden folders are not.
  The following API changes have been made to accommodate
  this functionality:
  - The `notify_filelist_changed` actions have changed. The new
    actions are as follows:
    - `create_file`: sent when a new file has been created. This
      includes file uploads and copied files.
    - `create_dir`: sent when a new directory has been created.
    - `delete_file`: sent when a file has been deleted.
    - `delete_dir`: sent when a directory has been deleted.
    - `move_file`: sent when a file has moved.
    - `move_dir`: sent when a directory has moved.
    - `modify_file`: sent when an existing file has been modified.
    - `root_update`: sent when a root directory location has been set.
      For example, if a user changes the gcode path in Klipper, this
      action is sent with a `notify_filelist_changed` notification.
  - File list notifications for gcode files are now only sent after
    all metadata has been processed. Likewise, requests to copy,
    move, or upload a file will only return after metadata has been
    processed. Notifications are synced with requests so that the
    request should always return before the notification is sent.
  - Thumbnails are now stored in the `.thumbs` directory to prevent
    changes to thumbnails from emitting filelist notifications. This
    change will be reflected in the metadata's `relative_path` field,
    so clients that use this field should not need to take additional
    action. Note that existing thumbnails will remain in the `thumbs`
    directory and filelist notifications will be sent for changes to
    these thumbnails.
  - The `notify_metadata_update` notification has been removed. Clients
    can reliably expect metadata to be available for new or moved gcode
    files when a request returns.
  - The return values for several endpoints have been updated. They
    now contain information similar to that which is pushed by the
    `notify_filelist_changed` notification.
  - The deprecated `data` field in gcode metadata has been removed.
    The `size` field now returns the size of the `.png` file.
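  As a sketch, a `create_file` action delivered via
  `notify_filelist_changed` might look like the following (field layout
  per the format documented in the August 11 2020 entry of
  dev_changelog.md; values are illustrative):
  ```json
  {
      "jsonrpc": "2.0",
      "method": "notify_filelist_changed",
      "params": [{
          "action": "create_file",
          "item": {
              "path": "my_subdir/my_print.gcode",
              "root": "gcodes",
              "size": 51214,
              "modified": 1620476465.6
          }
      }]
  }
  ```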

### March 15th 2021
- The `data` field for gcode thumbnails is now deprecated and will
  be removed in the near future. Thumbnails are now saved to png
  files in a `thumbs` directory relative to a gcode file's location.
  This path is available in the `relative_path` field for each
  thumbnail entry in the metadata.

### January 31st 2021
- The `GET /server/temperature_store` endpoint now only returns fields
  that each sensor reports. For example, if a particular temperature
  sensor does not report "target" or "power", then the corresponding
  fields will not be reported for that sensor in response to the
  `temperature_store` request.
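  A sketch of what a response might now look like, assuming a heater
  that reports all fields and a bare sensor that only reports
  temperatures (field names and values are illustrative):
  ```json
  {
      "extruder": {
          "temperatures": [21.05, 21.12, 21.1],
          "targets": [0, 0, 0],
          "powers": [0, 0, 0]
      },
      "temperature_sensor chamber": {
          "temperatures": [21.3, 21.29, 21.31]
      }
  }
  ```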

### January 22nd 2021
- The `POST /machine/update/client` endpoint now requires a `name`
  argument. This change added multiple client support.
- The response to `GET /machine/update/status` no longer returns a
  `client` field. Instead it will add fields matching the `name` of
  each configured client. Keep in mind that the client object could
  have a different set of fields depending on the type of a client. The
  easy way to check for this is to see if a `branch` field is present.
  If so, this client is a `git repo`. Otherwise it is a `web` client.
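  A minimal sketch of that check, assuming `clients` maps each
  configured client name to its info object (the helper name is
  hypothetical):
  ```python
  def classify_update_clients(clients: dict) -> dict:
      # Per the note above: items with a "branch" field are git repos,
      # anything else is a web client
      return {
          name: "git repo" if "branch" in info else "web"
          for name, info in clients.items()
      }
  ```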

### January 4th 2021
- A `notify_update_refreshed` notification has been added. Moonraker now
  auto-refreshes the update status at roughly a 2 hour interval. When
  an auto-refresh is complete this notification is broadcast. Included
  is an object that matches the response from `/machine/update/status`.
- The behavior of some of the `update_manager` APIs has changed:
  - The `refresh` argument for `/machine/update/status` is now more
    of a suggestion than a rule. If an update or a print is in
    progress then the request will ignore the refresh argument
    and immediately return the current status. Generally speaking,
    requesting a refresh should not be necessary with the addition of
    auto refresh.
  - The update requests (ie: `/machine/update/klipper`) will now return
    an error if a print is in progress. If the requested update is in
    progress then the request will return valid with a message stating
    that the update is in progress. If another object is being updated
    then the request will be queued and block until it is complete.

### January 1st 2021
- A `notify_klippy_shutdown` websocket notification has been added.

### December 30th 2020
- Some additional fields are now reported in the response to
  `GET /machine/update/status`.

### November 28th 2020
- The following new endpoints are available when the `[update_manager]`
  section has been configured:
  - `GET /machine/update/status`
  - `POST /machine/update/moonraker`
  - `POST /machine/update/klipper`
  - `POST /machine/update/client`
  - `POST /machine/update/system`
- The following endpoint has been added and is available as part of the
  core API:
  - `POST /machine/services/restart`

See [web_api.md](web_api.md) for details on these new endpoints.

### November 23rd 2020
- Moonraker now serves Klipper's "docs" directory. This can be accessed
  at `GET /server/files/docs/<filename>`.

### November 19th 2020
- The path for the power APIs has changed from `gpio_power` to `device_power`:
  - `GET /machine/device_power/devices`\
    `{"jsonrpc":"2.0","method":"machine.device_power.devices","id":"1"}`\
    Returns an array of objects listing all detected devices.
    Each object in the array is guaranteed to have the following
    fields:
    - `device`: The device name
    - `status`: May be "init", "on", "off", or "error"
    - `type`: May be "gpio" or "tplink_smartplug"
  - `GET /machine/device_power/status?dev_name`\
    `{"jsonrpc":"2.0","method":"machine.device_power.status","id":"1",
    "params":{"dev_name":null}}`\
    It is no longer possible to call this method with no arguments.
    Status will only be returned for the requested device; to get the
    status of all devices use `/machine/device_power/devices`. As
    before, this returns an object in the format of
    `{device_name: status}`, where `device_name` is the name of the
    device and `status` is the device's current status.
  - `POST /machine/device_power/on?dev_name`\
    `{"jsonrpc":"2.0","method":"machine.device_power.on","id":"1",
    "params":{"dev_name":null}}`\
    Toggles device on. Returns the current status of the device.
  - `POST /machine/device_power/off?dev_name`\
    `{"jsonrpc":"2.0","method":"machine.device_power.off","id":"1",
    "params":{"dev_name":null}}`\
    Toggles device off. Returns the current status of the device.
- The `notify_power_changed` notification now includes an object
  containing device info, matching that which would be received
  from a single item in `/machine/power/devices`.

### November 12th 2020
- Two new fields have been added to the gcode metadata:
  - `gcode_start_byte`: Indicates the byte position in the
    file where the first "Gxx" or "Mxx" command is detected.
  - `gcode_end_byte`: Indicates the byte position in the
    file where the last "Gxx" or "Mxx" command is detected.

  These fields may be used to more accurately predict print
  progress based on the file size.
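  A minimal sketch of such a prediction, assuming `file_position`
  comes from the `virtual_sdcard` object (the helper name is
  hypothetical):
  ```python
  def estimate_progress(file_position: int, metadata: dict) -> float:
      # Only the bytes between the first and last gcode command count
      start = metadata["gcode_start_byte"]
      end = metadata["gcode_end_byte"]
      if file_position <= start:
          return 0.
      if file_position >= end:
          return 1.
      return (file_position - start) / (end - start)
  ```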

### November 11th 2020
- The `server.websocket.id` API has been added. This returns a
  unique ID that Moonraker uses to track each client connection.
  As such, this API is only available over the websocket; there
  is no complementary HTTP request.
- All HTTP API requests may now include arguments in either the
  query string or in the request's body.
- Subscriptions are now managed on a per connection basis. Each
  connection will only receive updates for objects in which they
  are currently subscribed. If an "empty" request is sent, the
  subscription will be cancelled.
- The `POST /printer/objects/subscribe` endpoint now requires a
  `connection_id` argument. This is used to identify which
  connection's associated subscription should be updated.
  Currently subscriptions are only supported over a
  websocket connection; one may use the id received from
  `server.websocket.id`.
- The `notify_klippy_ready` websocket notification has been
  added.
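  A sketch of the flow: fetch the id over the websocket, then include
  it as the `connection_id` when subscribing. The `websocket_id` result
  field shown here is an assumption; values are illustrative:
  ```json
  {"jsonrpc": "2.0", "method": "server.websocket.id", "id": 4120}
  ```
  with a response such as:
  ```json
  {"jsonrpc": "2.0", "result": {"websocket_id": 1730367696}, "id": 4120}
  ```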

### November 2nd 2020
- The `GET /server/files/directory` endpoint now accepts a new
  optional argument, `extended`. If `extended=true`, then
  the data returned for gcode files will also include extracted
  metadata if it exists.
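  For example, a request might look like the following (the `path`
  argument shown is illustrative):
  ```
  GET /server/files/directory?path=gcodes/my_subdir&extended=true
  ```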

### October 25th 2020
- The `modified` field reported for files and directories is no
  longer represented as a string. It is now a floating point
  value representing unix time (in seconds). This can be used
  to display the "last modified date" based on the client's
  timezone.
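  For example, a Python client could convert the value like this
  (the timestamp is illustrative):
  ```python
  from datetime import datetime

  # Render Moonraker's unix timestamp in the client's local timezone
  mtime = datetime.fromtimestamp(1603907066.4)
  print(mtime.strftime("%Y-%m-%d %H:%M:%S"))
  ```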

### October 21st 2020
- The `/server/gcode_store` endpoint no longer returns a string
  in the result's `gcode_store` field. It now returns an
  array of objects, each object containing `message` and `time`
  fields. The time refers to a timestamp in unix time (seconds),
  and may be used to determine when the gcode store received the
  accompanying `message`.
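  A sketch of the new result format (messages and timestamps are
  illustrative):
  ```json
  {
      "gcode_store": [
          {"message": "FIRMWARE_RESTART", "time": 1603907066.4},
          {"message": "// Klipper state: Ready", "time": 1603907071.2}
      ]
  }
  ```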

### September 30th 2020
- Two new endpoints have been added:
  - `GET /server/info` (`server.info`)
  - `GET /server/gcode_store` (`server.gcode_store`)

  See web_api.md for details on their usage.

### September 7th 2020
- A new websocket API has been added, `server.files.delete_file`:
  ```
  {jsonrpc: "2.0", method: "server.files.delete_file", params:
  {path: "<root>/<file_name>"}, id: <request id>}
  ```
  Where `<root>` is either "gcodes" or "config", and `<file_name>` is
  the relative path to the file for deletion. For example:
  `path: "gcodes/my_sub_dir/my_gcode_file.gcode"`

### September 3rd 2020
- The Websocket APIs have changed for clarity. The API methods now
  use namespaces similar to those found in common programming languages.
  This change affects all websocket APIs, however websocket events have
  not changed. Below is a chart mapping the previous API to the new API:

  | Previous Websocket Method | New Websocket Method |
  |---------------------------|----------------------|
  | get_printer_info | printer.info |
  | post_printer_emergency_stop | printer.emergency_stop |
  | post_printer_restart | printer.restart |
  | post_printer_firmware_restart | printer.firmware_restart |
  | get_printer_objects_list | printer.objects.list |
  | get_printer_objects_query | printer.objects.query |
  | post_printer_objects_subscribe | printer.objects.subscribe |
  | get_printer_query_endstops_status | printer.query_endstops.status |
  | post_printer_gcode_script | printer.gcode.script |
  | get_printer_gcode_help | printer.gcode.help |
  | post_printer_print_start | printer.print.start |
  | post_printer_print_pause | printer.print.pause |
  | post_printer_print_resume | printer.print.resume |
  | post_printer_print_cancel | printer.print.cancel |
  | post_machine_reboot | machine.reboot |
  | post_machine_shutdown | machine.shutdown |
  | get_server_temperature_store | server.temperature_store |
  | get_file_list | server.files.list |
  | get_file_metadata | server.files.metadata |
  | get_directory | server.files.get_directory |
  | post_directory | server.files.post_directory |
  | delete_directory | server.files.delete_directory |
  | post_file_move | server.files.move |
  | post_file_copy | server.files.copy |

- The "power" plugin APIs have changed. This affects both HTTP and
  Websocket APIs. They were originally added to the "/printer" path,
  however this adds the possibility of a naming conflict. The new
  APIs are as follows:
  - `GET /machine/gpio_power/devices` : `machine.gpio_power.devices`
  - `GET /machine/gpio_power/status` : `machine.gpio_power.status`
  - `POST /machine/gpio_power/on` : `machine.gpio_power.on`
  - `POST /machine/gpio_power/off` : `machine.gpio_power.off`

### September 1st 2020
- A new notification has been added: `notify_metadata_update`. This
  notification is sent when Moonraker parses metadata from a new upload.
  Note that the upload must be made via the API; files added manually
  (using SAMBA, SCP, etc.) do not trigger a notification. The
  notification is sent in the following format:
  ```
  {jsonrpc: "2.0", method: "notify_metadata_update", params: [metadata]}
  ```
  Where `metadata` is an object in the following format:

  ```json
  {
    filename: "file name",
    size: <file size>,
    modified: "last modified date",
    slicer: "Slicer Name",
    first_layer_height: <in mm>,
    layer_height: <in mm>,
    object_height: <in mm>,
    estimated_time: <time in seconds>,
    filament_total: <in mm>,
    thumbnails: [
      {
        width: <in pixels>,
        height: <in pixels>,
        size: <length of string>,
        data: <base64 string>
      }, ...
    ]
  }
  ```

### August 16th 2020
- The structure of data returned from `/printer/info` (`get_printer_info`)
  has changed to the following format:
  ```json
  {
    state: "<klippy state>",
    state_message: "<current state message>",
    hostname: "<hostname>",
    software_version: "<version>",
    cpu_info: "<cpu_info>",
    klipper_path: "<moonraker use only>",
    python_path: "<moonraker use only>",
    log_file: "<moonraker use only>",
    config_file: "<moonraker use only>",
  }
  ```
  The "state" item can be one of the following:
  - "startup" - Klippy is in the process of starting up
  - "ready" - Klippy is ready
  - "shutdown" - Klippy has shutdown
  - "error" - Klippy has experienced an error during startup

  The message from each state can be found in the `state_message`.
- A `webhooks` printer object has been added, available for subscription or
  query. It includes the following items:
  - `state` - Printer state identical to that returned from `/printer/info`
  - `state_message` - identical to that returned from `/printer/info`
- `/printer/objects/status` (`get_printer_objects_status`) has been renamed
  to `/printer/objects/query` (`get_printer_objects_query`). The format of
  the websocket request has changed; it should now look like the following:
  ```json
  {
    jsonrpc: "2.0",
    method: "get_printer_objects_query",
    params: {
      objects: {
        gcode: null,
        toolhead: ["position", "status"]
      }
    },
    id: <request id>
  }
  ```
  As shown above, printer objects are now wrapped in an "objects" parameter.
  When a client wishes to subscribe to all items of a printer object, they
  should now be set to `null` rather than an empty array.
  The return value has also changed:
  ```json
  {
    eventtime: <klippy time of update>,
    status: {
      gcode: {
        busy: true,
        gcode_position: [0, 0, 0, 0],
        ...},
      toolhead: {
        position: [0, 0, 0, 0],
        status: "Ready",
        ...},
      ...}
  }
  ```
  The `status` item now contains the requested status.
- `/printer/objects/subscription` (`post_printer_objects_subscription`) is
  now `/printer/objects/subscribe` (`post_printer_objects_subscribe`). This
  request takes parameters in the same format as the `query`. It now returns
  state for all currently subscribed objects (in the same format as a
  `query`). This data can be used to initialize all local state after the
  request completes.
- Subscriptions are now pushed as "diffs". Clients will only receive updates
  for subscribed items when that data changes. This requires that clients
  initialize their local state with the data returned from the subscription
  request.
- The structure of data returned from `/printer/objects/list` has changed.
  It now returns an array of available printer objects:
  ```json
  { objects: ["gcode", "toolhead", "bed_mesh", "configfile", ...] }
  ```
- The `notify_klippy_state_changed` notification has been removed. Clients
  can subscribe to `webhooks` and use `webhooks.state` to be notified of
  transitions to the "ready" and "shutdown" states.
- A `notify_klippy_disconnected` event has been added to notify clients
  when the connection between Klippy and Moonraker has been terminated.
  This event is sent with no parameters:
  ```json
  {jsonrpc: "2.0", method: "notify_klippy_disconnected"}
  ```
365  docs/components.md  Normal file
@@ -0,0 +1,365 @@
## Components

Components in Moonraker are used to extend Moonraker's functionality,
similar to "extras" in Klipper. Moonraker divides components into
two categories, "core" components and "optional" components. A core
component gets its configuration from the `[server]` section and is
loaded when Moonraker starts. For example, the `file_manager` is a
core component. If a core component fails to load, Moonraker will
exit with an error.

Optional components must be configured in `moonraker.conf`. If they
have no specific configuration, a bare section, such as
`[octoprint_compat]`, must be present in `moonraker.conf`. Unlike with
core components, Moonraker will still start if an optional component
fails to load. Its failed status will be available for clients to query
and present to the user.

### Basic Example

Components exist in the `components` directory. The below example
shows how an `example.py` component might look:
```python
# Example Component
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

class Example:
    def __init__(self, config):
        self.server = config.get_server()
        self.name = config.get_name()

        # Raises an error if "example_int_option" is not configured in
        # the [example] section
        self.example_int_opt = config.getint("example_int_option")

        # Returns a NoneType if "example_float_option" is not configured
        # in the config
        self.example_float_opt = config.getfloat("example_float_option", None)

        self.server.register_endpoint("/server/example", ['GET'],
                                      self._handle_example_request)

    async def request_some_klippy_state(self):
        klippy_apis = self.server.lookup_component('klippy_apis')
        return await klippy_apis.query_objects({'print_stats': None})

    async def _handle_example_request(self, web_request):
        web_request.get_int("required_request_param")
        web_request.get_float("optional_request_param", None)
        state = await self.request_some_klippy_state()
        return {"example_return_value": state}

def load_component(config):
    return Example(config)
```
If you have created a "Klippy extras" module then the above should
look familiar. Moonraker attempts to use a similar method for adding
extensions, making it easier for Klipper contributors to add new
functionality to Moonraker. Be aware that there is no "Reactor" object
in Moonraker; it uses `asyncio` for coroutines. Like Klippy, you should
not write code that blocks the main thread.

### The ConfigHelper Object

As shown above, each component is passed a config object. This object
will be a `ConfigHelper` type, which is an object that wraps a
configuration section to simplify access to the native `ConfigParser`.
A `ConfigHelper` should never be directly instantiated.

#### *ConfigHelper.get_server()*

Returns the primary [server](#the-server-object) instance.

#### *ConfigHelper.get_name()*

Returns the configuration section name associated with this `ConfigHelper`.

#### *ConfigHelper.get(option_name, default=Sentinel)*

Returns the value of the option `option_name` as a string. If
the option does not exist, returns `default`. If `default` is
not provided raises a `ConfigError`.

#### *ConfigHelper.getint(option_name, default=Sentinel)*

Returns the value of the option `option_name` as an integer. If
the option does not exist, returns `default`. If `default` is
not provided raises a `ConfigError`.

#### *ConfigHelper.getfloat(option_name, default=Sentinel)*

Returns the value of the option `option_name` as a float. If
the option does not exist, returns `default`. If `default` is
not provided raises a `ConfigError`.

#### *ConfigHelper.getboolean(option_name, default=Sentinel)*

Returns the value of the option `option_name` as a boolean. If
the option does not exist, returns `default`. If `default` is
not provided raises a `ConfigError`.

#### *ConfigHelper.has_section(section_name)*

Returns True if a section matching `section_name` is in the configuration,
otherwise False.

Note that a ConfigHelper object also implements `__contains__`,
which is an alias for `has_section`, ie: `section_name in config_instance`

#### *ConfigHelper.getsection(section_name)*

Returns a ConfigHelper object for the section matching `section_name`. If
the section does not exist in the configuration raises a `ConfigError`.

Note that a ConfigHelper object also implements `__getitem__`,
which is an alias for `getsection`, ie: `config_instance[section_name]`

#### *ConfigHelper.get_options()*

Returns a dict mapping options to values for all options in the
ConfigHelper object.

#### *ConfigHelper.get_prefix_sections(prefix)*

Returns a list of section names in the configuration that start with
`prefix`. These strings can be used to retrieve ConfigHelpers via
[getsection()](#confighelpergetsectionsection_name).

### The Server Object

The server instance represents the central management object in Moonraker.
It can be used to register endpoints, register notifications, look up other
components, send events, and more.

#### *Server.lookup_component(component_name, default=Sentinel)*

Attempts to look up a loaded component, returning the result. If
the component has not been loaded, `default` will be returned.
If `default` is not provided a `ServerError` will be raised.

#### *Server.load_component(config, component_name, default=Sentinel)*

Attempts to load an uninitialized component and returns the result. It is
only valid to call this within a component's `__init__()` method, and
should only be necessary if one optional component relies on another.
Core components will always be loaded before optional components, thus
an optional component may always call
[lookup_component()](#serverlookup_componentcomponent_name-defaultsentinel)
when it needs a reference to a core component.

If the component fails to load `default` will be returned. If `default`
is not provided a `ServerError` will be raised.

#### *Server.register_endpoint(uri, request_methods, callback, transports=["http", "websocket", "mqtt"], wrap_result=True)*

Registers the supplied `uri` with the server.

The `request_methods` argument should be a list of strings containing any
combination of `GET`, `POST`, and `DELETE`.

The `callback` is executed when a request matching the `uri` and a
`request_method` is received. The callback function will be passed a
`WebRequest` object with details about the request. This function
should be capable of handling each registered `request_method`. The
provided callback must be a coroutine.

The `transports` argument is a list containing any combination of
`http`, `websocket` and `mqtt`. JSON-RPC methods for `websocket` and `mqtt`
will be generated based on what is supplied by the `uri` and
`request_methods` arguments. A unique JSON-RPC method is generated for each
request method. For example:
```python
self.server.register_endpoint("/server/example", ["POST"], self._handle_request)
```
would register a JSON-RPC method like:
```
server.example
```

However, if multiple request methods are supplied, the generated JSON-RPC
methods will differ:
```python
self.server.register_endpoint("/server/example", ["GET", "POST", "DELETE"],
                              self._handle_request)
```
would register:
```
server.get_example
server.post_example
server.delete_example
```

The `wrap_result` argument applies only to the `http` protocol. In Moonraker
all http requests return a result with a JSON body. By default, the value
returned by a `callback` is wrapped in a dict:
```python
{"result": return_value}
```
It is only necessary to set this to false if you need to return a body that
does not match this format. For example, the `[octoprint_compat]` component
uses this functionality to return results in a format that matches what
OctoPrint itself would return.

#### *Server.register_event_handler(event, callback)*

Registers the provided `callback` method to be executed when the
provided `event` is sent. The callback may be a coroutine, however it
is not required.

#### *Server.send_event(event, \*args)*

Emits the event named `event`, calling all callbacks registered to the
event. All positional arguments in `*args` will be passed to each
callback. Event names should be in the form of
`"module_name:event_description"`.

#### *Server.register_notification(event_name, notify_name=None)*

Registers a websocket notification to be pushed when `event_name`
is emitted. By default the JSON-RPC notification sent will be in the
form of `notify_{event_description}`. For example, when the server sends
the `server:klippy_connected` event, the JSON-RPC notification will be
`notify_klippy_connected`.

If a `notify_name` is provided it will override the `{event_description}`
extracted from the `event_name`. For example, if `notify_name="kconnect"`
were specified when registering the `server:klippy_connected` event, the
websocket would emit a `notify_kconnect` notification.

#### *Server.get_host_info()*

Returns a tuple of the current host name of the PC and the port Moonraker
is serving on.

#### *Server.get_klippy_info()*

Returns a dict containing the values from the most recent `info` request to
Klippy. If Klippy has never connected this will be an empty dict.
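To tie these together, a sketch of a component that registers both a
notification and an event handler. The `example:state_changed` event
name is hypothetical; the `server:klippy_ready` event name is assumed
from the naming convention described above:
```python
class ExampleEvents:
    def __init__(self, config):
        self.server = config.get_server()
        # Clients will receive this as "notify_example_state_changed"
        self.server.register_notification("example:state_changed")
        self.server.register_event_handler(
            "server:klippy_ready", self._on_klippy_ready)

    async def _on_klippy_ready(self):
        # Broadcasts to every callback registered for this event
        self.server.send_event("example:state_changed", "ready")

def load_component(config):
    return ExampleEvents(config)
```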

### The WebRequest Object

All callbacks registered with the
[register_endpoint()](#serverregister_endpointuri-request_methods-callback-transportshttp-websocket-mqtt-wrap_resulttrue)
method are passed a WebRequest object when they are executed. This object
contains information about the request, including its endpoint name and
arguments parsed from the request.

#### *WebRequest.get_endpoint()*

Returns the URI registered with this request, ie: `/server/example`.

#### *WebRequest.get_action()*

Returns the request action, which is synonymous with its HTTP request
method. Will be either `GET`, `POST`, or `DELETE`. This is useful
if your endpoint was registered with multiple request methods and
needs to handle each differently.

#### *WebRequest.get_connection()*

Returns the associated Websocket connection ID. This will be `None`
for HTTP requests when no associated websocket is connected to
the client.

#### *WebRequest.get_args()*

Returns a reference to the entire argument dictionary. Useful if
one request handler needs to preprocess the arguments before
passing the WebRequest on to another request handler.

#### *WebRequest.get(key, default=Sentinel)*

Returns the request argument at the provided `key`. If the key is not
present `default` will be returned. If `default` is not provided a
`ServerError` will be raised.

#### *WebRequest.get_str(key, default=Sentinel)*

Retrieves the request argument at the provided `key` and converts it
to a string, returning the result. If the key is not present the `default`
value will be returned. If `default` is not provided or if the attempt at
type conversion fails a `ServerError` will be raised.

#### *WebRequest.get_int(key, default=Sentinel)*

Retrieves the request argument at the provided `key` and converts it
to an integer, returning the result. If the key is not present the
`default` value will be returned. If `default` is not provided or if the
attempt at type conversion fails a `ServerError` will be raised.

#### *WebRequest.get_float(key, default=Sentinel)*

Retrieves the request argument at the provided `key` and converts it
to a float, returning the result. If the key is not present the `default`
value will be returned. If `default` is not provided or if the attempt at
type conversion fails a `ServerError` will be raised.

#### *WebRequest.get_boolean(key, default=Sentinel)*

Retrieves the request argument at the provided `key` and converts it
to a boolean, returning the result. If the key is not present the
`default` value will be returned. If `default` is not provided or if the
attempt at type conversion fails a `ServerError` will be raised.

### MQTT

If configured by the user, the MQTT component is available for lookup.
Developers may use this to subscribe to and publish topics.

#### *MQTTClient.is_connected()*

Returns true if Moonraker is currently connected to the Broker, false
otherwise.

#### *MQTTClient.wait_connection(timeout=None)*

Blocks until a connection with the broker has been successfully established
or until the specified timeout has been exceeded. Returns true if the
connection was successfully established, or false on timeout. If no
timeout is specified then this method will block indefinitely until a
connection has been established.

#### *MQTTClient.publish_topic(topic, payload=None, qos=None, retain=False)*

Attempts to publish a topic to the Broker. The `payload` may be a bool,
int, float, string, or json encodable (Dict or List). If omitted then an
empty payload is sent. The `qos` may be an integer from 0 to 2. If not
specified then the QOS level will use the configured default. If `retain`
is set to `True` then the retain flag for the payload will be set.

Returns a Future that will block until the topic is confirmed as published.
For QOS level 0 an exception will be raised if the broker is not connected.

#### *MQTTClient.publish_topic_with_response(topic, response_topic, payload=None, qos=None, retain=False, timeout=None)*

Publishes the supplied `topic` with the arguments specified by `payload`,
`qos`, and `retain`, then subscribes to the `response_topic`. The payload
delivered by the response topic is returned. Note that this method is
a coroutine; it must always be awaited. The call will block until the
entire process has completed unless a `timeout` (in seconds) is specified.
The `timeout` is applied to both the attempt to publish and the pending
response, so the maximum waiting time would be approximately 2*timeout.

!!! warning
    This should only be used when it is guaranteed that the `response_topic`
    does not have a retained value. Otherwise the returned response will
    be the retained value.

#### *MQTTClient.subscribe_topic(topic, callback, qos=None)*

Subscribes to the supplied `topic` with the specified `qos`. If `qos` is
not supplied the configured default will be used. The `callback` should be
a callable that accepts a `payload` argument of a `bytes` type. The
callable may be a coroutine. The callback will be run each time the
subscribed topic is published by another client.

Returns a `SubscriptionHandle` that may be used to unsubscribe the topic.

#### *MQTTClient.unsubscribe(hdl)*

Unsubscribes the callback associated with `hdl`. If no outstanding
callbacks exist for the topic then the topic is unsubscribed from the
broker.
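A sketch of how a component might use this API, assuming the component
name `mqtt` for lookup and illustrative topic names:
```python
async def mqtt_example(server):
    mqtt = server.lookup_component("mqtt")
    if not await mqtt.wait_connection(timeout=10.):
        raise RuntimeError("MQTT broker unavailable")

    def on_payload(payload: bytes) -> None:
        # Runs each time another client publishes the topic
        print(f"received: {payload.decode()}")

    hdl = mqtt.subscribe_topic("moonraker/example", on_payload)
    await mqtt.publish_topic("moonraker/example", payload={"status": "ok"})
    # Stop listening when no longer needed
    mqtt.unsubscribe(hdl)
```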
2067  docs/configuration.md  Normal file
(File diff suppressed because it is too large)
128  docs/contributing.md  Normal file
@@ -0,0 +1,128 @@
# Contributing to Moonraker

While Moonraker exists as a service independently from Klipper, it relies
on Klipper to be useful. Thus, the tentative plan is to eventually merge
the Moonraker application into the Klipper repo after Moonraker matures,
at which point this repo will be archived. As such, contributing
guidelines are near those of Klipper:

#### New Module Contributions

All source files should begin with a copyright notice in the following
format:

```python
# Module name and brief description of module
#
# Copyright (C) 2021 YOUR NAME <YOUR EMAIL ADDRESS>
#
# This file may be distributed under the terms of the GNU GPLv3 license
```

#### Git Commit Format

Commits should contain one functional change. Changes that are unrelated
or independent should be broken up into multiple commits. It is acceptable
for a commit to contain multiple files if a change to one module depends on
a change to another (ie: changing the name of a method).

Avoid merge commits. If it is necessary to update a Pull Request from the
master branch use git's interactive rebase and force push.

Each commit message should be in the following format:

```text
module: brief description of commit

More detailed explanation of the change if required

Signed-off-by: Your Name <your email address>
```

Where:

- `module`: is the name of the Python module you are changing or parent
  folder if not applicable
- `Your Name`: Your real first and last name
- `<your email address>`: A real, reachable email address

For example, the git log of a new `power.py` device implementation might
look like the following:

```git
power: add support for mqtt devices

Signed-off-by: Eric Callahan <arksine.code@gmail.com>
```
```git
docs: add mqtt power device documentation

Signed-off-by: Eric Callahan <arksine.code@gmail.com>
```

By signing off on commits, you acknowledge that you agree to the
[developer certificate of origin](../developer-certificate-of-origin)
shown below. As mentioned above, your signature must contain your
real name and a current email address.

```text
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.


Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

#### Code Style

Python methods should be fully annotated. Variables should be annotated
where the type cannot be inferred. Moonraker uses the `mypy` static type
checker for code validation with the following options:

- `--ignore-missing-imports`
- `--follow-imports=silent`

No line in the source code should exceed 80 characters. Be sure there is
no trailing whitespace. To validate code before submission one may use
`pycodestyle` with the following options:

- `--ignore=E226,E301,E302,E303,W503,W504`
- `--max-line-length=80`
- `--max-doc-length=80`

Generally speaking, each line in submitted documentation should also be no
longer than 80 characters, however there are situations where this isn't
possible, such as long hyperlinks or example return values. Documentation
isn't linted, so these exceptions are acceptable.

Don't peek into the member variables of another class. Use getters or
properties to access object state.
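Putting the options together, validation might be run as follows (the
`moonraker/` source path is illustrative):

```sh
mypy --ignore-missing-imports --follow-imports=silent moonraker/
pycodestyle --ignore=E226,E301,E302,E303,W503,W504 \
    --max-line-length=80 --max-doc-length=80 moonraker/
```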
350  docs/dev_changelog.md  Normal file
@@ -0,0 +1,350 @@
### Moonraker Version 0.1 - August 11 2020
- It is no longer possible to configure the subscription timer. All
  subscribed objects will update at an interval of 250ms.
- Request timeout configuration has been removed. The server will no longer
  apply a timeout to requests. Any requests pending when Klippy disconnects
  will be aborted with an error. All pending requests are logged each
  minute.
- The RESET_SD gcode is now SDCARD_RESET_FILE.
- The "virtual_sdcard" object has removed the following items:
  - "filename"
  - "total_duration"
  - "print_duration"
  - "filament_used"
- A new object, "print_stats", has been added. It reports the following
  items:
  - "filename"
  - "total_duration"
  - "print_duration"
  - "filament_used"
  - "state" - can be one of the following:
    - "standby" - sd print not in progress
    - "printing" - print in progress
    - "paused" - print paused
    - "error" - print experienced an error
    - "complete" - print complete
  - "message" - contains the error message when state is "error"
- The behavior of print_stats is slightly different. When a print is
  finished the stats are not cleared. They will remain populated with the
  final data until the user issues a SDCARD_RESET_FILE gcode.
- Moonraker configuration has moved to moonraker.conf.
- Klippy now hosts the Unix Domain Socket. As a result, the order in which
  the Klipper and Moonraker services are started no longer matters.
- The `notify_filelist_changed` event has been refactored for clarity. It
  now returns a result in the following format:
  ```json
  {
    action: "<action>",
    item: {
      path: "<file or directory path>",
      root: "<root_name>",
      size: <file size>,
      modified: "<date modified>"
    },
    source_item: {
      path: "<file or directory path>",
      root: "<root_name>"
    }
  }
  ```
  Note that the `source_item` is only present for `move_item` and
  `copy_item` actions. Below is a list of all available actions:
  - `upload_file`
  - `delete_file`
  - `create_dir`
  - `delete_dir`
  - `move_item`
  - `copy_item`

### Moonraker Version .08-alpha - 7/2/2020
- Moonraker has moved to its own repo.
- Python 3 support has been added.
- API Key management has moved from Klippy to Moonraker.
- File management has moved from Klippy to Moonraker. All static files are
  now located in the `/server/files` root path:
  - klippy.log - `/server/files/klippy.log`
  - moonraker.log - `/server/files/moonraker.log`
  - gcode files - `/server/files/gcodes/(.*)`

  Note that the new file manager will be capable of serving and listing
  files in directories aside from "gcodes".
- Added basic plugin support.
- Added metadata support for SuperSlicer.
- Added thumbnail extraction from SuperSlicer and PrusaSlicer gcode files.
- For status requests, `virtual_sdcard.current_file` has been renamed to
  `virtual_sdcard.filename`.
- Clients should not send `M112` via gcode to execute an emergency
  shutdown. They should instead use the new API which exposes this
  functionality.
- New APIs:
  - `POST /printer/emergency_stop` - `post_printer_emergency_stop`
  - `GET /server/files/metadata` - `get_file_metadata`
  - `GET /server/files/directory`
  - `POST /server/files/directory`
  - `DELETE /server/files/directory`
- The following API changes have been made:

  | Previous URI | New URI | Previous JSON_RPC method | New JSON_RPC method |
  |--------------|---------|--------------------------|---------------------|
  | GET /printer/objects | GET /printer/objects/list | get_printer_objects | get_printer_objects_list |
  | GET /printer/subscriptions | GET /printer/objects/subscription | get_printer_subscriptions | get_printer_objects_subscription |
  | POST /printer/subscriptions | POST /printer/objects/subscription | post_printer_subscriptions | post_printer_objects_subscription |
  | GET /printer/status | GET /printer/objects/status | get_printer_status | get_printer_objects_status |
  | POST /printer/gcode | POST /printer/gcode/script | post_printer_gcode | post_printer_gcode_script |
  | GET /printer/klippy.log | GET /server/files/klippy.log | | |
  | GET /server/moonraker.log | GET /server/files/moonraker.log | | |
  | GET /printer/files | GET /server/files/list | get_printer_files | get_file_list |
  | POST /printer/files/upload | POST /server/files/upload | | |
  | GET /printer/files/<filename> | GET /server/files/gcodes/<filename> | | |
  | DELETE /printer/files/<filename> | DELETE /server/files/<filename> | | |
  | GET /printer/endstops | GET /printer/query_endstops/status | get_printer_endstops | get_printer_query_endstops_status |

### Moonraker Version .07-alpha - 5/7/2020
- The server process is no longer managed directly by Klippy. It has moved
  into its own process dubbed Moonraker. Please see README.md for
  installation instructions.
- API Changes:
  - `/printer/temperature_store` is now `/server/temperature_store`, or
    `get_server_temperature_store` via the websocket
  - `/printer/log` is now `/printer/klippy.log`
  - `/server/moonraker.log` has been added to fetch the server's log file
- Klippy Changes:
  - The remote_api directory has been removed. There is now a single
    remote_api.py module that handles server configuration.
  - webhooks.py has been changed to handle communications with the server
  - klippy.py has been changed to pass itself to webhooks
  - file_manager.py has been changed to specify the correct status code
    when an error is generated attempting to upload or delete a file
- The nginx configuration will need the following additional section:
  ```
  location /server {
      proxy_pass http://apiserver/server;
      proxy_set_header Host $http_host;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Scheme $scheme;
  }
  ```

### Version .06-alpha - 5/4/2020
- Add `/machine/reboot` and `/machine/shutdown` endpoints. These may be
  used to reboot or shutdown the host machine.
- Fix issue where the websocket was blocked on long transactions, resulting
  in the connection being closed.
- Log all client requests over the websocket.
- Add `/printer/temperature_store` endpoint. Clients may use this to fetch
  stored temperature data. By default the store for each temperature
  sensor is updated every 1s, with the store holding 20 minutes of data.

### Version .05-alpha - 04/23/2020
- The `[web_server]` module has been renamed to `[remote_api]`. Please
  update printer.cfg accordingly.
- Static files are no longer served by the API server. As a result, there
  is no `web_path` option in `[remote_api]`.
- The server process now forwards logging requests back to the Klippy
  Host, thus all logging is done in klippy.log. The temporary endpoint
  serving klippy_server.log has been removed.
- `/printer/info` now includes two additional keys:
  - `error_detected` - Boolean value set to true if a host error has been
    detected
  - `message` - The current Klippy state message. If an error is detected
    this message may be presented to the user. This is the same message
    returned by the STATUS gcode.
- The server process is now launched immediately after the config file is
  read. This allows the client limited access to Klippy in the event of a
  startup error, assuming the config file was successfully parsed and the
  `remote_api` configuration section is valid. Note that when the server
  is initially launched not all endpoints will be available. The following
  endpoints are guaranteed when the server is launched:
  - `/websocket`
  - `/printer/info`
  - `/printer/restart`
  - `/printer/firmware_restart`
  - `/printer/log`
  - `/printer/gcode`
  - `/access/api_key`
  - `/access/oneshot_token`

  The following startup sequence is recommended for clients which make use
  of the websocket (see the sketch after this list):
  - Attempt to connect to `/websocket` until successful.
  - Once connected, query `/printer/info` for the ready status. If not
    ready, check `error_detected`. If not ready and no error, continue
    querying on a timer until the printer is either ready or an error is
    detected.
  - After the printer has identified itself as ready, make subscription
    requests, get the current file list, etc.
  - If the websocket disconnects the client can assume that the server is
    shutdown. It should consider the printer's state to be NOT ready and
    try reconnecting to the websocket until successful.
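  A minimal sketch of that sequence using the third-party `websockets`
  package. The host/port and the `is_ready` result field are
  assumptions; consult the actual `/printer/info` response for the
  ready flag:
  ```python
  import asyncio
  import json

  import websockets

  async def connect_until_ready(host: str = "localhost", port: int = 80):
      uri = f"ws://{host}:{port}/websocket"
      while True:
          try:
              ws = await websockets.connect(uri)
              break
          except OSError:
              await asyncio.sleep(2.)  # server not up yet, retry
      req_id = 1
      while True:
          await ws.send(json.dumps(
              {"jsonrpc": "2.0", "method": "get_printer_info",
               "id": req_id}))
          # Note: a robust client would match response ids and skip
          # unrelated notifications; this sketch assumes a quiet socket
          resp = json.loads(await ws.recv())
          result = resp.get("result", {})
          if result.get("error_detected"):
              raise RuntimeError(result.get("message", "Klippy error"))
          if result.get("is_ready"):
              return ws  # ready: subscribe, fetch file list, etc.
          req_id += 1
          await asyncio.sleep(2.)
  ```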

### Version .04-alpha - 04/20/2020
- Add `/printer/gcode/help` endpoint to gcode.py.
- Allow clients to fetch .json files in the root web directory.
- Add support for detailed print tracking to virtual_sdcard.py. This
  includes filament usage and print time tracking.
- Add new file_manager.py module for advanced gcode file management. Gcode
  files may exist in subdirectories. This module also supports extracting
  metadata from gcode files.
- Clean up API registration. All endpoints are now registered by Klippy
  host modules outside of static files and `/api/version`, which is used
  for compatibility with OctoPrint's file upload API.
- The server now runs in its own process. Communication between the Host
  and the server is done over a duplex pipe. Currently this results in a
  second log file being generated specifically for the server at
  `/tmp/klippy_server.log`. This is likely a temporary solution, and as
  such a temporary endpoint has been added at `/printer/klippy_server.log`.
  Users can use the browser to download the log by navigating to
  `http://<host>/printer/klippy_server.log`.

### Version .03-alpha - 03/09/2020
- Require that the configured port be above 1024.
- Fix hard crash if the webserver fails to start.
- Fix file uploads with names containing whitespace.
- Serve static files based on their relative directory, ie: a request
  for "/js/main.js" will now look for the file in "<web_path>/js/main.js".
- Fix bug in CORS where DELETE requests raised an exception.
- Disable the server when running Klippy in batch mode.
- The `/printer/cancel`, `/printer/pause` and `/printer/resume` gcodes
  are now registered by the pause_resume module. This results in the
  following changes:
  - The `cancel_gcode`, `pause_gcode`, and `resume_gcode` options have
    been removed from the [web_server] section.
  - The `/printer/pause` and `/printer/resume` endpoints will run the
    "PAUSE" and "RESUME" gcodes respectively. These gcodes can be
    overridden by a gcode_macro to run custom PAUSE and RESUME commands.
    For example:
    ```
    [gcode_macro PAUSE]
    rename_existing: BASE_PAUSE
    gcode:
      {% if not printer.pause_resume.is_paused %}
        M600
      {% endif %}

    [gcode_macro M600]
    default_parameter_X: 50
    default_parameter_Y: 0
    default_parameter_Z: 10
    gcode:
      SET_IDLE_TIMEOUT TIMEOUT=18000
      {% if not printer.pause_resume.is_paused %}
        BASE_PAUSE
      {% endif %}
      G1 E-.8 F2700
      G91
      G1 Z{Z}
      G90
      G1 X{X} Y{Y} F3000
    ```
    If you are calling "PAUSE" in any other macro or config section, please
    remember that it will execute the macro. If that is not your
    intention, change "PAUSE" in those sections to the renamed version; in
    the example above it is BASE_PAUSE.
  - The cancel endpoint runs a "CANCEL_PRINT" gcode. Users will need to
    define their own gcode macro for this.
  - Remove "notify_paused_state_changed" and "notify_printer_state_changed"
    events. The data from these events can be fetched via status
    subscriptions.
  - "idle_timeout" and "pause_resume" now default to tier 1 status updates,
    which sets their default refresh time to 250ms.
- Some additional status attributes have been added to virtual_sdcard.py.
  At the moment they are experimental and subject to change:
  - 'is_active' - Returns true when the virtual_sdcard is processing. Note
    that this will return false when the printer is paused.
  - 'current_file' - The name of the currently loaded file. If no file is
    loaded returns an empty string.
  - 'print_duration' - The approximate duration (in seconds) of the current
    print. This value does not include time spent paused. Returns 0 when
    no file is loaded.
  - 'total_duration' - The total duration of the current print, including
    time spent paused. This can be useful for approximating the local
    time the print started. Returns 0 when no file is loaded.
  - 'filament_used' - The approximate amount of filament used. This does
    not include changes to flow rate. Returns 0 when no file is loaded.
  - 'file_position' - The current position (in bytes) of the loaded file.
    Returns 0 when no file is loaded.
  - 'progress' - This attribute already exists, however it has been changed
    to retain its value while the print is paused. Previously it would
    reset to 0 when paused. Returns 0 when no file is loaded.
### Version .02-alpha - 02/27/2020
- Migrated Framework and Server from Bottle/Eventlet to Tornado. This
  resolves an issue where the server hangs for a period of time if the
  network connection abruptly drops.
- A `webhooks` host module has been created. Other modules can use
  webhooks to register endpoints, even if the web_server is not
  configured.
- Two modules have been renamed: subscription_handler.py is now
  status_handler.py and ws_handler.py is now ws_manager.py. These names
  more accurately reflect their current functionality.
- Tornado Websockets support string encoded frames. Thus it is no longer
  necessary for clients to use a FileReader object to convert incoming
  websocket data from a Blob into a String.
- The endpoint for querying endstops has changed from `GET
  /printer/extras/endstops` to `GET /printer/endstops`
- Several API changes have been made to accommodate the addition of webhooks:
  - `GET /printer/klippy_info` is now `GET /printer/info`. This endpoint no
    longer returns host information, as that can be retrieved directly via the
    `location` object in javascript. Instead it returns CPU information.
  - `GET /printer/objects` is no longer used to accommodate multiple request
    types by modifying the "Accept" headers. Each request has been broken
    down into its own endpoint:
    - `GET /printer/objects` returns all available printer objects that may
      be queried
    - `GET /printer/status?gcode=gcode_position,speed&toolhead` returns the
      status of the requested printer objects and attributes
    - `GET /printer/subscriptions` returns all printer objects that are
      currently being subscribed to along with their poll times
    - `POST /printer/subscriptions?gcode&toolhead` requests that the printer
      add the specified objects and attributes to the list of subscribed
      objects
- Requests that query the Klippy host with additional parameters can no
  longer use variable paths. For example, `POST /printer/gcode/<gcode>` is no
  longer valid. Parameters must be added to the query string. This currently
  affects two endpoints:
  - `POST /printer/gcode/<gcode>` is now `POST /printer/gcode?script=<gcode>`
  - `POST printer/print/start/<filename>` is now
    `POST /printer/print/start?filename=<filename>`
- The websocket API also required changes to accommodate dynamically
  registered endpoints. Each method name is now generated from its comparable
  HTTP request. The new method names are listed below:

    | new method | old method |
    |------------|------------|
    | get_printer_files | get_file_list |
    | get_printer_info | get_klippy_info |
    | get_printer_objects | get_object_info |
    | get_printer_subscriptions | get_subscribed |
    | get_printer_status | get_status |
    | post_printer_subscriptions | add_subscription |
    | post_printer_gcode | run_gcode |
    | post_printer_print_start | start_print |
    | post_printer_print_pause | pause_print |
    | post_printer_print_resume | resume_print |
    | post_printer_print_cancel | cancel_print |
    | post_printer_restart | restart |
    | post_printer_firmware_restart | firmware_restart |
    | get_printer_endstops | get_endstops |

- As with the http API, a change was necessary to the way arguments are sent
  along with the request. Websocket requests should now send "keyword
  arguments" rather than "variable arguments". The test client has been
  updated to reflect these changes, see main.js and json-rpc.js, specifically
  the new method `call_method_with_kwargs`. For status requests this simply
  means that it is no longer necessary to wrap the Object in an Array. The
  gcode and start print requests now look for named parameters, ie:
  - gcode requests - `{jsonrpc: "2.0", method: "post_printer_gcode",
    params: {script: "M117 FooBar"}, id: <request id>}`
  - start print - `{jsonrpc: "2.0", method: "post_printer_print_start",
    params: {filename: "my_file.gcode"}, id: <request id>}`
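
  For concreteness, here is a rough Python sketch of a kwargs-style call as
  described above. It assumes the third-party `websockets` package and a
  server at `localhost:7125`; `post_printer_gcode` is the method name from
  this release, since replaced in later versions:

  ```python
  # Sketch of a kwargs-style JSON-RPC call over the websocket.
  # Assumes the `websockets` package; method name matches this release.
  import asyncio
  import json
  import websockets

  async def run_gcode(script: str) -> None:
      async with websockets.connect("ws://localhost:7125/websocket") as ws:
          await ws.send(json.dumps({
              "jsonrpc": "2.0",
              "method": "post_printer_gcode",
              "params": {"script": script},  # keyword args, not an array
              "id": 42,
          }))
          # Note: the first frame received may be a server notification
          print(json.loads(await ws.recv()))

  asyncio.run(run_gcode("M117 FooBar"))
  ```
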
### Version .01-alpha - 02/14/2020
- The api.py module has been refactored to contain the bottle application and
  all routes within a class. Bottle is now imported and patched dynamically
  within this class's constructor. This resolves an issue where the "request"
  context was lost when the Klippy host restarts.
- Change the Websocket API to use the JSON-RPC 2.0 protocol. See the test
  client (main.js and json-rpc.js) for an example client side implementation.
- Remove file transfer support from the websocket. Use HTTP for all file
  transfer requests.
- Add support for Klippy Host modules to register their own urls.
  Query_endstops.py has been updated with an example. As a result of this
  change, the endpoint for endstop queries has been changed to
  `/printer/extras/endstops`.
- Add support for "paused", "resumed", and "cleared" pause events.
- Add routes for downloading klippy.log, restart, and firmware_restart.
- Remove support for trailing slashes in HTTP API routes.
- Support "start print after upload" requests
- Add support for user configured request timeouts
- The test client has been updated to work with the new changes
37
docs/developer-certificate-of-origin
Normal file
@@ -0,0 +1,37 @@
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.


Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
2
docs/doc-requirements.txt
Normal file
@@ -0,0 +1,2 @@
mkdocs==1.3.0
pymdown-extensions==9.1
106
docs/example-home-assistant-extended.yaml
Normal file
@@ -0,0 +1,106 @@
# Example Home Assistant configuration file for an Artillery Sidewinder X1
# Credit to GitHub users @Kruppes and @pedrolamas
# extended by @tispokes
camera:
  - platform: generic
    still_image_url: http://192.168.178.66/webcam/?action=snapshot
    stream_source: http://192.168.178.66/webcam/?action=stream
    framerate: 10

sensor:
  - platform: rest
    name: SWX1_sensor
    resource: "http://192.168.178.66:7125/printer/objects/query?heater_bed&extruder&print_stats&toolhead&display_status&virtual_sdcard"
    json_attributes_path: "$.result.status"
    json_attributes:
      - heater_bed
      - extruder
      - print_stats
      - toolhead
      - display_status
      - virtual_sdcard
    value_template: >-
      {{ "OK" if ("result" in value_json) else "offline" }}
    # Adding an API key is only necessary while using the [authorization] component
    # and if Home Assistant is not a trusted client
    headers:
      x-api-key: 123456789abcdefghijklmno

  - platform: template
    sensors:
      swx1_state:
        unique_id: sensor.swx1_state
        friendly_name: "Status"
        icon_template: mdi:printer-3d
        value_template: >-
          {{ states.sensor.swx1_sensor.attributes['print_stats']['state'] if is_state('sensor.swx1_sensor', 'OK') else None }}

      swx1_current_print:
        unique_id: sensor.swx1_current_print
        friendly_name: >-
          {{ "Printed" if states.sensor.swx1_sensor.attributes['display_status']['progress'] == 1 else "Printing..." }}
        icon_template: mdi:video-3d
        value_template: >-
          {{ states.sensor.swx1_sensor.attributes['print_stats']['filename'].split(".")[0] if is_state('sensor.swx1_sensor', 'OK') else None }}

      swx1_current_progress:
        unique_id: sensor.swx1_current_progress
        friendly_name: "Progress"
        unit_of_measurement: '%'
        icon_template: mdi:file-percent
        value_template: >-
          {{ (states.sensor.swx1_sensor.attributes['display_status']['progress'] * 100) | round(1) if is_state('sensor.swx1_sensor', 'OK') else None }}

      swx1_print_time:
        unique_id: sensor.swx1_print_time
        friendly_name: "T-elapsed"
        icon_template: mdi:clock-start
        value_template: >-
          {{ states.sensor.swx1_sensor.attributes['print_stats']['print_duration'] | timestamp_custom("%H:%M:%S", 0) if is_state('sensor.swx1_sensor', 'OK') else None }}

      swx1_time_remaining:
        unique_id: sensor.swx1_time_remaining
        friendly_name: "T-remaining"
        icon_template: mdi:clock-end
        value_template: >-
          {{ (((states.sensor.swx1_sensor.attributes['print_stats']['print_duration'] / states.sensor.swx1_sensor.attributes['display_status']['progress'] - states.sensor.swx1_sensor.attributes['print_stats']['print_duration']) if states.sensor.swx1_sensor.attributes['display_status']['progress'] > 0 else 0) | timestamp_custom('%H:%M:%S', 0)) if is_state('sensor.swx1_sensor', 'OK') else None }}

      swx1_eta:
        unique_id: sensor.swx1_eta
        friendly_name: "T-ETA"
        icon_template: mdi:clock-outline
        value_template: >-
          {{ (as_timestamp(now()) + 2 * 60 * 60 + ((states.sensor.swx1_sensor.attributes['print_stats']['print_duration'] / states.sensor.swx1_sensor.attributes['display_status']['progress'] - states.sensor.swx1_sensor.attributes['print_stats']['print_duration']) if states.sensor.swx1_sensor.attributes['display_status']['progress'] > 0 else 0)) | timestamp_custom("%H:%M:%S", 0) if is_state('sensor.swx1_sensor', 'OK') else None }}

      swx1_nozzletemp:
        unique_id: sensor.swx1_nozzletemp
        friendly_name: >-
          Nozzle
          {{ ["(shall ", (states.sensor.swx1_sensor.attributes['extruder']['target'] | float | round(1)), "°C)"] | join if states.sensor.swx1_sensor.attributes['display_status']['progress'] < 1 }}
        icon_template: >-
          {{ "mdi:printer-3d-nozzle-heat" if states.sensor.swx1_sensor.attributes['extruder']['target'] > 0 else "mdi:printer-3d-nozzle-heat-outline" }}
        value_template: >-
          {{ states.sensor.swx1_sensor.attributes['extruder']['temperature'] | float | round(1) if is_state('sensor.swx1_sensor', 'OK') else None }}

      swx1_bedtemp:
        unique_id: sensor.swx1_bedtemp
        friendly_name: >-
          Bed
          {{ ["(shall ", (states.sensor.swx1_sensor.attributes['heater_bed']['target'] | float | round(1)), "°C)"] | join if states.sensor.swx1_sensor.attributes['display_status']['progress'] < 1 }}
        icon_template: >-
          {{ "mdi:radiator" if states.sensor.swx1_sensor.attributes['extruder']['target'] > 0 else "mdi:radiator-off" }}
        value_template: >-
          {{ states.sensor.swx1_sensor.attributes['heater_bed']['temperature'] | float | round(1) if is_state('sensor.swx1_sensor', 'OK') else None }}

# The following will allow you to control the power of devices configured in the "[power]" sections of moonraker
# Make sure to change the `SWX1` name below to the device name in your configuration
#
switch:
  - platform: rest
    name: SWX1_power
    resource: "http://192.168.178.66:7125/machine/device_power/device?device=SWX1"
    body_on: '{"action": "on"}'
    body_off: '{"action": "off"}'
    headers:
      Content-Type: 'application/json'
    is_on_template: >-
      {{ 'result' in value_json and (value_json.result.values() | list | first == "on") }}
111
docs/example-home-assistant.yaml
Normal file
@@ -0,0 +1,111 @@
# Example Home Assistant configuration file for a Voron V0.
# Credit to GitHub users @Kruppes and @pedrolamas
#
sensor:
  - platform: rest
    name: Voron_V0_sensor
    resource: "http://192.168.178.56:7125/printer/objects/query?heater_bed&extruder&print_stats&toolhead&display_status&virtual_sdcard"
    json_attributes_path: "$.result.status"
    json_attributes:
      - heater_bed
      - extruder
      - print_stats
      - toolhead
      - display_status
      - virtual_sdcard
    value_template: >-
      {{ 'OK' if ('result' in value_json) else None }}
    # Adding an API key is only necessary while using the [authorization] component
    # and if Home Assistant is not a trusted client
    headers:
      x-api-key: 123456789abcdefghijklmno

  - platform: template
    sensors:

      vzero_hotend_target:
        friendly_name: 'V0.126 Hotend Target'
        device_class: temperature
        unit_of_measurement: '°C'
        value_template: >-
          {{ states.sensor.voron_v0_sensor.attributes['extruder']['target'] | float | round(1) if is_state('sensor.voron_v0_sensor', 'OK') else None }}

      vzero_hotend_actual:
        device_class: temperature
        unit_of_measurement: '°C'
        value_template: >-
          {{ states.sensor.voron_v0_sensor.attributes['extruder']['temperature'] | float | round(1) if is_state('sensor.voron_v0_sensor', 'OK') else None }}

      vzero_bed_target:
        device_class: temperature
        unit_of_measurement: '°C'
        value_template: >-
          {{ states.sensor.voron_v0_sensor.attributes['heater_bed']['target'] | float | round(1) if is_state('sensor.voron_v0_sensor', 'OK') else None }}

      vzero_bed_actual:
        device_class: temperature
        unit_of_measurement: '°C'
        value_template: >-
          {{ states.sensor.voron_v0_sensor.attributes['heater_bed']['temperature'] | float | round(1) if is_state('sensor.voron_v0_sensor', 'OK') else None }}

      vzero_state:
        icon_template: mdi:printer-3d
        value_template: >-
          {{ states.sensor.voron_v0_sensor.attributes['print_stats']['state'] if is_state('sensor.voron_v0_sensor', 'OK') else None }}

      vzero_current_print:
        value_template: >-
          {{ states.sensor.voron_v0_sensor.attributes['print_stats']['filename'] if is_state('sensor.voron_v0_sensor', 'OK') else None }}

      vzero_current_progress:
        unit_of_measurement: '%'
        icon_template: mdi:file-percent
        value_template: >-
          {{ (states.sensor.voron_v0_sensor.attributes['display_status']['progress'] * 100) | round(1) if is_state('sensor.voron_v0_sensor', 'OK') else None }}

      vzero_print_time:
        icon_template: mdi:clock-start
        value_template: >-
          {{ states.sensor.voron_v0_sensor.attributes['print_stats']['print_duration'] | timestamp_custom("%H:%M:%S", 0) if is_state('sensor.voron_v0_sensor', 'OK') else None }}

      vzero_time_remaining:
        icon_template: mdi:clock-end
        value_template: >-
          {{ (((states.sensor.voron_v0_sensor.attributes['print_stats']['print_duration'] / states.sensor.voron_v0_sensor.attributes['display_status']['progress'] - states.sensor.voron_v0_sensor.attributes['print_stats']['print_duration']) if states.sensor.voron_v0_sensor.attributes['display_status']['progress'] > 0 else 0) | timestamp_custom('%H:%M:%S', 0)) if is_state('sensor.voron_v0_sensor', 'OK') else None }}

      vzero_eta:
        icon_template: mdi:clock-outline
        value_template: >-
          {{ (as_timestamp(now()) + 2 * 60 * 60 + ((states.sensor.voron_v0_sensor.attributes['print_stats']['print_duration'] / states.sensor.voron_v0_sensor.attributes['display_status']['progress'] - states.sensor.voron_v0_sensor.attributes['print_stats']['print_duration']) if states.sensor.voron_v0_sensor.attributes['display_status']['progress'] > 0 else 0)) | timestamp_custom("%H:%M:%S", 0) if is_state('sensor.voron_v0_sensor', 'OK') else None }}

      vzero_nozzletemp:
        icon_template: mdi:thermometer
        value_template: >-
          {{ [(states.sensor.voron_v0_sensor.attributes['extruder']['temperature'] | float | round(1) | string), " / ", (states.sensor.voron_v0_sensor.attributes['extruder']['target'] | float | round(1) | string)] | join if is_state('sensor.voron_v0_sensor', 'OK') else None }}

      vzero_bedtemp:
        icon_template: mdi:thermometer
        value_template: >-
          {{ [(states.sensor.voron_v0_sensor.attributes['heater_bed']['temperature'] | float | round(1) | string), " / ", (states.sensor.voron_v0_sensor.attributes['heater_bed']['target'] | float | round(1) | string)] | join if is_state('sensor.voron_v0_sensor', 'OK') else None }}

# The following will allow you to control the power of devices configured in the "[power]" sections of moonraker
# Make sure to change the `Printer` name below to the device name in your configuration
#
switch:
  - platform: rest
    name: Voron_V0_power
    resource: "http://192.168.178.56:7125/machine/device_power/device?device=Printer"
    body_on: '{"action": "on"}'
    body_off: '{"action": "off"}'
    headers:
      Content-Type: 'application/json'
    is_on_template: >-
      {{ 'result' in value_json and (value_json.result.values() | list | first == "on") }}

# MJPEG camera can be exposed to HA
#
camera:
  - platform: mjpeg
    name: Voron_V0_camera
    still_image_url: http://192.168.178.56/webcam/?action=snapshot
    mjpeg_url: http://192.168.178.56/webcam/?action=stream
21
docs/index.md
Normal file
@@ -0,0 +1,21 @@
# Welcome to Moonraker Documentation

Moonraker is a Python 3 based web server that exposes APIs that
client applications may use to interact with the 3D printing firmware
[Klipper](https://github.com/KevinOConnor/klipper). Communication between
the Klippy host and Moonraker is done over a Unix Domain Socket. Tornado
is used to provide Moonraker's server functionality.

Users should refer to the [Installation](installation.md) and
[Configuration](configuration.md) sections for documentation on how
to install and configure Moonraker.

Client developers may refer to the [Client API](web_api.md)
documentation.

Backend developers should refer to the
[contributing](contributing.md) section for basic contribution
guidelines prior to creating a pull request. The
[components](components.md) document provides a brief overview
of how to create a component and interact with Moonraker's
primary internal APIs.
342
docs/installation.md
Normal file
@@ -0,0 +1,342 @@
## Installation

This document provides a guide on how to install Moonraker on a Raspberry
Pi running Raspbian/Raspberry Pi OS. Other SBCs and/or linux distributions
may work, however they may need a custom install script. Moonraker
requires Python 3.7 or greater; verify that your distribution's
Python 3 packages meet this requirement.

### Installing Klipper

Klipper should be installed prior to installing Moonraker. Please see
[Klipper's Documentation](https://klipper3d.com/Overview.html) for details.
After installing Klipper you should make sure to add Moonraker's
[configuration requirements](#klipper-configuration-requirements).

### Klipper Configuration Requirements

Moonraker depends on the following Klippy extras for full functionality:

- `[virtual_sdcard]`
- `[pause_resume]`
- `[display_status]`

If you have a `[filament_switch_sensor]` configured then `[pause_resume]` will
automatically be loaded. Likewise, if you have a `[display]` configured then
`[display_status]` will be automatically loaded. If your configuration is
missing one or both, you can simply add the bare sections to `printer.cfg`:
```ini
[pause_resume]

[display_status]

[virtual_sdcard]
path: ~/gcode_files
```

### Enabling the Unix Socket

After Klipper is installed it may be necessary to modify its `defaults` file in
order to enable the Unix Domain Socket. Begin by opening the file in your
editor of choice, for example:
```
sudo nano /etc/default/klipper
```
You should see a file that looks something like the following:
```
# Configuration for /etc/init.d/klipper

KLIPPY_USER=pi

KLIPPY_EXEC=/home/pi/klippy-env/bin/python

KLIPPY_ARGS="/home/pi/klipper/klippy/klippy.py /home/pi/printer.cfg -l /tmp/klippy.log"
```

Add `-a /tmp/klippy_uds` to KLIPPY_ARGS:
```
# Configuration for /etc/init.d/klipper

KLIPPY_USER=pi

KLIPPY_EXEC=/home/pi/klippy-env/bin/python

KLIPPY_ARGS="/home/pi/klipper/klippy/klippy.py /home/pi/printer.cfg -l /tmp/klippy.log -a /tmp/klippy_uds"
```

!!! note
    Your installation of Klipper may use systemd instead of
    the default LSB script. In this case, you need to modify the
    klipper.service file.

You may also want to take this opportunity to change the location of
printer.cfg to match Moonraker's `config_path` option (see the
[configuration document](configuration.md#primary-configuration)
for more information on the config_path). For example, if the `config_path`
is set to `~/printer_config`, your klipper defaults file might look
like the following:
```
# Configuration for /etc/init.d/klipper

KLIPPY_USER=pi

KLIPPY_EXEC=/home/pi/klippy-env/bin/python

KLIPPY_ARGS="/home/pi/klipper/klippy/klippy.py /home/pi/printer_config/printer.cfg -l /tmp/klippy.log -a /tmp/klippy_uds"
```

If necessary, create the config directory and move printer.cfg to it:
```
cd ~
mkdir printer_config
mv printer.cfg printer_config
```

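To confirm the socket is responding before wiring up Moonraker, you can speak
to it directly. The sketch below is a hedged example: it assumes the
`/tmp/klippy_uds` path configured above and relies on Klippy's API server
framing each JSON message with a trailing `0x03` byte:

```python
# Minimal check that the Klippy Unix socket is responding.
# Assumes the /tmp/klippy_uds path configured above; Klippy's API
# server terminates each JSON message with a 0x03 byte.
import json
import socket

sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect("/tmp/klippy_uds")
sock.sendall(json.dumps({"id": 1, "method": "info"}).encode() + b"\x03")

response = b""
while not response.endswith(b"\x03"):
    response += sock.recv(4096)
print(json.loads(response[:-1]))
```
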
### Installing Moonraker

Begin by cloning the git repository:

```
cd ~
git clone https://github.com/Arksine/moonraker.git
```

Now is a good time to create [moonraker.conf](configuration.md). If you are
using the `config_path`, create it in the specified directory, otherwise create
it in the HOME directory. The [sample moonraker.conf](./moonraker.conf) in
the `docs` directory may be used as a starting point.

For a default installation run the following commands:
```
cd ~/moonraker/scripts
./install-moonraker.sh
```

Or to install with `moonraker.conf` in the `config_path`:
```
cd ~/moonraker/scripts
./install-moonraker.sh -f -c /home/pi/printer_config/moonraker.conf
```

The install script has a few command line options that may be useful,
particularly for those upgrading:

- `-r`:
  Rebuilds the virtual environment for existing installations.
  Sometimes this is necessary when a dependency has been added.
- `-f`:
  Force an overwrite of Moonraker's systemd script. By default
  the systemd script will not be modified if it exists.
- `-c /home/pi/moonraker.conf`:
  Specifies the path to Moonraker's config file. The default location
  is `/home/<user>/moonraker.conf`. When using this option to modify
  an existing installation it is necessary to add `-f` as well.
- `-z`:
  Disables `systemctl` commands during install (ie: daemon-reload, restart).
  This is useful for installations that occur outside of a standard
  environment where systemd is not running.

Additionally, installation may be customized with the following environment
variables:

- `MOONRAKER_VENV`
- `MOONRAKER_REBUILD_ENV`
- `MOONRAKER_FORCE_DEFAULTS`
- `MOONRAKER_DISABLE_SYSTEMCTL`
- `MOONRAKER_CONFIG_PATH`
- `MOONRAKER_LOG_PATH`

When the script completes it should start both Moonraker and Klipper. In
`/tmp/klippy.log` you should find the following entry:

`webhooks client <uid>: Client info {'program': 'Moonraker', 'version': '<version>'}`

Now you may install a client, such as
[Mainsail](https://github.com/mainsail-crew/mainsail) or
[Fluidd](https://github.com/cadriel/fluidd).

!!! Note
    Moonraker's install script no longer includes the nginx dependency.
    If you want to install one of the above clients on the local machine,
    you may want to first install nginx (`sudo apt install nginx` on
    debian/ubuntu distros).

### Command line usage

This section is intended for users that need to write their own
installation script. Detailed below are the command line arguments
available to Moonraker:
```
usage: moonraker.py [-h] [-c <configfile>] [-l <logfile>] [-n]

Moonraker - Klipper API Server

optional arguments:
  -h, --help            show this help message and exit
  -c <configfile>, --configfile <configfile>
                        Location of moonraker configuration file
  -l <logfile>, --logfile <logfile>
                        log file name and location
  -n, --nologfile       disable logging to a file
```

The default configuration is:

- config file path - `~/moonraker.conf`
- log file path - `/tmp/moonraker.log`
- logging to a file is enabled

If one needs to start moonraker without generating a log file, the
`-n` option may be used, for example:
```
~/moonraker-env/bin/python ~/moonraker/moonraker/moonraker.py -n -c /path/to/moonraker.conf
```
In general it is not recommended to install moonraker with this option.
While moonraker will still log to stdout, all requests for support must
be accompanied by moonraker.log.

These options may be changed by editing
`/etc/systemd/system/moonraker.service`. The `install-moonraker.sh` script
may also be used to modify the config file location.

### PolicyKit Permissions

Some of Moonraker's components require elevated privileges to perform actions.
Previously these actions could only be run via command line programs launched
with the `sudo` prefix. This has significant downsides:

- The user must be granted `NOPASSWD` sudo access. Raspberry Pi OS
  grants the Pi user this access by default, however most other distros
  require that this be enabled through editing `visudo` or adding files
  in `/etc/sudoers.d/`.
- Some linux distributions require additional steps such as those taken
  in `sudo_fix.sh`.
- Running CLI programs is relatively expensive. This isn't an issue for
  programs that are run once at startup, but is undesirable if Moonraker
  wants to poll information about the system.

Moonraker now supports communicating with system services via D-Bus.
Operations that require elevated privileges are authorized through
PolicyKit. On startup Moonraker will check for the necessary privileges
and warn users if they are not available. Warnings are presented in
`moonraker.log` and directly to the user through some clients.

To resolve these warnings users have two options:

1) Install the PolicyKit permissions with the `set-policykit-rules.sh` script,
   for example:

    ```shell
    cd ~/moonraker/scripts
    ./set-policykit-rules.sh
    sudo service moonraker restart
    ```

    !!! tip
        If you still get warnings after installing the PolicyKit rules, run
        the install script with no options to make sure that all new
        dependencies are installed.

        ```shell
        cd ~/moonraker/scripts
        ./install-moonraker.sh
        ```

2) Configure Moonraker to use the legacy backend implementations for
   the `machine` and/or `update_manager` components, ie:

    ```ini
    # Use the systemd CLI provider rather than the DBus Provider
    [machine]
    provider: systemd_cli

    # Edit your existing [update_manager] section to disable
    # PackageKit. This will fall back to the APT CLI Package Update
    # implementation.
    [update_manager]
    #..other update manager options
    enable_packagekit: False

    # Alternatively system updates can be disabled
    [update_manager]
    #..other update manager options
    enable_system_updates: False
    ```

!!! Note
    Previously installed PolicyKit rules can be removed by running
    `set-policykit-rules.sh -c`

### Retrieving the API Key

Some clients may require an API Key to connect to Moonraker. After the
`[authorization]` component is first configured Moonraker will automatically
generate an API Key. There are two ways in which the key may be retrieved
by the user:

Retrieve the API Key via the command line (SSH):
```
cd ~/moonraker/scripts
./fetch-apikey.sh
```

Retrieve the API Key via the browser from a trusted client:

- Navigate to `http://{moonraker-host}/access/api_key`, where
  `{moonraker-host}` is the host name or ip address of the desired
  moonraker instance.
- The result will appear in the browser window in JSON format. Copy
  the API Key without the quotes.

        {"result": "8ce6ae5d354a4365812b83140ed62e4b"}

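The same endpoint can also be queried programmatically. A minimal sketch,
assuming the requesting machine is listed in `trusted_clients` (so no key is
required for this first request), that `requests` is installed, and that you
connect to Moonraker directly on port 7125 rather than through a reverse
proxy:

```python
# Sketch: fetch the API key over HTTP from a trusted client.
# Replace moonraker-host with your instance's hostname or IP.
import requests

resp = requests.get("http://moonraker-host:7125/access/api_key")
resp.raise_for_status()
print(resp.json()["result"])
```
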
### Recovering a broken repo

Currently Moonraker is deployed using `git`. Without going into the gritty
details, `git` is effectively a file system, and as such is subject to
file system corruption in the event of a loss of power, a bad sdcard, etc.
If this occurs, updates using the `[update_manager]` may fail. In most
cases Moonraker provides an automated method to recover, however in some
edge cases this is not possible and the user will need to do so manually.
This requires that you `ssh` into your machine. The example below assumes
the following:

- You are using a Raspberry Pi
- Moonraker and Klipper are installed at the default locations in the `home`
  directory
- Both Moonraker and Klipper have been corrupted and need to be restored

The following commands may be used to restore Moonraker:

```shell
cd ~
rm -rf moonraker
git clone https://github.com/Arksine/moonraker.git
cd moonraker/scripts
./install-moonraker.sh
./set-policykit-rules.sh
sudo systemctl restart moonraker
```

And for Klipper:

```shell
cd ~
rm -rf klipper
git clone https://github.com/Klipper3d/klipper.git
sudo systemctl restart klipper
```

### Additional Notes

- Make sure that Moonraker and Klipper both have read and write access to the
  directory set in the `path` option for the `[virtual_sdcard]` in
  `printer.cfg`.
- Upon first starting, Moonraker is not aware of the gcode file path, thus
  it cannot serve gcode files, add directories, etc. After Klippy enters
  the "ready" state it sends Moonraker the gcode file path.
  Once Moonraker receives the path it will retain it regardless of Klippy's
  state, and update it if the path is changed in printer.cfg.

Please see [configuration.md](configuration.md) for details on how to
configure moonraker.conf.
30
docs/moonraker.conf
Normal file
@@ -0,0 +1,30 @@
# Sample Moonraker Configuration File
#
# !!! Moonraker does not load this file. See configuration.md !!!
# !!! for details on the path to Moonraker's configuration.   !!!
#

[server]
# Bind server defaults of 0.0.0.0, port 7125
enable_debug_logging: True
config_path: ~/printer_config

[authorization]
enabled: True
trusted_clients:
    # Enter your client IP or range here
    192.168.1.0/24
cors_domains:
    # Allow CORS requests for Fluidd
    http://app.fluidd.xyz

# Enable OctoPrint compatibility for Slicer uploads
# Supports Cura, Slic3r, and Slic3r derivatives
# (PrusaSlicer, SuperSlicer)
[octoprint_compat]
# Default webcam config values:
# flip_h = false
# flip_v = false
# rotate_90 = false
# stream_url = /webcam/?action=stream
# webcam_enabled = true
385
docs/printer_objects.md
Normal file
@@ -0,0 +1,385 @@
#
As mentioned in the API documentation, it is possible to
[query](web_api.md#query-printer-object-status) or
[subscribe](web_api.md#subscribe-to-printer-object-status)
to "Klipper Printer Objects." There are numerous printer objects in
Klipper, many of which are optional and only report status if they are
enabled by Klipper's configuration. Clients may retrieve a list of
available printer objects via the
[list objects endpoint](web_api.md#list-available-printer-objects). This
should be done after Klipper reports its state as "ready".

This section will provide an overview of the most useful printer objects.
If a developer is interested in retrieving state for an object not listed
here, look in Klipper's source code for the module you wish to query. If the
module contains a "get_status()" method, its return value will contain a
dictionary that reports state which can be queried.

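As a quick orientation, the sketch below lists the available objects and then
queries one of them. It is a minimal example assuming Moonraker at
`localhost:7125` and the Python `requests` package:

```python
# Minimal sketch: list available printer objects, then query one.
# Assumes Moonraker at localhost:7125 and the `requests` package.
import requests

BASE = "http://localhost:7125"

objects = requests.get(f"{BASE}/printer/objects/list").json()
print("available:", objects["result"]["objects"])

# Query the webhooks object described below
query = requests.get(f"{BASE}/printer/objects/query?webhooks").json()
print(query["result"]["status"]["webhooks"])
```
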
## webhooks
```json
{
    "state": "startup",
    "state_message": "message"
}
```
The `webhooks` object contains the current printer state and the current
state message. These fields match those returned via the `/printer/info`
endpoint. This is provided as a convenience; clients may subscribe to
`webhooks` so they are asynchronously notified of a change to printer state.
The `state` may be `startup`, `ready`, `shutdown`, or `error`. The
`state_message` contains a message specific to the current printer state.

## gcode_move
```json
{
    "speed_factor": 1.0,
    "speed": 100.0,
    "extrude_factor": 1.0,
    "absolute_coordinates": true,
    "absolute_extrude": false,
    "homing_origin": [0.0, 0.0, 0.0, 0.0],
    "position": [0.0, 0.0, 0.0, 0.0],
    "gcode_position": [0.0, 0.0, 0.0, 0.0]
}
```
The `gcode_move` object reports the current gcode state:

- `speed_factor`: AKA "feedrate", this is the current speed multiplier.
- `speed`: The current gcode speed in mm/s.
- `extrude_factor`: AKA "extrusion multiplier".
- `absolute_coordinates`: true if the machine axes are moved using
  absolute coordinates, false if using relative coordinates.
- `absolute_extrude`: true if the extruder is moved using absolute
  coordinates, false if using relative coordinates.
- `homing_origin`: [X, Y, Z, E] - returns the "gcode offset" applied to
  each axis. For example, the "Z" axis can be checked to determine how
  much offset has been applied via "babystepping".
- `position`: [X, Y, Z, E] - The internal gcode position, including
  any offsets (gcode_offset, G92, etc) added to an axis.
- `gcode_position`: [X, Y, Z, E] - The current gcode position
  sans any offsets applied. For X, Y, and Z, this should match
  the most recent "G1" or "G0" processed, assuming the machine is
  using absolute coordinates.

!!! Note
    The printer's actual movement will lag behind the reported positional
    coordinates due to lookahead.

## toolhead
```json
{
    "homed_axes": "xyz",
    "print_time": 0.0,
    "estimated_print_time": 0.0,
    "extruder": "extruder",
    "position": [0.0, 0.0, 0.0, 0.0],
    "max_velocity": 500.0,
    "max_accel": 3000.0,
    "max_accel_to_decel": 1500.0,
    "square_corner_velocity": 5.0
}
```
The `toolhead` object reports state of the current tool:

- `homed_axes`: a string containing the axes that are homed. If no axes
  are homed, returns a null string.
- `print_time`: internal value, not generally useful for clients.
- `estimated_print_time`: internal value, not generally useful for clients.
- `extruder`: the name of the currently selected extruder, ie "extruder"
  or "extruder1".
- `position`: [X, Y, Z, E] - This is the last position toward which the tool
  was commanded to move. It includes any offsets applied via gcode as well
  as any transforms made by modules such as "bed_mesh", "bed_tilt", or
  "skew_correction".
- `max_velocity`: The currently set maximum velocity of the tool (mm/s).
- `max_accel`: The currently set maximum acceleration of the tool (mm/s^2).
- `max_accel_to_decel`: The currently set maximum accel to decel of the tool.
  This value is the maximum rate at which the tool can transition from
  acceleration to deceleration (mm/s^2).
- `square_corner_velocity`: The currently set square corner velocity. This
  is the maximum velocity at which the tool may travel a 90 degree corner.

!!! tip
    `max_velocity`, `max_accel`, `max_accel_to_decel`, and
    `square_corner_velocity` can be changed by the `SET_VELOCITY_LIMIT` gcode.
    `M204` can also change `max_accel`.

## configfile
```json
{
    "config": {},
    "settings": {},
    "save_config_pending": false
}
```
The `configfile` object reports printer configuration state:

- `config`: This is an object containing the configuration as read from
  printer.cfg. Each config section will be an object containing the
  configured options. Values will ALWAYS be reported as
  strings. Note that default values are not reported, only options
  configured in printer.cfg are present.
- `settings`: Similar to `config`, however this object includes default
  values that may not have been included in `printer.cfg`. It is possible
  for a value to be a string, integer, boolean, or float.
- `save_config_pending`: True if the printer has taken an action which
  has updated the internal configuration (ie: PID calibration, probe
  calibration, bed mesh calibration). This allows clients to present
  the user with the option to execute a SAVE_CONFIG gcode, which will
  save the configuration to printer.cfg and restart the Klippy Host.

## extruder
*Enabled when `[extruder]` is included in printer.cfg*
!!! note
    If multiple extruders are configured, extruder 0 is available as
    `extruder`, extruder 1 as `extruder1`, and so on.
```json
{
    "temperature": 0.0,
    "target": 0.0,
    "power": 0.0,
    "pressure_advance": 0.0,
    "smooth_time": 0.0
}
```
The `extruder` object reports state of an extruder:

- `temperature`: The extruder's current temperature (in C).
- `target`: The extruder's target temperature (in C).
- `power`: The current pwm value applied to the heater, expressed as a
  fraction from 0.0 to 1.0.
- `pressure_advance`: The extruder's current pressure advance value.
- `smooth_time`: The currently set time range to use when calculating the
  average extruder velocity for pressure advance.

## heater_bed
*Enabled when `[heater_bed]` is included in printer.cfg*
```json
{
    "temperature": 0.0,
    "target": 0.0,
    "power": 0.0
}
```
The `heater_bed` object reports state of the heated bed:

- `temperature`: The bed's current temperature
- `target`: The bed's target temperature
- `power`: The current pwm value applied to the heater, expressed as a
  fraction from 0.0 to 1.0.

## fan
*Enabled when `[fan]` is included in printer.cfg*
```json
{
    "speed": 0.0,
    "rpm": 4000
}
```
The `fan` object returns state of the part cooling fan:

- `speed`: The current fan speed, reported as a fraction of maximum
  speed in the range of 0.0 - 1.0.
- `rpm`: The fan's revolutions per minute if the tachometer
  pin has been configured. Will report `null` if no tach
  has been configured.

## idle_timeout
```json
{
    "state": "Idle",
    "printing_time": 0.0
}
```

The `idle_timeout` object reports the idle state of the printer:

- `state`: Can be `Idle`, `Ready`, or `Printing`. The printer will
  transition to the `Printing` state whenever a gcode is issued that
  commands the tool; this includes manual commands. Thus this should
  not be used to determine if a gcode file print is in progress. It can
  however be used to determine if the printer is busy.
- `printing_time`: The amount of time the printer has been in the
  `Printing` state. This is reset to 0 whenever the printer transitions
  from `Printing` to `Ready`.

## virtual_sdcard
*Enabled when `[virtual_sdcard]` is included in printer.cfg*
```json
{
    "progress": 0.0,
    "is_active": false,
    "file_position": 0
}
```
The `virtual_sdcard` object reports the state of the virtual sdcard:

- `progress`: The print progress reported as a fraction of the file
  read, in the range of 0.0 - 1.0.
- `is_active`: Returns true if the virtual sdcard is currently processing
  a file. Note that this will return false if a virtual sdcard print is
  paused.
- `file_position`: The current file position in bytes. This will always
  be an integer value.

!!! Note
    `progress` and `file_position` will persist after a print has
    paused, completed, or errored. They are cleared when the user issues
    a SDCARD_RESET_FILE gcode or when a new print has started.

## print_stats
*Enabled when `[virtual_sdcard]` is included in printer.cfg*
```json
{
    "filename": "",
    "total_duration": 0.0,
    "print_duration": 0.0,
    "filament_used": 0.0,
    "state": "standby",
    "message": ""
}
```
The `print_stats` object reports `virtual_sdcard` print state:

- `filename`: The name of the currently loaded file. This will be a null
  string if no file is loaded. Note that the name is a path relative to the
  gcode folder, thus if the file is located in a subdirectory it would
  be reported as "my_sub_dir/myprint.gcode".
- `total_duration`: The total time (in seconds) elapsed since a print
  has started. This includes time spent paused.
- `print_duration`: The total time spent printing (in seconds). This is
  equivalent to `total_duration` minus time paused.
- `filament_used`: The amount of filament used during the current print
  (in mm). Any extrusion during a pause is excluded.
- `state`: Current print state. Can be one of the following values:
  - `"standby"`
  - `"printing"`
  - `"paused"`
  - `"complete"`
  - `"cancelled"`
  - `"error"` - Note that if an error is detected the print will abort
- `message`: If an error is detected, this field contains the error
  message generated. Otherwise it will be a null string.

!!! Note
    After a print has started all of the values above will persist until
    the user issues a SDCARD_RESET_FILE gcode or a new print has started.

## display_status
*Enabled when `[display]` or `[display_status]` is included in printer.cfg*
```json
{
    "message": "",
    "progress": 0.0
}
```
The `display_status` object contains state typically used to update displays:

- `message`: The message set by an M117 gcode. If no message is set this will
  be a null string.
- `progress`: The fraction of print progress, as reported by M73. This
  will be in the range of 0.0 - 1.0. If no M73 has been issued this value
  will fall back to the equivalent of `virtual_sdcard.progress`. Note that
  progress updated via M73 has a timeout. If no M73 is received after 5
  seconds, `progress` will be set to the fallback value.

## temperature_sensor sensor_name
*Enabled when `[temperature_sensor sensor_name]` is included in printer.cfg.
It is possible for multiple temperature sensors to be configured.*
```json
{
    "temperature": 0.0,
    "measured_min_temp": 0.0,
    "measured_max_temp": 0.0
}
```
A `temperature_sensor` reports the following state:

- `temperature`: Sensor's current reported temperature
- `measured_min_temp`: The minimum temperature read from the sensor
- `measured_max_temp`: The maximum temperature read from the sensor

## temperature_fan fan_name
*Enabled when `[temperature_fan fan_name]` is included in printer.cfg. It is
possible for multiple temperature fans to be configured.*
```json
{
    "speed": 0.0,
    "temperature": 0.0,
    "target": 0.0
}
```
A `temperature_fan` reports the following state:

- `speed`: Current fan speed as a fraction of maximum speed, reported
  in the range of 0.0 - 1.0
- `temperature`: Currently reported temperature of the sensor associated
  with the fan
- `target`: The current target temperature for the `temperature_fan`.

## filament_switch_sensor sensor_name
*Enabled when `[filament_switch_sensor sensor_name]` is included in
printer.cfg. It is possible for multiple filament sensors to be configured.*
```json
{
    "filament_detected": false,
    "enabled": true
}
```
A `filament_switch_sensor` reports the following state:

- `filament_detected`: Set to true if the switch detects filament, otherwise
  false
- `enabled`: Set to true if the sensor is currently enabled, otherwise false

## output_pin pin_name
*Enabled when `[output_pin pin_name]` is included in printer.cfg.
It is possible for multiple output pins to be configured.*
```json
{
    "value": 0.0
}
```
An `output_pin` reports the following state:

- `value`: The currently set value of the pin, in the range of 0.0 - 1.0.
  A digital pin will always be 0 or 1, whereas a pwm pin may report a value
  across the entire range.

## bed_mesh
*Enabled when `[bed_mesh]` is included in printer.cfg.*
```json
{
    "profile_name": "",
    "mesh_min": [0.0, 0.0],
    "mesh_max": [0.0, 0.0],
    "probed_matrix": [[]],
    "mesh_matrix": [[]]
}
```
The `bed_mesh` printer object reports the following state:

- `profile_name`: The name of the currently loaded profile. If no profile is
  loaded then this will report a null string. If the user is not using
  bed_mesh profile management then this will report `default` after mesh
  calibration completes.
- `mesh_min`: [X, Y] - The minimum x and y coordinates of the mesh.
- `mesh_max`: [X, Y] - The maximum x and y coordinates of the mesh.
- `probed_matrix`: A 2 dimensional array representing the matrix of probed
  values. If the matrix has not been probed then the result is `[[]]`.
- `mesh_matrix`: A 2 dimensional array representing the interpolated mesh. If
  no matrix has been generated the result is `[[]]`.

!!! tip
    See [web_api.md](web_api.md#bed-mesh-coordinates) for an example
    of how to use this information to generate (X, Y, Z) coordinates.

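The general idea is that the probed points are evenly spaced between
`mesh_min` and `mesh_max`. The function below is a hedged sketch of that
conversion; it assumes a rectangular bed and a `mesh` dict shaped like the
status payload above:

```python
# Sketch: convert a bed_mesh status payload into (X, Y, Z) probe points.
# Assumes a rectangular bed; `mesh` is shaped like the payload above.
def mesh_coordinates(mesh: dict) -> list:
    matrix = mesh["probed_matrix"]
    if not matrix or not matrix[0]:
        return []  # the matrix has not been probed yet
    min_x, min_y = mesh["mesh_min"]
    max_x, max_y = mesh["mesh_max"]
    rows, cols = len(matrix), len(matrix[0])
    # Probed points are evenly spaced between mesh_min and mesh_max
    x_dist = (max_x - min_x) / max(cols - 1, 1)
    y_dist = (max_y - min_y) / max(rows - 1, 1)
    return [
        (min_x + col * x_dist, min_y + row * y_dist, z)
        for row, line in enumerate(matrix)
        for col, z in enumerate(line)
    ]
```
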
## gcode_macro macro_name
*Enabled when `[gcode_macro macro_name]` is included in printer.cfg.
It is possible for multiple gcode macros to be configured.*

Gcode macros will report the state of configured `variables`.
While user defined macros likely won't report state that is useful
for a client, it is possible for client developers to recommend or
request a specific gcode_macro configuration, then have the client
take action based on the variables reported by the macro.
136
docs/user_changes.md
Normal file
@@ -0,0 +1,136 @@
##
This file will track changes that require user intervention,
such as a configuration change or a reinstallation.

### April 6th 2022
- The ability to configure core components in the `[server]` section
  is now deprecated. When legacy items are detected in `[server]` a
  warning will be generated. It is crucially important to move configuration
  to the correct section, as in the future it will be a hard requirement.

### February 22nd 2022
- The `on_when_upload_queued` option for [power] devices has been
  deprecated in favor of `on_when_job_queued`. As the new option
  name implies, this option will power on the device when any new
  job is queued, not only when it is sourced from an upload. The
  `on_when_upload_queued` option will be treated as an alias to
  `on_when_job_queued` until its removal.

### February 16th 2022
- Webcam settings can now be defined in the `moonraker.conf` file, under
  the `[octoprint_compat]` section. The values below are used as
  defaults when an option is not configured.

  | Setting | Default value |
  |---------|---------------|
  | flip_h | False |
  | flip_v | False |
  | rotate_90 | False |
  | stream_url | /webcam/?action=stream |
  | webcam_enabled | True |

### January 22nd 2022
- The `color_order` option in the `[wled]` section has been deprecated.
  This is configured in wled directly. This is not a breaking change;
  the setting will simply be ignored and does not affect functionality.

### December 24th 2021
- The `password_file` option in the `[mqtt]` section has been deprecated.
  Use the `password` option instead. This option may be a template, thus
  can resolve secrets stored in the `[secrets]` module.

### November 7th 2021
- Previously all core components received configuration through
  the `[server]` config section. As Moonraker's core functionality
  has expanded this is becoming unsustainable, thus core components
  should now be configured in their own section. For example, the
  `config_path` and `log_path` should now be configured in the
  `[file_manager]` section of `moonraker.conf`. See the
  [configuration documentation](https://moonraker.readthedocs.io/en/latest/configuration/)
  for details. This is not a breaking change; core components
  will still fall back to checking the `[server]` section for
  configuration.

### April 19th 2021
- The `[authorization]` module is now a component, thus is only
  loaded if the user has it configured in `moonraker.conf`. This
  deprecates the previous `enable` option, as the component is enabled
  if configured and disabled otherwise.
- The API Key is now stored in the database. This deprecates the
  `api_key_file` option in the `[authorization]` module. Users can
  no longer read the contents of the API Key file to retrieve the
  API Key. Instead, users can run `scripts/fetch-apikey.sh` to
  print the API Key. Alternatively, a user can navigate to
  `http://{moonraker-host}/access/api_key` from a trusted client
  to retrieve the API Key.

### March 10th 2021
- The `cors_domain` option in the `[authorization]` section is now
  checked for dangerous entries. If a domain entry contains a
  wildcard in the top level domain (ie: `http://www.*`) then it
  will be rejected, as a malicious website can easily reproduce
  this match.

### March 6th 2021
- The `enable_debug_logging` option in the `[server]` section now defaults
  to `False`. This dramatically reduces the amount of logging produced
  by Moonraker for the typical user.

### March 4th 2021
- To enable OctoPrint compatibility with slicer uploads it is now
  required to add `[octoprint_compat]` to `moonraker.conf`. After
  making this change it is necessary to restart the Moonraker service
  so the module is loaded.

### December 31st 2020
- The file manager no longer restricts the `config_path` to a folder
  within the HOME directory. The path may not be the system root,
  however it can reside anywhere else on the file system as long as
  Moonraker has read and write access to the directory. This applies
  to the gcode path received from Klipper via the `virtual_sdcard`
  section as well.

### December 6th 2020
- Moonraker is now installed as a systemd service. This allows logging
  to stdout, which can be viewed with the `journalctl -u moonraker` command.
  This change requires the user to rerun the install script. If
  `moonraker.conf` is not located in the home directory, the command
  will look something like the following:

      cd ~/moonraker
      ./scripts/install-moonraker.sh -f -c /home/pi/klipper_config/moonraker.conf

  Otherwise you can run the install script with no arguments.

### November 19th 2020
- The install script (`install-moonraker.sh`) now has command-line
  options:\
  `-r` Rebuild the python virtual env\
  `-f` Force an overwrite of `/etc/default/moonraker` during installation\
  `-c /path/to/moonraker.conf` Allows user to specify the path to
  moonraker.conf during configuration. Using this in conjunction with `-f`
  will update the defaults file with the new path.
- New dependencies have been added to Moonraker which require reinstallation.
  Run the following command to reinstall and rebuild the virtualenv:

      ~/moonraker/scripts/install-moonraker.sh -r

- The power plugin configuration has changed. See the
  [install guide](installation.md#power-control-plugin) for
  details on the new configuration.
- Users transitioning from the previous version of the power plugin will need
  to unexport any currently used pins. For example, the following command
  may be used to unexport pin 19:

      echo 19 > /sys/class/gpio/unexport

  Alternatively one may reboot the machine after upgrading:

      cd ~/moonraker/
      git pull
      ~/moonraker/scripts/install-moonraker.sh -r
      sudo reboot

  Make sure that the power plugin configuration has been updated prior
  to rebooting the machine.
5768
docs/web_api.md
Normal file
File diff suppressed because it is too large
25
mkdocs.yml
Normal file
@@ -0,0 +1,25 @@
site_name: Moonraker
site_url: https://moonraker.readthedocs.io
repo_url: https://github.com/Arksine/moonraker
nav:
  - 'User Documentation':
    - Installation: installation.md
    - Configuration: configuration.md
    - User Changes: user_changes.md
  - 'Client Developers':
    - Client API: web_api.md
    - Printer Objects: printer_objects.md
    - API Changes: api_changes.md
  - 'Backend Developers':
    - Contributing: contributing.md
    - Components: components.md
theme:
  name: readthedocs
plugins:
  - search
markdown_extensions:
  - admonition
  - pymdownx.superfences
  - pymdownx.highlight:
      use_pygments: false
  - pymdownx.inlinehilite
1022
moonraker/app.py
Normal file
File diff suppressed because it is too large
235
moonraker/assets/welcome.html
Normal file
@@ -0,0 +1,235 @@
<html>
  <head>
    <title>Moonraker {{ version }}</title>
    <style>
      body {
        background-color: rgb(48, 48, 48);
        color: rgb(230, 230, 230);
        font-family: Arial, Helvetica, sans-serif;
        margin: 0px;
      }
      .nav-bar {
        width: 100%;
        overflow: hidden;
      }
      .nav-bar a {
        float: right;
        text-align: center;
        padding: .5em .75em;
        font-size: 1.2em;
        text-decoration: none;
        color: rgb(230, 230, 230);
      }
      .intro {
        font-size: 1.1em;
        margin-top: 7rem;
        margin-left: auto;
        margin-right: auto;
        width: 60%;
      }
      .intro h1 {
        font-size: 2.2rem;
        text-align: center;
      }
      .status {
        display: flex;
        margin-top: 2rem;
        margin-left: auto;
        margin-right: auto;
        width: 80%;
        align-items: baseline;
        justify-content: center;
        column-gap: 1.5rem;
        row-gap: 1.5rem;
        flex-wrap: wrap;
      }
      .card {
        background: none;
        border: 0px;
        border-radius: .75rem;
        width: 25rem;
      }
      .card h1 {
        background-color: #225353;
        text-align: center;
        line-height: 2.2rem;
        font-size: 1.1rem;
        height: 2.2rem;
        margin: 0;
        border-top-left-radius: .75rem;
        border-top-right-radius: .75rem;
      }
      .card .content {
        background-color: #1a1a1a;
        border-bottom-left-radius: .75rem;
        border-bottom-right-radius: .75rem;
        padding: .5rem;
      }
      .card .entry {
        display: inline-block;
        width: 100%;
      }
      .card .entry:not(:last-child) {
        margin-bottom: .4rem;
      }
      .card .value {
        float: right;
        display: inline;
      }
      .messages {
        margin-top: 2rem;
        width: 51.5rem;
        padding: .5rem;
      }
      .messages .item {
        padding: .5rem;
        background: #242424;
        border-radius: .4rem;
      }
      .messages .announcement {
        cursor: pointer;
      }
      .messages .item:not(:last-child) {
        margin-bottom: .5rem;
      }
      .messages .item a {
        font-size: 1.2rem;
        margin: 0;
        font-weight: 600;
        padding: 0rem;
        margin-bottom: .1rem;
        text-decoration: none;
        color: rgb(230, 230, 230);
      }
      .messages .item:hover {
        background-color: #3f3f3f;
      }
      .messages .item:hover a,
      .messages .item a:hover {
        color: rgb(235, 199, 0);
      }
      .messages .item p {
        font-size: 1rem;
      }
      .warning h1 {
        animation: glow 1s ease-in-out infinite alternate;
      }

      @keyframes glow {
        from {
          background-color: #225353;
        }
        to {
          background-color: rgb(160, 64, 8);
        }
      }
    </style>
    <script>
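      // Make a message card fully clickable: clicks anywhere on the
      // card follow its primary link, while clicks on ".clickable"
      // children (and drag text selections) are left to their own
      // handlers.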
      function setClickable(id) {
        const item = document.querySelector(`#${id}`);
        const primary_link = item.querySelector("a");
        const clickable = Array.from(item.querySelectorAll(".clickable"));
        clickable.forEach((element) => {
          element.addEventListener("click", (evt) => {
            evt.stopPropagation();
          })
        });
        function onItemClicked(evt) {
          const noTextSelected = !window.getSelection().toString();
          if (noTextSelected)
            primary_link.click();
        }
        item.addEventListener("click", onItemClicked);
      }
    </script>
  </head>
  <body>
    <main>
      <div class="nav-bar">
        <a href="https://github.com/Arksine/moonraker">GitHub</a>
        <a href="https://moonraker.readthedocs.io">Documentation</a>
      </div>
      <div class="intro">
        <h1>Welcome to Moonraker</h1>
        <p>You may have intended
        to navigate to one of Moonraker's front ends; if so, check
        that you entered the correct port in the address bar.
        </p>
      </div>
      <div class="status">
        <article class="card">
          <h1>Authorization</h1>
          <div class="content">
            <div class="entry">
              Request IP:
              <div class="value">{{ ip_address }}</div>
            </div>
            <div class="entry">
              Trusted:
              <div class="value">{{ authorized }}</div>
            </div>
            <div class="entry">
              CORS Enabled:
              <div class="value">{{ cors_enabled }}</div>
            </div>
          </div>
        </article>
        <article class="card">
          <h1>Status</h1>
          <div class="content">
            <div class="entry">
              Version:
              <div class="value">{{ version }}</div>
            </div>
            <div class="entry">
              Websocket Count:
              <div class="value">{{ ws_count }}</div>
            </div>
            <div class="entry">
              Klipper State:
              <div class="value">{{ klippy_state }}</div>
            </div>
          </div>
        </article>
        {% if summary %}
        <article class="card messages">
          <h1>Summary</h1>
          <div class="content">
            {% for item in summary %}
            <article class="item">{{ item }}</article>
            {% end %}
          </div>
        </article>
        {% end %}
        {% if announcements %}
        <article class="card messages">
          <h1>Announcements</h1>
          <div class="content">
            {% for item in announcements %}
            {% set id = "announcement-" + item["entry_id"].replace("/", "-") %}
            <article class="item announcement" id="{{ id }}">
              {% set url = item["url"] %}
              <a href="{{ url }}" class="clickable">{{ item["title"] }}</a>
              <p>{{ item["description"] }}</p>
            </article>
            <script>
              setClickable("{{ id }}");
            </script>
            {% end %}
          </div>
        </article>
        {% end %}
        {% if warnings %}
        <article class="card messages warning">
          <h1>Warnings</h1>
          <div class="content">
            {% for warn in warnings %}
            <article class="item">{{ warn }}</article>
            {% end %}
          </div>
        </article>
        {% end %}
      </div>
    </main>
  </body>
</html>
6
moonraker/components/__init__.py
Normal file
@@ -0,0 +1,6 @@

# Package definition for the components directory
#
# Copyright (C) 2020 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
541
moonraker/components/announcements.py
Normal file
@@ -0,0 +1,541 @@
# Support for Moonraker/Klipper/Client announcements
#
# Copyright (C) 2022 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license

from __future__ import annotations
import datetime
import pathlib
import asyncio
import logging
import email.utils
import xml.etree.ElementTree as etree
from typing import (
    TYPE_CHECKING,
    Awaitable,
    List,
    Dict,
    Any,
    Optional,
    Union
)
if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from websockets import WebRequest
    from http_client import HttpClient
    from components.database import MoonrakerDatabase


MOONLIGHT_URL = "https://arksine.github.io/moonlight"
UPDATE_CHECK_TIME = 1800.
etree.register_namespace("moonlight", MOONLIGHT_URL)


class Announcements:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.entry_mgr = EntryManager(config)
        self.eventloop = self.server.get_event_loop()
        self.update_timer = self.eventloop.register_timer(
            self._handle_update_timer
        )
        self.request_lock = asyncio.Lock()
        self.dev_mode = config.getboolean("dev_mode", False)
        self.subscriptions: Dict[str, RssFeed] = {
            "moonraker": RssFeed("moonraker", self.entry_mgr, self.dev_mode),
            "klipper": RssFeed("klipper", self.entry_mgr, self.dev_mode)
        }
        self.stored_feeds: List[str] = []
        sub_list: List[str] = config.getlist("subscriptions", [])
        self.configured_feeds: List[str] = ["moonraker", "klipper"]
        for sub in sub_list:
            sub = sub.lower()
            if sub in self.subscriptions:
                continue
            self.configured_feeds.append(sub)
            self.subscriptions[sub] = RssFeed(
                sub, self.entry_mgr, self.dev_mode
            )

        self.server.register_endpoint(
            "/server/announcements/list", ["GET"],
            self._list_announcements
        )
        self.server.register_endpoint(
            "/server/announcements/dismiss", ["POST"],
            self._handle_dismiss_request
        )
        self.server.register_endpoint(
            "/server/announcements/update", ["POST"],
            self._handle_update_request
        )
        self.server.register_endpoint(
            "/server/announcements/feed", ["POST", "DELETE"],
            self._handle_feed_request
        )
        self.server.register_endpoint(
            "/server/announcements/feeds", ["GET"],
            self._handle_list_feeds
        )
        self.server.register_notification(
            "announcements:dismissed", "announcement_dismissed"
        )
        self.server.register_notification(
            "announcements:entries_updated", "announcement_update"
        )
        self.server.register_notification(
            "announcements:dismiss_wake", "announcement_wake"
        )

    async def component_init(self) -> None:
        db: MoonrakerDatabase = self.server.lookup_component("database")
        stored_feeds: List[str] = await db.get_item(
            "moonraker", "announcements.stored_feeds", []
        )
        self.stored_feeds = stored_feeds
        for name in stored_feeds:
            if name in self.subscriptions:
                continue
            feed = RssFeed(name, self.entry_mgr, self.dev_mode)
            self.subscriptions[name] = feed
        async with self.request_lock:
            await self.entry_mgr.initialize()
            for sub in self.subscriptions.values():
                await sub.initialize()
        self.update_timer.start()

    async def _handle_update_timer(self, eventtime: float) -> float:
        changed = False
        entries: List[Dict[str, Any]] = []
        async with self.request_lock:
            for sub in self.subscriptions.values():
                ret = await sub.update_entries()
                changed |= ret
            if changed:
                entries = await self.entry_mgr.list_entries()
                self.server.send_event(
                    "announcements:entries_updated", {"entries": entries}
                )
        return eventtime + UPDATE_CHECK_TIME

    async def _handle_dismiss_request(
        self, web_request: WebRequest
    ) -> Dict[str, Any]:
        async with self.request_lock:
            entry_id: str = web_request.get_str("entry_id")
            wake_time: Optional[int] = web_request.get_int("wake_time", None)
            await self.entry_mgr.dismiss_entry(entry_id, wake_time)
            return {
                "entry_id": entry_id
            }

    async def _list_announcements(
        self, web_request: WebRequest
    ) -> Dict[str, Any]:
        async with self.request_lock:
            incl_dsm = web_request.get_boolean("include_dismissed", True)
            entries = await self.entry_mgr.list_entries(incl_dsm)
            return {
                "entries": entries,
                "feeds": list(self.subscriptions.keys())
            }

    async def _handle_update_request(
        self, web_request: WebRequest
    ) -> Dict[str, Any]:
        subs: Optional[Union[str, List[str]]]
        subs = web_request.get("subscriptions", None)
        if isinstance(subs, str):
            subs = [sub.strip() for sub in subs.split(",") if sub.strip()]
        elif subs is None:
            subs = list(self.subscriptions.keys())
        for sub in subs:
            if sub not in self.subscriptions:
                raise self.server.error(f"No subscription for {sub}")
        async with self.request_lock:
            changed = False
            for sub in subs:
                ret = await self.subscriptions[sub].update_entries()
                changed |= ret
            entries = await self.entry_mgr.list_entries()
            if changed:
                self.eventloop.delay_callback(
                    .05, self.server.send_event,
                    "announcements:entries_updated",
                    {"entries": entries})
            return {
                "entries": entries,
                "modified": changed
            }

    async def _handle_list_feeds(
        self, web_request: WebRequest
    ) -> Dict[str, Any]:
        return {"feeds": list(self.subscriptions.keys())}

    async def _handle_feed_request(
        self, web_request: WebRequest
    ) -> Dict[str, Any]:
        action = web_request.get_action()
        name: str = web_request.get("name")
        name = name.lower()
        changed: bool = False
        db: MoonrakerDatabase = self.server.lookup_component("database")
        result = "skipped"
        if action == "POST":
            if name not in self.subscriptions:
                feed = RssFeed(name, self.entry_mgr, self.dev_mode)
                self.subscriptions[name] = feed
                await feed.initialize()
                changed = await feed.update_entries()
                self.stored_feeds.append(name)
                db.insert_item(
                    "moonraker", "announcements.stored_feeds", self.stored_feeds
                )
                result = "added"
        elif action == "DELETE":
            if name not in self.stored_feeds:
                raise self.server.error(f"Feed '{name}' not stored")
            if name in self.configured_feeds:
                raise self.server.error(
                    f"Feed '{name}' exists in the configuration, cannot remove"
                )
            self.stored_feeds.remove(name)
            db.insert_item(
                "moonraker", "announcements.stored_feeds", self.stored_feeds
            )
            if name in self.subscriptions:
                del self.subscriptions[name]
                changed = await self.entry_mgr.prune_by_feed(name)
                logging.info(f"Removed Announcement Feed: {name}")
                result = "removed"
            else:
                raise self.server.error(f"Feed does not exist: {name}")
        if changed:
            entries = await self.entry_mgr.list_entries()
            self.eventloop.delay_callback(
                .05, self.server.send_event, "announcements:entries_updated",
                {"entries": entries}
            )
        return {
            "feed": name,
            "action": result
        }

    def add_internal_announcement(
        self, title: str, desc: str, url: str, priority: str, feed: str
    ) -> Dict[str, Any]:
        date = datetime.datetime.utcnow()
        entry_id: str = f"{feed}/{date.isoformat(timespec='seconds')}"
        entry = {
            "entry_id": entry_id,
            "url": url,
            "title": title,
            "description": desc,
            "priority": priority,
            "date": date.timestamp(),
            "dismissed": False,
            "date_dismissed": None,
            "dismiss_wake": None,
            "source": "internal",
            "feed": feed
        }
        self.entry_mgr.add_entry(entry)
        return entry

    async def remove_announcement(self, entry_id: str) -> None:
        ret = await self.entry_mgr.remove_entry(entry_id)
        if ret is not None:
            entries = await self.entry_mgr.list_entries()
            self.server.send_event(
                "announcements:entries_updated", {"entries": entries}
            )

    async def dismiss_announcement(
        self, entry_id, wake_time: Optional[int] = None
    ) -> None:
        await self.entry_mgr.dismiss_entry(entry_id, wake_time)

    async def get_announcements(
        self, include_dismissed: bool = False
    ) -> List[Dict[str, Any]]:
        return await self.entry_mgr.list_entries(include_dismissed)

    def close(self):
        self.entry_mgr.close()


class EntryManager:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        database: MoonrakerDatabase
        database = self.server.lookup_component("database")
        database.register_local_namespace("announcements")
        self.announce_db = database.wrap_namespace("announcements")
        self.entry_id_map: Dict[str, str] = {}
        self.next_key = 0
        self.dismiss_handles: Dict[str, asyncio.TimerHandle] = {}

    async def initialize(self) -> None:
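        # Rebuild the entry_id -> database key map on startup. Entries
        # are stored under monotonically increasing 6 digit hex keys, so
        # the last key visited determines the next key to allocate.
        # Dismissed entries with a pending wake time are either woken
        # immediately (when near or past due) or re-armed with a wake
        # callback for the remaining interval.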
        last_key = ""
        eventloop = self.server.get_event_loop()
        curtime = datetime.datetime.utcnow().timestamp()
        for key, entry in await self.announce_db.items():
            last_key = key
            aid = entry["entry_id"]
            self.entry_id_map[aid] = key
            if entry["dismissed"]:
                wake_time: Optional[float] = entry.get("dismiss_wake")
                if wake_time is not None:
                    time_diff = wake_time - curtime
                    if time_diff - 10. < 0.:
                        # announcement is near or past wake time
                        entry["dismissed"] = False
                        entry["date_dismissed"] = None
                        entry["dismiss_wake"] = None
                        self.announce_db[key] = entry
                    else:
                        self.dismiss_handles[key] = eventloop.delay_callback(
                            time_diff, self._wake_dismissed, key
                        )
        if last_key:
            self.next_key = int(last_key, 16) + 1

    async def list_entries(
        self, include_dismissed: bool = True
    ) -> List[Dict[str, Any]]:
        vals: List[Dict[str, Any]] = await self.announce_db.values()
        vals = sorted(vals, key=lambda x: x["date"], reverse=True)
        if include_dismissed:
            return vals
        return [val for val in vals if not val["dismissed"]]

    def has_entry(self, entry_id: str) -> bool:
        return entry_id in self.entry_id_map

    def add_entry(self, entry: Dict[str, Any]) -> Awaitable[None]:
        aid = entry["entry_id"]
        key = f"{self.next_key:06X}"
        self.next_key += 1
        self.entry_id_map[aid] = key
        return self.announce_db.insert(key, entry)

    def remove_entry(self, entry_id: str) -> Awaitable[Any]:
        key = self.entry_id_map.pop(entry_id, None)
        if key is None:
            raise self.server.error(f"No key matching entry id: {entry_id}")
        return self.announce_db.pop(key, None)

    async def dismiss_entry(
        self, entry_id: str, wake_time: Optional[int] = None
    ) -> None:
        key = self.entry_id_map.get(entry_id)
        if key is None:
            raise self.server.error(f"No key matching entry id: {entry_id}")
        entry = await self.announce_db[key]
        is_dismissed = entry["dismissed"]
        if is_dismissed:
            return
        entry["dismissed"] = True
        eventloop = self.server.get_event_loop()
        curtime = datetime.datetime.utcnow().timestamp()
        entry["date_dismissed"] = curtime
        if wake_time is not None:
            entry["dismiss_wake"] = curtime + wake_time
            self.dismiss_handles[key] = eventloop.delay_callback(
                wake_time, self._wake_dismissed, key
            )
        self.announce_db[key] = entry
        eventloop.delay_callback(
            .05, self.server.send_event, "announcements:dismissed",
            {"entry_id": entry_id}
        )

    async def _wake_dismissed(self, key: str) -> None:
        self.dismiss_handles.pop(key, None)
        entry = await self.announce_db.get(key, None)
        if entry is None:
            return
        if not entry["dismissed"]:
            return
        entry["dismissed"] = False
        entry["date_dismissed"] = None
        entry["dismiss_wake"] = None
        self.announce_db[key] = entry
        self.server.send_event(
            "announcements:dismiss_wake", {"entry_id": entry["entry_id"]}
        )

    def prune_by_prefix(self, prefix: str, valid_ids: List[str]) -> bool:
        del_keys: List[str] = []
        for entry_id in list(self.entry_id_map.keys()):
            if not entry_id.startswith(prefix) or entry_id in valid_ids:
                continue
            # Entry is no longer valid and should be removed
            key = self.entry_id_map.pop(entry_id, None)
            if key is not None:
                del_keys.append(key)
        if del_keys:
            self.announce_db.delete_batch(del_keys)
            return True
        return False

    async def prune_by_feed(self, feed: str) -> bool:
        entries = await self.list_entries()
        del_keys: List[str] = []
        for entry in entries:
            if entry["feed"].lower() == feed:
                key = self.entry_id_map.pop(entry["entry_id"], None)
                if key is not None:
                    del_keys.append(key)
        if del_keys:
            self.announce_db.delete_batch(del_keys)
            return True
        return False

    def close(self):
        for handle in self.dismiss_handles.values():
            handle.cancel()


class RssFeed:
    def __init__(
        self, name: str, entry_mgr: EntryManager, dev_mode: bool
    ) -> None:
        self.server = entry_mgr.server
        self.name = name
        self.entry_mgr = entry_mgr
        self.client: HttpClient = self.server.lookup_component("http_client")
        self.database: MoonrakerDatabase
        self.database = self.server.lookup_component("database")
        self.xml_file = f"{self.name}.xml"
        self.asset_url = f"{MOONLIGHT_URL}/assets/{self.xml_file}"
        self.last_modified: int = 0
        self.etag: Optional[str] = None
        self.dev_xml_path: Optional[pathlib.Path] = None
        if dev_mode:
            res_dir = pathlib.Path(__file__).parent.parent.parent.resolve()
            res_path = res_dir.joinpath(".devel/announcement_xml")
            self.dev_xml_path = res_path.joinpath(self.xml_file)

    async def initialize(self) -> None:
        self.etag = await self.database.get_item(
            "moonraker", f"announcements.{self.name}.etag", None
        )

    async def update_entries(self) -> bool:
        if self.dev_xml_path is None:
            xml_data = await self._fetch_moonlight()
        else:
            xml_data = await self._fetch_local_folder()
        if not xml_data:
            return False
        return self._parse_xml(xml_data)

    async def _fetch_moonlight(self) -> str:
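        # Conditional fetch: the last known etag is sent via the
        # If-None-Match header so moonlight can answer 304 Not Modified,
        # in which case an empty string is returned and parsing is
        # skipped entirely.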
        headers = {"Accept": "application/xml"}
        if self.etag is not None:
            headers["If-None-Match"] = self.etag
        resp = await self.client.get(
            self.asset_url, headers, attempts=5,
            retry_pause_time=.5, enable_cache=False,
        )
        if resp.has_error():
            logging.info(
                f"Failed to update subscription '{self.name}': {resp.error}"
            )
            return ""
        if resp.status_code == 304:
            logging.debug(f"Content at {self.xml_file} not modified")
            return ""
        # update etag
        self.etag = resp.etag
        try:
            if self.etag is not None:
                self.database.insert_item(
                    "moonraker", f"announcements.{self.name}.etag", resp.etag
                )
            else:
                self.database.delete_item(
                    "moonraker", f"announcements.{self.name}.etag",
                )
        except self.server.error:
            pass
        return resp.text

    async def _fetch_local_folder(self) -> str:
        if self.dev_xml_path is None:
            return ""
        if not self.dev_xml_path.is_file():
            logging.info(f"No file at path {self.dev_xml_path}")
            return ""
        mtime = self.dev_xml_path.stat().st_mtime_ns
        if mtime <= self.last_modified:
            logging.debug(f"Content at {self.xml_file} not modified")
            return ""
        try:
            eventloop = self.server.get_event_loop()
            xml_data = await eventloop.run_in_thread(
                self.dev_xml_path.read_text)
        except Exception:
            logging.exception(f"Unable to read xml file {self.dev_xml_path}")
            return ""
        self.last_modified = mtime
        return xml_data

    def _parse_xml(self, xml_data: str) -> bool:
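        # The payload is standard RSS: a <channel> whose <title> serves
        # as the guid prefix, containing <item> elements that each carry
        # <guid>, <link>, <title>, <description>, <category> (used as the
        # priority) and an RFC 2822 <pubDate>.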
        root = etree.fromstring(xml_data)
        channel = root.find("channel")
        if channel is None:
            root_str = etree.tostring(root, encoding="unicode")
            logging.debug(f"Feed {self.name}: no channel found\n{root_str}")
            return False
        # extract prefix
        prefix = channel.findtext("title", "").lower()
        if not prefix:
            logging.info(f"Feed {self.name}: No prefix found")
        items = channel.findall("item")
        valid_ids: List[str] = []
        changed: bool = False
        for item in items:
            guid = item.findtext("guid")
            if guid is None:
                item_str = etree.tostring(item, encoding="unicode")
                logging.debug(f"Feed {self.name}: Invalid Item\n{item_str}")
                continue
            if not prefix:
                # fall back to first guid prefix
                prefix = "/".join(guid.split("/")[:2])
            elif not guid.startswith(prefix):
                logging.debug(
                    f"Feed {self.name}: Guid {guid} is not "
                    f"prefixed with {prefix}")
            valid_ids.append(guid)
            if self.entry_mgr.has_entry(guid):
                continue
            try:
                rfc_date = item.findtext("pubDate", "")
                dt = email.utils.parsedate_to_datetime(rfc_date)
            except Exception:
                dt = datetime.datetime.utcnow()
            entry: Dict[str, Any] = {
                "entry_id": guid,
                "url": item.findtext("link"),
                "title": item.findtext("title"),
                "description": item.findtext("description"),
                "priority": item.findtext("category"),
                "date": dt.timestamp(),
                "dismissed": False,
                "date_dismissed": None,
                "dismiss_wake": None,
                "source": "moonlight",
                "feed": self.name
            }
            changed = True
            self.entry_mgr.add_entry(entry)
        logging.debug(f"Feed {self.name}: found entries {valid_ids}")
        if prefix:
            pruned = self.entry_mgr.prune_by_prefix(prefix, valid_ids)
            changed = changed or pruned
        return changed


def load_component(config: ConfigHelper) -> Announcements:
    return Announcements(config)
816
moonraker/components/authorization.py
Normal file
@@ -0,0 +1,816 @@
# API Key Based Authorization
#
# Copyright (C) 2020 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license

from __future__ import annotations
import asyncio
import base64
import uuid
import hashlib
import secrets
import os
import time
import datetime
import ipaddress
import re
import socket
import logging
import json
from tornado.web import HTTPError
from libnacl.sign import Signer, Verifier

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Tuple,
    Set,
    Optional,
    Union,
    Dict,
    List,
)

if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from websockets import WebRequest
    from tornado.httputil import HTTPServerRequest
    from tornado.web import RequestHandler
    from .database import MoonrakerDatabase as DBComp
    from .ldap import MoonrakerLDAP
    IPAddr = Union[ipaddress.IPv4Address, ipaddress.IPv6Address]
    IPNetwork = Union[ipaddress.IPv4Network, ipaddress.IPv6Network]
    OneshotToken = Tuple[IPAddr, Optional[Dict[str, Any]], asyncio.Handle]


# Helpers for base64url encoding and decoding
def base64url_encode(data: bytes) -> bytes:
    return base64.urlsafe_b64encode(data).rstrip(b"=")


def base64url_decode(data: str) -> bytes:
    pad_cnt = len(data) % 4
    if pad_cnt:
        data += "=" * (4 - pad_cnt)
    return base64.urlsafe_b64decode(data)


ONESHOT_TIMEOUT = 5
TRUSTED_CONNECTION_TIMEOUT = 3600
PRUNE_CHECK_TIME = 300.

AUTH_SOURCES = ["moonraker", "ldap"]
HASH_ITER = 100000
API_USER = "_API_KEY_USER_"
TRUSTED_USER = "_TRUSTED_USER_"
RESERVED_USERS = [API_USER, TRUSTED_USER]
JWT_EXP_TIME = datetime.timedelta(hours=1)
JWT_HEADER = {
    'alg': "EdDSA",
    'typ': "JWT"
}


class Authorization:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.login_timeout = config.getint('login_timeout', 90)
        self.force_logins = config.getboolean('force_logins', False)
        self.default_source = config.get('default_source', "moonraker").lower()
        if self.default_source not in AUTH_SOURCES:
            raise config.error(
                "[authorization]: option 'default_source' - Invalid "
                f"value '{self.default_source}'"
            )
        self.ldap: Optional[MoonrakerLDAP] = None
        if config.has_section("ldap"):
            self.ldap = self.server.load_component(config, "ldap", None)
        if self.default_source == "ldap" and self.ldap is None:
            self.server.add_warning(
                "[authorization]: Option 'default_source' set to 'ldap',"
                " however [ldap] section failed to load or not configured"
            )
        database: DBComp = self.server.lookup_component('database')
        database.register_local_namespace('authorized_users', forbidden=True)
        self.user_db = database.wrap_namespace('authorized_users')
        self.users: Dict[str, Dict[str, Any]] = self.user_db.as_dict()
        api_user: Optional[Dict[str, Any]] = self.users.get(API_USER, None)
        if api_user is None:
            self.api_key = uuid.uuid4().hex
            self.users[API_USER] = {
                'username': API_USER,
                'api_key': self.api_key,
                'created_on': time.time()
            }
        else:
            self.api_key = api_user['api_key']
        hi = self.server.get_host_info()
        self.issuer = f"http://{hi['hostname']}:{hi['port']}"
        self.public_jwks: Dict[str, Dict[str, Any]] = {}
        for username, user_info in list(self.users.items()):
            if username == API_USER:
                # Validate the API User
                for item in ["username", "api_key", "created_on"]:
                    if item not in user_info:
                        self.users[API_USER] = {
                            'username': API_USER,
                            'api_key': self.api_key,
                            'created_on': time.time()
                        }
                        break
                continue
            else:
                # validate created users
                valid = True
                for item in ["username", "password", "salt", "created_on"]:
                    if item not in user_info:
                        logging.info(
                            f"Authorization: User {username} does not "
                            f"contain field {item}, removing")
                        del self.users[username]
                        valid = False
                        break
                if not valid:
                    continue
            # generate jwks for valid users
            if 'jwt_secret' in user_info:
                try:
                    priv_key = self._load_private_key(user_info['jwt_secret'])
                    jwk_id = user_info['jwk_id']
                except (self.server.error, KeyError):
                    logging.info("Invalid key found for user, removing")
                    user_info.pop('jwt_secret', None)
                    user_info.pop('jwk_id', None)
                    self.users[username] = user_info
                    continue
                self.public_jwks[jwk_id] = self._generate_public_jwk(priv_key)
        # sync user changes to the database
        self.user_db.sync(self.users)
        self.trusted_users: Dict[IPAddr, Any] = {}
        self.oneshot_tokens: Dict[str, OneshotToken] = {}
        self.permitted_paths: Set[str] = set()

        # Get allowed cors domains
        self.cors_domains: List[str] = []
        for domain in config.getlist('cors_domains', []):
            bad_match = re.search(r"^.+\.[^:]*\*", domain)
            if bad_match is not None:
                raise config.error(
                    f"Unsafe CORS Domain '{domain}'. Wildcards are not"
                    " permitted in the top level domain.")
            if domain.endswith("/"):
                self.server.add_warning(
                    f"[authorization]: Invalid domain '{domain}' in option "
                    "'cors_domains'. Domains cannot contain a trailing "
                    "slash.")
            else:
                self.cors_domains.append(
                    domain.replace(".", "\\.").replace("*", ".*"))

        # Get Trusted Clients
        self.trusted_ips: List[IPAddr] = []
        self.trusted_ranges: List[IPNetwork] = []
        self.trusted_domains: List[str] = []
        for val in config.getlist('trusted_clients', []):
            # Check IP address
            try:
                tc = ipaddress.ip_address(val)
            except ValueError:
                pass
            else:
                self.trusted_ips.append(tc)
                continue
            # Check ip network
            try:
                tn = ipaddress.ip_network(val)
            except ValueError as e:
                if "has host bits set" in str(e):
                    self.server.add_warning(
                        f"[authorization]: Invalid CIDR expression '{val}' "
                        "in option 'trusted_clients'")
                    continue
                pass
            else:
                self.trusted_ranges.append(tn)
                continue
            # Check hostname
            match = re.match(r"([a-z0-9]+(-[a-z0-9]+)*\.?)+[a-z]{2,}$", val)
            if match is not None:
                self.trusted_domains.append(val.lower())
            else:
                self.server.add_warning(
                    f"[authorization]: Invalid domain name '{val}' "
                    "in option 'trusted_clients'")

        t_clients = "\n".join(
            [str(ip) for ip in self.trusted_ips] +
            [str(rng) for rng in self.trusted_ranges] +
            self.trusted_domains)
        c_domains = "\n".join(self.cors_domains)

        logging.info(
            f"Authorization Configuration Loaded\n"
            f"Trusted Clients:\n{t_clients}\n"
            f"CORS Domains:\n{c_domains}")

        eventloop = self.server.get_event_loop()
        self.prune_timer = eventloop.register_timer(
            self._prune_conn_handler)

        # Register Authorization Endpoints
        self.permitted_paths.add("/server/redirect")
        self.permitted_paths.add("/access/login")
        self.permitted_paths.add("/access/refresh_jwt")
        self.permitted_paths.add("/access/info")
        self.server.register_endpoint(
            "/access/login", ['POST'], self._handle_login,
            transports=['http'])
        self.server.register_endpoint(
            "/access/logout", ['POST'], self._handle_logout,
            transports=['http'])
        self.server.register_endpoint(
            "/access/refresh_jwt", ['POST'], self._handle_refresh_jwt,
            transports=['http'])
        self.server.register_endpoint(
            "/access/user", ['GET', 'POST', 'DELETE'],
            self._handle_user_request, transports=['http'])
        self.server.register_endpoint(
            "/access/users/list", ['GET'], self._handle_list_request,
            transports=['http'])
        self.server.register_endpoint(
            "/access/user/password", ['POST'], self._handle_password_reset,
            transports=['http'])
        self.server.register_endpoint(
            "/access/api_key", ['GET', 'POST'],
            self._handle_apikey_request, transports=['http'])
        self.server.register_endpoint(
            "/access/oneshot_token", ['GET'],
            self._handle_oneshot_request, transports=['http'])
        self.server.register_endpoint(
            "/access/info", ['GET'],
            self._handle_info_request, transports=['http'])
        self.server.register_notification("authorization:user_created")
        self.server.register_notification("authorization:user_deleted")

    def _sync_user(self, username: str) -> None:
        self.user_db[username] = self.users[username]

    async def component_init(self) -> None:
        self.prune_timer.start(delay=PRUNE_CHECK_TIME)

    async def _handle_apikey_request(self, web_request: WebRequest) -> str:
        action = web_request.get_action()
        if action.upper() == 'POST':
            self.api_key = uuid.uuid4().hex
            self.users[API_USER]['api_key'] = self.api_key
            self._sync_user(API_USER)
        return self.api_key

    async def _handle_oneshot_request(self, web_request: WebRequest) -> str:
        ip = web_request.get_ip_address()
        assert ip is not None
        user_info = web_request.get_current_user()
        return self.get_oneshot_token(ip, user_info)

    async def _handle_login(self, web_request: WebRequest) -> Dict[str, Any]:
        return await self._login_jwt_user(web_request)

    async def _handle_logout(self, web_request: WebRequest) -> Dict[str, str]:
        user_info = web_request.get_current_user()
        if user_info is None:
            raise self.server.error("No user logged in")
        username: str = user_info['username']
        if username in RESERVED_USERS:
            raise self.server.error(
                f"Invalid log out request for user {username}")
        self.users[username].pop("jwt_secret", None)
        jwk_id: str = self.users[username].pop("jwk_id", None)
        self._sync_user(username)
        self.public_jwks.pop(jwk_id, None)
        return {
            "username": username,
            "action": "user_logged_out"
        }

    async def _handle_info_request(
        self, web_request: WebRequest
    ) -> Dict[str, Any]:
        sources = ["moonraker"]
        if self.ldap is not None:
            sources.append("ldap")
        return {
            "default_source": self.default_source,
            "available_sources": sources
        }

    async def _handle_refresh_jwt(self,
                                  web_request: WebRequest
                                  ) -> Dict[str, str]:
        refresh_token: str = web_request.get_str('refresh_token')
        try:
            user_info = self._decode_jwt(refresh_token, token_type="refresh")
        except Exception:
            raise self.server.error("Invalid Refresh Token", 401)
        username: str = user_info['username']
        if 'jwt_secret' not in user_info or "jwk_id" not in user_info:
            raise self.server.error("User not logged in", 401)
        private_key = self._load_private_key(user_info['jwt_secret'])
        jwk_id: str = user_info['jwk_id']
        token = self._generate_jwt(username, jwk_id, private_key)
        return {
            'username': username,
            'token': token,
            'source': user_info.get("source", "moonraker"),
            'action': 'user_jwt_refresh'
        }

    async def _handle_user_request(self,
                                   web_request: WebRequest
                                   ) -> Dict[str, Any]:
        action = web_request.get_action()
        if action == "GET":
            user = web_request.get_current_user()
            if user is None:
                return {
                    'username': None,
                    'source': None,
                    'created_on': None,
                }
            else:
                return {
                    'username': user['username'],
                    'source': user.get("source", "moonraker"),
                    'created_on': user.get('created_on')
                }
        elif action == "POST":
            # Create User
            return await self._login_jwt_user(web_request, create=True)
        elif action == "DELETE":
            # Delete User
            return self._delete_jwt_user(web_request)
        raise self.server.error("Invalid Request Method")

    async def _handle_list_request(self,
                                   web_request: WebRequest
                                   ) -> Dict[str, List[Dict[str, Any]]]:
        user_list = []
        for user in self.users.values():
            if user['username'] == API_USER:
                continue
            user_list.append({
                'username': user['username'],
                'source': user.get("source", "moonraker"),
                'created_on': user['created_on']
            })
        return {
            'users': user_list
        }

    async def _handle_password_reset(self,
                                     web_request: WebRequest
                                     ) -> Dict[str, str]:
        password: str = web_request.get_str('password')
        new_pass: str = web_request.get_str('new_password')
        user_info = web_request.get_current_user()
        if user_info is None:
            raise self.server.error("No Current User")
        username = user_info['username']
        if user_info.get("source", "moonraker") == "ldap":
            raise self.server.error(
                f"Cannot reset password for LDAP user {username}")
        if username in RESERVED_USERS:
            raise self.server.error(
                f"Invalid Reset Request for user {username}")
        salt = bytes.fromhex(user_info['salt'])
        hashed_pass = hashlib.pbkdf2_hmac(
            'sha256', password.encode(), salt, HASH_ITER).hex()
        if hashed_pass != user_info['password']:
            raise self.server.error("Invalid Password")
        new_hashed_pass = hashlib.pbkdf2_hmac(
            'sha256', new_pass.encode(), salt, HASH_ITER).hex()
        self.users[username]['password'] = new_hashed_pass
        self._sync_user(username)
        return {
            'username': username,
            'action': "user_password_reset"
        }

    async def _login_jwt_user(
        self, web_request: WebRequest, create: bool = False
    ) -> Dict[str, Any]:
        username: str = web_request.get_str('username')
        password: str = web_request.get_str('password')
        source: str = web_request.get_str(
            'source', self.default_source
        ).lower()
        if source not in AUTH_SOURCES:
            raise self.server.error(f"Invalid 'source': {source}")
        user_info: Dict[str, Any]
        if username in RESERVED_USERS:
            raise self.server.error(
                f"Invalid Request for user {username}")
        if source == "ldap":
            if create:
                raise self.server.error("Cannot Create LDAP User")
            if self.ldap is None:
                raise self.server.error(
                    "LDAP authentication not available", 401
                )
            await self.ldap.authenticate_ldap_user(username, password)
            if username not in self.users:
                create = True
        if create:
            if username in self.users:
                raise self.server.error(f"User {username} already exists")
            salt = secrets.token_bytes(32)
            hashed_pass = hashlib.pbkdf2_hmac(
                'sha256', password.encode(), salt, HASH_ITER).hex()
            user_info = {
                'username': username,
                'password': hashed_pass,
                'salt': salt.hex(),
                'source': source,
                'created_on': time.time()
            }
            self.users[username] = user_info
            self._sync_user(username)
            action = "user_created"
            if source == "ldap":
                # Don't send a "user created" notification for LDAP logins
                action = "user_logged_in"
                create = False
        else:
            if username not in self.users:
                raise self.server.error(f"Unregistered User: {username}")
            user_info = self.users[username]
            auth_src = user_info.get("source", "moonraker")
            if auth_src != source:
                raise self.server.error(
                    f"Moonraker cannot authenticate user '{username}', must "
                    f"specify source '{auth_src}'", 401
                )
            salt = bytes.fromhex(user_info['salt'])
            hashed_pass = hashlib.pbkdf2_hmac(
                'sha256', password.encode(), salt, HASH_ITER).hex()
            action = "user_logged_in"
            if hashed_pass != user_info['password']:
                raise self.server.error("Invalid Password")
        jwt_secret_hex: Optional[str] = user_info.get('jwt_secret', None)
        if jwt_secret_hex is None:
            private_key = Signer()
            jwk_id = base64url_encode(secrets.token_bytes()).decode()
            user_info['jwt_secret'] = private_key.hex_seed().decode()
            user_info['jwk_id'] = jwk_id
            self.users[username] = user_info
            self._sync_user(username)
            self.public_jwks[jwk_id] = self._generate_public_jwk(private_key)
        else:
            private_key = self._load_private_key(jwt_secret_hex)
            jwk_id = user_info['jwk_id']
        token = self._generate_jwt(username, jwk_id, private_key)
        refresh_token = self._generate_jwt(
            username, jwk_id, private_key, token_type="refresh",
            exp_time=datetime.timedelta(days=self.login_timeout))
        if create:
            event_loop = self.server.get_event_loop()
            event_loop.delay_callback(
                .005, self.server.send_event,
                "authorization:user_created",
                {'username': username})
        return {
            'username': username,
            'token': token,
            'source': user_info.get("source", "moonraker"),
            'refresh_token': refresh_token,
            'action': action
        }

    def _delete_jwt_user(self, web_request: WebRequest) -> Dict[str, str]:
        username: str = web_request.get_str('username')
        current_user = web_request.get_current_user()
        if current_user is not None:
            curname = current_user.get('username', None)
            if curname is not None and curname == username:
                raise self.server.error(
                    f"Cannot delete logged in user {curname}")
        if username in RESERVED_USERS:
            raise self.server.error(
                f"Invalid Request for reserved user {username}")
        user_info: Optional[Dict[str, Any]] = self.users.get(username)
        if user_info is None:
            raise self.server.error(f"No registered user: {username}")
        if 'jwk_id' in user_info:
            self.public_jwks.pop(user_info['jwk_id'], None)
        del self.users[username]
        del self.user_db[username]
        event_loop = self.server.get_event_loop()
        event_loop.delay_callback(
            .005, self.server.send_event,
            "authorization:user_deleted",
            {'username': username})
        return {
            "username": username,
            "action": "user_deleted"
        }

    def _generate_jwt(self,
                      username: str,
                      jwk_id: str,
                      private_key: Signer,
                      token_type: str = "access",
                      exp_time: datetime.timedelta = JWT_EXP_TIME
                      ) -> str:
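        # Assemble the JWT manually rather than via a JWT library:
        # base64url(header) "." base64url(payload), signed with the
        # user's Ed25519 key, with the base64url encoded signature
        # appended as the third segment.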
        curtime = int(time.time())
        payload = {
            'iss': self.issuer,
            'aud': "Moonraker",
            'iat': curtime,
            'exp': curtime + int(exp_time.total_seconds()),
            'username': username,
            'token_type': token_type
        }
        header = {'kid': jwk_id}
        header.update(JWT_HEADER)
        jwt_header = base64url_encode(json.dumps(header).encode())
        jwt_payload = base64url_encode(json.dumps(payload).encode())
        jwt_msg = b".".join([jwt_header, jwt_payload])
        sig = private_key.signature(jwt_msg)
        jwt_sig = base64url_encode(sig)
        return b".".join([jwt_msg, jwt_sig]).decode()

    def _decode_jwt(self,
                    token: str,
                    token_type: str = "access"
                    ) -> Dict[str, Any]:
        message, sig = token.rsplit('.', maxsplit=1)
        enc_header, enc_payload = message.split('.')
        header: Dict[str, Any] = json.loads(base64url_decode(enc_header))
        sig_bytes = base64url_decode(sig)

        # verify header
        if header.get('typ') != "JWT" or header.get('alg') != "EdDSA":
            raise self.server.error("Invalid JWT header")
        jwk_id = header.get('kid')
        if jwk_id not in self.public_jwks:
            raise self.server.error("Invalid key ID")

        # validate signature
        public_key = self._public_key_from_jwk(self.public_jwks[jwk_id])
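        # libnacl's Verifier expects the detached signature prepended
        # to the signed message and raises on a bad signature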
        public_key.verify(sig_bytes + message.encode())

        # validate claims
        payload: Dict[str, Any] = json.loads(base64url_decode(enc_payload))
        if payload['token_type'] != token_type:
            raise self.server.error(
                f"JWT Token type mismatch: Expected {token_type}, "
                f"Recd: {payload['token_type']}", 401)
        if payload['iss'] != self.issuer:
            raise self.server.error("Invalid JWT Issuer", 401)
        if payload['aud'] != "Moonraker":
            raise self.server.error("Invalid JWT Audience", 401)
        if payload['exp'] < int(time.time()):
            raise self.server.error("JWT Expired", 401)

        # get user
        user_info: Optional[Dict[str, Any]] = self.users.get(
            payload.get('username', ""), None)
        if user_info is None:
            raise self.server.error("Unknown user", 401)
        return user_info

    def _load_private_key(self, secret: str) -> Signer:
        try:
            key = Signer(bytes.fromhex(secret))
        except Exception:
            raise self.server.error(
                "Error decoding private key, user data may"
                " be corrupt", 500) from None
        return key

    def _generate_public_jwk(self, private_key: Signer) -> Dict[str, Any]:
        public_key = private_key.vk
        return {
            'x': base64url_encode(public_key).decode(),
            'kty': "OKP",
            'crv': "Ed25519",
            'use': "sig"
        }

    def _public_key_from_jwk(self, jwk: Dict[str, Any]) -> Verifier:
        if jwk.get('kty') != "OKP":
            raise self.server.error("Not an Octet Key Pair")
        if jwk.get('crv') != "Ed25519":
            raise self.server.error("Invalid Curve")
        if 'x' not in jwk:
            raise self.server.error("No 'x' argument in jwk")
        key = base64url_decode(jwk['x'])
        return Verifier(key.hex().encode())

    def _prune_conn_handler(self, eventtime: float) -> float:
        cur_time = time.time()
        for ip, user_info in list(self.trusted_users.items()):
            exp_time: float = user_info['expires_at']
            if cur_time >= exp_time:
                self.trusted_users.pop(ip, None)
                logging.info(
                    f"Trusted Connection Expired, IP: {ip}")
        return eventtime + PRUNE_CHECK_TIME

    def _oneshot_token_expire_handler(self, token):
        self.oneshot_tokens.pop(token, None)

    def get_oneshot_token(self,
                          ip_addr: IPAddr,
                          user: Optional[Dict[str, Any]]
                          ) -> str:
        token = base64.b32encode(os.urandom(20)).decode()
        event_loop = self.server.get_event_loop()
        hdl = event_loop.delay_callback(
            ONESHOT_TIMEOUT, self._oneshot_token_expire_handler, token)
        self.oneshot_tokens[token] = (ip_addr, user, hdl)
        return token

    def _check_json_web_token(self,
                              request: HTTPServerRequest
                              ) -> Optional[Dict[str, Any]]:
        auth_token: Optional[str] = request.headers.get("Authorization")
        if auth_token is None:
            auth_token = request.headers.get("X-Access-Token")
            if auth_token is None:
                qtoken = request.query_arguments.get('access_token', None)
                if qtoken is not None:
                    auth_token = qtoken[-1].decode()
        else:
            if auth_token.startswith("Bearer "):
                auth_token = auth_token[7:]
            elif auth_token.startswith("Basic "):
                raise HTTPError(401, "Basic Auth is not supported")
            else:
                raise HTTPError(
                    401, f"Invalid Authorization Header: {auth_token}")
        if auth_token:
            try:
                return self._decode_jwt(auth_token)
            except Exception:
                logging.exception(f"JWT Decode Error {auth_token}")
                raise HTTPError(401, f"Error decoding JWT: {auth_token}")
        return None

    def _check_authorized_ip(self, ip: IPAddr) -> bool:
        if ip in self.trusted_ips:
            return True
        for rng in self.trusted_ranges:
            if ip in rng:
                return True
        fqdn = socket.getfqdn(str(ip)).lower()
        if fqdn in self.trusted_domains:
            return True
        return False

    def _check_trusted_connection(self,
                                  ip: Optional[IPAddr]
                                  ) -> Optional[Dict[str, Any]]:
        if ip is not None:
            curtime = time.time()
            exp_time = curtime + TRUSTED_CONNECTION_TIMEOUT
            if ip in self.trusted_users:
                self.trusted_users[ip]['expires_at'] = exp_time
                return self.trusted_users[ip]
            elif self._check_authorized_ip(ip):
                logging.info(
                    f"Trusted Connection Detected, IP: {ip}")
                self.trusted_users[ip] = {
                    'username': TRUSTED_USER,
                    'password': None,
                    'created_on': curtime,
                    'expires_at': exp_time
                }
                return self.trusted_users[ip]
        return None

    def _check_oneshot_token(self,
                             token: str,
                             cur_ip: Optional[IPAddr]
                             ) -> Optional[Dict[str, Any]]:
        if token in self.oneshot_tokens:
            ip_addr, user, hdl = self.oneshot_tokens.pop(token)
            hdl.cancel()
            if cur_ip != ip_addr:
                logging.info(f"Oneshot Token IP Mismatch: expected {ip_addr}"
                             f", Recd: {cur_ip}")
                return None
            return user
        else:
            return None

    def check_authorized(self,
                         request: HTTPServerRequest
                         ) -> Optional[Dict[str, Any]]:
        if (
            request.path in self.permitted_paths
            or request.method == "OPTIONS"
        ):
            return None

        # Check JSON Web Token
        jwt_user = self._check_json_web_token(request)
        if jwt_user is not None:
            return jwt_user

        try:
            ip = ipaddress.ip_address(request.remote_ip)  # type: ignore
        except ValueError:
            logging.exception(
                f"Unable to Create IP Address {request.remote_ip}")
            ip = None

        # Check oneshot access token
        ost: Optional[List[bytes]] = request.arguments.get('token', None)
        if ost is not None:
            ost_user = self._check_oneshot_token(ost[-1].decode(), ip)
            if ost_user is not None:
                return ost_user

        # Check API Key Header
        key: Optional[str] = request.headers.get("X-Api-Key")
        if key and key == self.api_key:
            return self.users[API_USER]

        # If the force_logins option is enabled and at least one
        # user is created this is an unauthorized request
        if self.force_logins and len(self.users) > 1:
            raise HTTPError(401, "Unauthorized")

        # Check if IP is trusted
        trusted_user = self._check_trusted_connection(ip)
        if trusted_user is not None:
            return trusted_user

        raise HTTPError(401, "Unauthorized")

    def check_cors(self,
                   origin: Optional[str],
                   req_hdlr: Optional[RequestHandler] = None
                   ) -> bool:
        if origin is None or not self.cors_domains:
            return False
        for regex in self.cors_domains:
            match = re.match(regex, origin)
            if match is not None:
                if match.group() == origin:
                    logging.debug(f"CORS Pattern Matched, origin: {origin} "
                                  f" | pattern: {regex}")
                    self._set_cors_headers(origin, req_hdlr)
                    return True
                else:
                    logging.debug(f"Partial Cors Match: {match.group()}")
            else:
                # Check to see if the origin contains an IP that matches a
                # current trusted connection
                match = re.search(r"^https?://([^/:]+)", origin)
                if match is not None:
                    ip = match.group(1)
                    try:
                        ipaddr = ipaddress.ip_address(ip)
                    except ValueError:
                        pass
                    else:
                        if self._check_authorized_ip(ipaddr):
                            logging.debug(
                                f"Cors request matched trusted IP: {ip}")
                            self._set_cors_headers(origin, req_hdlr)
                            return True
        logging.debug(f"No CORS match for origin: {origin}\n"
                      f"Patterns: {self.cors_domains}")
        return False

    def _set_cors_headers(self,
                          origin: str,
                          req_hdlr: Optional[RequestHandler]
                          ) -> None:
        if req_hdlr is None:
            return
        req_hdlr.set_header("Access-Control-Allow-Origin", origin)
        if req_hdlr.request.method == "OPTIONS":
            req_hdlr.set_header(
                "Access-Control-Allow-Methods",
                "GET, POST, PUT, DELETE, OPTIONS")
            req_hdlr.set_header(
                "Access-Control-Allow-Headers",
                "Origin, Accept, Content-Type, X-Requested-With, "
                "X-CRSF-Token, Authorization, X-Access-Token, "
                "X-Api-Key")
            if req_hdlr.request.headers.get(
                    "Access-Control-Request-Private-Network", None) == "true":
                req_hdlr.set_header(
                    "Access-Control-Allow-Private-Network",
                    "true")

    def cors_enabled(self) -> bool:
        # cors_domains is always a list, so check that at least one
        # domain pattern is configured rather than comparing to None
        return len(self.cors_domains) > 0

    def close(self) -> None:
        self.prune_timer.stop()


def load_component(config: ConfigHelper) -> Authorization:
    return Authorization(config)
132
moonraker/components/button.py
Normal file
@@ -0,0 +1,132 @@
# Support for GPIO Button actions
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
from __future__ import annotations
import asyncio
import logging
from confighelper import SentinelClass

from typing import (
    TYPE_CHECKING,
    Any,
    Dict
)
if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from .gpio import GpioFactory
    from app import InternalTransport as ITransport

SENTINEL = SentinelClass.get_instance()


class ButtonManager:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.buttons: Dict[str, GpioButton] = {}
        prefix_sections = config.get_prefix_sections("button")
        logging.info(f"Loading Buttons: {prefix_sections}")
        for section in prefix_sections:
            cfg = config[section]
            # Reserve the "type" option for future use
            btn_type = cfg.get('type', "gpio")
            try:
                btn = GpioButton(cfg)
            except Exception as e:
                msg = f"Failed to load button [{cfg.get_name()}]\n{e}"
                self.server.add_warning(msg)
                continue
            self.buttons[btn.name] = btn
        self.server.register_notification("button:button_event")

    def component_init(self) -> None:
        for btn in self.buttons.values():
            btn.initialize()


class GpioButton:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.eventloop = self.server.get_event_loop()
        self.name = config.get_name().split()[-1]
        self.itransport: ITransport = self.server.lookup_component(
            'internal_transport')
        self.mutex = asyncio.Lock()
        gpio: GpioFactory = self.server.load_component(config, 'gpio')
        self.gpio_event = gpio.register_gpio_event(
            config.get('pin'), self._on_gpio_event)
        min_event_time = config.getfloat(
            'minimum_event_time', .05, minval=.010)
        self.gpio_event.setup_debounce(min_event_time, self._on_gpio_error)
        self.press_template = config.gettemplate(
            "on_press", None, is_async=True)
        self.release_template = config.gettemplate(
            "on_release", None, is_async=True)
        if (
            self.press_template is None and
            self.release_template is None
        ):
            raise config.error(
                f"[{config.get_name()}]: No template option configured")
        self.notification_sent: bool = False
        self.user_data: Dict[str, Any] = {}
        self.context: Dict[str, Any] = {
            'call_method': self.itransport.call_method,
            'send_notification': self._send_notification,
            'event': {
                'elapsed_time': 0.,
                'received_time': 0.,
                'render_time': 0.,
                'pressed': False,
            },
            'user_data': self.user_data
        }
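
    # A minimal config sketch for this component (hypothetical pin and
    # method names; on_press/on_release are Jinja2 templates rendered
    # with the context built above):
    #
    #   [button my_estop]
    #   pin: gpio26
    #   on_press:
    #     {% do call_method("printer.emergency_stop") %}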

    def initialize(self) -> None:
        self.gpio_event.start()
        self.context['event']['pressed'] = bool(self.gpio_event.get_value())

    def get_status(self) -> Dict[str, Any]:
        return {
            'name': self.name,
            'type': "gpio",
            'event': self.context['event'],
        }

    def _send_notification(self, result: Any = None) -> None:
        if self.notification_sent:
            # Only allow execution once per template
            return
        self.notification_sent = True
        data = self.get_status()
        data['aux'] = result
        self.server.send_event("button:button_event", data)

    async def _on_gpio_event(self,
                             eventtime: float,
                             elapsed_time: float,
                             pressed: int
                             ) -> None:
        template = self.press_template if pressed else self.release_template
        if template is None:
            return
        async with self.mutex:
            self.notification_sent = False
            event_info: Dict[str, Any] = {
                'elapsed_time': elapsed_time,
                'received_time': eventtime,
                'render_time': self.eventloop.get_loop_time(),
                'pressed': bool(pressed)
            }
            self.context['event'] = event_info
            try:
                await template.render_async(self.context)
            except Exception:
                action = "on_press" if pressed else "on_release"
                logging.exception(
                    f"Button {self.name}: '{action}' template error")

    def _on_gpio_error(self, message: str) -> None:
        self.server.add_warning(f"Button {self.name}: {message}")


def load_component(config: ConfigHelper) -> ButtonManager:
    return ButtonManager(config)
171
moonraker/components/data_store.py
Normal file
@@ -0,0 +1,171 @@
# Klipper data logging and storage
#
# Copyright (C) 2020 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
import logging
import time
from collections import deque

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Optional,
    Dict,
    List,
    Tuple,
    Deque,
)
if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from websockets import WebRequest
    from . import klippy_apis
    APIComp = klippy_apis.KlippyAPI
    GCQueue = Deque[Dict[str, Any]]
    TempStore = Dict[str, Dict[str, Deque[float]]]

TEMP_UPDATE_TIME = 1.
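
# The temperature store maps each Klipper sensor to fixed-length deques,
# one per reported field. An illustrative shape (hypothetical sensor
# names and values):
#
#   {
#       "extruder": {
#           "temperatures": deque([24.1, 24.2, ...]),
#           "targets": deque([0.0, 0.0, ...]),
#           "powers": deque([0.0, 0.0, ...]),
#       },
#       "temperature_sensor mcu": {"temperatures": deque([38.5, ...])},
#   }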


class DataStore:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.temp_store_size = config.getint('temperature_store_size', 1200)
        self.gcode_store_size = config.getint('gcode_store_size', 1000)

        # Temperature Store Tracking
        self.last_temps: Dict[str, Tuple[float, ...]] = {}
        self.gcode_queue: GCQueue = deque(maxlen=self.gcode_store_size)
        self.temperature_store: TempStore = {}
        eventloop = self.server.get_event_loop()
        self.temp_update_timer = eventloop.register_timer(
            self._update_temperature_store)

        # Register status update event
        self.server.register_event_handler(
            "server:status_update", self._set_current_temps)
        self.server.register_event_handler(
            "server:gcode_response", self._update_gcode_store)
        self.server.register_event_handler(
            "server:klippy_ready", self._init_sensors)
        self.server.register_event_handler(
            "klippy_connection:gcode_received", self._store_gcode_command
        )

        # Register endpoints
        self.server.register_endpoint(
            "/server/temperature_store", ['GET'],
            self._handle_temp_store_request)
        self.server.register_endpoint(
            "/server/gcode_store", ['GET'],
            self._handle_gcode_store_request)

    async def _init_sensors(self) -> None:
        klippy_apis: APIComp = self.server.lookup_component('klippy_apis')
        # Fetch sensors
        try:
            result: Dict[str, Any]
            result = await klippy_apis.query_objects({'heaters': None})
        except self.server.error as e:
            logging.info(f"Error Configuring Sensors: {e}")
            return
        sensors: List[str]
        sensors = result.get("heaters", {}).get("available_sensors", [])

        if sensors:
            # Add Subscription
            sub: Dict[str, Optional[List[str]]] = {s: None for s in sensors}
            try:
                status: Dict[str, Any]
                status = await klippy_apis.subscribe_objects(sub)
            except self.server.error as e:
                logging.info(f"Error subscribing to sensors: {e}")
                return
            logging.info(f"Configuring available sensors: {sensors}")
            new_store: TempStore = {}
            for sensor in sensors:
                fields = list(status.get(sensor, {}).keys())
                if sensor in self.temperature_store:
                    new_store[sensor] = self.temperature_store[sensor]
                else:
                    new_store[sensor] = {
                        'temperatures': deque(maxlen=self.temp_store_size)}
                    for item in ["target", "power", "speed"]:
                        if item in fields:
                            new_store[sensor][f"{item}s"] = deque(
                                maxlen=self.temp_store_size)
                if sensor not in self.last_temps:
                    self.last_temps[sensor] = (0., 0., 0., 0.)
            self.temperature_store = new_store
            # Prune unconfigured sensors in self.last_temps
            for sensor in list(self.last_temps.keys()):
                if sensor not in self.temperature_store:
                    del self.last_temps[sensor]
            # Update initial temperatures
            self._set_current_temps(status)
            self.temp_update_timer.start()
        else:
            logging.info("No sensors found")
            self.last_temps = {}
            self.temperature_store = {}
            self.temp_update_timer.stop()

    def _set_current_temps(self, data: Dict[str, Any]) -> None:
        for sensor in self.temperature_store:
            if sensor in data:
                last_val = self.last_temps[sensor]
                self.last_temps[sensor] = (
                    round(data[sensor].get('temperature', last_val[0]), 2),
                    data[sensor].get('target', last_val[1]),
                    data[sensor].get('power', last_val[2]),
                    data[sensor].get('speed', last_val[3]))

    def _update_temperature_store(self, eventtime: float) -> float:
        # XXX - If klippy is not connected, set values to zero
        # as they are unknown?
        for sensor, vals in self.last_temps.items():
            self.temperature_store[sensor]['temperatures'].append(vals[0])
            for val, item in zip(vals[1:], ["targets", "powers", "speeds"]):
                if item in self.temperature_store[sensor]:
                    self.temperature_store[sensor][item].append(val)
        return eventtime + TEMP_UPDATE_TIME

    async def _handle_temp_store_request(
        self, web_request: WebRequest
    ) -> Dict[str, Dict[str, List[float]]]:
        store = {}
        for name, sensor in self.temperature_store.items():
            store[name] = {k: list(v) for k, v in sensor.items()}
        return store

    async def close(self) -> None:
        self.temp_update_timer.stop()

    def _update_gcode_store(self, response: str) -> None:
        curtime = time.time()
        self.gcode_queue.append(
            {'message': response, 'time': curtime, 'type': "response"})

    def _store_gcode_command(self, script: str) -> None:
        curtime = time.time()
        for cmd in script.split('\n'):
            cmd = cmd.strip()
            if not cmd:
                continue
            # Store each stripped command individually; storing the full
            # script here would duplicate multi-line scripts once per line
            self.gcode_queue.append(
                {'message': cmd, 'time': curtime, 'type': "command"})

    async def _handle_gcode_store_request(
        self, web_request: WebRequest
    ) -> Dict[str, List[Dict[str, Any]]]:
        count = web_request.get_int("count", None)
        if count is not None:
            gc_responses = list(self.gcode_queue)[-count:]
        else:
            gc_responses = list(self.gcode_queue)
        return {'gcode_store': gc_responses}


def load_component(config: ConfigHelper) -> DataStore:
    return DataStore(config)
900
moonraker/components/database.py
Normal file
@@ -0,0 +1,900 @@
# Minimal database for moonraker storage
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
import os
import json
import struct
import operator
import logging
from asyncio import Future, Task
from io import BytesIO
from functools import reduce
from threading import Lock as ThreadLock
import lmdb
from utils import SentinelClass, ServerError

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Mapping,
    TypeVar,
    Tuple,
    Optional,
    Union,
    Dict,
    List,
    cast
)
if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from websockets import WebRequest
    DBRecord = Union[int, float, bool, str, List[Any], Dict[str, Any]]
    DBType = Optional[DBRecord]
    _T = TypeVar("_T")

DATABASE_VERSION = 1
MAX_NAMESPACES = 100
MAX_DB_SIZE = 200 * 2**20

RECORD_ENCODE_FUNCS = {
    int: lambda x: b"q" + struct.pack("q", x),
    float: lambda x: b"d" + struct.pack("d", x),
    bool: lambda x: b"?" + struct.pack("?", x),
    str: lambda x: b"s" + x.encode(),
    list: lambda x: json.dumps(x).encode(),
    dict: lambda x: json.dumps(x).encode(),
}

RECORD_DECODE_FUNCS = {
    ord("q"): lambda x: struct.unpack("q", x[1:])[0],
    ord("d"): lambda x: struct.unpack("d", x[1:])[0],
    ord("?"): lambda x: struct.unpack("?", x[1:])[0],
    ord("s"): lambda x: bytes(x[1:]).decode(),
    ord("["): lambda x: json.load(BytesIO(x)),
    ord("{"): lambda x: json.load(BytesIO(x)),
}
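
# Decoding dispatches on the first byte of the stored value: scalars
# carry an explicit type byte ("q", "d", "?", "s"), while JSON-encoded
# containers naturally begin with "[" or "{". A round-trip sketch
# (standalone illustration, not executed by the module):
#
#   encoded = RECORD_ENCODE_FUNCS[float](3.14)   # b"d" + 8 packed bytes
#   decoded = RECORD_DECODE_FUNCS[encoded[0]](encoded)
#   assert decoded == 3.14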

SENTINEL = SentinelClass.get_instance()


def getitem_with_default(item: Dict, field: Any) -> Any:
    if not isinstance(item, Dict):
        raise ServerError(
            f"Cannot reduce a value of type {type(item)}")
    if field not in item:
        item[field] = {}
    return item[field]


class MoonrakerDatabase:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.eventloop = self.server.get_event_loop()
        self.namespaces: Dict[str, object] = {}
        self.thread_lock = ThreadLock()
        self.database_path = os.path.expanduser(config.get(
            'database_path', "~/.moonraker_database"))
        if not os.path.isdir(self.database_path):
            os.mkdir(self.database_path)
        self.lmdb_env = lmdb.open(self.database_path, map_size=MAX_DB_SIZE,
                                  max_dbs=MAX_NAMESPACES)
        with self.lmdb_env.begin(write=True, buffers=True) as txn:
            # lookup existing namespaces
            with txn.cursor() as cursor:
                remaining = cursor.first()
                while remaining:
                    key = bytes(cursor.key())
                    self.namespaces[key.decode()] = self.lmdb_env.open_db(
                        key, txn)
                    remaining = cursor.next()
            if "moonraker" not in self.namespaces:
                mrdb = self.lmdb_env.open_db(b"moonraker", txn)
                self.namespaces["moonraker"] = mrdb
                txn.put(b'database_version',
                        self._encode_value(DATABASE_VERSION),
                        db=mrdb)
            # Iterate through all records, checking for invalid keys
            for ns, db in self.namespaces.items():
                with txn.cursor(db=db) as cursor:
                    remaining = cursor.first()
                    while remaining:
                        key_buf = cursor.key()
                        try:
                            decoded_key = bytes(key_buf).decode()
                        except Exception:
                            logging.info("Database Key Decode Error")
                            decoded_key = ''
                        if not decoded_key:
                            hex_key = bytes(key_buf).hex()
                            try:
                                invalid_val = self._decode_value(
                                    cursor.value())
                            except Exception:
                                invalid_val = ""
                            logging.info(
                                f"Invalid Key '{hex_key}' found in namespace "
                                f"'{ns}', dropping value: {repr(invalid_val)}")
                            try:
                                remaining = cursor.delete()
                            except Exception:
                                logging.exception("Error Deleting LMDB Key")
                            else:
                                continue
                        remaining = cursor.next()

        # Protected Namespaces have read-only API access. Write access can
        # be granted by enabling the debug option. Forbidden namespaces
        # have no API access. This cannot be overridden.
        self.protected_namespaces = set(self.get_item(
            "moonraker", "database.protected_namespaces",
            ["moonraker"]).result())
        self.forbidden_namespaces = set(self.get_item(
            "moonraker", "database.forbidden_namespaces",
            []).result())
        # Remove stale debug counter
        config.getboolean("enable_database_debug", False, deprecate=True)
        try:
            self.delete_item("moonraker", "database.debug_counter")
        except Exception:
            pass
        # Track unsafe shutdowns
        unsafe_shutdowns: int = self.get_item(
            "moonraker", "database.unsafe_shutdowns", 0).result()
        msg = f"Unsafe Shutdown Count: {unsafe_shutdowns}"
        self.server.add_log_rollover_item("database", msg)

        # Increment unsafe shutdown counter. This will be reset if
        # moonraker is safely restarted
        self.insert_item("moonraker", "database.unsafe_shutdowns",
                         unsafe_shutdowns + 1)
        self.server.register_endpoint(
            "/server/database/list", ['GET'], self._handle_list_request)
        self.server.register_endpoint(
            "/server/database/item", ["GET", "POST", "DELETE"],
            self._handle_item_request)

    def get_database_path(self) -> str:
        return self.database_path

    def _run_command(self,
                     command_func: Callable[..., _T],
                     *args
                     ) -> Future[_T]:
        def func_wrapper():
            with self.thread_lock:
                return command_func(*args)

        if self.server.is_running():
            return cast(Future, self.eventloop.run_in_thread(func_wrapper))
        else:
            ret = func_wrapper()
            fut = self.eventloop.create_future()
            fut.set_result(ret)
            return fut

    # *** Nested Database operations ***
    # The insert_item(), delete_item(), and get_item() methods may operate
    # on nested objects within a namespace. Each operation takes a key
    # argument that may be either a string or a list of strings. If the
    # argument is a string, nested keys may be delimited by a ".", by
    # which the string will be split into a list of strings. The first
    # key in the list must identify the database record. Subsequent keys
    # are optional and are used to access elements in the deserialized
    # objects.
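
    # A usage sketch of the dotted-key convention (hypothetical
    # namespace/keys, with db = server.lookup_component("database");
    # both calls below address the same nested element):
    #
    #   await db.insert_item("example", "printer.settings.speed", 100)
    #   speed = await db.get_item(
    #       "example", ["printer", "settings", "speed"])  # -> 100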

    def insert_item(self,
                    namespace: str,
                    key: Union[List[str], str],
                    value: DBType
                    ) -> Future[None]:
        return self._run_command(self._insert_impl, namespace, key, value)

    def _insert_impl(self,
                     namespace: str,
                     key: Union[List[str], str],
                     value: DBType
                     ) -> None:
        key_list = self._process_key(key)
        if namespace not in self.namespaces:
            self.namespaces[namespace] = self.lmdb_env.open_db(
                namespace.encode())
        record = value
        if len(key_list) > 1:
            record = self._get_record(namespace, key_list[0], force=True)
            if not isinstance(record, dict):
                prev_type = type(record)
                record = {}
                logging.info(
                    f"Warning: Key {key_list[0]} contains a value of type "
                    f"{prev_type}. Overwriting with an object.")
            item: Dict[str, Any] = reduce(
                getitem_with_default, key_list[1:-1], record)
            if not isinstance(item, dict):
                rpt_key = ".".join(key_list[:-1])
                raise self.server.error(
                    f"Item at key '{rpt_key}' in namespace '{namespace}' is "
                    "not a dictionary object, cannot insert"
                )
            item[key_list[-1]] = value
        if not self._insert_record(namespace, key_list[0], record):
            logging.info(
                f"Error inserting key '{key}' in namespace '{namespace}'")

    def update_item(self,
                    namespace: str,
                    key: Union[List[str], str],
                    value: DBType
                    ) -> Future[None]:
        return self._run_command(self._update_impl, namespace, key, value)

    def _update_impl(self,
                     namespace: str,
                     key: Union[List[str], str],
                     value: DBType
                     ) -> None:
        key_list = self._process_key(key)
        record = self._get_record(namespace, key_list[0])
        if len(key_list) == 1:
            if isinstance(record, dict) and isinstance(value, dict):
                record.update(value)
            else:
                if value is None:
                    raise self.server.error(
                        f"Item at key '{key}', namespace '{namespace}': "
                        "Cannot assign a record level null value")
                record = value
        else:
            try:
                assert isinstance(record, dict)
                item: Dict[str, Any] = reduce(
                    operator.getitem, key_list[1:-1], record)
            except Exception:
                raise self.server.error(
                    f"Key '{key}' in namespace '{namespace}' not found",
                    404)
            if not isinstance(item, dict) or key_list[-1] not in item:
                rpt_key = ".".join(key_list[:-1])
                raise self.server.error(
                    f"Item at key '{rpt_key}' in namespace '{namespace}' is "
                    "not a dictionary object, cannot update"
                )
            if isinstance(item[key_list[-1]], dict) \
                    and isinstance(value, dict):
                item[key_list[-1]].update(value)
            else:
                item[key_list[-1]] = value
        if not self._insert_record(namespace, key_list[0], record):
            logging.info(
                f"Error updating key '{key}' in namespace '{namespace}'")

    def delete_item(self,
                    namespace: str,
                    key: Union[List[str], str],
                    drop_empty_db: bool = False
                    ) -> Future[Any]:
        return self._run_command(self._delete_impl, namespace, key,
                                 drop_empty_db)

    def _delete_impl(self,
                     namespace: str,
                     key: Union[List[str], str],
                     drop_empty_db: bool = False
                     ) -> Any:
        key_list = self._process_key(key)
        val = record = self._get_record(namespace, key_list[0])
        remove_record = True
        if len(key_list) > 1:
            try:
                assert isinstance(record, dict)
                item: Dict[str, Any] = reduce(
                    operator.getitem, key_list[1:-1], record)
                val = item.pop(key_list[-1])
            except Exception:
                raise self.server.error(
                    f"Key '{key}' in namespace '{namespace}' not found",
                    404)
            remove_record = not record
        if remove_record:
            db = self.namespaces[namespace]
            with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn:
                ret = txn.delete(key_list[0].encode())
                with txn.cursor() as cursor:
                    if not cursor.first() and drop_empty_db:
                        txn.drop(db)
                        del self.namespaces[namespace]
        else:
            ret = self._insert_record(namespace, key_list[0], record)
        if not ret:
            logging.info(
                f"Error deleting key '{key}' from namespace "
                f"'{namespace}'")
        return val

    def get_item(self,
                 namespace: str,
                 key: Optional[Union[List[str], str]] = None,
                 default: Any = SENTINEL
                 ) -> Future[Any]:
        return self._run_command(self._get_impl, namespace, key, default)

    def _get_impl(self,
                  namespace: str,
                  key: Optional[Union[List[str], str]] = None,
                  default: Any = SENTINEL
                  ) -> Any:
        try:
            if key is None:
                return self._get_namespace(namespace)
            key_list = self._process_key(key)
            ns = self._get_record(namespace, key_list[0])
            val = reduce(operator.getitem,  # type: ignore
                         key_list[1:], ns)
        except Exception as e:
            if not isinstance(default, SentinelClass):
                return default
            if isinstance(e, self.server.error):
                raise
            raise self.server.error(
                f"Key '{key}' in namespace '{namespace}' not found", 404)
        return val

    # *** Batch operations ***
    # The insert_batch(), move_batch(), delete_batch(), and get_batch()
    # methods can be used to perform record level batch operations on
    # a namespace in a single transaction.
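
    # A batch usage sketch (hypothetical namespace and record names):
    #
    #   await db.insert_batch("example", {"a": 1, "b": 2})
    #   vals = await db.get_batch("example", ["a", "b"])   # {'a': 1, 'b': 2}
    #   await db.move_batch("example", ["a"], ["a_renamed"])
    #   removed = await db.delete_batch("example", ["b"])  # {'b': 2}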

    def insert_batch(self,
                     namespace: str,
                     records: Dict[str, Any]
                     ) -> Future[None]:
        return self._run_command(self._insert_batch_impl, namespace, records)

    def _insert_batch_impl(self,
                           namespace: str,
                           records: Dict[str, Any]
                           ) -> None:
        if namespace not in self.namespaces:
            self.namespaces[namespace] = self.lmdb_env.open_db(
                namespace.encode())
        db = self.namespaces[namespace]
        with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn:
            for key, val in records.items():
                ret = txn.put(key.encode(), self._encode_value(val))
                if not ret:
                    logging.info(f"Error inserting record {key} into "
                                 f"namespace {namespace}")

    def move_batch(self,
                   namespace: str,
                   source_keys: List[str],
                   dest_keys: List[str]
                   ) -> Future[None]:
        return self._run_command(self._move_batch_impl, namespace,
                                 source_keys, dest_keys)

    def _move_batch_impl(self,
                         namespace: str,
                         source_keys: List[str],
                         dest_keys: List[str]
                         ) -> None:
        db = self._get_db(namespace)
        if len(source_keys) != len(dest_keys):
            raise self.server.error(
                "Source key list and destination key list must "
                "be of the same length")
        with self.lmdb_env.begin(write=True, db=db) as txn:
            for source, dest in zip(source_keys, dest_keys):
                val = txn.pop(source.encode())
                if val is not None:
                    txn.put(dest.encode(), val)

    def delete_batch(self,
                     namespace: str,
                     keys: List[str]
                     ) -> Future[Dict[str, Any]]:
        return self._run_command(self._del_batch_impl, namespace, keys)

    def _del_batch_impl(self,
                        namespace: str,
                        keys: List[str]
                        ) -> Dict[str, Any]:
        db = self._get_db(namespace)
        result: Dict[str, Any] = {}
        with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn:
            for key in keys:
                val = txn.pop(key.encode())
                if val is not None:
                    result[key] = self._decode_value(val)
        return result

    def get_batch(self,
                  namespace: str,
                  keys: List[str]
                  ) -> Future[Dict[str, Any]]:
        return self._run_command(self._get_batch_impl, namespace, keys)

    def _get_batch_impl(self,
                        namespace: str,
                        keys: List[str]
                        ) -> Dict[str, Any]:
        db = self._get_db(namespace)
        result: Dict[str, Any] = {}
        encoded_keys: List[bytes] = [k.encode() for k in keys]
        with self.lmdb_env.begin(buffers=True, db=db) as txn:
            with txn.cursor() as cursor:
                vals = cursor.getmulti(encoded_keys)
                result = {bytes(k).decode(): self._decode_value(v)
                          for k, v in vals}
        return result

    # *** Namespace level operations ***

    def update_namespace(self,
                         namespace: str,
                         value: Mapping[str, DBRecord]
                         ) -> Future[None]:
        return self._run_command(self._update_ns_impl, namespace, value)

    def _update_ns_impl(self,
                        namespace: str,
                        value: Mapping[str, DBRecord]
                        ) -> None:
        if not value:
            return
        db = self._get_db(namespace)
        with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn:
            # We only need to update the keys that changed
            for key, val in value.items():
                stored = txn.get(key.encode())
                if stored is not None:
                    decoded = self._decode_value(stored)
                    if val == decoded:
                        continue
                ret = txn.put(key.encode(), self._encode_value(val))
                if not ret:
                    logging.info(f"Error inserting key '{key}' "
                                 f"in namespace '{namespace}'")

    def clear_namespace(self,
                        namespace: str,
                        drop_empty_db: bool = False
                        ) -> Future[None]:
        return self._run_command(self._clear_ns_impl, namespace, drop_empty_db)

    def _clear_ns_impl(self,
                       namespace: str,
                       drop_empty_db: bool = False
                       ) -> None:
        db = self._get_db(namespace)
        with self.lmdb_env.begin(write=True, db=db) as txn:
            txn.drop(db, delete=drop_empty_db)
        if drop_empty_db:
            del self.namespaces[namespace]

    def sync_namespace(self,
                       namespace: str,
                       value: Mapping[str, DBRecord]
                       ) -> Future[None]:
        return self._run_command(self._sync_ns_impl, namespace, value)

    def _sync_ns_impl(self,
                      namespace: str,
                      value: Mapping[str, DBRecord]
                      ) -> None:
        if not value:
            raise self.server.error("Cannot sync to an empty value")
        db = self._get_db(namespace)
        new_keys = set(value.keys())
        with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn:
            with txn.cursor() as cursor:
                remaining = cursor.first()
                while remaining:
                    bkey, bval = cursor.item()
                    key = bytes(bkey).decode()
                    if key not in value:
                        remaining = cursor.delete()
                    else:
                        decoded = self._decode_value(bval)
                        if decoded != value[key]:
                            new_val = self._encode_value(value[key])
                            txn.put(key.encode(), new_val)
                        new_keys.remove(key)
                        remaining = cursor.next()
            for key in new_keys:
                val = value[key]
                ret = txn.put(key.encode(), self._encode_value(val))
                if not ret:
                    logging.info(f"Error inserting key '{key}' "
                                 f"in namespace '{namespace}'")

    def ns_length(self, namespace: str) -> Future[int]:
        return self._run_command(self._ns_length_impl, namespace)

    def _ns_length_impl(self, namespace: str) -> int:
        db = self._get_db(namespace)
        with self.lmdb_env.begin(db=db) as txn:
            stats = txn.stat(db)
            return stats['entries']

    def ns_keys(self, namespace: str) -> Future[List[str]]:
        return self._run_command(self._ns_keys_impl, namespace)

    def _ns_keys_impl(self, namespace: str) -> List[str]:
        keys: List[str] = []
        db = self._get_db(namespace)
        with self.lmdb_env.begin(db=db) as txn:
            with txn.cursor() as cursor:
                remaining = cursor.first()
                while remaining:
                    keys.append(cursor.key().decode())
                    remaining = cursor.next()
        return keys

    def ns_values(self, namespace: str) -> Future[List[Any]]:
        return self._run_command(self._ns_values_impl, namespace)

    def _ns_values_impl(self, namespace: str) -> List[Any]:
        values: List[Any] = []
        db = self._get_db(namespace)
        with self.lmdb_env.begin(db=db, buffers=True) as txn:
            with txn.cursor() as cursor:
                remaining = cursor.first()
                while remaining:
                    values.append(self._decode_value(cursor.value()))
                    remaining = cursor.next()
        return values

    def ns_items(self, namespace: str) -> Future[List[Tuple[str, Any]]]:
        return self._run_command(self._ns_items_impl, namespace)

    def _ns_items_impl(self, namespace: str) -> List[Tuple[str, Any]]:
        ns = self._get_namespace(namespace)
        return list(ns.items())

    def ns_contains(self,
                    namespace: str,
                    key: Union[List[str], str]
                    ) -> Future[bool]:
        return self._run_command(self._ns_contains_impl, namespace, key)

    def _ns_contains_impl(self,
                          namespace: str,
                          key: Union[List[str], str]
                          ) -> bool:
        self._get_db(namespace)
        try:
            key_list = self._process_key(key)
            record = self._get_record(namespace, key_list[0])
            if len(key_list) == 1:
                return True
            reduce(operator.getitem,  # type: ignore
                   key_list[1:], record)
        except Exception:
            return False
        return True

    def register_local_namespace(self,
                                 namespace: str,
                                 forbidden: bool = False
                                 ) -> None:
        if self.server.is_running():
            raise self.server.error(
                "Cannot register a namespace while the "
                "server is running")
        if namespace not in self.namespaces:
            self.namespaces[namespace] = self.lmdb_env.open_db(
                namespace.encode())
        if forbidden:
            if namespace not in self.forbidden_namespaces:
                self.forbidden_namespaces.add(namespace)
                self.insert_item(
                    "moonraker", "database.forbidden_namespaces",
                    list(self.forbidden_namespaces))
        elif namespace not in self.protected_namespaces:
            self.protected_namespaces.add(namespace)
            self.insert_item("moonraker", "database.protected_namespaces",
                             sorted(self.protected_namespaces))

    def wrap_namespace(self,
                       namespace: str,
                       parse_keys: bool = True
                       ) -> NamespaceWrapper:
        if self.server.is_running():
            raise self.server.error(
                "Cannot wrap a namespace while the "
                "server is running")
        if namespace not in self.namespaces:
            raise self.server.error(
                f"Namespace '{namespace}' not found", 404)
        return NamespaceWrapper(namespace, self, parse_keys)

    def _get_db(self, namespace: str) -> object:
        if namespace not in self.namespaces:
            raise self.server.error(f"Namespace '{namespace}' not found", 404)
        return self.namespaces[namespace]

    def _process_key(self, key: Union[List[str], str]) -> List[str]:
        try:
            key_list = key if isinstance(key, list) else key.split('.')
        except Exception:
            key_list = []
        if not key_list or "" in key_list:
            raise self.server.error(f"Invalid Key Format: '{key}'")
        return key_list

    def _insert_record(self, namespace: str, key: str, val: DBType) -> bool:
        db = self._get_db(namespace)
        if val is None:
            return False
        with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn:
            ret = txn.put(key.encode(), self._encode_value(val))
        return ret

    def _get_record(self,
                    namespace: str,
                    key: str,
                    force: bool = False
                    ) -> DBRecord:
        db = self._get_db(namespace)
        with self.lmdb_env.begin(buffers=True, db=db) as txn:
            value = txn.get(key.encode())
            if value is None:
                if force:
                    return {}
                raise self.server.error(
                    f"Key '{key}' in namespace '{namespace}' not found", 404)
            return self._decode_value(value)

    def _get_namespace(self, namespace: str) -> Dict[str, Any]:
        db = self._get_db(namespace)
        result = {}
        invalid_key_result = None
        with self.lmdb_env.begin(write=True, buffers=True, db=db) as txn:
            with txn.cursor() as cursor:
                has_remaining = cursor.first()
                while has_remaining:
                    db_key, value = cursor.item()
                    k = bytes(db_key).decode()
                    if not k:
                        invalid_key_result = self._decode_value(value)
                        logging.info(
                            f"Invalid Key '{db_key}' found in namespace "
                            f"'{namespace}', dropping value: "
                            f"{repr(invalid_key_result)}")
                        try:
                            has_remaining = cursor.delete()
                        except Exception:
                            logging.exception("Error Deleting LMDB Key")
                            has_remaining = cursor.next()
                    else:
                        result[k] = self._decode_value(value)
                        has_remaining = cursor.next()
        return result

    def _encode_value(self, value: DBRecord) -> bytes:
        try:
            enc_func = RECORD_ENCODE_FUNCS[type(value)]
            return enc_func(value)
        except Exception:
            raise self.server.error(
                f"Error encoding val: {value}, type: {type(value)}")

    def _decode_value(self, bvalue: bytes) -> DBRecord:
        fmt = bvalue[0]
        try:
            decode_func = RECORD_DECODE_FUNCS[fmt]
            return decode_func(bvalue)
        except Exception:
            val = bytes(bvalue).decode()
            raise self.server.error(
                f"Error decoding value {val}, format: {chr(fmt)}")

    async def _handle_list_request(self,
                                   web_request: WebRequest
                                   ) -> Dict[str, List[str]]:
        await self.eventloop.run_in_thread(self.thread_lock.acquire)
        try:
            ns_list = set(self.namespaces.keys()) - self.forbidden_namespaces
        finally:
            self.thread_lock.release()
        return {'namespaces': list(ns_list)}

    async def _handle_item_request(self,
                                   web_request: WebRequest
                                   ) -> Dict[str, Any]:
        action = web_request.get_action()
        namespace = web_request.get_str("namespace")
        if namespace in self.forbidden_namespaces:
            raise self.server.error(
                f"Read/Write access to namespace '{namespace}'"
                " is forbidden", 403)
        key: Any
        valid_types: Tuple[type, ...]
        if action != "GET":
            if namespace in self.protected_namespaces:
                raise self.server.error(
                    f"Write access to namespace '{namespace}'"
                    " is forbidden", 403)
            key = web_request.get("key")
            valid_types = (list, str)
        else:
            key = web_request.get("key", None)
            valid_types = (list, str, type(None))
        if not isinstance(key, valid_types):
            raise self.server.error(
                "Value for argument 'key' is an invalid type: "
                f"{type(key).__name__}")
        if action == "GET":
            val = await self.get_item(namespace, key)
        elif action == "POST":
            val = web_request.get("value")
            await self.insert_item(namespace, key, val)
        elif action == "DELETE":
            val = await self.delete_item(namespace, key, drop_empty_db=True)
        return {'namespace': namespace, 'key': key, 'value': val}

    async def close(self) -> None:
        # Decrement unsafe shutdown counter
        unsafe_shutdowns: int = await self.get_item(
            "moonraker", "database.unsafe_shutdowns", 0)
        await self.insert_item(
            "moonraker", "database.unsafe_shutdowns",
            unsafe_shutdowns - 1)
        await self.eventloop.run_in_thread(self.thread_lock.acquire)
        try:
            # log db stats
            msg = ""
            with self.lmdb_env.begin() as txn:
                for db_name, db in self.namespaces.items():
                    stats = txn.stat(db)
                    msg += f"\n{db_name}:\n"
                    msg += "\n".join([f"{k}: {v}" for k, v in stats.items()])
            logging.info(f"Database statistics:\n{msg}")
            self.lmdb_env.sync()
            self.lmdb_env.close()
        finally:
            self.thread_lock.release()


class NamespaceWrapper:
    def __init__(self,
                 namespace: str,
                 database: MoonrakerDatabase,
                 parse_keys: bool
                 ) -> None:
        self.namespace = namespace
        self.db = database
        self.eventloop = database.eventloop
        self.server = database.server
        # When parse_keys is True, string keys are passed straight to
        # the DB methods, which may split them on ".". When False,
        # string keys are wrapped in a list so dots are treated
        # literally as part of a single record key.
        self.parse_keys = parse_keys
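
    # A sketch of the difference (hypothetical namespace contents):
    # with parse_keys=True, the string "fluidd.settings" addresses the
    # nested element record["fluidd"]["settings"]; with parse_keys=False
    # the same string is a single literal record key:
    #
    #   ns = db.wrap_namespace("example", parse_keys=False)
    #   ns["dotted.key"] = 5   # stores a record literally named "dotted.key"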

    def insert(self,
               key: Union[List[str], str],
               value: DBType
               ) -> Awaitable[None]:
        if isinstance(key, str) and not self.parse_keys:
            key = [key]
        return self.db.insert_item(self.namespace, key, value)

    def update_child(self,
                     key: Union[List[str], str],
                     value: DBType
                     ) -> Awaitable[None]:
        if isinstance(key, str) and not self.parse_keys:
            key = [key]
        return self.db.update_item(self.namespace, key, value)

    def update(self, value: Mapping[str, DBRecord]) -> Awaitable[None]:
        return self.db.update_namespace(self.namespace, value)

    def sync(self, value: Mapping[str, DBRecord]) -> Awaitable[None]:
        return self.db.sync_namespace(self.namespace, value)

    def get(self,
            key: Union[List[str], str],
            default: Any = None
            ) -> Future[Any]:
        if isinstance(key, str) and not self.parse_keys:
            key = [key]
        return self.db.get_item(self.namespace, key, default)

    def delete(self, key: Union[List[str], str]) -> Future[Any]:
        if isinstance(key, str) and not self.parse_keys:
            key = [key]
        return self.db.delete_item(self.namespace, key)

    def insert_batch(self, records: Dict[str, Any]) -> Future[None]:
        return self.db.insert_batch(self.namespace, records)

    def move_batch(self,
                   source_keys: List[str],
                   dest_keys: List[str]
                   ) -> Future[None]:
        return self.db.move_batch(self.namespace, source_keys, dest_keys)

    def delete_batch(self, keys: List[str]) -> Future[Dict[str, Any]]:
        return self.db.delete_batch(self.namespace, keys)

    def get_batch(self, keys: List[str]) -> Future[Dict[str, Any]]:
        return self.db.get_batch(self.namespace, keys)

    def length(self) -> Future[int]:
        return self.db.ns_length(self.namespace)

    def as_dict(self) -> Dict[str, Any]:
        self._check_sync_method("as_dict")
        return self.db._get_namespace(self.namespace)

    def __getitem__(self, key: Union[List[str], str]) -> Future[Any]:
        return self.get(key, default=SENTINEL)

    def __setitem__(self,
                    key: Union[List[str], str],
                    value: DBType
                    ) -> None:
        self.insert(key, value)

    def __delitem__(self, key: Union[List[str], str]):
        self.delete(key)

    def __contains__(self, key: Union[List[str], str]) -> bool:
        self._check_sync_method("__contains__")
        if isinstance(key, str) and not self.parse_keys:
            key = [key]
        return self.db.ns_contains(self.namespace, key).result()

    def contains(self, key: Union[List[str], str]) -> Future[bool]:
        if isinstance(key, str) and not self.parse_keys:
            key = [key]
        return self.db.ns_contains(self.namespace, key)

    def keys(self) -> Future[List[str]]:
        return self.db.ns_keys(self.namespace)

    def values(self) -> Future[List[Any]]:
        return self.db.ns_values(self.namespace)

    def items(self) -> Future[List[Tuple[str, Any]]]:
        return self.db.ns_items(self.namespace)

    def pop(self,
            key: Union[List[str], str],
            default: Any = SENTINEL
            ) -> Union[Future[Any], Task[Any]]:
        if not self.server.is_running():
            try:
                val = self.delete(key).result()
            except Exception:
                if isinstance(default, SentinelClass):
                    raise
                val = default
            fut = self.eventloop.create_future()
            fut.set_result(val)
            return fut

        async def _do_pop() -> Any:
            try:
                val = await self.delete(key)
            except Exception:
                if isinstance(default, SentinelClass):
                    raise
                val = default
            return val
        return self.eventloop.create_task(_do_pop())

    def clear(self) -> Awaitable[None]:
        return self.db.clear_namespace(self.namespace)

    def _check_sync_method(self, func_name: str) -> None:
        if self.server.is_running():
            raise self.server.error(
                f"Cannot call method {func_name} while "
                "the eventloop is running")


def load_component(config: ConfigHelper) -> MoonrakerDatabase:
    return MoonrakerDatabase(config)
130
moonraker/components/dbus_manager.py
Normal file
@@ -0,0 +1,130 @@
# DBus Connection Management
#
# Copyright (C) 2022 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
from __future__ import annotations
import os
import pathlib
import logging
import dbus_next
from dbus_next.aio import MessageBus, ProxyInterface
from dbus_next.constants import BusType

# Annotation imports
from typing import (
    TYPE_CHECKING,
    List,
    Optional,
)

if TYPE_CHECKING:
    from confighelper import ConfigHelper

DOC_URL = (
    "https://moonraker.readthedocs.io/en/latest/"
    "installation/#policykit-permissions"
)


class DbusManager:
    Variant = dbus_next.Variant
    DbusError = dbus_next.errors.DBusError

    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.bus: Optional[MessageBus] = None
        self.polkit: Optional[ProxyInterface] = None
        self.warned: bool = False
        proc_data = pathlib.Path("/proc/self/stat").read_text()
        start_clk_ticks = int(proc_data.split()[21])
        self.polkit_subject = [
            "unix-process",
            {
                "pid": dbus_next.Variant("u", os.getpid()),
                "start-time": dbus_next.Variant("t", start_clk_ticks)
            }
        ]

    def is_connected(self) -> bool:
        return self.bus is not None and self.bus.connected

    async def component_init(self) -> None:
        try:
            self.bus = MessageBus(bus_type=BusType.SYSTEM)
            await self.bus.connect()
        except Exception:
            logging.info("Unable to Connect to D-Bus")
            return
        # Make sure that all required actions are registered
        try:
            self.polkit = await self.get_interface(
                "org.freedesktop.PolicyKit1",
                "/org/freedesktop/PolicyKit1/Authority",
                "org.freedesktop.PolicyKit1.Authority")
        except self.DbusError:
            self.server.add_warning(
                "Unable to find DBus PolKit Interface, this suggests PolKit "
                "is not installed on your OS.")

    async def check_permission(self,
                               action: str,
                               err_msg: str = ""
                               ) -> bool:
        if self.polkit is None:
            return False
        try:
            ret = await self.polkit.call_check_authorization(  # type: ignore
                self.polkit_subject, action, {}, 0, "")
        except Exception as e:
            self._check_warned()
            self.server.add_warning(
                f"Error checking authorization for action [{action}]: {e}. "
                "This suggests that a dependency is not installed or "
                f"up to date. {err_msg}.")
            return False
        if not ret[0]:
            self._check_warned()
            self.server.add_warning(
                "Moonraker not authorized for PolicyKit action: "
                f"[{action}], {err_msg}")
        return ret[0]
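
    # A usage sketch from another component (hypothetical action name
    # and error text):
    #
    #   dbus_mgr: DbusManager = self.server.lookup_component("dbus_manager")
    #   ok = await dbus_mgr.check_permission(
    #       "org.freedesktop.login1.power-off",
    #       "Machine power commands will be unavailable")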

    def _check_warned(self):
        if not self.warned:
            self.server.add_warning(
                f"PolKit warnings detected. See {DOC_URL} for instructions "
                "on how to resolve.")
            self.warned = True

    async def get_interface(self,
                            bus_name: str,
                            bus_path: str,
                            interface_name: str
                            ) -> ProxyInterface:
        ret = await self.get_interfaces(bus_name, bus_path,
                                        [interface_name])
        return ret[0]

    async def get_interfaces(self,
                             bus_name: str,
                             bus_path: str,
                             interface_names: List[str]
                             ) -> List[ProxyInterface]:
        if self.bus is None:
            raise self.server.error("Bus not available")
        interfaces: List[ProxyInterface] = []
        introspection = await self.bus.introspect(bus_name, bus_path)
        proxy_obj = self.bus.get_proxy_object(bus_name, bus_path,
                                              introspection)
        for ifname in interface_names:
            intf = proxy_obj.get_interface(ifname)
            interfaces.append(intf)
        return interfaces

    async def close(self):
        if self.bus is not None and self.bus.connected:
            self.bus.disconnect()
            await self.bus.wait_for_disconnect()


def load_component(config: ConfigHelper) -> DbusManager:
    return DbusManager(config)
107
moonraker/components/extensions.py
Normal file
@@ -0,0 +1,107 @@
# Moonraker extension management
#
# Copyright (C) 2022 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
from __future__ import annotations
from websockets import WebSocket

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Dict,
    List,
    Optional,
    Union,
)

if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from websockets import WebRequest


class ExtensionManager:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.agents: Dict[str, WebSocket] = {}
        self.server.register_endpoint(
            "/connection/send_event", ["POST"], self._handle_agent_event,
            transports=["websocket"]
        )
        self.server.register_endpoint(
            "/server/extensions/list", ["GET"], self._handle_list_extensions
        )
        self.server.register_endpoint(
            "/server/extensions/request", ["POST"], self._handle_call_agent
        )

    def register_agent(self, connection: WebSocket) -> None:
        data = connection.client_data
        name = data["name"]
        client_type = data["type"]
        if client_type != "agent":
            raise self.server.error(
                f"Cannot register client type '{client_type}' as an agent"
            )
        if name in self.agents:
            raise self.server.error(
                f"Agent '{name}' already registered and connected"
            )
        self.agents[name] = connection
        evt: Dict[str, Any] = {
            "agent": name, "event": "connected", "data": data
        }
        connection.send_notification("agent_event", [evt])

    def remove_agent(self, connection: WebSocket) -> None:
        name = connection.client_data["name"]
        if name in self.agents:
            del self.agents[name]
            evt: Dict[str, Any] = {"agent": name, "event": "disconnected"}
            connection.send_notification("agent_event", [evt])

    async def _handle_agent_event(self, web_request: WebRequest) -> str:
        conn = web_request.get_connection()
        if not isinstance(conn, WebSocket):
            raise self.server.error("No connection detected")
        if conn.client_data["type"] != "agent":
            raise self.server.error(
                "Only connections of the 'agent' type can send events"
            )
        name = conn.client_data["name"]
        evt_name = web_request.get_str("event")
        if evt_name in ["connected", "disconnected"]:
            raise self.server.error(f"Event '{evt_name}' is reserved")
        data: Optional[Union[List, Dict[str, Any]]]
        data = web_request.get("data", None)
        evt: Dict[str, Any] = {"agent": name, "event": evt_name}
        if data is not None:
            evt["data"] = data
        conn.send_notification("agent_event", [evt])
        return "ok"

    async def _handle_list_extensions(
        self, web_request: WebRequest
    ) -> Dict[str, List[Dict[str, Any]]]:
        agents: List[Dict[str, Any]]
        agents = [agt.client_data for agt in self.agents.values()]
        return {"agents": agents}

    async def _handle_call_agent(self, web_request: WebRequest) -> Any:
        agent = web_request.get_str("agent")
        method: str = web_request.get_str("method")
        args: Optional[Union[List, Dict[str, Any]]]
        args = web_request.get("arguments", None)
        if args is not None and not isinstance(args, (list, dict)):
            raise self.server.error(
                "The 'arguments' field must contain an object or a list"
            )
        if agent not in self.agents:
            raise self.server.error(f"Agent {agent} not connected")
        conn = self.agents[agent]
        return await conn.call_method(method, args)
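
    # An illustrative websocket JSON-RPC request to this endpoint
    # (hypothetical agent/method names; the RPC method name assumes
    # Moonraker's usual path-to-method mapping):
    #
    #   {
    #       "jsonrpc": "2.0",
    #       "method": "server.extensions.request",
    #       "params": {
    #           "agent": "moonagent",
    #           "method": "get_status",
    #           "arguments": {"verbose": true}
    #       },
    #       "id": 4654
    #   }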


def load_component(config: ConfigHelper) -> ExtensionManager:
    return ExtensionManager(config)
15
moonraker/components/file_manager/__init__.py
Normal file
@@ -0,0 +1,15 @@
# Package definition for the file_manager
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
from . import file_manager as fm

from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from confighelper import ConfigHelper


def load_component(config: ConfigHelper) -> fm.FileManager:
    return fm.load_component(config)
1804
moonraker/components/file_manager/file_manager.py
Normal file
File diff suppressed because it is too large
1168
moonraker/components/file_manager/metadata.py
Normal file
File diff suppressed because it is too large
1312
moonraker/components/file_manager/update_manager/update_manager.py
Normal file
File diff suppressed because it is too large
280
moonraker/components/gpio.py
Normal file
@@ -0,0 +1,280 @@
# GPIO Factory helper
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
from __future__ import annotations
import logging
from utils import load_system_module

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Dict,
    Optional
)

if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from eventloop import EventLoop
    GPIO_CALLBACK = Callable[[float, float, int], Optional[Awaitable[None]]]


class GpioFactory:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.gpiod: Any = load_system_module("gpiod")
        GpioEvent.init_constants(self.gpiod)
        self.chips: Dict[str, Any] = {}
        self.reserved_gpios: Dict[str, GpioBase] = {}
        version: str = self.gpiod.version_string()
        self.gpiod_version = tuple(int(v) for v in version.split('.'))
        self.server.add_log_rollover_item(
            "gpiod_version", f"libgpiod version: {version}")

    def _get_gpio_chip(self, chip_name) -> Any:
        if chip_name in self.chips:
            return self.chips[chip_name]
        chip = self.gpiod.Chip(chip_name, self.gpiod.Chip.OPEN_BY_NAME)
        self.chips[chip_name] = chip
        return chip

    def setup_gpio_out(self,
                       pin_name: str,
                       initial_value: int = 0
                       ) -> GpioOutputPin:
        initial_value = int(not not initial_value)
        pparams = self._parse_pin(pin_name)
        pparams['initial_value'] = initial_value
        line = self._request_gpio(pparams)
        try:
            gpio_out = GpioOutputPin(line, pparams)
        except Exception:
            logging.exception("Error Instantiating GpioOutputPin")
            line.release()
            raise
        full_name = pparams['full_name']
        self.reserved_gpios[full_name] = gpio_out
        return gpio_out

    def register_gpio_event(self,
                            pin_name: str,
                            callback: GPIO_CALLBACK
                            ) -> GpioEvent:
        pin_params = self._parse_pin(pin_name, type="event")
        line = self._request_gpio(pin_params)
        event_loop = self.server.get_event_loop()
        try:
            gpio_event = GpioEvent(event_loop, line, pin_params, callback)
        except Exception:
            logging.exception("Error Instantiating GpioEvent")
            line.release()
            raise
        full_name = pin_params['full_name']
        self.reserved_gpios[full_name] = gpio_event
        return gpio_event

    def _request_gpio(self, pin_params: Dict[str, Any]) -> Any:
        full_name = pin_params['full_name']
        if full_name in self.reserved_gpios:
            raise self.server.error(f"GPIO {full_name} already reserved")
        try:
            chip = self._get_gpio_chip(pin_params['chip_id'])
            line = chip.get_line(pin_params['pin_id'])
            args: Dict[str, Any] = {
                'consumer': "moonraker",
                'type': pin_params['request_type']
            }
            if 'flags' in pin_params:
                args['flags'] = pin_params['flags']
            if 'initial_value' in pin_params:
                if self.gpiod_version < (1, 3):
                    args['default_vals'] = [pin_params['initial_value']]
                else:
                    args['default_val'] = pin_params['initial_value']
            line.request(**args)
        except Exception:
            logging.exception(
                f"Unable to init {full_name}. Make sure the gpio is not in "
                "use by another program or exported by sysfs.")
            raise
        return line

    def _parse_pin(self,
                   pin_name: str,
                   type: str = "out"
                   ) -> Dict[str, Any]:
        params: Dict[str, Any] = {
            'orig': pin_name,
            'invert': False,
        }
        pin = pin_name
        if type == "event":
            params['request_type'] = self.gpiod.LINE_REQ_EV_BOTH_EDGES
            flag: str = "disable"
            if pin[0] == "^":
                pin = pin[1:]
                flag = "pullup"
            elif pin[0] == "~":
                pin = pin[1:]
                flag = "pulldown"
            if self.gpiod_version >= (1, 5):
                flag_to_enum = {
                    "disable": self.gpiod.LINE_REQ_FLAG_BIAS_DISABLE,
                    "pullup": self.gpiod.LINE_REQ_FLAG_BIAS_PULL_UP,
                    "pulldown": self.gpiod.LINE_REQ_FLAG_BIAS_PULL_DOWN
                }
                params['flags'] = flag_to_enum[flag]
            elif flag != "disable":
                raise self.server.error(
                    f"Flag {flag} configured for event GPIO '{pin_name}'"
                    " requires libgpiod version 1.5 or later. "
                    f"Current Version: {self.gpiod.version_string()}")
        elif type == "out":
            params['request_type'] = self.gpiod.LINE_REQ_DIR_OUT
        if pin[0] == "!":
            pin = pin[1:]
            params['invert'] = True
            if 'flags' in params:
                params['flags'] |= self.gpiod.LINE_REQ_FLAG_ACTIVE_LOW
            else:
                params['flags'] = self.gpiod.LINE_REQ_FLAG_ACTIVE_LOW
        chip_id: str = "gpiochip0"
        pin_parts = pin.split("/")
        if len(pin_parts) == 2:
            chip_id, pin = pin_parts
        elif len(pin_parts) == 1:
            pin = pin_parts[0]
        # Verify pin
        if not chip_id.startswith("gpiochip") or \
                not chip_id[-1].isdigit() or \
                not pin.startswith("gpio") or \
                not pin[4:].isdigit():
            raise self.server.error(
                f"Invalid Gpio Pin: {pin_name}")
        pin_id = int(pin[4:])
        params['pin_id'] = pin_id
        params['chip_id'] = chip_id
        params['full_name'] = f"{chip_id}:{pin}"
        return params
|
||||
|
||||
def close(self) -> None:
|
||||
for line in self.reserved_gpios.values():
|
||||
line.release()
|
||||
for chip in self.chips.values():
|
||||
chip.close()
|
||||
|
||||
class GpioBase:
|
||||
def __init__(self,
|
||||
line: Any,
|
||||
pin_params: Dict[str, Any]
|
||||
) -> None:
|
||||
self.orig: str = pin_params['orig']
|
||||
self.name: str = pin_params['full_name']
|
||||
self.inverted: bool = pin_params['invert']
|
||||
self.line: Any = line
|
||||
self.value: int = pin_params.get('initial_value', 0)
|
||||
|
||||
def release(self) -> None:
|
||||
self.line.release()
|
||||
|
||||
def is_inverted(self) -> bool:
|
||||
return self.inverted
|
||||
|
||||
def get_value(self) -> int:
|
||||
return self.value
|
||||
|
||||
def get_name(self) -> str:
|
||||
return self.name
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.orig
|
||||
|
||||
class GpioOutputPin(GpioBase):
|
||||
def write(self, value: int) -> None:
|
||||
self.value = int(not not value)
|
||||
self.line.set_value(self.value)
|
||||
|
||||
|
||||
MAX_ERRORS = 20
|
||||
|
||||
class GpioEvent(GpioBase):
|
||||
EVENT_FALLING_EDGE = 0
|
||||
EVENT_RISING_EDGE = 1
|
||||
def __init__(self,
|
||||
event_loop: EventLoop,
|
||||
line: Any,
|
||||
pin_params: Dict[str, Any],
|
||||
callback: GPIO_CALLBACK
|
||||
) -> None:
|
||||
super().__init__(line, pin_params)
|
||||
self.event_loop = event_loop
|
||||
self.fd = line.event_get_fd()
|
||||
self.callback = callback
|
||||
self.on_error: Optional[Callable[[str], None]] = None
|
||||
self.min_evt_time = 0.
|
||||
self.last_event_time = 0.
|
||||
self.error_count = 0
|
||||
self.started = False
|
||||
|
||||
@classmethod
|
||||
def init_constants(cls, gpiod: Any) -> None:
|
||||
cls.EVENT_RISING_EDGE = gpiod.LineEvent.RISING_EDGE
|
||||
cls.EVENT_FALLING_EDGE = gpiod.LineEvent.FALLING_EDGE
|
||||
|
||||
def setup_debounce(self,
|
||||
min_evt_time: float,
|
||||
err_callback: Optional[Callable[[str], None]]
|
||||
) -> None:
|
||||
self.min_evt_time = max(min_evt_time, 0.)
|
||||
self.on_error = err_callback
|
||||
|
||||
def start(self) -> None:
|
||||
if not self.started:
|
||||
self.value = self.line.get_value()
|
||||
self.last_event_time = self.event_loop.get_loop_time()
|
||||
self.event_loop.add_reader(self.fd, self._on_event_trigger)
|
||||
self.started = True
|
||||
logging.debug(f"GPIO {self.name}: Listening for events, "
|
||||
f"current state: {self.value}")
|
||||
|
||||
def stop(self) -> None:
|
||||
if self.started:
|
||||
self.event_loop.remove_reader(self.fd)
|
||||
self.started = False
|
||||
|
||||
def release(self) -> None:
|
||||
self.stop()
|
||||
self.line.release()
|
||||
|
||||
def _on_event_trigger(self) -> None:
|
||||
evt = self.line.event_read()
|
||||
last_val = self.value
|
||||
if evt.type == self.EVENT_RISING_EDGE:
|
||||
self.value = 1
|
||||
elif evt.type == self.EVENT_FALLING_EDGE:
|
||||
self.value = 0
|
||||
eventtime = self.event_loop.get_loop_time()
|
||||
evt_duration = eventtime - self.last_event_time
|
||||
if last_val == self.value or evt_duration < self.min_evt_time:
|
||||
self._increment_error()
|
||||
return
|
||||
self.last_event_time = eventtime
|
||||
self.error_count = 0
|
||||
ret = self.callback(eventtime, evt_duration, self.value)
|
||||
if ret is not None:
|
||||
self.event_loop.create_task(ret)
|
||||
|
||||
def _increment_error(self) -> None:
|
||||
self.error_count += 1
|
||||
if self.error_count >= MAX_ERRORS:
|
||||
self.stop()
|
||||
if self.on_error is not None:
|
||||
self.on_error("Too Many Consecutive Errors, "
|
||||
f"GPIO Event Disabled on {self.name}")
|
||||
|
||||
|
||||
def load_component(config: ConfigHelper) -> GpioFactory:
|
||||
return GpioFactory(config)
|
||||
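
For orientation, here is a minimal sketch of how another Moonraker component might consume this factory. The pin name, debounce window, and the component wiring are illustrative assumptions, not part of this commit.

# Hypothetical usage sketch: registering a debounced GPIO event from
# another component.
import logging

async def on_button(eventtime: float, elapsed: float, value: int) -> None:
    logging.info(f"Button changed to {value} at {eventtime:.2f}")

def load_component(config):
    gpio = config.get_server().load_component(config, "gpio")
    # "^" requests the internal pull-up; "gpiochip0/gpio26" selects the pin
    event = gpio.register_gpio_event("^gpiochip0/gpio26", on_button)
    event.setup_debounce(.05, None)  # ignore edges closer than 50ms apart
    event.start()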
401
moonraker/components/history.py
Normal file
@@ -0,0 +1,401 @@
# History cache for printer jobs
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
import time
import logging
from asyncio import Lock

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Union,
    Optional,
    Dict,
    List,
)
if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from websockets import WebRequest
    from .database import MoonrakerDatabase as DBComp
    from .job_state import JobState
    from .file_manager.file_manager import FileManager

HIST_NAMESPACE = "history"
MAX_JOBS = 10000


class History:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.file_manager: FileManager = self.server.lookup_component(
            'file_manager')
        self.request_lock = Lock()
        database: DBComp = self.server.lookup_component("database")
        self.job_totals: Dict[str, float] = database.get_item(
            "moonraker", "history.job_totals",
            {
                'total_jobs': 0,
                'total_time': 0.,
                'total_print_time': 0.,
                'total_filament_used': 0.,
                'longest_job': 0.,
                'longest_print': 0.
            }).result()

        self.server.register_event_handler(
            "server:klippy_disconnect", self._handle_disconnect)
        self.server.register_event_handler(
            "server:klippy_shutdown", self._handle_shutdown)
        self.server.register_event_handler(
            "job_state:started", self._on_job_started)
        self.server.register_event_handler(
            "job_state:complete", self._on_job_complete)
        self.server.register_event_handler(
            "job_state:cancelled", self._on_job_cancelled)
        self.server.register_event_handler(
            "job_state:standby", self._on_job_standby)
        self.server.register_event_handler(
            "job_state:error", self._on_job_error)
        self.server.register_notification("history:history_changed")

        self.server.register_endpoint(
            "/server/history/job", ['GET', 'DELETE'], self._handle_job_request)
        self.server.register_endpoint(
            "/server/history/list", ['GET'], self._handle_jobs_list)
        self.server.register_endpoint(
            "/server/history/totals", ['GET'], self._handle_job_totals)
        self.server.register_endpoint(
            "/server/history/reset_totals", ['POST'],
            self._handle_job_total_reset)

        database.register_local_namespace(HIST_NAMESPACE)
        self.history_ns = database.wrap_namespace(HIST_NAMESPACE,
                                                  parse_keys=False)

        self.current_job: Optional[PrinterJob] = None
        self.current_job_id: Optional[str] = None
        self.next_job_id: int = 0
        self.cached_job_ids = self.history_ns.keys().result()
        if self.cached_job_ids:
            self.next_job_id = int(self.cached_job_ids[-1], 16) + 1

    async def _handle_job_request(self,
                                  web_request: WebRequest
                                  ) -> Dict[str, Any]:
        async with self.request_lock:
            action = web_request.get_action()
            if action == "GET":
                job_id = web_request.get_str("uid")
                if job_id not in self.cached_job_ids:
                    raise self.server.error(f"Invalid job uid: {job_id}", 404)
                job = await self.history_ns[job_id]
                return {"job": self._prep_requested_job(job, job_id)}
            if action == "DELETE":
                all = web_request.get_boolean("all", False)
                if all:
                    deljobs = self.cached_job_ids
                    self.history_ns.clear()
                    self.cached_job_ids = []
                    self.next_job_id = 0
                    return {'deleted_jobs': deljobs}

                job_id = web_request.get_str("uid")
                if job_id not in self.cached_job_ids:
                    raise self.server.error(f"Invalid job uid: {job_id}", 404)

                self.delete_job(job_id)
                return {'deleted_jobs': [job_id]}
            raise self.server.error("Invalid Request Method")

    async def _handle_jobs_list(self,
                                web_request: WebRequest
                                ) -> Dict[str, Any]:
        async with self.request_lock:
            i = 0
            count = 0
            end_num = len(self.cached_job_ids)
            jobs: List[Dict[str, Any]] = []
            start_num = 0

            before = web_request.get_float("before", -1)
            since = web_request.get_float("since", -1)
            limit = web_request.get_int("limit", 50)
            start = web_request.get_int("start", 0)
            order = web_request.get_str("order", "desc")

            if order not in ["asc", "desc"]:
                raise self.server.error(f"Invalid `order` value: {order}", 400)

            reverse_order = (order == "desc")

            # cached job ids are in ascending order; find the lower and
            # upper boundaries
            if since != -1:
                while start_num < end_num:
                    job_id = self.cached_job_ids[start_num]
                    job: Dict[str, Any] = await self.history_ns[job_id]
                    if job['start_time'] > since:
                        break
                    start_num += 1

            if before != -1:
                while end_num > 0:
                    job_id = self.cached_job_ids[end_num-1]
                    job = await self.history_ns[job_id]
                    if job['end_time'] < before:
                        break
                    end_num -= 1

            if start_num >= end_num or end_num == 0:
                return {"count": 0, "jobs": []}

            i = start
            count = end_num - start_num

            if limit == 0:
                limit = MAX_JOBS

            while i < count and len(jobs) < limit:
                if reverse_order:
                    job_id = self.cached_job_ids[end_num - i - 1]
                else:
                    job_id = self.cached_job_ids[start_num + i]
                job = await self.history_ns[job_id]
                jobs.append(self._prep_requested_job(job, job_id))
                i += 1

            return {"count": count, "jobs": jobs}

    async def _handle_job_totals(self,
                                 web_request: WebRequest
                                 ) -> Dict[str, Dict[str, float]]:
        return {'job_totals': self.job_totals}

    async def _handle_job_total_reset(self,
                                      web_request: WebRequest,
                                      ) -> Dict[str, Dict[str, float]]:
        if self.current_job is not None:
            raise self.server.error(
                "Job in progress, cannot reset totals")
        last_totals = dict(self.job_totals)
        self.job_totals = {
            'total_jobs': 0,
            'total_time': 0.,
            'total_print_time': 0.,
            'total_filament_used': 0.,
            'longest_job': 0.,
            'longest_print': 0.
        }
        database: DBComp = self.server.lookup_component("database")
        await database.insert_item(
            "moonraker", "history.job_totals", self.job_totals)
        return {'last_totals': last_totals}

    def _on_job_started(self,
                        prev_stats: Dict[str, Any],
                        new_stats: Dict[str, Any]
                        ) -> None:
        if self.current_job is not None:
            # Finish with the previous state
            self.finish_job("cancelled", prev_stats)
        self.add_job(PrinterJob(new_stats))

    def _on_job_complete(self,
                         prev_stats: Dict[str, Any],
                         new_stats: Dict[str, Any]
                         ) -> None:
        self.finish_job("completed", new_stats)

    def _on_job_cancelled(self,
                          prev_stats: Dict[str, Any],
                          new_stats: Dict[str, Any]
                          ) -> None:
        self.finish_job("cancelled", new_stats)

    def _on_job_error(self,
                      prev_stats: Dict[str, Any],
                      new_stats: Dict[str, Any]
                      ) -> None:
        self.finish_job("error", new_stats)

    def _on_job_standby(self,
                        prev_stats: Dict[str, Any],
                        new_stats: Dict[str, Any]
                        ) -> None:
        # Backward compatibility with
        # `CLEAR_PAUSE/SDCARD_RESET_FILE` workflow
        self.finish_job("cancelled", prev_stats)

    def _handle_shutdown(self) -> None:
        jstate: JobState = self.server.lookup_component("job_state")
        last_ps = jstate.get_last_stats()
        self.finish_job("klippy_shutdown", last_ps)

    def _handle_disconnect(self) -> None:
        jstate: JobState = self.server.lookup_component("job_state")
        last_ps = jstate.get_last_stats()
        self.finish_job("klippy_disconnect", last_ps)

    def add_job(self, job: PrinterJob) -> None:
        if len(self.cached_job_ids) >= MAX_JOBS:
            self.delete_job(self.cached_job_ids[0])
        job_id = f"{self.next_job_id:06X}"
        self.current_job = job
        self.current_job_id = job_id
        self.grab_job_metadata()
        self.history_ns[job_id] = job.get_stats()
        self.cached_job_ids.append(job_id)
        self.next_job_id += 1
        logging.debug(
            f"History Job Added - Id: {job_id}, File: {job.filename}"
        )
        self.send_history_event("added")

    def delete_job(self, job_id: Union[int, str]) -> None:
        if isinstance(job_id, int):
            job_id = f"{job_id:06X}"

        if job_id in self.cached_job_ids:
            del self.history_ns[job_id]
            self.cached_job_ids.remove(job_id)

    def finish_job(self, status: str, pstats: Dict[str, Any]) -> None:
        if self.current_job is None:
            return
        cj = self.current_job
        if (
            pstats.get('filename') != cj.get('filename') or
            pstats.get('total_duration', 0.) < cj.get('total_duration')
        ):
            # Print stats have been reset, do not update this job with them
            pstats = {}

        self.current_job.finish(status, pstats)
        # Regrab metadata in case it wasn't parsed yet due to a file upload
        self.grab_job_metadata()
        self.save_current_job()
        self._update_job_totals()
        logging.debug(
            f"History Job Finished - Id: {self.current_job_id}, "
            f"File: {self.current_job.filename}, "
            f"Status: {status}"
        )
        self.send_history_event("finished")
        self.current_job = None
        self.current_job_id = None

    async def get_job(self,
                      job_id: Union[int, str]
                      ) -> Optional[Dict[str, Any]]:
        if isinstance(job_id, int):
            job_id = f"{job_id:06X}"
        return await self.history_ns.get(job_id, None)

    def grab_job_metadata(self) -> None:
        if self.current_job is None:
            return
        filename: str = self.current_job.get("filename")
        mdst = self.file_manager.get_metadata_storage()
        metadata: Dict[str, Any] = mdst.get(filename, {})
        if metadata:
            # Add the start time and job id to the
            # persistent metadata storage
            metadata.update({
                'print_start_time': self.current_job.get('start_time'),
                'job_id': self.current_job_id
            })
            mdst.insert(filename, metadata.copy())
        # We don't need to store these fields in the
        # job metadata, as they are redundant
        metadata.pop('print_start_time', None)
        metadata.pop('job_id', None)
        if "thumbnails" in metadata:
            thumb: Dict[str, Any]
            for thumb in metadata['thumbnails']:
                thumb.pop('data', None)
        self.current_job.set("metadata", metadata)

    def save_current_job(self) -> None:
        if self.current_job is None or self.current_job_id is None:
            return
        self.history_ns[self.current_job_id] = self.current_job.get_stats()

    def _update_job_totals(self) -> None:
        if self.current_job is None:
            return
        job = self.current_job
        self.job_totals['total_jobs'] += 1
        self.job_totals['total_time'] += job.get('total_duration')
        self.job_totals['total_print_time'] += job.get('print_duration')
        self.job_totals['total_filament_used'] += job.get('filament_used')
        self.job_totals['longest_job'] = max(
            self.job_totals['longest_job'], job.get('total_duration'))
        self.job_totals['longest_print'] = max(
            self.job_totals['longest_print'], job.get('print_duration'))
        database: DBComp = self.server.lookup_component("database")
        database.insert_item(
            "moonraker", "history.job_totals", self.job_totals)

    def send_history_event(self, evt_action: str) -> None:
        if self.current_job is None or self.current_job_id is None:
            return
        job = self._prep_requested_job(
            self.current_job.get_stats(), self.current_job_id)
        self.server.send_event("history:history_changed",
                               {'action': evt_action, 'job': job})

    def _prep_requested_job(self,
                            job: Dict[str, Any],
                            job_id: str
                            ) -> Dict[str, Any]:
        job['job_id'] = job_id
        job['exists'] = self.file_manager.check_file_exists(
            "gcodes", job['filename'])
        return job

    def on_exit(self) -> None:
        jstate: JobState = self.server.lookup_component("job_state")
        last_ps = jstate.get_last_stats()
        self.finish_job("server_exit", last_ps)


class PrinterJob:
    def __init__(self, data: Dict[str, Any] = {}) -> None:
        self.end_time: Optional[float] = None
        self.filament_used: float = 0
        self.filename: Optional[str] = None
        self.metadata: Optional[Dict[str, Any]] = None
        self.print_duration: float = 0.
        self.status: str = "in_progress"
        self.start_time = time.time()
        self.total_duration: float = 0.
        self.update_from_ps(data)

    def finish(self,
               status: str,
               print_stats: Dict[str, Any] = {}
               ) -> None:
        self.end_time = time.time()
        self.status = status
        self.update_from_ps(print_stats)

    def get(self, name: str) -> Any:
        if not hasattr(self, name):
            return None
        return getattr(self, name)

    def get_stats(self) -> Dict[str, Any]:
        return self.__dict__.copy()

    def set(self, name: str, val: Any) -> None:
        if not hasattr(self, name):
            return
        setattr(self, name, val)

    def update_from_ps(self, data: Dict[str, Any]) -> None:
        for i in data:
            if hasattr(self, i):
                setattr(self, i, data[i])


def load_component(config: ConfigHelper) -> History:
    return History(config)
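
As a quick illustration of the endpoints registered above, a client can page through finished jobs over HTTP. The host and port below are assumptions for a default install; Moonraker wraps HTTP responses in a `result` object.

# Hypothetical client-side sketch using the history endpoints.
import requests

resp = requests.get(
    "http://localhost:7125/server/history/list",
    params={"limit": 5, "order": "desc"})
resp.raise_for_status()
for job in resp.json()["result"]["jobs"]:
    print(job["job_id"], job["filename"], job["status"])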
488
moonraker/components/http_client.py
Normal file
@@ -0,0 +1,488 @@
# Wrapper around Tornado's HTTP Client with a "requests-like" interface
#
# Copyright (C) 2022 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license

from __future__ import annotations
import re
import json
import time
import asyncio
import pathlib
import tempfile
import logging
from utils import ServerError
from tornado.escape import url_escape, url_unescape
from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError
from tornado.httputil import HTTPHeaders
from typing import (
    TYPE_CHECKING,
    Callable,
    Optional,
    Tuple,
    Union,
    Dict,
    List,
    Any
)
if TYPE_CHECKING:
    from moonraker import Server
    from confighelper import ConfigHelper
    from io import BufferedWriter
    StrOrPath = Union[str, pathlib.Path]

MAX_BODY_SIZE = 512 * 1024 * 1024
AsyncHTTPClient.configure(
    None, defaults=dict(user_agent="Moonraker"),
    max_body_size=MAX_BODY_SIZE
)

GITHUB_PREFIX = "https://api.github.com/"


def escape_query_string(qs: str) -> str:
    parts = qs.split("&")
    escaped: List[str] = []
    for p in parts:
        item = p.split("=", 1)
        key = url_escape(item[0])
        if len(item) == 2:
            escaped.append(f"{key}={url_escape(item[1])}")
        else:
            escaped.append(key)
    return "&".join(escaped)


class HttpClient:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.client = AsyncHTTPClient()
        self.response_cache: Dict[str, HttpResponse] = {}

        self.gh_rate_limit: Optional[int] = None
        self.gh_limit_remaining: Optional[int] = None
        self.gh_limit_reset_time: Optional[float] = None

    def register_cached_url(
        self,
        url: str,
        etag: Optional[str] = None,
        last_modified: Optional[str] = None
    ) -> None:
        headers = HTTPHeaders()
        if etag is not None:
            headers["etag"] = etag
        if last_modified is not None:
            headers["last-modified"] = last_modified
        if len(headers) == 0:
            raise self.server.error(
                "Either an Etag or Last Modified Date must be specified")
        empty_resp = HttpResponse(url, 200, b"", headers, None)
        self.response_cache[url] = empty_resp

    def escape_url(self, url: str) -> str:
        # escape the url
        match = re.match(r"(https?://[^/?#]+)([^?#]+)?(\?[^#]+)?(#.+)?", url)
        if match is not None:
            uri, path, qs, fragment = match.groups()
            if path is not None:
                uri += "/".join([url_escape(p, plus=False)
                                 for p in path.split("/")])
            if qs is not None:
                uri += "?" + escape_query_string(qs[1:])
            if fragment is not None:
                uri += "#" + url_escape(fragment[1:], plus=False)
            url = uri
        return url

    async def request(
        self,
        method: str,
        url: str,
        body: Optional[Union[str, List[Any], Dict[str, Any]]] = None,
        headers: Optional[Dict[str, Any]] = None,
        connect_timeout: float = 5.,
        request_timeout: float = 10.,
        attempts: int = 1,
        retry_pause_time: float = .1,
        enable_cache: bool = False,
        send_etag: bool = True,
        send_if_modified_since: bool = True
    ) -> HttpResponse:
        cache_key = url.split("?", 1)[0]
        method = method.upper()
        # prepare the body if required
        req_headers: Dict[str, Any] = {}
        if isinstance(body, (list, dict)):
            body = json.dumps(body)
            req_headers["Content-Type"] = "application/json"
        cached: Optional[HttpResponse] = None
        if enable_cache:
            cached = self.response_cache.get(cache_key)
            if cached is not None:
                if cached.etag is not None and send_etag:
                    req_headers["If-None-Match"] = cached.etag
                if cached.last_modified and send_if_modified_since:
                    req_headers["If-Modified-Since"] = cached.last_modified
        if headers is not None:
            headers.update(req_headers)
        elif req_headers:
            headers = req_headers

        timeout = 1 + connect_timeout + request_timeout
        request = HTTPRequest(url, method, headers, body=body,
                              request_timeout=request_timeout,
                              connect_timeout=connect_timeout)
        err: Optional[BaseException] = None
        for i in range(attempts):
            if i:
                await asyncio.sleep(retry_pause_time)
            try:
                fut = self.client.fetch(request, raise_error=False)
                resp = await asyncio.wait_for(fut, timeout)
            except asyncio.CancelledError:
                raise
            except Exception as e:
                err = e
            else:
                err = resp.error
                if resp.code == 304:
                    err = None
                    if cached is None:
                        if enable_cache:
                            logging.info(
                                "Request returned 304, however no cached "
                                "item was found")
                        result = b""
                    else:
                        logging.debug(f"Request returned from cache: {url}")
                        result = cached.content
                elif resp.error is not None and attempts - i != 1:
                    continue
                else:
                    result = resp.body
                ret = HttpResponse(url, resp.code, result, resp.headers, err)
                break
        else:
            ret = HttpResponse(url, 500, b"", HTTPHeaders(), err)
        if enable_cache and ret.is_cachable():
            logging.debug(f"Caching HTTP Response: {url}")
            self.response_cache[cache_key] = ret
        else:
            self.response_cache.pop(cache_key, None)
        return ret

    async def get(
        self, url: str, headers: Optional[Dict[str, Any]] = None, **kwargs
    ) -> HttpResponse:
        if "enable_cache" not in kwargs:
            kwargs["enable_cache"] = True
        return await self.request("GET", url, None, headers, **kwargs)

    async def post(
        self,
        url: str,
        body: Union[str, List[Any], Dict[str, Any]] = "",
        headers: Optional[Dict[str, Any]] = None,
        **kwargs
    ) -> HttpResponse:
        return await self.request("POST", url, body, headers, **kwargs)

    async def delete(
        self,
        url: str,
        headers: Optional[Dict[str, Any]] = None,
        **kwargs
    ) -> HttpResponse:
        return await self.request("DELETE", url, None, headers, **kwargs)

    async def github_api_request(
        self,
        resource: str,
        attempts: int = 1,
        retry_pause_time: float = .1
    ) -> HttpResponse:
        url = f"{GITHUB_PREFIX}{resource.strip('/')}"
        if (
            self.gh_limit_reset_time is not None and
            self.gh_limit_remaining == 0
        ):
            curtime = time.time()
            if curtime < self.gh_limit_reset_time:
                reset_time = time.ctime(self.gh_limit_reset_time)
                raise self.server.error(
                    f"GitHub Rate Limit Reached\n"
                    f"Request: {url}\n"
                    f"Limit Reset Time: {reset_time}"
                )
        headers = {"Accept": "application/vnd.github.v3+json"}
        resp = await self.get(
            url, headers, attempts=attempts,
            retry_pause_time=retry_pause_time)
        resp_hdrs = resp.headers
        if 'X-Ratelimit-Limit' in resp_hdrs:
            self.gh_rate_limit = int(resp_hdrs['X-Ratelimit-Limit'])
            self.gh_limit_remaining = int(
                resp_hdrs['X-Ratelimit-Remaining'])
            self.gh_limit_reset_time = float(
                resp_hdrs['X-Ratelimit-Reset'])
        return resp

    def github_api_stats(self) -> Dict[str, Any]:
        return {
            'github_rate_limit': self.gh_rate_limit,
            'github_requests_remaining': self.gh_limit_remaining,
            'github_limit_reset_time': self.gh_limit_reset_time,
        }

    async def get_file(
        self,
        url: str,
        content_type: str,
        connect_timeout: float = 5.,
        request_timeout: float = 180.,
        attempts: int = 1,
        retry_pause_time: float = .1,
        enable_cache: bool = False,
    ) -> bytes:
        headers = {"Accept": content_type}
        resp = await self.get(
            url, headers, connect_timeout=connect_timeout,
            request_timeout=request_timeout, attempts=attempts,
            retry_pause_time=retry_pause_time, enable_cache=enable_cache
        )
        resp.raise_for_status()
        return resp.content

    async def download_file(
        self,
        url: str,
        content_type: str,
        destination_path: Optional[StrOrPath] = None,
        download_size: int = -1,
        progress_callback: Optional[Callable[[int, int, int], None]] = None,
        connect_timeout: float = 5.,
        request_timeout: float = 180.,
        attempts: int = 1,
        retry_pause_time: float = 1.
    ) -> pathlib.Path:
        for i in range(attempts):
            dl = StreamingDownload(
                self.server, destination_path, download_size,
                progress_callback)
            try:
                fut = self.client.fetch(
                    url, headers={"Accept": content_type},
                    connect_timeout=connect_timeout,
                    request_timeout=request_timeout,
                    streaming_callback=dl.on_chunk_recd,
                    header_callback=dl.on_headers_recd)
                timeout = connect_timeout + request_timeout + 1.
                resp = await asyncio.wait_for(fut, timeout)
            except asyncio.CancelledError:
                raise
            except Exception:
                if i + 1 == attempts:
                    raise
                await asyncio.sleep(retry_pause_time)
                continue
            finally:
                await dl.close()
            if resp.code < 400:
                return dl.dest_file
        raise self.server.error(f"Retries exceeded for request: {url}")

    def close(self):
        self.client.close()


class HttpResponse:
    def __init__(self,
                 url: str,
                 code: int,
                 result: bytes,
                 response_headers: HTTPHeaders,
                 error: Optional[BaseException]
                 ) -> None:
        self._url = url
        self._code = code
        self._result: bytes = result
        self._encoding: str = "utf-8"
        self._response_headers: HTTPHeaders = response_headers
        self._etag: Optional[str] = response_headers.get("etag", None)
        self._error = error
        self._last_modified: Optional[str] = response_headers.get(
            "last-modified", None)

    def json(self, **kwargs) -> Union[List[Any], Dict[str, Any]]:
        return json.loads(self._result, **kwargs)

    def is_cachable(self) -> bool:
        return self._last_modified is not None or self._etag is not None

    def has_error(self) -> bool:
        return self._error is not None

    def raise_for_status(self, message: Optional[str] = None) -> None:
        if self._error is not None:
            code = 500
            msg = f"HTTP Request Error: {self.url}"
            if isinstance(self._error, HTTPError):
                code = self._code
                if self._error.message is not None:
                    msg = self._error.message
            if message is not None:
                msg = message
            raise ServerError(msg, code) from self._error

    @property
    def encoding(self) -> str:
        return self._encoding

    @encoding.setter
    def encoding(self, new_enc: str) -> None:
        self._encoding = new_enc

    @property
    def text(self) -> str:
        return self._result.decode(encoding=self._encoding)

    @property
    def content(self) -> bytes:
        return self._result

    @property
    def url(self) -> str:
        return self._url

    @property
    def status_code(self) -> int:
        return self._code

    @property
    def headers(self) -> HTTPHeaders:
        return self._response_headers

    @property
    def last_modified(self) -> Optional[str]:
        return self._last_modified

    @property
    def etag(self) -> Optional[str]:
        return self._etag

    @property
    def error(self) -> Optional[BaseException]:
        return self._error


class StreamingDownload:
    def __init__(
        self,
        server: Server,
        dest_path: Optional[StrOrPath],
        download_size: int,
        progress_callback: Optional[Callable[[int, int, int], None]]
    ) -> None:
        self.server = server
        self.event_loop = server.get_event_loop()
        self.need_content_length: bool = True
        self.need_content_disposition: bool = False
        self.request_ok: bool = False
        if dest_path is None:
            # If no destination is provided initialize to a procedurally
            # generated temp file. We will attempt to extract the filename
            # from the Content-Disposition Header
            tmp_dir = tempfile.gettempdir()
            loop_time = int(self.event_loop.get_loop_time())
            tmp_fname = f"moonraker.download-{loop_time}.mrd"
            self.dest_file = pathlib.Path(tmp_dir).joinpath(tmp_fname)
            self.need_content_disposition = True
        elif isinstance(dest_path, str):
            self.dest_file = pathlib.Path(dest_path)
        else:
            self.dest_file = dest_path
        self.filename = self.dest_file.name
        self.file_hdl: Optional[BufferedWriter] = None
        self.total_recd: int = 0
        self.download_size: int = download_size
        self.pct_done: int = 0
        self.chunk_buffer: List[bytes] = []
        self.progress_callback = progress_callback
        self.busy_evt: asyncio.Event = asyncio.Event()
        self.busy_evt.set()

    def on_headers_recd(self, line: str) -> None:
        if not self.need_content_length and not self.need_content_disposition:
            return
        line = line.strip()
        rc_match = re.match(r"HTTP/\d.?\d? (\d+)", line)
        if rc_match is not None:
            self.request_ok = rc_match.group(1) == "200"
            return
        if not self.request_ok:
            return
        parts = line.split(":", 1)
        if len(parts) < 2:
            return
        hname = parts[0].strip().lower()
        hval = parts[1].strip()
        if hname == "content-length" and self.need_content_length:
            self.download_size = int(hval)
            self.need_content_length = False
            logging.debug(
                "Content-Length header received: "
                f"size = {self.download_size}")
        elif (
            hname == "content-disposition" and
            self.need_content_disposition
        ):
            fnr = r"filename[^;\n=]*=(['\"])?(utf-8\'\')?([^\n;]*)(?(1)\1|)"
            matches: List[Tuple[str, str, str]] = re.findall(fnr, hval)
            is_utf8 = False
            for (_, encoding, fname) in matches:
                if encoding.startswith("utf-8"):
                    # Prefer the utf8 filename if included
                    self.filename = url_unescape(
                        fname, encoding="utf-8", plus=False)
                    is_utf8 = True
                    break
                self.filename = fname
            self.need_content_disposition = False
            # Use the filename extracted from the content-disposition header
            self.dest_file = self.dest_file.parent.joinpath(self.filename)
            logging.debug(
                "Content-Disposition header received: filename = "
                f"{self.filename}, utf8: {is_utf8}")

    def on_chunk_recd(self, chunk: bytes) -> None:
        if not chunk:
            return
        self.chunk_buffer.append(chunk)
        if not self.busy_evt.is_set():
            return
        self.busy_evt.clear()
        self.event_loop.register_callback(self._process_buffer)

    async def close(self):
        await self.busy_evt.wait()
        if self.file_hdl is not None:
            await self.event_loop.run_in_thread(self.file_hdl.close)

    async def _process_buffer(self):
        if self.file_hdl is None:
            self.file_hdl = await self.event_loop.run_in_thread(
                self.dest_file.open, "wb")
        while self.chunk_buffer:
            chunk = self.chunk_buffer.pop(0)
            await self.event_loop.run_in_thread(self.file_hdl.write, chunk)
            self.total_recd += len(chunk)
            if self.download_size > 0 and self.progress_callback is not None:
                pct = int(self.total_recd / self.download_size * 100 + .5)
                pct = min(100, pct)
                if pct != self.pct_done:
                    self.pct_done = pct
                    self.progress_callback(
                        pct, self.download_size, self.total_recd)
        self.busy_evt.set()


def load_component(config: ConfigHelper) -> HttpClient:
    return HttpClient(config)
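
A short sketch of the conditional-request flow implemented above: with `enable_cache` the client stores any response carrying an ETag or Last-Modified header, replays those validators on the next request, and substitutes the cached body when the server answers 304. The component name is real; the calling context below is an assumption.

# Hypothetical usage sketch: a cached GET through the http_client component.
async def fetch_releases(self):
    client = self.server.lookup_component("http_client")
    # The first call populates the cache; subsequent calls send
    # If-None-Match / If-Modified-Since and reuse the cached body on a 304.
    resp = await client.github_api_request(
        "repos/Arksine/moonraker/releases", attempts=3)
    resp.raise_for_status()
    return resp.json()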
317
moonraker/components/job_queue.py
Normal file
@@ -0,0 +1,317 @@
# Print Job Queue Implementation
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
import asyncio
import time
import logging

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Optional,
    Dict,
    List,
    Union,
)
if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from websockets import WebRequest
    from .klippy_apis import KlippyAPI
    from .file_manager.file_manager import FileManager


class JobQueue:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.queued_jobs: Dict[str, QueuedJob] = {}
        self.lock = asyncio.Lock()
        self.load_on_start = config.getboolean("load_on_startup", False)
        self.automatic = config.getboolean("automatic_transition", False)
        self.queue_state: str = "ready" if self.automatic else "paused"
        self.job_delay = config.getfloat("job_transition_delay", 0.01)
        if self.job_delay <= 0.:
            raise config.error(
                "Value for option 'job_transition_delay' in section "
                "[job_queue] must be above 0.0")
        self.job_transition_gcode = config.get(
            "job_transition_gcode", "").strip()
        self.pop_queue_handle: Optional[asyncio.TimerHandle] = None

        self.server.register_event_handler(
            "server:klippy_ready", self._handle_ready)
        self.server.register_event_handler(
            "server:klippy_shutdown", self._handle_shutdown)
        self.server.register_event_handler(
            "job_state:complete", self._on_job_complete)
        self.server.register_event_handler(
            "job_state:error", self._on_job_abort)
        self.server.register_event_handler(
            "job_state:cancelled", self._on_job_abort)

        self.server.register_notification("job_queue:job_queue_changed")
        self.server.register_remote_method("pause_job_queue", self.pause_queue)
        self.server.register_remote_method("start_job_queue",
                                           self.start_queue)

        self.server.register_endpoint(
            "/server/job_queue/job", ['POST', 'DELETE'],
            self._handle_job_request)
        self.server.register_endpoint(
            "/server/job_queue/pause", ['POST'], self._handle_pause_queue)
        self.server.register_endpoint(
            "/server/job_queue/start", ['POST'], self._handle_start_queue)
        self.server.register_endpoint(
            "/server/job_queue/status", ['GET'], self._handle_queue_status)

    async def _handle_ready(self) -> None:
        async with self.lock:
            if not self.load_on_start or not self.queued_jobs:
                return
            # start a queued print
            if self.queue_state in ['ready', 'paused']:
                event_loop = self.server.get_event_loop()
                self._set_queue_state("loading")
                self.pop_queue_handle = event_loop.delay_callback(
                    1., self._pop_job, False)

    async def _handle_shutdown(self) -> None:
        await self.pause_queue()
        if not self.queued_jobs and self.automatic:
            self._set_queue_state("ready")

    async def _on_job_complete(self,
                               prev_stats: Dict[str, Any],
                               new_stats: Dict[str, Any]
                               ) -> None:
        async with self.lock:
            # Transition to the next job in the queue
            if self.queue_state == "ready" and self.queued_jobs:
                event_loop = self.server.get_event_loop()
                self._set_queue_state("loading")
                self.pop_queue_handle = event_loop.delay_callback(
                    self.job_delay, self._pop_job)

    async def _on_job_abort(self,
                            prev_stats: Dict[str, Any],
                            new_stats: Dict[str, Any]
                            ) -> None:
        async with self.lock:
            if self.queued_jobs:
                self._set_queue_state("paused")

    async def _pop_job(self, need_transition: bool = True) -> None:
        self.pop_queue_handle = None
        async with self.lock:
            if self.queue_state == "paused":
                return
            if not self.queued_jobs:
                qs = "ready" if self.automatic else "paused"
                self._set_queue_state(qs)
                return
            kapis: KlippyAPI = self.server.lookup_component('klippy_apis')
            uid, job = list(self.queued_jobs.items())[0]
            filename = str(job)
            can_print = await self._check_can_print()
            if not can_print or self.queue_state != "loading":
                self._set_queue_state("paused")
                return
            try:
                if self.job_transition_gcode and need_transition:
                    await kapis.run_gcode(self.job_transition_gcode)
                    # Check to see if the queue was paused while running
                    # the job transition gcode
                    if self.queue_state != "loading":
                        raise self.server.error(
                            "Queue State Changed during Transition Gcode")
                self._set_queue_state("starting")
                await kapis.start_print(filename)
            except self.server.error:
                logging.exception(f"Error Loading print: {filename}")
                self._set_queue_state("paused")
            else:
                self.queued_jobs.pop(uid, None)
                if self.queue_state == "starting":
                    # If the queue was not paused while starting the print,
                    # update the state to reflect the remaining jobs
                    set_ready = not self.queued_jobs or self.automatic
                    self.queue_state = "ready" if set_ready else "paused"
                self._send_queue_event(action="job_loaded")

    async def _check_can_print(self) -> bool:
        # Query the latest stats
        kapis: KlippyAPI = self.server.lookup_component('klippy_apis')
        try:
            result = await kapis.query_objects({"print_stats": None})
        except Exception:
            # Klippy not connected
            return False
        if 'print_stats' not in result:
            return False
        state: str = result['print_stats']['state']
        if state in ["printing", "paused"]:
            return False
        return True

    async def queue_job(self,
                        filenames: Union[str, List[str]],
                        check_exists: bool = True
                        ) -> None:
        async with self.lock:
            # Make sure that the file exists
            if isinstance(filenames, str):
                filenames = [filenames]
            if check_exists:
                # Make sure all files exist before adding them to the queue
                for fname in filenames:
                    self._check_job_file(fname)
            for fname in filenames:
                queued_job = QueuedJob(fname)
                self.queued_jobs[queued_job.job_id] = queued_job
            self._send_queue_event(action="jobs_added")

    async def delete_job(self,
                         job_ids: Union[str, List[str]],
                         all: bool = False
                         ) -> None:
        async with self.lock:
            if not self.queued_jobs:
                # No jobs in queue, nothing to delete
                return
            if all:
                self.queued_jobs.clear()
            elif job_ids:
                if isinstance(job_ids, str):
                    job_ids = [job_ids]
                for uid in job_ids:
                    self.queued_jobs.pop(uid, None)
            else:
                # Don't notify, nothing was deleted
                return
            self._send_queue_event(action="jobs_removed")

    async def pause_queue(self) -> None:
        self._set_queue_state("paused")
        if self.pop_queue_handle is not None:
            self.pop_queue_handle.cancel()
            self.pop_queue_handle = None
        # Acquire the lock to wait for any pending operations to
        # complete
        await self.lock.acquire()
        self.lock.release()

    async def start_queue(self) -> None:
        async with self.lock:
            if self.queue_state != "loading":
                if self.queued_jobs and await self._check_can_print():
                    self._set_queue_state("loading")
                    event_loop = self.server.get_event_loop()
                    self.pop_queue_handle = event_loop.delay_callback(
                        0.01, self._pop_job)
                else:
                    self._set_queue_state("ready")

    def _job_map_to_list(self) -> List[Dict[str, Any]]:
        cur_time = time.time()
        return [job.as_dict(cur_time) for
                job in self.queued_jobs.values()]

    def _check_job_file(self, job_name: str) -> None:
        fm: FileManager = self.server.lookup_component('file_manager')
        if not fm.check_file_exists("gcodes", job_name):
            raise self.server.error(
                f"G-Code File {job_name} does not exist")

    def _set_queue_state(self, new_state: str) -> None:
        if new_state != self.queue_state:
            self.queue_state = new_state
            self._send_queue_event()

    def _send_queue_event(self, action: str = "state_changed"):
        updated_queue: Optional[List[Dict[str, Any]]] = None
        if action != "state_changed":
            updated_queue = self._job_map_to_list()
        event_loop = self.server.get_event_loop()
        event_loop.delay_callback(
            .05, self.server.send_event, "job_queue:job_queue_changed",
            {
                'action': action,
                'updated_queue': updated_queue,
                'queue_state': self.queue_state
            })

    async def _handle_job_request(self,
                                  web_request: WebRequest
                                  ) -> Dict[str, Any]:
        action = web_request.get_action()
        if action == "POST":
            files: Union[List[str], str] = web_request.get('filenames')
            if isinstance(files, str):
                files = [f.strip() for f in files.split(',') if f.strip()]
            # Validate that all files exist before queueing
            await self.queue_job(files)
        elif action == "DELETE":
            if web_request.get_boolean("all", False):
                await self.delete_job([], all=True)
            else:
                job_ids: Union[List[str], str] = web_request.get('job_ids')
                if isinstance(job_ids, str):
                    job_ids = [f.strip() for f in job_ids.split(',')
                               if f.strip()]
                await self.delete_job(job_ids)
        else:
            raise self.server.error(f"Invalid action: {action}")
        return {
            'queued_jobs': self._job_map_to_list(),
            'queue_state': self.queue_state
        }

    async def _handle_pause_queue(self,
                                  web_request: WebRequest
                                  ) -> Dict[str, Any]:
        await self.pause_queue()
        return {
            'queued_jobs': self._job_map_to_list(),
            'queue_state': self.queue_state
        }

    async def _handle_start_queue(self,
                                  web_request: WebRequest
                                  ) -> Dict[str, Any]:
        await self.start_queue()
        return {
            'queued_jobs': self._job_map_to_list(),
            'queue_state': self.queue_state
        }

    async def _handle_queue_status(self,
                                   web_request: WebRequest
                                   ) -> Dict[str, Any]:
        return {
            'queued_jobs': self._job_map_to_list(),
            'queue_state': self.queue_state
        }

    async def close(self):
        await self.pause_queue()


class QueuedJob:
    def __init__(self, filename: str) -> None:
        self.filename = filename
        self.job_id = f"{id(self):016X}"
        self.time_added = time.time()

    def __str__(self) -> str:
        return self.filename

    def as_dict(self, cur_time: float) -> Dict[str, Any]:
        return {
            'filename': self.filename,
            'job_id': self.job_id,
            'time_added': self.time_added,
            'time_in_queue': cur_time - self.time_added
        }


def load_component(config: ConfigHelper) -> JobQueue:
    return JobQueue(config)
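
To illustrate the endpoints registered above, a client might queue two files and then start the queue. Host, port, and file names are assumptions; `filenames` may be passed as a comma separated string, which the handler splits.

# Hypothetical client-side sketch for the job_queue endpoints.
import requests

base = "http://localhost:7125/server/job_queue"
requests.post(f"{base}/job",
              params={"filenames": "part_a.gcode,part_b.gcode"})
requests.post(f"{base}/start")
print(requests.get(f"{base}/status").json()["result"]["queue_state"])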
88
moonraker/components/job_state.py
Normal file
@@ -0,0 +1,88 @@
# Klippy job state event handlers
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
import logging

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Optional,
    Dict,
    List,
)
if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from .klippy_apis import KlippyAPI


class JobState:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.last_print_stats: Dict[str, Any] = {}
        self.server.register_event_handler(
            "server:klippy_started", self._handle_started)
        self.server.register_event_handler(
            "server:status_update", self._status_update)

    async def _handle_started(self, state: str) -> None:
        if state != "ready":
            return
        kapis: KlippyAPI = self.server.lookup_component('klippy_apis')
        sub: Dict[str, Optional[List[str]]] = {"print_stats": None}
        try:
            result = await kapis.subscribe_objects(sub)
        except self.server.error as e:
            logging.info(f"Error subscribing to print_stats: {e}")
            return
        self.last_print_stats = result.get("print_stats", {})
        if "state" in self.last_print_stats:
            state = self.last_print_stats["state"]
            logging.info(f"Job state initialized: {state}")

    async def _status_update(self, data: Dict[str, Any]) -> None:
        if 'print_stats' not in data:
            return
        ps = data['print_stats']
        if "state" in ps:
            prev_ps = dict(self.last_print_stats)
            old_state: str = prev_ps['state']
            new_state: str = ps['state']
            new_ps = dict(self.last_print_stats)
            new_ps.update(ps)
            if new_state != old_state:
                if new_state == "printing":
                    # The "printing" state needs some special handling
                    # to detect "resets" and a transition from pause to
                    # resume
                    if self._check_resumed(prev_ps, new_ps):
                        new_state = "resumed"
                    else:
                        logging.info(
                            f"Job Started: {new_ps['filename']}"
                        )
                        new_state = "started"
                logging.debug(
                    f"Job State Changed - Prev State: {old_state}, "
                    f"New State: {new_state}"
                )
                self.server.send_event(
                    f"job_state:{new_state}", prev_ps, new_ps)
        self.last_print_stats.update(ps)

    def _check_resumed(self,
                       prev_ps: Dict[str, Any],
                       new_ps: Dict[str, Any]
                       ) -> bool:
        return (
            prev_ps['state'] == "paused" and
            prev_ps['filename'] == new_ps['filename'] and
            prev_ps['total_duration'] < new_ps['total_duration']
        )

    def get_last_stats(self) -> Dict[str, Any]:
        return dict(self.last_print_stats)


def load_component(config: ConfigHelper) -> JobState:
    return JobState(config)
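
Other components consume these transitions by subscribing to the `job_state:*` events emitted above; the `history` and `job_queue` components in this commit do exactly that. A minimal sketch of such a subscriber (the component itself is hypothetical):

# Hypothetical subscriber sketch for the job_state events.
import logging

class JobNotifier:
    def __init__(self, config):
        server = config.get_server()
        # prev_stats / new_stats are the two dicts passed to send_event above
        server.register_event_handler(
            "job_state:complete", self._on_complete)

    def _on_complete(self, prev_stats, new_stats):
        logging.info(f"Print finished: {new_stats.get('filename')}")

def load_component(config):
    return JobNotifier(config)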
236
moonraker/components/klippy_apis.py
Normal file
@@ -0,0 +1,236 @@
# Helper for Moonraker to Klippy API calls.
|
||||
#
|
||||
# Copyright (C) 2020 Eric Callahan <arksine.code@gmail.com>
|
||||
#
|
||||
# This file may be distributed under the terms of the GNU GPLv3 license.
|
||||
|
||||
from __future__ import annotations
|
||||
from utils import SentinelClass
|
||||
from websockets import WebRequest, Subscribable
|
||||
|
||||
# Annotation imports
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Union,
|
||||
Optional,
|
||||
Dict,
|
||||
List,
|
||||
TypeVar,
|
||||
Mapping,
|
||||
)
|
||||
if TYPE_CHECKING:
|
||||
from confighelper import ConfigHelper
|
||||
from websockets import WebRequest
|
||||
from klippy_connection import KlippyConnection as Klippy
|
||||
Subscription = Dict[str, Optional[List[Any]]]
|
||||
_T = TypeVar("_T")
|
||||
|
||||
INFO_ENDPOINT = "info"
|
||||
ESTOP_ENDPOINT = "emergency_stop"
|
||||
LIST_EPS_ENDPOINT = "list_endpoints"
|
||||
GC_OUTPUT_ENDPOINT = "gcode/subscribe_output"
|
||||
GCODE_ENDPOINT = "gcode/script"
|
||||
SUBSCRIPTION_ENDPOINT = "objects/subscribe"
|
||||
STATUS_ENDPOINT = "objects/query"
|
||||
OBJ_LIST_ENDPOINT = "objects/list"
|
||||
REG_METHOD_ENDPOINT = "register_remote_method"
|
||||
SENTINEL = SentinelClass.get_instance()
|
||||
|
||||
class KlippyAPI(Subscribable):
|
||||
def __init__(self, config: ConfigHelper) -> None:
|
||||
self.server = config.get_server()
|
||||
self.klippy: Klippy = self.server.lookup_component("klippy_connection")
|
||||
app_args = self.server.get_app_args()
|
||||
self.version = app_args.get('software_version')
|
||||
# Maintain a subscription for all moonraker requests, as
|
||||
# we do not want to overwrite them
|
||||
self.host_subscription: Subscription = {}
|
||||
|
||||
# Register GCode Aliases
|
||||
self.server.register_endpoint(
|
||||
"/printer/print/pause", ['POST'], self._gcode_pause)
|
||||
self.server.register_endpoint(
|
||||
"/printer/print/resume", ['POST'], self._gcode_resume)
|
||||
self.server.register_endpoint(
|
||||
"/printer/print/cancel", ['POST'], self._gcode_cancel)
|
||||
self.server.register_endpoint(
|
||||
"/printer/print/start", ['POST'], self._gcode_start_print)
|
||||
self.server.register_endpoint(
|
||||
"/printer/restart", ['POST'], self._gcode_restart)
|
||||
self.server.register_endpoint(
|
||||
"/printer/firmware_restart", ['POST'], self._gcode_firmware_restart)
|
||||
|
||||
async def _gcode_pause(self, web_request: WebRequest) -> str:
|
||||
return await self.pause_print()
|
||||
|
||||
async def _gcode_resume(self, web_request: WebRequest) -> str:
|
||||
return await self.resume_print()
|
||||
|
||||
async def _gcode_cancel(self, web_request: WebRequest) -> str:
|
||||
return await self.cancel_print()
|
||||
|
||||
async def _gcode_start_print(self, web_request: WebRequest) -> str:
|
||||
filename: str = web_request.get_str('filename')
|
||||
return await self.start_print(filename)
|
||||
|
||||
async def _gcode_restart(self, web_request: WebRequest) -> str:
|
||||
return await self.do_restart("RESTART")
|
||||
|
||||
async def _gcode_firmware_restart(self, web_request: WebRequest) -> str:
|
||||
return await self.do_restart("FIRMWARE_RESTART")
|
||||
|
||||
async def _send_klippy_request(self,
|
||||
method: str,
|
||||
params: Dict[str, Any],
|
||||
default: Any = SENTINEL
|
||||
) -> Any:
|
||||
try:
|
||||
result = await self.klippy.request(
|
||||
WebRequest(method, params, conn=self))
|
||||
except self.server.error:
|
||||
if isinstance(default, SentinelClass):
|
||||
raise
|
||||
result = default
|
||||
return result
|
||||
|
||||
async def run_gcode(self,
|
||||
script: str,
|
||||
default: Any = SENTINEL
|
||||
) -> str:
|
||||
params = {'script': script}
|
||||
result = await self._send_klippy_request(
|
||||
GCODE_ENDPOINT, params, default)
|
||||
return result
|
||||
|
||||
async def start_print(self, filename: str) -> str:
|
||||
# WARNING: Do not call this method from within the following
|
||||
# event handlers:
|
||||
# klippy_identified, klippy_started, klippy_ready, klippy_disconnect
|
||||
# Doing so will result in "wait_started" blocking for the specifed
|
||||
# timeout (default 20s) and returning False.
|
||||
# XXX - validate that file is on disk
|
||||
if filename[0] == '/':
|
||||
filename = filename[1:]
|
||||
# Escape existing double quotes in the file name
|
||||
filename = filename.replace("\"", "\\\"")
|
||||
script = f'SDCARD_PRINT_FILE FILENAME="{filename}"'
|
||||
await self.klippy.wait_started()
|
||||
return await self.run_gcode(script)
|
||||
|
||||
async def pause_print(
|
||||
self, default: Union[SentinelClass, _T] = SENTINEL
|
||||
) -> Union[_T, str]:
|
||||
self.server.send_event("klippy_apis:pause_requested")
|
||||
return await self._send_klippy_request(
|
||||
"pause_resume/pause", {}, default)
|
||||
|
||||
async def resume_print(
|
||||
self, default: Union[SentinelClass, _T] = SENTINEL
|
||||
) -> Union[_T, str]:
|
||||
self.server.send_event("klippy_apis:resume_requested")
|
||||
return await self._send_klippy_request(
|
||||
"pause_resume/resume", {}, default)
|
||||
|
||||
async def cancel_print(
|
||||
self, default: Union[SentinelClass, _T] = SENTINEL
|
||||
) -> Union[_T, str]:
|
||||
self.server.send_event("klippy_apis:cancel_requested")
|
||||
return await self._send_klippy_request(
|
||||
"pause_resume/cancel", {}, default)
|
||||
|
||||
    async def do_restart(self, gc: str) -> str:
        # WARNING: Do not call this method from within the following
        # event handlers:
        # klippy_identified, klippy_started, klippy_ready, klippy_disconnect
        # Doing so will result in "wait_started" blocking for the specified
        # timeout (default 20s) and returning False.
        await self.klippy.wait_started()
        try:
            result = await self.run_gcode(gc)
        except self.server.error as e:
            if str(e) == "Klippy Disconnected":
                result = "ok"
            else:
                raise
        return result

    async def list_endpoints(self,
                             default: Union[SentinelClass, _T] = SENTINEL
                             ) -> Union[_T, Dict[str, List[str]]]:
        return await self._send_klippy_request(
            LIST_EPS_ENDPOINT, {}, default)

    async def emergency_stop(self) -> str:
        return await self._send_klippy_request(ESTOP_ENDPOINT, {})

    async def get_klippy_info(self,
                              send_id: bool = False,
                              default: Union[SentinelClass, _T] = SENTINEL
                              ) -> Union[_T, Dict[str, Any]]:
        params = {}
        if send_id:
            ver = self.version
            params = {'client_info': {'program': "Moonraker", 'version': ver}}
        return await self._send_klippy_request(INFO_ENDPOINT, params, default)

    async def get_object_list(self,
                              default: Union[SentinelClass, _T] = SENTINEL
                              ) -> Union[_T, List[str]]:
        result = await self._send_klippy_request(
            OBJ_LIST_ENDPOINT, {}, default)
        if isinstance(result, dict) and 'objects' in result:
            return result['objects']
        return result

    async def query_objects(self,
                            objects: Mapping[str, Optional[List[str]]],
                            default: Union[SentinelClass, _T] = SENTINEL
                            ) -> Union[_T, Dict[str, Any]]:
        params = {'objects': objects}
        result = await self._send_klippy_request(
            STATUS_ENDPOINT, params, default)
        if isinstance(result, dict) and 'status' in result:
            return result['status']
        return result

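    # Note: subscriptions are cumulative.  Fields requested for an object
    # are merged (set union) with the fields of any prior subscription for
    # that object, and a value of None subscribes to every field.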
    async def subscribe_objects(self,
                                objects: Mapping[str, Optional[List[str]]],
                                default: Union[SentinelClass, _T] = SENTINEL
                                ) -> Union[_T, Dict[str, Any]]:
        for obj, items in objects.items():
            if obj in self.host_subscription:
                prev = self.host_subscription[obj]
                if items is None or prev is None:
                    self.host_subscription[obj] = None
                else:
                    uitems = list(set(prev) | set(items))
                    self.host_subscription[obj] = uitems
            else:
                self.host_subscription[obj] = items
        params = {'objects': self.host_subscription}
        result = await self._send_klippy_request(
            SUBSCRIPTION_ENDPOINT, params, default)
        if isinstance(result, dict) and 'status' in result:
            return result['status']
        return result

    async def subscribe_gcode_output(self) -> str:
        template = {'response_template':
                    {'method': "process_gcode_response"}}
        return await self._send_klippy_request(GC_OUTPUT_ENDPOINT, template)

    async def register_method(self, method_name: str) -> str:
        return await self._send_klippy_request(
            REG_METHOD_ENDPOINT,
            {'response_template': {"method": method_name},
             'remote_method': method_name})

    def send_status(self,
                    status: Dict[str, Any],
                    eventtime: float
                    ) -> None:
        self.server.send_event("server:status_update", status)


def load_component(config: ConfigHelper) -> KlippyAPI:
    return KlippyAPI(config)
118
moonraker/components/ldap.py
Normal file
@@ -0,0 +1,118 @@
|
||||
# LDAP authentication for Moonraker
|
||||
#
|
||||
# Copyright (C) 2022 Eric Callahan <arksine.code@gmail.com>
|
||||
# Copyright (C) 2022 Luca Schöneberg <luca-schoeneberg@outlook.com>
|
||||
#
|
||||
# This file may be distributed under the terms of the GNU GPLv3 license
|
||||
|
||||
from __future__ import annotations
|
||||
import asyncio
|
||||
import logging
|
||||
import ldap3
|
||||
from ldap3.core.exceptions import LDAPExceptionError
|
||||
|
||||
# Annotation imports
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Optional
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from confighelper import ConfigHelper
|
||||
from ldap3.abstract.entry import Entry
|
||||
|
||||
class MoonrakerLDAP:
|
||||
def __init__(self, config: ConfigHelper) -> None:
|
||||
self.server = config.get_server()
|
||||
self.ldap_host = config.get('ldap_host')
|
||||
self.ldap_port = config.getint("ldap_port", None)
|
||||
self.ldap_secure = config.getboolean("ldap_secure", False)
|
||||
base_dn_template = config.gettemplate('base_dn')
|
||||
self.base_dn = base_dn_template.render()
|
||||
self.group_dn: Optional[str] = None
|
||||
group_dn_template = config.gettemplate("group_dn", None)
|
||||
if group_dn_template is not None:
|
||||
self.group_dn = group_dn_template.render()
|
||||
self.active_directory = config.getboolean('is_active_directory', False)
|
||||
self.bind_dn: Optional[str] = None
|
||||
self.bind_password: Optional[str] = None
|
||||
bind_dn_template = config.gettemplate('bind_dn', None)
|
||||
bind_pass_template = config.gettemplate('bind_password', None)
|
||||
if bind_dn_template is not None:
|
||||
self.bind_dn = bind_dn_template.render()
|
||||
if bind_pass_template is None:
|
||||
raise config.error(
|
||||
"Section [ldap]: Option 'bind_password' is "
|
||||
"required when 'bind_dn' is provided"
|
||||
)
|
||||
self.bind_password = bind_pass_template.render()
|
||||
self.lock = asyncio.Lock()
|
||||
|
||||
async def authenticate_ldap_user(self, username, password) -> None:
|
||||
eventloop = self.server.get_event_loop()
|
||||
async with self.lock:
|
||||
await eventloop.run_in_thread(
|
||||
self._perform_ldap_auth, username, password
|
||||
)
|
||||
|
||||
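    # Authentication happens in two steps: a search bind (optionally using
    # the configured bind_dn credentials) locates the user's entry, then
    # the connection is rebound as that entry's DN with the supplied
    # password.  A successful rebind proves the credentials are valid.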
    def _perform_ldap_auth(self, username, password) -> None:
        server = ldap3.Server(
            self.ldap_host, self.ldap_port, use_ssl=self.ldap_secure,
            connect_timeout=10.
        )
        conn_args = {
            "user": self.bind_dn,
            "password": self.bind_password,
            "auto_bind": ldap3.AUTO_BIND_NO_TLS,
        }
        attr_name = "sAMAccountName" if self.active_directory else "uid"
        ldfilt = f"(&(objectClass=Person)({attr_name}={username}))"
        try:
            with ldap3.Connection(server, **conn_args) as conn:
                ret = conn.search(
                    self.base_dn, ldfilt, attributes=["memberOf"]
                )
                if not ret:
                    raise self.server.error(
                        f"LDAP User '{username}' Not Found", 401
                    )
                user: Entry = conn.entries[0]
                rebind_success = conn.rebind(user.entry_dn, password)
                if not rebind_success:
                    # The server may not allow rebinding; attempt to start
                    # a new connection to validate the credentials
                    logging.debug(
                        "LDAP Rebind failed, attempting to validate "
                        "credentials with new connection."
                    )
                    conn_args["user"] = user.entry_dn
                    conn_args["password"] = password
                    with ldap3.Connection(server, **conn_args) as conn:
                        if self._validate_group(username, user):
                            return
                elif self._validate_group(username, user):
                    return
        except LDAPExceptionError:
            err_msg = "LDAP authentication failed"
        else:
            err_msg = "Invalid LDAP Username or Password"
        raise self.server.error(err_msg, 401)

    def _validate_group(self, username: str, user: Entry) -> bool:
        if self.group_dn is None:
            logging.debug(f"LDAP User {username} login successful")
            return True
        if not hasattr(user, "memberOf"):
            return False
        for group in user.memberOf.values:
            if group == self.group_dn:
                logging.debug(
                    f"LDAP User {username} group match success, "
                    "login successful"
                )
                return True
        return False


def load_component(config: ConfigHelper) -> MoonrakerLDAP:
    return MoonrakerLDAP(config)
798
moonraker/components/machine.py
Normal file
@@ -0,0 +1,798 @@
|
||||
# Machine manipulation request handlers
|
||||
#
|
||||
# Copyright (C) 2020 Eric Callahan <arksine.code@gmail.com>
|
||||
#
|
||||
# This file may be distributed under the terms of the GNU GPLv3 license.
|
||||
|
||||
from __future__ import annotations
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import pathlib
|
||||
import logging
|
||||
import asyncio
|
||||
import platform
|
||||
import socket
|
||||
import ipaddress
|
||||
import distro
|
||||
|
||||
# Annotation imports
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
List,
|
||||
Optional,
|
||||
Tuple
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from confighelper import ConfigHelper
|
||||
from websockets import WebRequest
|
||||
from .shell_command import ShellCommandFactory as SCMDComp
|
||||
from .proc_stats import ProcStats
|
||||
from .dbus_manager import DbusManager
|
||||
from dbus_next.aio import ProxyInterface
|
||||
from dbus_next import Variant
|
||||
|
||||
ALLOWED_SERVICES = [
|
||||
"moonraker", "klipper", "webcamd", "MoonCord",
|
||||
"KlipperScreen", "moonraker-telegram-bot",
|
||||
"sonar", "crowsnest"
|
||||
]
|
||||
CGROUP_PATH = "/proc/1/cgroup"
|
||||
SCHED_PATH = "/proc/1/sched"
|
||||
SYSTEMD_PATH = "/etc/systemd/system"
|
||||
SD_CID_PATH = "/sys/block/mmcblk0/device/cid"
|
||||
SD_CSD_PATH = "/sys/block/mmcblk0/device/csd"
|
||||
SD_MFGRS = {
|
||||
'1b': "Samsung",
|
||||
'03': "Sandisk",
|
||||
'74': "PNY"
|
||||
}
|
||||
IP_FAMILIES = {'inet': 'ipv4', 'inet6': 'ipv6'}
|
||||
NETWORK_UPDATE_SEQUENCE = 10
|
||||
|
||||
class Machine:
|
||||
def __init__(self, config: ConfigHelper) -> None:
|
||||
self.server = config.get_server()
|
||||
dist_info: Dict[str, Any]
|
||||
dist_info = {'name': distro.name(pretty=True)}
|
||||
dist_info.update(distro.info())
|
||||
dist_info['release_info'] = distro.distro_release_info()
|
||||
self.inside_container = False
|
||||
self.system_info: Dict[str, Any] = {
|
||||
'python': {
|
||||
"version": sys.version_info,
|
||||
"version_string": sys.version.replace("\n", " ")
|
||||
},
|
||||
'cpu_info': self._get_cpu_info(),
|
||||
'sd_info': self._get_sdcard_info(),
|
||||
'distribution': dist_info,
|
||||
'virtualization': self._check_inside_container()
|
||||
}
|
||||
self._update_log_rollover(log=True)
|
||||
providers: Dict[str, type] = {
|
||||
"none": BaseProvider,
|
||||
"systemd_cli": SystemdCliProvider,
|
||||
"systemd_dbus": SystemdDbusProvider
|
||||
}
|
||||
ptype = config.get('provider', 'systemd_dbus')
|
||||
pclass = providers.get(ptype)
|
||||
if pclass is None:
|
||||
raise config.error(f"Invalid Provider: {ptype}")
|
||||
self.sys_provider: BaseProvider = pclass(config)
|
||||
logging.info(f"Using System Provider: {ptype}")
|
||||
|
||||
self.server.register_endpoint(
|
||||
"/machine/reboot", ['POST'], self._handle_machine_request)
|
||||
self.server.register_endpoint(
|
||||
"/machine/shutdown", ['POST'], self._handle_machine_request)
|
||||
self.server.register_endpoint(
|
||||
"/machine/services/restart", ['POST'],
|
||||
self._handle_service_request)
|
||||
self.server.register_endpoint(
|
||||
"/machine/services/stop", ['POST'],
|
||||
self._handle_service_request)
|
||||
self.server.register_endpoint(
|
||||
"/machine/services/start", ['POST'],
|
||||
self._handle_service_request)
|
||||
self.server.register_endpoint(
|
||||
"/machine/system_info", ['GET'],
|
||||
self._handle_sysinfo_request)
|
||||
self.server.register_endpoint(
|
||||
"/machine/system_info", ['POST'],
|
||||
self._handle_sysinfo_request)
|
||||
# self.server.register_endpoint(
|
||||
# "/machine/dev_name", ['GET'],
|
||||
# self._handle_devname_request)
|
||||
|
||||
|
||||
self.server.register_notification("machine:service_state_changed")
|
||||
|
||||
# Register remote methods
|
||||
self.server.register_remote_method(
|
||||
"shutdown_machine", self.sys_provider.shutdown)
|
||||
self.server.register_remote_method(
|
||||
"reboot_machine", self.sys_provider.reboot)
|
||||
|
||||
# IP network shell commands
|
||||
shell_cmd: SCMDComp = self.server.load_component(
|
||||
config, 'shell_command')
|
||||
self.addr_cmd = shell_cmd.build_shell_command("ip -json address")
|
||||
iwgetbin = "/sbin/iwgetid"
|
||||
if not pathlib.Path(iwgetbin).exists():
|
||||
iwgetbin = "iwgetid"
|
||||
self.iwgetid_cmd = shell_cmd.build_shell_command(iwgetbin)
|
||||
self.init_evt = asyncio.Event()
|
||||
|
||||
def _update_log_rollover(self, log: bool = False) -> None:
|
||||
sys_info_msg = "\nSystem Info:"
|
||||
for header, info in self.system_info.items():
|
||||
sys_info_msg += f"\n\n***{header}***"
|
||||
if not isinstance(info, dict):
|
||||
sys_info_msg += f"\n {repr(info)}"
|
||||
else:
|
||||
for key, val in info.items():
|
||||
sys_info_msg += f"\n {key}: {val}"
|
||||
self.server.add_log_rollover_item('system_info', sys_info_msg, log=log)
|
||||
|
||||
    async def wait_for_init(self, timeout: Optional[float] = None) -> None:
        try:
            await asyncio.wait_for(self.init_evt.wait(), timeout)
        except asyncio.TimeoutError:
            pass

    async def component_init(self) -> None:
        await self.sys_provider.initialize()
        if not self.inside_container:
            virt_info = await self.sys_provider.check_virt_status()
            self.system_info['virtualization'] = virt_info
        await self._parse_network_interfaces(0, notify=False)
        pstats: ProcStats = self.server.lookup_component('proc_stats')
        pstats.register_stat_callback(self._parse_network_interfaces)
        available_svcs = self.sys_provider.get_available_services()
        avail_list = list(available_svcs.keys())
        self.system_info['available_services'] = avail_list
        self.system_info['service_state'] = available_svcs
        self.init_evt.set()

    async def _handle_machine_request(self, web_request: WebRequest) -> str:
        ep = web_request.get_endpoint()
        if self.inside_container:
            virt_id = self.system_info['virtualization'].get(
                'virt_identifier', "none")
            raise self.server.error(
                f"Cannot {ep.split('/')[-1]} from within a "
                f"{virt_id} container")
        if ep == "/machine/shutdown":
            await self.sys_provider.shutdown()
        elif ep == "/machine/reboot":
            await self.sys_provider.reboot()
        else:
            raise self.server.error("Unsupported machine request")
        return "ok"

    async def do_service_action(self,
                                action: str,
                                service_name: str
                                ) -> None:
        await self.sys_provider.do_service_action(action, service_name)

    async def _handle_service_request(self, web_request: WebRequest) -> str:
        name: str = web_request.get('service')
        action = web_request.get_endpoint().split('/')[-1]
        if name == "moonraker":
            if action != "restart":
                raise self.server.error(
                    f"Service action '{action}' not available for moonraker")
            event_loop = self.server.get_event_loop()
            event_loop.register_callback(self.do_service_action, action, name)
        elif self.sys_provider.is_service_available(name):
            await self.do_service_action(action, name)
        else:
            if name in ALLOWED_SERVICES:
                raise self.server.error(f"Service '{name}' not installed")
            raise self.server.error(
                f"Service '{name}' not allowed")
        return "ok"

    async def _handle_sysinfo_request(self,
                                      web_request: WebRequest
                                      ) -> Dict[str, Any]:
        dev_name = web_request.get_str('dev_name', default=None)
        if dev_name is not None:
            with open('dev_info.txt', mode='w') as note:
                note.write(dev_name)
        with open('dev_info.txt', 'r') as f:
            content = f.read()
        self.system_info["machine_name"] = content
        return {'system_info': self.system_info}

    def get_system_info(self) -> Dict[str, Any]:
        return self.system_info

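    # Per the SD card specification, the CID register read below encodes
    # the manufacturer id, OEM id, product name/revision, serial number
    # and manufacture date, while the CSD register encodes capacity
    # (layouts differ between CSD versions 1.0, 2.0 and 3.0).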
    def _get_sdcard_info(self) -> Dict[str, Any]:
        sd_info: Dict[str, Any] = {}
        cid_file = pathlib.Path(SD_CID_PATH)
        if not cid_file.exists():
            # No SDCard detected at mmcblk0
            return {}
        try:
            cid_text = cid_file.read_text().strip().lower()
            mid = cid_text[:2]
            sd_info['manufacturer_id'] = mid
            sd_info['manufacturer'] = SD_MFGRS.get(mid, "Unknown")
            sd_info['oem_id'] = cid_text[2:6]
            sd_info['product_name'] = bytes.fromhex(cid_text[6:16]).decode(
                encoding="ascii", errors="ignore")
            sd_info['product_revision'] = \
                f"{int(cid_text[16], 16)}.{int(cid_text[17], 16)}"
            sd_info['serial_number'] = cid_text[18:26]
            mfg_year = int(cid_text[27:29], 16) + 2000
            mfg_month = int(cid_text[29], 16)
            sd_info['manufacturer_date'] = f"{mfg_month}/{mfg_year}"
        except Exception:
            logging.info("Error reading SDCard CID Register")
            return {}
        sd_info['capacity'] = "Unknown"
        sd_info['total_bytes'] = 0
        csd_file = pathlib.Path(SD_CSD_PATH)
        # Read CSD Register
        try:
            csd_reg = bytes.fromhex(csd_file.read_text().strip())
            csd_type = (csd_reg[0] >> 6) & 0x3
            if csd_type == 0:
                # Standard Capacity (CSD Version 1.0)
                max_block_len: int = 2**(csd_reg[5] & 0xF)
                c_size = ((csd_reg[6] & 0x3) << 10) | (csd_reg[7] << 2) | \
                    ((csd_reg[8] >> 6) & 0x3)
                c_mult_reg = ((csd_reg[9] & 0x3) << 1) | (csd_reg[10] >> 7)
                c_mult = 2**(c_mult_reg + 2)
                total_bytes: int = (c_size + 1) * c_mult * max_block_len
                sd_info['capacity'] = f"{(total_bytes / (1024.0**2)):.1f} MiB"
            elif csd_type == 1:
                # High Capacity (CSD Version 2.0)
                c_size = ((csd_reg[7] & 0x3F) << 16) | (csd_reg[8] << 8) | \
                    csd_reg[9]
                total_bytes = (c_size + 1) * 512 * 1024
                sd_info['capacity'] = f"{(total_bytes / (1024.0**3)):.1f} GiB"
            elif csd_type == 2:
                # Ultra Capacity (CSD Version 3.0)
                c_size = ((csd_reg[6]) & 0xF) << 24 | (csd_reg[7] << 16) | \
                    (csd_reg[8] << 8) | csd_reg[9]
                total_bytes = (c_size + 1) * 512 * 1024
                sd_info['capacity'] = f"{(total_bytes / (1024.0**4)):.1f} TiB"
            else:
                # Invalid CSD, skip capacity check
                return sd_info
            sd_info['total_bytes'] = total_bytes
        except Exception:
            logging.info("Error Reading SDCard CSD Register")
        return sd_info

    def _get_cpu_info(self) -> Dict[str, Any]:
        cpu_file = pathlib.Path("/proc/cpuinfo")
        mem_file = pathlib.Path("/proc/meminfo")
        cpu_info = {
            'cpu_count': os.cpu_count(),
            'bits': platform.architecture()[0],
            'processor': platform.processor() or platform.machine(),
            'cpu_desc': "",
            'serial_number': "",
            'hardware_desc': "",
            'model': "",
            'total_memory': None,
            'memory_units': ""
        }
        if cpu_file.exists():
            try:
                cpu_text = cpu_file.read_text().strip()
                cpu_items = [item.strip() for item in cpu_text.split("\n\n")
                             if item.strip()]
                for item in cpu_items:
                    cpu_desc_match = re.search(r"model name\s+:\s+(.+)", item)
                    if cpu_desc_match is not None:
                        cpu_info['cpu_desc'] = cpu_desc_match.group(1).strip()
                        break
                hw_match = re.search(r"Hardware\s+:\s+(.+)", cpu_items[-1])
                if hw_match is not None:
                    cpu_info['hardware_desc'] = hw_match.group(1).strip()
                sn_match = re.search(r"Serial\s+:\s+0*(.+)", cpu_items[-1])
                if sn_match is not None:
                    cpu_info['serial_number'] = sn_match.group(1).strip()
                model_match = re.search(r"Model\s+:\s+(.+)", cpu_items[-1])
                if model_match is not None:
                    cpu_info['model'] = model_match.group(1).strip()
            except Exception:
                logging.info("Error Reading /proc/cpuinfo")
        if mem_file.exists():
            try:
                mem_text = mem_file.read_text().strip()
                for line in mem_text.split('\n'):
                    line = line.strip()
                    if line.startswith("MemTotal:"):
                        parts = line.split()
                        cpu_info['total_memory'] = int(parts[1])
                        cpu_info['memory_units'] = parts[2]
                        break
            except Exception:
                logging.info("Error Reading /proc/meminfo")
        return cpu_info

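    # Container detection heuristic: look for "docker" or "lxc" markers
    # in PID 1's cgroup file first, then fall back to checking whether
    # PID 1 is something other than init/systemd, refining the guess via
    # the /.dockerenv and /.dockerinit marker files that Docker creates.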
    def _check_inside_container(self) -> Dict[str, Any]:
        cgroup_file = pathlib.Path(CGROUP_PATH)
        virt_type = virt_id = "none"
        if cgroup_file.exists():
            try:
                data = cgroup_file.read_text()
                container_types = ["docker", "lxc"]
                for ct in container_types:
                    if ct in data:
                        self.inside_container = True
                        virt_type = "container"
                        virt_id = ct
                        break
            except Exception:
                logging.exception(f"Error reading {CGROUP_PATH}")

        # Fall back to process schedule check
        if not self.inside_container:
            sched_file = pathlib.Path(SCHED_PATH)
            if sched_file.exists():
                try:
                    data = sched_file.read_text().strip()
                    proc_name = data.split('\n')[0].split()[0]
                    if proc_name not in ["init", "systemd"]:
                        self.inside_container = True
                        virt_type = "container"
                        virt_id = "lxc"
                        if (
                            os.path.exists("/.dockerenv") or
                            os.path.exists("/.dockerinit")
                        ):
                            virt_id = "docker"
                except Exception:
                    logging.exception(f"Error reading {SCHED_PATH}")
        return {
            'virt_type': virt_type,
            'virt_identifier': virt_id
        }

    async def _parse_network_interfaces(self,
                                        sequence: int,
                                        notify: bool = True
                                        ) -> None:
        if sequence % NETWORK_UPDATE_SEQUENCE:
            return
        network: Dict[str, Any] = {}
        try:
            # get network interfaces
            resp = await self.addr_cmd.run_with_response(log_complete=False)
            decoded = json.loads(resp)
            for interface in decoded:
                if (
                    interface['operstate'] != "UP" or
                    interface['link_type'] != "ether" or
                    'address' not in interface
                ):
                    continue
                addresses: List[Dict[str, Any]] = [
                    {
                        'family': IP_FAMILIES[addr['family']],
                        'address': addr['local'],
                        'is_link_local': addr.get('scope', "") == "link"
                    }
                    for addr in interface.get('addr_info', [])
                    if 'family' in addr and 'local' in addr
                ]
                if not addresses:
                    continue
                network[interface['ifname']] = {
                    'mac_address': interface['address'],
                    'ip_addresses': addresses
                }
        except Exception:
            logging.exception("Error processing network update")
            return
        prev_network = self.system_info.get('network', {})
        if notify and network != prev_network:
            self.server.send_event("machine:net_state_changed", network)
        self.system_info['network'] = network

    async def get_public_network(self) -> Dict[str, Any]:
        wifis = await self._get_wifi_interfaces()
        public_intf = self._find_public_interface()
        ifname = public_intf["ifname"]
        is_wifi = ifname in wifis
        public_intf["is_wifi"] = is_wifi
        if is_wifi:
            public_intf["ssid"] = wifis[ifname]
        # TODO: Can we detect the private top level domain? That
        # would be ideal
        public_intf["hostname"] = socket.gethostname()
        return public_intf

    def _find_public_interface(self) -> Dict[str, Any]:
        src_ip = self._find_public_ip()
        networks = self.system_info.get("network", {})
        for ifname, ifinfo in networks.items():
            for addrinfo in ifinfo["ip_addresses"]:
                if addrinfo["is_link_local"]:
                    continue
                fam = addrinfo["family"]
                addr = addrinfo["address"]
                if fam == "ipv6" and src_ip is None:
                    ip = ipaddress.ip_address(addr)
                    if ip.is_global:
                        return {
                            "ifname": ifname,
                            "address": addr,
                            "family": fam
                        }
                elif src_ip == addr:
                    return {
                        "ifname": ifname,
                        "address": addr,
                        "family": fam
                    }
        return {
            "ifname": "",
            "address": src_ip or "",
            "family": ""
        }

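    # Note: connecting a UDP socket sends no packets; it merely asks the
    # kernel to select the source address it would use to reach the
    # target, which reveals the machine's primary outbound IPv4 address.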
    def _find_public_ip(self) -> Optional[str]:
        # Check for an IPv4 Source IP
        # NOTE: It should also be possible to extract this from
        # the routing table, ie: ip -json route
        # It would be an entry with a "gateway" with the lowest
        # metric. Might also be able to get IPv6 info from this.
        # However, it would be better to use NETLINK for this rather
        # than run another shell command
        src_ip: Optional[str] = None
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.settimeout(0)
            s.connect(('10.255.255.255', 1))
            src_ip = s.getsockname()[0]
        except Exception:
            pass
        finally:
            s.close()
        return src_ip

    async def _get_wifi_interfaces(self) -> Dict[str, Any]:
        # get wifi interfaces
        shell_cmd: SCMDComp = self.server.lookup_component('shell_command')
        wifi_intfs: Dict[str, Any] = {}
        try:
            resp = await self.iwgetid_cmd.run_with_response(log_complete=False)
        except shell_cmd.error:
            logging.exception("Failed to run 'iwgetid' command")
            return {}
        if resp:
            for line in resp.split("\n"):
                parts = line.strip().split(maxsplit=1)
                wifi_intfs[parts[0]] = parts[1].split(":")[-1].strip('"')
        return wifi_intfs


class BaseProvider:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.available_services: Dict[str, Dict[str, str]] = {}
        self.shell_cmd: SCMDComp = self.server.load_component(
            config, 'shell_command')

    async def initialize(self) -> None:
        pass

    async def shutdown(self) -> None:
        await self.shell_cmd.exec_cmd("sudo shutdown now")

    async def reboot(self) -> None:
        await self.shell_cmd.exec_cmd("sudo shutdown -r now")

    async def do_service_action(self,
                                action: str,
                                service_name: str
                                ) -> None:
        raise self.server.error("Service Actions Not Available", 503)

    async def check_virt_status(self) -> Dict[str, Any]:
        return {
            'virt_type': "unknown",
            'virt_identifier': "unknown"
        }

    def is_service_available(self, service: str) -> bool:
        return service in self.available_services

    def get_available_services(self) -> Dict[str, Dict[str, str]]:
        return self.available_services

class SystemdCliProvider(BaseProvider):
    async def initialize(self) -> None:
        await self._detect_active_services()
        if self.available_services:
            svcs = list(self.available_services.keys())
            self.svc_cmd = self.shell_cmd.build_shell_command(
                "systemctl show -p ActiveState,SubState --value "
                f"{' '.join(svcs)}")
            await self._update_service_status(0, notify=True)
            pstats: ProcStats = self.server.lookup_component('proc_stats')
            pstats.register_stat_callback(self._update_service_status)

    async def do_service_action(self,
                                action: str,
                                service_name: str
                                ) -> None:
        await self.shell_cmd.exec_cmd(
            f'sudo systemctl {action} {service_name}')

    async def check_virt_status(self) -> Dict[str, Any]:
        # Fallback virtualization check
        virt_id = virt_type = "none"

        # Check for any form of virtualization. This will report the innermost
        # virtualization type in the event that nested virtualization is used
        try:
            resp: str = await self.shell_cmd.exec_cmd("systemd-detect-virt")
        except self.shell_cmd.error:
            pass
        else:
            virt_id = resp.strip()

        if virt_id != "none":
            # Check explicitly for container virtualization
            try:
                resp = await self.shell_cmd.exec_cmd(
                    "systemd-detect-virt --container")
            except self.shell_cmd.error:
                virt_type = "vm"
            else:
                if virt_id == resp.strip():
                    virt_type = "container"
                else:
                    # Moonraker is run from within a VM inside a container
                    virt_type = "vm"
            logging.info(
                f"Virtualized Environment Detected, Type: {virt_type} "
                f"id: {virt_id}")
        else:
            logging.info("No Virtualization Detected")
        return {
            'virt_type': virt_type,
            'virt_identifier': virt_id
        }

    async def _detect_active_services(self):
        try:
            resp: str = await self.shell_cmd.exec_cmd(
                "systemctl list-units --all --type=service --plain"
                " --no-legend")
            lines = resp.split('\n')
            services = [line.split()[0].strip() for line in lines
                        if ".service" in line.strip()]
        except Exception:
            services = []
        for svc in services:
            sname = svc.rsplit('.', 1)[0]
            for allowed in ALLOWED_SERVICES:
                if sname.startswith(allowed):
                    self.available_services[sname] = {
                        'active_state': "unknown",
                        'sub_state': "unknown"
                    }

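    # `systemctl show -p ActiveState,SubState --value` prints the two
    # property values on separate lines for each unit, with consecutive
    # units separated by a blank line, so the response below is split on
    # "\n\n" and paired with the known service list in order.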
    async def _update_service_status(self,
                                     sequence: int,
                                     notify: bool = True
                                     ) -> None:
        if sequence % 2:
            # Update every other sequence
            return
        svcs = list(self.available_services.keys())
        try:
            resp = await self.svc_cmd.run_with_response(log_complete=False)
            for svc, state in zip(svcs, resp.strip().split('\n\n')):
                active_state, sub_state = state.split('\n', 1)
                new_state: Dict[str, str] = {
                    'active_state': active_state,
                    'sub_state': sub_state
                }
                if self.available_services[svc] != new_state:
                    self.available_services[svc] = new_state
                    if notify:
                        self.server.send_event(
                            "machine:service_state_changed",
                            {svc: new_state})
        except Exception:
            logging.exception("Error processing service state update")


class SystemdDbusProvider(BaseProvider):
    def __init__(self, config: ConfigHelper) -> None:
        super().__init__(config)
        self.dbus_mgr: DbusManager = self.server.lookup_component(
            "dbus_manager")
        self.login_mgr: Optional[ProxyInterface] = None
        self.props: List[Tuple[ProxyInterface, Callable]] = []

    async def initialize(self) -> None:
        if not self.dbus_mgr.is_connected():
            self.server.add_warning(
                "[machine]: DBus Connection Not available, systemd "
                "service tracking and actions are disabled")
            return
        # Get the systemd manager interface
        self.systemd_mgr = await self.dbus_mgr.get_interface(
            "org.freedesktop.systemd1",
            "/org/freedesktop/systemd1",
            "org.freedesktop.systemd1.Manager"
        )
        # Check for systemd PolicyKit Permissions
        await self.dbus_mgr.check_permission(
            "org.freedesktop.systemd1.manage-units",
            "System Service Management (start, stop, restart) "
            "will be disabled")
        await self.dbus_mgr.check_permission(
            "org.freedesktop.login1.power-off",
            "The shutdown API will be disabled"
        )
        await self.dbus_mgr.check_permission(
            "org.freedesktop.login1.power-off-multiple-sessions",
            "The shutdown API will be disabled if multiple user "
            "sessions are open."
        )
        try:
            # Get the login manager interface
            self.login_mgr = await self.dbus_mgr.get_interface(
                "org.freedesktop.login1",
                "/org/freedesktop/login1",
                "org.freedesktop.login1.Manager"
            )
        except self.dbus_mgr.DbusError as e:
            logging.info(
                "Unable to acquire the systemd-logind D-Bus interface, "
                f"falling back to CLI Reboot and Shutdown APIs. {e}")
            self.login_mgr = None
        else:
            # Check for logind permissions
            await self.dbus_mgr.check_permission(
                "org.freedesktop.login1.reboot",
                "The reboot API will be disabled"
            )
            await self.dbus_mgr.check_permission(
                "org.freedesktop.login1.reboot-multiple-sessions",
                "The reboot API will be disabled if multiple user "
                "sessions are open."
            )
        await self._detect_active_services()

    async def reboot(self) -> None:
        if self.login_mgr is None:
            await super().reboot()
            return
        await self.login_mgr.call_reboot(False)  # type: ignore

    async def shutdown(self) -> None:
        if self.login_mgr is None:
            await super().shutdown()
            return
        await self.login_mgr.call_power_off(False)  # type: ignore

    async def do_service_action(self,
                                action: str,
                                service_name: str
                                ) -> None:
        if not self.dbus_mgr.is_connected():
            raise self.server.error("DBus Not Connected", 503)
        mgr = self.systemd_mgr
        if not service_name.endswith(".service"):
            service_name += ".service"
        if action == "start":
            await mgr.call_start_unit(service_name, "replace")  # type: ignore
        elif action == "stop":
            await mgr.call_stop_unit(service_name, "replace")  # type: ignore
        elif action == "restart":
            await mgr.call_restart_unit(  # type: ignore
                service_name, "replace")
        else:
            raise self.server.error(f"Invalid service action: {action}")

    async def check_virt_status(self) -> Dict[str, Any]:
        if not self.dbus_mgr.is_connected():
            return {
                'virt_type': "unknown",
                'virt_identifier': "unknown"
            }
        mgr = self.systemd_mgr
        virt_id = virt_type = "none"
        virt: str = await mgr.get_virtualization()  # type: ignore
        virt = virt.strip()
        if virt:
            virt_id = virt
            container_types = [
                "openvz", "lxc", "lxc-libvirt", "systemd-nspawn",
                "docker", "podman", "rkt", "wsl", "proot", "pouch"]
            if virt_id in container_types:
                virt_type = "container"
            else:
                virt_type = "vm"
            logging.info(
                f"Virtualized Environment Detected, Type: {virt_type} "
                f"id: {virt_id}")
        else:
            logging.info("No Virtualization Detected")
        return {
            'virt_type': virt_type,
            'virt_identifier': virt_id
        }

    async def _detect_active_services(self) -> None:
        # Get loaded services
        mgr = self.systemd_mgr
        patterns = [f"{svc}*.service" for svc in ALLOWED_SERVICES]
        units = await mgr.call_list_units_by_patterns(  # type: ignore
            ["loaded"], patterns)
        for unit in units:
            name: str = unit[0].split('.')[0]
            state: str = unit[3]
            substate: str = unit[4]
            dbus_path: str = unit[6]
            if name in self.available_services:
                continue
            self.available_services[name] = {
                'active_state': state,
                'sub_state': substate
            }
            # setup state monitoring
            props = await self.dbus_mgr.get_interface(
                "org.freedesktop.systemd1", dbus_path,
                "org.freedesktop.DBus.Properties"
            )
            prop_callback = self._create_properties_callback(name)
            self.props.append((props, prop_callback))
            props.on_properties_changed(  # type: ignore
                prop_callback)

    def _create_properties_callback(self, name) -> Callable:
        def prop_wrapper(dbus_obj: str,
                         changed_props: Dict[str, Variant],
                         invalid_props: Dict[str, Variant]
                         ) -> None:
            if dbus_obj != 'org.freedesktop.systemd1.Unit':
                return
            self._on_service_update(name, changed_props)
        return prop_wrapper

    def _on_service_update(self,
                           service_name: str,
                           changed_props: Dict[str, Variant]
                           ) -> None:
        if service_name not in self.available_services:
            return
        svc = self.available_services[service_name]
        notify = False
        if "ActiveState" in changed_props:
            state: str = changed_props['ActiveState'].value
            if state != svc['active_state']:
                notify = True
                svc['active_state'] = state
        if "SubState" in changed_props:
            state = changed_props['SubState'].value
            if state != svc['sub_state']:
                notify = True
                svc['sub_state'] = state
        if notify:
            self.server.send_event("machine:service_state_changed",
                                   {service_name: dict(svc)})


def load_component(config: ConfigHelper) -> Machine:
    return Machine(config)
757
moonraker/components/mqtt.py
Normal file
@@ -0,0 +1,757 @@
|
||||
# MQTT client implementation for Moonraker
|
||||
#
|
||||
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
|
||||
#
|
||||
# This file may be distributed under the terms of the GNU GPLv3 license.
|
||||
|
||||
from __future__ import annotations
|
||||
import socket
|
||||
import asyncio
|
||||
import logging
|
||||
import json
|
||||
import pathlib
|
||||
import ssl
|
||||
from collections import deque
|
||||
import paho.mqtt.client as paho_mqtt
|
||||
from websockets import Subscribable, WebRequest, JsonRPC, APITransport
|
||||
|
||||
# Annotation imports
|
||||
from typing import (
|
||||
List,
|
||||
Optional,
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Callable,
|
||||
Coroutine,
|
||||
Dict,
|
||||
Union,
|
||||
Tuple,
|
||||
Awaitable,
|
||||
Deque,
|
||||
)
|
||||
if TYPE_CHECKING:
|
||||
from app import APIDefinition
|
||||
from confighelper import ConfigHelper
|
||||
from klippy_connection import KlippyConnection as Klippy
|
||||
FlexCallback = Callable[[bytes], Optional[Coroutine]]
|
||||
RPCCallback = Callable[..., Coroutine]
|
||||
|
||||
DUP_API_REQ_CODE = -10000
|
||||
MQTT_PROTOCOLS = {
|
||||
'v3.1': paho_mqtt.MQTTv31,
|
||||
'v3.1.1': paho_mqtt.MQTTv311,
|
||||
'v5': paho_mqtt.MQTTv5
|
||||
}
|
||||
|
||||
class ExtPahoClient(paho_mqtt.Client):
    # Override reconnection to take a connected socket. This allows
    # Moonraker to create the socket connection asynchronously
    def reconnect(self, sock: Optional[socket.socket] = None):
        """Reconnect the client after a disconnect. Can only be called after
        connect()/connect_async()."""
        if len(self._host) == 0:
            raise ValueError('Invalid host.')
        if self._port <= 0:
            raise ValueError('Invalid port number.')

        self._in_packet = {
            "command": 0,
            "have_remaining": 0,
            "remaining_count": [],
            "remaining_mult": 1,
            "remaining_length": 0,
            "packet": b"",
            "to_process": 0,
            "pos": 0}

        with self._out_packet_mutex:
            self._out_packet = deque()  # type: ignore

        with self._current_out_packet_mutex:
            self._current_out_packet = None

        with self._msgtime_mutex:
            self._last_msg_in = paho_mqtt.time_func()
            self._last_msg_out = paho_mqtt.time_func()

        self._ping_t = 0
        self._state = paho_mqtt.mqtt_cs_new

        self._sock_close()

        # Put messages in progress in a valid state.
        self._messages_reconnect_reset()

        if sock is None:
            sock = self._create_socket_connection()

        if self._ssl:
            # SSL is only supported when SSLContext is available
            # (implies Python >= 2.7.9 or >= 3.2)

            verify_host = not self._tls_insecure
            try:
                # Try with server_hostname, even if it's not supported in
                # certain scenarios
                sock = self._ssl_context.wrap_socket(
                    sock,
                    server_hostname=self._host,
                    do_handshake_on_connect=False,
                )
            except ssl.CertificateError:
                # CertificateError is derived from ValueError
                raise
            except ValueError:
                # Python version requires SNI in order to handle
                # server_hostname, but SNI is not available
                sock = self._ssl_context.wrap_socket(
                    sock,
                    do_handshake_on_connect=False,
                )
            else:
                # If SSL context has already checked hostname, then don't need
                # to do it again
                if (hasattr(self._ssl_context, 'check_hostname') and
                        self._ssl_context.check_hostname):
                    verify_host = False

            assert isinstance(sock, ssl.SSLSocket)
            sock.settimeout(self._keepalive)
            sock.do_handshake()

            if verify_host:
                ssl.match_hostname(sock.getpeercert(), self._host)

        if self._transport == "websockets":
            sock.settimeout(self._keepalive)
            sock = paho_mqtt.WebsocketWrapper(
                sock, self._host, self._port, self._ssl,
                self._websocket_path, self._websocket_extra_headers
            )

        self._sock = sock
        assert self._sock is not None
        self._sock.setblocking(False)
        self._registered_write = False
        self._call_socket_open()

        return self._send_connect(self._keepalive)


class SubscriptionHandle:
    def __init__(self, topic: str, callback: FlexCallback) -> None:
        self.callback = callback
        self.topic = topic


class BrokerAckLogger:
    def __init__(self, topics: List[str], action: str) -> None:
        self.topics = topics
        self.action = action

    def __call__(self, fut: asyncio.Future) -> None:
        if self.action == "subscribe":
            res: Union[List[int], List[paho_mqtt.ReasonCodes]]
            res = fut.result()
            log_msg = "MQTT Subscriptions Acknowledged"
            if len(res) != len(self.topics):
                log_msg += "\nTopic / QOS count mismatch, " \
                    f"\nTopics: {self.topics} " \
                    f"\nQoS responses: {res}"
            else:
                for topic, qos in zip(self.topics, res):
                    log_msg += f"\n Topic: {topic} | "
                    if isinstance(qos, paho_mqtt.ReasonCodes):
                        log_msg += qos.getName()
                    else:
                        log_msg += f"Granted QoS {qos}"
        elif self.action == "unsubscribe":
            log_msg = "MQTT Unsubscribe Acknowledged"
            for topic in self.topics:
                log_msg += f"\n Topic: {topic}"
        else:
            log_msg = f"Unknown action: {self.action}"
        logging.debug(log_msg)


SubscribedDict = Dict[str, Tuple[int, List[SubscriptionHandle]]]

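# AIOHelper bridges paho-mqtt's synchronous network loop into asyncio:
# paho's socket open/close and register/unregister-write callbacks are
# mapped onto the event loop's add_reader()/add_writer() hooks, so no
# polling thread is needed; misc_loop() stands in for paho's loop timer.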
class AIOHelper:
    def __init__(self, client: paho_mqtt.Client) -> None:
        self.loop = asyncio.get_running_loop()
        self.client = client
        self.client.on_socket_open = self._on_socket_open
        self.client.on_socket_close = self._on_socket_close
        self.client._on_socket_register_write = self._on_socket_register_write
        self.client._on_socket_unregister_write = \
            self._on_socket_unregister_write
        self.misc_task: Optional[asyncio.Task] = None

    def _on_socket_open(self,
                        client: paho_mqtt.Client,
                        userdata: Any,
                        sock: socket.socket
                        ) -> None:
        logging.info("MQTT Socket Opened")
        self.loop.add_reader(sock, client.loop_read)
        self.misc_task = self.loop.create_task(self.misc_loop())

    def _on_socket_close(self,
                         client: paho_mqtt.Client,
                         userdata: Any,
                         sock: socket.socket
                         ) -> None:
        logging.info("MQTT Socket Closed")
        self.loop.remove_reader(sock)
        if self.misc_task is not None:
            self.misc_task.cancel()

    def _on_socket_register_write(self,
                                  client: paho_mqtt.Client,
                                  userdata: Any,
                                  sock: socket.socket
                                  ) -> None:
        self.loop.add_writer(sock, client.loop_write)

    def _on_socket_unregister_write(self,
                                    client: paho_mqtt.Client,
                                    userdata: Any,
                                    sock: socket.socket
                                    ) -> None:
        self.loop.remove_writer(sock)

    async def misc_loop(self) -> None:
        while self.client.loop_misc() == paho_mqtt.MQTT_ERR_SUCCESS:
            try:
                await asyncio.sleep(1)
            except asyncio.CancelledError:
                break
        logging.info("MQTT Misc Loop Complete")


class MQTTClient(APITransport, Subscribable):
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.event_loop = self.server.get_event_loop()
        self.klippy: Klippy = self.server.lookup_component("klippy_connection")
        self.address: str = config.get('address')
        self.port: int = config.getint('port', 1883)
        user = config.gettemplate('username', None)
        self.user_name: Optional[str] = None
        if user:
            self.user_name = user.render()
        pw_file_path = config.get('password_file', None, deprecate=True)
        pw_template = config.gettemplate('password', None)
        self.password: Optional[str] = None
        if pw_file_path is not None:
            pw_file = pathlib.Path(pw_file_path).expanduser().absolute()
            if not pw_file.exists():
                raise config.error(
                    f"Password file '{pw_file}' does not exist")
            self.password = pw_file.read_text().strip()
        if pw_template is not None:
            self.password = pw_template.render()
        protocol = config.get('mqtt_protocol', "v3.1.1")
        self.protocol = MQTT_PROTOCOLS.get(protocol, None)
        if self.protocol is None:
            raise config.error(
                f"Invalid value '{protocol}' for option 'mqtt_protocol' "
                "in section [mqtt]. Must be one of "
                f"{MQTT_PROTOCOLS.values()}")
        self.instance_name = config.get('instance_name', socket.gethostname())
        if '+' in self.instance_name or '#' in self.instance_name:
            raise config.error(
                "Option 'instance_name' in section [mqtt] cannot "
                "contain a wildcard.")
        self.qos = config.getint("default_qos", 0)
        if self.qos > 2 or self.qos < 0:
            raise config.error(
                "Option 'default_qos' in section [mqtt] must be "
                "between 0 and 2")
        self.client = ExtPahoClient(protocol=self.protocol)
        self.client.on_connect = self._on_connect
        self.client.on_message = self._on_message
        self.client.on_disconnect = self._on_disconnect
        self.client.on_publish = self._on_publish
        self.client.on_subscribe = self._on_subscribe
        self.client.on_unsubscribe = self._on_unsubscribe
        self.connect_evt: asyncio.Event = asyncio.Event()
        self.disconnect_evt: Optional[asyncio.Event] = None
        self.connect_task: Optional[asyncio.Task] = None
        self.subscribed_topics: SubscribedDict = {}
        self.pending_responses: List[asyncio.Future] = []
        self.pending_acks: Dict[int, asyncio.Future] = {}

        self.server.register_endpoint(
            "/server/mqtt/publish", ["POST"],
            self._handle_publish_request,
            transports=["http", "websocket", "internal"])
        self.server.register_endpoint(
            "/server/mqtt/subscribe", ["POST"],
            self._handle_subscription_request,
            transports=["http", "websocket", "internal"])

        # Subscribe to API requests
        self.json_rpc = JsonRPC(transport="MQTT")
        self.api_request_topic = f"{self.instance_name}/moonraker/api/request"
        self.api_resp_topic = f"{self.instance_name}/moonraker/api/response"
        self.klipper_status_topic = f"{self.instance_name}/klipper/status"
        self.moonraker_status_topic = f"{self.instance_name}/moonraker/status"
        status_cfg: Dict[str, Any] = config.getdict("status_objects", {},
                                                    allow_empty_fields=True)
        self.status_objs: Dict[str, Any] = {}
        for key, val in status_cfg.items():
            if val is not None:
                self.status_objs[key] = [v.strip() for v in val.split(',')
                                         if v.strip()]
            else:
                self.status_objs[key] = None
        if status_cfg:
            logging.debug(f"MQTT: Status Objects Set: {self.status_objs}")
        self.server.register_event_handler("server:klippy_identified",
                                           self._handle_klippy_identified)

        self.timestamp_deque: Deque = deque(maxlen=20)
        self.api_qos = config.getint('api_qos', self.qos)
        if config.getboolean("enable_moonraker_api", True):
            api_cache = self.server.register_api_transport("mqtt", self)
            for api_def in api_cache.values():
                if "mqtt" in api_def.supported_transports:
                    self.register_api_handler(api_def)
            self.subscribe_topic(self.api_request_topic,
                                 self._process_api_request,
                                 self.api_qos)

        self.server.register_remote_method("publish_mqtt_topic",
                                           self._publish_from_klipper)
        logging.info(
            f"\nReserved MQTT topics:\n"
            f"API Request: {self.api_request_topic}\n"
            f"API Response: {self.api_resp_topic}\n"
            f"Moonraker Status: {self.moonraker_status_topic}\n"
            f"Klipper Status: {self.klipper_status_topic}")

    async def component_init(self) -> None:
        # We must wait for the IOLoop (asyncio event loop) to start
        # prior to retrieving it
        self.helper = AIOHelper(self.client)
        if self.user_name is not None:
            self.client.username_pw_set(self.user_name, self.password)
        self.client.will_set(self.moonraker_status_topic,
                             payload=json.dumps({'server': 'offline'}),
                             qos=self.qos, retain=True)
        self.client.connect_async(self.address, self.port)
        self.connect_task = self.event_loop.create_task(
            self._do_reconnect(first=True)
        )

    async def _handle_klippy_identified(self) -> None:
        if self.status_objs:
            args = {'objects': self.status_objs}
            try:
                await self.klippy.request(
                    WebRequest("objects/subscribe", args, conn=self))
            except self.server.error:
                pass

    def _on_message(self,
                    client: paho_mqtt.Client,
                    user_data: Any,
                    message: paho_mqtt.MQTTMessage
                    ) -> None:
        topic = message.topic
        if topic in self.subscribed_topics:
            cb_hdls = self.subscribed_topics[topic][1]
            for hdl in cb_hdls:
                self.event_loop.register_callback(
                    hdl.callback, message.payload)
        else:
            logging.debug(
                f"Unregistered MQTT Topic Received: {topic}, "
                f"payload: {message.payload.decode()}")

    def _on_connect(self,
                    client: paho_mqtt.Client,
                    user_data: Any,
                    flags: Dict[str, Any],
                    reason_code: Union[int, paho_mqtt.ReasonCodes],
                    properties: Optional[paho_mqtt.Properties] = None
                    ) -> None:
        logging.info("MQTT Client Connected")
        if reason_code == 0:
            self.publish_topic(self.moonraker_status_topic,
                               {'server': 'online'}, retain=True)
            subs = [(k, v[0]) for k, v in self.subscribed_topics.items()]
            if subs:
                res, msg_id = client.subscribe(subs)
                if msg_id is not None:
                    sub_fut: asyncio.Future = asyncio.Future()
                    topics = list(self.subscribed_topics.keys())
                    sub_fut.add_done_callback(
                        BrokerAckLogger(topics, "subscribe"))
                    self.pending_acks[msg_id] = sub_fut
            self.connect_evt.set()
            self.server.send_event("mqtt:connected")
        else:
            if isinstance(reason_code, int):
                err_str = paho_mqtt.connack_string(reason_code)
            else:
                err_str = reason_code.getName()
            self.server.set_failed_component("mqtt")
            self.server.add_warning(f"MQTT Connection Failed: {err_str}")

    def _on_disconnect(self,
                       client: paho_mqtt.Client,
                       user_data: Any,
                       reason_code: int,
                       properties: Optional[paho_mqtt.Properties] = None
                       ) -> None:
        if self.disconnect_evt is not None:
            self.disconnect_evt.set()
        elif self.is_connected():
            # The server connection was dropped, attempt to reconnect
            logging.info("MQTT Server Disconnected, reason: "
                         f"{paho_mqtt.error_string(reason_code)}")
            if self.connect_task is None:
                self.connect_task = asyncio.create_task(self._do_reconnect())
            self.server.send_event("mqtt:disconnected")
        self.connect_evt.clear()

    def _on_publish(self,
                    client: paho_mqtt.Client,
                    user_data: Any,
                    msg_id: int
                    ) -> None:
        pub_fut = self.pending_acks.pop(msg_id, None)
        if pub_fut is not None and not pub_fut.done():
            pub_fut.set_result(None)

    def _on_subscribe(self,
                      client: paho_mqtt.Client,
                      user_data: Any,
                      msg_id: int,
                      flex: Union[List[int], List[paho_mqtt.ReasonCodes]],
                      properties: Optional[paho_mqtt.Properties] = None
                      ) -> None:
        sub_fut = self.pending_acks.pop(msg_id, None)
        if sub_fut is not None and not sub_fut.done():
            sub_fut.set_result(flex)

    def _on_unsubscribe(self,
                        client: paho_mqtt.Client,
                        user_data: Any,
                        msg_id: int,
                        properties: Optional[paho_mqtt.Properties] = None,
                        reasoncodes: Optional[paho_mqtt.ReasonCodes] = None
                        ) -> None:
        unsub_fut = self.pending_acks.pop(msg_id, None)
        if unsub_fut is not None and not unsub_fut.done():
            unsub_fut.set_result(None)

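    # Reconnect strategy: the TCP socket is opened asynchronously on the
    # event loop (with a 2 second delay between retries), then handed to
    # ExtPahoClient.reconnect() above so the blocking connect never runs
    # on Moonraker's event loop thread.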
async def _do_reconnect(self, first: bool = False) -> None:
|
||||
logging.info("Attempting MQTT Connect/Reconnect")
|
||||
last_err: Exception = Exception()
|
||||
while True:
|
||||
if not first:
|
||||
try:
|
||||
await asyncio.sleep(2.)
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
first = False
|
||||
try:
|
||||
sock = await self.event_loop.create_socket_connection(
|
||||
(self.address, self.port), timeout=10
|
||||
)
|
||||
self.client.reconnect(sock)
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
except Exception as e:
|
||||
if type(last_err) != type(e) or last_err.args != e.args:
|
||||
logging.exception("MQTT Connection Error")
|
||||
last_err = e
|
||||
continue
|
||||
self.client.socket().setsockopt(
|
||||
socket.SOL_SOCKET, socket.SO_SNDBUF, 2048)
|
||||
break
|
||||
self.connect_task = None
|
||||
|
||||
async def wait_connection(self, timeout: Optional[float] = None) -> bool:
|
||||
try:
|
||||
await asyncio.wait_for(self.connect_evt.wait(), timeout)
|
||||
except asyncio.TimeoutError:
|
||||
return False
|
||||
return True
|
||||
|
||||
def is_connected(self) -> bool:
|
||||
return self.connect_evt.is_set()
|
||||
|
||||
def subscribe_topic(self,
|
||||
topic: str,
|
||||
callback: FlexCallback,
|
||||
qos: Optional[int] = None
|
||||
) -> SubscriptionHandle:
|
||||
if '#' in topic or '+' in topic:
|
||||
raise self.server.error("Wildcards may not be used")
|
||||
qos = qos or self.qos
|
||||
if qos > 2 or qos < 0:
|
||||
raise self.server.error("QOS must be between 0 and 2")
|
||||
hdl = SubscriptionHandle(topic, callback)
|
||||
sub_handles = [hdl]
|
||||
need_sub = True
|
||||
if topic in self.subscribed_topics:
|
||||
prev_qos, sub_handles = self.subscribed_topics[topic]
|
||||
qos = max(qos, prev_qos)
|
||||
sub_handles.append(hdl)
|
||||
need_sub = qos != prev_qos
|
||||
self.subscribed_topics[topic] = (qos, sub_handles)
|
||||
if self.is_connected() and need_sub:
|
||||
res, msg_id = self.client.subscribe(topic, qos)
|
||||
if msg_id is not None:
|
||||
sub_fut: asyncio.Future = asyncio.Future()
|
||||
sub_fut.add_done_callback(
|
||||
BrokerAckLogger([topic], "subscribe"))
|
||||
self.pending_acks[msg_id] = sub_fut
|
||||
return hdl

    def unsubscribe(self, hdl: SubscriptionHandle) -> None:
        topic = hdl.topic
        if topic in self.subscribed_topics:
            sub_hdls = self.subscribed_topics[topic][1]
            try:
                sub_hdls.remove(hdl)
            except Exception:
                pass
            if not sub_hdls:
                del self.subscribed_topics[topic]
                res, msg_id = self.client.unsubscribe(topic)
                if msg_id is not None:
                    unsub_fut: asyncio.Future = asyncio.Future()
                    unsub_fut.add_done_callback(
                        BrokerAckLogger([topic], "unsubscribe"))
                    self.pending_acks[msg_id] = unsub_fut

    def publish_topic(self,
                      topic: str,
                      payload: Any = None,
                      qos: Optional[int] = None,
                      retain: bool = False
                      ) -> Awaitable[None]:
        qos = qos or self.qos
        if qos > 2 or qos < 0:
            raise self.server.error("QOS must be between 0 and 2")
        pub_fut: asyncio.Future = asyncio.Future()
        if isinstance(payload, (dict, list)):
            try:
                payload = json.dumps(payload)
            except TypeError:
                # json.dumps() raises TypeError for objects it cannot encode
                raise self.server.error(
                    "Dict or List is not json encodable") from None
        elif isinstance(payload, bool):
            payload = str(payload).lower()
        try:
            msg_info = self.client.publish(topic, payload, qos, retain)
            if msg_info.is_published():
                pub_fut.set_result(None)
            else:
                if qos == 0:
                    # There is no delivery guarantee for qos == 0, so
                    # it is possible that the on_publish event will
                    # not be called if paho mqtt encounters an error
                    # during publication.  Return immediately as
                    # a workaround.
                    if msg_info.rc != paho_mqtt.MQTT_ERR_SUCCESS:
                        err_str = paho_mqtt.error_string(msg_info.rc)
                        pub_fut.set_exception(self.server.error(
                            f"MQTT Publish Error: {err_str}", 503))
                    else:
                        pub_fut.set_result(None)
                    return pub_fut
                self.pending_acks[msg_info.mid] = pub_fut
        except ValueError:
            pub_fut.set_exception(self.server.error(
                "MQTT Message Queue Full", 529))
        except Exception as e:
            pub_fut.set_exception(self.server.error(
                f"MQTT Publish Error: {e}", 503))
        return pub_fut

    async def publish_topic_with_response(self,
                                          topic: str,
                                          response_topic: str,
                                          payload: Any = None,
                                          qos: Optional[int] = None,
                                          retain: bool = False,
                                          timeout: Optional[float] = None
                                          ) -> bytes:
        qos = qos or self.qos
        if qos > 2 or qos < 0:
            raise self.server.error("QOS must be between 0 and 2")
        resp_fut: asyncio.Future = asyncio.Future()
        resp_hdl = self.subscribe_topic(
            response_topic, resp_fut.set_result, qos)
        self.pending_responses.append(resp_fut)
        try:
            await asyncio.wait_for(self.publish_topic(
                topic, payload, qos, retain), timeout)
            await asyncio.wait_for(resp_fut, timeout)
        except asyncio.TimeoutError:
            logging.info(f"Response to request {topic} timed out")
            raise self.server.error("MQTT Request Timed Out", 504)
        finally:
            try:
                self.pending_responses.remove(resp_fut)
            except Exception:
                pass
            self.unsubscribe(resp_hdl)
        return resp_fut.result()
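
    # Usage sketch (hedged; the topic names here are illustrative, not part
    # of this module): publish a request and wait up to 5 seconds for a
    # reply on a separate response topic.
    #
    #   resp = await mqtt.publish_topic_with_response(
    #       "my_printer/request", "my_printer/response",
    #       payload={"cmd": "ping"}, timeout=5.)
    #   # resp is the raw bytes payload received on "my_printer/response"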

    async def _handle_publish_request(self,
                                      web_request: WebRequest
                                      ) -> Dict[str, Any]:
        topic: str = web_request.get_str("topic")
        payload: Any = web_request.get("payload", None)
        qos: int = web_request.get_int("qos", self.qos)
        retain: bool = web_request.get_boolean("retain", False)
        timeout: Optional[float] = web_request.get_float('timeout', None)
        try:
            await asyncio.wait_for(self.publish_topic(
                topic, payload, qos, retain), timeout)
        except asyncio.TimeoutError:
            raise self.server.error("MQTT Publish Timed Out", 504)
        return {
            "topic": topic
        }

    async def _handle_subscription_request(self,
                                           web_request: WebRequest
                                           ) -> Dict[str, Any]:
        topic: str = web_request.get_str("topic")
        qos: int = web_request.get_int("qos", self.qos)
        timeout: Optional[float] = web_request.get_float('timeout', None)
        resp: asyncio.Future = asyncio.Future()
        hdl: Optional[SubscriptionHandle] = None
        try:
            hdl = self.subscribe_topic(topic, resp.set_result, qos)
            self.pending_responses.append(resp)
            await asyncio.wait_for(resp, timeout)
            ret: bytes = resp.result()
        except asyncio.TimeoutError:
            raise self.server.error("MQTT Subscribe Timed Out", 504)
        finally:
            try:
                self.pending_responses.remove(resp)
            except Exception:
                pass
            if hdl is not None:
                self.unsubscribe(hdl)
        try:
            payload = json.loads(ret)
        except json.JSONDecodeError:
            payload = ret.decode()
        return {
            'topic': topic,
            'payload': payload
        }

    async def _process_api_request(self, payload: bytes) -> None:
        response = await self.json_rpc.dispatch(payload.decode())
        if response is not None:
            await self.publish_topic(self.api_resp_topic, response,
                                     self.api_qos)

    def register_api_handler(self, api_def: APIDefinition) -> None:
        if api_def.callback is None:
            # Remote API, uses RPC to reach out to Klippy
            mqtt_method = api_def.jrpc_methods[0]
            rpc_cb = self._generate_remote_callback(api_def.endpoint)
            self.json_rpc.register_method(mqtt_method, rpc_cb)
        else:
            # Local API, uses local callback
            for mqtt_method, req_method in \
                    zip(api_def.jrpc_methods, api_def.request_methods):
                rpc_cb = self._generate_local_callback(
                    api_def.endpoint, req_method, api_def.callback)
                self.json_rpc.register_method(mqtt_method, rpc_cb)
        logging.info(
            "Registering MQTT JSON-RPC methods: "
            f"{', '.join(api_def.jrpc_methods)}")

    def remove_api_handler(self, api_def: APIDefinition) -> None:
        for jrpc_method in api_def.jrpc_methods:
            self.json_rpc.remove_method(jrpc_method)

    def _generate_local_callback(self,
                                 endpoint: str,
                                 request_method: str,
                                 callback: Callable[[WebRequest], Coroutine]
                                 ) -> RPCCallback:
        async def func(args: Dict[str, Any]) -> Any:
            self._check_timestamp(args)
            result = await callback(WebRequest(endpoint, args, request_method))
            return result
        return func

    def _generate_remote_callback(self, endpoint: str) -> RPCCallback:
        async def func(args: Dict[str, Any]) -> Any:
            self._check_timestamp(args)
            result = await self.klippy.request(WebRequest(endpoint, args))
            return result
        return func

    def _check_timestamp(self, args: Dict[str, Any]) -> None:
        ts = args.pop("mqtt_timestamp", None)
        if ts is not None:
            if ts in self.timestamp_deque:
                logging.debug("Duplicate MQTT API request received")
                raise self.server.error(
                    "Duplicate MQTT Request", DUP_API_REQ_CODE)
            else:
                self.timestamp_deque.append(ts)
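
    # Example (hedged sketch): a client that retries publishing the same
    # JSON-RPC request can add a unique "mqtt_timestamp" parameter so a
    # redelivered copy is rejected rather than executed twice:
    #
    #   {"jsonrpc": "2.0", "method": "printer.gcode.script",
    #    "params": {"script": "G28", "mqtt_timestamp": 1646400000.123},
    #    "id": 4564}
    #
    # A second message carrying the same timestamp raises a "Duplicate
    # MQTT Request" error while that timestamp remains in the deque.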

    def send_status(self,
                    status: Dict[str, Any],
                    eventtime: float
                    ) -> None:
        if not status or not self.is_connected():
            return
        payload = {'eventtime': eventtime, 'status': status}
        self.publish_topic(self.klipper_status_topic, payload)

    def get_instance_name(self) -> str:
        return self.instance_name

    async def close(self) -> None:
        if self.connect_task is not None:
            self.connect_task.cancel()
            self.connect_task = None
        if not self.is_connected():
            return
        await self.publish_topic(self.moonraker_status_topic,
                                 {'server': 'offline'},
                                 retain=True)
        self.disconnect_evt = asyncio.Event()
        self.client.disconnect()
        try:
            await asyncio.wait_for(self.disconnect_evt.wait(), 2.)
        except asyncio.TimeoutError:
            logging.info("MQTT Disconnect Timeout")
        futs = list(self.pending_acks.values())
        futs.extend(self.pending_responses)
        for fut in futs:
            if fut.done():
                continue
            fut.set_exception(
                self.server.error("Moonraker Shutdown", 503))

    async def _publish_from_klipper(self,
                                    topic: str,
                                    payload: Any = None,
                                    qos: Optional[int] = None,
                                    retain: bool = False,
                                    use_prefix: bool = False
                                    ) -> None:
        if use_prefix:
            topic = f"{self.instance_name}/{topic.lstrip('/')}"
        await self.publish_topic(topic, payload, qos, retain)


def load_component(config: ConfigHelper) -> MQTTClient:
    return MQTTClient(config)
185
moonraker/components/notifier.py
Normal file
@@ -0,0 +1,185 @@
# Notifier
#
# Copyright (C) 2022 Pataar <me@pataar.nl>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations

import apprise
import logging

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Type,
    Optional,
    Dict,
    Any,
    List,
    Union,
)

if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from . import klippy_apis

    APIComp = klippy_apis.KlippyAPI


class Notifier:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.notifiers: Dict[str, NotifierInstance] = {}
        self.events: Dict[str, NotifierEvent] = {}
        prefix_sections = config.get_prefix_sections("notifier")

        self.register_events(config)

        for section in prefix_sections:
            cfg = config[section]
            try:
                notifier = NotifierInstance(cfg)

                for event in self.events:
                    if event in notifier.events or "*" in notifier.events:
                        self.events[event].register_notifier(notifier)

                logging.info(f"Registered notifier: '{notifier.get_name()}'")

            except Exception as e:
                msg = f"Failed to load notifier[{cfg.get_name()}]\n{e}"
                self.server.add_warning(msg)
                continue
            self.notifiers[notifier.get_name()] = notifier

    def register_events(self, config: ConfigHelper):

        self.events["started"] = NotifierEvent(
            "started",
            "job_state:started",
            config)

        self.events["complete"] = NotifierEvent(
            "complete",
            "job_state:complete",
            config)

        self.events["error"] = NotifierEvent(
            "error",
            "job_state:error",
            config)

        self.events["cancelled"] = NotifierEvent(
            "cancelled",
            "job_state:cancelled",
            config)

        self.events["paused"] = NotifierEvent(
            "paused",
            "job_state:paused",
            config)

        self.events["resumed"] = NotifierEvent(
            "resumed",
            "job_state:resumed",
            config)


class NotifierEvent:
    def __init__(self, identifier: str, event_name: str, config: ConfigHelper):
        self.identifier = identifier
        self.event_name = event_name
        self.server = config.get_server()
        self.notifiers: Dict[str, NotifierInstance] = {}
        self.config = config

        self.server.register_event_handler(self.event_name, self._handle)

    def register_notifier(self, notifier: NotifierInstance):
        self.notifiers[notifier.get_name()] = notifier

    async def _handle(self, *args) -> None:
        logging.info(f"'{self.identifier}' notifier event triggered")
        await self.invoke_notifiers(args)

    async def invoke_notifiers(self, args):
        for notifier_name in self.notifiers:
            try:
                notifier = self.notifiers[notifier_name]
                await notifier.notify(self.identifier, args)
            except Exception as e:
                logging.info(f"Failed to notify [{notifier_name}]\n{e}")
                continue


class NotifierInstance:
    def __init__(self, config: ConfigHelper) -> None:

        self.config = config
        name_parts = config.get_name().split(maxsplit=1)
        if len(name_parts) != 2:
            raise config.error(f"Invalid Section Name: {config.get_name()}")
        self.server = config.get_server()
        self.name = name_parts[1]
        self.apprise = apprise.Apprise()
        self.warned = False

        self.attach_requires_file_system_check = True
        self.attach = config.get("attach", None)
        if self.attach is None or \
                (self.attach.startswith("http://") or
                 self.attach.startswith("https://")):
            self.attach_requires_file_system_check = False

        url_template = config.gettemplate('url')
        self.url = url_template.render()

        if len(self.url) < 2:
            raise config.error(f"Invalid url for: {config.get_name()}")

        self.title = config.gettemplate('title', None)
        self.body = config.gettemplate("body", None)

        self.events: List[str] = config.getlist("events", separator=",")

        self.apprise.add(self.url)

    async def notify(self, event_name: str, event_args: List) -> None:
        context = {
            "event_name": event_name,
            "event_args": event_args
        }

        rendered_title = (
            '' if self.title is None else self.title.render(context)
        )
        rendered_body = (
            event_name if self.body is None else self.body.render(context)
        )

        # Verify the attachment
        if self.attach_requires_file_system_check and self.attach is not None:
            fm = self.server.lookup_component("file_manager")
            if not fm.can_access_path(self.attach):
                if not self.warned:
                    self.server.add_warning(
                        f"Attachment of notifier '{self.name}' is not "
                        "valid.  The location of the attachment is not "
                        "accessible.")
                    self.warned = True
                self.attach = None

        await self.apprise.async_notify(
            rendered_body.strip(),
            rendered_title.strip(),
            attach=self.attach
        )

    def get_name(self) -> str:
        return self.name


def load_component(config: ConfigHelper) -> Notifier:
    return Notifier(config)
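
# Example configuration (a hedged sketch; the section name, Apprise service
# URL and event list are illustrative — any Apprise-supported URL works):
#
#   [notifier telegram_example]
#   url: tgram://{bottoken}/{ChatID}
#   events: complete, error
#   title: Print status
#   body: {event_name}
#
# Each matching job_state event renders the title/body templates and pushes
# the result through apprise.async_notify().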
400
moonraker/components/octoprint_compat.py
Normal file
@@ -0,0 +1,400 @@
# OctoPrint API compatibility
#
# Copyright (C) 2021 Nickolas Grigoriadis <nagrigoriadis@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
import logging

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Dict,
    List,
)
if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from websockets import WebRequest
    from .klippy_apis import KlippyAPI as APIComp
    from .file_manager.file_manager import FileManager
    from .job_queue import JobQueue

OCTO_VERSION = '1.5.0'


class OctoPrintCompat:
    """
    Minimal implementation of the REST API as described here:
    https://docs.octoprint.org/en/master/api/index.html

    So that Cura OctoPrint plugin will function for:
    * Handshake
    * Upload gcode/ufp
    * Webcam config
    * Manual GCode submission
    * Heater temperatures
    """

    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.software_version = self.server.get_app_args().get(
            'software_version')
        self.enable_ufp: bool = config.getboolean('enable_ufp', True)

        # Get webcam settings from config
        self.webcam: Dict[str, Any] = {
            'flipH': config.getboolean('flip_h', False),
            'flipV': config.getboolean('flip_v', False),
            'rotate90': config.getboolean('rotate_90', False),
            'streamUrl': config.get('stream_url', '/webcam/?action=stream'),
            'webcamEnabled': config.getboolean('webcam_enabled', True),
        }

        # Local variables
        self.klippy_apis: APIComp = self.server.lookup_component('klippy_apis')
        self.heaters: Dict[str, Dict[str, Any]] = {}
        self.last_print_stats: Dict[str, Any] = {}

        # Register status update event
        self.server.register_event_handler(
            'server:klippy_ready', self._init)
        self.server.register_event_handler(
            'server:status_update', self._handle_status_update)

        # Version & Server information
        self.server.register_endpoint(
            '/api/version', ['GET'], self._get_version,
            transports=['http'], wrap_result=False)
        self.server.register_endpoint(
            '/api/server', ['GET'], self._get_server,
            transports=['http'], wrap_result=False)

        # Login, User & Settings
        self.server.register_endpoint(
            '/api/login', ['POST'], self._post_login_user,
            transports=['http'], wrap_result=False)
        self.server.register_endpoint(
            '/api/currentuser', ['GET'], self._post_login_user,
            transports=['http'], wrap_result=False)
        self.server.register_endpoint(
            '/api/settings', ['GET'], self._get_settings,
            transports=['http'], wrap_result=False)

        # File operations
        # Note that file upload is handled in file_manager.py
        # TODO: List/info/select/delete files

        # Job operations
        self.server.register_endpoint(
            '/api/job', ['GET'], self._get_job,
            transports=['http'], wrap_result=False)
        # TODO: start/cancel/restart/pause jobs

        # Printer operations
        self.server.register_endpoint(
            '/api/printer', ['GET'], self._get_printer,
            transports=['http'], wrap_result=False)
        self.server.register_endpoint(
            '/api/printer/command', ['POST'], self._post_command,
            transports=['http'], wrap_result=False)
        # TODO: head/tool/bed/chamber specific read/issue

        # Printer profiles
        self.server.register_endpoint(
            '/api/printerprofiles', ['GET'], self._get_printerprofiles,
            transports=['http'], wrap_result=False)

        # Upload Handlers
        self.server.register_upload_handler(
            "/api/files/local", location_prefix="api/files/moonraker")
        self.server.register_endpoint(
            "/api/files/moonraker/(?P<relative_path>.+)", ['POST'],
            self._select_file, transports=['http'], wrap_result=False)

        # System
        # TODO: shutdown/reboot/restart operations

    async def _init(self) -> None:
        self.heaters = {}
        # Fetch heaters
        try:
            result: Dict[str, Any]
            sensors: List[str]
            result = await self.klippy_apis.query_objects({'heaters': None})
            sensors = result.get('heaters', {}).get('available_sensors', [])
        except self.server.error as e:
            logging.info(f'Error Configuring heaters: {e}')
            sensors = []
        # subscribe objects
        sub: Dict[str, Any] = {s: None for s in sensors}
        sub['print_stats'] = None
        result = await self.klippy_apis.subscribe_objects(sub)
        self.last_print_stats = result.get('print_stats', {})
        if sensors:
            self.heaters = {name: result.get(name, {}) for name in sensors}

    def _handle_status_update(self, status: Dict[str, Any]) -> None:
        if 'print_stats' in status:
            self.last_print_stats.update(status['print_stats'])
        for heater_name, data in self.heaters.items():
            if heater_name in status:
                data.update(status[heater_name])

    def printer_state(self) -> str:
        klippy_state = self.server.get_klippy_state()
        if klippy_state in ["disconnected", "startup"]:
            return 'Offline'
        elif klippy_state != 'ready':
            return 'Error'
        return {
            'standby': 'Operational',
            'printing': 'Printing',
            'paused': 'Paused',
            'complete': 'Operational'
        }.get(self.last_print_stats.get('state', 'standby'), 'Error')
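
    # Example (hedged): with Klippy ready and print_stats reporting
    # {'state': 'printing'}, printer_state() returns 'Printing'.  A state
    # missing from the map (e.g. 'cancelled') falls through to 'Error',
    # and a disconnected Klippy always reports 'Offline'.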

    def printer_temps(self) -> Dict[str, Any]:
        temps: Dict[str, Any] = {}
        for heater, data in self.heaters.items():
            name = 'bed'
            if heater.startswith('extruder'):
                try:
                    tool_no = int(heater[8:])
                except ValueError:
                    tool_no = 0
                name = f'tool{tool_no}'
            elif heater != "heater_bed":
                continue
            temps[name] = {
                'actual': round(data.get('temperature', 0.), 2),
                'offset': 0,
                'target': data.get('target', 0.),
            }
        return temps
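
    # Example output (hedged): a printer with one extruder and a heated
    # bed might report
    #   {'tool0': {'actual': 210.23, 'offset': 0, 'target': 210.0},
    #    'bed': {'actual': 60.01, 'offset': 0, 'target': 60.0}}
    # "extruder" maps to tool0, "extruder1" to tool1, and so on.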

    async def _get_version(self,
                           web_request: WebRequest
                           ) -> Dict[str, str]:
        """
        Version information
        """
        return {
            'server': OCTO_VERSION,
            'api': '0.1',
            'text': f'OctoPrint (Moonraker {self.software_version})',
        }

    async def _get_server(self,
                          web_request: WebRequest
                          ) -> Dict[str, Any]:
        """
        Server status
        """
        klippy_state = self.server.get_klippy_state()
        return {
            'server': OCTO_VERSION,
            'safemode': (
                None if klippy_state == 'ready' else 'settings')
        }

    async def _post_login_user(self,
                               web_request: WebRequest
                               ) -> Dict[str, Any]:
        """
        Confirm session login.

        Since we only support apikey auth, do nothing.
        Report hardcoded user called _api
        """
        return {
            '_is_external_client': False,
            '_login_mechanism': 'apikey',
            'name': '_api',
            'active': True,
            'user': True,
            'admin': True,
            'apikey': None,
            'permissions': [],
            'groups': ['admins', 'users'],
        }

    async def _get_settings(self,
                            web_request: WebRequest
                            ) -> Dict[str, Any]:
        """
        Used to parse OctoPrint capabilities
        """
        settings = {
            'plugins': {},
            'feature': {
                'sdSupport': False,
                'temperatureGraph': False
            },
            'webcam': self.webcam,
        }
        if self.enable_ufp:
            settings['plugins'] = {
                'UltimakerFormatPackage': {
                    'align_inline_thumbnail': False,
                    'inline_thumbnail': False,
                    'inline_thumbnail_align_value': 'left',
                    'inline_thumbnail_scale_value': '50',
                    'installed': True,
                    'installed_version': '0.2.2',
                    'scale_inline_thumbnail': False,
                    'state_panel_thumbnail': True,
                },
            }
        return settings

    async def _get_job(self,
                       web_request: WebRequest
                       ) -> Dict[str, Any]:
        """
        Get current job status
        """
        return {
            'job': {
                'file': {'name': None},
                'estimatedPrintTime': None,
                'filament': {'length': None},
                'user': None,
            },
            'progress': {
                'completion': None,
                'filepos': None,
                'printTime': None,
                'printTimeLeft': None,
                'printTimeOrigin': None,
            },
            'state': self.printer_state()
        }

    async def _get_printer(self,
                           web_request: WebRequest
                           ) -> Dict[str, Any]:
        """
        Get Printer status
        """
        state = self.printer_state()
        return {
            'temperature': self.printer_temps(),
            'state': {
                'text': state,
                'flags': {
                    'operational': state not in ['Error', 'Offline'],
                    'paused': state == 'Paused',
                    'printing': state == 'Printing',
                    'cancelling': state == 'Cancelling',
                    'pausing': False,
                    'error': state == 'Error',
                    'ready': state == 'Operational',
                    'closedOrError': state in ['Error', 'Offline'],
                },
            },
        }

    async def _post_command(self,
                            web_request: WebRequest
                            ) -> Dict:
        """
        Request to run some gcode command
        """
        commands: List[str] = web_request.get('commands', [])
        for command in commands:
            logging.info(f'Executing GCode: {command}')
            try:
                await self.klippy_apis.run_gcode(command)
            except self.server.error:
                msg = f"Error executing GCode {command}"
                logging.exception(msg)

        return {}

    async def _get_printerprofiles(self,
                                   web_request: WebRequest
                                   ) -> Dict[str, Any]:
        """
        Get Printer profiles
        """
        return {
            'profiles': {
                '_default': {
                    'id': '_default',
                    'name': 'Default',
                    'color': 'default',
                    'model': 'Default',
                    'default': True,
                    'current': True,
                    'heatedBed': 'heater_bed' in self.heaters,
                    'heatedChamber': 'chamber' in self.heaters,
                    'axes': {
                        'x': {
                            'speed': 6000.,
                            'inverted': False
                        },
                        'y': {
                            'speed': 6000.,
                            'inverted': False
                        },
                        'z': {
                            'speed': 6000.,
                            'inverted': False
                        },
                        'e': {
                            'speed': 300.,
                            'inverted': False
                        }
                    }
                }
            }
        }

    async def _select_file(self,
                           web_request: WebRequest
                           ) -> None:
        command: str = web_request.get('command')
        rel_path: str = web_request.get('relative_path')
        root, filename = rel_path.strip("/").split("/", 1)
        fmgr: FileManager = self.server.lookup_component('file_manager')
        if command == "select":
            start_print: bool = web_request.get('print', False)
            if not start_print:
                # No-op, selecting a file has no meaning in Moonraker
                return
            if root != "gcodes":
                raise self.server.error(
                    "File must be located in the 'gcodes' root", 400)
            if not fmgr.check_file_exists(root, filename):
                raise self.server.error("File does not exist")
            try:
                ret = await self.klippy_apis.query_objects(
                    {'print_stats': None})
                pstate: str = ret['print_stats']['state']
            except self.server.error:
                pstate = "not_avail"
            started: bool = False
            if pstate not in ["printing", "paused", "not_avail"]:
                try:
                    await self.klippy_apis.start_print(filename)
                except self.server.error:
                    started = False
                else:
                    logging.debug(f"Job '{filename}' started via OctoPrint API")
                    started = True
            if not started:
                if fmgr.upload_queue_enabled():
                    job_queue: JobQueue = self.server.lookup_component(
                        'job_queue')
                    await job_queue.queue_job(filename, check_exists=False)
                    logging.debug(f"Job '{filename}' queued via OctoPrint API")
                else:
                    raise self.server.error("Conflict", 409)
        else:
            raise self.server.error(f"Unsupported Command: {command}")


def load_component(config: ConfigHelper) -> OctoPrintCompat:
    return OctoPrintCompat(config)
840
moonraker/components/paneldue.py
Normal file
@@ -0,0 +1,840 @@
# PanelDue LCD display support
#
# Copyright (C) 2020 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
import serial
import os
import time
import json
import errno
import logging
import asyncio
from collections import deque
from utils import ServerError

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Deque,
    Any,
    Tuple,
    Optional,
    Dict,
    List,
    Callable,
    Coroutine,
)
if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from . import klippy_apis
    from .file_manager import file_manager
    APIComp = klippy_apis.KlippyAPI
    FMComp = file_manager.FileManager
    FlexCallback = Callable[..., Optional[Coroutine]]

MIN_EST_TIME = 10.
INITIALIZE_TIMEOUT = 10.


class PanelDueError(ServerError):
    pass


RESTART_GCODES = ["RESTART", "FIRMWARE_RESTART"]


class SerialConnection:
    def __init__(self,
                 config: ConfigHelper,
                 paneldue: PanelDue
                 ) -> None:
        self.event_loop = config.get_server().get_event_loop()
        self.paneldue = paneldue
        self.port: str = config.get('serial')
        self.baud = config.getint('baud', 57600)
        self.partial_input: bytes = b""
        self.ser: Optional[serial.Serial] = None
        self.fd: Optional[int] = None
        self.connected: bool = False
        self.send_busy: bool = False
        self.send_buffer: bytes = b""
        self.attempting_connect: bool = True

    def disconnect(self, reconnect: bool = False) -> None:
        if self.connected:
            if self.fd is not None:
                self.event_loop.remove_reader(self.fd)
                self.fd = None
            self.connected = False
            if self.ser is not None:
                self.ser.close()
            self.ser = None
            self.partial_input = b""
            self.send_buffer = b""
            self.paneldue.initialized = False
            logging.info("PanelDue Disconnected")
        if reconnect and not self.attempting_connect:
            self.attempting_connect = True
            self.event_loop.delay_callback(1., self.connect)

    async def connect(self) -> None:
        self.attempting_connect = True
        start_time = connect_time = time.time()
        while not self.connected:
            if connect_time > start_time + 30.:
                logging.info("Unable to connect, aborting")
                break
            logging.info(f"Attempting to connect to: {self.port}")
            try:
                # XXX - sometimes the port cannot be exclusively locked, this
                # would likely be due to a restart where the serial port was
                # not correctly closed.  Maybe don't use exclusive mode?
                self.ser = serial.Serial(
                    self.port, self.baud, timeout=0, exclusive=True)
            except (OSError, IOError, serial.SerialException):
                logging.exception(f"Unable to open port: {self.port}")
                await asyncio.sleep(2.)
                connect_time = time.time()
                continue
            self.fd = fd = self.ser.fileno()
            os.set_blocking(fd, False)
            self.event_loop.add_reader(fd, self._handle_incoming)
            self.connected = True
            logging.info("PanelDue Connected")
        self.attempting_connect = False

    def _handle_incoming(self) -> None:
        # Process incoming data using same method as gcode.py
        if self.fd is None:
            return
        try:
            data = os.read(self.fd, 4096)
        except os.error:
            return

        if not data:
            # possibly an error, disconnect
            self.disconnect(reconnect=True)
            logging.info("serial_display: No data received, disconnecting")
            return

        # Remove null bytes, separate into lines
        data = data.strip(b'\x00')
        lines = data.split(b'\n')
        lines[0] = self.partial_input + lines[0]
        self.partial_input = lines.pop()
        for line in lines:
            try:
                decoded_line = line.strip().decode('utf-8', 'ignore')
                self.paneldue.process_line(decoded_line)
            except ServerError:
                logging.exception(
                    f"GCode Processing Error: {decoded_line}")
                self.paneldue.handle_gcode_response(
                    f"!! GCode Processing Error: {decoded_line}")
            except Exception:
                logging.exception("Error during gcode processing")

    def send(self, data: bytes) -> None:
        self.send_buffer += data
        if not self.send_busy:
            self.send_busy = True
            self.event_loop.register_callback(self._do_send)

    async def _do_send(self) -> None:
        assert self.fd is not None
        while self.send_buffer:
            if not self.connected:
                break
            try:
                sent = os.write(self.fd, self.send_buffer)
            except os.error as e:
                if e.errno == errno.EBADF or e.errno == errno.EPIPE:
                    sent = 0
                else:
                    await asyncio.sleep(.001)
                    continue
            if sent:
                self.send_buffer = self.send_buffer[sent:]
            else:
                logging.exception(
                    "Error writing data, closing serial connection")
                self.disconnect(reconnect=True)
                return
        self.send_busy = False


class PanelDue:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.event_loop = self.server.get_event_loop()
        self.file_manager: FMComp = \
            self.server.lookup_component('file_manager')
        self.klippy_apis: APIComp = \
            self.server.lookup_component('klippy_apis')
        self.kinematics: str = "none"
        self.machine_name = config.get('machine_name', "Klipper")
        self.firmware_name: str = "Repetier | Klipper"
        self.last_message: Optional[str] = None
        self.last_gcode_response: Optional[str] = None
        self.current_file: str = ""
        self.file_metadata: Dict[str, Any] = {}
        self.enable_checksum = config.getboolean('enable_checksum', True)
        self.debug_queue: Deque[str] = deque(maxlen=100)

        # Initialize tracked state.
        self.printer_state: Dict[str, Dict[str, Any]] = {
            'gcode_move': {}, 'toolhead': {}, 'virtual_sdcard': {},
            'fan': {}, 'display_status': {}, 'print_stats': {},
            'idle_timeout': {}, 'gcode_macro PANELDUE_BEEP': {}}
        self.extruder_count: int = 0
        self.heaters: List[str] = []
        self.is_ready: bool = False
        self.is_shutdown: bool = False
        self.initialized: bool = False
        self.cq_busy: bool = False
        self.gq_busy: bool = False
        self.command_queue: List[Tuple[FlexCallback, Any, Any]] = []
        self.gc_queue: List[str] = []
        self.last_printer_state: str = 'O'
        self.last_update_time: float = 0.

        # Set up macros
        self.confirmed_gcode: str = ""
        self.mbox_sequence: int = 0
        self.available_macros: Dict[str, str] = {}
        self.confirmed_macros = {
            "RESTART": "RESTART",
            "FIRMWARE_RESTART": "FIRMWARE_RESTART"}
        macros = config.getlist('macros', None)
        if macros is not None:
            # The macro's configuration name is the key, whereas the full
            # command is the value
            self.available_macros = {m.split()[0]: m for m in macros}
        conf_macros = config.getlist('confirmed_macros', None)
        if conf_macros is not None:
            # The macro's configuration name is the key, whereas the full
            # command is the value
            self.confirmed_macros = {m.split()[0]: m for m in conf_macros}
        self.available_macros.update(self.confirmed_macros)

        self.non_trivial_keys = config.getlist('non_trivial_keys',
                                               ["Klipper state"])
        self.ser_conn = SerialConnection(config, self)
        logging.info("PanelDue Configured")

        # Register server events
        self.server.register_event_handler(
            "server:klippy_ready", self._process_klippy_ready)
        self.server.register_event_handler(
            "server:klippy_shutdown", self._process_klippy_shutdown)
        self.server.register_event_handler(
            "server:klippy_disconnect", self._process_klippy_disconnect)
        self.server.register_event_handler(
            "server:status_update", self.handle_status_update)
        self.server.register_event_handler(
            "server:gcode_response", self.handle_gcode_response)

        self.server.register_remote_method(
            "paneldue_beep", self.paneldue_beep)

        # These commands are executed directly on the server and do not
        # make a request to Klippy
        self.direct_gcodes: Dict[str, FlexCallback] = {
            'M20': self._run_paneldue_M20,
            'M30': self._run_paneldue_M30,
            'M36': self._run_paneldue_M36,
            'M408': self._run_paneldue_M408
        }

        # These gcodes require special parsing or handling prior to being
        # sent via Klippy's "gcode/script" api command.
        self.special_gcodes: Dict[str, Callable[[List[str]], str]] = {
            'M0': lambda args: "CANCEL_PRINT",
            'M23': self._prepare_M23,
            'M24': lambda args: "RESUME",
            'M25': lambda args: "PAUSE",
            'M32': self._prepare_M32,
            'M98': self._prepare_M98,
            'M120': lambda args: "SAVE_GCODE_STATE STATE=PANELDUE",
            'M121': lambda args: "RESTORE_GCODE_STATE STATE=PANELDUE",
            'M290': self._prepare_M290,
            'M292': self._prepare_M292,
            'M999': lambda args: "FIRMWARE_RESTART"
        }

    async def component_init(self) -> None:
        await self.ser_conn.connect()

    async def _process_klippy_ready(self) -> None:
        # Request "info" and "configfile" status
        retries = 10
        printer_info = cfg_status = {}
        while retries:
            try:
                printer_info = await self.klippy_apis.get_klippy_info()
                cfg_status = await self.klippy_apis.query_objects(
                    {'configfile': None})
            except self.server.error:
                logging.exception("PanelDue initialization request failed")
                retries -= 1
                if not retries:
                    raise
                await asyncio.sleep(1.)
                continue
            break

        self.firmware_name = "Repetier | Klipper " + \
            printer_info['software_version']
        config: Dict[str, Any] = cfg_status.get(
            'configfile', {}).get('config', {})
        printer_cfg: Dict[str, Any] = config.get('printer', {})
        self.kinematics = printer_cfg.get('kinematics', "none")

        logging.info(
            f"PanelDue Config Received:\n"
            f"Firmware Name: {self.firmware_name}\n"
            f"Kinematics: {self.kinematics}\n"
            f"Printer Config: {config}\n")

        # Initialize printer state and make subscription request
        self.printer_state = {
            'gcode_move': {}, 'toolhead': {}, 'virtual_sdcard': {},
            'fan': {}, 'display_status': {}, 'print_stats': {},
            'idle_timeout': {}, 'gcode_macro PANELDUE_BEEP': {}}
        sub_args = {k: None for k in self.printer_state.keys()}
        self.extruder_count = 0
        self.heaters = []
        extruders = []
        for cfg in config:
            if cfg.startswith("extruder"):
                self.extruder_count += 1
                self.printer_state[cfg] = {}
                extruders.append(cfg)
                sub_args[cfg] = None
            elif cfg == "heater_bed":
                self.printer_state[cfg] = {}
                self.heaters.append(cfg)
                sub_args[cfg] = None
        extruders.sort()
        self.heaters.extend(extruders)
        try:
            status: Dict[str, Any]
            status = await self.klippy_apis.subscribe_objects(sub_args)
        except self.server.error:
            logging.exception("Unable to complete subscription request")
        else:
            self.printer_state.update(status)
        self.is_shutdown = False
        self.is_ready = True

    def _process_klippy_shutdown(self) -> None:
        self.is_shutdown = True

    def _process_klippy_disconnect(self) -> None:
        # Tell the PD that the printer is "off"
        self.write_response({'status': 'O'})
        self.last_printer_state = 'O'
        self.is_shutdown = self.is_ready = False

    def handle_status_update(self, status: Dict[str, Any]) -> None:
        for obj, items in status.items():
            if obj in self.printer_state:
                self.printer_state[obj].update(items)
            else:
                self.printer_state[obj] = items

    def paneldue_beep(self, frequency: int, duration: float) -> None:
        duration = int(duration * 1000.)
        self.write_response(
            {'beep_freq': frequency, 'beep_length': duration})

    def process_line(self, line: str) -> None:
        self.debug_queue.append(line)
        # If we find M112 in the line then skip verification
        if "M112" in line.upper():
            self.event_loop.register_callback(self.klippy_apis.emergency_stop)
            return

        if self.enable_checksum:
            # Get line number
            line_index = line.find(' ')
            try:
                line_no: Optional[int] = int(line[1:line_index])
            except Exception:
                line_index = -1
                line_no = None

            # Verify checksum
            cs_index = line.rfind('*')
            try:
                checksum = int(line[cs_index+1:])
            except Exception:
                # Invalid checksum, do not process
                msg = "!! Invalid Checksum"
                if line_no is not None:
                    msg += f" Line Number: {line_no}"
                logging.exception("PanelDue: " + msg)
                raise PanelDueError(msg)

            # Checksum is calculated by XORing every byte in the line other
            # than the checksum itself
            calculated_cs = 0
            for c in line[:cs_index]:
                calculated_cs ^= ord(c)
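
            # Worked example (hedged, illustrative line): for the input
            # "N7 M408 S1*106", every character before the '*' is XORed:
            #   'N' ^ '7' ^ ' ' ^ 'M' ^ '4' ^ '0' ^ '8' ^ ' ' ^ 'S' ^ '1'
            # which evaluates to 106, so the low byte of calculated_cs
            # must equal the transmitted 106 for the line to be accepted.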
            if calculated_cs & 0xFF != checksum:
                msg = "!! Invalid Checksum"
                if line_no is not None:
                    msg += f" Line Number: {line_no}"
                logging.info("PanelDue: " + msg)
                raise PanelDueError(msg)

            script = line[line_index+1:cs_index]
        else:
            script = line
        # Execute the gcode.  Check for special RRF gcodes that
        # require special handling
        parts = script.split()
        cmd = parts[0].strip()
        if cmd in ["M23", "M30", "M32", "M36", "M37", "M98"]:
            arg = script[len(cmd):].strip()
            parts = [cmd, arg]

        # Check for commands that query state and require immediate response
        if cmd in self.direct_gcodes:
            params: Dict[str, Any] = {}
            for p in parts[1:]:
                if p[0] not in "PSR":
                    params["arg_p"] = p.strip(" \"\t\n")
                    continue
                arg = p[0].lower()
                try:
                    val = int(p[1:].strip()) if arg in "sr" \
                        else p[1:].strip(" \"\t\n")
                except Exception:
                    msg = f"paneldue: Error parsing direct gcode {script}"
                    self.handle_gcode_response("!! " + msg)
                    logging.exception(msg)
                    return
                params[f"arg_{arg}"] = val
            func = self.direct_gcodes[cmd]
            self.queue_command(func, **params)
            return

        # Prepare GCodes that require special handling
        if cmd in self.special_gcodes:
            sgc_func = self.special_gcodes[cmd]
            script = sgc_func(parts[1:])

        if not script:
            return
        self.queue_gcode(script)

    def queue_gcode(self, script: str) -> None:
        self.gc_queue.append(script)
        if not self.gq_busy:
            self.gq_busy = True
            self.event_loop.register_callback(self._process_gcode_queue)

    async def _process_gcode_queue(self) -> None:
        while self.gc_queue:
            script = self.gc_queue.pop(0)
            try:
                if script in RESTART_GCODES:
                    await self.klippy_apis.do_restart(script)
                else:
                    await self.klippy_apis.run_gcode(script)
            except self.server.error:
                msg = f"Error executing script {script}"
                self.handle_gcode_response("!! " + msg)
                logging.exception(msg)
        self.gq_busy = False

    def queue_command(self, cmd: FlexCallback, *args, **kwargs) -> None:
        self.command_queue.append((cmd, args, kwargs))
        if not self.cq_busy:
            self.cq_busy = True
            self.event_loop.register_callback(self._process_command_queue)

    async def _process_command_queue(self) -> None:
        while self.command_queue:
            cmd, args, kwargs = self.command_queue.pop(0)
            try:
                ret = cmd(*args, **kwargs)
                if ret is not None:
                    await ret
            except Exception:
                logging.exception("Error processing command")
        self.cq_busy = False

    def _clean_filename(self, filename: str) -> str:
        # Remove quotes and whitespace
        filename = filename.strip(" \"\t\n")
        # Remove drive number
        if filename.startswith("0:/"):
            filename = filename[3:]
        # Remove initial "gcodes" folder.  This is necessary
        # due to the HACK in the paneldue_M20 gcode.
        if filename.startswith("gcodes/"):
            filename = filename[6:]
        elif filename.startswith("/gcodes/"):
            filename = filename[7:]
        # Start with a "/" so the gcode parser can correctly
        # handle files that begin with digits or special chars
        if filename[0] != "/":
            filename = "/" + filename
        return filename

    def _prepare_M23(self, args: List[str]) -> str:
        filename = self._clean_filename(args[0])
        return f"M23 {filename}"

    def _prepare_M32(self, args: List[str]) -> str:
        filename = self._clean_filename(args[0])
        # Escape existing double quotes in the file name
        filename = filename.replace("\"", "\\\"")
        return f"SDCARD_PRINT_FILE FILENAME=\"{filename}\""

    def _prepare_M98(self, args: List[str]) -> str:
        macro = args[0][1:].strip(" \"\t\n")
        name_start = macro.rfind('/') + 1
        macro = macro[name_start:]
        cmd = self.available_macros.get(macro)
        if cmd is None:
            raise PanelDueError(f"Macro {macro} invalid")
        if macro in self.confirmed_macros:
            self._create_confirmation(macro, cmd)
            cmd = ""
        return cmd

    def _prepare_M290(self, args: List[str]) -> str:
        # args should be in the format Z0.02
        offset = args[0][1:].strip()
        return f"SET_GCODE_OFFSET Z_ADJUST={offset} MOVE=1"

    def _prepare_M292(self, args: List[str]) -> str:
        p_val = int(args[0][1])
        if p_val == 0:
            cmd = self.confirmed_gcode
            self.confirmed_gcode = ""
            return cmd
        return ""

    def _create_confirmation(self, name: str, gcode: str) -> None:
        self.mbox_sequence += 1
        self.confirmed_gcode = gcode
        title = "Confirmation Dialog"
        msg = f"Please confirm your intent to run {name}." \
            " Press OK to continue, or CANCEL to abort."
        mbox: Dict[str, Any] = {}
        mbox['msgBox.mode'] = 3
        mbox['msgBox.msg'] = msg
        mbox['msgBox.seq'] = self.mbox_sequence
        mbox['msgBox.title'] = title
        mbox['msgBox.controls'] = 0
        mbox['msgBox.timeout'] = 0
        logging.debug(f"Creating PanelDue Confirmation: {mbox}")
        self.write_response(mbox)

    def handle_gcode_response(self, response: str) -> None:
        # Only queue up "non-trivial" gcode responses.  At the
        # moment we'll handle state changes and errors
        if "Klipper state" in response \
                or response.startswith('!!'):
            self.last_gcode_response = response
        else:
            for key in self.non_trivial_keys:
                if key in response:
                    self.last_gcode_response = response
                    return

    def write_response(self, response: Dict[str, Any]) -> None:
        byte_resp = json.dumps(response) + "\r\n"
        self.ser_conn.send(byte_resp.encode())

    def _get_printer_status(self) -> str:
        # PanelDue States applicable to Klipper:
        # I = idle, P = printing from SD, S = stopped (shutdown),
        # C = starting up (not ready), A = paused, D = pausing,
        # B = busy
        if self.is_shutdown:
            return 'S'

        printer_state = self.printer_state
        sd_state: str
        sd_state = printer_state['print_stats'].get('state', "standby")
        if sd_state == "printing":
            if self.last_printer_state == 'A':
                # Resuming
                return 'R'
            # Printing
            return 'P'
        elif sd_state == "paused":
            p_active = printer_state['idle_timeout'].get(
                'state', 'Idle') == "Printing"
            if p_active and self.last_printer_state != 'A':
                # Pausing
                return 'D'
            else:
                # Paused
                return 'A'

        return 'I'

    def _run_paneldue_M408(self,
                           arg_r: Optional[int] = None,
                           arg_s: int = 1
                           ) -> None:
        response: Dict[str, Any] = {}
        sequence = arg_r
        response_type = arg_s

        curtime = self.event_loop.get_loop_time()
        if curtime - self.last_update_time > INITIALIZE_TIMEOUT:
            self.initialized = False
        self.last_update_time = curtime

        if not self.initialized:
            response['dir'] = "/macros"
            response['files'] = list(self.available_macros.keys())
            self.initialized = True
        if not self.is_ready:
            self.last_printer_state = 'O'
            response['status'] = self.last_printer_state
            self.write_response(response)
            return
        if sequence is not None and self.last_gcode_response:
            # Send gcode responses
            response['seq'] = sequence + 1
            response['resp'] = self.last_gcode_response
            self.last_gcode_response = None
        if response_type == 1:
            # Extended response Request
            response['myName'] = self.machine_name
            response['firmwareName'] = self.firmware_name
            response['numTools'] = self.extruder_count
            response['geometry'] = self.kinematics
            response['axes'] = 3

        p_state = self.printer_state
        self.last_printer_state = self._get_printer_status()
        response['status'] = self.last_printer_state
        response['babystep'] = round(p_state['gcode_move'].get(
            'homing_origin', [0., 0., 0., 0.])[2], 3)

        # Current position
        pos: List[float]
        homed_pos: str
        sfactor: float
        pos = p_state['toolhead'].get('position', [0., 0., 0., 0.])
        response['pos'] = [round(p, 2) for p in pos[:3]]
        homed_pos = p_state['toolhead'].get('homed_axes', "")
        response['homed'] = [int(a in homed_pos) for a in "xyz"]
        sfactor = round(p_state['gcode_move'].get('speed_factor', 1.) * 100, 2)
        response['sfactor'] = sfactor

        # Print Progress Tracking
        sd_status = p_state['virtual_sdcard']
        print_stats = p_state['print_stats']
        fname: str = print_stats.get('filename', "")
        sd_print_state: Optional[str] = print_stats.get('state')
        if sd_print_state in ['printing', 'paused']:
            # We know a file has been loaded, initialize metadata
            if self.current_file != fname:
                self.current_file = fname
                self.file_metadata = self.file_manager.get_file_metadata(fname)
            progress: float = sd_status.get('progress', 0)
            # progress and print tracking
            if progress:
                response['fraction_printed'] = round(progress, 3)
                est_time: float = self.file_metadata.get('estimated_time', 0)
                if est_time > MIN_EST_TIME:
                    # file read estimate
                    times_left = [int(est_time - est_time * progress)]
                    # filament estimate
                    est_total_fil: Optional[float]
                    est_total_fil = self.file_metadata.get('filament_total')
                    if est_total_fil:
                        cur_filament: float = print_stats.get(
                            'filament_used', 0.)
                        fpct = min(1., cur_filament / est_total_fil)
                        times_left.append(int(est_time - est_time * fpct))
                    # object height estimate
                    obj_height: Optional[float]
                    obj_height = self.file_metadata.get('object_height')
                    if obj_height:
                        cur_height: float = p_state['gcode_move'].get(
                            'gcode_position', [0., 0., 0., 0.])[2]
                        hpct = min(1., cur_height / obj_height)
                        times_left.append(int(est_time - est_time * hpct))
                else:
                    # The estimated time is not in the metadata, however we
                    # can still provide an estimate based on file progress
                    duration: float = print_stats.get('print_duration', 0.)
                    times_left = [int(duration / progress - duration)]
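                    # Example (hedged): at 25% progress with 300s of print
                    # time elapsed, the remaining estimate is
                    # 300 / 0.25 - 300 = 900 seconds.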
                response['timesLeft'] = times_left
        else:
            # clear filename and metadata
            self.current_file = ""
            self.file_metadata = {}

        fan_speed: Optional[float] = p_state['fan'].get('speed')
        if fan_speed is not None:
            response['fanPercent'] = [round(fan_speed * 100, 1)]

        extruder_name: str = ""
        if self.extruder_count > 0:
            extruder_name = p_state['toolhead'].get('extruder', "")
            if extruder_name:
                tool = 0
                if extruder_name != "extruder":
                    tool = int(extruder_name[-1])
                response['tool'] = tool

        # Report Heater Status
        efactor: float = round(p_state['gcode_move'].get(
            'extrude_factor', 1.) * 100., 2)

        for name in self.heaters:
            temp: float = round(p_state[name].get('temperature', 0.0), 1)
            target: float = round(p_state[name].get('target', 0.0), 1)
            response.setdefault('heaters', []).append(temp)
            response.setdefault('active', []).append(target)
            response.setdefault('standby', []).append(target)
            if name.startswith('extruder'):
                a_stat = 2 if name == extruder_name else 1
                response.setdefault('hstat', []).append(a_stat if target else 0)
                response.setdefault('efactor', []).append(efactor)
                response.setdefault('extr', []).append(round(pos[3], 2))
            else:
                response.setdefault('hstat', []).append(2 if target else 0)

        # Display message (via M117)
        msg: str = p_state['display_status'].get('message', "")
        if msg and msg != self.last_message:
            response['message'] = msg
            # reset the message so it only shows once.  The paneldue
            # is strange about this, and displays it as a full screen
            # notification
        self.last_message = msg
        self.write_response(response)

    def _run_paneldue_M20(self, arg_p: str, arg_s: int = 0) -> None:
        response_type = arg_s
        if response_type != 2:
            logging.info(
                f"Cannot process response type {response_type} in M20")
            return
        path = arg_p

        # Strip quotes if they exist
        path = path.strip('\"')

        # Path should come in as "0:/macros, or 0:/<gcode_folder>".  With
        # repetier compatibility enabled, the default folder is root,
        # ie. "0:/"
        if path.startswith("0:/"):
            path = path[2:]
        response: Dict[str, Any] = {'dir': path}
        response['files'] = []

        if path == "/macros":
            response['files'] = list(self.available_macros.keys())
        else:
            # HACK: The PanelDue has a bug where it does not correctly detect
            # subdirectories if we return the root as "/".  Moonraker can
            # support a "gcodes" directory, however we must choose between this
            # support or disabling RRF specific gcodes (this is done by
            # identifying as Repetier).
            # The workaround below converts both "/" and "/gcodes" paths to
            # "gcodes".
            if path == "/":
                response['dir'] = "/gcodes"
                path = "gcodes"
            elif path.startswith("/gcodes"):
                path = path[1:]

            flist = self.file_manager.list_dir(path, simple_format=True)
            if flist:
                response['files'] = flist
        self.write_response(response)

    async def _run_paneldue_M30(self, arg_p: str = "") -> None:
        # Delete a file.  Clean up the file name and make sure
        # it is relative to the "gcodes" root.
        path = arg_p
        path = path.strip('\"')
        if path.startswith("0:/"):
            path = path[3:]
        elif path[0] == "/":
            path = path[1:]

        if not path.startswith("gcodes/"):
            path = "gcodes/" + path
        await self.file_manager.delete_file(path)

    def _run_paneldue_M36(self, arg_p: Optional[str] = None) -> None:
        response: Dict[str, Any] = {}
        filename: Optional[str] = arg_p
        sd_status = self.printer_state.get('virtual_sdcard', {})
        print_stats = self.printer_state.get('print_stats', {})
        if filename is None:
            # PanelDue is requesting file information on a
            # currently printed file
            active = False
            if sd_status and print_stats:
                filename = print_stats['filename']
                active = sd_status['is_active']
            if not filename or not active:
                # Either no file printing or no virtual_sdcard
                response['err'] = 1
                self.write_response(response)
                return
            else:
                response['fileName'] = filename.split("/")[-1]

        # For consistency make sure that the filename begins with the
        # "gcodes/" root.  The M20 HACK should add this in some cases.
        # Ideally we would add support to the PanelDue firmware that
        # indicates Moonraker supports a "gcodes" directory.
        if filename[0] == "/":
            filename = filename[1:]
        if not filename.startswith("gcodes/"):
            filename = "gcodes/" + filename

        metadata: Dict[str, Any] = \
            self.file_manager.get_file_metadata(filename)
        if metadata:
            response['err'] = 0
            response['size'] = metadata['size']
            # workaround for PanelDue replacing the first "T" found
            response['lastModified'] = "T" + time.ctime(metadata['modified'])
            slicer: Optional[str] = metadata.get('slicer')
            if slicer is not None:
                response['generatedBy'] = slicer
            height: Optional[float] = metadata.get('object_height')
            if height is not None:
                response['height'] = round(height, 2)
            layer_height: Optional[float] = metadata.get('layer_height')
            if layer_height is not None:
                response['layerHeight'] = round(layer_height, 2)
            filament: Optional[float] = metadata.get('filament_total')
            if filament is not None:
                response['filament'] = [round(filament, 1)]
            est_time: Optional[float] = metadata.get('estimated_time')
            if est_time is not None:
                response['printTime'] = int(est_time + .5)
        else:
            response['err'] = 1
        self.write_response(response)

    def close(self) -> None:
        self.ser_conn.disconnect()
        msg = "\nPanelDue GCode Dump:"
        for i, gc in enumerate(self.debug_queue):
            msg += f"\nSequence {i}: {gc}"
        logging.debug(msg)


def load_component(config: ConfigHelper) -> PanelDue:
    return PanelDue(config)
1322
moonraker/components/power.py
Normal file
File diff suppressed because it is too large
336
moonraker/components/proc_stats.py
Normal file
@@ -0,0 +1,336 @@
# Moonraker Process Stat Tracking
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
import asyncio
import time
import re
import os
import pathlib
import logging
from collections import deque

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Awaitable,
    Callable,
    Deque,
    Any,
    List,
    Tuple,
    Optional,
    Dict,
)
if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from websockets import WebRequest, WebsocketManager
    from . import shell_command
    STAT_CALLBACK = Callable[[int], Optional[Awaitable]]

VC_GEN_CMD_FILE = "/usr/bin/vcgencmd"
STATM_FILE_PATH = "/proc/self/smaps_rollup"
NET_DEV_PATH = "/proc/net/dev"
TEMPERATURE_PATH = "/sys/class/thermal/thermal_zone0/temp"
CPU_STAT_PATH = "/proc/stat"
MEM_AVAIL_PATH = "/proc/meminfo"
STAT_UPDATE_TIME = 1.
REPORT_QUEUE_SIZE = 30
THROTTLE_CHECK_INTERVAL = 10
WATCHDOG_REFRESH_TIME = 2.
REPORT_BLOCKED_TIME = 4.

THROTTLED_FLAGS = {
    1: "Under-Voltage Detected",
    1 << 1: "Frequency Capped",
    1 << 2: "Currently Throttled",
    1 << 3: "Temperature Limit Active",
    1 << 16: "Previously Under-Volted",
    1 << 17: "Previously Frequency Capped",
    1 << 18: "Previously Throttled",
    1 << 19: "Previously Temperature Limited"
}
|
||||
|
||||
class ProcStats:
|
||||
def __init__(self, config: ConfigHelper) -> None:
|
||||
self.server = config.get_server()
|
||||
self.event_loop = self.server.get_event_loop()
|
||||
self.watchdog = Watchdog(self)
|
||||
self.stat_update_timer = self.event_loop.register_timer(
|
||||
self._handle_stat_update)
|
||||
self.vcgencmd: Optional[shell_command.ShellCommand] = None
|
||||
if os.path.exists(VC_GEN_CMD_FILE):
|
||||
logging.info("Detected 'vcgencmd', throttle checking enabled")
|
||||
shell_cmd: shell_command.ShellCommandFactory
|
||||
shell_cmd = self.server.load_component(config, "shell_command")
|
||||
self.vcgencmd = shell_cmd.build_shell_command(
|
||||
"vcgencmd get_throttled")
|
||||
self.server.register_notification("proc_stats:cpu_throttled")
|
||||
else:
|
||||
logging.info("Unable to find 'vcgencmd', throttle checking "
|
||||
"disabled")
|
||||
self.temp_file = pathlib.Path(TEMPERATURE_PATH)
|
||||
self.smaps = pathlib.Path(STATM_FILE_PATH)
|
||||
self.netdev_file = pathlib.Path(NET_DEV_PATH)
|
||||
self.cpu_stats_file = pathlib.Path(CPU_STAT_PATH)
|
||||
self.meminfo_file = pathlib.Path(MEM_AVAIL_PATH)
|
||||
self.server.register_endpoint(
|
||||
"/machine/proc_stats", ["GET"], self._handle_stat_request)
|
||||
self.server.register_event_handler(
|
||||
"server:klippy_shutdown", self._handle_shutdown)
|
||||
self.server.register_notification("proc_stats:proc_stat_update")
|
||||
self.proc_stat_queue: Deque[Dict[str, Any]] = deque(maxlen=30)
|
||||
self.last_update_time = time.time()
|
||||
self.last_proc_time = time.process_time()
|
||||
self.throttle_check_lock = asyncio.Lock()
|
||||
self.total_throttled: int = 0
|
||||
self.last_throttled: int = 0
|
||||
self.update_sequence: int = 0
|
||||
self.last_net_stats: Dict[str, Dict[str, Any]] = {}
|
||||
self.last_cpu_stats: Dict[str, Tuple[int, int]] = {}
|
||||
self.cpu_usage: Dict[str, float] = {}
|
||||
self.memory_usage: Dict[str, int] = {}
|
||||
self.stat_callbacks: List[STAT_CALLBACK] = []
|
||||
|
||||
async def component_init(self) -> None:
|
||||
self.stat_update_timer.start()
|
||||
self.watchdog.start()
|
||||
|
||||
def register_stat_callback(self, callback: STAT_CALLBACK) -> None:
|
||||
self.stat_callbacks.append(callback)
|
||||
|
||||
async def _handle_stat_request(self,
|
||||
web_request: WebRequest
|
||||
) -> Dict[str, Any]:
|
||||
ts: Optional[Dict[str, Any]] = None
|
||||
if self.vcgencmd is not None:
|
||||
ts = await self._check_throttled_state()
|
||||
cpu_temp = await self.event_loop.run_in_thread(
|
||||
self._get_cpu_temperature)
|
||||
wsm: WebsocketManager = self.server.lookup_component("websockets")
|
||||
websocket_count = wsm.get_count()
|
||||
return {
|
||||
'moonraker_stats': list(self.proc_stat_queue),
|
||||
'throttled_state': ts,
|
||||
'cpu_temp': cpu_temp,
|
||||
'network': self.last_net_stats,
|
||||
'system_cpu_usage': self.cpu_usage,
|
||||
'system_uptime': time.clock_gettime(time.CLOCK_BOOTTIME),
|
||||
'system_memory': self.memory_usage,
|
||||
'websocket_connections': websocket_count
|
||||
}
|
||||
|
||||
async def _handle_shutdown(self) -> None:
|
||||
msg = "\nMoonraker System Usage Statistics:"
|
||||
for stats in self.proc_stat_queue:
|
||||
msg += f"\n{self._format_stats(stats)}"
|
||||
cpu_temp = await self.event_loop.run_in_thread(
|
||||
self._get_cpu_temperature)
|
||||
msg += f"\nCPU Temperature: {cpu_temp}"
|
||||
logging.info(msg)
|
||||
if self.vcgencmd is not None:
|
||||
ts = await self._check_throttled_state()
|
||||
logging.info(f"Throttled Flags: {' '.join(ts['flags'])}")
|
||||
|
||||
async def _handle_stat_update(self, eventtime: float) -> float:
|
||||
update_time = eventtime
|
||||
proc_time = time.process_time()
|
||||
time_diff = update_time - self.last_update_time
|
||||
usage = round((proc_time - self.last_proc_time) / time_diff * 100, 2)
|
||||
cpu_temp, mem, mem_units, net = (
|
||||
await self.event_loop.run_in_thread(self._read_system_files)
|
||||
)
|
||||
for dev in net:
|
||||
bytes_sec = 0.
|
||||
if dev in self.last_net_stats:
|
||||
last_dev_stats = self.last_net_stats[dev]
|
||||
cur_total: int = net[dev]['rx_bytes'] + net[dev]['tx_bytes']
|
||||
last_total: int = last_dev_stats['rx_bytes'] + \
|
||||
last_dev_stats['tx_bytes']
|
||||
bytes_sec = round((cur_total - last_total) / time_diff, 2)
|
||||
net[dev]['bandwidth'] = bytes_sec
|
||||
self.last_net_stats = net
|
||||
result = {
|
||||
'time': time.time(),
|
||||
'cpu_usage': usage,
|
||||
'memory': mem,
|
||||
'mem_units': mem_units
|
||||
}
|
||||
self.proc_stat_queue.append(result)
|
||||
wsm: WebsocketManager = self.server.lookup_component("websockets")
|
||||
websocket_count = wsm.get_count()
|
||||
self.server.send_event("proc_stats:proc_stat_update", {
|
||||
'moonraker_stats': result,
|
||||
'cpu_temp': cpu_temp,
|
||||
'network': net,
|
||||
'system_cpu_usage': self.cpu_usage,
|
||||
'system_memory': self.memory_usage,
|
||||
'websocket_connections': websocket_count
|
||||
})
|
||||
if not self.update_sequence % THROTTLE_CHECK_INTERVAL:
|
||||
if self.vcgencmd is not None:
|
||||
ts = await self._check_throttled_state()
|
||||
cur_throttled = ts['bits']
|
||||
if cur_throttled & ~self.total_throttled:
|
||||
self.server.add_log_rollover_item(
|
||||
'throttled', f"CPU Throttled Flags: {ts['flags']}")
|
||||
if cur_throttled != self.last_throttled:
|
||||
self.server.send_event("proc_stats:cpu_throttled", ts)
|
||||
self.last_throttled = cur_throttled
|
||||
self.total_throttled |= cur_throttled
|
||||
for cb in self.stat_callbacks:
|
||||
ret = cb(self.update_sequence)
|
||||
if ret is not None:
|
||||
await ret
|
||||
self.last_update_time = update_time
|
||||
self.last_proc_time = proc_time
|
||||
self.update_sequence += 1
|
||||
return eventtime + STAT_UPDATE_TIME
|
||||
|
||||
async def _check_throttled_state(self) -> Dict[str, Any]:
|
||||
async with self.throttle_check_lock:
|
||||
assert self.vcgencmd is not None
|
||||
try:
|
||||
resp = await self.vcgencmd.run_with_response(
|
||||
timeout=.5, log_complete=False)
|
||||
ts = int(resp.strip().split("=")[-1], 16)
|
||||
except Exception:
|
||||
return {'bits': 0, 'flags': ["?"]}
|
||||
flags = []
|
||||
for flag, desc in THROTTLED_FLAGS.items():
|
||||
if flag & ts:
|
||||
flags.append(desc)
|
||||
return {'bits': ts, 'flags': flags}
|
||||
|
||||
def _read_system_files(self) -> Tuple:
|
||||
mem, units = self._get_memory_usage()
|
||||
temp = self._get_cpu_temperature()
|
||||
net_stats = self._get_net_stats()
|
||||
self._update_cpu_stats()
|
||||
self._update_system_memory()
|
||||
return temp, mem, units, net_stats
|
||||
|
||||
def _get_memory_usage(self) -> Tuple[Optional[int], Optional[str]]:
|
||||
try:
|
||||
mem_data = self.smaps.read_text()
|
||||
rss_match = re.search(r"Rss:\s+(\d+)\s+(\w+)", mem_data)
|
||||
if rss_match is None:
|
||||
return None, None
|
||||
mem = int(rss_match.group(1))
|
||||
units = rss_match.group(2)
|
||||
except Exception:
|
||||
return None, None
|
||||
return mem, units
|
||||
|
||||
def _get_cpu_temperature(self) -> Optional[float]:
|
||||
try:
|
||||
res = int(self.temp_file.read_text().strip())
|
||||
temp = res / 1000.
|
||||
except Exception:
|
||||
return None
|
||||
return temp
|
||||
|
||||
def _get_net_stats(self) -> Dict[str, Any]:
|
||||
net_stats: Dict[str, Any] = {}
|
||||
try:
|
||||
ret = self.netdev_file.read_text()
|
||||
dev_info = re.findall(r"([\w]+):(.+)", ret)
|
||||
for (dev_name, stats) in dev_info:
|
||||
parsed_stats = stats.strip().split()
|
||||
net_stats[dev_name] = {
|
||||
'rx_bytes': int(parsed_stats[0]),
|
||||
'tx_bytes': int(parsed_stats[8])
|
||||
}
|
||||
return net_stats
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
def _update_system_memory(self) -> None:
|
||||
mem_stats: Dict[str, Any] = {}
|
||||
try:
|
||||
ret = self.meminfo_file.read_text()
|
||||
total_match = re.search(r"MemTotal:\s+(\d+)", ret)
|
||||
avail_match = re.search(r"MemAvailable:\s+(\d+)", ret)
|
||||
if total_match is not None and avail_match is not None:
|
||||
mem_stats["total"] = int(total_match.group(1))
|
||||
mem_stats["available"] = int(avail_match.group(1))
|
||||
mem_stats["used"] = mem_stats["total"] - mem_stats["available"]
|
||||
self.memory_usage.update(mem_stats)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def _update_cpu_stats(self) -> None:
|
||||
try:
|
||||
cpu_usage: Dict[str, Any] = {}
|
||||
ret = self.cpu_stats_file.read_text()
|
||||
usage_info: List[str] = re.findall(r"cpu[^\n]+", ret)
|
||||
for cpu in usage_info:
|
||||
parts = cpu.split()
|
||||
name = parts[0]
|
||||
cpu_sum = sum([int(t) for t in parts[1:]])
|
||||
cpu_idle = int(parts[4])
|
||||
if name in self.last_cpu_stats:
|
||||
last_sum, last_idle = self.last_cpu_stats[name]
|
||||
cpu_delta = cpu_sum - last_sum
|
||||
idle_delta = cpu_idle - last_idle
|
||||
cpu_used = cpu_delta - idle_delta
|
||||
cpu_usage[name] = round(
|
||||
100 * (cpu_used / cpu_delta), 2)
|
||||
self.cpu_usage = cpu_usage
|
||||
self.last_cpu_stats[name] = (cpu_sum, cpu_idle)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def _format_stats(self, stats: Dict[str, Any]) -> str:
|
||||
return f"System Time: {stats['time']:2f}, " \
|
||||
f"Usage: {stats['cpu_usage']}%, " \
|
||||
f"Memory: {stats['memory']} {stats['mem_units']}"
|
||||
|
||||
def log_last_stats(self, count: int = 1):
|
||||
count = min(len(self.proc_stat_queue), count)
|
||||
msg = ""
|
||||
for stats in list(self.proc_stat_queue)[-count:]:
|
||||
msg += f"\n{self._format_stats(stats)}"
|
||||
logging.info(msg)
|
||||
|
||||
def close(self) -> None:
|
||||
self.stat_update_timer.stop()
|
||||
self.watchdog.stop()
|
||||
|
||||
class Watchdog:
|
||||
def __init__(self, proc_stats: ProcStats) -> None:
|
||||
self.proc_stats = proc_stats
|
||||
self.event_loop = proc_stats.event_loop
|
||||
self.blocked_count: int = 0
|
||||
self.last_watch_time: float = 0.
|
||||
self.watchdog_timer = self.event_loop.register_timer(
|
||||
self._watchdog_callback
|
||||
)
|
||||
|
||||
def _watchdog_callback(self, eventtime: float) -> float:
|
||||
time_diff = eventtime - self.last_watch_time
|
||||
if time_diff > REPORT_BLOCKED_TIME:
|
||||
self.blocked_count += 1
|
||||
logging.info(
|
||||
f"EVENT LOOP BLOCKED: {round(time_diff, 2)} seconds"
|
||||
f", total blocked count: {self.blocked_count}")
|
||||
# delay the stat logging so we capture the CPU percentage after
|
||||
# the next cycle
|
||||
self.event_loop.delay_callback(
|
||||
.2, self.proc_stats.log_last_stats, 5)
|
||||
self.last_watch_time = eventtime
|
||||
return eventtime + WATCHDOG_REFRESH_TIME
|
||||
|
||||
def start(self):
|
||||
if not self.watchdog_timer.is_running():
|
||||
self.last_watch_time = self.event_loop.get_loop_time()
|
||||
self.watchdog_timer.start()
|
||||
|
||||
def stop(self):
|
||||
self.watchdog_timer.stop()
|
||||
|
||||
def load_component(config: ConfigHelper) -> ProcStats:
|
||||
return ProcStats(config)
|
||||
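The component above registers a GET endpoint at /machine/proc_stats. A minimal client sketch for querying it, assuming Moonraker is reachable at localhost on its default port 7125 and that the HTTP API wraps responses in a "result" object (host, port, and the requests dependency are assumptions for illustration):

# Minimal client sketch (assumes Moonraker on localhost:7125)
import requests

resp = requests.get("http://localhost:7125/machine/proc_stats")
data = resp.json()["result"]
# 'moonraker_stats' holds up to the last 30 one-second samples
latest = data["moonraker_stats"][-1]
print(latest["cpu_usage"], latest["memory"], latest["mem_units"])
# throttled bits decode against THROTTLED_FLAGS as shown above
bits = (data.get("throttled_state") or {}).get("bits", 0)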
84
moonraker/components/secrets.py
Normal file
@@ -0,0 +1,84 @@
# Support for password/token secrets
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
from __future__ import annotations
import pathlib
import logging
import configparser
import json
from typing import (
    TYPE_CHECKING,
    Dict,
    Optional,
    Any
)
if TYPE_CHECKING:
    from confighelper import ConfigHelper


class Secrets:
    def __init__(self, config: ConfigHelper) -> None:
        server = config.get_server()
        self.secrets_file: Optional[pathlib.Path] = None
        path: Optional[str] = config.get('secrets_path', None)
        self.type = "invalid"
        self.values: Dict[str, Any] = {}
        if path is not None:
            self.secrets_file = pathlib.Path(path).expanduser().resolve()
            if not self.secrets_file.is_file():
                server.add_warning(
                    "[secrets]: option 'secrets_path', file does not exist: "
                    f"'{self.secrets_file}'")
                return
            data = self.secrets_file.read_text()
            vals = self._parse_json(data)
            if vals is not None:
                if not isinstance(vals, dict):
                    server.add_warning(
                        f"[secrets]: option 'secrets_path', top level item in"
                        f" json file '{self.secrets_file}' must be an Object.")
                    return
                self.values = vals
                self.type = "json"
            else:
                vals = self._parse_ini(data)
                if vals is None:
                    server.add_warning(
                        "[secrets]: option 'secrets_path', invalid file "
                        f"format, must be json or ini: '{self.secrets_file}'")
                    return
                self.values = vals
                self.type = "ini"
            logging.debug(f"[secrets]: Loaded {self.type} file: "
                          f"{self.secrets_file}")
        else:
            logging.debug(
                "[secrets]: Option `secrets_path` not supplied")

    def _parse_ini(self, data: str) -> Optional[Dict[str, Any]]:
        try:
            cfg = configparser.ConfigParser(interpolation=None)
            cfg.read_string(data)
            return {sec: dict(cfg.items(sec)) for sec in cfg.sections()}
        except Exception:
            return None

    def _parse_json(self, data: str) -> Optional[Dict[str, Any]]:
        try:
            return json.loads(data)
        except json.JSONDecodeError:
            return None

    def get_type(self) -> str:
        return self.type

    def __getitem__(self, key: str) -> Any:
        return self.values[key]

    def get(self, key: str, default: Any = None) -> Any:
        return self.values.get(key, default)


def load_component(config: ConfigHelper) -> Secrets:
    return Secrets(config)
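For reference, a secrets file that the INI branch above would parse into a dict of sections. The file location is set by the 'secrets_path' option read in __init__; the section and key names here are purely illustrative:

# ~/moonraker_secrets.ini (hypothetical)
[mqtt_credentials]
username = mqtt_user
password = my_mqtt_password

With that file loaded, secrets.get('mqtt_credentials') returns the section as a dict, and secrets['mqtt_credentials'] raises KeyError if the section is absent. A JSON file with a top-level object works the same way via the _parse_json branch.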
375
moonraker/components/shell_command.py
Normal file
@@ -0,0 +1,375 @@
# linux shell command execution utility
#
# Copyright (C) 2020 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
import os
import shlex
import logging
import signal
import asyncio
from utils import ServerError

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Awaitable,
    List,
    Optional,
    Callable,
    Coroutine,
    Dict,
    Set,
)
if TYPE_CHECKING:
    from confighelper import ConfigHelper
    OutputCallback = Optional[Callable[[bytes], None]]


class ShellCommandError(ServerError):
    def __init__(self,
                 message: str,
                 return_code: Optional[int],
                 stdout: Optional[bytes] = b"",
                 stderr: Optional[bytes] = b"",
                 status_code: int = 500
                 ) -> None:
        super().__init__(message, status_code=status_code)
        self.stdout = stdout or b""
        self.stderr = stderr or b""
        self.return_code = return_code


class ShellCommandProtocol(asyncio.subprocess.SubprocessStreamProtocol):
    def __init__(self,
                 limit: int,
                 loop: asyncio.events.AbstractEventLoop,
                 program_name: str = "",
                 std_out_cb: OutputCallback = None,
                 std_err_cb: OutputCallback = None,
                 log_stderr: bool = False
                 ) -> None:
        self._loop = loop
        self._pipe_fds: List[int] = []
        super().__init__(limit, loop)
        self.program_name = program_name
        self.std_out_cb = std_out_cb
        self.std_err_cb = std_err_cb
        self.log_stderr = log_stderr
        self.pending_data: List[bytes] = [b"", b""]

    def connection_made(self,
                        transport: asyncio.transports.BaseTransport
                        ) -> None:
        self._transport = transport
        assert isinstance(transport, asyncio.SubprocessTransport)
        stdout_transport = transport.get_pipe_transport(1)
        if stdout_transport is not None:
            self._pipe_fds.append(1)

        stderr_transport = transport.get_pipe_transport(2)
        if stderr_transport is not None:
            self._pipe_fds.append(2)

        stdin_transport = transport.get_pipe_transport(0)
        if stdin_transport is not None:
            self.stdin = asyncio.streams.StreamWriter(
                stdin_transport,
                protocol=self,
                reader=None,
                loop=self._loop)

    def pipe_data_received(self, fd: int, data: bytes | str) -> None:
        cb = None
        data_idx = fd - 1
        if fd == 1:
            cb = self.std_out_cb
        elif fd == 2:
            cb = self.std_err_cb
            if self.log_stderr:
                if isinstance(data, bytes):
                    msg = data.decode(errors='ignore')
                else:
                    msg = data
                logging.info(f"{self.program_name}: {msg}")
        if cb is not None:
            if isinstance(data, str):
                data = data.encode()
            lines = data.split(b'\n')
            lines[0] = self.pending_data[data_idx] + lines[0]
            self.pending_data[data_idx] = lines.pop()
            for line in lines:
                if not line:
                    continue
                cb(line)

    def pipe_connection_lost(self,
                             fd: int,
                             exc: Exception | None
                             ) -> None:
        cb = None
        pending = b""
        if fd == 1:
            cb = self.std_out_cb
            pending = self.pending_data[0]
        elif fd == 2:
            cb = self.std_err_cb
            pending = self.pending_data[1]
        if pending and cb is not None:
            cb(pending)
        super().pipe_connection_lost(fd, exc)


class ShellCommand:
    IDX_SIGINT = 0
    IDX_SIGTERM = 1
    IDX_SIGKILL = 2
    def __init__(self,
                 factory: ShellCommandFactory,
                 cmd: str,
                 std_out_callback: OutputCallback,
                 std_err_callback: OutputCallback,
                 env: Optional[Dict[str, str]] = None,
                 log_stderr: bool = False,
                 cwd: Optional[str] = None
                 ) -> None:
        self.factory = factory
        self.name = cmd
        self.std_out_cb = std_out_callback
        self.std_err_cb = std_err_callback
        cmd = os.path.expanduser(cmd)
        self.command = shlex.split(cmd)
        self.log_stderr = log_stderr
        self.env = env
        self.cwd = cwd
        self.proc: Optional[asyncio.subprocess.Process] = None
        self.cancelled = False
        self.return_code: Optional[int] = None
        self.run_lock = asyncio.Lock()

    async def cancel(self, sig_idx: int = 1) -> None:
        if self.cancelled:
            return
        self.cancelled = True
        if self.proc is not None:
            exit_success = False
            sig_idx = min(2, max(0, sig_idx))
            sigs = [signal.SIGINT, signal.SIGTERM, signal.SIGKILL][sig_idx:]
            for sig in sigs:
                try:
                    self.proc.send_signal(sig)
                    ret = self.proc.wait()
                    await asyncio.wait_for(ret, timeout=2.)
                except asyncio.TimeoutError:
                    continue
                except ProcessLookupError:
                    pass
                logging.debug(f"Command '{self.name}' exited with "
                              f"signal: {sig.name}")
                exit_success = True
                break
            if not exit_success:
                logging.info(f"WARNING: {self.name} did not cleanly exit")

    def get_return_code(self) -> Optional[int]:
        return self.return_code

    def _reset_command_data(self) -> None:
        self.return_code = self.proc = None
        self.cancelled = False

    async def run(self,
                  timeout: float = 2.,
                  verbose: bool = True,
                  log_complete: bool = True,
                  sig_idx: int = 1,
                  proc_input: Optional[str] = None
                  ) -> bool:
        async with self.run_lock:
            self.factory.add_running_command(self)
            self._reset_command_data()
            if not timeout:
                # Never timeout
                timeout = 9999999999999999.
            if (
                self.std_out_cb is None
                and self.std_err_cb is None and
                not self.log_stderr
            ):
                # No callbacks set so output cannot be verbose
                verbose = False
            created = await self._create_subprocess(
                verbose, proc_input is not None)
            if not created:
                self.factory.remove_running_command(self)
                return False
            assert self.proc is not None
            try:
                if proc_input is not None:
                    ret: Coroutine = self.proc.communicate(
                        input=proc_input.encode())
                else:
                    ret = self.proc.wait()
                await asyncio.wait_for(ret, timeout=timeout)
            except asyncio.TimeoutError:
                complete = False
                await self.cancel(sig_idx)
            else:
                complete = not self.cancelled
            self.factory.remove_running_command(self)
            return self._check_proc_success(complete, log_complete)

    async def run_with_response(self,
                                timeout: float = 2.,
                                retries: int = 1,
                                log_complete: bool = True,
                                sig_idx: int = 1,
                                proc_input: Optional[str] = None
                                ) -> str:
        async with self.run_lock:
            self.factory.add_running_command(self)
            retries = max(1, retries)
            stdin: Optional[bytes] = None
            if proc_input is not None:
                stdin = proc_input.encode()
            while retries > 0:
                self._reset_command_data()
                timed_out = False
                stdout = stderr = b""
                if await self._create_subprocess(has_input=stdin is not None):
                    assert self.proc is not None
                    try:
                        ret = self.proc.communicate(input=stdin)
                        stdout, stderr = await asyncio.wait_for(
                            ret, timeout=timeout)
                    except asyncio.TimeoutError:
                        complete = False
                        timed_out = True
                        await self.cancel(sig_idx)
                    else:
                        complete = not self.cancelled
                        if self.log_stderr and stderr:
                            logging.info(
                                f"{self.command[0]}: "
                                f"{stderr.decode(errors='ignore')}")
                    if self._check_proc_success(complete, log_complete):
                        self.factory.remove_running_command(self)
                        return stdout.decode(errors='ignore').rstrip("\n")
                    if stdout:
                        logging.debug(
                            f"Shell command '{self.name}' output:"
                            f"\n{stdout.decode(errors='ignore')}")
                    if self.cancelled and not timed_out:
                        break
                retries -= 1
                await asyncio.sleep(.5)
            self.factory.remove_running_command(self)
            raise ShellCommandError(
                f"Error running shell command: '{self.command}'",
                self.return_code, stdout, stderr)

    async def _create_subprocess(self,
                                 use_callbacks: bool = False,
                                 has_input: bool = False
                                 ) -> bool:
        loop = asyncio.get_running_loop()

        def protocol_factory():
            return ShellCommandProtocol(
                limit=2**20, loop=loop, program_name=self.command[0],
                std_out_cb=self.std_out_cb, std_err_cb=self.std_err_cb,
                log_stderr=self.log_stderr)
        try:
            stdpipe: Optional[int] = None
            if has_input:
                stdpipe = asyncio.subprocess.PIPE
            if self.std_err_cb is not None or self.log_stderr:
                errpipe = asyncio.subprocess.PIPE
            else:
                errpipe = asyncio.subprocess.STDOUT
            if use_callbacks:
                transport, protocol = await loop.subprocess_exec(
                    protocol_factory, *self.command, stdin=stdpipe,
                    stdout=asyncio.subprocess.PIPE,
                    stderr=errpipe, env=self.env, cwd=self.cwd)
                self.proc = asyncio.subprocess.Process(
                    transport, protocol, loop)
            else:
                self.proc = await asyncio.create_subprocess_exec(
                    *self.command, stdin=stdpipe,
                    stdout=asyncio.subprocess.PIPE,
                    stderr=errpipe, env=self.env, cwd=self.cwd)
        except Exception:
            logging.exception(
                f"shell_command: Command ({self.name}) failed")
            return False
        return True

    def _check_proc_success(self,
                            complete: bool,
                            log_complete: bool
                            ) -> bool:
        assert self.proc is not None
        self.return_code = self.proc.returncode
        success = self.return_code == 0 and complete
        if success:
            msg = f"Command ({self.name}) successfully finished"
        elif self.cancelled:
            msg = f"Command ({self.name}) cancelled"
        elif not complete:
            msg = f"Command ({self.name}) timed out"
        else:
            msg = f"Command ({self.name}) exited with return code" \
                  f" {self.return_code}"
        if log_complete:
            logging.info(msg)
        return success


class ShellCommandFactory:
    error = ShellCommandError
    def __init__(self, config: ConfigHelper) -> None:
        self.running_commands: Set[ShellCommand] = set()

    def add_running_command(self, cmd: ShellCommand) -> None:
        self.running_commands.add(cmd)

    def remove_running_command(self, cmd: ShellCommand) -> None:
        try:
            self.running_commands.remove(cmd)
        except KeyError:
            pass

    def build_shell_command(self,
                            cmd: str,
                            callback: OutputCallback = None,
                            std_err_callback: OutputCallback = None,
                            env: Optional[Dict[str, str]] = None,
                            log_stderr: bool = False,
                            cwd: Optional[str] = None
                            ) -> ShellCommand:
        return ShellCommand(self, cmd, callback, std_err_callback, env,
                            log_stderr, cwd)

    def exec_cmd(self,
                 cmd: str,
                 timeout: float = 2.,
                 retries: int = 1,
                 sig_idx: int = 1,
                 proc_input: Optional[str] = None,
                 log_complete: bool = True,
                 log_stderr: bool = False,
                 env: Optional[Dict[str, str]] = None,
                 cwd: Optional[str] = None
                 ) -> Awaitable:
        scmd = ShellCommand(self, cmd, None, None, env,
                            log_stderr, cwd)
        coro = scmd.run_with_response(timeout, retries, log_complete,
                                      sig_idx, proc_input)
        return asyncio.create_task(coro)

    async def close(self) -> None:
        for cmd in self.running_commands:
            await cmd.cancel()


def load_component(config: ConfigHelper) -> ShellCommandFactory:
    return ShellCommandFactory(config)
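A minimal usage sketch from inside another component, mirroring how proc_stats consumes this factory above. The "uname -a" command is illustrative; self.server and config are assumed to be wired as in the other components in this diff:

# Hypothetical snippet inside a component's async method
shell_cmd: ShellCommandFactory = self.server.load_component(
    config, "shell_command")
scmd = shell_cmd.build_shell_command("uname -a")
try:
    # retries and timeout map directly to run_with_response above
    resp = await scmd.run_with_response(timeout=2., retries=2)
    logging.info(f"Kernel: {resp}")
except shell_cmd.error as err:
    # ShellCommandError carries return_code, stdout, and stderr
    logging.info(f"uname failed, return code: {err.return_code}")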
87
moonraker/components/template.py
Normal file
@@ -0,0 +1,87 @@
# Template Factory helper
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
from __future__ import annotations
import logging
import jinja2
import json

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Dict
)

if TYPE_CHECKING:
    from moonraker import Server
    from confighelper import ConfigHelper
    from .secrets import Secrets


class TemplateFactory:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        secrets: Secrets = self.server.load_component(config, 'secrets')
        self.jenv = jinja2.Environment('{%', '%}', '{', '}')
        self.async_env = jinja2.Environment('{%', '%}', '{', '}',
                                            enable_async=True)
        self.jenv.add_extension("jinja2.ext.do")
        self.jenv.filters['fromjson'] = json.loads
        self.async_env.add_extension("jinja2.ext.do")
        self.async_env.filters['fromjson'] = json.loads
        self.add_environment_global('raise_error', self._raise_error)
        self.add_environment_global('secrets', secrets)

    def add_environment_global(self, name: str, value: Any):
        if name in self.jenv.globals:
            raise self.server.error(
                f"Jinja 2 environment already contains global {name}")
        self.jenv.globals[name] = value
        self.async_env.globals[name] = value

    def _raise_error(self, err_msg: str, err_code: int = 400) -> None:
        raise self.server.error(err_msg, err_code)

    def create_template(self,
                        source: str,
                        is_async: bool = False
                        ) -> JinjaTemplate:
        env = self.async_env if is_async else self.jenv
        try:
            template = env.from_string(source)
        except Exception:
            logging.exception(f"Error creating template from source:\n{source}")
            raise
        return JinjaTemplate(source, self.server, template, is_async)


class JinjaTemplate:
    def __init__(self,
                 source: str,
                 server: Server,
                 template: jinja2.Template,
                 is_async: bool
                 ) -> None:
        self.server = server
        self.orig_source = source
        self.template = template
        self.is_async = is_async

    def __str__(self) -> str:
        return self.orig_source

    def render(self, context: Dict[str, Any] = {}) -> str:
        if self.is_async:
            raise self.server.error(
                "Cannot render async templates with the render() method"
                ", use render_async()")
        return self.template.render(context).strip()

    async def render_async(self, context: Dict[str, Any] = {}) -> str:
        ret = await self.template.render_async(context)
        return ret.strip()


def load_component(config: ConfigHelper) -> TemplateFactory:
    return TemplateFactory(config)
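Note the non-standard delimiters configured above: blocks still use {% %}, but variables use single braces { } instead of Jinja's default {{ }}. A short rendering sketch under that convention; the 'template' variable and the source string are hypothetical:

# Hypothetical usage; 'template' is a TemplateFactory loaded via
# self.server.load_component(config, 'template')
source = "state: { 'on' if power_on else 'off' }"
tmpl = template.create_template(source)
print(tmpl.render({'power_on': True}))  # -> "state: on"

Since 'secrets' is registered as an environment global, the same single-brace syntax can pull values from the secrets component, e.g. { secrets.get('some_section') }.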
15
moonraker/components/update_manager/__init__.py
Normal file
@@ -0,0 +1,15 @@
# Package definition for the update_manager
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
from . import update_manager as um

from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from confighelper import ConfigHelper


def load_component(config: ConfigHelper) -> um.UpdateManager:
    return um.load_component(config)
301
moonraker/components/update_manager/app_deploy.py
Normal file
@@ -0,0 +1,301 @@
# Deploy updates for applications managed by Moonraker
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
import pathlib
import shutil
import hashlib
import logging
from .base_deploy import BaseDeploy

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Optional,
    Union,
    Dict,
    List,
)
if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from .update_manager import CommandHelper
    from ..machine import Machine

SUPPORTED_CHANNELS = {
    "zip": ["stable", "beta"],
    "git_repo": ["dev", "beta"]
}
TYPE_TO_CHANNEL = {
    "zip": "stable",
    "zip_beta": "beta",
    "git_repo": "dev"
}


class AppDeploy(BaseDeploy):
    def __init__(self, config: ConfigHelper, cmd_helper: CommandHelper) -> None:
        super().__init__(config, cmd_helper, prefix="Application")
        self.config = config
        self.debug = self.cmd_helper.is_debug_enabled()
        type_choices = list(TYPE_TO_CHANNEL.keys())
        self.type = config.get('type').lower()
        if self.type not in type_choices:
            raise config.error(
                f"Config Error: Section [{config.get_name()}], Option "
                f"'type: {self.type}': value must be one "
                f"of the following choices: {type_choices}"
            )
        self.channel = config.get(
            "channel", TYPE_TO_CHANNEL[self.type]
        )
        if self.type == "zip_beta":
            self.server.add_warning(
                f"Config Section [{config.get_name()}], Option 'type: "
                "zip_beta', value 'zip_beta' is deprecated. Set 'type' "
                "to zip and 'channel' to 'beta'")
            self.type = "zip"
        self.path = pathlib.Path(
            config.get('path')).expanduser().resolve()
        executable = config.get('env', None)
        if self.channel not in SUPPORTED_CHANNELS[self.type]:
            raise config.error(
                f"Invalid Channel '{self.channel}' for config "
                f"section [{config.get_name()}], type: {self.type}")
        self._verify_path(config, 'path', self.path)
        self.executable: Optional[pathlib.Path] = None
        self.pip_exe: Optional[pathlib.Path] = None
        self.venv_args: Optional[str] = None
        if executable is not None:
            self.executable = pathlib.Path(executable).expanduser()
            self.pip_exe = self.executable.parent.joinpath("pip")
            if not self.pip_exe.exists():
                self.server.add_warning(
                    f"Update Manager {self.name}: Unable to locate pip "
                    "executable")
            self._verify_path(config, 'env', self.executable)
            self.venv_args = config.get('venv_args', None)

        self.info_tags: List[str] = config.getlist("info_tags", [])
        self.managed_services: List[str] = []
        svc_default = []
        if config.getboolean("is_system_service", True):
            svc_default.append(self.name)
        svc_choices = [self.name, "klipper", "moonraker"]
        services: List[str] = config.getlist(
            "managed_services", svc_default, separator=None
        )
        for svc in services:
            if svc not in svc_choices:
                raw = " ".join(services)
                self.server.add_warning(
                    f"[{config.get_name()}]: Option 'managed_services: {raw}' "
                    f"contains an invalid value '{svc}'. All values must be "
                    f"one of the following choices: {svc_choices}"
                )
                break
        for svc in svc_choices:
            if svc in services and svc not in self.managed_services:
                self.managed_services.append(svc)
        logging.debug(
            f"Extension {self.name} managed services: {self.managed_services}"
        )
        # We need to fetch all potential options for an Application. Not
        # all options apply to each subtype, however we can't limit the
        # options in children if we want to switch between channels and
        # satisfy the confighelper's requirements.
        self.moved_origin: Optional[str] = config.get('moved_origin', None)
        self.origin: str = config.get('origin')
        self.primary_branch = config.get("primary_branch", "master")
        self.npm_pkg_json: Optional[pathlib.Path] = None
        if config.getboolean("enable_node_updates", False):
            self.npm_pkg_json = self.path.joinpath("package-lock.json")
            self._verify_path(config, 'enable_node_updates', self.npm_pkg_json)
        self.python_reqs: Optional[pathlib.Path] = None
        if self.executable is not None:
            self.python_reqs = self.path.joinpath(config.get("requirements"))
            self._verify_path(config, 'requirements', self.python_reqs)
        self.install_script: Optional[pathlib.Path] = None
        install_script = config.get('install_script', None)
        if install_script is not None:
            self.install_script = self.path.joinpath(install_script).resolve()
            self._verify_path(config, 'install_script', self.install_script)

    @staticmethod
    def _is_git_repo(app_path: Union[str, pathlib.Path]) -> bool:
        if isinstance(app_path, str):
            app_path = pathlib.Path(app_path).expanduser()
        return app_path.joinpath('.git').exists()

    async def initialize(self) -> Dict[str, Any]:
        storage = await super().initialize()
        self.need_channel_update = storage.get('need_channel_update', False)
        self._is_valid = storage.get('is_valid', False)
        return storage

    def _verify_path(self,
                     config: ConfigHelper,
                     option: str,
                     file_path: pathlib.Path
                     ) -> None:
        if not file_path.exists():
            raise config.error(
                f"Invalid path for option `{option}` in section "
                f"[{config.get_name()}]: Path `{file_path}` does not exist")

    def check_need_channel_swap(self) -> bool:
        return self.need_channel_update

    def get_configured_type(self) -> str:
        return self.type

    def check_same_paths(self,
                         app_path: Union[str, pathlib.Path],
                         executable: Union[str, pathlib.Path]
                         ) -> bool:
        if isinstance(app_path, str):
            app_path = pathlib.Path(app_path)
        if isinstance(executable, str):
            executable = pathlib.Path(executable)
        app_path = app_path.expanduser()
        executable = executable.expanduser()
        if self.executable is None:
            return False
        try:
            return self.path.samefile(app_path) and \
                self.executable.samefile(executable)
        except Exception:
            return False

    async def recover(self,
                      hard: bool = False,
                      force_dep_update: bool = False
                      ) -> None:
        raise NotImplementedError

    async def reinstall(self):
        raise NotImplementedError

    async def restart_service(self):
        if not self.managed_services:
            return
        is_full = self.cmd_helper.is_full_update()
        for svc in self.managed_services:
            if is_full and svc != self.name:
                self.notify_status(f"Service {svc} restart postponed...")
                self.cmd_helper.add_pending_restart(svc)
                continue
            self.cmd_helper.remove_pending_restart(svc)
            self.notify_status(f"Restarting service {svc}...")
            if svc == "moonraker":
                # Launch restart async so the request can return
                # before the server restarts
                event_loop = self.server.get_event_loop()
                event_loop.delay_callback(.1, self._do_restart, svc)
            else:
                await self._do_restart(svc)

    async def _do_restart(self, svc_name: str) -> None:
        machine: Machine = self.server.lookup_component("machine")
        try:
            await machine.do_service_action("restart", svc_name)
        except Exception:
            if svc_name == "moonraker":
                # We will always get an error when restarting moonraker
                # from within the child process, so ignore it
                return
            raise self.log_exc("Error restarting service")

    def get_update_status(self) -> Dict[str, Any]:
        return {
            'channel': self.channel,
            'debug_enabled': self.debug,
            'need_channel_update': self.need_channel_update,
            'is_valid': self._is_valid,
            'configured_type': self.type,
            'info_tags': self.info_tags
        }

    def get_persistent_data(self) -> Dict[str, Any]:
        storage = super().get_persistent_data()
        storage['is_valid'] = self._is_valid
        storage['need_channel_update'] = self.need_channel_update
        return storage

    async def _get_file_hash(self,
                             filename: Optional[pathlib.Path]
                             ) -> Optional[str]:
        if filename is None or not filename.is_file():
            return None

        def hash_func(f: pathlib.Path) -> str:
            return hashlib.sha256(f.read_bytes()).hexdigest()
        try:
            event_loop = self.server.get_event_loop()
            return await event_loop.run_in_thread(hash_func, filename)
        except Exception:
            return None

    async def _check_need_update(self,
                                 prev_hash: Optional[str],
                                 filename: Optional[pathlib.Path]
                                 ) -> bool:
        cur_hash = await self._get_file_hash(filename)
        if prev_hash is None or cur_hash is None:
            return False
        return prev_hash != cur_hash

    async def _install_packages(self, package_list: List[str]) -> None:
        self.notify_status("Installing system dependencies...")
        # Install packages with apt-get
        try:
            await self.cmd_helper.install_packages(
                package_list, timeout=3600., notify=True)
        except Exception:
            self.log_exc("Error updating packages")
            return

    async def _update_virtualenv(self,
                                 requirements: Union[pathlib.Path, List[str]]
                                 ) -> None:
        if self.pip_exe is None:
            return
        # Update python dependencies
        if isinstance(requirements, pathlib.Path):
            if not requirements.is_file():
                self.log_info(
                    f"Invalid path to requirements_file '{requirements}'")
                return
            args = f"-r {requirements}"
        else:
            args = " ".join(requirements)
        self.notify_status("Updating python packages...")
        try:
            # First attempt to update pip
            # await self.cmd_helper.run_cmd(
            #     f"{self.pip_exe} install -U pip", timeout=1200., notify=True,
            #     retries=3)
            await self.cmd_helper.run_cmd(
                f"{self.pip_exe} install {args}", timeout=1200., notify=True,
                retries=3)
        except Exception:
            self.log_exc("Error updating python requirements")

    async def _build_virtualenv(self) -> None:
        if self.pip_exe is None or self.venv_args is None:
            return
        bin_dir = self.pip_exe.parent
        env_path = bin_dir.parent.resolve()
        self.notify_status(f"Creating virtualenv at: {env_path}...")
        if env_path.exists():
            shutil.rmtree(env_path)
        try:
            await self.cmd_helper.run_cmd(
                f"virtualenv {self.venv_args} {env_path}", timeout=300.)
        except Exception:
            self.log_exc("Error creating virtualenv")
            return
        if not self.pip_exe.exists():
            raise self.log_exc("Failed to create new virtualenv", False)
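For context, a moonraker.conf section that would exercise the options parsed in AppDeploy.__init__ above. Every option name maps to a config.get* call in that constructor; the extension name, repo URL, and paths are purely illustrative:

# Hypothetical [update_manager] client section
[update_manager my_extension]
type: git_repo
channel: dev
path: ~/my_extension
origin: https://github.com/example/my_extension.git
env: ~/my_extension-env/bin/python
requirements: scripts/requirements.txt
install_script: scripts/install.sh
managed_services: my_extension
info_tags:
    desc=My Extension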
62
moonraker/components/update_manager/base_config.py
Normal file
@@ -0,0 +1,62 @@
# Moonraker/Klipper update configuration
#
# Copyright (C) 2022 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
import os
import sys
import copy
from typing import (
    TYPE_CHECKING,
    Dict
)

if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from components.database import MoonrakerDatabase

MOONRAKER_PATH = os.path.normpath(os.path.join(
    os.path.dirname(__file__), "../../.."))
KLIPPER_DEFAULT_PATH = os.path.expanduser("~/klipper")
KLIPPER_DEFAULT_EXEC = os.path.expanduser("~/klippy-env/bin/python")

BASE_CONFIG: Dict[str, Dict[str, str]] = {
    "moonraker": {
        "origin": "https://github.com/arksine/moonraker.git",
        "requirements": "scripts/moonraker-requirements.txt",
        "venv_args": "-p python3",
        "install_script": "scripts/install-moonraker.sh",
        "host_repo": "arksine/moonraker",
        "env": sys.executable,
        "path": MOONRAKER_PATH,
        "managed_services": "moonraker"
    },
    "klipper": {
        "moved_origin": "https://github.com/kevinoconnor/klipper.git",
        "origin": "https://github.com/Klipper3d/klipper.git",
        "requirements": "scripts/klippy-requirements.txt",
        "venv_args": "-p python2",
        "install_script": "scripts/install-octopi.sh",
        "host_repo": "arksine/moonraker",
        "managed_services": "klipper"
    }
}


def get_base_configuration(config: ConfigHelper, channel: str) -> ConfigHelper:
    server = config.get_server()
    base_cfg = copy.deepcopy(BASE_CONFIG)
    app_type = "zip" if channel == "stable" else "git_repo"
    base_cfg["moonraker"]["channel"] = channel
    base_cfg["moonraker"]["type"] = app_type
    base_cfg["klipper"]["channel"] = channel
    base_cfg["klipper"]["type"] = app_type
    db: MoonrakerDatabase = server.lookup_component('database')
    base_cfg["klipper"]["path"] = db.get_item(
        "moonraker", "update_manager.klipper_path", KLIPPER_DEFAULT_PATH
    ).result()
    base_cfg["klipper"]["env"] = db.get_item(
        "moonraker", "update_manager.klipper_exec", KLIPPER_DEFAULT_EXEC
    ).result()
    return config.read_supplemental_dict(base_cfg)
94
moonraker/components/update_manager/base_deploy.py
Normal file
@@ -0,0 +1,94 @@
# Base Deployment Interface
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

from __future__ import annotations
import logging
import time

from typing import TYPE_CHECKING, Dict, Any, Optional
if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from utils import ServerError
    from .update_manager import CommandHelper


class BaseDeploy:
    def __init__(self,
                 config: ConfigHelper,
                 cmd_helper: CommandHelper,
                 name: Optional[str] = None,
                 prefix: str = "",
                 cfg_hash: Optional[str] = None
                 ) -> None:
        if name is None:
            name = config.get_name().split()[-1]
        self.name = name
        if prefix:
            prefix = f"{prefix} {self.name}: "
        self.prefix = prefix
        self.server = config.get_server()
        self.cmd_helper = cmd_helper
        self.refresh_interval = cmd_helper.get_refresh_interval()
        refresh_interval = config.getint('refresh_interval', None)
        if refresh_interval is not None:
            self.refresh_interval = refresh_interval * 60 * 60
        if cfg_hash is None:
            cfg_hash = config.get_hash().hexdigest()
        self.cfg_hash = cfg_hash

    async def initialize(self) -> Dict[str, Any]:
        umdb = self.cmd_helper.get_umdb()
        storage: Dict[str, Any] = await umdb.get(self.name, {})
        self.last_refresh_time: float = storage.get('last_refresh_time', 0.0)
        self.last_cfg_hash: str = storage.get('last_config_hash', "")
        return storage

    def needs_refresh(self) -> bool:
        next_refresh_time = self.last_refresh_time + self.refresh_interval
        return (
            self.cfg_hash != self.last_cfg_hash or
            time.time() > next_refresh_time
        )

    def get_last_refresh_time(self) -> float:
        return self.last_refresh_time

    async def refresh(self) -> None:
        pass

    async def update(self) -> bool:
        return False

    def get_update_status(self) -> Dict[str, Any]:
        return {}

    def get_persistent_data(self) -> Dict[str, Any]:
        return {
            'last_config_hash': self.cfg_hash,
            'last_refresh_time': self.last_refresh_time
        }

    def _save_state(self) -> None:
        umdb = self.cmd_helper.get_umdb()
        self.last_refresh_time = time.time()
        self.last_cfg_hash = self.cfg_hash
        umdb[self.name] = self.get_persistent_data()

    def log_exc(self, msg: str, traceback: bool = True) -> ServerError:
        log_msg = f"{self.prefix}{msg}"
        if traceback:
            logging.exception(log_msg)
        else:
            logging.info(log_msg)
        return self.server.error(msg)

    def log_info(self, msg: str) -> None:
        log_msg = f"{self.prefix}{msg}"
        logging.info(log_msg)

    def notify_status(self, msg: str, is_complete: bool = False) -> None:
        log_msg = f"{self.prefix}{msg}"
        logging.debug(log_msg)
        self.cmd_helper.notify_update_response(log_msg, is_complete)
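A minimal sketch of a deployer built on this interface, showing which hooks subclasses such as AppDeploy and GitDeploy below override. The class itself and its behavior are hypothetical; only the BaseDeploy methods it calls come from the code above:

class ExampleDeploy(BaseDeploy):
    # Hypothetical deployer that never finds an update
    async def refresh(self) -> None:
        # A real deployer would poll its remote here, then persist
        # state so it survives restarts
        self.log_info("Refreshing example deploy state")
        self._save_state()

    async def update(self) -> bool:
        self.notify_status("Nothing to update", is_complete=True)
        return False  # False indicates no update was applied

    def get_update_status(self) -> Dict[str, Any]:
        return {'configured_type': 'example'}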
991
moonraker/components/update_manager/git_deploy.py
Normal file
@@ -0,0 +1,991 @@
|
||||
# Git Deployment implementation
|
||||
#
|
||||
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
|
||||
#
|
||||
# This file may be distributed under the terms of the GNU GPLv3 license.
|
||||
|
||||
from __future__ import annotations
|
||||
import asyncio
|
||||
import os
|
||||
import pathlib
|
||||
import shutil
|
||||
import re
|
||||
import logging
|
||||
from .app_deploy import AppDeploy
|
||||
|
||||
# Annotation imports
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Tuple,
|
||||
Optional,
|
||||
Dict,
|
||||
List,
|
||||
)
|
||||
if TYPE_CHECKING:
|
||||
from confighelper import ConfigHelper
|
||||
from components import shell_command
|
||||
from .update_manager import CommandHelper
|
||||
|
||||
class GitDeploy(AppDeploy):
|
||||
def __init__(self, config: ConfigHelper, cmd_helper: CommandHelper) -> None:
|
||||
super().__init__(config, cmd_helper)
|
||||
self.repo = GitRepo(
|
||||
cmd_helper, self.path, self.name, self.origin,
|
||||
self.moved_origin, self.channel
|
||||
)
|
||||
if self.type != 'git_repo':
|
||||
self.need_channel_update = True
|
||||
|
||||
@staticmethod
|
||||
async def from_application(app: AppDeploy) -> GitDeploy:
|
||||
new_app = GitDeploy(app.config, app.cmd_helper)
|
||||
await new_app.reinstall()
|
||||
return new_app
|
||||
|
||||
async def initialize(self) -> Dict[str, Any]:
|
||||
storage = await super().initialize()
|
||||
self.repo.restore_state(storage)
|
||||
return storage
|
||||
|
||||
async def refresh(self) -> None:
|
||||
try:
|
||||
await self._update_repo_state()
|
||||
except Exception:
|
||||
logging.exception("Error Refreshing git state")
|
||||
|
||||
async def _update_repo_state(self, need_fetch: bool = True) -> None:
|
||||
self._is_valid = False
|
||||
await self.repo.initialize(need_fetch=need_fetch)
|
||||
self.log_info(
|
||||
f"Channel: {self.channel}, "
|
||||
f"Need Channel Update: {self.need_channel_update}"
|
||||
)
|
||||
invalids = self.repo.report_invalids(self.primary_branch)
|
||||
if invalids:
|
||||
msgs = '\n'.join(invalids)
|
||||
self.log_info(
|
||||
f"Repo validation checks failed:\n{msgs}")
|
||||
if self.debug:
|
||||
self._is_valid = True
|
||||
self.log_info(
|
||||
"Repo debug enabled, overriding validity checks")
|
||||
else:
|
||||
self.log_info("Updates on repo disabled")
|
||||
else:
|
||||
self._is_valid = True
|
||||
self.log_info("Validity check for git repo passed")
|
||||
self._save_state()
|
||||
|
||||
async def update(self) -> bool:
|
||||
await self.repo.wait_for_init()
|
||||
if not self._is_valid:
|
||||
raise self.log_exc("Update aborted, repo not valid", False)
|
||||
if self.repo.is_dirty():
|
||||
raise self.log_exc(
|
||||
"Update aborted, repo has been modified", False)
|
||||
if self.repo.is_current():
|
||||
# No need to update
|
||||
return False
|
||||
self.cmd_helper.notify_update_response(
|
||||
f"Updating Application {self.name}...")
|
||||
inst_hash = await self._get_file_hash(self.install_script)
|
||||
pyreqs_hash = await self._get_file_hash(self.python_reqs)
|
||||
npm_hash = await self._get_file_hash(self.npm_pkg_json)
|
||||
await self._pull_repo()
|
||||
# Check Semantic Versions
|
||||
await self._update_dependencies(inst_hash, pyreqs_hash, npm_hash)
|
||||
# Refresh local repo state
|
||||
await self._update_repo_state(need_fetch=False)
|
||||
await self.restart_service()
|
||||
self.notify_status("Update Finished...", is_complete=True)
|
||||
return True
|
||||
|
||||
async def recover(self,
|
||||
hard: bool = False,
|
||||
force_dep_update: bool = False
|
||||
) -> None:
|
||||
self.notify_status("Attempting Repo Recovery...")
|
||||
inst_hash = await self._get_file_hash(self.install_script)
|
||||
pyreqs_hash = await self._get_file_hash(self.python_reqs)
|
||||
npm_hash = await self._get_file_hash(self.npm_pkg_json)
|
||||
|
||||
if hard:
|
||||
await self.repo.clone()
|
||||
await self._update_repo_state()
|
||||
else:
|
||||
self.notify_status("Resetting Git Repo...")
|
||||
await self.repo.reset()
|
||||
await self._update_repo_state()
|
||||
|
||||
if self.repo.is_dirty() or not self._is_valid:
|
||||
raise self.server.error(
|
||||
"Recovery attempt failed, repo state not pristine", 500)
|
||||
await self._update_dependencies(inst_hash, pyreqs_hash, npm_hash,
|
||||
force=force_dep_update)
|
||||
await self.restart_service()
|
||||
self.notify_status("Reinstall Complete", is_complete=True)
|
||||
|
||||
async def reinstall(self):
|
||||
# Clear the persistent storage prior to a channel swap.
|
||||
# After the next update is complete new data will be
|
||||
# restored.
|
||||
umdb = self.cmd_helper.get_umdb()
|
||||
await umdb.pop(self.name, None)
|
||||
await self.initialize()
|
||||
await self.recover(True, True)
|
||||
|
||||
def get_update_status(self) -> Dict[str, Any]:
|
||||
status = super().get_update_status()
|
||||
status.update(self.repo.get_repo_status())
|
||||
return status
|
||||
|
||||
def get_persistent_data(self) -> Dict[str, Any]:
|
||||
storage = super().get_persistent_data()
|
||||
storage.update(self.repo.get_persistent_data())
|
||||
return storage
|
||||
|
||||
async def _pull_repo(self) -> None:
|
||||
self.notify_status("Updating Repo...")
|
||||
try:
|
||||
await self.repo.fetch()
|
||||
if self.repo.is_detached():
|
||||
await self.repo.checkout()
|
||||
elif await self.repo.check_diverged():
|
||||
self.notify_status(
|
||||
"Repo has diverged, attempting git reset"
|
||||
)
|
||||
await self.repo.reset()
|
||||
else:
|
||||
await self.repo.pull()
|
||||
except Exception:
|
||||
raise self.log_exc("Error updating git repo")
|
||||
|
||||
async def _update_dependencies(self,
|
||||
inst_hash: Optional[str],
|
||||
pyreqs_hash: Optional[str],
|
||||
npm_hash: Optional[str],
|
||||
force: bool = False
|
||||
) -> None:
|
||||
ret = await self._check_need_update(inst_hash, self.install_script)
|
||||
if force or ret:
|
||||
package_list = await self._parse_install_script()
|
||||
if package_list is not None:
|
||||
await self._install_packages(package_list)
|
||||
ret = await self._check_need_update(pyreqs_hash, self.python_reqs)
|
||||
if force or ret:
|
||||
if self.python_reqs is not None:
|
||||
await self._update_virtualenv(self.python_reqs)
|
||||
ret = await self._check_need_update(npm_hash, self.npm_pkg_json)
|
||||
if force or ret:
|
||||
if self.npm_pkg_json is not None:
|
||||
self.notify_status("Updating Node Packages...")
|
||||
try:
|
||||
await self.cmd_helper.run_cmd(
|
||||
"npm ci --only=prod", notify=True, timeout=600.,
|
||||
cwd=str(self.path))
|
||||
except Exception:
|
||||
self.notify_status("Node Package Update failed")
|
||||
|
||||
async def _parse_install_script(self) -> Optional[List[str]]:
|
||||
if self.install_script is None:
|
||||
return None
|
||||
# Open install file file and read
|
||||
inst_path: pathlib.Path = self.install_script
|
||||
if not inst_path.is_file():
|
||||
self.log_info(f"Unable to open install script: {inst_path}")
|
||||
return None
|
||||
event_loop = self.server.get_event_loop()
|
||||
data = await event_loop.run_in_thread(inst_path.read_text)
|
||||
plines: List[str] = re.findall(r'PKGLIST="(.*)"', data)
|
||||
plines = [p.lstrip("${PKGLIST}").strip() for p in plines]
|
||||
packages: List[str] = []
|
||||
for line in plines:
|
||||
packages.extend(line.split())
|
||||
if not packages:
|
||||
self.log_info(f"No packages found in script: {inst_path}")
|
||||
return None
|
||||
logging.debug(f"Repo {self.name}: Detected Packages: {repr(packages)}")
|
||||
return packages
|
||||
|
||||
|
||||
GIT_ASYNC_TIMEOUT = 300.
|
||||
GIT_ENV_VARS = {
|
||||
'GIT_HTTP_LOW_SPEED_LIMIT': "1000",
|
||||
'GIT_HTTP_LOW_SPEED_TIME ': "20"
|
||||
}
|
||||
GIT_MAX_LOG_CNT = 100
|
||||
GIT_LOG_FMT = (
|
||||
"\"sha:%H%x1Dauthor:%an%x1Ddate:%ct%x1Dsubject:%s%x1Dmessage:%b%x1E\""
|
||||
)
|
||||
GIT_OBJ_ERR = "fatal: loose object"
|
||||
GIT_REF_FMT = (
|
||||
"'%(if)%(*objecttype)%(then)%(*objecttype) (*objectname)"
|
||||
"%(else)%(objecttype) %(objectname)%(end) %(refname)'"
|
||||
)
|
||||
|
||||
class GitRepo:
|
||||
tag_r = re.compile(r"(v?\d+\.\d+\.\d+(-(alpha|beta)(\.\d+)?)?)(-\d+)?")
|
||||
def __init__(self,
|
||||
cmd_helper: CommandHelper,
|
||||
git_path: pathlib.Path,
|
||||
alias: str,
|
||||
origin_url: str,
|
||||
moved_origin_url: Optional[str],
|
||||
channel: str
|
||||
) -> None:
|
||||
self.server = cmd_helper.get_server()
|
||||
self.cmd_helper = cmd_helper
|
||||
self.alias = alias
|
||||
self.git_path = git_path
|
||||
git_dir = git_path.parent
|
||||
git_base = git_path.name
|
||||
self.backup_path = git_dir.joinpath(f".{git_base}_repo_backup")
|
||||
self.origin_url = origin_url
|
||||
self.moved_origin_url = moved_origin_url
|
||||
self.recovery_message = \
|
||||
f"""
|
||||
Manually restore via SSH with the following commands:
|
||||
sudo service {self.alias} stop
|
||||
cd {git_dir}
|
||||
rm -rf {git_base}
|
||||
git clone {self.origin_url}
|
||||
sudo service {self.alias} start
|
||||
"""
|
||||
|
||||
self.init_evt: Optional[asyncio.Event] = None
|
||||
self.initialized: bool = False
|
||||
self.git_operation_lock = asyncio.Lock()
|
||||
self.fetch_timeout_handle: Optional[asyncio.Handle] = None
|
||||
self.fetch_input_recd: bool = False
|
||||
self.is_beta = channel == "beta"
|
||||
self.bound_repo = None
|
||||
if self.is_beta and self.alias == "klipper":
|
||||
# Bind Klipper updates to Moonraker
|
||||
self.bound_repo = "moonraker"
|
||||
|
||||
def restore_state(self, storage: Dict[str, Any]) -> None:
|
||||
self.valid_git_repo: bool = storage.get('repo_valid', False)
|
||||
self.git_owner: str = storage.get('git_owner', "?")
|
||||
self.git_repo_name: str = storage.get('git_repo_name', "?")
|
||||
self.git_remote: str = storage.get('git_remote', "?")
|
||||
self.git_branch: str = storage.get('git_branch', "?")
|
||||
self.current_version: str = storage.get('current_version', "?")
|
||||
self.upstream_version: str = storage.get('upstream_version', "?")
|
||||
self.current_commit: str = storage.get('current_commit', "?")
|
||||
self.upstream_commit: str = storage.get('upstream_commit', "?")
|
||||
self.upstream_url: str = storage.get('upstream_url', "?")
|
||||
self.full_version_string: str = storage.get('full_version_string', "?")
|
||||
self.branches: List[str] = storage.get('branches', [])
|
||||
self.dirty: bool = storage.get('dirty', False)
|
||||
self.head_detached: bool = storage.get('head_detached', False)
|
||||
self.git_messages: List[str] = storage.get('git_messages', [])
|
||||
self.commits_behind: List[Dict[str, Any]] = storage.get(
|
||||
'commits_behind', [])
|
||||
self.tag_data: Dict[str, Any] = storage.get('tag_data', {})
|
||||
self.diverged: bool = storage.get("diverged", False)
|
||||
|
||||
def get_persistent_data(self) -> Dict[str, Any]:
|
||||
return {
|
||||
'repo_valid': self.valid_git_repo,
|
||||
'git_owner': self.git_owner,
|
||||
'git_repo_name': self.git_repo_name,
|
||||
'git_remote': self.git_remote,
|
||||
'git_branch': self.git_branch,
|
||||
'current_version': self.current_version,
|
||||
'upstream_version': self.upstream_version,
|
||||
'current_commit': self.current_commit,
|
||||
'upstream_commit': self.upstream_commit,
|
||||
'upstream_url': self.upstream_url,
|
||||
'full_version_string': self.full_version_string,
|
||||
'branches': self.branches,
|
||||
'dirty': self.dirty,
|
||||
'head_detached': self.head_detached,
|
||||
'git_messages': self.git_messages,
|
||||
'commits_behind': self.commits_behind,
|
||||
'tag_data': self.tag_data,
|
||||
'diverged': self.diverged
|
||||
}
|
||||
|
||||
async def initialize(self, need_fetch: bool = True) -> None:
|
||||
if self.init_evt is not None:
|
||||
# No need to initialize multiple requests
|
||||
await self.init_evt.wait()
|
||||
if self.initialized:
|
||||
return
|
||||
self.initialized = False
|
||||
self.init_evt = asyncio.Event()
|
||||
self.git_messages.clear()
|
||||
try:
|
||||
await self.update_repo_status()
|
||||
self._verify_repo()
|
||||
if not self.head_detached:
|
||||
# lookup remote via git config
|
||||
self.git_remote = await self.get_config_item(
|
||||
f"branch.{self.git_branch}.remote")
|
||||
|
||||
# Fetch the upstream url. If the repo has been moved,
|
||||
# set the new url
|
||||
self.upstream_url = await self.remote(f"get-url {self.git_remote}")
|
||||
if self.moved_origin_url is not None:
|
||||
origin = self.upstream_url.lower().strip()
|
||||
if not origin.endswith(".git"):
|
||||
origin += ".git"
|
||||
moved_origin = self.moved_origin_url.lower().strip()
|
||||
if not moved_origin.endswith(".git"):
|
||||
moved_origin += ".git"
|
||||
if origin == moved_origin:
|
||||
logging.info(
|
||||
f"Git Repo {self.alias}: Moved Repo Detected, Moving "
|
||||
f"from {self.upstream_url} to {self.origin_url}")
|
||||
need_fetch = True
|
||||
await self.remote(
|
||||
f"set-url {self.git_remote} {self.origin_url}")
|
||||
self.upstream_url = self.origin_url
|
||||
|
||||
if need_fetch:
|
||||
await self.fetch()
|
||||
self.diverged = await self.check_diverged()
|
||||
|
||||
# Populate list of current branches
|
||||
blist = await self.list_branches()
|
||||
self.branches = []
|
||||
for branch in blist:
|
||||
branch = branch.strip()
|
||||
if branch[0] == "*":
|
||||
branch = branch[2:]
|
||||
if branch[0] == "(":
|
||||
continue
|
||||
self.branches.append(branch)
|
||||
|
||||
# Parse GitHub Owner from URL
|
||||
owner_match = re.match(r"https?://[^/]+/([^/]+)", self.upstream_url)
|
||||
self.git_owner = "?"
|
||||
if owner_match is not None:
|
||||
self.git_owner = owner_match.group(1)
|
||||
|
||||
# Parse GitHub Repository Name from URL
|
||||
repo_match = re.match(r".*\/([^\.]*).*", self.upstream_url)
|
||||
self.git_repo_name = "?"
|
||||
if repo_match is not None:
|
||||
self.git_repo_name = repo_match.group(1)
|
||||
self.current_commit = await self.rev_parse("HEAD")
|
||||
git_desc = await self.describe(
|
||||
"--always --tags --long --dirty")
|
||||
self.full_version_string = git_desc.strip()
|
||||
self.dirty = git_desc.endswith("dirty")
|
||||
self.tag_data = {}
|
||||
if self.is_beta and self.bound_repo is None:
|
||||
await self._get_beta_versions(git_desc)
|
||||
else:
|
||||
await self._get_dev_versions(git_desc)
|
||||
|
||||
# Get Commits Behind
|
||||
self.commits_behind = []
|
||||
cbh = await self.get_commits_behind()
|
||||
if cbh:
|
||||
tagged_commits = await self.get_tagged_commits()
|
||||
debug_msg = '\n'.join([f"{k}: {v}" for k, v in
|
||||
tagged_commits.items()])
|
||||
logging.debug(f"Git Repo {self.alias}: Tagged Commits\n"
|
||||
f"{debug_msg}")
|
||||
for i, commit in enumerate(cbh):
|
||||
tag = tagged_commits.get(commit['sha'], None)
|
||||
if i < 30 or tag is not None:
|
||||
commit['tag'] = tag
|
||||
self.commits_behind.append(commit)
|
||||
|
||||
self.log_repo_info()
|
||||
except Exception:
|
||||
logging.exception(f"Git Repo {self.alias}: Initialization failure")
|
||||
raise
|
||||
else:
|
||||
self.initialized = True
|
||||
finally:
|
||||
self.init_evt.set()
|
||||
self.init_evt = None
|
||||
|
||||
async def _get_dev_versions(self, current_version: str) -> None:
|
||||
self.upstream_commit = await self.rev_parse(
|
||||
f"{self.git_remote}/{self.git_branch}")
|
||||
upstream_version = await self.describe(
|
||||
f"{self.git_remote}/{self.git_branch} "
|
||||
"--always --tags --long")
|
||||
# Get the latest tag as a fallback for shallow clones
|
||||
commit, tag = await self._parse_latest_tag()
|
||||
# Parse Version Info
|
||||
versions: List[str] = []
|
||||
for ver in [current_version, upstream_version]:
|
||||
tag_version = "?"
|
||||
ver_match = self.tag_r.match(ver)
|
||||
if ver_match:
|
||||
tag_version = ver_match.group()
|
||||
elif tag != "?":
|
||||
if len(versions) == 0:
|
||||
count = await self.rev_list(f"{tag}..HEAD --count")
|
||||
full_ver = f"{tag}-{count}-g{ver}-shallow"
|
||||
self.full_version_string = full_ver
|
||||
else:
|
||||
count = await self.rev_list(
|
||||
f"{tag}..{self.upstream_commit} --count")
|
||||
tag_version = f"{tag}-{count}"
|
||||
versions.append(tag_version)
|
||||
self.current_version, self.upstream_version = versions
|
||||
if self.bound_repo is not None:
|
||||
await self._get_bound_versions(self.current_version)
|
||||
|
||||
async def _get_beta_versions(self, current_version: str) -> None:
|
||||
upstream_commit, upstream_tag = await self._parse_latest_tag()
|
||||
ver_match = self.tag_r.match(current_version)
|
||||
current_tag = "?"
|
||||
if ver_match:
|
||||
current_tag = ver_match.group(1)
|
||||
elif upstream_tag != "?":
|
||||
count = await self.rev_list(f"{upstream_tag}..HEAD --count")
|
||||
full_ver = f"{upstream_tag}-{count}-g{current_version}-shallow"
|
||||
self.full_version_string = full_ver
|
||||
current_tag = upstream_tag
|
||||
self.upstream_commit = upstream_commit
|
||||
if current_tag == upstream_tag:
|
||||
self.upstream_commit = self.current_commit
|
||||
self.current_version = current_tag
|
||||
self.upstream_version = upstream_tag
|
||||
# Check the tag for annotations
|
||||
self.tag_data = await self.get_tag_data(upstream_tag)
|
||||
if self.tag_data:
|
||||
# TODO: need to force a repo update by resetting its refresh time?
|
||||
logging.debug(
|
||||
f"Git Repo {self.alias}: Found Tag Annotation: {self.tag_data}"
|
||||
)
|
||||
|
||||
async def _get_bound_versions(self, current_version: str) -> None:
|
||||
if self.bound_repo is None:
|
||||
return
|
||||
umdb = self.cmd_helper.get_umdb()
|
||||
key = f"{self.bound_repo}.tag_data"
|
||||
tag_data: Dict[str, Any] = await umdb.get(key, {})
|
||||
if tag_data.get("repo", "") != self.alias:
|
||||
logging.info(
|
||||
f"Git Repo {self.alias}: Invalid bound tag data: "
|
||||
f"{tag_data}"
|
||||
)
|
||||
return
|
||||
if tag_data["branch"] != self.git_branch:
|
||||
logging.info(f"Git Repo {self.alias}: Repo not on bound branch")
|
||||
return
|
||||
bound_vlist: List[int] = tag_data["version_as_list"]
|
||||
current_vlist = self._convert_semver(current_version)
|
||||
if self.full_version_string.endswith("shallow"):
|
||||
# We need to recalculate the commit count for shallow clones
|
||||
if current_vlist[:4] == bound_vlist[:4]:
|
||||
commit = tag_data["commit"]
|
||||
tag = current_version.split("-")[0]
|
||||
try:
|
||||
resp = await self.rev_list(f"{tag}..{commit} --count")
|
||||
count = int(resp)
|
||||
except Exception:
|
||||
count = 0
|
||||
bound_vlist[4] = count
|
||||
if current_vlist < bound_vlist:
|
||||
bound_ver_match = self.tag_r.match(tag_data["version"])
|
||||
if bound_ver_match is not None:
|
||||
self.upstream_commit = tag_data["commit"]
|
||||
self.upstream_version = bound_ver_match.group()
|
||||
else:
|
||||
# The repo is currently ahead of the bound tag/commit,
|
||||
# so pin the version
|
||||
self.upstream_commit = self.current_commit
|
||||
self.upstream_version = self.current_version
|
||||
|
||||
async def _parse_latest_tag(self) -> Tuple[str, str]:
|
||||
commit = tag = "?"
|
||||
try:
|
||||
commit = await self.rev_list("--tags --max-count=1")
|
||||
tag = await self.describe(f"--tags {commit}")
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
tag_match = self.tag_r.match(tag)
|
||||
if tag_match is not None:
|
||||
tag = tag_match.group(1)
|
||||
else:
|
||||
tag = "?"
|
||||
return commit, tag
|
||||
|
||||
async def wait_for_init(self) -> None:
|
||||
if self.init_evt is not None:
|
||||
await self.init_evt.wait()
|
||||
if not self.initialized:
|
||||
raise self.server.error(
|
||||
f"Git Repo {self.alias}: Initialization failure")
|
||||
|
||||
async def update_repo_status(self) -> bool:
|
||||
async with self.git_operation_lock:
|
||||
if not self.git_path.joinpath(".git").is_dir():
|
||||
logging.info(
|
||||
f"Git Repo {self.alias}: path '{self.git_path}'"
|
||||
" is not a valid git repo")
|
||||
return False
|
||||
await self._wait_for_lock_release()
|
||||
self.valid_git_repo = False
|
||||
retries = 3
|
||||
while retries:
|
||||
self.git_messages.clear()
|
||||
try:
|
||||
resp: Optional[str] = await self._run_git_cmd(
|
||||
"status -u no", retries=1)
|
||||
except Exception:
|
||||
retries -= 1
|
||||
resp = None
|
||||
# Attempt to recover from "loose object" error
|
||||
if retries and GIT_OBJ_ERR in "\n".join(self.git_messages):
|
||||
ret = await self._repair_loose_objects()
|
||||
if not ret:
|
||||
# Since we are unable to recover, immediately
|
||||
# return
|
||||
return False
|
||||
else:
|
||||
break
|
||||
if resp is None:
|
||||
return False
|
||||
resp = resp.strip().split('\n', 1)[0]
|
||||
self.head_detached = resp.startswith("HEAD detached")
|
||||
branch_info = resp.split()[-1]
|
||||
if self.head_detached:
|
||||
bparts = branch_info.split("/", 1)
|
||||
if len(bparts) == 2:
|
||||
self.git_remote, self.git_branch = bparts
|
||||
else:
|
||||
if self.git_remote == "?":
|
||||
msg = "Resolve by manually checking out" \
|
||||
" a branch via SSH."
|
||||
else:
|
||||
msg = "Defaulting to previously tracked " \
|
||||
f"{self.git_remote}/{self.git_branch}."
|
||||
logging.info(
|
||||
f"Git Repo {self.alias}: HEAD detached on untracked "
|
||||
f"commit {branch_info}. {msg}")
|
||||
else:
|
||||
self.git_branch = branch_info
|
||||
self.valid_git_repo = True
|
||||
return True
|
||||
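# A minimal sketch of the status-line parsing above, using fabricated
# `git status` output for the attached and detached cases:
_attached = "On branch master\nYour branch is up to date with 'origin/master'."
_detached = "HEAD detached at origin/master\nnothing to commit"
for _resp in (_attached, _detached):
    _first = _resp.strip().split('\n', 1)[0]
    _head_detached = _first.startswith("HEAD detached")
    _branch_info = _first.split()[-1]
    # attached: _branch_info == "master"
    # detached: _branch_info == "origin/master"; splitting on "/" then
    #           recovers remote "origin" and branch "master"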
|
||||
async def check_diverged(self) -> bool:
|
||||
self._verify_repo(check_remote=True)
|
||||
async with self.git_operation_lock:
|
||||
if self.head_detached:
|
||||
return False
|
||||
cmd = (
|
||||
"merge-base --is-ancestor HEAD "
|
||||
f"{self.git_remote}/{self.git_branch}"
|
||||
)
|
||||
try:
|
||||
await self._run_git_cmd(cmd, retries=1)
|
||||
except self.cmd_helper.scmd_error:
|
||||
return True
|
||||
return False
|
||||
|
||||
def log_repo_info(self) -> None:
|
||||
logging.info(
|
||||
f"Git Repo {self.alias} Detected:\n"
|
||||
f"Owner: {self.git_owner}\n"
|
||||
f"Repository Name: {self.git_repo_name}\n"
|
||||
f"Path: {self.git_path}\n"
|
||||
f"Remote: {self.git_remote}\n"
|
||||
f"Branch: {self.git_branch}\n"
|
||||
f"Remote URL: {self.upstream_url}\n"
|
||||
f"Current Commit SHA: {self.current_commit}\n"
|
||||
f"Upstream Commit SHA: {self.upstream_commit}\n"
|
||||
f"Current Version: {self.current_version}\n"
|
||||
f"Upstream Version: {self.upstream_version}\n"
|
||||
f"Is Dirty: {self.dirty}\n"
|
||||
f"Is Detached: {self.head_detached}\n"
|
||||
f"Commits Behind: {len(self.commits_behind)}\n"
|
||||
f"Tag Data: {self.tag_data}\n"
|
||||
f"Bound Repo: {self.bound_repo}\n"
|
||||
f"Diverged: {self.diverged}"
|
||||
)
|
||||
|
||||
def report_invalids(self, primary_branch: str) -> List[str]:
|
||||
invalids: List[str] = []
|
||||
upstream_url = self.upstream_url.lower()
|
||||
if upstream_url[-4:] != ".git":
|
||||
upstream_url += ".git"
|
||||
if upstream_url != self.origin_url.lower():
|
||||
invalids.append(f"Unofficial remote url: {self.upstream_url}")
|
||||
if self.git_branch != primary_branch or self.git_remote != "origin":
|
||||
invalids.append(
|
||||
"Repo not on valid remote branch, expected: "
|
||||
f"origin/{primary_branch}, detected: "
|
||||
f"{self.git_remote}/{self.git_branch}")
|
||||
if self.head_detached:
|
||||
invalids.append("Detached HEAD detected")
|
||||
if self.diverged:
|
||||
invalids.append("Repo has diverged from remote")
|
||||
return invalids
|
||||
|
||||
def _verify_repo(self, check_remote: bool = False) -> None:
|
||||
if not self.valid_git_repo:
|
||||
raise self.server.error(
|
||||
f"Git Repo {self.alias}: repo not initialized")
|
||||
if check_remote:
|
||||
if self.git_remote == "?":
|
||||
raise self.server.error(
|
||||
f"Git Repo {self.alias}: No valid git remote detected")
|
||||
|
||||
async def reset(self) -> None:
|
||||
if self.git_remote == "?" or self.git_branch == "?":
|
||||
raise self.server.error("Cannot reset, unknown remote/branch")
|
||||
async with self.git_operation_lock:
|
||||
reset_cmd = f"reset --hard {self.git_remote}/{self.git_branch}"
|
||||
if self.is_beta:
|
||||
reset_cmd = f"reset --hard {self.upstream_commit}"
|
||||
await self._run_git_cmd(reset_cmd, retries=2)
|
||||
|
||||
async def fetch(self) -> None:
|
||||
self._verify_repo(check_remote=True)
|
||||
async with self.git_operation_lock:
|
||||
await self._run_git_cmd_async(
|
||||
f"fetch {self.git_remote} --prune --progress")
|
||||
|
||||
async def clean(self) -> None:
|
||||
self._verify_repo()
|
||||
async with self.git_operation_lock:
|
||||
await self._run_git_cmd("clean -d -f", retries=2)
|
||||
|
||||
async def pull(self) -> None:
|
||||
self._verify_repo()
|
||||
if self.head_detached:
|
||||
raise self.server.error(
|
||||
f"Git Repo {self.alias}: Cannot perform pull on a "
|
||||
"detached HEAD")
|
||||
cmd = "pull --progress"
|
||||
if self.cmd_helper.is_debug_enabled():
|
||||
cmd = f"{cmd} --rebase"
|
||||
if self.is_beta:
|
||||
cmd = f"{cmd} {self.git_remote} {self.upstream_commit}"
|
||||
async with self.git_operation_lock:
|
||||
await self._run_git_cmd_async(cmd)
|
||||
|
||||
async def list_branches(self) -> List[str]:
|
||||
self._verify_repo()
|
||||
async with self.git_operation_lock:
|
||||
resp = await self._run_git_cmd("branch --list")
|
||||
return resp.strip().split("\n")
|
||||
|
||||
async def remote(self, command: str) -> str:
|
||||
self._verify_repo(check_remote=True)
|
||||
async with self.git_operation_lock:
|
||||
resp = await self._run_git_cmd(
|
||||
f"remote {command}")
|
||||
return resp.strip()
|
||||
|
||||
async def describe(self, args: str = "") -> str:
|
||||
self._verify_repo()
|
||||
async with self.git_operation_lock:
|
||||
resp = await self._run_git_cmd(f"describe {args}".strip())
|
||||
return resp.strip()
|
||||
|
||||
async def rev_parse(self, args: str = "") -> str:
|
||||
self._verify_repo()
|
||||
async with self.git_operation_lock:
|
||||
resp = await self._run_git_cmd(f"rev-parse {args}".strip())
|
||||
return resp.strip()
|
||||
|
||||
async def rev_list(self, args: str = "") -> str:
|
||||
self._verify_repo()
|
||||
async with self.git_operation_lock:
|
||||
resp = await self._run_git_cmd(f"rev-list {args}".strip())
|
||||
return resp.strip()
|
||||
|
||||
async def get_config_item(self, item: str) -> str:
|
||||
self._verify_repo()
|
||||
async with self.git_operation_lock:
|
||||
resp = await self._run_git_cmd(f"config --get {item}")
|
||||
return resp.strip()
|
||||
|
||||
async def checkout(self, branch: Optional[str] = None) -> None:
|
||||
self._verify_repo()
|
||||
async with self.git_operation_lock:
|
||||
if branch is None:
|
||||
if self.is_beta:
|
||||
branch = self.upstream_commit
|
||||
else:
|
||||
branch = f"{self.git_remote}/{self.git_branch}"
|
||||
await self._run_git_cmd(f"checkout -q {branch}")
|
||||
|
||||
async def run_fsck(self) -> None:
|
||||
async with self.git_operation_lock:
|
||||
await self._run_git_cmd("fsck --full", timeout=300., retries=1)
|
||||
|
||||
async def clone(self) -> None:
|
||||
async with self.git_operation_lock:
|
||||
self.cmd_helper.notify_update_response(
|
||||
f"Git Repo {self.alias}: Starting Clone Recovery...")
|
||||
event_loop = self.server.get_event_loop()
|
||||
if self.backup_path.exists():
|
||||
await event_loop.run_in_thread(shutil.rmtree, self.backup_path)
|
||||
await self._check_lock_file_exists(remove=True)
|
||||
git_cmd = f"clone {self.origin_url} {self.backup_path}"
|
||||
try:
|
||||
await self._run_git_cmd_async(git_cmd, 1, False, False)
|
||||
except Exception as e:
|
||||
self.cmd_helper.notify_update_response(
|
||||
f"Git Repo {self.alias}: Git Clone Failed")
|
||||
raise self.server.error("Git Clone Error") from e
|
||||
if self.git_path.exists():
|
||||
await event_loop.run_in_thread(shutil.rmtree, self.git_path)
|
||||
await event_loop.run_in_thread(
|
||||
shutil.move, str(self.backup_path), str(self.git_path))
|
||||
self.cmd_helper.notify_update_response(
|
||||
f"Git Repo {self.alias}: Git Clone Complete")
|
||||
|
||||
async def get_commits_behind(self) -> List[Dict[str, Any]]:
|
||||
self._verify_repo()
|
||||
if self.is_current():
|
||||
return []
|
||||
async with self.git_operation_lock:
|
||||
if self.is_beta:
|
||||
ref = self.upstream_commit
|
||||
else:
|
||||
ref = f"{self.git_remote}/{self.git_branch}"
|
||||
resp = await self._run_git_cmd(
|
||||
f"log {self.current_commit}..{ref} "
|
||||
f"--format={GIT_LOG_FMT} --max-count={GIT_MAX_LOG_CNT}")
|
||||
commits_behind: List[Dict[str, Any]] = []
|
||||
for log_entry in resp.split('\x1E'):
|
||||
log_entry = log_entry.strip()
|
||||
if not log_entry:
|
||||
continue
|
||||
log_items = [li.strip() for li in log_entry.split('\x1D')
|
||||
if li.strip()]
|
||||
cbh = [li.split(':', 1) for li in log_items]
|
||||
commits_behind.append(dict(cbh)) # type: ignore
|
||||
return commits_behind
|
||||
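# Sketch of the GIT_LOG_FMT parsing above: each commit record ends with
# 0x1E and its fields are separated by 0x1D. The SHA and text here are
# fabricated placeholders.
_resp = (
    "sha:1111111111111111111111111111111111111111\x1Dauthor:Jane Doe\x1D"
    "date:1650000000\x1Dsubject:Fix a bug\x1Dmessage:details here\x1E"
)
_commits: List[Dict[str, Any]] = []
for _entry in _resp.split('\x1E'):
    _entry = _entry.strip()
    if not _entry:
        continue
    _items = [li.strip() for li in _entry.split('\x1D') if li.strip()]
    _commits.append(dict(li.split(':', 1) for li in _items))
# _commits[0]["author"] == "Jane Doe"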
|
||||
async def get_tagged_commits(self) -> Dict[str, Any]:
|
||||
self._verify_repo()
|
||||
async with self.git_operation_lock:
|
||||
resp = await self._run_git_cmd(
|
||||
"for-each-ref --count=10 --sort='-creatordate' "
|
||||
f"--format={GIT_REF_FMT} 'refs/tags'")
|
||||
tagged_commits: Dict[str, Any] = {}
|
||||
for line in resp.split('\n'):
|
||||
parts = line.strip().split()
|
||||
if len(parts) != 3 or parts[0] != "commit":
|
||||
continue
|
||||
sha, ref = parts[1:]
|
||||
tag = ref.split('/')[-1]
|
||||
tagged_commits[sha] = tag
|
||||
# Return tagged commits as SHA keys mapped to tag values
|
||||
return tagged_commits
|
||||
|
||||
async def get_tag_data(self, tag: str) -> Dict[str, Any]:
|
||||
self._verify_repo()
|
||||
async with self.git_operation_lock:
|
||||
cmd = f"tag -l --format='%(contents)' {tag}"
|
||||
resp = (await self._run_git_cmd(cmd)).strip()
|
||||
req_fields = ["repo", "branch", "version", "commit"]
|
||||
tag_data: Dict[str, Any] = {}
|
||||
for line in resp.split("\n"):
|
||||
parts = line.strip().split(":", 1)
|
||||
if len(parts) != 2:
|
||||
continue
|
||||
field, value = parts
|
||||
field = field.strip()
|
||||
if field not in req_fields:
|
||||
continue
|
||||
tag_data[field] = value.strip()
|
||||
if len(tag_data) != len(req_fields):
|
||||
return {}
|
||||
vlist = self._convert_semver(tag_data["version"])
|
||||
tag_data["version_as_list"] = vlist
|
||||
return tag_data
|
||||
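# The annotation layout expected by get_tag_data(), sketched with a
# hypothetical Klipper tag; only the four req_fields are retained:
_annotation = (
    "repo: klipper\n"
    "branch: master\n"
    "version: v0.10.0-1\n"
    "commit: 0123456789abcdef0123456789abcdef01234567\n"
)
_tag_data: Dict[str, Any] = {}
for _line in _annotation.split("\n"):
    _parts = _line.strip().split(":", 1)
    if len(_parts) == 2 and _parts[0].strip() in (
            "repo", "branch", "version", "commit"):
        _tag_data[_parts[0].strip()] = _parts[1].strip()
# _tag_data["version"] == "v0.10.0-1"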
|
||||
def get_repo_status(self) -> Dict[str, Any]:
|
||||
return {
|
||||
'detected_type': "git_repo",
|
||||
'remote_alias': self.git_remote,
|
||||
'branch': self.git_branch,
|
||||
'owner': self.git_owner,
|
||||
'repo_name': self.git_repo_name,
|
||||
'version': self.current_version,
|
||||
'remote_version': self.upstream_version,
|
||||
'current_hash': self.current_commit,
|
||||
'remote_hash': self.upstream_commit,
|
||||
'is_dirty': self.dirty,
|
||||
'detached': self.head_detached,
|
||||
'commits_behind': self.commits_behind,
|
||||
'git_messages': self.git_messages,
|
||||
'full_version_string': self.full_version_string,
|
||||
'pristine': not self.dirty
|
||||
}
|
||||
|
||||
def get_version(self, upstream: bool = False) -> Tuple[Any, ...]:
|
||||
version = self.upstream_version if upstream else self.current_version
|
||||
return tuple(re.findall(r"\d+", version))
|
||||
|
||||
def is_detached(self) -> bool:
|
||||
return self.head_detached
|
||||
|
||||
def is_dirty(self) -> bool:
|
||||
return self.dirty
|
||||
|
||||
def is_current(self) -> bool:
|
||||
return self.current_commit == self.upstream_commit
|
||||
|
||||
def _convert_semver(self, version: str) -> List[int]:
|
||||
ver_match = self.tag_r.match(version)
|
||||
if ver_match is None:
|
||||
return []
|
||||
try:
|
||||
tag = ver_match.group(1)
|
||||
core = tag.split("-")[0]
|
||||
if core[0] == "v":
|
||||
core = core[1:]
|
||||
base_ver = [int(part) for part in core.split(".")]
|
||||
base_ver.append({"alpha": 0, "beta": 1}.get(ver_match.group(3), 2))
|
||||
base_ver.append(int(ver_match.group(5)[1:]))
|
||||
except Exception:
|
||||
return []
|
||||
return base_ver
|
||||
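# Worked examples for _convert_semver(), assuming the tag_r pattern above.
# The two appended values encode (release level, distance from tag):
#   "v0.7.1-beta.2-15" -> [0, 7, 1, 1, 15]  (beta maps to 1)
#   "v0.7.1-15"        -> [0, 7, 1, 2, 15]  (no pre-release maps to 2)
#   "v0.7.1"           -> []                (group(5) is None, the int()
#                                            conversion raises, and the
#                                            empty list is returned)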
|
||||
async def _check_lock_file_exists(self, remove: bool = False) -> bool:
|
||||
lock_path = self.git_path.joinpath(".git/index.lock")
|
||||
if lock_path.is_file():
|
||||
if remove:
|
||||
logging.info(f"Git Repo {self.alias}: Git lock file found "
|
||||
"after git process exited, removing")
|
||||
try:
|
||||
event_loop = self.server.get_event_loop()
|
||||
await event_loop.run_in_thread(os.remove, lock_path)
|
||||
except Exception:
|
||||
pass
|
||||
return True
|
||||
return False
|
||||
|
||||
async def _wait_for_lock_release(self, timeout: int = 60) -> None:
|
||||
while timeout:
|
||||
if await self._check_lock_file_exists():
|
||||
if not timeout % 10:
|
||||
logging.info(f"Git Repo {self.alias}: Git lock file "
|
||||
f"exists, {timeout} seconds remaining "
|
||||
"before removal.")
|
||||
await asyncio.sleep(1.)
|
||||
timeout -= 1
|
||||
else:
|
||||
return
|
||||
await self._check_lock_file_exists(remove=True)
|
||||
|
||||
async def _repair_loose_objects(self) -> bool:
|
||||
try:
|
||||
await self.cmd_helper.run_cmd_with_response(
|
||||
"find .git/objects/ -type f -empty | xargs rm",
|
||||
timeout=10., retries=1, cwd=str(self.git_path))
|
||||
await self._run_git_cmd_async(
|
||||
"fetch --all -p", retries=1, fix_loose=False)
|
||||
await self._run_git_cmd("fsck --full", timeout=300., retries=1)
|
||||
except Exception:
|
||||
logging.exception("Attempt to repair loose objects failed")
|
||||
return False
|
||||
return True
|
||||
|
||||
async def _run_git_cmd_async(self,
|
||||
cmd: str,
|
||||
retries: int = 5,
|
||||
need_git_path: bool = True,
|
||||
fix_loose: bool = True
|
||||
) -> None:
|
||||
# Fetch and pull require special handling. If the request
|
||||
# gets delayed we do not want to terminate it while the command
|
||||
# is processing.
|
||||
await self._wait_for_lock_release()
|
||||
event_loop = self.server.get_event_loop()
|
||||
env = os.environ.copy()
|
||||
env.update(GIT_ENV_VARS)
|
||||
if need_git_path:
|
||||
git_cmd = f"git -C {self.git_path} {cmd}"
|
||||
else:
|
||||
git_cmd = f"git {cmd}"
|
||||
scmd = self.cmd_helper.build_shell_command(
|
||||
git_cmd, callback=self._handle_process_output,
|
||||
env=env)
|
||||
while retries:
|
||||
self.git_messages.clear()
|
||||
self.fetch_input_recd = False
|
||||
self.fetch_timeout_handle = event_loop.delay_callback(
|
||||
GIT_ASYNC_TIMEOUT, self._check_process_active,
|
||||
scmd, cmd)
|
||||
try:
|
||||
await scmd.run(timeout=0)
|
||||
except Exception:
|
||||
pass
|
||||
self.fetch_timeout_handle.cancel()
|
||||
ret = scmd.get_return_code()
|
||||
if ret == 0:
|
||||
self.git_messages.clear()
|
||||
return
|
||||
elif fix_loose:
|
||||
if GIT_OBJ_ERR in "\n".join(self.git_messages):
|
||||
ret = await self._repair_loose_objects()
|
||||
if ret:
|
||||
break
|
||||
# since the attempt to repair failed, bypass retries
|
||||
# and immediately raise an exception
|
||||
raise self.server.error(
|
||||
f"Unable to repair loose objects, use hard recovery")
|
||||
retries -= 1
|
||||
await asyncio.sleep(.5)
|
||||
await self._check_lock_file_exists(remove=True)
|
||||
raise self.server.error(f"Git Command '{cmd}' failed")
|
||||
|
||||
def _handle_process_output(self, output: bytes) -> None:
|
||||
self.fetch_input_recd = True
|
||||
out = output.decode().strip()
|
||||
if out:
|
||||
self.git_messages.append(out)
|
||||
self.cmd_helper.notify_update_response(out)
|
||||
logging.debug(
|
||||
f"Git Repo {self.alias}: {out}")
|
||||
|
||||
async def _check_process_active(self,
|
||||
scmd: shell_command.ShellCommand,
|
||||
cmd_name: str
|
||||
) -> None:
|
||||
ret = scmd.get_return_code()
|
||||
if ret is not None:
|
||||
logging.debug(f"Git Repo {self.alias}: {cmd_name} returned")
|
||||
return
|
||||
if self.fetch_input_recd:
|
||||
# Received some input, reschedule timeout
|
||||
logging.debug(
|
||||
f"Git Repo {self.alias}: {cmd_name} active, rescheduling")
|
||||
event_loop = self.server.get_event_loop()
|
||||
self.fetch_input_recd = False
|
||||
self.fetch_timeout_handle = event_loop.delay_callback(
|
||||
GIT_ASYNC_TIMEOUT, self._check_process_active,
|
||||
scmd, cmd_name)
|
||||
else:
|
||||
# Request has timed out with no input, terminate it
|
||||
logging.debug(f"Git Repo {self.alias}: {cmd_name} timed out")
|
||||
# Cancel with SIGKILL
|
||||
await scmd.cancel(2)
|
||||
|
||||
async def _run_git_cmd(self,
|
||||
git_args: str,
|
||||
timeout: float = 20.,
|
||||
retries: int = 5,
|
||||
env: Optional[Dict[str, str]] = None
|
||||
) -> str:
|
||||
try:
|
||||
return await self.cmd_helper.run_cmd_with_response(
|
||||
f"git -C {self.git_path} {git_args}",
|
||||
timeout=timeout, retries=retries, env=env, sig_idx=2)
|
||||
except self.cmd_helper.scmd_error as e:
|
||||
stdout = e.stdout.decode().strip()
|
||||
stderr = e.stderr.decode().strip()
|
||||
if stdout:
|
||||
self.git_messages.append(stdout)
|
||||
if stderr:
|
||||
self.git_messages.append(stderr)
|
||||
raise
|
||||
1312
moonraker/components/update_manager/update_manager.py
Normal file
File diff suppressed because it is too large
Load Diff
431
moonraker/components/update_manager/zip_deploy.py
Normal file
@@ -0,0 +1,431 @@
|
||||
# Zip Application Deployment implementation
|
||||
#
|
||||
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
|
||||
#
|
||||
# This file may be distributed under the terms of the GNU GPLv3 license.
|
||||
|
||||
from __future__ import annotations
|
||||
import os
|
||||
import pathlib
|
||||
import json
|
||||
import shutil
|
||||
import re
|
||||
import time
|
||||
import zipfile
|
||||
from .app_deploy import AppDeploy
|
||||
from utils import verify_source
|
||||
|
||||
# Annotation imports
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Tuple,
|
||||
Optional,
|
||||
Dict,
|
||||
List,
|
||||
)
|
||||
if TYPE_CHECKING:
|
||||
from confighelper import ConfigHelper
|
||||
from .update_manager import CommandHelper
|
||||
|
||||
RINFO_KEYS = [
|
||||
"git_version", "long_version", "commit_hash", "source_checksum",
|
||||
"ignored_exts", "ignored_dirs", "build_date", "channel",
|
||||
"owner_repo", "host_repo", "release_tag"
|
||||
]
|
||||
|
||||
class ZipDeploy(AppDeploy):
|
||||
def __init__(self, config: ConfigHelper, cmd_helper: CommandHelper) -> None:
|
||||
super().__init__(config, cmd_helper)
|
||||
self.need_channel_update = self.type != "zip"
|
||||
self.official_repo: str = "?"
|
||||
self.owner: str = "?"
|
||||
# Extract repo from origin for validation
|
||||
match = re.match(r"https?://(?:www\.)?github.com/([^/]+/[^.]+)",
|
||||
self.origin)
|
||||
if match is not None:
|
||||
self.official_repo = match.group(1)
|
||||
self.owner = self.official_repo.split('/')[0]
|
||||
else:
|
||||
raise config.error(
|
||||
"Invalid url set for 'origin' option in section "
|
||||
f"[{config.get_name()}]. Unable to extract owner/repo.")
|
||||
self.host_repo: str = config.get('host_repo', self.official_repo)
|
||||
self.package_list: List[str] = []
|
||||
self.python_pkg_list: List[str] = []
|
||||
self.release_download_info: Tuple[str, str, int] = ("?", "?", 0)
|
||||
|
||||
@staticmethod
|
||||
async def from_application(app: AppDeploy) -> ZipDeploy:
|
||||
new_app = ZipDeploy(app.config, app.cmd_helper)
|
||||
await new_app.reinstall()
|
||||
return new_app
|
||||
|
||||
async def initialize(self) -> Dict[str, Any]:
|
||||
storage = await super().initialize()
|
||||
self.source_checksum: str = storage.get("source_checksum", "?")
|
||||
self.pristine = storage.get('pristine', False)
|
||||
self.verified = storage.get('verified', False)
|
||||
self.build_date: int = storage.get('build_date', 0)
|
||||
self.full_version: str = storage.get('full_version', "?")
|
||||
self.short_version: str = storage.get('short_version', "?")
|
||||
self.commit_hash: str = storage.get('commit_hash', "?")
|
||||
self.latest_hash: str = storage.get('latest_hash', "?")
|
||||
self.latest_version: str = storage.get('latest_version', "?")
|
||||
self.latest_checksum: str = storage.get('latest_checksum', "?")
|
||||
self.latest_build_date: int = storage.get('latest_build_date', 0)
|
||||
self.errors: List[str] = storage.get('errors', [])
|
||||
self.commit_log: List[Dict[str, Any]] = storage.get('commit_log', [])
|
||||
return storage
|
||||
|
||||
def get_persistent_data(self) -> Dict[str, Any]:
|
||||
storage = super().get_persistent_data()
|
||||
storage.update({
|
||||
'source_checksum': self.source_checksum,
|
||||
'pristine': self.pristine,
|
||||
'verified': self.verified,
|
||||
'build_date': self.build_date,
|
||||
'full_version': self.full_version,
|
||||
'short_version': self.short_version,
|
||||
'commit_hash': self.commit_hash,
|
||||
'latest_hash': self.latest_hash,
|
||||
'latest_version': self.latest_version,
|
||||
'latest_checksum': self.latest_checksum,
|
||||
'latest_build_date': self.latest_build_date,
|
||||
'commit_log': self.commit_log,
|
||||
'errors': self.errors
|
||||
})
|
||||
return storage
|
||||
|
||||
async def _parse_info_file(self, file_name: str) -> Dict[str, Any]:
|
||||
info_file = self.path.joinpath(file_name)
|
||||
if not info_file.exists():
|
||||
self.log_info(f"Unable to locate file '{info_file}'")
|
||||
return {}
|
||||
try:
|
||||
event_loop = self.server.get_event_loop()
|
||||
info_bytes = await event_loop.run_in_thread(info_file.read_text)
|
||||
info: Dict[str, Any] = json.loads(info_bytes)
|
||||
except Exception:
|
||||
self.log_exc(f"Unable to parse info file {file_name}")
|
||||
info = {}
|
||||
return info
|
||||
|
||||
def _get_tag_version(self, version_string: str) -> str:
|
||||
tag_version: str = "?"
|
||||
ver_match = re.match(r"v\d+\.\d+\.\d-\d+", version_string)
|
||||
if ver_match:
|
||||
tag_version = ver_match.group()
|
||||
return tag_version
|
||||
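# With the corrected pattern, multi-digit patch versions now match, e.g.:
#   re.match(r"v\d+\.\d+\.\d+-\d+", "v0.7.10-25").group() == "v0.7.10-25"
# The original single-digit `\d` in the patch field rejected such tags.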
|
||||
async def refresh(self) -> None:
|
||||
try:
|
||||
await self._update_repo_state()
|
||||
except Exception:
|
||||
self.verified = False
|
||||
self.log_exc("Error refreshing application state")
|
||||
|
||||
async def _update_repo_state(self) -> None:
|
||||
self.errors = []
|
||||
self._is_valid = False
|
||||
self.verified = False
|
||||
release_info = await self._parse_info_file(".release_info")
|
||||
dep_info = await self._parse_info_file(".dependencies")
|
||||
for key in RINFO_KEYS:
|
||||
if key not in release_info:
|
||||
self._add_error(f"Missing release info item: {key}")
|
||||
if 'channel' in release_info:
|
||||
local_channel = release_info['channel']
|
||||
if self.channel == "stable" and local_channel == "beta":
|
||||
self.need_channel_update = True
|
||||
self.full_version = release_info.get('long_version', "?")
|
||||
self.short_version = self._get_tag_version(
|
||||
release_info.get('git_version', ""))
|
||||
self.commit_hash = release_info.get('commit_hash', "?")
|
||||
self.build_date = release_info.get('build_date', 0)
|
||||
owner_repo = release_info.get('owner_repo', "?")
|
||||
if self.official_repo != owner_repo:
|
||||
self._add_error(
|
||||
f"Owner repo mismatch. Received {owner_repo}, "
|
||||
f"official: {self.official_repo}")
|
||||
# validate the local source code
|
||||
event_loop = self.server.get_event_loop()
|
||||
res = await event_loop.run_in_thread(verify_source, self.path)
|
||||
if res is not None:
|
||||
self.source_checksum, self.pristine = res
|
||||
if self.name in ["moonraker", "klipper"]:
|
||||
self.server.add_log_rollover_item(
|
||||
f"{self.name}_validation",
|
||||
f"{self.name} checksum: {self.source_checksum}, "
|
||||
f"pristine: {self.pristine}")
|
||||
else:
|
||||
self._add_error("Unable to validate source checksum")
|
||||
self.source_checksum = ""
|
||||
self.pristine = False
|
||||
self.package_list = sorted(dep_info.get(
|
||||
'debian', {}).get('packages', []))
|
||||
self.python_pkg_list = sorted(dep_info.get('python', []))
|
||||
# Retrieve version info from github to check for updates and
|
||||
# validate local release info
|
||||
host_repo = release_info.get('host_repo', "?")
|
||||
release_tag = release_info.get('release_tag', "?")
|
||||
if host_repo != self.host_repo:
|
||||
self._add_error(
|
||||
f"Host repo mismatch, received: {host_repo}, "
|
||||
f"expected: {self.host_repo}. This could result in "
|
||||
" a failed update.")
|
||||
resource = f"repos/{self.host_repo}/releases"
|
||||
current_release, latest_release = await self._fetch_github_releases(
|
||||
resource, release_tag)
|
||||
await self._validate_current_release(release_info, current_release)
|
||||
if not self.errors:
|
||||
self.verified = True
|
||||
await self._process_latest_release(latest_release)
|
||||
self._save_state()
|
||||
self._log_zipapp_info()
|
||||
|
||||
async def _fetch_github_releases(self,
|
||||
resource: str,
|
||||
current_tag: Optional[str] = None
|
||||
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
|
||||
try:
|
||||
client = self.cmd_helper.get_http_client()
|
||||
resp = await client.github_api_request(resource, attempts=3)
|
||||
resp.raise_for_status()
|
||||
releases = resp.json()
|
||||
assert isinstance(releases, list)
|
||||
except Exception:
|
||||
self.log_exc("Error fetching releases from GitHub")
|
||||
return {}, {}
|
||||
release: Dict[str, Any]
|
||||
latest_release: Dict[str, Any] = {}
|
||||
current_release: Dict[str, Any] = {}
|
||||
for release in releases:
|
||||
if not latest_release:
|
||||
if self.channel != "stable":
|
||||
# Allow the beta channel to update regardless
|
||||
latest_release = release
|
||||
elif not release['prerelease']:
|
||||
# This is a stable release on the stable channel
|
||||
latest_release = release
|
||||
if current_tag is not None:
|
||||
if not current_release and release['tag_name'] == current_tag:
|
||||
current_release = release
|
||||
if latest_release and current_release:
|
||||
break
|
||||
elif latest_release:
|
||||
break
|
||||
return current_release, latest_release
|
||||
|
||||
async def _validate_current_release(self,
|
||||
release_info: Dict[str, Any],
|
||||
release: Dict[str, Any]
|
||||
) -> None:
|
||||
if not release:
|
||||
self._add_error("Unable to find current release on GitHub")
|
||||
return
|
||||
asset_info = self._get_asset_urls(release, ["RELEASE_INFO"])
|
||||
if "RELEASE_INFO" not in asset_info:
|
||||
self._add_error(
|
||||
"RELEASE_INFO not found in current release assets")
|
||||
info_url, content_type, size = asset_info['RELEASE_INFO']
|
||||
client = self.cmd_helper.get_http_client()
|
||||
rinfo_bytes = await client.get_file(info_url, content_type)
|
||||
github_rinfo: Dict[str, Any] = json.loads(rinfo_bytes)
|
||||
if github_rinfo.get(self.name, {}) != release_info:
|
||||
self._add_error(
|
||||
"Local release info does not match the remote")
|
||||
else:
|
||||
self.log_info("Current Release Info Validated")
|
||||
|
||||
async def _process_latest_release(self, release: Dict[str, Any]):
|
||||
if not release:
|
||||
self._add_error("Unable to find latest release on GitHub")
|
||||
return
|
||||
zip_file_name = f"{self.name}.zip"
|
||||
asset_names = ["RELEASE_INFO", "COMMIT_LOG", zip_file_name]
|
||||
asset_info = self._get_asset_urls(release, asset_names)
|
||||
if "RELEASE_INFO" in asset_info:
|
||||
asset_url, content_type, size = asset_info['RELEASE_INFO']
|
||||
client = self.cmd_helper.get_http_client()
|
||||
rinfo_bytes = await client.get_file(asset_url, content_type)
|
||||
update_release_info: Dict[str, Any] = json.loads(rinfo_bytes)
|
||||
update_info = update_release_info.get(self.name, {})
|
||||
self.lastest_hash = update_info.get('commit_hash', "?")
|
||||
self.latest_checksum = update_info.get('source_checksum', "?")
|
||||
self.latest_version = self._get_tag_version(
|
||||
update_info.get('git_version', "?"))
|
||||
self.latest_build_date = update_info.get('build_date', 0)
|
||||
else:
|
||||
self._add_error(
|
||||
"RELEASE_INFO not found in latest release assets")
|
||||
self.commit_log = []
|
||||
if self.short_version != self.latest_version:
|
||||
# Only report commit log if versions change
|
||||
if "COMMIT_LOG" in asset_info:
|
||||
asset_url, content_type, size = asset_info['COMMIT_LOG']
|
||||
client = self.cmd_helper.get_http_client()
|
||||
commit_bytes = await client.get_file(asset_url, content_type)
|
||||
commit_info: Dict[str, Any] = json.loads(commit_bytes)
|
||||
self.commit_log = commit_info.get(self.name, [])
|
||||
if zip_file_name in asset_info:
|
||||
self.release_download_info = asset_info[zip_file_name]
|
||||
self._is_valid = True
|
||||
else:
|
||||
self.release_download_info = ("?", "?", 0)
|
||||
self._add_error(f"Release asset {zip_file_name} not found")
|
||||
|
||||
def _get_asset_urls(self,
|
||||
release: Dict[str, Any],
|
||||
filenames: List[str]
|
||||
) -> Dict[str, Tuple[str, str, int]]:
|
||||
asset_info: Dict[str, Tuple[str, str, int]] = {}
|
||||
asset: Dict[str, Any]
|
||||
for asset in release.get('assets', []):
|
||||
name = asset['name']
|
||||
if name in filenames:
|
||||
rinfo_url = asset['browser_download_url']
|
||||
content_type = asset['content_type']
|
||||
size = asset['size']
|
||||
asset_info[name] = (rinfo_url, content_type, size)
|
||||
filenames.remove(name)
|
||||
if not filenames:
|
||||
break
|
||||
return asset_info
|
||||
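# Sketch of _get_asset_urls() against a trimmed, fabricated GitHub release
# payload (field names follow the GitHub REST "release asset" schema):
_release = {
    "assets": [
        {"name": "RELEASE_INFO",
         "browser_download_url": "https://example.com/RELEASE_INFO",
         "content_type": "application/json", "size": 512},
        {"name": "moonraker.zip",
         "browser_download_url": "https://example.com/moonraker.zip",
         "content_type": "application/zip", "size": 1048576},
    ]
}
# _get_asset_urls(_release, ["RELEASE_INFO", "moonraker.zip"]) returns:
# {"RELEASE_INFO": ("https://example.com/RELEASE_INFO",
#                   "application/json", 512),
#  "moonraker.zip": ("https://example.com/moonraker.zip",
#                    "application/zip", 1048576)}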
|
||||
def _add_error(self, warning: str):
|
||||
self.log_info(warning)
|
||||
self.errors.append(warning)
|
||||
|
||||
def _log_zipapp_info(self):
|
||||
self.log_info(
|
||||
"\nZip Application Distribution Detected\n"
|
||||
f" Valid: {self._is_valid}\n"
|
||||
f" Verified: {self.verified}\n"
|
||||
f" Channel: {self.channel}\n"
|
||||
f" Repo: {self.official_repo}\n"
|
||||
f" Path: {self.path}\n"
|
||||
f" Pristine: {self.pristine}\n"
|
||||
f" Need Channel Update: {self.need_channel_update}\n"
|
||||
f" Commits Behind: {len(self.commit_log)}\n"
|
||||
f"Current Release Info:\n"
|
||||
f" Source Checksum: {self.source_checksum}\n"
|
||||
f" Commit SHA: {self.commit_hash}\n"
|
||||
f" Long Version: {self.full_version}\n"
|
||||
f" Short Version: {self.short_version}\n"
|
||||
f" Build Date: {time.ctime(self.build_date)}\n"
|
||||
f"Latest Available Release Info:\n"
|
||||
f" Source Checksum: {self.latest_checksum}\n"
|
||||
f" Commit SHA: {self.lastest_hash}\n"
|
||||
f" Version: {self.latest_version}\n"
|
||||
f" Build Date: {time.ctime(self.latest_build_date)}\n"
|
||||
f" Download URL: {self.release_download_info[0]}\n"
|
||||
f" Content Type: {self.release_download_info[1]}\n"
|
||||
f" Download Size: {self.release_download_info[2]}"
|
||||
)
|
||||
|
||||
async def _update_dependencies(self,
|
||||
npm_hash,
|
||||
force: bool = False
|
||||
) -> None:
|
||||
new_deps = await self._parse_info_file('.dependencies')
|
||||
system_pkgs = sorted(
|
||||
new_deps.get('debian', {}).get('packages', []))
|
||||
python_pkgs = sorted(new_deps.get('python', []))
|
||||
if system_pkgs:
|
||||
if force or system_pkgs != self.package_list:
|
||||
await self._install_packages(system_pkgs)
|
||||
if python_pkgs:
|
||||
if force or python_pkgs != self.python_pkg_list:
|
||||
await self._update_virtualenv(python_pkgs)
|
||||
ret = await self._check_need_update(npm_hash, self.npm_pkg_json)
|
||||
if force or ret:
|
||||
if self.npm_pkg_json is not None:
|
||||
self.notify_status("Updating Node Packages...")
|
||||
try:
|
||||
await self.cmd_helper.run_cmd(
|
||||
"npm ci --only=prod", notify=True, timeout=600.,
|
||||
cwd=str(self.path))
|
||||
except Exception:
|
||||
self.notify_status("Node Package Update failed")
|
||||
|
||||
def _extract_release(self, release_zip: pathlib.Path) -> None:
|
||||
if self.path.is_dir():
|
||||
shutil.rmtree(self.path)
|
||||
os.mkdir(self.path)
|
||||
with zipfile.ZipFile(release_zip) as zf:
|
||||
zf.extractall(self.path)
|
||||
|
||||
async def update(self, force_dep_update: bool = False) -> bool:
|
||||
if not self._is_valid:
|
||||
raise self.log_exc("Update aborted, repo not valid", False)
|
||||
if self.short_version == self.latest_version:
|
||||
# already up to date
|
||||
return False
|
||||
self.cmd_helper.notify_update_response(
|
||||
f"Updating Application {self.name}...")
|
||||
npm_hash = await self._get_file_hash(self.npm_pkg_json)
|
||||
dl_url, content_type, size = self.release_download_info
|
||||
self.notify_status("Starting Download...")
|
||||
td = await self.cmd_helper.create_tempdir(self.name, "app")
|
||||
event_loop = self.server.get_event_loop()
try:
|
||||
tempdir = pathlib.Path(td.name)
|
||||
temp_download_file = tempdir.joinpath(f"{self.name}.zip")
|
||||
client = self.cmd_helper.get_http_client()
|
||||
await client.download_file(
|
||||
dl_url, content_type, temp_download_file, size,
|
||||
self.cmd_helper.on_download_progress)
|
||||
self.notify_status(
|
||||
f"Download Complete, extracting release to '{self.path}'")
|
||||
await event_loop.run_in_thread(
|
||||
self._extract_release, temp_download_file)
|
||||
finally:
|
||||
await event_loop.run_in_thread(td.cleanup)
|
||||
await self._update_dependencies(npm_hash, force=force_dep_update)
|
||||
await self._update_repo_state()
|
||||
await self.restart_service()
|
||||
self.notify_status("Update Finished...", is_complete=True)
|
||||
return True
|
||||
|
||||
async def recover(self,
|
||||
hard: bool = False,
|
||||
force_dep_update: bool = False
|
||||
) -> None:
|
||||
res = f"repos/{self.host_repo}/releases"
|
||||
releases = await self._fetch_github_releases(res)
|
||||
await self._process_latest_release(releases[1])
|
||||
await self.update(force_dep_update=force_dep_update)
|
||||
|
||||
async def reinstall(self) -> None:
|
||||
# Clear the persistent storage prior to a channel swap.
|
||||
# After the next update is complete new data will be
|
||||
# restored.
|
||||
umdb = self.cmd_helper.get_umdb()
|
||||
await umdb.pop(self.name, None)
|
||||
await self.initialize()
|
||||
await self.recover(force_dep_update=True)
|
||||
|
||||
def get_update_status(self) -> Dict[str, Any]:
|
||||
status = super().get_update_status()
|
||||
# XXX - Currently this reports status matching
|
||||
# that of the git repo so as to not break existing
|
||||
# client functionality. In the future it would be
|
||||
# good to report values specific to zip application distributions.
|
||||
status.update({
|
||||
'detected_type': "zip",
|
||||
'remote_alias': "origin",
|
||||
'branch': "master",
|
||||
'owner': self.owner,
|
||||
'version': self.short_version,
|
||||
'remote_version': self.latest_version,
|
||||
'current_hash': self.commit_hash,
|
||||
'remote_hash': self.latest_hash,
|
||||
'is_dirty': False,
|
||||
'detached': not self.verified,
|
||||
'commits_behind': self.commit_log,
|
||||
'git_messages': self.errors,
|
||||
'full_version_string': self.full_version,
|
||||
'pristine': self.pristine,
|
||||
})
|
||||
return status
|
||||
359
moonraker/components/webcam.py
Normal file
@@ -0,0 +1,359 @@
|
||||
# Centralized webcam configuration
|
||||
#
|
||||
# Copyright (C) 2022 Eric Callahan <arksine.code@gmail.com>
|
||||
#
|
||||
# This file may be distributed under the terms of the GNU GPLv3 license
|
||||
|
||||
from __future__ import annotations
|
||||
import re
|
||||
import ipaddress
|
||||
import socket
|
||||
import uuid
|
||||
import logging
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Optional,
|
||||
Dict,
|
||||
List,
|
||||
Any,
|
||||
Tuple
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from moonraker import Server
|
||||
from confighelper import ConfigHelper
|
||||
from websockets import WebRequest
|
||||
from .database import MoonrakerDatabase
|
||||
from .machine import Machine
|
||||
from .shell_command import ShellCommandFactory
|
||||
from .http_client import HttpClient
|
||||
|
||||
# This provides a mapping of fields defined by Moonraker to fields
|
||||
# defined by the database.
|
||||
CAM_FIELDS = {
|
||||
"name": "name", "service": "service", "target_fps": "targetFps",
|
||||
"stream_url": "urlStream", "snapshot_url": "urlSnapshot",
|
||||
"flip_horizontal": "flipX", "flip_vertical": "flipY"
|
||||
}
|
||||
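# Sketch: CAM_FIELDS translates a database record into WebCam constructor
# kwargs. The record values below are illustrative:
_cam_data = {
    "name": "printer", "service": "mjpegstreamer", "targetFps": 15,
    "urlStream": "/webcam/?action=stream",
    "urlSnapshot": "/webcam/?action=snapshot",
    "flipX": False, "flipY": False,
}
_kwargs = {mf: _cam_data[dbf] for mf, dbf in CAM_FIELDS.items()}
# _kwargs["stream_url"] == "/webcam/?action=stream"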
|
||||
class WebcamManager:
|
||||
def __init__(self, config: ConfigHelper) -> None:
|
||||
self.server = config.get_server()
|
||||
self.webcams: Dict[str, WebCam] = {}
|
||||
# parse user configured webcams
|
||||
prefix_sections = config.get_prefix_sections("webcam ")
|
||||
for section in prefix_sections:
|
||||
cam_cfg = config[section]
|
||||
webcam = WebCam.from_config(cam_cfg)
|
||||
self.webcams[webcam.name] = webcam
|
||||
|
||||
self.server.register_endpoint(
|
||||
"/server/webcams/list", ["GET"], self._handle_webcam_list
|
||||
)
|
||||
self.server.register_endpoint(
|
||||
"/server/webcams/item", ["GET", "POST", "DELETE"],
|
||||
self._handle_webcam_request
|
||||
)
|
||||
self.server.register_endpoint(
|
||||
"/server/webcams/test", ["POST"], self._handle_webcam_test
|
||||
)
|
||||
self.server.register_notification("webcam:webcams_changed")
|
||||
|
||||
async def component_init(self) -> None:
|
||||
machine: Machine = self.server.lookup_component("machine")
|
||||
pubnet = await machine.get_public_network()
|
||||
ip: Optional[str] = pubnet.get("address")
|
||||
default_host = f"http://{pubnet['hostname']}"
|
||||
if ip is not None:
|
||||
default_host = f"http://{ip}"
|
||||
WebCam.set_default_host(default_host)
|
||||
db: MoonrakerDatabase = self.server.lookup_component("database")
|
||||
saved_cams: Dict[str, Any] = await db.get_item("webcams", default={})
|
||||
for cam_data in saved_cams.values():
|
||||
try:
|
||||
webcam = WebCam.from_database(self.server, cam_data)
|
||||
if webcam.name in self.webcams:
|
||||
continue
|
||||
self.webcams[webcam.name] = webcam
|
||||
except Exception:
|
||||
logging.exception("Failed to process webcam from db")
|
||||
continue
|
||||
|
||||
def get_webcams(self) -> Dict[str, WebCam]:
|
||||
return self.webcams
|
||||
|
||||
def _list_webcams(self) -> List[Dict[str, Any]]:
|
||||
return [wc.as_dict() for wc in self.webcams.values()]
|
||||
|
||||
async def _find_dbcam_by_uuid(
|
||||
self, name: str
|
||||
) -> Tuple[str, Dict[str, Any]]:
|
||||
db: MoonrakerDatabase = self.server.lookup_component("database")
|
||||
saved_cams: Dict[str, Dict[str, Any]]
|
||||
saved_cams = await db.get_item("webcams", default={})
|
||||
for uid, cam_data in saved_cams.items():
|
||||
if name == cam_data["name"]:
|
||||
return uid, cam_data
|
||||
return "", {}
|
||||
|
||||
async def _save_cam(self, webcam: WebCam) -> None:
|
||||
uid, cam_data = await self._find_dbcam_by_uuid(webcam.name)
|
||||
if not uid:
|
||||
uid = str(uuid.uuid4())
|
||||
for mfield, dbfield in CAM_FIELDS.items():
|
||||
cam_data[dbfield] = getattr(webcam, mfield)
|
||||
cam_data["location"] = webcam.location
|
||||
cam_data["rotation"] = webcam.rotation
|
||||
if "icon" not in cam_data:
|
||||
cam_data["icon"] = "mdi-webcam"
|
||||
db: MoonrakerDatabase = self.server.lookup_component("database")
|
||||
db.insert_item("webcams", uid, cam_data)
|
||||
|
||||
async def _delete_cam(self, webcam: WebCam) -> None:
|
||||
uid, cam = await self._find_dbcam_by_uuid(webcam.name)
|
||||
if not uid:
|
||||
return
|
||||
db: MoonrakerDatabase = self.server.lookup_component("database")
|
||||
db.delete_item("webcams", uid)
|
||||
|
||||
async def _handle_webcam_request(
|
||||
self, web_request: WebRequest
|
||||
) -> Dict[str, Any]:
|
||||
action = web_request.get_action()
|
||||
name = web_request.get_str("name")
|
||||
webcam_data: Dict[str, Any] = {}
|
||||
if action == "GET":
|
||||
if name not in self.webcams:
|
||||
raise self.server.error(f"Webcam {name} not found", 404)
|
||||
webcam_data = self.webcams[name].as_dict()
|
||||
elif action == "POST":
|
||||
if (
|
||||
name in self.webcams and
|
||||
self.webcams[name].source == "config"
|
||||
):
|
||||
raise self.server.error(
|
||||
f"Cannot overwrite webcam '{name}' sourced from "
|
||||
"Moonraker configuration"
|
||||
)
|
||||
webcam = WebCam.from_web_request(self.server, web_request)
|
||||
self.webcams[name] = webcam
|
||||
webcam_data = webcam.as_dict()
|
||||
await self._save_cam(webcam)
|
||||
elif action == "DELETE":
|
||||
if name not in self.webcams:
|
||||
raise self.server.error(f"Webcam {name} not found", 404)
|
||||
elif self.webcams[name].source == "config":
|
||||
raise self.server.error(
|
||||
f"Cannot delete webcam '{name}' sourced from "
|
||||
"Moonraker configuration"
|
||||
)
|
||||
webcam = self.webcams.pop(name)
|
||||
webcam_data = webcam.as_dict()
|
||||
await self._delete_cam(webcam)
|
||||
if action != "GET":
|
||||
self.server.send_event(
|
||||
"webcam:webcams_changed", {"webcams": self._list_webcams()}
|
||||
)
|
||||
return {"webcam": webcam_data}
|
||||
|
||||
async def _handle_webcam_list(
|
||||
self, web_request: WebRequest
|
||||
) -> Dict[str, Any]:
|
||||
return {"webcams": self._list_webcams()}
|
||||
|
||||
async def _handle_webcam_test(
|
||||
self, web_request: WebRequest
|
||||
) -> Dict[str, Any]:
|
||||
name = web_request.get_str("name")
|
||||
if name not in self.webcams:
|
||||
raise self.server.error(f"Webcam '{name}' not found", 404)
|
||||
client: HttpClient = self.server.lookup_component("http_client")
|
||||
cam = self.webcams[name]
|
||||
result: Dict[str, Any] = {
|
||||
"name": name,
|
||||
"snapshot_reachable": False
|
||||
}
|
||||
for img_type in ["snapshot", "stream"]:
|
||||
try:
|
||||
func = getattr(cam, f"get_{img_type}_url")
|
||||
result[f"{img_type}_url"] = await func(True)
|
||||
except Exception:
|
||||
logging.exception(f"Error Processing {img_type} url")
|
||||
result[f"{img_type}_url"] = ""
|
||||
if result.get("snapshot_url", "").startswith("http"):
|
||||
url = client.escape_url(result["snapshot_url"])
|
||||
ret = await client.get(url, connect_timeout=1., request_timeout=1.)
|
||||
result["snapshot_reachable"] = not ret.has_error()
|
||||
return result
|
||||
|
||||
|
||||
class WebCam:
|
||||
_default_host: str = "http://127.0.0.1"
|
||||
def __init__(self, server: Server, **kwargs) -> None:
|
||||
self._server = server
|
||||
self.name: str = kwargs["name"]
|
||||
self.location: str = kwargs["location"]
|
||||
self.service: str = kwargs["service"]
|
||||
self.target_fps: int = kwargs["target_fps"]
|
||||
self.stream_url: str = kwargs["stream_url"]
|
||||
self.snapshot_url: str = kwargs["snapshot_url"]
|
||||
self.flip_horizontal: bool = kwargs["flip_horizontal"]
|
||||
self.flip_vertical: bool = kwargs["flip_vertical"]
|
||||
self.rotation: int = kwargs["rotation"]
|
||||
self.source: str = kwargs["source"]
|
||||
|
||||
def as_dict(self):
|
||||
return {k: v for k, v in self.__dict__.items() if k[0] != "_"}
|
||||
|
||||
async def get_stream_url(self, convert_local: bool = False) -> str:
|
||||
return await self._get_url(self.stream_url, convert_local)
|
||||
|
||||
async def get_snapshot_url(self, convert_local: bool = False) -> str:
|
||||
return await self._get_url(self.snapshot_url, convert_local)
|
||||
|
||||
async def _get_url(self, url: str, convert_local: bool) -> str:
|
||||
if not url:
|
||||
raise self._server.error("Empty URL Provided")
|
||||
match = re.match(r"\w+://[^/]+", url)
|
||||
if match is None:
|
||||
# assume a partial URL on the default host
|
||||
url = f"{self._default_host}/{url.lstrip('/')}"
|
||||
if not convert_local:
|
||||
return url
|
||||
return await self.convert_local(url)
|
||||
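# A minimal sketch of the partial-URL handling in _get_url(), assuming a
# hypothetical default host of http://192.168.1.50:
_url = "/webcam/?action=stream"
if re.match(r"\w+://[^/]+", _url) is None:
    _url = f"http://192.168.1.50/{_url.lstrip('/')}"
# _url == "http://192.168.1.50/webcam/?action=stream"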
|
||||
def _get_local_ips(self) -> List[str]:
|
||||
all_ips: List[str] = []
|
||||
machine: Machine = self._server.lookup_component("machine")
|
||||
sys_info = machine.get_system_info()
|
||||
network = sys_info.get("network", {})
|
||||
iface: Dict[str, Any]
|
||||
for iface in network.values():
|
||||
addresses: List[Dict[str, Any]] = iface["ip_addresses"]
|
||||
for addr_info in addresses:
|
||||
all_ips.append(addr_info["address"])
|
||||
return all_ips
|
||||
|
||||
async def convert_local(self, url: str) -> str:
|
||||
match = re.match(r"(\w+)://([^/]+)(/.*)?", url)
|
||||
if match is None:
|
||||
return url
|
||||
scheme = match.group(1)
|
||||
addr = match.group(2)
|
||||
fragment = match.group(3)
|
||||
if fragment is None:
|
||||
fragment = ""
|
||||
if addr[0] == "[":
|
||||
# ipv6 address
|
||||
addr_match = re.match(r"\[(.+)\](:\d+)?", addr)
|
||||
else:
|
||||
# ipv4 address or hostname
|
||||
addr_match = re.match(r"([^:]+)(:\d+)?", addr)
|
||||
if addr_match is None:
|
||||
return url
|
||||
addr = addr_match.group(1)
|
||||
port: Optional[str] = addr_match.group(2)
|
||||
default_ports = {"http": "80", "https": "443", "rtsp": "554"}
|
||||
if port is None:
|
||||
if scheme not in default_ports:
|
||||
return url
|
||||
port = default_ports[scheme]
|
||||
else:
|
||||
port = port.lstrip(":")
|
||||
# attempt to convert hostname to IP
|
||||
try:
|
||||
eventloop = self._server.get_event_loop()
|
||||
addr_info = await eventloop.run_in_thread(
|
||||
socket.getaddrinfo, addr, int(port)
|
||||
)
|
||||
if addr_info:
|
||||
addr = addr_info[0][4][0]
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
ip = ipaddress.ip_address(addr)
|
||||
except Exception:
|
||||
# Invalid IP, can't convert.
|
||||
return url
|
||||
else:
|
||||
if ip.is_loopback:
|
||||
return url
|
||||
# Check to see if this ip address is on the local machine
|
||||
if addr not in self._get_local_ips():
|
||||
return url
|
||||
scmd: ShellCommandFactory
|
||||
scmd = self._server.lookup_component("shell_command")
|
||||
try:
|
||||
# Use the ss command to list all tcp ports
|
||||
resp: str = await scmd.exec_cmd("ss -ltn")
|
||||
lines = resp.split("\n")[1:]
|
||||
for line in lines:
|
||||
parts = line.strip().split()
|
||||
if len(parts) < 5:
|
||||
continue
|
||||
                # rsplit on the final colon so "[::]:80" style ipv6
                # listeners unpack correctly alongside "0.0.0.0:80"
                laddr, lport = parts[3].rsplit(":", 1)
                if lport == port:
                    if laddr == "[::]":
                        return f"{scheme}://[::1]:{port}{fragment}"
                    elif laddr == "0.0.0.0":
                        return f"{scheme}://127.0.0.1:{port}{fragment}"
        except scmd.error:
            pass
        return url
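
    # Illustrative behavior of convert_local() (host values assumed): with
    # a stream service on this machine listening on 0.0.0.0:8080, a URL of
    # "http://myhost.local:8080/stream" is rewritten to
    # "http://127.0.0.1:8080/stream"; URLs that resolve to loopback or to
    # a non-local address are returned unchanged.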

    @staticmethod
    def set_default_host(host: str) -> None:
        WebCam._default_host = host

    @classmethod
    def from_config(cls, config: ConfigHelper) -> WebCam:
        webcam: Dict[str, Any] = {}
        webcam["name"] = config.get_name().split(maxsplit=1)[-1]
        webcam["location"] = config.get("location", "printer")
        webcam["service"] = config.get("service", "mjpegstreamer")
        webcam["target_fps"] = config.getint("target_fps", 15)
        webcam["stream_url"] = config.get("stream_url")
        webcam["snapshot_url"] = config.get("snapshot_url")
        webcam["flip_horizontal"] = config.getboolean("flip_horizontal", False)
        webcam["flip_vertical"] = config.getboolean("flip_vertical", False)
        webcam["rotation"] = config.getint("rotation", 0)
        if webcam["rotation"] not in [0, 90, 180, 270]:
            raise config.error("Invalid value for option 'rotation'")
        webcam["source"] = "config"
        return cls(config.get_server(), **webcam)
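
    # Example section consumed by from_config() (illustrative values):
    #
    #   [webcam printer_cam]
    #   location: printer
    #   service: mjpegstreamer
    #   target_fps: 15
    #   stream_url: /webcam/?action=stream
    #   snapshot_url: /webcam/?action=snapshot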

    @classmethod
    def from_web_request(
        cls, server: Server, web_request: WebRequest
    ) -> WebCam:
        webcam: Dict[str, Any] = {}
        webcam["name"] = web_request.get_str("name")
        webcam["location"] = web_request.get_str("location", "printer")
        webcam["service"] = web_request.get_str("service", "mjpegstreamer")
        webcam["target_fps"] = web_request.get_int("target_fps", 15)
        webcam["stream_url"] = web_request.get_str("stream_url")
        webcam["snapshot_url"] = web_request.get_str("snapshot_url")
        webcam["flip_horizontal"] = web_request.get_boolean(
            "flip_horizontal", False
        )
        webcam["flip_vertical"] = web_request.get_boolean(
            "flip_vertical", False
        )
webcam["rotation"] = web_request.get_str("rotation", 0)
|
||||
if webcam["rotation"] not in [0, 90, 180, 270]:
|
||||
raise server.error("Invalid value for parameter 'rotate'")
|
||||
webcam["source"] = "database"
|
||||
return cls(server, **webcam)
|
||||
|
||||
@classmethod
|
||||
def from_database(cls, server: Server, cam_data: Dict[str, Any]) -> WebCam:
|
||||
webcam: Dict[str, Any] = {}
|
||||
for mfield, dbfield in CAM_FIELDS.items():
|
||||
webcam[mfield] = cam_data[dbfield]
|
||||
webcam["location"] = webcam.get("location", "printer")
|
||||
webcam["rotation"] = webcam.get("rotation", 0)
|
||||
webcam["source"] = "database"
|
||||
return cls(server, **webcam)
|
||||
|
||||
def load_component(config: ConfigHelper) -> WebcamManager:
|
||||
return WebcamManager(config)
|
||||
590
moonraker/components/wled.py
Normal file
@@ -0,0 +1,590 @@
# WLED neopixel support
#
# Copyright (C) 2021-2022 Richard Mitchell <richardjm+moonraker@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.

# Component to control the wled neopixel home system from AirCookie
# Github at https://github.com/Aircoookie/WLED
# Wiki at https://kno.wled.ge/

from __future__ import annotations
from enum import Enum
import logging
import json
import asyncio
import serial_asyncio
from tornado.httpclient import AsyncHTTPClient, HTTPRequest

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Type,
    List,
    Any,
    Optional,
    Union,
    Dict,
)

if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from websockets import WebRequest
    from . import klippy_apis
    APIComp = klippy_apis.KlippyAPI


class OnOff(str, Enum):
    on: str = "on"
    off: str = "off"


class Strip():
    _COLORSIZE: int = 4

    def __init__(self: Strip,
                 name: str,
                 cfg: ConfigHelper):
        self.server = cfg.get_server()
        self.request_mutex = asyncio.Lock()

        self.name = name

        self.initial_preset: int = cfg.getint("initial_preset", -1)
        self.initial_red: float = cfg.getfloat("initial_red", 0.5)
        self.initial_green: float = cfg.getfloat("initial_green", 0.5)
        self.initial_blue: float = cfg.getfloat("initial_blue", 0.5)
        self.initial_white: float = cfg.getfloat("initial_white", 0.5)
        self.chain_count: int = cfg.getint("chain_count", 1)

        # Supports rgbw always
        self._chain_data = bytearray(
            self.chain_count * self._COLORSIZE)

        self.onoff = OnOff.off
        self.preset = self.initial_preset
        # Runtime state; refreshed by initialize(). Defaults are provided
        # here so get_strip_info() is safe to call before initialization.
        self.send_full_chain_data: bool = True
        self.brightness: int = 255
        self.intensity: int = -1
        self.speed: int = -1
        self.error_state: Optional[str] = None

    def get_strip_info(self: Strip) -> Dict[str, Any]:
        return {
            "strip": self.name,
            "status": self.onoff.value,
            "chain_count": self.chain_count,
            "preset": self.preset,
            "brightness": self.brightness,
            "intensity": self.intensity,
            "speed": self.speed,
            "error": self.error_state
        }

    async def initialize(self: Strip) -> None:
        self.send_full_chain_data = True
        self.onoff = OnOff.on
        self.preset = self.initial_preset
        self.brightness = 255
        self.intensity = -1
        self.speed = -1
        if self.initial_preset >= 0:
            self._update_color_data(self.initial_red,
                                    self.initial_green,
                                    self.initial_blue,
                                    self.initial_white,
                                    None)
            await self.wled_on(self.initial_preset)
        else:
            await self.set_wled(self.initial_red,
                                self.initial_green,
                                self.initial_blue,
                                self.initial_white,
                                None,
                                True)

    def _update_color_data(self: Strip,
                           red: float, green: float, blue: float,
                           white: float, index: Optional[int]) -> None:
        red = int(red * 255. + .5)
        blue = int(blue * 255. + .5)
        green = int(green * 255. + .5)
        white = int(white * 255. + .5)
        led_data = [red, green, blue, white]

        if index is None:
            self._chain_data[:] = led_data * self.chain_count
        else:
            elem_size = len(led_data)
            self._chain_data[(index-1)*elem_size:index*elem_size] = led_data

    async def send_wled_command_impl(self: Strip,
                                     state: Dict[str, Any]) -> None:
        pass

    def close(self: Strip):
        pass

    async def _send_wled_command(self: Strip,
                                 state: Dict[str, Any]) -> None:
        try:
            await self.send_wled_command_impl(state)

            self.error_state = None
        except Exception as e:
            msg = f"WLED: Error {e}"
            self.error_state = msg
            logging.exception(msg)
            raise self.server.error(msg)

    async def wled_on(self: Strip, preset: int) -> None:
        self.onoff = OnOff.on
        logging.debug(f"WLED: {self.name} on PRESET={preset}")
        if preset < 0:
            # WLED_ON STRIP=strip (no args) - reset to default
            await self.initialize()
        else:
            self.send_full_chain_data = True
            self.preset = preset
            # Without reading the data back from wled we don't know the values
            self.brightness = -1
            self.intensity = -1
            self.speed = -1
            await self._send_wled_command({"on": True, "ps": preset})

    async def wled_off(self: Strip) -> None:
        logging.debug(f"WLED: {self.name} off")
        self.onoff = OnOff.off
        # Without this calling SET_WLED for a single pixel after WLED_OFF
        # would send just that pixel
        self.send_full_chain_data = True
        await self._send_wled_command({"on": False})

    async def wled_control(self: Strip, brightness: int, intensity: int,
                           speed: int) -> None:
        logging.debug(
            f"WLED: {self.name} control {self.onoff} BRIGHTNESS={brightness} "
            f"INTENSITY={intensity} SPEED={speed} CURRENTPRESET={self.preset}")

        if self.onoff == OnOff.off:
            logging.info("wled control only permitted when strip is on")
            return

        # Even if a preset is not activated sending seg {} information will
        # turn it back on
        control: Dict[str, Any]
        if self.preset != -1:
            control = {"tt": 0, "seg": {}}
        else:
            control = {"tt": 0}

        shouldSend: bool = False
        # Using 0 is not recommended in wled docs
        if brightness > 0:
            if brightness > 255:
                logging.info("BRIGHTNESS should be between 1 and 255")
            else:
                shouldSend = True
                self.brightness = brightness
                control["bri"] = self.brightness
                # Brightness in seg {} - only if a preset is on
                if self.preset != -1:
                    control["seg"]["bri"] = self.brightness

        # Intensity - only if a preset is on
        if intensity > -1 and self.preset != -1:
            if intensity > 255:
                logging.info("INTENSITY should be between 0 and 255")
            else:
                shouldSend = True
                self.intensity = intensity
                control["seg"]["ix"] = self.intensity

        # Speed - only if a preset is on
        if speed > -1 and self.preset != -1:
            if speed > 255:
                logging.info("SPEED should be between 0 and 255")
            else:
                shouldSend = True
                self.speed = speed
                control["seg"]["sx"] = self.speed

        # Control brightness, intensity, and speed for segment
        # This will allow full control for effects such as "Percent"
        if shouldSend:
            await self._send_wled_command(control)

    def _wled_pixel(self: Strip, index: int) -> List[int]:
        led_color_data: List[int] = []
        for p in self._chain_data[(index-1)*self._COLORSIZE:
                                  (index)*self._COLORSIZE]:
            led_color_data.append(p)
        return led_color_data

    async def set_wled(self: Strip,
                       red: float, green: float, blue: float, white: float,
                       index: Optional[int], transmit: bool) -> None:
        logging.debug(
            f"WLED: {self.name} R={red} G={green} B={blue} W={white} "
            f"INDEX={index} TRANSMIT={transmit}")
        self._update_color_data(red, green, blue, white, index)
        if transmit:
            # Clear preset (issues with sending seg{} will revert to preset)
            self.preset = -1

            # If we are coming from a preset without a wled_control
            # we don't know a brightness, this will also ensure
            # behaviour is consistent prior to introduction of wled_control
            if self.brightness == -1:
                self.brightness = 255

            # Base command for setting an led (for all active segments)
            # See https://kno.wled.ge/interfaces/json-api/
            state: Dict[str, Any] = {"on": True,
                                     "tt": 0,
                                     "bri": self.brightness,
                                     "seg": {"bri": self.brightness, "i": []}}
            if index is None:
                # All pixels same color only send range command of first color
                self.send_full_chain_data = False
                state["seg"]["i"] = [0, self.chain_count, self._wled_pixel(1)]
            elif self.send_full_chain_data:
                # Send a full set of color data (e.g. previous preset)
                self.send_full_chain_data = False
                cdata = []
                for i in range(self.chain_count):
                    cdata.append(self._wled_pixel(i+1))
                state["seg"]["i"] = cdata
            else:
                # Only one pixel has changed since last full data sent
                # so send just that one
                state["seg"]["i"] = [index-1, self._wled_pixel(index)]

            # Send wled control command
            await self._send_wled_command(state)

            if self.onoff == OnOff.off:
                # Without a repeated call individual led control doesn't
                # turn the led strip back on or doesn't set brightness
                # correctly from off
                # Confirmed as a bug:
                # https://discord.com/channels/473448917040758787/757254961640898622/934135556370202645
                self.onoff = OnOff.on
                await self._send_wled_command(state)
        else:
            # If not transmitting this time easiest just to send all data when
            # next transmitting
            self.send_full_chain_data = True
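

# Example full-strip JSON command produced by Strip.set_wled() for a
# 6-pixel chain set to red (illustrative values; payload shape per
# https://kno.wled.ge/interfaces/json-api/):
#   {"on": true, "tt": 0, "bri": 255,
#    "seg": {"bri": 255, "i": [0, 6, [255, 0, 0, 0]]}}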


class StripHttp(Strip):
    def __init__(self: StripHttp,
                 name: str,
                 cfg: ConfigHelper):
        super().__init__(name, cfg)

        # Read the uri information
        addr: str = cfg.get("address")
        port: int = cfg.getint("port", 80)
        protocol: str = cfg.get("protocol", "http")
        self.url = f"{protocol}://{addr}:{port}/json"

        self.timeout: float = cfg.getfloat("timeout", 2.)
        self.client = AsyncHTTPClient()

    async def send_wled_command_impl(self: StripHttp,
                                     state: Dict[str, Any],
                                     retries: int = 3
                                     ) -> None:
        async with self.request_mutex:
            logging.debug(f"WLED: url:{self.url} json:{state}")

            headers = {"Content-Type": "application/json"}
            request = HTTPRequest(url=self.url,
                                  method="POST",
                                  headers=headers,
                                  body=json.dumps(state),
                                  connect_timeout=self.timeout,
                                  request_timeout=self.timeout)
            for i in range(retries):
                try:
                    response = await self.client.fetch(request)
                except Exception:
                    if i == retries - 1:
                        raise
                    await asyncio.sleep(1.0)
                else:
                    break

            logging.debug(
                f"WLED: url:{self.url} status:{response.code} "
                f"response:{response.body.decode()}")
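

# The HTTP transport above is roughly equivalent to (illustrative):
#   curl -X POST "http://<address>:<port>/json" \
#        -H "Content-Type: application/json" -d '{"on": true, "ps": 1}'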


class StripSerial(Strip):
    def __init__(self: StripSerial,
                 name: str,
                 cfg: ConfigHelper):
        super().__init__(name, cfg)

        # Read the serial information (requires wled 0.13 2108250 or greater)
        self.serialport: str = cfg.get("serial")
        self.baud: int = cfg.getint("baud", 115200, above=49)

    async def send_wled_command_impl(self: StripSerial,
                                     state: Dict[str, Any]) -> None:
        async with self.request_mutex:
            if not hasattr(self, 'ser'):
                _, self.ser = await serial_asyncio.open_serial_connection(
                    url=self.serialport, baudrate=self.baud)

            logging.debug(f"WLED: serial:{self.serialport} json:{state}")

            self.ser.write(json.dumps(state).encode())

    def close(self: StripSerial):
        if hasattr(self, 'ser'):
            self.ser.close()
            logging.info(f"WLED: Closing serial {self.serialport}")


class WLED:
    def __init__(self: WLED, config: ConfigHelper) -> None:
        # root_logger = logging.getLogger()
        # root_logger.setLevel(logging.DEBUG)

        self.server = config.get_server()
        prefix_sections = config.get_prefix_sections("wled")
        logging.info(f"WLED component loading strips: {prefix_sections}")

        strip_types = {
            "HTTP": StripHttp,
            "SERIAL": StripSerial
        }
        self.strips = {}
        for section in prefix_sections:
            cfg = config[section]

            try:
                name_parts = cfg.get_name().split(maxsplit=1)
                if len(name_parts) != 2:
                    raise cfg.error(
                        f"Invalid Section Name: {cfg.get_name()}")
                name: str = name_parts[1]

                logging.info(f"WLED strip: {name}")

                # Discard old color_order setting, always support 4 color
                # strips
                _ = cfg.get("color_order", "", deprecate=True)

                strip_type: str = cfg.get("type", "http")
                strip_class: Optional[Type[Strip]]
                strip_class = strip_types.get(strip_type.upper())
                if strip_class is None:
                    raise config.error(f"Unsupported Strip Type: {strip_type}")

                self.strips[name] = strip_class(name, cfg)

            except Exception as e:
                # Ensures errors such as "Color not supported" are visible
                msg = f"Failed to initialise strip [{cfg.get_name()}]\n{e}"
                self.server.add_warning(msg)
                continue

        # Register two remote methods for GCODE
        self.server.register_remote_method(
            "set_wled_state", self.set_wled_state)
        self.server.register_remote_method(
            "set_wled", self.set_wled)

        # As moonraker is about making things a web api, let's try it
        # Yes, this is largely a cut-n-paste from power.py
        self.server.register_endpoint(
            "/machine/wled/strips", ["GET"],
            self._handle_list_strips)
        self.server.register_endpoint(
            "/machine/wled/status", ["GET"],
            self._handle_batch_wled_request)
        self.server.register_endpoint(
            "/machine/wled/on", ["POST"],
            self._handle_batch_wled_request)
        self.server.register_endpoint(
            "/machine/wled/off", ["POST"],
            self._handle_batch_wled_request)
        self.server.register_endpoint(
            "/machine/wled/toggle", ["POST"],
            self._handle_batch_wled_request)
        self.server.register_endpoint(
            "/machine/wled/strip", ["GET", "POST"],
            self._handle_single_wled_request)

    async def component_init(self) -> None:
        try:
            logging.debug("Initializing wled")
            event_loop = self.server.get_event_loop()
            cur_time = event_loop.get_loop_time()
            endtime = cur_time + 120.
            query_strips = list(self.strips.values())
            failed_strips: List[Strip] = []
            while cur_time < endtime:
                for strip in query_strips:
                    ret = strip.initialize()
                    if ret is not None:
                        await ret
                    if strip.error_state is not None:
                        failed_strips.append(strip)
                if not failed_strips:
                    logging.debug("All wled strips initialized")
                    return
                query_strips = failed_strips
                failed_strips = []
                await asyncio.sleep(2.)
                cur_time = event_loop.get_loop_time()
            if failed_strips:
                failed_names = [s.name for s in failed_strips]
                self.server.add_warning(
                    "The following wled strips failed init:"
                    f" {failed_names}")
        except Exception as e:
            logging.exception(e)

    async def wled_on(self: WLED, strip: str, preset: int) -> None:
        if strip not in self.strips:
            logging.info(f"Unknown WLED strip: {strip}")
            return
        await self.strips[strip].wled_on(preset)

    # Full control of wled
    # state: True, False, "on", "off"
    # preset: wled preset (int) to use (ignored if state False or "Off")
    async def set_wled_state(self: WLED, strip: str,
                             state: Union[bool, str, None] = None,
                             preset: int = -1, brightness: int = -1,
                             intensity: int = -1, speed: int = -1) -> None:
        status = None

        if isinstance(state, bool):
            status = OnOff.on if state else OnOff.off
        elif isinstance(state, str):
            status = state.lower()
            if status in ["true", "false"]:
                status = OnOff.on if status == "true" else OnOff.off

        if status is None and preset == -1 and brightness == -1 and \
                intensity == -1 and speed == -1:
            logging.info(
                "Invalid state received but no control or preset data passed")
            return

        if strip not in self.strips:
            logging.info(f"Unknown WLED strip: {strip}")
            return

        # All other arguments are ignored
        if status == OnOff.off:
            await self.strips[strip].wled_off()

        # Turn on if on or a preset is specified
        if status == OnOff.on or preset != -1:
            await self.strips[strip].wled_on(preset)

        # Control
        if brightness != -1 or intensity != -1 or speed != -1:
            await self.strips[strip].wled_control(brightness, intensity, speed)
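
    # Typical invocation from a Klipper gcode_macro (illustrative names):
    #   {action_call_remote_method("set_wled_state",
    #                              strip="case_lights",
    #                              state="on", preset=1)}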

    # Individual pixel control, for compatibility with SET_LED
    async def set_wled(self: WLED,
                       strip: str,
                       red: float = 0.,
                       green: float = 0.,
                       blue: float = 0.,
                       white: float = 0.,
                       index: Optional[int] = None,
                       transmit: int = 1) -> None:
        if strip not in self.strips:
            logging.info(f"Unknown WLED strip: {strip}")
            return
        if isinstance(index, int) and index < 0:
            index = None
        await self.strips[strip].set_wled(red, green, blue, white,
                                          index, transmit == 1)

    async def _handle_list_strips(self,
                                  web_request: WebRequest
                                  ) -> Dict[str, Any]:
        strips = {name: strip.get_strip_info()
                  for name, strip in self.strips.items()}
        output = {"strips": strips}
        return output

    async def _handle_single_wled_request(self: WLED,
                                          web_request: WebRequest
                                          ) -> Dict[str, Any]:
        strip_name: str = web_request.get_str('strip')
        preset: int = web_request.get_int('preset', -1)
        brightness: int = web_request.get_int('brightness', -1)
        intensity: int = web_request.get_int('intensity', -1)
        speed: int = web_request.get_int('speed', -1)

        req_action = web_request.get_action()
        if strip_name not in self.strips:
            raise self.server.error(f"No valid strip named {strip_name}")
        strip = self.strips[strip_name]
        if req_action == 'GET':
            return {strip_name: strip.get_strip_info()}
        elif req_action == "POST":
            action = web_request.get_str('action').lower()
            if action not in ["on", "off", "toggle", "control"]:
                raise self.server.error(
                    f"Invalid requested action '{action}'")
            result = await self._process_request(strip, action, preset,
                                                 brightness, intensity, speed)
            return {strip_name: result}

    async def _handle_batch_wled_request(self: WLED,
                                         web_request: WebRequest
                                         ) -> Dict[str, Any]:
        args = web_request.get_args()
        ep = web_request.get_endpoint()
        if not args:
            raise self.server.error("No arguments provided")
        requested_strips = {k: self.strips.get(k, None) for k in args}
        result = {}
        req = ep.split("/")[-1]
        for name, strip in requested_strips.items():
            if strip is not None:
                result[name] = await self._process_request(strip, req, -1,
                                                           -1, -1, -1)
            else:
                result[name] = {"error": "strip_not_found"}
        return result

    async def _process_request(self: WLED,
                               strip: Strip,
                               req: str,
                               preset: int,
                               brightness: int,
                               intensity: int,
                               speed: int
                               ) -> Dict[str, Any]:
        strip_onoff = strip.onoff

        if req == "status":
            return strip.get_strip_info()
        if req == "toggle":
            req = "on" if strip_onoff == OnOff.off else "off"

        if req in ["on", "off", "control"]:
            # Always do something, could be turning off colors, or changing
            # preset, easier not to have to worry
            if req == "on" or req == "control":
                if req == "on":
                    strip_onoff = OnOff.on
                    await strip.wled_on(preset)

                if brightness != -1 or intensity != -1 or speed != -1:
                    await strip.wled_control(brightness, intensity, speed)
            else:
                strip_onoff = OnOff.off
                await strip.wled_off()

            return strip.get_strip_info()

        raise self.server.error(f"Unsupported wled request: {req}")

    def close(self) -> None:
        for strip in self.strips.values():
            strip.close()


def load_component(config: ConfigHelper) -> WLED:
    return WLED(config)
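

# Example configuration for the component above (illustrative values):
#
#   [wled case_lights]
#   type: http
#   address: wled.local
#   initial_preset: 1
#   chain_count: 6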
102
moonraker/components/zeroconf.py
Normal file
@@ -0,0 +1,102 @@
# Zeroconf registration implementation for Moonraker
#
# Copyright (C) 2021 Clifford Roche <clifford.roche@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
from __future__ import annotations
import socket
import asyncio
import logging
from zeroconf import IPVersion
from zeroconf.asyncio import AsyncServiceInfo, AsyncZeroconf

from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional

if TYPE_CHECKING:
    from confighelper import ConfigHelper
    from .machine import Machine


class AsyncRunner:
    def __init__(self, ip_version: IPVersion) -> None:
        self.ip_version = ip_version
        self.aiozc: Optional[AsyncZeroconf] = None

    async def register_services(self, infos: List[AsyncServiceInfo]) -> None:
        self.aiozc = AsyncZeroconf(ip_version=self.ip_version)
        tasks = [
            self.aiozc.async_register_service(info, allow_name_change=True)
            for info in infos
        ]
        background_tasks = await asyncio.gather(*tasks)
        await asyncio.gather(*background_tasks)

    async def unregister_services(self, infos: List[AsyncServiceInfo]) -> None:
        assert self.aiozc is not None
        tasks = [self.aiozc.async_unregister_service(info) for info in infos]
        background_tasks = await asyncio.gather(*tasks)
        await asyncio.gather(*background_tasks)
        await self.aiozc.async_close()

    async def update_services(self, infos: List[AsyncServiceInfo]) -> None:
        assert self.aiozc is not None
        tasks = [self.aiozc.async_update_service(info) for info in infos]
        background_tasks = await asyncio.gather(*tasks)
        await asyncio.gather(*background_tasks)


class ZeroconfRegistrar:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.runner = AsyncRunner(IPVersion.All)
        hi = self.server.get_host_info()
        addresses: Optional[List[bytes]] = [socket.inet_aton(hi["address"])]
        self.bound_all = hi["address"] == "0.0.0.0"
        self.service_info = self._build_service_info(addresses)
        if self.bound_all:
            self.server.register_event_handler(
                "machine:net_state_changed", self._update_service)

    async def component_init(self) -> None:
        logging.info("Starting Zeroconf services")
        if self.bound_all:
            machine: Machine = self.server.lookup_component("machine")
            network = machine.get_system_info()["network"]
            addresses = [x for x in self._extract_ip_addresses(network)]
            self.service_info = self._build_service_info(addresses)
        await self.runner.register_services([self.service_info])

    async def close(self) -> None:
        await self.runner.unregister_services([self.service_info])

    async def _update_service(self, network: Dict[str, Any]) -> None:
        if self.bound_all:
            addresses = [x for x in self._extract_ip_addresses(network)]
            self.service_info = self._build_service_info(addresses)
            await self.runner.update_services([self.service_info])

    def _build_service_info(self,
                            addresses: Optional[List[bytes]] = None
                            ) -> AsyncServiceInfo:
        hi = self.server.get_host_info()
        return AsyncServiceInfo(
            "_moonraker._tcp.local.",
            f"Moonraker Instance on {hi['hostname']}._moonraker._tcp.local.",
            addresses=addresses,
            port=hi["port"],
            properties={"path": "/"},
            server=f"{hi['hostname']}.local.",
        )
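
    # The resulting record resolves over mDNS roughly as follows
    # (illustrative; default host name and port assumed):
    #   Moonraker Instance on myhost._moonraker._tcp.local.
    #   -> myhost.local:7125, TXT path=/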

    def _extract_ip_addresses(self, network: Dict[str, Any]) -> Iterator[bytes]:
        for ifname, ifinfo in network.items():
            for addr_info in ifinfo["ip_addresses"]:
                if addr_info["is_link_local"]:
                    continue
                is_ipv6 = addr_info['family'] == "ipv6"
                family = socket.AF_INET6 if is_ipv6 else socket.AF_INET
                yield socket.inet_pton(family, addr_info["address"])


def load_component(config: ConfigHelper) -> ZeroconfRegistrar:
    return ZeroconfRegistrar(config)
557
moonraker/confighelper.py
Normal file
@@ -0,0 +1,557 @@
# Configuration Helper
#
# Copyright (C) 2020 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license

from __future__ import annotations
import configparser
import os
import hashlib
import pathlib
import re
import logging
from utils import SentinelClass

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    IO,
    Optional,
    Set,
    Tuple,
    TypeVar,
    Union,
    Dict,
    List,
    Type,
)

if TYPE_CHECKING:
    from moonraker import Server
    from components.gpio import GpioFactory, GpioOutputPin
    from components.template import TemplateFactory, JinjaTemplate
    from io import TextIOWrapper
    _T = TypeVar("_T")
    ConfigVal = Union[None, int, float, bool, str, dict, list]

SENTINEL = SentinelClass.get_instance()
DOCS_URL = "https://moonraker.readthedocs.io/en/latest"


class ConfigError(Exception):
    pass


class ConfigHelper:
    error = ConfigError
    def __init__(self,
                 server: Server,
                 config: configparser.ConfigParser,
                 section: str,
                 parsed: Dict[str, Dict[str, ConfigVal]],
                 file_section_map: Dict[str, List[str]],
                 fallback_section: Optional[str] = None
                 ) -> None:
        self.server = server
        self.config = config
        self.section = section
        self.fallback_section: Optional[str] = fallback_section
        self.parsed = parsed
        if self.section not in self.parsed:
            self.parsed[self.section] = {}
        self.file_section_map = file_section_map
        self.sections = config.sections
        self.has_section = config.has_section

    def get_server(self) -> Server:
        return self.server

    def __getitem__(self, key: str) -> ConfigHelper:
        return self.getsection(key)

    def __contains__(self, key: str) -> bool:
        return key in self.config

    def has_option(self, option: str) -> bool:
        return self.config.has_option(self.section, option)

    def set_option(self, option: str, value: str) -> None:
        self.config[self.section][option] = value

    def get_name(self) -> str:
        return self.section

    def get_file(self) -> Optional[pathlib.Path]:
        for fname in reversed(self.file_section_map.keys()):
            if self.section in self.file_section_map[fname]:
                return pathlib.Path(fname)
        return None

    def get_options(self) -> Dict[str, str]:
        if self.section not in self.config:
            return {}
        return dict(self.config[self.section])

    def get_hash(self) -> hashlib._Hash:
        hash = hashlib.sha256()
        section = self.section
        if self.section not in self.config:
            return hash
        for option, val in self.config[section].items():
            hash.update(option.encode())
            hash.update(val.encode())
        return hash

    def get_prefix_sections(self, prefix: str) -> List[str]:
        return [s for s in self.sections() if s.startswith(prefix)]

    def getsection(
        self, section: str, fallback: Optional[str] = None
    ) -> ConfigHelper:
        return ConfigHelper(
            self.server, self.config, section, self.parsed,
            self.file_section_map, fallback
        )

    def _get_option(self,
                    func: Callable[..., Any],
                    option: str,
                    default: Union[SentinelClass, _T],
                    above: Optional[Union[int, float]] = None,
                    below: Optional[Union[int, float]] = None,
                    minval: Optional[Union[int, float]] = None,
                    maxval: Optional[Union[int, float]] = None,
                    deprecate: bool = False
                    ) -> _T:
        section = self.section
        warn_fallback = False
        if (
            self.section not in self.config and
            self.fallback_section is not None
        ):
            section = self.fallback_section
            warn_fallback = True
        try:
            val = func(section, option)
        except (configparser.NoOptionError, configparser.NoSectionError) as e:
            if isinstance(default, SentinelClass):
                raise ConfigError(str(e)) from None
            val = default
            section = self.section
        except Exception:
            raise ConfigError(
                f"Error parsing option ({option}) from "
                f"section [{self.section}]")
        else:
            if deprecate:
                self.server.add_warning(
                    f"[{self.section}]: Option '{option}' is "
                    "deprecated, see the configuration documentation "
                    f"at {DOCS_URL}/configuration/")
            if warn_fallback:
                help = f"{DOCS_URL}/configuration/#option-moved-deprecations"
                self.server.add_warning(
                    f"[{section}]: Option '{option}' has been moved "
                    f"to section [{self.section}]. Please correct your "
                    f"configuration, see {help} for detailed documentation."
                )
        self._check_option(option, val, above, below, minval, maxval)
        if (
            val is None or
            isinstance(val, (int, float, bool, str, dict, list))
        ):
            self.parsed[section][option] = val
        else:
            # If the item cannot be encoded to json serialize to a string
            self.parsed[section][option] = str(val)
        return val

    def _check_option(self,
                      option: str,
                      value: Union[int, float],
                      above: Optional[Union[int, float]],
                      below: Optional[Union[int, float]],
                      minval: Optional[Union[int, float]],
                      maxval: Optional[Union[int, float]]
                      ) -> None:
        if above is not None and value <= above:
            raise self.error(
                f"Config Error: Section [{self.section}], Option "
                f"'{option}: {value}': value is not above {above}")
        if below is not None and value >= below:
            raise self.error(
                f"Config Error: Section [{self.section}], Option "
                f"'{option}: {value}': value is not below {below}")
        if minval is not None and value < minval:
            raise self.error(
                f"Config Error: Section [{self.section}], Option "
                f"'{option}: {value}': value is below minimum value {minval}")
        if maxval is not None and value > maxval:
            raise self.error(
                f"Config Error: Section [{self.section}], Option "
                f"'{option}: {value}': value is above maximum value {maxval}")

    def get(self,
            option: str,
            default: Union[SentinelClass, _T] = SENTINEL,
            deprecate: bool = False
            ) -> Union[str, _T]:
        return self._get_option(
            self.config.get, option, default,
            deprecate=deprecate)

    def getint(self,
               option: str,
               default: Union[SentinelClass, _T] = SENTINEL,
               above: Optional[int] = None,
               below: Optional[int] = None,
               minval: Optional[int] = None,
               maxval: Optional[int] = None,
               deprecate: bool = False
               ) -> Union[int, _T]:
        return self._get_option(
            self.config.getint, option, default,
            above, below, minval, maxval, deprecate)

    def getboolean(self,
                   option: str,
                   default: Union[SentinelClass, _T] = SENTINEL,
                   deprecate: bool = False
                   ) -> Union[bool, _T]:
        return self._get_option(
            self.config.getboolean, option, default,
            deprecate=deprecate)

    def getfloat(self,
                 option: str,
                 default: Union[SentinelClass, _T] = SENTINEL,
                 above: Optional[float] = None,
                 below: Optional[float] = None,
                 minval: Optional[float] = None,
                 maxval: Optional[float] = None,
                 deprecate: bool = False
                 ) -> Union[float, _T]:
        return self._get_option(
            self.config.getfloat, option, default,
            above, below, minval, maxval, deprecate)

    def getlists(self,
                 option: str,
                 default: Union[SentinelClass, _T] = SENTINEL,
                 list_type: Type = str,
                 separators: Tuple[Optional[str], ...] = ('\n',),
                 count: Optional[Tuple[Optional[int], ...]] = None,
                 deprecate: bool = False
                 ) -> Union[List[Any], _T]:
        if count is not None and len(count) != len(separators):
            raise ConfigError(
                f"Option '{option}' in section "
                f"[{self.section}]: length of 'count' argument must "
                "match length of 'separators' argument")
        if count is None:
            count = tuple(None for _ in range(len(separators)))

        def list_parser(value: str,
                        ltype: Type,
                        seps: Tuple[Optional[str], ...],
                        expected_cnt: Tuple[Optional[int], ...]
                        ) -> List[Any]:
            sep = seps[0]
            seps = seps[1:]
            cnt = expected_cnt[0]
            expected_cnt = expected_cnt[1:]
            ret: List[Any] = []
            if seps:
                sub_lists = [val.strip() for val in value.split(sep)
                             if val.strip()]
                for sub_list in sub_lists:
                    ret.append(list_parser(sub_list, ltype, seps,
                                           expected_cnt))
            else:
                ret = [ltype(val.strip()) for val in value.split(sep)
                       if val.strip()]
            if cnt is not None and len(ret) != cnt:
                raise ConfigError(
                    f"List length mismatch, expected {cnt}, "
                    f"parsed {len(ret)}")
            return ret

        def getlist_wrapper(sec: str, opt: str) -> List[Any]:
            val = self.config.get(sec, opt)
            assert count is not None
            return list_parser(val, list_type, separators, count)

        return self._get_option(getlist_wrapper, option, default,
                                deprecate=deprecate)
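
    # Illustrative parsing example: with list_type=int,
    # separators=('\n', ',') and count=(None, 2), an option value of
    #   1, 2
    #   3, 4
    # parses to [[1, 2], [3, 4]]; a row with a different element count
    # raises a ConfigError.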

    def getlist(self,
                option: str,
                default: Union[SentinelClass, _T] = SENTINEL,
                separator: Optional[str] = '\n',
                count: Optional[int] = None,
                deprecate: bool = False
                ) -> Union[List[str], _T]:
        return self.getlists(option, default, str, (separator,), (count,),
                             deprecate=deprecate)

    def getintlist(self,
                   option: str,
                   default: Union[SentinelClass, _T] = SENTINEL,
                   separator: Optional[str] = '\n',
                   count: Optional[int] = None,
                   deprecate: bool = False
                   ) -> Union[List[int], _T]:
        return self.getlists(option, default, int, (separator,), (count,),
                             deprecate=deprecate)

    def getfloatlist(self,
                     option: str,
                     default: Union[SentinelClass, _T] = SENTINEL,
                     separator: Optional[str] = '\n',
                     count: Optional[int] = None,
                     deprecate: bool = False
                     ) -> Union[List[float], _T]:
        return self.getlists(option, default, float, (separator,), (count,),
                             deprecate=deprecate)

    def getdict(self,
                option: str,
                default: Union[SentinelClass, _T] = SENTINEL,
                separators: Tuple[Optional[str], Optional[str]] = ('\n', '='),
                dict_type: Type = str,
                allow_empty_fields: bool = False,
                deprecate: bool = False
                ) -> Union[Dict[str, Any], _T]:
        if len(separators) != 2:
            raise ConfigError(
                "The `separators` argument of getdict() must be a Tuple "
                "of length of 2")

        def getdict_wrapper(sec: str, opt: str) -> Dict[str, Any]:
            val = self.config.get(sec, opt)
            ret: Dict[str, Any] = {}
            for line in val.split(separators[0]):
                line = line.strip()
                if not line:
                    continue
                parts = line.split(separators[1], 1)
                if len(parts) == 1:
                    if allow_empty_fields:
                        ret[parts[0].strip()] = None
                    else:
                        raise ConfigError(
                            f"Failed to parse dictionary field, {line}")
                else:
                    ret[parts[0].strip()] = dict_type(parts[1].strip())
            return ret

        return self._get_option(getdict_wrapper, option, default,
                                deprecate=deprecate)
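
    # Illustrative: with the default separators ('\n', '=') and
    # dict_type=int, an option value of
    #   extruder=240
    #   heater_bed=60
    # parses to {"extruder": 240, "heater_bed": 60}.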

    def getgpioout(self,
                   option: str,
                   default: Union[SentinelClass, _T] = SENTINEL,
                   initial_value: int = 0,
                   deprecate: bool = False
                   ) -> Union[GpioOutputPin, _T]:
        try:
            gpio: GpioFactory = self.server.load_component(self, 'gpio')
        except Exception:
            raise ConfigError(
                f"Section [{self.section}], option '{option}', "
                "GPIO Component not available")

        def getgpio_wrapper(sec: str, opt: str) -> GpioOutputPin:
            val = self.config.get(sec, opt)
            return gpio.setup_gpio_out(val, initial_value)
        return self._get_option(getgpio_wrapper, option, default,
                                deprecate=deprecate)

    def gettemplate(self,
                    option: str,
                    default: Union[SentinelClass, _T] = SENTINEL,
                    is_async: bool = False,
                    deprecate: bool = False
                    ) -> Union[JinjaTemplate, _T]:
        try:
            template: TemplateFactory
            template = self.server.load_component(self, 'template')
        except Exception:
            raise ConfigError(
                f"Section [{self.section}], option '{option}', "
                "Template Component not available")

        def gettemplate_wrapper(sec: str, opt: str) -> JinjaTemplate:
            val = self.config.get(sec, opt)
            return template.create_template(val.strip(), is_async)

        return self._get_option(gettemplate_wrapper, option, default,
                                deprecate=deprecate)

    def load_template(self,
                      option: str,
                      default: Union[SentinelClass, str] = SENTINEL,
                      is_async: bool = False,
                      deprecate: bool = False
                      ) -> JinjaTemplate:
        val = self.gettemplate(option, default, is_async, deprecate)
        if isinstance(val, str):
            template: TemplateFactory
            template = self.server.lookup_component('template')
            return template.create_template(val.strip(), is_async)
        return val

    def read_supplemental_dict(self, obj: Dict[str, Any]) -> ConfigHelper:
        if not obj:
raise ConfigError(f"Cannot ready Empty Dict")
|
||||
        try:
            sup_cfg = configparser.ConfigParser(interpolation=None)
            sup_cfg.read_dict(obj)
        except Exception:
            raise ConfigError("Error Reading Object")
        sections = sup_cfg.sections()
        return ConfigHelper(self.server, sup_cfg, sections[0], {}, {})

    def read_supplemental_config(self, file_name: str) -> ConfigHelper:
        cfg_file_path = os.path.normpath(os.path.expanduser(file_name))
        if not os.path.isfile(cfg_file_path):
            raise ConfigError(
f"Configuration File Not Found: '{cfg_file_path}''")
|
||||
        try:
            sup_cfg = configparser.ConfigParser(interpolation=None)
            with open(cfg_file_path) as cfg_file:
                sup_cfg.read_file(cfg_file)
        except Exception:
            raise ConfigError(f"Error Reading Config: '{cfg_file_path}'")
        sections = sup_cfg.sections()
        return ConfigHelper(self.server, sup_cfg, sections[0], {}, {})

    def write_config(self, file_obj: IO[str]) -> None:
        self.config.write(file_obj)

    def get_parsed_config(self) -> Dict[str, Dict[str, ConfigVal]]:
        return dict(self.parsed)

    def get_orig_config(self) -> Dict[str, Dict[str, str]]:
        return {
            key: dict(val) for key, val in self.config.items()
        }

    def get_file_sections(self) -> Dict[str, List[str]]:
        return dict(self.file_section_map)

    def get_config_files(self) -> List[str]:
        return list(self.file_section_map.keys())

    def validate_config(self) -> None:
        for sect in self.config.sections():
            if sect not in self.parsed:
                self.server.add_warning(
                    f"Unparsed config section [{sect}] detected. This "
                    "may be the result of a component that failed to "
                    "load. In the future this will result in a startup "
                    "error.")
                continue
            parsed_opts = self.parsed[sect]
            for opt, val in self.config.items(sect):
                if opt not in parsed_opts:
                    self.server.add_warning(
                        f"Unparsed config option '{opt}: {val}' detected in "
                        f"section [{sect}]. This may be an option no longer "
                        "available or could be the result of a module that "
                        "failed to load. In the future this will result "
                        "in a startup error.")

    def create_backup(self):
        cfg_path = self.server.get_app_args()["config_file"]
        cfg = pathlib.Path(cfg_path).expanduser().resolve()
        backup = cfg.parent.joinpath(f".{cfg.name}.bkp")
        backup_fp: Optional[TextIOWrapper] = None
        try:
            if backup.exists():
                cfg_mtime: int = 0
                for cfg_fname in set(self.file_section_map.keys()):
                    cfg = pathlib.Path(cfg_fname)
                    cfg_mtime = max(cfg_mtime, cfg.stat().st_mtime_ns)
                backup_mtime = backup.stat().st_mtime_ns
                if backup_mtime >= cfg_mtime:
                    # Backup already exists and is current
                    return
            backup_fp = backup.open("w")
            self.config.write(backup_fp)
            logging.info(f"Backing up last working configuration to '{backup}'")
        except Exception:
            logging.exception("Failed to create a backup")
        finally:
            if backup_fp is not None:
                backup_fp.close()


def get_configuration(
    server: Server, app_args: Dict[str, Any]
) -> ConfigHelper:
    config = configparser.ConfigParser(interpolation=None)
    section_map = parse_config_file(config, app_args)
    if not config.has_section('server'):
        raise ConfigError("No section [server] in config")
    return ConfigHelper(server, config, 'server', {}, section_map)


def parse_config_file(
    config: configparser.ConfigParser, app_args: Dict[str, Any]
) -> Dict[str, List[str]]:
    start_path = pathlib.Path(app_args['config_file']).expanduser().resolve()
    config_files: List[pathlib.Path] = [start_path]
    visited_files: Set[Tuple[int, int]] = set()
    file_sections: Dict[str, List[str]] = {}
    while config_files:
        config_path = config_files.pop(0)
        try:
            stat = config_path.stat()
            visited = (stat.st_dev, stat.st_ino)
            if visited in visited_files:
                raise ConfigError("Recursive include directive detected")
            visited_files.add(visited)
            data = config_path.read_text()
            config.read_string(data)
        except Exception as e:
            if not config_path.is_file():
                raise ConfigError(
f"Configuration File Not Found: '{config_path}''") from e
|
||||
            if not os.access(config_path, os.R_OK):
                raise ConfigError(
                    "Moonraker does not have Read/Write permission for "
                    f"config file at path '{config_path}'") from e
            raise ConfigError(f"Error Reading Config: '{config_path}'") from e
        all_sections: List[str] = re.findall(
            r"^\[([^]]+)\]\s*$", data, flags=re.MULTILINE
        )
        file_sections[str(config_path)] = [
            sec for sec in all_sections if not sec.startswith("include")
        ]
        for sec in config.sections():
            if not sec.startswith("include"):
                continue
            str_path = sec[8:].strip()
            if not str_path:
                raise ConfigError(
                    f"Invalid include directive: [{sec}]"
                )
            config.remove_section(sec)
            if str_path[0] == "/":
                path = pathlib.Path(str_path)
                paths = sorted(path.parent.glob(path.name))
            else:
                paths = sorted(config_path.parent.glob(str_path))
            if not paths:
                raise ConfigError(
                    f"No files matching include directive [{sec}]"
                )
            config_files.extend(paths)
    return file_sections
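
# Include directives handled above look like the following (illustrative):
#
#   [include extras/*.conf]
#
# Relative globs resolve against the including file's directory; absolute
# paths are used as-is.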


def find_config_backup(cfg_path: str) -> Optional[str]:
    cfg = pathlib.Path(cfg_path).expanduser().resolve()
    backup = cfg.parent.joinpath(f".{cfg.name}.bkp")
    if backup.is_file():
        return str(backup)
    return None
193
moonraker/eventloop.py
Normal file
@@ -0,0 +1,193 @@
# Wrapper around the asyncio eventloop
#
# Copyright (C) 2021 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license

from __future__ import annotations
import asyncio
import inspect
import functools
import socket
import time
import logging
from typing import (
    TYPE_CHECKING,
    Awaitable,
    Callable,
    Coroutine,
    Optional,
    Tuple,
    TypeVar,
    Union
)

if TYPE_CHECKING:
    _T = TypeVar("_T")
    FlexCallback = Callable[..., Optional[Awaitable]]
    TimerCallback = Callable[[float], Union[float, Awaitable[float]]]


class EventLoop:
    TimeoutError = asyncio.TimeoutError
    def __init__(self) -> None:
        self.reset()

    def reset(self) -> None:
        self.aioloop = self._create_new_loop()
        self.add_signal_handler = self.aioloop.add_signal_handler
        self.remove_signal_handler = self.aioloop.remove_signal_handler
        self.add_reader = self.aioloop.add_reader
        self.add_writer = self.aioloop.add_writer
        self.remove_reader = self.aioloop.remove_reader
        self.remove_writer = self.aioloop.remove_writer
        self.get_loop_time = self.aioloop.time
        self.create_future = self.aioloop.create_future
        self.create_task = self.aioloop.create_task
        self.call_at = self.aioloop.call_at
        self.set_debug = self.aioloop.set_debug
        self.is_running = self.aioloop.is_running

    def _create_new_loop(self) -> asyncio.AbstractEventLoop:
        for _ in range(5):
            # Sometimes the new loop does not properly instantiate.
            # Give 5 attempts before raising an exception
            new_loop = asyncio.new_event_loop()
            if not new_loop.is_closed():
                break
            logging.info("Failed to create an open eventloop, "
                         "retrying in .5 seconds...")
            time.sleep(.5)
        else:
            raise RuntimeError("Unable to create new open eventloop")
        asyncio.set_event_loop(new_loop)
        return new_loop

    def register_callback(self,
                          callback: FlexCallback,
                          *args,
                          **kwargs
                          ) -> None:
        if inspect.iscoroutinefunction(callback):
            self.aioloop.create_task(callback(*args, **kwargs))  # type: ignore
        else:
            self.aioloop.call_soon(
                functools.partial(callback, *args, **kwargs))

    def delay_callback(self,
                       delay: float,
                       callback: FlexCallback,
                       *args,
                       **kwargs
                       ) -> asyncio.TimerHandle:
        if inspect.iscoroutinefunction(callback):
            return self.aioloop.call_later(
                delay, self._async_callback,
                functools.partial(callback, *args, **kwargs))
        else:
            return self.aioloop.call_later(
                delay, functools.partial(callback, *args, **kwargs))
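
    # Illustrative usage; plain functions and coroutine functions are
    # scheduled the same way:
    #   handle = event_loop.delay_callback(5., notify_clients, "payload")
    #   handle.cancel()   # abort before it fires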

    def register_timer(self, callback: TimerCallback):
        return FlexTimer(self, callback)

    def _async_callback(self, callback: Callable[[], Coroutine]) -> None:
        # This wrapper delays creation of the coroutine object. In the
        # event that a callback is cancelled this prevents "coroutine
        # was never awaited" warnings in asyncio
        self.aioloop.create_task(callback())

    def run_in_thread(self,
                      callback: Callable[..., _T],
                      *args
                      ) -> Awaitable[_T]:
        return self.aioloop.run_in_executor(None, callback, *args)

    async def create_socket_connection(
        self, address: Tuple[str, int], timeout: Optional[float] = None
    ) -> socket.socket:
        """
        async port of socket.create_connection()
        """
        host, port = address
        loop = self.aioloop
        err = None
        ainfo = await loop.getaddrinfo(
            host, port, family=0, type=socket.SOCK_STREAM
        )
        for res in ainfo:
            af, socktype, proto, canonname, sa = res
            sock = None
            try:
                sock = socket.socket(af, socktype, proto)
                sock.settimeout(0)
                sock.setblocking(False)
                await asyncio.wait_for(
                    loop.sock_connect(sock, (host, port)), timeout
                )
                # Break explicitly a reference cycle
                err = None
                return sock
            except (socket.error, asyncio.TimeoutError) as _:
                err = _
                if sock is not None:
                    loop.remove_writer(sock.fileno())
                    sock.close()
        if err is not None:
            try:
                raise err
            finally:
                # Break explicitly a reference cycle
                err = None
        else:
            raise socket.error("getaddrinfo returns an empty list")

    def start(self):
        self.aioloop.run_forever()

    def stop(self):
        self.aioloop.stop()

    def close(self):
        self.aioloop.close()


class FlexTimer:
    def __init__(self,
                 eventloop: EventLoop,
                 callback: TimerCallback
                 ) -> None:
        self.eventloop = eventloop
        self.callback = callback
        self.timer_handle: Optional[asyncio.TimerHandle] = None
        self.running: bool = False

    def start(self, delay: float = 0.):
        if self.running:
            return
        self.running = True
        call_time = self.eventloop.get_loop_time() + delay
        self.timer_handle = self.eventloop.call_at(
            call_time, self._schedule_task)

    def stop(self):
        if not self.running:
            return
        self.running = False
        if self.timer_handle is not None:
            self.timer_handle.cancel()
            self.timer_handle = None

    def _schedule_task(self):
        self.timer_handle = None
        self.eventloop.create_task(self._call_wrapper())

    def is_running(self) -> bool:
        return self.running

    async def _call_wrapper(self):
        if not self.running:
            return
        ret = self.callback(self.eventloop.get_loop_time())
        if isinstance(ret, Awaitable):
            ret = await ret
        if self.running:
            self.timer_handle = self.eventloop.call_at(ret, self._schedule_task)
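

# Typical FlexTimer usage (illustrative): the callback receives the current
# loop time and returns the absolute loop time at which it should fire next.
#
#   def _poll(eventtime: float) -> float:
#       do_work()
#       return eventtime + 1.   # run again in one second
#
#   timer = event_loop.register_timer(_poll)
#   timer.start()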
605
moonraker/klippy_connection.py
Normal file
@@ -0,0 +1,605 @@
# KlippyConnection - manage unix socket connection to Klipper
#
# Copyright (C) 2022 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license

from __future__ import annotations
import os
import time
import logging
import json
import getpass
import confighelper
import asyncio
import socket
import struct
from utils import ServerError

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Optional,
    Callable,
    Coroutine,
    Dict,
    List,
    Set,
)
if TYPE_CHECKING:
    from app import MoonrakerApp
    from websockets import WebRequest, Subscribable
    from components.klippy_apis import KlippyAPI
    from components.file_manager.file_manager import FileManager
    from asyncio.trsock import TransportSocket
    FlexCallback = Callable[..., Optional[Coroutine]]

INIT_TIME = .25
LOG_ATTEMPT_INTERVAL = int(2. / INIT_TIME + .5)
MAX_LOG_ATTEMPTS = 10 * LOG_ATTEMPT_INTERVAL
UNIX_BUFFER_LIMIT = 20 * 1024 * 1024


class KlippyConnection:
    def __init__(self, config: confighelper.ConfigHelper) -> None:
        self.server = config.get_server()
        self.uds_address: str = config.get(
            'klippy_uds_address', "/tmp/klippy_uds")
        self.writer: Optional[asyncio.StreamWriter] = None
        self.connection_mutex: asyncio.Lock = asyncio.Lock()
        self.event_loop = self.server.get_event_loop()
        self.log_no_access = True
        # Connection State
        self.connection_task: Optional[asyncio.Task] = None
        self.closing: bool = False
        self._klippy_info: Dict[str, Any] = {}
        self.init_list: List[str] = []
        self._klipper_version: str = ""
        self._missing_reqs: Set[str] = set()
        self._peer_cred: Dict[str, int] = {}
        self.init_attempts: int = 0
        self._state: str = "disconnected"
        self.subscriptions: Dict[Subscribable, Dict[str, Any]] = {}
# Setup remote methods accessable to Klippy. Note that all
|
||||
# registered remote methods should be of the notification type,
|
||||
# they do not return a response to Klippy after execution
|
||||
self.pending_requests: Dict[int, KlippyRequest] = {}
|
||||
self.remote_methods: Dict[str, FlexCallback] = {}
|
||||
self.klippy_reg_methods: List[str] = []
|
||||
self.register_remote_method(
|
||||
'process_gcode_response', self._process_gcode_response,
|
||||
need_klippy_reg=False)
|
||||
self.register_remote_method(
|
||||
'process_status_update', self._process_status_update,
|
||||
need_klippy_reg=False)
|
||||
self.server.register_component("klippy_connection", self)
|
||||
|
||||
@property
|
||||
def klippy_apis(self) -> KlippyAPI:
|
||||
return self.server.lookup_component("klippy_apis")
|
||||
|
||||
@property
|
||||
def state(self) -> str:
|
||||
return self._state
|
||||
|
||||
@property
|
||||
def klippy_info(self) -> Dict[str, Any]:
|
||||
return self._klippy_info
|
||||
|
||||
@property
|
||||
def missing_requirements(self) -> List[str]:
|
||||
return list(self._missing_reqs)
|
||||
|
||||
@property
|
||||
def peer_credentials(self) -> Dict[str, int]:
|
||||
return dict(self._peer_cred)
|
||||
|
||||
async def wait_connected(self) -> bool:
|
||||
if (
|
||||
self.connection_task is None or
|
||||
self.connection_task.done()
|
||||
):
|
||||
return self.is_connected()
|
||||
try:
|
||||
await self.connection_task
|
||||
except Exception:
|
||||
pass
|
||||
return self.is_connected()
|
||||
|
||||
async def wait_started(self, timeout: float = 20.) -> bool:
|
||||
if self.connection_task is None or not self.is_connected():
|
||||
return False
|
||||
if not self.connection_task.done():
|
||||
await asyncio.wait_for(
|
||||
asyncio.shield(self.connection_task), timeout=timeout)
|
||||
return self.is_connected()
|
||||
|
||||
async def _read_stream(self, reader: asyncio.StreamReader) -> None:
|
||||
errors_remaining: int = 10
|
||||
while not reader.at_eof():
|
||||
try:
|
||||
data = await reader.readuntil(b'\x03')
|
||||
except (ConnectionError, asyncio.IncompleteReadError):
|
||||
break
|
||||
except asyncio.CancelledError:
|
||||
logging.exception("Klippy Stream Read Cancelled")
|
||||
raise
|
||||
except Exception:
|
||||
logging.exception("Klippy Stream Read Error")
|
||||
errors_remaining -= 1
|
||||
if not errors_remaining or not self.is_connected():
|
||||
break
|
||||
continue
|
||||
errors_remaining = 10
|
||||
try:
|
||||
decoded_cmd = json.loads(data[:-1])
|
||||
self._process_command(decoded_cmd)
|
||||
except Exception:
|
||||
logging.exception(
|
||||
f"Error processing Klippy Host Response: {data.decode()}")
|
||||
if not self.closing:
|
||||
logging.debug("Klippy Disconnection From _read_stream()")
|
||||
await self.close()
|
||||
|
||||
async def _write_request(self, request: KlippyRequest) -> None:
|
||||
if self.writer is None or self.closing:
|
||||
self.pending_requests.pop(request.id, None)
|
||||
request.notify(ServerError("Klippy Host not connected", 503))
|
||||
return
|
||||
data = json.dumps(request.to_dict()).encode() + b"\x03"
|
||||
try:
|
||||
self.writer.write(data)
|
||||
await self.writer.drain()
|
||||
except asyncio.CancelledError:
|
||||
self.pending_requests.pop(request.id, None)
|
||||
request.notify(ServerError("Klippy Write Request Cancelled", 503))
|
||||
raise
|
||||
except Exception:
|
||||
self.pending_requests.pop(request.id, None)
|
||||
request.notify(ServerError("Klippy Write Request Error", 503))
|
||||
if not self.closing:
|
||||
logging.debug("Klippy Disconnection From _write_request()")
|
||||
await self.close()
|
||||
|
||||
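    # Framing note: every payload exchanged over the unix socket is a JSON
    # object terminated by a single 0x03 byte, which is why _read_stream()
    # splits on readuntil(b'\x03') and _write_request() appends b"\x03".
    # An illustrative request/response pair on the wire (values hypothetical):
    #
    #   {"id": 139874, "method": "info", "params": {}}\x03
    #   {"id": 139874, "result": {"state": "ready"}}\x03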
    def register_remote_method(self,
                               method_name: str,
                               cb: FlexCallback,
                               need_klippy_reg: bool = True
                               ) -> None:
        if method_name in self.remote_methods:
            raise self.server.error(
                f"Remote method ({method_name}) already registered")
        if self.server.is_running():
            raise self.server.error(
                f"Failed to register remote method {method_name}, "
                "methods must be registered during initialization")
        self.remote_methods[method_name] = cb
        if need_klippy_reg:
            # These methods need to be registered with Klippy
            self.klippy_reg_methods.append(method_name)

    def connect(self) -> Awaitable[bool]:
        if (
            self.is_connected() or
            not self.server.is_running() or
            (self.connection_task is not None and
             not self.connection_task.done())
        ):
            # already connecting
            fut = self.event_loop.create_future()
            fut.set_result(self.is_connected())
            return fut
        self.connection_task = self.event_loop.create_task(self._do_connect())
        return self.connection_task

    async def _do_connect(self) -> bool:
        async with self.connection_mutex:
            while self.writer is None:
                await asyncio.sleep(INIT_TIME)
                if self.closing or not self.server.is_running():
                    return False
                if not os.path.exists(self.uds_address):
                    continue
                if not os.access(self.uds_address, os.R_OK | os.W_OK):
                    if self.log_no_access:
                        user = getpass.getuser()
                        logging.info(
                            f"Cannot connect to Klippy, Linux user '{user}' "
                            "lacks permission to open Unix Domain Socket: "
                            f"{self.uds_address}")
                        self.log_no_access = False
                    continue
                self.log_no_access = True
                try:
                    reader, writer = await asyncio.open_unix_connection(
                        self.uds_address, limit=UNIX_BUFFER_LIMIT)
                except asyncio.CancelledError:
                    raise
                except Exception:
                    continue
                logging.info("Klippy Connection Established")
                self.writer = writer
                self._get_peer_credentials(writer)
                self.event_loop.create_task(self._read_stream(reader))
                return await self._init_klippy_connection()

    def _get_peer_credentials(self, writer: asyncio.StreamWriter) -> None:
        sock: TransportSocket
        sock = writer.get_extra_info("socket", None)
        if sock is None:
            logging.debug(
                "Unable to get Unix Socket, can't fetch peer credentials"
            )
            return
        data: bytes = b""
        try:
            # struct ucred is three 32-bit values (pid, uid, gid), hence the
            # 12 byte buffer.  "@III" matches that layout; a native long
            # format ("@LLL") would expect 24 bytes on 64-bit platforms and
            # always fail to unpack.
            data = sock.getsockopt(socket.SOL_SOCKET, socket.SO_PEERCRED, 12)
            pid, uid, gid = struct.unpack("@III", data)
        except asyncio.CancelledError:
            raise
        except Exception:
            logging.exception(
                f"Failed to get Klippy Peer Credentials, raw: 0x{data.hex()}"
            )
            return
        self._peer_cred = {
            "process_id": pid,
            "user_id": uid,
            "group_id": gid
        }
        logging.debug(
            f"Klippy Connection: Received Peer Credentials: {self._peer_cred}"
        )

    async def _init_klippy_connection(self) -> bool:
        self.init_list = []
        self._missing_reqs.clear()
        self.init_attempts = 0
        self._state = "initializing"
        webhooks_err_logged = False
        gcout_err_logged = False
        while self.server.is_running():
            await asyncio.sleep(INIT_TIME)
            # Register "webhooks" subscription
            if "webhooks_sub" not in self.init_list:
                try:
                    await self.klippy_apis.subscribe_objects(
                        {'webhooks': None})
                except ServerError as e:
                    if not webhooks_err_logged:
                        webhooks_err_logged = True
                        logging.info(
                            f"{e}\nUnable to subscribe to webhooks object")
                else:
                    logging.info("Webhooks Subscribed")
                    self.init_list.append("webhooks_sub")
            # Subscribe to Gcode Output
            if "gcode_output_sub" not in self.init_list:
                try:
                    await self.klippy_apis.subscribe_gcode_output()
                except ServerError as e:
                    if not gcout_err_logged:
                        gcout_err_logged = True
                        logging.info(
                            f"{e}\nUnable to register gcode output "
                            "subscription")
                else:
                    logging.info("GCode Output Subscribed")
                    self.init_list.append("gcode_output_sub")
            if "startup_complete" not in self.init_list:
                await self._check_ready()
            if len(self.init_list) == 5:
                logging.debug("Klippy Connection Initialized")
                return True
            elif not self.is_connected():
                break
            else:
                self.init_attempts += 1
        logging.debug("Klippy Connection Failed to Init")
        return False

    async def _request_endpoints(self) -> None:
        result = await self.klippy_apis.list_endpoints(default=None)
        if result is None:
            return
        endpoints = result.get('endpoints', [])
        app: MoonrakerApp = self.server.lookup_component("application")
        for ep in endpoints:
            app.register_remote_handler(ep)

    async def _check_ready(self) -> None:
        send_id = "identified" not in self.init_list
        result: Dict[str, Any]
        try:
            result = await self.klippy_apis.get_klippy_info(send_id)
        except ServerError as e:
            if self.init_attempts % LOG_ATTEMPT_INTERVAL == 0 and \
                    self.init_attempts <= MAX_LOG_ATTEMPTS:
                logging.info(
                    f"{e}\nKlippy info request error. This indicates that\n"
                    "Klippy may have experienced an error during startup.\n"
                    "Please check klippy.log for more information")
            return
        version = result.get("software_version", "")
        if version != self._klipper_version:
            self._klipper_version = version
            msg = f"Klipper Version: {version}"
            self.server.add_log_rollover_item("klipper_version", msg)
        self._klippy_info = dict(result)
        state = result.get('state', "unknown")
        if state != "startup" and "endpoints_requested" not in self.init_list:
            await self._request_endpoints()
            self.init_list.append("endpoints_requested")
        self._state = state
        if send_id:
            self.init_list.append("identified")
            await self.server.send_event("server:klippy_identified")
        if self._state != "startup":
            self.init_list.append('startup_complete')
            await self.server.send_event("server:klippy_started",
                                         self._state)
            if self._state != "ready":
                msg = result.get('state_message', "Klippy Not Ready")
                logging.info("\n" + msg)
            else:
                await self._verify_klippy_requirements()
                # register methods with klippy
                for method in self.klippy_reg_methods:
                    try:
                        await self.klippy_apis.register_method(method)
                    except ServerError:
                        logging.exception(
                            f"Unable to register method '{method}'")
                logging.info("Klippy ready")
                await self.server.send_event("server:klippy_ready")

    async def _verify_klippy_requirements(self) -> None:
        result = await self.klippy_apis.get_object_list(default=None)
        if result is None:
            logging.info(
                "Unable to retrieve Klipper Object List")
            return
        req_objs = set(["virtual_sdcard", "display_status", "pause_resume"])
        self._missing_reqs = req_objs - set(result)
        if self._missing_reqs:
            err_str = ", ".join([f"[{o}]" for o in self._missing_reqs])
            logging.info(
                f"\nWarning, unable to detect the following printer "
                f"objects:\n{err_str}\nPlease add the above sections "
                f"to printer.cfg for full Moonraker functionality.")
        if "virtual_sdcard" not in self._missing_reqs:
            # Update the gcode path
            query_res = await self.klippy_apis.query_objects(
                {'configfile': None}, default=None)
            if query_res is None:
                logging.info("Unable to set SD Card path")
            else:
                config = query_res.get('configfile', {}).get('config', {})
                vsd_config = config.get('virtual_sdcard', {})
                vsd_path = vsd_config.get('path', None)
                if vsd_path is not None:
                    file_manager: FileManager = self.server.lookup_component(
                        'file_manager')
                    file_manager.register_directory('gcodes', vsd_path,
                                                    full_access=True)
                else:
                    logging.info(
                        "Configuration for [virtual_sdcard] not found,"
                        " unable to set SD Card path")

    def _process_command(self, cmd: Dict[str, Any]) -> None:
        method = cmd.get('method', None)
        if method is not None:
            # This is a remote method called from klippy
            if method in self.remote_methods:
                params = cmd.get('params', {})
                self.event_loop.register_callback(
                    self._execute_method, method, **params)
            else:
                logging.info(f"Unknown method received: {method}")
            return
        # This is a response to a request, process
        req_id = cmd.get('id', None)
        request: Optional[KlippyRequest]
        request = self.pending_requests.pop(req_id, None)
        if request is None:
            logging.info(
                f"No request matching request ID: {req_id}, "
                f"response: {cmd}")
            return
        if 'result' in cmd:
            result = cmd['result']
            if not result:
                result = "ok"
        else:
            err = cmd.get('error', "Malformed Klippy Response")
            result = ServerError(err, 400)
        request.notify(result)

    async def _execute_method(self, method_name: str, **kwargs) -> None:
        try:
            ret = self.remote_methods[method_name](**kwargs)
            if ret is not None:
                await ret
        except Exception:
            logging.exception(f"Error running remote method: {method_name}")

    def _process_gcode_response(self, response: str) -> None:
        self.server.send_event("server:gcode_response", response)

    def _process_status_update(self,
                               eventtime: float,
                               status: Dict[str, Any]
                               ) -> None:
        if 'webhooks' in status:
            # XXX - process other states (startup, ready, error, etc)?
            state: Optional[str] = status['webhooks'].get('state', None)
            if state is not None:
                if state == "shutdown":
                    logging.info("Klippy has shutdown")
                    self.server.send_event("server:klippy_shutdown")
                self._state = state
        for conn, sub in self.subscriptions.items():
            conn_status: Dict[str, Any] = {}
            for name, fields in sub.items():
                if name in status:
                    val: Dict[str, Any] = dict(status[name])
                    if fields is not None:
                        val = {k: v for k, v in val.items() if k in fields}
                    if val:
                        conn_status[name] = val
            conn.send_status(conn_status, eventtime)

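    # Illustrative pruning example for the loop above: with a subscription
    # of {"toolhead": ["position"]}, an incoming status of
    # {"toolhead": {"position": [0, 0, 0, 0], "homed_axes": "xyz"}} is
    # reduced to {"toolhead": {"position": [0, 0, 0, 0]}} before delivery;
    # a field list of None forwards the object unfiltered.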
    async def request(self, web_request: WebRequest) -> Any:
        if not self.is_connected():
            raise ServerError("Klippy Host not connected", 503)
        rpc_method = web_request.get_endpoint()
        if rpc_method == "objects/subscribe":
            return await self._request_subscription(web_request)
        else:
            if rpc_method == "gcode/script":
                script = web_request.get_str('script', "")
                if script:
                    self.server.send_event(
                        "klippy_connection:gcode_received", script)
            return await self._request_standard(web_request)

    async def _request_subscription(self,
                                    web_request: WebRequest
                                    ) -> Dict[str, Any]:
        args = web_request.get_args()
        conn = web_request.get_connection()

        # Build the subscription request from a superset of all client
        # subscriptions
        sub = args.get('objects', {})
        if conn is None:
            raise self.server.error(
                "No connection associated with subscription request")
        self.subscriptions[conn] = sub
        all_subs: Dict[str, Any] = {}
        # request superset of all client subscriptions
        for sub in self.subscriptions.values():
            for obj, items in sub.items():
                if obj in all_subs:
                    pi = all_subs[obj]
                    if items is None or pi is None:
                        all_subs[obj] = None
                    else:
                        uitems = list(set(pi) | set(items))
                        all_subs[obj] = uitems
                else:
                    all_subs[obj] = items
        args['objects'] = all_subs
        args['response_template'] = {'method': "process_status_update"}

        result = await self._request_standard(web_request)

        # prune the status response
        pruned_status = {}
        all_status = result['status']
        sub = self.subscriptions.get(conn, {})
        for obj, fields in all_status.items():
            if obj in sub:
                valid_fields = sub[obj]
                if valid_fields is None:
                    pruned_status[obj] = fields
                else:
                    pruned_status[obj] = {k: v for k, v in fields.items()
                                          if k in valid_fields}
        result['status'] = pruned_status
        return result

    async def _request_standard(self, web_request: WebRequest) -> Any:
        rpc_method = web_request.get_endpoint()
        args = web_request.get_args()
        # Create a base klippy request
        base_request = KlippyRequest(rpc_method, args)
        self.pending_requests[base_request.id] = base_request
        self.event_loop.register_callback(self._write_request, base_request)
        return await base_request.wait()

    def remove_subscription(self, conn: Subscribable) -> None:
        self.subscriptions.pop(conn, None)

    def is_connected(self) -> bool:
        return self.writer is not None and not self.closing

    async def _on_connection_closed(self) -> None:
        self.init_list = []
        self._state = "disconnected"
        for request in self.pending_requests.values():
            request.notify(ServerError("Klippy Disconnected", 503))
        self.pending_requests = {}
        self.subscriptions = {}
        self._peer_cred = {}
        self._missing_reqs.clear()
        logging.info("Klippy Connection Removed")
        await self.server.send_event("server:klippy_disconnect")
        if self.server.is_running():
            # Reconnect if server is running
            loop = self.event_loop
            self.connection_task = loop.create_task(self._do_connect())

    async def close(self, wait_closed: bool = False) -> None:
        if self.closing:
            if wait_closed:
                await self.connection_mutex.acquire()
                self.connection_mutex.release()
            return
        self.closing = True
        if (
            self.connection_task is not None and
            not self.connection_task.done()
        ):
            self.connection_task.cancel()
        async with self.connection_mutex:
            if self.writer is not None:
                try:
                    self.writer.close()
                    await self.writer.wait_closed()
                except Exception:
                    logging.exception("Error closing Klippy Unix Socket")
                self.writer = None
                await self._on_connection_closed()
        self.closing = False

# Basic KlippyRequest class, easily converted to dict for json encoding
class KlippyRequest:
    def __init__(self, rpc_method: str, params: Dict[str, Any]) -> None:
        self.id = id(self)
        self.rpc_method = rpc_method
        self.params = params
        self._event = asyncio.Event()
        self.response: Any = None

    async def wait(self) -> Any:
        # Log pending requests every 60 seconds
        start_time = time.time()
        while True:
            try:
                await asyncio.wait_for(self._event.wait(), 60.)
            except asyncio.TimeoutError:
                pending_time = time.time() - start_time
                logging.info(
                    f"Request '{self.rpc_method}' pending: "
                    f"{pending_time:.2f} seconds")
                self._event.clear()
                continue
            break
        if isinstance(self.response, ServerError):
            raise self.response
        return self.response

    def notify(self, response: Any) -> None:
        if self._event.is_set():
            return
        self.response = response
        self._event.set()

    def to_dict(self) -> Dict[str, Any]:
        return {'id': self.id, 'method': self.rpc_method,
                'params': self.params}
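Taken together, the classes above give the full request lifecycle: `_request_standard()` creates a `KlippyRequest` keyed by its object id, `_write_request()` serializes it onto the socket, and `_read_stream()` resolves the matching entry in `pending_requests` when a response carrying the same id arrives. A minimal, self-contained sketch of that id-matching core (names here are illustrative, not part of Moonraker's API):

import asyncio
from typing import Any, Dict

class PendingTable:
    """Toy version of the pending_requests id-matching used above."""
    def __init__(self) -> None:
        self._pending: Dict[int, asyncio.Future] = {}

    def track(self, req_id: int) -> asyncio.Future:
        # Must be called from within a running event loop
        fut = asyncio.get_running_loop().create_future()
        self._pending[req_id] = fut
        return fut

    def resolve(self, msg: Dict[str, Any]) -> None:
        # Pop so duplicate or stale responses are simply ignored
        fut = self._pending.pop(msg.get("id"), None)
        if fut is not None and not fut.done():
            fut.set_result(msg.get("result", "ok"))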
517
moonraker/moonraker.py
Normal file
@@ -0,0 +1,517 @@
#!/usr/bin/env python3
# Moonraker - HTTP/Websocket API Server for Klipper
#
# Copyright (C) 2020 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license

from __future__ import annotations
import pathlib
import sys
import argparse
import importlib
import os
import io
import time
import socket
import logging
import signal
import confighelper
import utils
import asyncio
from eventloop import EventLoop
from app import MoonrakerApp
from klippy_connection import KlippyConnection
from utils import ServerError, SentinelClass

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Optional,
    Callable,
    Coroutine,
    Dict,
    List,
    Tuple,
    Union,
    TypeVar,
)
if TYPE_CHECKING:
    from websockets import WebRequest, WebsocketManager
    from components.file_manager.file_manager import FileManager
    FlexCallback = Callable[..., Optional[Coroutine]]
    _T = TypeVar("_T")

API_VERSION = (1, 0, 5)

CORE_COMPONENTS = [
    'dbus_manager', 'database', 'file_manager', 'klippy_apis',
    'machine', 'data_store', 'shell_command', 'proc_stats',
    'job_state', 'job_queue', 'http_client', 'announcements',
    'webcam', 'extensions',
]

SENTINEL = SentinelClass.get_instance()

class Server:
    error = ServerError
    def __init__(self,
                 args: Dict[str, Any],
                 file_logger: Optional[utils.MoonrakerLoggingHandler],
                 event_loop: EventLoop
                 ) -> None:
        self.event_loop = event_loop
        self.file_logger = file_logger
        self.app_args = args
        self.config = config = self._parse_config()
        self.host: str = config.get('host', "0.0.0.0")
        self.port: int = config.getint('port', 7125)
        self.ssl_port: int = config.getint('ssl_port', 7130)
        self.exit_reason: str = ""
        self.server_running: bool = False

        # Configure Debug Logging
        self.debug = config.getboolean('enable_debug_logging', False)
        asyncio_debug = config.getboolean('enable_asyncio_debug', False)
        log_level = logging.DEBUG if self.debug else logging.INFO
        logging.getLogger().setLevel(log_level)
        self.event_loop.set_debug(asyncio_debug)

        # Event initialization
        self.events: Dict[str, List[FlexCallback]] = {}
        self.components: Dict[str, Any] = {}
        self.failed_components: List[str] = []
        self.warnings: List[str] = []
        self.klippy_connection = KlippyConnection(config)

        # Tornado Application/Server
        self.moonraker_app = app = MoonrakerApp(config)
        self.register_endpoint = app.register_local_handler
        self.register_static_file_handler = app.register_static_file_handler
        self.register_upload_handler = app.register_upload_handler
        self.register_api_transport = app.register_api_transport

        log_warn = args.get('log_warning', "")
        if log_warn:
            self.add_warning(log_warn)
        cfg_warn = args.get("config_warning", "")
        if cfg_warn:
            self.add_warning(cfg_warn)

        self.register_endpoint(
            "/server/info", ['GET'], self._handle_info_request)
        self.register_endpoint(
            "/server/config", ['GET'], self._handle_config_request)
        self.register_endpoint(
            "/server/restart", ['POST'], self._handle_server_restart)
        self.register_notification("server:klippy_ready")
        self.register_notification("server:klippy_shutdown")
        self.register_notification("server:klippy_disconnect",
                                   "klippy_disconnected")
        self.register_notification("server:gcode_response")

    def get_app_args(self) -> Dict[str, Any]:
        return dict(self.app_args)

    def get_event_loop(self) -> EventLoop:
        return self.event_loop

    def get_api_version(self) -> Tuple[int, int, int]:
        return API_VERSION

    def get_warnings(self) -> List[str]:
        return self.warnings

    def is_running(self) -> bool:
        return self.server_running

    def is_debug_enabled(self) -> bool:
        return self.debug

    def _parse_config(self) -> confighelper.ConfigHelper:
        config = confighelper.get_configuration(self, self.app_args)
        # log config file
        cfg_files = "\n".join(config.get_config_files())
        strio = io.StringIO()
        config.write_config(strio)
        cfg_item = f"\n{'#'*20} Moonraker Configuration {'#'*20}\n\n"
        cfg_item += strio.getvalue()
        cfg_item += "#"*65
        cfg_item += f"\nAll Configuration Files:\n{cfg_files}\n"
        cfg_item += "#"*65
        strio.close()
        self.add_log_rollover_item('config', cfg_item)
        return config

    async def server_init(self, start_server: bool = True) -> None:
        self.event_loop.add_signal_handler(
            signal.SIGTERM, self._handle_term_signal)

        # Perform asynchronous init after the event loop starts
        optional_comps: List[Coroutine] = []
        for name, component in self.components.items():
            if not hasattr(component, "component_init"):
                continue
            if name in CORE_COMPONENTS:
                # Process core components in order synchronously
                await self._initialize_component(name, component)
            else:
                optional_comps.append(
                    self._initialize_component(name, component))

        # Asynchronous Optional Component Initialization
        if optional_comps:
            await asyncio.gather(*optional_comps)

        if not self.warnings:
            await self.event_loop.run_in_thread(self.config.create_backup)

        if start_server:
            await self.start_server()

    async def start_server(self, connect_to_klippy: bool = True) -> None:
        # Start HTTP Server
        logging.info(
            f"Starting Moonraker on ({self.host}, {self.port}), "
            f"Hostname: {socket.gethostname()}")
        self.moonraker_app.listen(self.host, self.port, self.ssl_port)
        self.server_running = True
        if connect_to_klippy:
            self.klippy_connection.connect()

    def add_log_rollover_item(self, name: str, item: str,
                              log: bool = True) -> None:
        if self.file_logger is not None:
            self.file_logger.set_rollover_info(name, item)
        if log and item is not None:
            logging.info(item)

    def add_warning(self, warning: str, log: bool = True) -> None:
        self.warnings.append(warning)
        if log:
            logging.warning(warning)

    # ***** Component Management *****
    async def _initialize_component(self, name: str, component: Any) -> None:
        logging.info(f"Performing Component Post Init: [{name}]")
        try:
            ret = component.component_init()
            if ret is not None:
                await ret
        except Exception as e:
            logging.exception(f"Component [{name}] failed post init")
            self.add_warning(f"Component '{name}' failed to load with "
                             f"error: {e}")
            self.set_failed_component(name)

    def load_components(self) -> None:
        config = self.config
        cfg_sections = [s.split()[0] for s in config.sections()]
        cfg_sections.remove('server')

        # load core components
        for component in CORE_COMPONENTS:
            self.load_component(config, component)
            if component in cfg_sections:
                cfg_sections.remove(component)

        # load remaining optional components
        for section in cfg_sections:
            self.load_component(config, section, None)

        config.validate_config()

    def load_component(self,
                       config: confighelper.ConfigHelper,
                       component_name: str,
                       default: Union[SentinelClass, _T] = SENTINEL
                       ) -> Union[_T, Any]:
        if component_name in self.components:
            return self.components[component_name]
        try:
            module = importlib.import_module("components." + component_name)
            is_core = component_name in CORE_COMPONENTS
            fallback: Optional[str] = "server" if is_core else None
            config = config.getsection(component_name, fallback)
            load_func = getattr(module, "load_component")
            component = load_func(config)
        except Exception:
            msg = f"Unable to load component: ({component_name})"
            logging.exception(msg)
            if component_name not in self.failed_components:
                self.failed_components.append(component_name)
            if isinstance(default, SentinelClass):
                raise ServerError(msg)
            return default
        self.components[component_name] = component
        logging.info(f"Component ({component_name}) loaded")
        return component

    def lookup_component(self,
                         component_name: str,
                         default: Union[SentinelClass, _T] = SENTINEL
                         ) -> Union[_T, Any]:
        component = self.components.get(component_name, default)
        if isinstance(component, SentinelClass):
            raise ServerError(f"Component ({component_name}) not found")
        return component

    def set_failed_component(self, component_name: str) -> None:
        if component_name not in self.failed_components:
            self.failed_components.append(component_name)

    def register_component(self, component_name: str, component: Any) -> None:
        if component_name in self.components:
            raise self.error(
                f"Component '{component_name}' already registered")
        self.components[component_name] = component

    def register_notification(self,
                              event_name: str,
                              notify_name: Optional[str] = None
                              ) -> None:
        wsm: WebsocketManager = self.lookup_component("websockets")
        wsm.register_notification(event_name, notify_name)

    def register_event_handler(self,
                               event: str,
                               callback: FlexCallback
                               ) -> None:
        self.events.setdefault(event, []).append(callback)

    def send_event(self, event: str, *args) -> asyncio.Future:
        fut = self.event_loop.create_future()
        self.event_loop.register_callback(
            self._process_event, fut, event, *args)
        return fut

    async def _process_event(self,
                             fut: asyncio.Future,
                             event: str,
                             *args
                             ) -> None:
        events = self.events.get(event, [])
        coroutines: List[Coroutine] = []
        try:
            for func in events:
                ret = func(*args)
                if ret is not None:
                    coroutines.append(ret)
            if coroutines:
                await asyncio.gather(*coroutines)
        except ServerError:
            logging.exception(f"Error Processing Event: {fut}")
        if not fut.done():
            fut.set_result(None)

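    # Illustrative use of the event plumbing above (component code is
    # hypothetical): handlers may be plain callables or coroutine functions,
    # and the future returned by send_event() resolves only after every
    # registered handler has completed.
    #
    #   server.register_event_handler("server:klippy_ready", on_ready)
    #   await server.send_event("myplugin:custom_event", payload)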
    def register_remote_method(self,
                               method_name: str,
                               cb: FlexCallback
                               ) -> None:
        self.klippy_connection.register_remote_method(method_name, cb)

    def get_host_info(self) -> Dict[str, Any]:
        return {
            'hostname': socket.gethostname(),
            'address': self.host,
            'port': self.port,
            'ssl_port': self.ssl_port
        }

    def get_klippy_info(self) -> Dict[str, Any]:
        return self.klippy_connection.klippy_info

    def get_klippy_state(self) -> str:
        return self.klippy_connection.state

    def _handle_term_signal(self) -> None:
        logging.info("Exiting with signal SIGTERM")
        self.event_loop.register_callback(self._stop_server, "terminate")

    async def _stop_server(self, exit_reason: str = "restart") -> None:
        self.server_running = False
        # Call each component's "on_exit" method
        for name, component in self.components.items():
            if hasattr(component, "on_exit"):
                func: FlexCallback = getattr(component, "on_exit")
                try:
                    ret = func()
                    if ret is not None:
                        await ret
                except Exception:
                    logging.exception(
                        f"Error executing 'on_exit()' for component: {name}")

        # Sleep for 100ms to allow connected websockets to write out
        # remaining data
        await asyncio.sleep(.1)
        try:
            await self.moonraker_app.close()
        except Exception:
            logging.exception("Error Closing App")

        # Disconnect from Klippy
        try:
            await asyncio.wait_for(
                asyncio.shield(self.klippy_connection.close(
                    wait_closed=True)), 2.)
        except Exception:
            logging.exception("Klippy Disconnect Error")

        # Close all components
        for name, component in self.components.items():
            if name in ["application", "websockets", "klippy_connection"]:
                # These components have already been closed
                continue
            if hasattr(component, "close"):
                func = getattr(component, "close")
                try:
                    ret = func()
                    if ret is not None:
                        await ret
                except Exception:
                    logging.exception(
                        f"Error executing 'close()' for component: {name}")
        # Allow cancelled tasks a chance to run in the eventloop
        await asyncio.sleep(.001)

        self.exit_reason = exit_reason
        self.event_loop.remove_signal_handler(signal.SIGTERM)
        self.event_loop.stop()

    async def _handle_server_restart(self, web_request: WebRequest) -> str:
        self.event_loop.register_callback(self._stop_server)
        return "ok"

    async def _handle_info_request(self,
                                   web_request: WebRequest
                                   ) -> Dict[str, Any]:
        file_manager: Optional[FileManager] = self.lookup_component(
            'file_manager', None)
        reg_dirs = []
        if file_manager is not None:
            reg_dirs = file_manager.get_registered_dirs()
        wsm: WebsocketManager = self.lookup_component('websockets')
        mreqs = self.klippy_connection.missing_requirements
        return {
            'klippy_connected': self.klippy_connection.is_connected(),
            'klippy_state': self.klippy_connection.state,
            'components': list(self.components.keys()),
            'failed_components': self.failed_components,
            'registered_directories': reg_dirs,
            'warnings': self.warnings,
            'websocket_count': wsm.get_count(),
            'moonraker_version': self.app_args['software_version'],
            'missing_klippy_requirements': mreqs,
            'api_version': API_VERSION,
            'api_version_string': ".".join([str(v) for v in API_VERSION])
        }

    async def _handle_config_request(self,
                                     web_request: WebRequest
                                     ) -> Dict[str, Any]:
        cfg_file_list: List[Dict[str, Any]] = []
        cfg_parent = pathlib.Path(
            self.app_args["config_file"]
        ).expanduser().resolve().parent
        for fname, sections in self.config.get_file_sections().items():
            path = pathlib.Path(fname)
            try:
                rel_path = str(path.relative_to(str(cfg_parent)))
            except ValueError:
                rel_path = fname
            cfg_file_list.append({"filename": rel_path, "sections": sections})
        return {
            'config': self.config.get_parsed_config(),
            'orig': self.config.get_orig_config(),
            'files': cfg_file_list
        }

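# Sketch of the component contract implied by load_component() above: a
# component module exposes a load_component() factory, and may optionally
# define component_init(), on_exit() and close() hooks, which the Server
# awaits when they return coroutines.  Module and class names below are
# hypothetical, for illustration only:
#
#   # components/myplugin.py
#   class MyPlugin:
#       def __init__(self, config):
#           self.server = config.get_server()
#
#       async def component_init(self):
#           pass  # async post-init, run by server_init()
#
#   def load_component(config):
#       return MyPlugin(config)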
def main(cmd_line_args: argparse.Namespace) -> None:
    cfg_file = cmd_line_args.configfile
    app_args = {'config_file': cfg_file}

    # Setup Logging
    version = utils.get_software_version()
    if cmd_line_args.nologfile:
        app_args['log_file'] = ""
    else:
        app_args['log_file'] = os.path.normpath(
            os.path.expanduser(cmd_line_args.logfile))
    app_args['software_version'] = version
    app_args['python_version'] = sys.version.replace("\n", " ")
    ql, file_logger, warning = utils.setup_logging(app_args)
    if warning is not None:
        app_args['log_warning'] = warning

    # Start asyncio event loop and server
    event_loop = EventLoop()
    alt_config_loaded = False
    estatus = 0
    while True:
        try:
            server = Server(app_args, file_logger, event_loop)
            server.load_components()
        except confighelper.ConfigError as e:
            backup_cfg = confighelper.find_config_backup(cfg_file)
            logging.exception("Server Config Error")
            if alt_config_loaded or backup_cfg is None:
                estatus = 1
                break
            app_args['config_file'] = backup_cfg
            app_args['config_warning'] = (
                f"Server configuration error: {e}\n"
                f"Loaded server from most recent working configuration:"
                f" '{app_args['config_file']}'\n"
                f"Please fix the issue in moonraker.conf and restart "
                f"the server."
            )
            alt_config_loaded = True
            continue
        except Exception:
            logging.exception("Moonraker Error")
            estatus = 1
            break
        try:
            event_loop.register_callback(server.server_init)
            event_loop.start()
        except Exception:
            logging.exception("Server Running Error")
            estatus = 1
            break
        if server.exit_reason == "terminate":
            break
        # Restore the original config and clear the warning
        # before the server restarts
        if alt_config_loaded:
            app_args['config_file'] = cfg_file
            app_args.pop('config_warning', None)
            alt_config_loaded = False
        event_loop.close()
        # Since we are running outside of the server
        # it is ok to use a blocking sleep here
        time.sleep(.5)
        logging.info("Attempting Server Restart...")
        event_loop.reset()
    event_loop.close()
    logging.info("Server Shutdown")
    ql.stop()
    exit(estatus)


if __name__ == '__main__':
    # Parse start arguments
    parser = argparse.ArgumentParser(
        description="Moonraker - Klipper API Server")
    parser.add_argument(
        "-c", "--configfile", default="~/moonraker.conf",
        metavar='<configfile>',
        help="Location of moonraker configuration file")
    parser.add_argument(
        "-l", "--logfile", default="/tmp/moonraker.log", metavar='<logfile>',
        help="log file name and location")
    parser.add_argument(
        "-n", "--nologfile", action='store_true',
        help="disable logging to a file")
    main(parser.parse_args())
1
moonraker/thirdparty/__init__.py
vendored
Normal file
@@ -0,0 +1 @@
# package definition for thirdparty package
1
moonraker/thirdparty/packagekit/__init__.py
vendored
Normal file
@@ -0,0 +1 @@
# package definition for packagekit thirdparty files
768
moonraker/thirdparty/packagekit/enums.py
vendored
Normal file
@@ -0,0 +1,768 @@
# This file was autogenerated from pk-enum.h by pk-enum-converter.py
# on Fri Jan 21 16:01:47 2022 UTC
#
# License for original source:
#
# Copyright (C) 2007-2014 Richard Hughes <richard@hughsie.com>
#
# Licensed under the GNU Lesser General Public License Version 2.1
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#

from __future__ import annotations
import sys
from enum import Flag, auto

class PkFlag(Flag):
    @classmethod
    def from_pkstring(cls, pkstring: str):
        for name, member in cls.__members__.items():
            if member.pkstring == pkstring:
                return cls(member.value)
        # Return "unknown" flag
        return cls(1)

    @classmethod
    def from_index(cls, index: int):
        return cls(1 << index)

    @property
    def pkstring(self) -> str:
        if self.name is None:
            return " | ".join([f.pkstring for f in self])
        return self.name.lower().replace("_", "-")

    @property
    def desc(self) -> str:
        if self.name is None:
            return ", ".join([f.desc for f in self])
        description = self.name.lower().replace("_", " ")
        return description.capitalize()

    @property
    def index(self) -> int:
        return self.value.bit_length() - 1

    if sys.version_info < (3, 11):
        def __iter__(self):
            for i in range(self._value_.bit_length()):
                val = 1 << i
                if val & self._value_ == val:
                    yield self.__class__(val)

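# Behavior of the helpers above, illustrated with members defined later in
# this module (values follow directly from the property definitions):
#
#   Role.GET_UPDATES.pkstring           -> "get-updates"
#   Role.from_pkstring("get-updates")   -> Role.GET_UPDATES
#   Role.from_pkstring("no-such-role")  -> Role.UNKNOWN  (cls(1) fallback)
#   Role.GET_UPDATES.desc               -> "Get updates"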
class Role(PkFlag):
    """
    What we were asked to do; this never changes for the lifetime of the
    transaction.
    Icons that have to represent the whole "aim" of the transaction will use
    these constants

    * Role.UNKNOWN: Unknown request
    * Role.CANCEL: Cancel transaction
    * Role.DEPENDS_ON: Get package dependencies
    * Role.GET_DETAILS: Get package details
    * Role.GET_FILES:
    * Role.GET_PACKAGES: Get available packages
    * Role.GET_REPO_LIST: Get repository list
    * Role.REQUIRED_BY: Get packages required by given package
    * Role.GET_UPDATE_DETAIL: Get update details
    * Role.GET_UPDATES: Get available updates
    * Role.INSTALL_FILES: Install package files
    * Role.INSTALL_PACKAGES: Install packages
    * Role.INSTALL_SIGNATURE: Install signature
    * Role.REFRESH_CACHE: Refresh cache
    * Role.REMOVE_PACKAGES: Remove packages
    * Role.REPO_ENABLE: Enable repository
    * Role.REPO_SET_DATA:
    * Role.RESOLVE: Resolve dependencies
    * Role.SEARCH_DETAILS: Search for details
    * Role.SEARCH_FILE: Search for file
    * Role.SEARCH_GROUP: Search for group
    * Role.SEARCH_NAME: Search for package name
    * Role.UPDATE_PACKAGES: Update packages
    * Role.WHAT_PROVIDES: Get what a package provides
    * Role.ACCEPT_EULA: Accept an EULA
    * Role.DOWNLOAD_PACKAGES: Download packages
    * Role.GET_DISTRO_UPGRADES: Get available distribution upgrades
    * Role.GET_CATEGORIES: Get available categories
    * Role.GET_OLD_TRANSACTIONS: Get old transaction information
    * Role.REPAIR_SYSTEM: Repair system
    * Role.GET_DETAILS_LOCAL: Get details on local package
    * Role.GET_FILES_LOCAL: Get files provided by local package
    * Role.REPO_REMOVE: Remove repository
    * Role.UPGRADE_SYSTEM: Upgrade system
    """
    UNKNOWN = auto()
    CANCEL = auto()
    DEPENDS_ON = auto()
    GET_DETAILS = auto()
    GET_FILES = auto()
    GET_PACKAGES = auto()
    GET_REPO_LIST = auto()
    REQUIRED_BY = auto()
    GET_UPDATE_DETAIL = auto()
    GET_UPDATES = auto()
    INSTALL_FILES = auto()
    INSTALL_PACKAGES = auto()
    INSTALL_SIGNATURE = auto()
    REFRESH_CACHE = auto()
    REMOVE_PACKAGES = auto()
    REPO_ENABLE = auto()
    REPO_SET_DATA = auto()
    RESOLVE = auto()
    SEARCH_DETAILS = auto()
    SEARCH_FILE = auto()
    SEARCH_GROUP = auto()
    SEARCH_NAME = auto()
    UPDATE_PACKAGES = auto()
    WHAT_PROVIDES = auto()
    ACCEPT_EULA = auto()
    DOWNLOAD_PACKAGES = auto()
    GET_DISTRO_UPGRADES = auto()
    GET_CATEGORIES = auto()
    GET_OLD_TRANSACTIONS = auto()
    REPAIR_SYSTEM = auto()
    GET_DETAILS_LOCAL = auto()
    GET_FILES_LOCAL = auto()
    REPO_REMOVE = auto()
    UPGRADE_SYSTEM = auto()

class Status(PkFlag):
    """
    What status we are now; this can change for each transaction giving a
    status of what sort of thing is happening
    Icons that change to represent the current status of the transaction will
    use these constants
    If you add to these, make sure you add filenames in gpk-watch.c also

    A typical transaction will do:
    - schedule task
        WAIT
    - run task
        SETUP
    - wait for lock
        RUNNING

    This means that backends should run pk_backend_set_status (backend,
    PK_STATUS_ENUM_RUNNING)
    when they are ready to start running the transaction and after a lock has
    been acquired.

    * Status.UNKNOWN: Unknown status
    * Status.WAIT: Waiting
    * Status.SETUP: Setting up
    * Status.RUNNING: Running
    * Status.QUERY:
    * Status.INFO:
    * Status.REMOVE: Removing
    * Status.REFRESH_CACHE: Refreshing cache
    * Status.DOWNLOAD: Downloading
    * Status.INSTALL: Installing
    * Status.UPDATE: Updating
    * Status.CLEANUP: Cleaning up
    * Status.OBSOLETE:
    * Status.DEP_RESOLVE: Resolving dependencies
    * Status.SIG_CHECK: Checking signatures
    * Status.TEST_COMMIT: Testing commit
    * Status.COMMIT: Committing
    * Status.REQUEST:
    * Status.FINISHED: Finished
    * Status.CANCEL: Cancelling
    * Status.DOWNLOAD_REPOSITORY: Downloading repository
    * Status.DOWNLOAD_PACKAGELIST: Downloading package list
    * Status.DOWNLOAD_FILELIST: Downloading file list
    * Status.DOWNLOAD_CHANGELOG: Downloading changelog information
    * Status.DOWNLOAD_GROUP: Downloading group information
    * Status.DOWNLOAD_UPDATEINFO: Downloading update information
    * Status.REPACKAGING: Repackaging
    * Status.LOADING_CACHE: Loading cache
    * Status.SCAN_APPLICATIONS: Scanning for applications
    * Status.GENERATE_PACKAGE_LIST: Generating package list
    * Status.WAITING_FOR_LOCK: Waiting for lock
    * Status.WAITING_FOR_AUTH: Waiting for authentication/authorization
    * Status.SCAN_PROCESS_LIST: Scanning running processes
    * Status.CHECK_EXECUTABLE_FILES: Checking executable files
    * Status.CHECK_LIBRARIES: Checking libraries
    * Status.COPY_FILES: Copying files
    * Status.RUN_HOOK: Running package hook
    """
    UNKNOWN = auto()
    WAIT = auto()
    SETUP = auto()
    RUNNING = auto()
    QUERY = auto()
    INFO = auto()
    REMOVE = auto()
    REFRESH_CACHE = auto()
    DOWNLOAD = auto()
    INSTALL = auto()
    UPDATE = auto()
    CLEANUP = auto()
    OBSOLETE = auto()
    DEP_RESOLVE = auto()
    SIG_CHECK = auto()
    TEST_COMMIT = auto()
    COMMIT = auto()
    REQUEST = auto()
    FINISHED = auto()
    CANCEL = auto()
    DOWNLOAD_REPOSITORY = auto()
    DOWNLOAD_PACKAGELIST = auto()
    DOWNLOAD_FILELIST = auto()
    DOWNLOAD_CHANGELOG = auto()
    DOWNLOAD_GROUP = auto()
    DOWNLOAD_UPDATEINFO = auto()
    REPACKAGING = auto()
    LOADING_CACHE = auto()
    SCAN_APPLICATIONS = auto()
    GENERATE_PACKAGE_LIST = auto()
    WAITING_FOR_LOCK = auto()
    WAITING_FOR_AUTH = auto()
    SCAN_PROCESS_LIST = auto()
    CHECK_EXECUTABLE_FILES = auto()
    CHECK_LIBRARIES = auto()
    COPY_FILES = auto()
    RUN_HOOK = auto()

class Exit(PkFlag):
    """
    How the backend exited

    * Exit.UNKNOWN: Unknown exit status
    * Exit.SUCCESS: Backend exited successfully
    * Exit.FAILED: Backend failed
    * Exit.CANCELLED: Backend was cancelled
    * Exit.KEY_REQUIRED: A repository encryption key needs installing
    * Exit.EULA_REQUIRED: A EULA is required to be accepted
    * Exit.KILLED: Backend was killed
    * Exit.MEDIA_CHANGE_REQUIRED: Media change required
    * Exit.NEED_UNTRUSTED:
    * Exit.CANCELLED_PRIORITY: Cancelled due to higher priority task
    * Exit.SKIP_TRANSACTION:
    * Exit.REPAIR_REQUIRED: Package database requires repairing
    """
    UNKNOWN = auto()
    SUCCESS = auto()
    FAILED = auto()
    CANCELLED = auto()
    KEY_REQUIRED = auto()
    EULA_REQUIRED = auto()
    KILLED = auto()
    MEDIA_CHANGE_REQUIRED = auto()
    NEED_UNTRUSTED = auto()
    CANCELLED_PRIORITY = auto()
    SKIP_TRANSACTION = auto()
    REPAIR_REQUIRED = auto()

class Network(PkFlag):
    """
    Network type

    * Network.UNKNOWN: Unknown network
    * Network.OFFLINE: Offline (no network)
    * Network.ONLINE: Online (network type unknown)
    * Network.WIRED: Wired network
    * Network.WIFI: WiFi network
    * Network.MOBILE: Mobile network
    """
    UNKNOWN = auto()
    OFFLINE = auto()
    ONLINE = auto()
    WIRED = auto()
    WIFI = auto()
    MOBILE = auto()

class Filter(PkFlag):
    """
    The filter types

    * Filter.UNKNOWN: Unknown filter
    * Filter.NONE: No filter
    * Filter.INSTALLED: Filter for installed packages
    * Filter.NOT_INSTALLED: Filter for not installed packages
    * Filter.DEVELOPMENT: Filter for development packages
    * Filter.NOT_DEVELOPMENT: Filter for non-development packages
    * Filter.GUI: Filter for GUI packages
    * Filter.NOT_GUI: Filter for non-GUI packages
    * Filter.FREE: Filter for free packages
    * Filter.NOT_FREE: Filter for non-free packages
    * Filter.VISIBLE: Filter for visible packages
    * Filter.NOT_VISIBLE: Filter for invisible packages
    * Filter.SUPPORTED: Filter for supported packages
    * Filter.NOT_SUPPORTED: Filter for not supported packages
    * Filter.BASENAME: Filter for packages that match basename
    * Filter.NOT_BASENAME: Filter for packages that don't match basename
    * Filter.NEWEST: Filter for newest packages
    * Filter.NOT_NEWEST: Filter for not newest packages
    * Filter.ARCH: Filter for packages that match architecture
    * Filter.NOT_ARCH: Filter for packages that don't match architecture
    * Filter.SOURCE: Filter for source packages
    * Filter.NOT_SOURCE: Filter for non-source packages
    * Filter.COLLECTIONS: Filter for collections
    * Filter.NOT_COLLECTIONS: Filter for not collections
    * Filter.APPLICATION: Filter for application packages
    * Filter.NOT_APPLICATION: Filter for non-application packages
    * Filter.DOWNLOADED: Filter for downloaded packages
    * Filter.NOT_DOWNLOADED: Filter for not downloaded packages
    """
    @property
    def pkstring(self) -> str:
        pks = self.name
        if pks is None:
            return " | ".join([f.pkstring for f in self])
        if pks in ["DEVELOPMENT", "NOT_DEVELOPMENT"]:
            pks = pks[:-6]
        if pks[:4] == "NOT_":
            pks = "~" + pks[4:]
        return pks.lower().replace("_", "-")

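    # Examples of the mapping implemented above:
    #   Filter.NOT_INSTALLED.pkstring   -> "~installed"
    #   Filter.DEVELOPMENT.pkstring     -> "devel"
    #   Filter.NOT_DEVELOPMENT.pkstring -> "~devel"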
UNKNOWN = auto()
|
||||
NONE = auto()
|
||||
INSTALLED = auto()
|
||||
NOT_INSTALLED = auto()
|
||||
DEVELOPMENT = auto()
|
||||
NOT_DEVELOPMENT = auto()
|
||||
GUI = auto()
|
||||
NOT_GUI = auto()
|
||||
FREE = auto()
|
||||
NOT_FREE = auto()
|
||||
VISIBLE = auto()
|
||||
NOT_VISIBLE = auto()
|
||||
SUPPORTED = auto()
|
||||
NOT_SUPPORTED = auto()
|
||||
BASENAME = auto()
|
||||
NOT_BASENAME = auto()
|
||||
NEWEST = auto()
|
||||
NOT_NEWEST = auto()
|
||||
ARCH = auto()
|
||||
NOT_ARCH = auto()
|
||||
SOURCE = auto()
|
||||
NOT_SOURCE = auto()
|
||||
COLLECTIONS = auto()
|
||||
NOT_COLLECTIONS = auto()
|
||||
APPLICATION = auto()
|
||||
NOT_APPLICATION = auto()
|
||||
DOWNLOADED = auto()
|
||||
NOT_DOWNLOADED = auto()
|
||||
|
||||
class Restart(PkFlag):
|
||||
"""
|
||||
What restart we need to after a transaction, ordered by severity
|
||||
|
||||
* Restart.UNKNOWN: Unknown restart state
|
||||
* Restart.NONE: No restart required
|
||||
* Restart.APPLICATION: Need to restart the application
|
||||
* Restart.SESSION: Need to restart the session
|
||||
* Restart.SYSTEM: Need to restart the system
|
||||
* Restart.SECURITY_SESSION:
|
||||
* Restart.SECURITY_SYSTEM:
|
||||
"""
|
||||
UNKNOWN = auto()
|
||||
NONE = auto()
|
||||
APPLICATION = auto()
|
||||
SESSION = auto()
|
||||
SYSTEM = auto()
|
||||
SECURITY_SESSION = auto()
|
||||
SECURITY_SYSTEM = auto()
|
||||
|
||||
class Error(PkFlag):
|
||||
"""
|
||||
The error type
|
||||
|
||||
* Error.UNKNOWN:
|
||||
* Error.OOM: Out of memory
|
||||
* Error.NO_NETWORK: No network access available
|
||||
* Error.NOT_SUPPORTED: Request not supported
|
||||
* Error.INTERNAL_ERROR: Undefined internal error
|
||||
* Error.GPG_FAILURE: GPG encryption failure
|
||||
* Error.PACKAGE_ID_INVALID: Invalid package ID provided
|
||||
* Error.PACKAGE_NOT_INSTALLED: Requested package not installed
|
||||
* Error.PACKAGE_NOT_FOUND: Requested package not found
|
||||
    * Error.PACKAGE_ALREADY_INSTALLED: Requested package already installed
    * Error.PACKAGE_DOWNLOAD_FAILED: Failed to download package
    * Error.GROUP_NOT_FOUND: Requested group not found
    * Error.GROUP_LIST_INVALID: Invalid group list provided
    * Error.DEP_RESOLUTION_FAILED: Failed to resolve dependencies
    * Error.FILTER_INVALID: Invalid filter provided
    * Error.CREATE_THREAD_FAILED: Failed to create thread
    * Error.TRANSACTION_ERROR: Error occurred during transaction
    * Error.TRANSACTION_CANCELLED: Transaction was cancelled
    * Error.NO_CACHE: No cache available
    * Error.REPO_NOT_FOUND: Requested repository not found
    * Error.CANNOT_REMOVE_SYSTEM_PACKAGE: Not allowed to remove system package
    * Error.PROCESS_KILL: Process killed
    * Error.FAILED_INITIALIZATION:
    * Error.FAILED_FINALISE:
    * Error.FAILED_CONFIG_PARSING: Configuration is not valid
    * Error.CANNOT_CANCEL:
    * Error.CANNOT_GET_LOCK: Cannot get lock
    * Error.NO_PACKAGES_TO_UPDATE: No packages to update
    * Error.CANNOT_WRITE_REPO_CONFIG: Cannot write repository configuration
    * Error.LOCAL_INSTALL_FAILED:
    * Error.BAD_GPG_SIGNATURE: Bad GPG signature found
    * Error.MISSING_GPG_SIGNATURE: Required GPG signature not found
    * Error.CANNOT_INSTALL_SOURCE_PACKAGE: Cannot install source package
    * Error.REPO_CONFIGURATION_ERROR:
    * Error.NO_LICENSE_AGREEMENT:
    * Error.FILE_CONFLICTS: File conflicts detected
    * Error.PACKAGE_CONFLICTS: Package conflict
    * Error.REPO_NOT_AVAILABLE: Repository not available
    * Error.INVALID_PACKAGE_FILE:
    * Error.PACKAGE_INSTALL_BLOCKED: Package installation blocked
    * Error.PACKAGE_CORRUPT: Package corruption occurred
    * Error.ALL_PACKAGES_ALREADY_INSTALLED: All packages already installed
    * Error.FILE_NOT_FOUND: Required file not found
    * Error.NO_MORE_MIRRORS_TO_TRY: Out of repository mirrors to try
    * Error.NO_DISTRO_UPGRADE_DATA: No distribution upgrade path found
    * Error.INCOMPATIBLE_ARCHITECTURE: Incompatible architecture found
    * Error.NO_SPACE_ON_DEVICE: Out of required disk space
    * Error.MEDIA_CHANGE_REQUIRED: Need to change media
    * Error.NOT_AUTHORIZED: Authorization failed
    * Error.UPDATE_NOT_FOUND: Update not found
    * Error.CANNOT_INSTALL_REPO_UNSIGNED:
        Installation repository missing signature
    * Error.CANNOT_UPDATE_REPO_UNSIGNED: Update repository missing signature
    * Error.CANNOT_GET_FILELIST: Cannot get file list
    * Error.CANNOT_GET_REQUIRES: Cannot get package requirements
    * Error.CANNOT_DISABLE_REPOSITORY: Cannot disable repository
    * Error.RESTRICTED_DOWNLOAD:
    * Error.PACKAGE_FAILED_TO_CONFIGURE: Package failed to configure
    * Error.PACKAGE_FAILED_TO_BUILD: Package failed to build
    * Error.PACKAGE_FAILED_TO_INSTALL: Package failed to install
    * Error.PACKAGE_FAILED_TO_REMOVE: Package failed to remove
    * Error.UPDATE_FAILED_DUE_TO_RUNNING_PROCESS:
    * Error.PACKAGE_DATABASE_CHANGED:
    * Error.PROVIDE_TYPE_NOT_SUPPORTED:
    * Error.INSTALL_ROOT_INVALID: Installation root not suitable
    * Error.CANNOT_FETCH_SOURCES: Cannot fetch sources
    * Error.CANCELLED_PRIORITY: Cancelled due to higher priority task
    * Error.UNFINISHED_TRANSACTION: Transaction unfinished
    * Error.LOCK_REQUIRED: Required lock not available
    * Error.REPO_ALREADY_SET:
    """

    @property
    def pkstring(self) -> str:
        if self == Error.UPDATE_FAILED_DUE_TO_RUNNING_PROCESS:
            return "failed-due-to-running-process"
        return super().pkstring
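    # e.g. Error.UPDATE_FAILED_DUE_TO_RUNNING_PROCESS.pkstring yields
    # "failed-due-to-running-process"; every other member presumably
    # falls through to the base PkFlag.pkstring conversion.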
    UNKNOWN = auto()
    OUT_OF_MEMORY = auto()
    NO_NETWORK = auto()
    NOT_SUPPORTED = auto()
    INTERNAL_ERROR = auto()
    GPG_FAILURE = auto()
    PACKAGE_ID_INVALID = auto()
    PACKAGE_NOT_INSTALLED = auto()
    PACKAGE_NOT_FOUND = auto()
    PACKAGE_ALREADY_INSTALLED = auto()
    PACKAGE_DOWNLOAD_FAILED = auto()
    GROUP_NOT_FOUND = auto()
    GROUP_LIST_INVALID = auto()
    DEP_RESOLUTION_FAILED = auto()
    FILTER_INVALID = auto()
    CREATE_THREAD_FAILED = auto()
    TRANSACTION_ERROR = auto()
    TRANSACTION_CANCELLED = auto()
    NO_CACHE = auto()
    REPO_NOT_FOUND = auto()
    CANNOT_REMOVE_SYSTEM_PACKAGE = auto()
    PROCESS_KILL = auto()
    FAILED_INITIALIZATION = auto()
    FAILED_FINALISE = auto()
    FAILED_CONFIG_PARSING = auto()
    CANNOT_CANCEL = auto()
    CANNOT_GET_LOCK = auto()
    NO_PACKAGES_TO_UPDATE = auto()
    CANNOT_WRITE_REPO_CONFIG = auto()
    LOCAL_INSTALL_FAILED = auto()
    BAD_GPG_SIGNATURE = auto()
    MISSING_GPG_SIGNATURE = auto()
    CANNOT_INSTALL_SOURCE_PACKAGE = auto()
    REPO_CONFIGURATION_ERROR = auto()
    NO_LICENSE_AGREEMENT = auto()
    FILE_CONFLICTS = auto()
    PACKAGE_CONFLICTS = auto()
    REPO_NOT_AVAILABLE = auto()
    INVALID_PACKAGE_FILE = auto()
    PACKAGE_INSTALL_BLOCKED = auto()
    PACKAGE_CORRUPT = auto()
    ALL_PACKAGES_ALREADY_INSTALLED = auto()
    FILE_NOT_FOUND = auto()
    NO_MORE_MIRRORS_TO_TRY = auto()
    NO_DISTRO_UPGRADE_DATA = auto()
    INCOMPATIBLE_ARCHITECTURE = auto()
    NO_SPACE_ON_DEVICE = auto()
    MEDIA_CHANGE_REQUIRED = auto()
    NOT_AUTHORIZED = auto()
    UPDATE_NOT_FOUND = auto()
    CANNOT_INSTALL_REPO_UNSIGNED = auto()
    CANNOT_UPDATE_REPO_UNSIGNED = auto()
    CANNOT_GET_FILELIST = auto()
    CANNOT_GET_REQUIRES = auto()
    CANNOT_DISABLE_REPOSITORY = auto()
    RESTRICTED_DOWNLOAD = auto()
    PACKAGE_FAILED_TO_CONFIGURE = auto()
    PACKAGE_FAILED_TO_BUILD = auto()
    PACKAGE_FAILED_TO_INSTALL = auto()
    PACKAGE_FAILED_TO_REMOVE = auto()
    UPDATE_FAILED_DUE_TO_RUNNING_PROCESS = auto()
    PACKAGE_DATABASE_CHANGED = auto()
    PROVIDE_TYPE_NOT_SUPPORTED = auto()
    INSTALL_ROOT_INVALID = auto()
    CANNOT_FETCH_SOURCES = auto()
    CANCELLED_PRIORITY = auto()
    UNFINISHED_TRANSACTION = auto()
    LOCK_REQUIRED = auto()
    REPO_ALREADY_SET = auto()
    OOM = OUT_OF_MEMORY

class Group(PkFlag):
    """
    The group type

    * Group.UNKNOWN: Unknown group
    * Group.ACCESSIBILITY: Accessibility related packages
    * Group.ACCESSORIES: Accessory packages
    * Group.ADMIN_TOOLS: Administration tools packages
    * Group.COMMUNICATION: Communication packages
    * Group.DESKTOP_GNOME: GNOME packages
    * Group.DESKTOP_KDE: KDE packages
    * Group.DESKTOP_OTHER: Other desktop packages
    * Group.DESKTOP_XFCE: XFCE packages
    * Group.EDUCATION: Education packages
    * Group.FONTS: Fonts
    * Group.GAMES: Games
    * Group.GRAPHICS: Graphics related packages
    * Group.INTERNET: Internet related packages
    * Group.LEGACY: Legacy packages
    * Group.LOCALIZATION: Localization related packages
    * Group.MAPS: Map related packages
    * Group.MULTIMEDIA: Multimedia packages
    * Group.NETWORK: Network related packages
    * Group.OFFICE: Office packages
    * Group.OTHER:
    * Group.POWER_MANAGEMENT: Power-management related packages
    * Group.PROGRAMMING: Programming packages
    * Group.PUBLISHING: Publishing related packages
    * Group.REPOS:
    * Group.SECURITY: Security packages
    * Group.SERVERS: Server related packages
    * Group.SYSTEM: System packages
    * Group.VIRTUALIZATION: Virtualization packages
    * Group.SCIENCE: Science related packages
    * Group.DOCUMENTATION: Documentation
    * Group.ELECTRONICS: Electronics packages
    * Group.COLLECTIONS:
    * Group.VENDOR: Vendor defined group
    * Group.NEWEST: Special group for recently updated packages
    """
    UNKNOWN = auto()
    ACCESSIBILITY = auto()
    ACCESSORIES = auto()
    ADMIN_TOOLS = auto()
    COMMUNICATION = auto()
    DESKTOP_GNOME = auto()
    DESKTOP_KDE = auto()
    DESKTOP_OTHER = auto()
    DESKTOP_XFCE = auto()
    EDUCATION = auto()
    FONTS = auto()
    GAMES = auto()
    GRAPHICS = auto()
    INTERNET = auto()
    LEGACY = auto()
    LOCALIZATION = auto()
    MAPS = auto()
    MULTIMEDIA = auto()
    NETWORK = auto()
    OFFICE = auto()
    OTHER = auto()
    POWER_MANAGEMENT = auto()
    PROGRAMMING = auto()
    PUBLISHING = auto()
    REPOS = auto()
    SECURITY = auto()
    SERVERS = auto()
    SYSTEM = auto()
    VIRTUALIZATION = auto()
    SCIENCE = auto()
    DOCUMENTATION = auto()
    ELECTRONICS = auto()
    COLLECTIONS = auto()
    VENDOR = auto()
    NEWEST = auto()

class UpdateState(PkFlag):
    """
    What state the update is in

    * UpdateState.UNKNOWN: Update stability unknown
    * UpdateState.STABLE: Update is a stable release
    * UpdateState.UNSTABLE: Update is an unstable release
    * UpdateState.TESTING: Update is a testing release
    """
    UNKNOWN = auto()
    STABLE = auto()
    UNSTABLE = auto()
    TESTING = auto()

class Info(PkFlag):
    """
    The enumerated types used in Package() - these have to refer to a
    specific package action, rather than a general state

    * Info.UNKNOWN: Package status is unknown
    * Info.INSTALLED: Package is installed
    * Info.AVAILABLE: Package is available to be installed
    * Info.LOW:
    * Info.ENHANCEMENT:
    * Info.NORMAL:
    * Info.BUGFIX:
    * Info.IMPORTANT:
    * Info.SECURITY:
    * Info.BLOCKED: Package is blocked
    * Info.DOWNLOADING: Package is downloading
    * Info.UPDATING: Package is updating
    * Info.INSTALLING: Package is being installed
    * Info.REMOVING: Package is being removed
    * Info.CLEANUP: Package is running cleanup
    * Info.OBSOLETING:
    * Info.COLLECTION_INSTALLED:
    * Info.COLLECTION_AVAILABLE:
    * Info.FINISHED:
    * Info.REINSTALLING: Package is being reinstalled
    * Info.DOWNGRADING: Package is being downgraded
    * Info.PREPARING: Package is preparing for installation/removal
    * Info.DECOMPRESSING: Package is decompressing
    * Info.UNTRUSTED:
    * Info.TRUSTED:
    * Info.UNAVAILABLE: Package is unavailable
    * Info.CRITICAL: Update severity is critical; Since: 1.2.4
    """
    UNKNOWN = auto()
    INSTALLED = auto()
    AVAILABLE = auto()
    LOW = auto()
    ENHANCEMENT = auto()
    NORMAL = auto()
    BUGFIX = auto()
    IMPORTANT = auto()
    SECURITY = auto()
    BLOCKED = auto()
    DOWNLOADING = auto()
    UPDATING = auto()
    INSTALLING = auto()
    REMOVING = auto()
    CLEANUP = auto()
    OBSOLETING = auto()
    COLLECTION_INSTALLED = auto()
    COLLECTION_AVAILABLE = auto()
    FINISHED = auto()
    REINSTALLING = auto()
    DOWNGRADING = auto()
    PREPARING = auto()
    DECOMPRESSING = auto()
    UNTRUSTED = auto()
    TRUSTED = auto()
    UNAVAILABLE = auto()
    CRITICAL = auto()

class DistroUpgrade(PkFlag):
    """
    The distro upgrade status

    * DistroUpgrade.UNKNOWN: Unknown distro upgrade state
    * DistroUpgrade.STABLE: Upgraded to stable release
    * DistroUpgrade.UNSTABLE: Upgraded to unstable release
    """
    UNKNOWN = auto()
    STABLE = auto()
    UNSTABLE = auto()

class SigType(PkFlag):
    """
    The signature type

    * SigType.UNKNOWN: Unknown signature type
    * SigType.GPG: GPG signature
    """
    UNKNOWN = auto()
    GPG = auto()

class MediaType(PkFlag):
    """
    The media type

    * MediaType.UNKNOWN: Unknown media type
    * MediaType.CD: Media is a CD
    * MediaType.DVD: Media is a DVD
    * MediaType.DISC: Media is a disc (not CD or DVD)
    """
    UNKNOWN = auto()
    CD = auto()
    DVD = auto()
    DISC = auto()

class Authorize(PkFlag):
    """
    The authorization result

    * Authorize.UNKNOWN: Unknown authorization status
    * Authorize.YES: Authorized
    * Authorize.NO: Not authorized
    * Authorize.INTERACTIVE: Interaction required for authorization
    """
    UNKNOWN = auto()
    YES = auto()
    NO = auto()
    INTERACTIVE = auto()

class UpgradeKind(PkFlag):
    """
    The type of distribution upgrade to perform

    * UpgradeKind.UNKNOWN:
    * UpgradeKind.MINIMAL: Perform minimal upgrade
    * UpgradeKind.DEFAULT: Perform default upgrade
    * UpgradeKind.COMPLETE: Perform complete upgrade
    """
    UNKNOWN = auto()
    MINIMAL = auto()
    DEFAULT = auto()
    COMPLETE = auto()

class TransactionFlag(PkFlag):
    """
    The transaction flags that alter how the transaction is handled

    * TransactionFlag.NONE: No transaction flag
    * TransactionFlag.ONLY_TRUSTED: Only allow trusted packages
    * TransactionFlag.SIMULATE: Simulate transaction
    * TransactionFlag.ONLY_DOWNLOAD: Only download packages
    * TransactionFlag.ALLOW_REINSTALL: Allow package reinstallation
    * TransactionFlag.JUST_REINSTALL: Only allow package reinstallation
    * TransactionFlag.ALLOW_DOWNGRADE: Allow packages to be downgraded
    """
    NONE = auto()
    ONLY_TRUSTED = auto()
    SIMULATE = auto()
    ONLY_DOWNLOAD = auto()
    ALLOW_REINSTALL = auto()
    JUST_REINSTALL = auto()
    ALLOW_DOWNGRADE = auto()
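
# A minimal usage sketch, assuming the PkFlag base class (defined
# earlier in this file) composes bitwise like enum.Flag:
#   tflags = TransactionFlag.ONLY_TRUSTED | TransactionFlag.SIMULATE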

232
moonraker/utils.py
Normal file
@@ -0,0 +1,232 @@
# General Server Utilities
#
# Copyright (C) 2020 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license

from __future__ import annotations
import logging
import logging.handlers
import os
import glob
import importlib
import pathlib
import sys
import subprocess
import asyncio
import hashlib
import json
import shlex
import re
from queue import SimpleQueue as Queue

# Annotation imports
from typing import (
    TYPE_CHECKING,
    List,
    Optional,
    ClassVar,
    Tuple,
    Dict,
    Any,
)

if TYPE_CHECKING:
    from types import ModuleType

MOONRAKER_PATH = os.path.join(os.path.dirname(__file__), '..')
SYS_MOD_PATHS = glob.glob("/usr/lib/python3*/dist-packages")
SYS_MOD_PATHS += glob.glob("/usr/lib/python3*/site-packages")


class ServerError(Exception):
    def __init__(self, message: str, status_code: int = 400) -> None:
        Exception.__init__(self, message)
        self.status_code = status_code


class SentinelClass:
    _instance: ClassVar[Optional[SentinelClass]] = None

    @staticmethod
    def get_instance() -> SentinelClass:
        if SentinelClass._instance is None:
            SentinelClass._instance = SentinelClass()
        return SentinelClass._instance


# Coroutine friendly QueueHandler courtesy of Martijn Pieters:
# https://www.zopatista.com/python/2019/05/11/asyncio-logging/
class LocalQueueHandler(logging.handlers.QueueHandler):
    def emit(self, record: logging.LogRecord) -> None:
        # Removed the call to self.prepare(), handle task cancellation
        try:
            self.enqueue(record)
        except asyncio.CancelledError:
            raise
        except Exception:
            self.handleError(record)


# Timed Rotating File Handler, based on Klipper's implementation
class MoonrakerLoggingHandler(logging.handlers.TimedRotatingFileHandler):
    def __init__(self, app_args: Dict[str, Any], **kwargs) -> None:
        super().__init__(app_args['log_file'], **kwargs)
        self.rollover_info: Dict[str, str] = {
            'header': f"{'-'*20}Moonraker Log Start{'-'*20}"
        }
        self.rollover_info['application_args'] = "\n".join(
            [f"{k}: {v}" for k, v in app_args.items()])
        lines = [line for line in self.rollover_info.values() if line]
        if self.stream is not None:
            self.stream.write("\n".join(lines) + "\n")

    def set_rollover_info(self, name: str, item: str) -> None:
        self.rollover_info[name] = item

    def doRollover(self) -> None:
        super().doRollover()
        lines = [line for line in self.rollover_info.values() if line]
        if self.stream is not None:
            self.stream.write("\n".join(lines) + "\n")


def _run_git_command(cmd: str) -> str:
    prog = shlex.split(cmd)
    process = subprocess.Popen(prog, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    ret, err = process.communicate()
    retcode = process.wait()
    if retcode == 0:
        return ret.strip().decode()
    raise Exception(f"Failed to run git command: {cmd}")


def _retrieve_git_tag(source_path: str) -> str:
    cmd = f"git -C {source_path} rev-list --tags --max-count=1"
    hash = _run_git_command(cmd)
    cmd = f"git -C {source_path} describe --tags {hash}"
    tag = _run_git_command(cmd)
    cmd = f"git -C {source_path} rev-list {tag}..HEAD --count"
    count = _run_git_command(cmd)
    return f"{tag}-{count}"


# Parse the git version from the command line. This code
# is borrowed from Klipper.
def retrieve_git_version(source_path: str) -> str:
    # Obtain version info from "git" program
    cmd = f"git -C {source_path} describe --always --tags --long --dirty"
    ver = _run_git_command(cmd)
    tag_match = re.match(r"v\d+\.\d+\.\d+", ver)
    if tag_match is not None:
        return ver
    # This is likely a shallow clone. Resolve the tag and manually create
    # the version string
    tag = _retrieve_git_tag(source_path)
    return f"t{tag}-g{ver}-shallow"
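# Illustrative outputs (hypothetical values): a full clone might yield
# "v0.7.1-188-gb035f02", while a shallow clone whose describe returns
# only an abbreviated hash resolves to "tv0.7.1-188-gb035f02-shallow".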
def get_software_version() -> str:
    version = "?"

    try:
        version = retrieve_git_version(MOONRAKER_PATH)
    except Exception:
        vfile = pathlib.Path(os.path.join(
            MOONRAKER_PATH, "moonraker/.version"))
        if vfile.exists():
            try:
                version = vfile.read_text().strip()
            except Exception:
                logging.exception("Unable to extract version from file")
                version = "?"
    return version


def setup_logging(app_args: Dict[str, Any]
                  ) -> Tuple[logging.handlers.QueueListener,
                             Optional[MoonrakerLoggingHandler],
                             Optional[str]]:
    root_logger = logging.getLogger()
    queue: Queue = Queue()
    queue_handler = LocalQueueHandler(queue)
    root_logger.addHandler(queue_handler)
    root_logger.setLevel(logging.INFO)
    stdout_hdlr = logging.StreamHandler(sys.stdout)
    stdout_fmt = logging.Formatter(
        '[%(filename)s:%(funcName)s()] - %(message)s')
    stdout_hdlr.setFormatter(stdout_fmt)
    for name, val in app_args.items():
        logging.info(f"{name}: {val}")
    warning: Optional[str] = None
    file_hdlr: Optional[MoonrakerLoggingHandler] = None
    listener: Optional[logging.handlers.QueueListener] = None
    log_file: str = app_args.get('log_file', "")
    if log_file:
        try:
            file_hdlr = MoonrakerLoggingHandler(
                app_args, when='midnight', backupCount=2)
            formatter = logging.Formatter(
                '%(asctime)s [%(filename)s:%(funcName)s()] - %(message)s')
            file_hdlr.setFormatter(formatter)
            listener = logging.handlers.QueueListener(
                queue, file_hdlr, stdout_hdlr)
        except Exception:
            log_file = os.path.normpath(log_file)
            dir_name = os.path.dirname(log_file)
            warning = f"Unable to create log file at '{log_file}'. " \
                f"Make sure that the folder '{dir_name}' exists " \
                f"and Moonraker has Read/Write access to the folder."
    if listener is None:
        listener = logging.handlers.QueueListener(
            queue, stdout_hdlr)
    listener.start()
    return listener, file_hdlr, warning
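# Startup sketch (the log path is a hypothetical example):
#   listener, file_hdlr, warning = setup_logging(
#       {'log_file': '/tmp/moonraker.log'})
#   if warning is not None:
#       logging.warning(warning)
#   ...
#   listener.stop()  # flush queued records on shutdown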
def hash_directory(dir_path: str,
                   ignore_exts: List[str],
                   ignore_dirs: List[str]
                   ) -> str:
    checksum = hashlib.blake2s()
    if not os.path.exists(dir_path):
        return ""
    for dpath, dnames, fnames in os.walk(dir_path):
        valid_dirs: List[str] = []
        for dname in sorted(dnames):
            if dname[0] == '.' or dname in ignore_dirs:
                continue
            valid_dirs.append(dname)
        dnames[:] = valid_dirs
        for fname in sorted(fnames):
            ext = os.path.splitext(fname)[-1].lower()
            if fname[0] == '.' or ext in ignore_exts:
                continue
            fpath = pathlib.Path(os.path.join(dpath, fname))
            try:
                checksum.update(fpath.read_bytes())
            except Exception:
                pass
    return checksum.hexdigest()


def verify_source(path: str = MOONRAKER_PATH) -> Optional[Tuple[str, bool]]:
    rfile = pathlib.Path(os.path.join(path, ".release_info"))
    if not rfile.exists():
        return None
    try:
        rinfo = json.loads(rfile.read_text())
    except Exception:
        return None
    orig_chksum = rinfo['source_checksum']
    ign_dirs = rinfo['ignored_dirs']
    ign_exts = rinfo['ignored_exts']
    checksum = hash_directory(path, ign_exts, ign_dirs)
    return checksum, checksum == orig_chksum
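# Sketch of the contract: verify_source() returns None when no
# ".release_info" file exists (e.g. a plain git checkout); for a zip
# install it returns a tuple such as ("<blake2s hexdigest>", True)
# when the source tree still matches the recorded checksum.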
def load_system_module(name: str) -> ModuleType:
    for module_path in SYS_MOD_PATHS:
        sys.path.insert(0, module_path)
        try:
            module = importlib.import_module(name)
        except ImportError as e:
            if not isinstance(e, ModuleNotFoundError):
                logging.exception(f"Failed to load {name} module")
            sys.path.pop(0)
        else:
            sys.path.pop(0)
            break
    else:
        raise ServerError(f"Unable to import module {name}")
    return module

660
moonraker/websockets.py
Normal file
@@ -0,0 +1,660 @@
# Websocket Request/Response Handler
#
# Copyright (C) 2020 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license

from __future__ import annotations
import logging
import ipaddress
import json
import asyncio
from tornado.websocket import WebSocketHandler, WebSocketClosedError
from utils import ServerError, SentinelClass

# Annotation imports
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Optional,
    Callable,
    Coroutine,
    Tuple,
    Type,
    TypeVar,
    Union,
    Dict,
    List,
)
if TYPE_CHECKING:
    from moonraker import Server
    from app import APIDefinition
    from klippy_connection import KlippyConnection as Klippy
    from .components.extensions import ExtensionManager
    import components.authorization
    _T = TypeVar("_T")
    _C = TypeVar("_C", str, bool, float, int)
    IPUnion = Union[ipaddress.IPv4Address, ipaddress.IPv6Address]
    ConvType = Union[str, bool, float, int]
    ArgVal = Union[None, int, float, bool, str]
    RPCCallback = Callable[..., Coroutine]
    AuthComp = Optional[components.authorization.Authorization]

CLIENT_TYPES = ["web", "mobile", "desktop", "display", "bot", "agent", "other"]
SENTINEL = SentinelClass.get_instance()

class Subscribable:
    def send_status(self,
                    status: Dict[str, Any],
                    eventtime: float
                    ) -> None:
        raise NotImplementedError


class WebRequest:
    def __init__(self,
                 endpoint: str,
                 args: Dict[str, Any],
                 action: Optional[str] = "",
                 conn: Optional[Subscribable] = None,
                 ip_addr: str = "",
                 user: Optional[Dict[str, Any]] = None
                 ) -> None:
        self.endpoint = endpoint
        self.action = action or ""
        self.args = args
        self.conn = conn
        self.ip_addr: Optional[IPUnion] = None
        try:
            self.ip_addr = ipaddress.ip_address(ip_addr)
        except Exception:
            self.ip_addr = None
        self.current_user = user

    def get_endpoint(self) -> str:
        return self.endpoint

    def get_action(self) -> str:
        return self.action

    def get_args(self) -> Dict[str, Any]:
        return self.args

    def get_connection(self) -> Optional[Subscribable]:
        return self.conn

    def get_ip_address(self) -> Optional[IPUnion]:
        return self.ip_addr

    def get_current_user(self) -> Optional[Dict[str, Any]]:
        return self.current_user

    def _get_converted_arg(self,
                           key: str,
                           default: Union[SentinelClass, _T],
                           dtype: Type[_C]
                           ) -> Union[_C, _T]:
        if key not in self.args:
            if isinstance(default, SentinelClass):
                raise ServerError(f"No data for argument: {key}")
            return default
        val = self.args[key]
        try:
            if dtype is not bool:
                return dtype(val)
            else:
                if isinstance(val, str):
                    val = val.lower()
                    if val in ["true", "false"]:
                        return True if val == "true" else False  # type: ignore
                elif isinstance(val, bool):
                    return val  # type: ignore
                raise TypeError
        except Exception:
            raise ServerError(
                f"Unable to convert argument [{key}] to {dtype}: "
                f"value received: {val}")

    def get(self,
            key: str,
            default: Union[SentinelClass, _T] = SENTINEL
            ) -> Union[_T, Any]:
        val = self.args.get(key, default)
        if isinstance(val, SentinelClass):
            raise ServerError(f"No data for argument: {key}")
        return val

    def get_str(self,
                key: str,
                default: Union[SentinelClass, _T] = SENTINEL
                ) -> Union[str, _T]:
        return self._get_converted_arg(key, default, str)

    def get_int(self,
                key: str,
                default: Union[SentinelClass, _T] = SENTINEL
                ) -> Union[int, _T]:
        return self._get_converted_arg(key, default, int)

    def get_float(self,
                  key: str,
                  default: Union[SentinelClass, _T] = SENTINEL
                  ) -> Union[float, _T]:
        return self._get_converted_arg(key, default, float)

    def get_boolean(self,
                    key: str,
                    default: Union[SentinelClass, _T] = SENTINEL
                    ) -> Union[bool, _T]:
        return self._get_converted_arg(key, default, bool)
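# Usage sketch (endpoint and argument values are hypothetical):
#   req = WebRequest("server/files/list", {"root": "gcodes", "limit": "5"})
#   req.get_str("root")               # -> "gcodes"
#   req.get_int("limit")              # -> 5, converted from the string
#   req.get_boolean("silent", False)  # -> False, key absent so the
#                                     #    default is returned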
class JsonRPC:
    def __init__(self, transport: str = "Websocket") -> None:
        self.methods: Dict[str, RPCCallback] = {}
        self.transport = transport

    def register_method(self,
                        name: str,
                        method: RPCCallback
                        ) -> None:
        self.methods[name] = method

    def remove_method(self, name: str) -> None:
        self.methods.pop(name, None)

    async def dispatch(self,
                       data: str,
                       conn: Optional[WebSocket] = None
                       ) -> Optional[str]:
        response: Any = None
        try:
            obj: Union[Dict[str, Any], List[dict]] = json.loads(data)
        except Exception:
            msg = f"{self.transport} data not json: {data}"
            logging.exception(msg)
            response = self.build_error(-32700, "Parse error")
            return json.dumps(response)
        logging.debug(f"{self.transport} Received::{data}")
        if isinstance(obj, list):
            response = []
            for item in obj:
                resp = await self.process_object(item, conn)
                if resp is not None:
                    response.append(resp)
            if not response:
                response = None
        else:
            response = await self.process_object(obj, conn)
        if response is not None:
            response = json.dumps(response)
            logging.debug(f"{self.transport} Response::{response}")
        return response

    async def process_object(self,
                             obj: Dict[str, Any],
                             conn: Optional[WebSocket]
                             ) -> Optional[Dict[str, Any]]:
        req_id: Optional[int] = obj.get('id', None)
        rpc_version: str = obj.get('jsonrpc', "")
        if rpc_version != "2.0":
            return self.build_error(-32600, "Invalid Request", req_id)
        method_name = obj.get('method', SENTINEL)
        if method_name is SENTINEL:
            self.process_response(obj, conn)
            return None
        if not isinstance(method_name, str):
            return self.build_error(-32600, "Invalid Request", req_id)
        method = self.methods.get(method_name, None)
        if method is None:
            return self.build_error(-32601, "Method not found", req_id)
        params: Dict[str, Any] = {}
        if 'params' in obj:
            params = obj['params']
            if not isinstance(params, dict):
                return self.build_error(
                    -32602, "Invalid params:", req_id, True)
        response = await self.execute_method(method, req_id, conn, params)
        return response

    def process_response(
        self, obj: Dict[str, Any], conn: Optional[WebSocket]
    ) -> None:
        if conn is None:
            logging.debug(f"RPC Response to non-socket request: {obj}")
            return
        response_id = obj.get("id")
        if response_id is None:
            logging.debug(f"RPC Response with null ID: {obj}")
            return
        result = obj.get("result")
        if result is None:
            name = conn.client_data["name"]
            error = obj.get("error")
            msg = f"Invalid Response: {obj}"
            code = -32600
            if isinstance(error, dict):
                msg = error.get("message", msg)
                code = error.get("code", code)
            msg = f"{name} rpc error: {code} {msg}"
            ret = ServerError(msg, 418)
        else:
            ret = result
        conn.resolve_pending_response(response_id, ret)

    async def execute_method(self,
                             callback: RPCCallback,
                             req_id: Optional[int],
                             conn: Optional[WebSocket],
                             params: Dict[str, Any]
                             ) -> Optional[Dict[str, Any]]:
        if conn is not None:
            params["_socket_"] = conn
        try:
            result = await callback(params)
        except TypeError as e:
            return self.build_error(
                -32602, f"Invalid params:\n{e}", req_id, True)
        except ServerError as e:
            code = e.status_code
            if code == 404:
                code = -32601
            return self.build_error(code, str(e), req_id, True)
        except Exception as e:
            return self.build_error(-31000, str(e), req_id, True)

        if req_id is None:
            return None
        else:
            return self.build_result(result, req_id)

    def build_result(self, result: Any, req_id: int) -> Dict[str, Any]:
        return {
            'jsonrpc': "2.0",
            'result': result,
            'id': req_id
        }

    def build_error(self,
                    code: int,
                    msg: str,
                    req_id: Optional[int] = None,
                    is_exc: bool = False
                    ) -> Dict[str, Any]:
        log_msg = f"JSON-RPC Request Error: {code}\n{msg}"
        if is_exc:
            logging.exception(log_msg)
        else:
            logging.info(log_msg)
        return {
            'jsonrpc': "2.0",
            'error': {'code': code, 'message': msg},
            'id': req_id
        }
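# Example exchange handled by dispatch() above (method name and id are
# hypothetical):
#   --> {"jsonrpc": "2.0", "method": "server.info", "id": 5656}
#   <-- {"jsonrpc": "2.0", "result": {...}, "id": 5656}
# A request without an "id" is treated as a notification and produces
# no response; malformed JSON yields a -32700 "Parse error" object.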
class APITransport:
    def register_api_handler(self, api_def: APIDefinition) -> None:
        raise NotImplementedError

    def remove_api_handler(self, api_def: APIDefinition) -> None:
        raise NotImplementedError


class WebsocketManager(APITransport):
    def __init__(self, server: Server) -> None:
        self.server = server
        self.klippy: Klippy = server.lookup_component("klippy_connection")
        self.websockets: Dict[int, WebSocket] = {}
        self.rpc = JsonRPC()
        self.closed_event: Optional[asyncio.Event] = None

        self.rpc.register_method("server.websocket.id", self._handle_id_request)
        self.rpc.register_method(
            "server.connection.identify", self._handle_identify)

    def register_notification(self,
                              event_name: str,
                              notify_name: Optional[str] = None
                              ) -> None:
        if notify_name is None:
            notify_name = event_name.split(':')[-1]

        def notify_handler(*args):
            self.notify_websockets(notify_name, args)
        self.server.register_event_handler(
            event_name, notify_handler)

    def register_api_handler(self, api_def: APIDefinition) -> None:
        if api_def.callback is None:
            # Remote API, uses RPC to reach out to Klippy
            ws_method = api_def.jrpc_methods[0]
            rpc_cb = self._generate_callback(api_def.endpoint)
            self.rpc.register_method(ws_method, rpc_cb)
        else:
            # Local API, uses local callback
            for ws_method, req_method in \
                    zip(api_def.jrpc_methods, api_def.request_methods):
                rpc_cb = self._generate_local_callback(
                    api_def.endpoint, req_method, api_def.callback)
                self.rpc.register_method(ws_method, rpc_cb)
        logging.info(
            "Registering Websocket JSON-RPC methods: "
            f"{', '.join(api_def.jrpc_methods)}")

    def remove_api_handler(self, api_def: APIDefinition) -> None:
        for jrpc_method in api_def.jrpc_methods:
            self.rpc.remove_method(jrpc_method)

    def _generate_callback(self, endpoint: str) -> RPCCallback:
        async def func(args: Dict[str, Any]) -> Any:
            ws: WebSocket = args.pop("_socket_")
            result = await self.klippy.request(
                WebRequest(endpoint, args, conn=ws, ip_addr=ws.ip_addr,
                           user=ws.current_user))
            return result
        return func

    def _generate_local_callback(self,
                                 endpoint: str,
                                 request_method: str,
                                 callback: Callable[[WebRequest], Coroutine]
                                 ) -> RPCCallback:
        async def func(args: Dict[str, Any]) -> Any:
            ws: WebSocket = args.pop("_socket_")
            result = await callback(
                WebRequest(endpoint, args, request_method, ws,
                           ip_addr=ws.ip_addr, user=ws.current_user))
            return result
        return func

    async def _handle_id_request(self, args: Dict[str, Any]) -> Dict[str, int]:
        ws: WebSocket = args["_socket_"]
        return {'websocket_id': ws.uid}

    async def _handle_identify(self, args: Dict[str, Any]) -> Dict[str, int]:
        ws: WebSocket = args["_socket_"]
        if ws.identified:
            raise self.server.error(
                f"Connection already identified: {ws.client_data}"
            )
        try:
            name = str(args["client_name"])
            version = str(args["version"])
            client_type: str = str(args["type"]).lower()
            url = str(args["url"])
        except KeyError as e:
            missing_key = str(e).split(":")[-1].strip()
            raise self.server.error(
                f"No data for argument: {missing_key}"
            ) from None
        if client_type not in CLIENT_TYPES:
            raise self.server.error(f"Invalid Client Type: {client_type}")
        ws.client_data = {
            "name": name,
            "version": version,
            "type": client_type,
            "url": url
        }
        if client_type == "agent":
            extensions: ExtensionManager
            extensions = self.server.lookup_component("extensions")
            try:
                extensions.register_agent(ws)
            except ServerError:
                ws.client_data["type"] = ""
                raise
        logging.info(
            f"Websocket {ws.uid} Client Identified - "
            f"Name: {name}, Version: {version}, Type: {client_type}"
        )
        self.server.send_event("websockets:websocket_identified", ws)
        return {'connection_id': ws.uid}
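    # Example identify request a client might send (all values are
    # hypothetical):
    # {"jsonrpc": "2.0", "method": "server.connection.identify",
    #  "params": {"client_name": "myclient", "version": "1.0",
    #             "type": "web", "url": "http://example.com"},
    #  "id": 42}
    # The reply carries {"connection_id": <uid>} as returned above.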
    def has_websocket(self, ws_id: int) -> bool:
        return ws_id in self.websockets

    def get_websocket(self, ws_id: int) -> Optional[WebSocket]:
        return self.websockets.get(ws_id, None)

    def get_websockets_by_type(self, client_type: str) -> List[WebSocket]:
        if not client_type:
            return []
        ret: List[WebSocket] = []
        for ws in self.websockets.values():
            if ws.client_data.get("type", "") == client_type.lower():
                ret.append(ws)
        return ret

    def get_websockets_by_name(self, name: str) -> List[WebSocket]:
        if not name:
            return []
        ret: List[WebSocket] = []
        for ws in self.websockets.values():
            if ws.client_data.get("name", "").lower() == name.lower():
                ret.append(ws)
        return ret

    def get_unidentified_websockets(self) -> List[WebSocket]:
        ret: List[WebSocket] = []
        for ws in self.websockets.values():
            if not ws.client_data:
                ret.append(ws)
        return ret

    def add_websocket(self, ws: WebSocket) -> None:
        self.websockets[ws.uid] = ws
        self.server.send_event("websockets:websocked_added", ws)
        logging.debug(f"New Websocket Added: {ws.uid}")

    def remove_websocket(self, ws: WebSocket) -> None:
        old_ws = self.websockets.pop(ws.uid, None)
        if old_ws is not None:
            self.klippy.remove_subscription(old_ws)
            self.server.send_event("websockets:websocket_removed", ws)
            logging.debug(f"Websocket Removed: {ws.uid}")
        if self.closed_event is not None and not self.websockets:
            self.closed_event.set()

    def notify_websockets(self,
                          name: str,
                          data: Union[List, Tuple] = [],
                          mask: List[int] = []
                          ) -> None:
        msg: Dict[str, Any] = {'jsonrpc': "2.0", 'method': "notify_" + name}
        if data:
            msg['params'] = data
        for ws in list(self.websockets.values()):
            if ws.uid in mask:
                continue
            ws.queue_message(msg)

    def get_count(self) -> int:
        return len(self.websockets)

    async def close(self) -> None:
        if not self.websockets:
            return
        self.closed_event = asyncio.Event()
        for ws in list(self.websockets.values()):
            ws.close(1001, "Server Shutdown")
        try:
            await asyncio.wait_for(self.closed_event.wait(), 2.)
        except asyncio.TimeoutError:
            pass
        self.closed_event = None

class WebSocket(WebSocketHandler, Subscribable):
    def initialize(self) -> None:
        self.server: Server = self.settings['server']
        self.event_loop = self.server.get_event_loop()
        self.wsm: WebsocketManager = self.server.lookup_component("websockets")
        self.rpc = self.wsm.rpc
        self._uid = id(self)
        self.is_closed: bool = False
        self.ip_addr: str = self.request.remote_ip
        self.queue_busy: bool = False
        self.pending_responses: Dict[int, asyncio.Future] = {}
        self.message_buf: List[Union[str, Dict[str, Any]]] = []
        self.last_pong_time: float = self.event_loop.get_loop_time()
        self._connected_time: float = 0.
        self._identified: bool = False
        self._client_data: Dict[str, str] = {
            "name": "unknown",
            "version": "",
            "type": "",
            "url": ""
        }

    @property
    def uid(self) -> int:
        return self._uid

    @property
    def hostname(self) -> str:
        return self.request.host_name

    @property
    def start_time(self) -> float:
        return self._connected_time

    @property
    def identified(self) -> bool:
        return self._identified

    @property
    def client_data(self) -> Dict[str, str]:
        return self._client_data

    @client_data.setter
    def client_data(self, data: Dict[str, str]) -> None:
        self._client_data = data
        self._identified = True

    def open(self, *args, **kwargs) -> None:
        self.set_nodelay(True)
        self._connected_time = self.event_loop.get_loop_time()
        agent = self.request.headers.get("User-Agent", "")
        is_proxy = False
        if (
            "X-Forwarded-For" in self.request.headers or
            "X-Real-Ip" in self.request.headers
        ):
            is_proxy = True
        logging.info(f"Websocket Opened: ID: {self.uid}, "
                     f"Proxied: {is_proxy}, "
                     f"User Agent: {agent}, "
                     f"Host Name: {self.hostname}")
        self.wsm.add_websocket(self)

    def on_message(self, message: Union[bytes, str]) -> None:
        self.event_loop.register_callback(self._process_message, message)

    def on_pong(self, data: bytes) -> None:
        self.last_pong_time = self.event_loop.get_loop_time()

    async def _process_message(self, message: str) -> None:
        try:
            response = await self.rpc.dispatch(message, self)
            if response is not None:
                self.queue_message(response)
        except Exception:
            logging.exception("Websocket Command Error")

    def queue_message(self, message: Union[str, Dict[str, Any]]):
        self.message_buf.append(message)
        if self.queue_busy:
            return
        self.queue_busy = True
        self.event_loop.register_callback(self._process_messages)

    async def _process_messages(self):
        if self.is_closed:
            self.message_buf = []
            self.queue_busy = False
            return
        while self.message_buf:
            msg = self.message_buf.pop(0)
            try:
                await self.write_message(msg)
            except WebSocketClosedError:
                self.is_closed = True
                logging.info(
                    f"Websocket closed while writing: {self.uid}")
                break
            except Exception:
                logging.exception(
                    f"Error sending data over websocket: {self.uid}")
        self.queue_busy = False

    def send_status(self,
                    status: Dict[str, Any],
                    eventtime: float
                    ) -> None:
        if not status:
            return
        self.queue_message({
            'jsonrpc': "2.0",
            'method': "notify_status_update",
            'params': [status, eventtime]})

    def call_method(
        self,
        method: str,
        params: Optional[Union[List, Dict[str, Any]]] = None
    ) -> Awaitable:
        fut = self.event_loop.create_future()
        msg = {
            'jsonrpc': "2.0",
            'method': method,
            'id': id(fut)
        }
        if params is not None:
            msg["params"] = params
        self.pending_responses[id(fut)] = fut
        self.queue_message(msg)
        return fut
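    # Sketch of calling a method on a connected agent client (method
    # name and params are hypothetical):
    #   result = await ws.call_method("my_agent.do_thing", {"arg": 1})
    # The reply is matched back to the awaiting future by its id in
    # resolve_pending_response() below.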
    def send_notification(self, name: str, data: List) -> None:
        self.wsm.notify_websockets(name, data, [self._uid])

    def resolve_pending_response(
        self, response_id: int, result: Any
    ) -> bool:
        fut = self.pending_responses.pop(response_id, None)
        if fut is None:
            return False
        if isinstance(result, ServerError):
            fut.set_exception(result)
        else:
            fut.set_result(result)
        return True

    def on_close(self) -> None:
        self.is_closed = True
        self.message_buf = []
        now = self.event_loop.get_loop_time()
        pong_elapsed = now - self.last_pong_time
        for resp in self.pending_responses.values():
            resp.set_exception(ServerError("Client Socket Disconnected", 500))
        self.pending_responses = {}
        logging.info(f"Websocket Closed: ID: {self.uid} "
                     f"Close Code: {self.close_code}, "
                     f"Close Reason: {self.close_reason}, "
                     f"Pong Time Elapsed: {pong_elapsed:.2f}")
        if self._client_data["type"] == "agent":
            extensions: ExtensionManager
            extensions = self.server.lookup_component("extensions")
            extensions.remove_agent(self)
        self.wsm.remove_websocket(self)

    def check_origin(self, origin: str) -> bool:
        if not super(WebSocket, self).check_origin(origin):
            auth: AuthComp = self.server.lookup_component('authorization', None)
            if auth is not None:
                return auth.check_cors(origin)
            return False
        return True

    # Check Authorized User
    def prepare(self):
        auth: AuthComp = self.server.lookup_component('authorization', None)
        if auth is not None:
            self.current_user = auth.check_authorized(self.request)

13
pytest.ini
Normal file
@@ -0,0 +1,13 @@
[pytest]
minversion = 7.0
pythonpath = moonraker scripts
testpaths = tests
required_plugins =
    pytest-asyncio>=0.17.2
    pytest-timeout>=2.1.0
asyncio_mode = strict
timeout = 60
timeout_method = signal
markers =
    run_paths
    no_ws_connect

64
scripts/build-zip-release.sh
Normal file
@@ -0,0 +1,64 @@
#!/bin/bash
# This script builds a zipped source release for Moonraker and Klipper.

install_packages()
{
    PKGLIST="python3-dev curl"

    # Update system package info
    report_status "Running apt-get update..."
    sudo apt-get update

    # Install desired packages
    report_status "Installing packages..."
    sudo apt-get install --yes $PKGLIST
}

report_status()
{
    echo -e "\n\n###### $1"
}

verify_ready()
{
    if [ "$EUID" -eq 0 ]; then
        echo "This script must not run as root"
        exit -1
    fi

    if [ ! -d "$SRCDIR/.git" ]; then
        echo "This script must be run from a git repo"
        exit -1
    fi

    if [ ! -d "$KLIPPER_DIR/.git" ]; then
        echo "The Klipper directory must be a git repo"
        exit -1
    fi
}

# Force script to exit if an error occurs
set -e

SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
OUTPUT_DIR="$SRCDIR/.dist"
KLIPPER_DIR="$HOME/klipper"
BETA=""

# Parse command line arguments
while getopts "o:k:b" arg; do
    case $arg in
        o) OUTPUT_DIR=$OPTARG;;
        k) KLIPPER_DIR=$OPTARG;;
        b) BETA="-b";;
    esac
done
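# Example invocation (paths are illustrative): build a beta release
# against a Klipper checkout in the home directory:
#   ./scripts/build-zip-release.sh -k ~/klipper -o ~/dist -b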
[ ! -d $OUTPUT_DIR ] && mkdir $OUTPUT_DIR
verify_ready
if [ "$BETA" = "" ]; then
    releaseTag=$( git -C $KLIPPER_DIR describe --tags `git -C $KLIPPER_DIR rev-list --tags --max-count=1` )
    echo "Checking out Klipper release $releaseTag"
    git -C $KLIPPER_DIR checkout $releaseTag
fi
python3 "$SRCDIR/scripts/build_release.py" -k $KLIPPER_DIR -o $OUTPUT_DIR $BETA

338
scripts/build_release.py
Normal file
@@ -0,0 +1,338 @@
#!/usr/bin/python3
# Builds zip release files for Moonraker and Klipper

import os
import sys
import argparse
import shutil
import tempfile
import json
import pathlib
import time
import traceback
import subprocess
import re
from typing import Dict, Any, List, Set, Optional

MOONRAKER_PATH = os.path.abspath(
    os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(os.path.join(MOONRAKER_PATH, "moonraker"))
from utils import hash_directory, retrieve_git_version  # noqa:E402

# Dirs and exts to ignore when calculating the repo hash
IGNORE_DIRS = ["out", "lib", "test", "docs", "__pycache__"]
IGNORE_EXTS = [".o", ".so", ".pyc", ".pyo", ".pyd", ".yml", ".yaml"]

# Files not to include in the source package
SKIP_FILES = [".gitignore", ".gitattributes", ".readthedocs.yaml",
              "mkdocs.yml", "__pycache__"]

RELEASE_URL = "https://api.github.com/repos/Arksine/moonraker/releases"
GIT_MAX_LOG_CNT = 100
GIT_LOG_FMT = \
    "sha:%H%x1Dauthor:%an%x1Ddate:%ct%x1Dsubject:%s%x1Dmessage:%b%x1E"
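# Each log record is terminated by \x1E and its fields are separated by
# \x1D; get_commit_log() below splits those and then splits each field
# on the first colon, yielding a dict per commit such as (values
# hypothetical):
#   {"sha": "ab12...", "author": "Jane Dev", "date": "1650000000",
#    "subject": "fix typo", "message": "..."}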
OWNER_REPOS = {
    'moonraker': "arksine/moonraker",
    'klippy': "klipper3d/klipper"
}
INSTALL_SCRIPTS = {
    'klippy': {
        'debian': "install-octopi.sh",
        'arch': "install-arch.sh",
        'centos': "install-centos.sh"
    },
    'moonraker': {
        'debian': "install-moonraker.sh"
    }
}


class CopyIgnore:
    def __init__(self, root_dir: str) -> None:
        self.root_dir = root_dir

    def __call__(self, dir_path: str, dir_items: List[str]) -> List[str]:
        ignored: List[str] = []
        for item in dir_items:
            if item in SKIP_FILES:
                ignored.append(item)
            elif dir_path == self.root_dir:
                full_path = os.path.join(dir_path, item)
                # Ignore all hidden directories in the root
                if os.path.isdir(full_path) and item[0] == ".":
                    ignored.append(item)
        return ignored


def search_install_script(data: str,
                          regex: str,
                          exclude: str
                          ) -> List[str]:
    items: Set[str] = set()
    lines: List[str] = re.findall(regex, data)
    for line in lines:
        items.update(line.strip().split())
    try:
        items.remove(exclude)
    except KeyError:
        pass
    return list(items)


def generate_dependency_info(repo_path: str, app_name: str) -> None:
    inst_scripts = INSTALL_SCRIPTS[app_name]
    package_info: Dict[str, Any] = {}
    for distro, script_name in inst_scripts.items():
        script_path = os.path.join(repo_path, "scripts", script_name)
        script = pathlib.Path(script_path)
        if not script.exists():
            continue
        data = script.read_text()
        packages: List[str] = search_install_script(
            data, r'PKGLIST="(.*)"', "${PKGLIST}")
        package_info[distro] = {'packages': sorted(packages)}
        if distro == "arch":
            aur_packages: List[str] = search_install_script(
                data, r'AURLIST="(.*)"', "${AURLIST}")
            package_info[distro]['aur_packages'] = sorted(aur_packages)
    req_file_name = os.path.join(repo_path, "scripts",
                                 f"{app_name}-requirements.txt")
    req_file = pathlib.Path(req_file_name)
    python_reqs: List[str] = []
    if req_file.exists():
        req_data = req_file.read_text()
        lines = [line.strip() for line in req_data.split('\n')
                 if line.strip()]
        for line in lines:
            comment_idx = line.find('#')
            if comment_idx == 0:
                continue
            if comment_idx > 0:
                line = line[:comment_idx].strip()
            python_reqs.append(line)
        package_info['python'] = sorted(python_reqs)
    dep_file = pathlib.Path(os.path.join(repo_path, ".dependencies"))
    dep_file.write_text(json.dumps(package_info))


def clean_repo(path: str) -> None:
    # Remove untracked files and directories from the repo
    prog = ('git', '-C', path, 'clean', '-x', '-f', '-d')
    process = subprocess.Popen(prog, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, cwd=path)
    retcode = process.wait()
    if retcode != 0:
        print(f"Error running git clean: {path}")


def get_releases() -> List[Dict[str, Any]]:
    print("Fetching Release List...")
    prog = ('curl', '-H', "Accept: application/vnd.github.v3+json",
            RELEASE_URL)
    process = subprocess.Popen(prog, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    response, err = process.communicate()
    retcode = process.wait()
    if retcode != 0:
        print(f"Release list request returned with code {retcode},"
              f" response:\n{err.decode()}")
        return []
    releases = json.loads(response.decode().strip())
    print(f"Found {len(releases)} releases")
    return releases


def get_last_release_info(moonraker_version: str,
                          is_beta: bool,
                          releases: List[Dict[str, Any]]
                          ) -> Dict[str, Any]:
    print("Searching for previous release assets...")
    cur_tag, commit_count = moonraker_version.split('-', 2)[:2]
    release_assets = []
    matched_tag: Optional[str] = None
    for release in releases:
        if int(commit_count) != 0:
            # This build is not being done against a fresh release,
            # return release info from a matching tag
            if release['tag_name'] == cur_tag:
                release_assets = release['assets']
                matched_tag = cur_tag
                break
        else:
            # Get the most recent non-matching tag
            if release['tag_name'] == cur_tag:
                continue
            if is_beta or not release['prerelease']:
                # Get the last tagged release. If we are building a beta,
                # that is the most recent release. Otherwise we should
                # omit pre-releases
                release_assets = release['assets']
                matched_tag = release['tag_name']
                break
    if matched_tag is None:
        print("No matching release found")
        matched_tag = "No Tag"
    else:
        print(f"Found release: {matched_tag}")

    asset_url: Optional[str] = None
    content_type: str = ""
    for asset in release_assets:
        if asset['name'] == "RELEASE_INFO":
            asset_url = asset['browser_download_url']
            content_type = asset['content_type']
            break
    if asset_url is None:
        print(f"RELEASE_INFO asset not found in release: {matched_tag}")
        return {}
    # Fetch the RELEASE_INFO asset for the matched tag
    print(f"Release Info Download URL: {asset_url}")
    prog = ('curl', '-L', '-H', f"Accept: {content_type}", asset_url)
    process = subprocess.Popen(prog, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    response, err = process.communicate()
    retcode = process.wait()
    if retcode != 0:
        print("Request for release info failed")
        return {}
    resp = response.decode().strip()
    print(f"Found Info for release {matched_tag}")
    return json.loads(resp)


def get_commit_log(path: str,
                   release_info: Dict[str, Any]
                   ) -> List[Dict[str, Any]]:
    print(f"Preparing commit log for {path.split('/')[-1]}")
    start_sha = release_info.get('commit_hash', None)
    prog = ['git', '-C', path, 'log', f'--format={GIT_LOG_FMT}',
            f'--max-count={GIT_MAX_LOG_CNT}']
    if start_sha is not None:
        prog = ['git', '-C', path, 'log', f'{start_sha}..HEAD',
                f'--format={GIT_LOG_FMT}', f'--max-count={GIT_MAX_LOG_CNT}']
    process = subprocess.Popen(prog, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, cwd=path)
    response, err = process.communicate()
    retcode = process.wait()
    if retcode != 0:
        return []
    resp = response.decode().strip()
    commit_log: List[Dict[str, Any]] = []
    for log_entry in resp.split('\x1E'):
        log_entry = log_entry.strip()
        if not log_entry:
            continue
        log_items = [li.strip() for li in log_entry.split('\x1D')
                     if li.strip()]
        cbh = [li.split(':', 1) for li in log_items]
        commit_log.append(dict(cbh))  # type: ignore
    print(f"Found {len(commit_log)} commits")
    return commit_log


def get_commit_hash(path: str) -> str:
    prog = ('git', '-C', path, 'rev-parse', 'HEAD')
    process = subprocess.Popen(prog, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, cwd=path)
    commit_hash, err = process.communicate()
    retcode = process.wait()
    if retcode == 0:
        return commit_hash.strip().decode()
    raise Exception(f"Failed to get commit hash: {commit_hash.decode()}")


def generate_version_info(path: str,
                          source_dir: str,
                          channel: str,
                          release_tag: Optional[str] = None
                          ) -> Dict[str, Any]:
    print(f"Generating version info: {source_dir}")
    clean_repo(path)
    owner_repo = OWNER_REPOS[source_dir]
    curtime = int(time.time())
    date_str = time.strftime("%Y%m%d", time.gmtime(curtime))
    version = retrieve_git_version(path)
    if release_tag is None:
        release_tag = version.split('-')[0]
    source_hash = hash_directory(path, IGNORE_EXTS, IGNORE_DIRS)
    long_version = f"{version}-moonraker-{date_str}"
    release_info = {
        'git_version': version,
        'long_version': long_version,
        'commit_hash': get_commit_hash(path),
        'source_checksum': source_hash,
        'ignored_exts': IGNORE_EXTS,
        'ignored_dirs': IGNORE_DIRS,
        'build_date': curtime,
        'channel': channel,
        'owner_repo': owner_repo,
        'host_repo': OWNER_REPOS['moonraker'],
        'release_tag': release_tag
    }
    vfile = pathlib.Path(os.path.join(path, source_dir, ".version"))
    vfile.write_text(long_version)
    rfile = pathlib.Path(os.path.join(path, ".release_info"))
    rfile.write_text(json.dumps(release_info))
    generate_dependency_info(path, source_dir)
    return release_info


def create_zip(repo_path: str,
               repo_name: str,
               output_path: str
               ) -> None:
    print(f"Creating Zip Release: {repo_name}")
    zip_path = os.path.join(output_path, repo_name)
    with tempfile.TemporaryDirectory() as tmp_dir:
        dest_path = os.path.join(tmp_dir, repo_name)
        ignore_cb = CopyIgnore(repo_path)
        shutil.copytree(repo_path, dest_path, ignore=ignore_cb)
        shutil.make_archive(zip_path, "zip", root_dir=dest_path)


def main() -> None:
    # Parse start arguments
    parser = argparse.ArgumentParser(
        description="Generates zip releases for Moonraker and Klipper")
    parser.add_argument(
        "-k", "--klipper", default="~/klipper",
        metavar='<klipper_path>',
        help="Path to Klipper git repo")
    parser.add_argument(
        "-o", "--output", default=os.path.join(MOONRAKER_PATH, ".dist"),
        metavar='<output_path>', help="Path to output directory")
    parser.add_argument(
        "-b", "--beta", action='store_true',
        help="Tag release as beta")
    args = parser.parse_args()
    kpath: str = os.path.abspath(os.path.expanduser(args.klipper))
    opath: str = os.path.abspath(os.path.expanduser(args.output))
    is_beta: bool = args.beta
    channel = "beta" if is_beta else "stable"
    if not os.path.exists(kpath):
        print(f"Invalid path to Klipper: {kpath}")
        sys.exit(-1)
    if not os.path.exists(opath):
        print(f"Invalid output path: {opath}")
        sys.exit(-1)
    releases = get_releases()
    all_info: Dict[str, Dict[str, Any]] = {}
    try:
        print("Generating Moonraker Zip Distribution...")
        all_info['moonraker'] = generate_version_info(
            MOONRAKER_PATH, "moonraker", channel)
        create_zip(MOONRAKER_PATH, 'moonraker', opath)
        rtag: str = all_info['moonraker']['release_tag']
        print("Generating Klipper Zip Distribution...")
        all_info['klipper'] = generate_version_info(
            kpath, "klippy", channel, rtag)
        create_zip(kpath, 'klipper', opath)
        info_file = pathlib.Path(os.path.join(opath, "RELEASE_INFO"))
        info_file.write_text(json.dumps(all_info))
        last_rinfo = get_last_release_info(
            all_info['moonraker']['git_version'], is_beta, releases)
        commit_log = {}
        commit_log['moonraker'] = get_commit_log(
            MOONRAKER_PATH, last_rinfo.get('moonraker', {}))
        commit_log['klipper'] = get_commit_log(
            kpath, last_rinfo.get('klipper', {}))
        clog_file = pathlib.Path(os.path.join(opath, "COMMIT_LOG"))
        clog_file.write_text(json.dumps(commit_log))
    except Exception:
        print("Error Creating Zip Distribution")
        traceback.print_exc(file=sys.stdout)
        sys.exit(-1)
    print(f"Build Complete. Files are located at '{opath}'")


if __name__ == "__main__":
    main()

233
scripts/dbtool.py
Normal file
@@ -0,0 +1,233 @@
#! /usr/bin/python3
# Tool to backup and restore Moonraker's LMDB database
#
# Copyright (C) 2022 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license
import argparse
import pathlib
import base64
import tempfile
import re
from typing import Any, Dict, Optional, TextIO, Tuple
import lmdb

MAX_NAMESPACES = 100
MAX_DB_SIZE = 200 * 2**20
HEADER_KEY = b"MOONRAKER_DATABASE_START"

LINE_MATCH = re.compile(r"\+(\d+),(\d+):(.+?)->(.+)")

class DBToolError(Exception):
    pass

# Use a modified CDBMake Format
# +keylen,datalen:namespace|key->data
# Key length includes the namespace, key and separator (a colon)
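# For illustration (hypothetical values): a namespace header for a "fluidd"
# namespace holding 3 entries is written as
#   +24,12:bmFtZXNwYWNlX2ZsdWlkZA==->ZW50cmllcz0z
# i.e. "+keylen,datalen:" followed by base64("namespace_fluidd") and
# base64("entries=3"), then one "+keylen,datalen:key->data" line per entry,
# with both key and data base64 encoded.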

def open_db(db_path: str) -> lmdb.Environment:
    return lmdb.open(db_path, map_size=MAX_DB_SIZE,
                     max_dbs=MAX_NAMESPACES)

def _do_dump(namespace: bytes,
             db: object,
             backup: TextIO,
             txn: lmdb.Transaction
             ) -> None:
    expected_key_count: int = txn.stat(db)["entries"]
    # write the namespace header
    ns_key = base64.b64encode(b"namespace_" + namespace).decode()
    ns_str = f"entries={expected_key_count}"
    ns_val = base64.b64encode(ns_str.encode()).decode()
    out = f"+{len(ns_key)},{len(ns_val)}:{ns_key}->{ns_val}\n"
    backup.write(out)
    with txn.cursor(db=db) as cursor:
        count = 0
        remaining = cursor.first()
        while remaining:
            key, value = cursor.item()
            keystr = base64.b64encode(key).decode()
            valstr = base64.b64encode(value).decode()
            out = f"+{len(keystr)},{len(valstr)}:{keystr}->{valstr}\n"
            backup.write(out)
            count += 1
            remaining = cursor.next()
    if expected_key_count != count:
        print("Warning: Key count mismatch for namespace "
              f"'{namespace.decode()}': expected {expected_key_count}"
              f", wrote {count}")

def _write_header(ns_count: int, backup: TextIO):
    val_str = f"namespace_count={ns_count}"
    hkey = base64.b64encode(HEADER_KEY).decode()
    hval = base64.b64encode(val_str.encode()).decode()
    out = f"+{len(hkey)},{len(hval)}:{hkey}->{hval}\n"
    backup.write(out)

def backup(args: Dict[str, Any]):
    source_db = pathlib.Path(args["source"]).expanduser().resolve()
    if not source_db.is_dir():
        print(f"Source path not a folder: '{source_db}'")
        exit(1)
    if not source_db.joinpath("data.mdb").exists():
        print(f"No database file found in source path: '{source_db}'")
        exit(1)
    bkp_dest = pathlib.Path(args["output"]).expanduser().resolve()
    print(f"Backing up database at '{source_db}' to '{bkp_dest}'...")
    if bkp_dest.exists():
        print(f"Warning: file at '{bkp_dest}' exists, will be overwritten")
    env = open_db(str(source_db))
    expected_ns_cnt: int = env.stat()["entries"]
    with bkp_dest.open("wt") as f:
        _write_header(expected_ns_cnt, f)
        with env.begin(buffers=True) as txn:
            count = 0
            with txn.cursor() as cursor:
                remaining = cursor.first()
                while remaining:
                    namespace = bytes(cursor.key())
                    db = env.open_db(namespace, txn=txn, create=False)
                    _do_dump(namespace, db, f, txn)
                    count += 1
                    remaining = cursor.next()
    env.close()
    if expected_ns_cnt != count:
        print("Warning: namespace count mismatch: "
              f"expected: {expected_ns_cnt}, wrote: {count}")
    print("Backup complete!")

def _process_header(key: bytes, value: bytes) -> int:
    if key != HEADER_KEY:
        raise DBToolError(
            "Database Backup does not contain a valid header key, "
            f" got {key.decode()}")
    val_parts = value.split(b"=", 1)
    if val_parts[0] != b"namespace_count":
        raise DBToolError(
            "Database Backup has an invalid header value, got "
            f"{value.decode()}")
    return int(val_parts[1])

def _process_namespace(key: bytes, value: bytes) -> Tuple[bytes, int]:
    key_parts = key.split(b"_", 1)
    if key_parts[0] != b"namespace":
        raise DBToolError(
            f"Invalid Namespace Key '{key.decode()}', ID not prefixed")
    namespace = key_parts[1]
    val_parts = value.split(b"=", 1)
    if val_parts[0] != b"entries":
        raise DBToolError(
            f"Invalid Namespace value '{value.decode()}', entry "
            "count not present")
    entries = int(val_parts[1])
    return namespace, entries

def _process_line(line: str) -> Tuple[bytes, bytes]:
    match = LINE_MATCH.match(line)
    if match is None:
        # TODO: use own exception
        raise DBToolError(
            f"Invalid DB Entry match: {line}")
    parts = match.groups()
    if len(parts) != 4:
        raise DBToolError(
            f"Invalid DB Entry, does not contain all data: {line}")
    key_len, val_len, key, val = parts
    if len(key) != int(key_len):
        raise DBToolError(
            f"Invalid DB Entry, key length mismatch. "
            f"Got {len(key)}, expected {key_len}, line: {line}")
    if len(val) != int(val_len):
        raise DBToolError(
            f"Invalid DB Entry, value length mismatch. "
            f"Got {len(val)}, expected {val_len}, line: {line}")
    decoded_key = base64.b64decode(key.encode())
    decoded_val = base64.b64decode(val.encode())
    return decoded_key, decoded_val
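# Round-trip sketch (hypothetical values): _do_dump() writes the pair
# (b"key", b"val") as "+4,4:a2V5->dmFs"; _process_line() validates both
# lengths against the encoded text and decodes it back to (b"key", b"val").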

def restore(args: Dict[str, Any]):
    dest_path = pathlib.Path(args["destination"]).expanduser().resolve()
    input_db = pathlib.Path(args["input"]).expanduser().resolve()
    if not input_db.is_file():
        print(f"No backup found at path: {input_db}")
        exit(1)
    if not dest_path.exists():
        print(f"Destination path '{dest_path}' does not exist, directory "
              "will be created")
    print(f"Restoring backup from '{input_db}' to '{dest_path}'...")
    bkp_dir: Optional[pathlib.Path] = None
    if dest_path.joinpath("data.mdb").exists():
        tmp_dir = pathlib.Path(tempfile.gettempdir())
        bkp_dir = tmp_dir.joinpath("moonrakerdb_backup")
        if not bkp_dir.is_dir():
            bkp_dir.mkdir()
        print(f"Warning: database file found in '{dest_path}', "
              "all data will be overwritten. Copying existing DB "
              f"to '{bkp_dir}'")
    env = open_db(str(dest_path))
    if bkp_dir is not None:
        env.copy(str(bkp_dir))
    expected_ns_count = -1
    namespace_count = 0
    keys_left = 0
    namespace = b""
    current_db = object()
    with env.begin(write=True) as txn:
        # clear all existing entries
        dbs = []
        with txn.cursor() as cursor:
            remaining = cursor.first()
            while remaining:
                ns = cursor.key()
                dbs.append(env.open_db(ns, txn=txn, create=False))
                remaining = cursor.next()
        for db in dbs:
            txn.drop(db)
        with input_db.open("rt") as f:
            while True:
                line = f.readline()
                if not line:
                    break
                key, val = _process_line(line)
                if expected_ns_count < 0:
                    expected_ns_count = _process_header(key, val)
                    continue
                if not keys_left:
                    namespace, keys_left = _process_namespace(key, val)
                    current_db = env.open_db(namespace, txn=txn)
                    namespace_count += 1
                    continue
                txn.put(key, val, db=current_db)
                keys_left -= 1
    if expected_ns_count != namespace_count:
        print("Warning: Namespace count mismatch, expected: "
              f"{expected_ns_count}, processed {namespace_count}")
    print("Restore Complete")


if __name__ == "__main__":
    # Parse start arguments
    parser = argparse.ArgumentParser(
        description="dbtool - tool for backup/restore of Moonraker's database")
    subparsers = parser.add_subparsers(
        title="commands", description="valid commands", required=True,
        metavar="<command>")
    bkp_parser = subparsers.add_parser("backup", help="backup a database")
    rst_parser = subparsers.add_parser("restore", help="restore a database")
    bkp_parser.add_argument(
        "source", metavar="<source path>",
        help="location of the folder containing the database to backup")
    bkp_parser.add_argument(
        "output", metavar="<output file>",
        help="location of the backup file to write to",
        default="~/moonraker_db.bkp")
    bkp_parser.set_defaults(func=backup)
    rst_parser.add_argument(
        "destination", metavar="<destination>",
        help="location of the folder where the database will be restored")
    rst_parser.add_argument(
        "input", metavar="<input file>",
        help="location of the backup file to restore from")
    rst_parser.set_defaults(func=restore)
    args = parser.parse_args()
    args.func(vars(args))
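# Example usage of the subcommands defined above (paths illustrative):
#   ./dbtool.py backup ~/.moonraker_database ~/moonraker_db.bkp
#   ./dbtool.py restore ~/.moonraker_database ~/moonraker_db.bkp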
42
scripts/fetch-apikey.sh
Normal file
@@ -0,0 +1,42 @@
#!/bin/bash
# Helper Script for fetching the API Key from a moonraker database
DATABASE_PATH="${HOME}/.moonraker_database"
MOONRAKER_ENV="${HOME}/moonraker-env"
DB_ARGS="--read=READ --db=authorized_users get _API_KEY_USER_"
API_REGEX='(?<="api_key": ")([^"]+)'
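# Illustration (hypothetical record): given a stored user entry such as
#   {"username": "_API_KEY_USER_", "api_key": "0123456789abcdef"}
# the lookbehind regex above captures only the value, "0123456789abcdef".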

print_help()
{
    echo "Moonraker API Key Extraction Utility"
    echo
    echo "usage: fetch-apikey.sh [-h] [-e <python env path>] [-d <database path>]"
    echo
    echo "optional arguments:"
    echo "  -h                   show this message"
    echo "  -e <env path>        path to Moonraker env folder"
    echo "  -d <database path>   path to Moonraker LMDB database folder"
    exit 0
}

# Parse command line arguments
while getopts "he:d:" arg; do
    case $arg in
        h) print_help;;
        e) MOONRAKER_ENV=$OPTARG;;
        d) DATABASE_PATH=$OPTARG;;
    esac
done

PYTHON_BIN="${MOONRAKER_ENV}/bin/python"

if [ ! -f $PYTHON_BIN ]; then
    echo "No Python binary found at '${PYTHON_BIN}'"
    exit -1
fi

if [ ! -d $DATABASE_PATH ]; then
    echo "No Moonraker database found at '${DATABASE_PATH}'"
    exit -1
fi

${PYTHON_BIN} -mlmdb --env=${DATABASE_PATH} ${DB_ARGS} | grep -Po "${API_REGEX}"
174
scripts/install-moonraker.sh
Normal file
@@ -0,0 +1,174 @@
#!/bin/bash
# This script installs Moonraker on a Raspberry Pi machine running
# Raspbian/Raspberry Pi OS based distributions.

PYTHONDIR="${MOONRAKER_VENV:-${HOME}/moonraker-env}"
SYSTEMDDIR="/etc/systemd/system"
REBUILD_ENV="${MOONRAKER_REBUILD_ENV:-n}"
FORCE_DEFAULTS="${MOONRAKER_FORCE_DEFAULTS:-n}"
DISABLE_SYSTEMCTL="${MOONRAKER_DISABLE_SYSTEMCTL:-n}"
CONFIG_PATH="${MOONRAKER_CONFIG_PATH:-${HOME}/moonraker.conf}"
LOG_PATH="${MOONRAKER_LOG_PATH:-/tmp/moonraker.log}"
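# Example (illustrative): each default above may be overridden from the
# environment before running this script, e.g.:
#   MOONRAKER_VENV=~/.venvs/moonraker MOONRAKER_CONFIG_PATH=~/klipper_config/moonraker.conf ./scripts/install-moonraker.sh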

# Step 2: Clean up legacy installation
cleanup_legacy() {
    if [ -f "/etc/init.d/moonraker" ]; then
        # Stop Moonraker Service
        echo "#### Cleanup legacy install script"
        sudo systemctl stop moonraker
        sudo update-rc.d -f moonraker remove
        sudo rm -f /etc/init.d/moonraker
        sudo rm -f /etc/default/moonraker
    fi
}

# Step 3: Install packages
install_packages()
{
    PKGLIST="python3-virtualenv python3-dev libopenjp2-7 python3-libgpiod"
    PKGLIST="${PKGLIST} curl libcurl4-openssl-dev libssl-dev liblmdb-dev"
    PKGLIST="${PKGLIST} libsodium-dev zlib1g-dev libjpeg-dev packagekit"

    # Update system package info
    report_status "Running apt-get update..."
    sudo apt-get update --allow-releaseinfo-change

    # Install desired packages
    report_status "Installing packages..."
    sudo apt-get install --yes ${PKGLIST}
}

# Step 4: Create python virtual environment
create_virtualenv()
{
    report_status "Installing python virtual environment..."

    # If the venv exists and the user requested a rebuild, remove it
    if [ -d ${PYTHONDIR} ] && [ $REBUILD_ENV = "y" ]; then
        report_status "Removing old virtualenv"
        rm -rf ${PYTHONDIR}
    fi

    if [ ! -d ${PYTHONDIR} ]; then
        GET_PIP="${HOME}/get-pip.py"
        virtualenv --no-pip -p /usr/bin/python3 ${PYTHONDIR}
        curl https://bootstrap.pypa.io/pip/3.6/get-pip.py -o ${GET_PIP}
        ${PYTHONDIR}/bin/python ${GET_PIP}
        rm ${GET_PIP}
    fi

    # Install/update dependencies
    ${PYTHONDIR}/bin/pip install -r ${SRCDIR}/scripts/moonraker-requirements.txt
}

# Step 5: Install startup script
install_script()
{
    # Create systemd service file
    SERVICE_FILE="${SYSTEMDDIR}/moonraker.service"
    [ -f $SERVICE_FILE ] && [ $FORCE_DEFAULTS = "n" ] && return
    report_status "Installing system start script..."
    sudo groupadd -f moonraker-admin
    sudo /bin/sh -c "cat > ${SERVICE_FILE}" << EOF
# Systemd service file for moonraker
[Unit]
Description=API Server for Klipper
Requires=network-online.target
After=network-online.target

[Install]
WantedBy=multi-user.target

[Service]
Type=simple
User=$USER
SupplementaryGroups=moonraker-admin
RemainAfterExit=yes
WorkingDirectory=${SRCDIR}
ExecStart=${LAUNCH_CMD} -c ${CONFIG_PATH} -l ${LOG_PATH}
Restart=always
RestartSec=10
EOF
    # Use systemctl to enable the moonraker systemd service
    if [ $DISABLE_SYSTEMCTL = "n" ]; then
        sudo systemctl enable moonraker.service
        sudo systemctl daemon-reload
    fi
}

check_polkit_rules()
{
    if [ ! -x "$(command -v pkaction)" ]; then
        return
    fi
    POLKIT_VERSION="$( pkaction --version | grep -Po "(\d?\.\d+)" )"
    if [ "$POLKIT_VERSION" = "0.105" ]; then
        POLKIT_LEGACY_FILE="/etc/polkit-1/localauthority/50-local.d/10-moonraker.pkla"
        # legacy policykit rules don't give users other than root read access
        if sudo [ ! -f $POLKIT_LEGACY_FILE ]; then
            echo -e "\n*** No PolicyKit Rules detected, run 'set-policykit-rules.sh'"
            echo "*** if you wish to grant Moonraker authorization to manage"
            echo "*** system services, reboot/shutdown the system, and update"
            echo "*** packages."
        fi
    else
        POLKIT_FILE="/etc/polkit-1/rules.d/moonraker.rules"
        POLKIT_USR_FILE="/usr/share/polkit-1/rules.d/moonraker.rules"
        if [ ! -f $POLKIT_FILE ] && [ ! -f $POLKIT_USR_FILE ]; then
            echo -e "\n*** No PolicyKit Rules detected, run 'set-policykit-rules.sh'"
            echo "*** if you wish to grant Moonraker authorization to manage"
            echo "*** system services, reboot/shutdown the system, and update"
            echo "*** packages."
        fi
    fi
}

# Step 6: Start server
start_software()
{
    report_status "Launching Moonraker API Server..."
    sudo systemctl restart moonraker
}

# Helper functions
report_status()
{
    echo -e "\n\n###### $1"
}

verify_ready()
{
    if [ "$EUID" -eq 0 ]; then
        echo "This script must not run as root"
        exit -1
    fi
}

# Force script to exit if an error occurs
set -e

# Find SRCDIR from the pathname of this script
SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
LAUNCH_CMD="${PYTHONDIR}/bin/python ${SRCDIR}/moonraker/moonraker.py"

# Parse command line arguments
while getopts "rfzc:l:" arg; do
    case $arg in
        r) REBUILD_ENV="y";;
        f) FORCE_DEFAULTS="y";;
        z) DISABLE_SYSTEMCTL="y";;
        c) CONFIG_PATH=$OPTARG;;
        l) LOG_PATH=$OPTARG;;
    esac
done

# Run installation steps defined above
verify_ready
cleanup_legacy
install_packages
create_virtualenv
install_script
check_polkit_rules
if [ $DISABLE_SYSTEMCTL = "n" ]; then
    start_software
fi
18
scripts/moonraker-requirements.txt
Normal file
@@ -0,0 +1,18 @@
# Python dependencies for Moonraker
tornado==6.1.0
pyserial==3.4
pyserial-asyncio==0.6
pillow==9.0.1
lmdb==1.2.1
streaming-form-data==1.8.1
distro==1.5.0
inotify-simple==1.3.5
libnacl==1.7.2
paho-mqtt==1.5.1
pycurl==7.44.1
zeroconf==0.37.0
preprocess-cancellation==0.2.0
jinja2==3.0.3
dbus-next==0.2.3
apprise==0.9.7
ldap3==2.9.1
209
scripts/pk-enum-convertor.py
Normal file
@@ -0,0 +1,209 @@
#! /usr/bin/python3
#
# The original enum-converter.py may be found at:
# https://github.com/PackageKit/PackageKit/blob/b64ee9dfa707d5dd2b93c8eebe9930a55fcde108/lib/python/enum-convertor.py
#
# Copyright (C) 2008 - 2012 PackageKit Authors
#
# Licensed under the GNU General Public License Version 2.0
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# The following modifications have been made to the original
# script:
# * Print the time of conversion
# * Extract and print the original license of the source
# * Enumerations are extracted from the header file to preserve
#   order
# * Use Python "Flag" Enumerations
# * Extract comments and include them as docstrings
# * Introduce a string constant validation mode.  This extracts
#   strings from pk-enum.c, then compares to the calculated
#   strings from pk-enum.h.
#
# Copyright (C) 2022 Eric Callahan <arksine.code@gmail.com>
#
# Usage:
#   pk-enum-converter.py pk_enum.h > enums.py
#
# Enum String Validation Mode:
#   pk-enum-converter.py pk_enum.h pk_enum.c
#
# The pk_enum source files, pk-enum.c and pk-enum.h, can be found in the
# PackageKit GitHub repo:
# https://github.com/PackageKit/PackageKit/blob/main/lib/packagekit-glib2/pk-enum.c
# https://github.com/PackageKit/PackageKit/blob/main/lib/packagekit-glib2/pk-enum.h
#
from __future__ import print_function

from re import compile, DOTALL, MULTILINE
import time
import sys
import pathlib
import textwrap

HEADER = \
'''
# This file was autogenerated from %s by pk-enum-converter.py
# on %s UTC
#
# License for original source:
#
%s

from __future__ import annotations
import sys
from enum import Flag, auto

class PkFlag(Flag):
    @classmethod
    def from_pkstring(cls, pkstring: str):
        for name, member in cls.__members__.items():
            if member.pkstring == pkstring:
                return cls(member.value)
        # Return "unknown" flag
        return cls(1)

    @classmethod
    def from_index(cls, index: int):
        return cls(1 << index)

    @property
    def pkstring(self) -> str:
        if self.name is None:
            return " | ".join([f.pkstring for f in self])
        return self.name.lower().replace("_", "-")

    @property
    def desc(self) -> str:
        if self.name is None:
            return ", ".join([f.desc for f in self])
        description = self.name.lower().replace("_", " ")
        return description.capitalize()

    @property
    def index(self) -> int:
        return self.value.bit_length() - 1

    if sys.version_info < (3, 11):
        def __iter__(self):
            for i in range(self._value_.bit_length()):
                val = 1 << i
                if val & self._value_ == val:
                    yield self.__class__(val)
''' # noqa: E122
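# Illustrative behavior of the generated PkFlag subclasses (the "Role" enum
# name and its members are assumptions for this example):
#   Role.INSTALL_PACKAGES.pkstring         -> "install-packages"
#   Role.from_pkstring("install-packages") -> Role.INSTALL_PACKAGES
#   (Role.DOWNLOAD | Role.INSTALL_PACKAGES).pkstring -> both member strings
#   joined with " | "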

FILTER_PKSTRING = \
'''    @property
    def pkstring(self) -> str:
        pks = self.name
        if pks is None:
            return " | ".join([f.pkstring for f in self])
        if pks in ["DEVELOPMENT", "NOT_DEVELOPMENT"]:
            pks = pks[:-6]
        if pks[:4] == "NOT_":
            pks = "~" + pks[4:]
        return pks.lower().replace("_", "-")
''' # noqa: E122

ERROR_PROPS = \
'''    @property
    def pkstring(self) -> str:
        if self == Error.UPDATE_FAILED_DUE_TO_RUNNING_PROCESS:
            return "failed-due-to-running-process"
        return super().pkstring
''' # noqa: E122

ALIASES = {
    "Error.OOM": "OUT_OF_MEMORY"
}

header_enum = compile(r"/\*\*\n(.+?)@PK_[A-Z_]+_ENUM_LAST:\s+\*\s+(.+?)"
                      r"\s+\*\*/\s+typedef enum {(.+?)} Pk(.+?)Enum",
                      DOTALL | MULTILINE)
header_value = compile(r"(PK_[A-Z_]+_ENUM)_([A-Z0-9_]+)")
header_desc = compile(r"@PK_[A-Z_]+_ENUM_([A-Z_]+):(.*)")
license = compile(r"(Copyright.+?)\*/", DOTALL | MULTILINE)
enum_h_name = sys.argv[1]
header = pathlib.Path(enum_h_name).read_text()

# Get License
lic_match = license.search(header)
assert lic_match is not None
lic_parts = lic_match.group(1).split("\n")
lic = "\n".join([("# " + p.lstrip("* ")).rstrip(" ") for p in lic_parts])


if len(sys.argv) > 2:
    # Validation Mode, extract strings from the source file, compare to
    # those calculated from the enums in the header file
    enum_to_string = {}
    enum = compile(r"static const PkEnumMatch enum_([^\]]+)\[\] = {(.*?)};",
                   DOTALL | MULTILINE)
    value = compile(r"(PK_[A-Z_]+_ENUM_[A-Z0-9_]+),\s+\"([^\"]+)\"")
    enum_c_name = sys.argv[2]
    inp = pathlib.Path(enum_c_name).read_text()
    for (name, data) in enum.findall(inp):
        for (enum_name, string) in value.findall(data):
            enum_to_string[enum_name] = string
    for (desc_data, comments, data, name) in header_enum.findall(header):
        for (prefix, short_name) in header_value.findall(data):
            if short_name == "LAST":
                continue
            # Validation Mode
            enum_name = f"{prefix}_{short_name}"
            string = enum_to_string[enum_name]
            calc_string = short_name.lower().replace("_", "-")
            if calc_string[:4] == "not-" and name == "Filter":
                calc_string = "~" + calc_string[4:]
            if calc_string != string:
                print(
                    f"Calculated String Mismatch: {name}.{short_name}\n"
                    f"Calculated: {calc_string}\n"
                    f"Extracted: {string}\n")
    exit(0)

print(HEADER % (enum_h_name, time.asctime(time.gmtime()), lic))
# Use the header file for correct enum ordering
for (desc_data, comments, data, name) in header_enum.findall(header):

    print(f"\nclass {name}(PkFlag):")
    # Print Docstring
    print('    """')
    comments = [(" " * 4 + c.lstrip("* ")).rstrip(" ")
                for c in comments.splitlines()]
    for comment in comments:
        comment = comment.expandtabs(4)
        if len(comment) > 79:
            comment = "\n".join(textwrap.wrap(
                comment, 79, subsequent_indent="    ",
                tabsize=4))
        print(comment)
    print("")
    for (item, desc) in header_desc.findall(desc_data):
        line = f"    * {name}.{item}: {desc}".rstrip()
        if len(line) > 79:
            print(f"    * {name}.{item}:")
            print(f"      {desc}")
        else:
            print(line)
    print('    """')
    if name == "Filter":
        print(FILTER_PKSTRING)
    elif name == "Error":
        print(ERROR_PROPS)
    aliases = []
    for (prefix, short_name) in header_value.findall(data):
        if short_name == "LAST":
            continue
        long_name = f"{name}.{short_name}"
        if long_name in ALIASES:
            alias = ALIASES[long_name]
            aliases.append((short_name, alias))
            short_name = alias
        # Print Enums
        print(f"    {short_name} = auto()")
    for name, alias in aliases:
        print(f"    {name} = {alias}")
151
scripts/set-policykit-rules.sh
Normal file
@@ -0,0 +1,151 @@
#!/bin/bash
# This script installs Moonraker's PolicyKit Rules used to grant access
# to system services

POLKIT_LEGACY_DIR="/etc/polkit-1/localauthority/50-local.d"
POLKIT_DIR="/etc/polkit-1/rules.d"
POLKIT_USR_DIR="/usr/share/polkit-1/rules.d"
MOONRAKER_UNIT="/etc/systemd/system/moonraker.service"
MOONRAKER_GID="-1"

check_moonraker_service()
{

    # Force Add the moonraker-admin group
    sudo groupadd -f moonraker-admin
    [ ! -f $MOONRAKER_UNIT ] && return
    # Make sure the unit file contains supplementary group
    HAS_SUPP="$( grep -cm1 "SupplementaryGroups=moonraker-admin" $MOONRAKER_UNIT || true )"
    [ "$HAS_SUPP" -eq 1 ] && return
    report_status "Adding moonraker-admin supplementary group to $MOONRAKER_UNIT"
    sudo sed -i "/^Type=simple$/a SupplementaryGroups=moonraker-admin" $MOONRAKER_UNIT
    sudo systemctl daemon-reload
}

add_polkit_legacy_rules()
{
    RULE_FILE="${POLKIT_LEGACY_DIR}/10-moonraker.pkla"
    report_status "Installing Moonraker PolicyKit Rules (Legacy) to ${RULE_FILE}..."
    ACTIONS="org.freedesktop.systemd1.manage-units"
    ACTIONS="${ACTIONS};org.freedesktop.login1.power-off"
    ACTIONS="${ACTIONS};org.freedesktop.login1.power-off-multiple-sessions"
    ACTIONS="${ACTIONS};org.freedesktop.login1.reboot"
    ACTIONS="${ACTIONS};org.freedesktop.login1.reboot-multiple-sessions"
    ACTIONS="${ACTIONS};org.freedesktop.packagekit.*"
    sudo /bin/sh -c "cat > ${RULE_FILE}" << EOF
[moonraker permissions]
Identity=unix-user:$USER
Action=$ACTIONS
ResultAny=yes
EOF
}

add_polkit_rules()
{
    if [ ! -x "$(command -v pkaction)" ]; then
        echo "PolicyKit not installed"
        exit 1
    fi
    POLKIT_VERSION="$( pkaction --version | grep -Po "(\d+\.?\d*)" )"
    report_status "PolicyKit Version ${POLKIT_VERSION} Detected"
    if [ "$POLKIT_VERSION" = "0.105" ]; then
        # install legacy pkla file
        add_polkit_legacy_rules
        return
    fi
    RULE_FILE=""
    if [ -d $POLKIT_USR_DIR ]; then
        RULE_FILE="${POLKIT_USR_DIR}/moonraker.rules"
    elif [ -d $POLKIT_DIR ]; then
        RULE_FILE="${POLKIT_DIR}/moonraker.rules"
    else
        echo "PolicyKit rules folder not detected"
        exit 1
    fi
    report_status "Installing PolicyKit Rules to ${RULE_FILE}..."
    MOONRAKER_GID=$( getent group moonraker-admin | awk -F: '{printf "%d", $3}' )
    sudo /bin/sh -c "cat > ${RULE_FILE}" << EOF
// Allow Moonraker User to manage systemd units, reboot and shutdown
// the system
polkit.addRule(function(action, subject) {
    if ((action.id == "org.freedesktop.systemd1.manage-units" ||
         action.id == "org.freedesktop.login1.power-off" ||
         action.id == "org.freedesktop.login1.power-off-multiple-sessions" ||
         action.id == "org.freedesktop.login1.reboot" ||
         action.id == "org.freedesktop.login1.reboot-multiple-sessions" ||
         action.id.startsWith("org.freedesktop.packagekit.")) &&
        subject.user == "$USER") {
        // Only allow processes with the "moonraker-admin" supplementary group
        // access
        var regex = "^Groups:.+?\\\s$MOONRAKER_GID[\\\s\\\0]";
        var cmdpath = "/proc/" + subject.pid.toString() + "/status";
        try {
            polkit.spawn(["grep", "-Po", regex, cmdpath]);
            return polkit.Result.YES;
        } catch (error) {
            return polkit.Result.NOT_HANDLED;
        }
    }
});
EOF
}
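# Illustration of the rule's /proc check (values hypothetical): if the
# moonraker-admin group has gid 998, /proc/<pid>/status for a member process
# contains a line like
#   Groups: 27 998
# which the spawned "grep -Po" matches, so polkit answers YES; otherwise
# grep exits non-zero and the rule returns NOT_HANDLED.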

clear_polkit_rules()
{
    report_status "Removing all Moonraker PolicyKit rules"
    sudo rm -f "${POLKIT_LEGACY_DIR}/10-moonraker.pkla"
    sudo rm -f "${POLKIT_USR_DIR}/moonraker.rules"
    sudo rm -f "${POLKIT_DIR}/moonraker.rules"
}

# Helper functions
report_status()
{
    echo -e "\n\n###### $1"
}

verify_ready()
{
    if [ "$EUID" -eq 0 ]; then
        echo "This script must not run as root"
        exit -1
    fi
}

CLEAR="n"
ROOT="n"
DISABLE_SYSTEMCTL="n"

# Parse command line arguments
while :; do
    case $1 in
        -c|--clear)
            CLEAR="y"
            ;;
        -r|--root)
            ROOT="y"
            ;;
        -z|--disable-systemctl)
            DISABLE_SYSTEMCTL="y"
            ;;
        *)
            break
    esac

    shift
done
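# Example usage of the flags parsed above (illustrative):
#   ./scripts/set-policykit-rules.sh            # install rules, restart Moonraker
#   ./scripts/set-policykit-rules.sh --clear    # remove all Moonraker rules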

if [ "$ROOT" = "n" ]; then
    verify_ready
fi

if [ "$CLEAR" = "y" ]; then
    clear_polkit_rules
else
    set -e
    check_moonraker_service
    add_polkit_rules
    if [ $DISABLE_SYSTEMCTL = "n" ]; then
        report_status "Restarting Moonraker..."
        sudo systemctl restart moonraker
    fi
fi
189
scripts/sudo_fix.sh
Normal file
@@ -0,0 +1,189 @@
#!/bin/bash

# moonraker-sudo (mnrkrsudo)
# Provides a specified Group that is intended to elevate user privileges
# to help Moonraker perform sudo actions when running CustomPIOS images
# with the "password-for-sudo" module.
#
# Partially uses functions from Arksine
#
# Copyright (C) 2020 Stephan Wendel <me@stephanwe.de>
#
# This file may be distributed under the terms of the GNU GPLv3 license

### Exit on Errors
set -e

### Configuration

SUDOERS_DIR='/etc/sudoers.d'
SUDOERS_FILE='020-sudo-for-moonraker'
NEW_GROUP='mnrkrsudo'


### Functions

verify_ready()
{
    if [ "$EUID" -eq 0 ]; then
        echo "This script must not run as root"
        exit -1
    fi
}

create_sudoers_file()
{

    SCRIPT_TEMP_PATH=/tmp

    report_status "Creating ${SUDOERS_FILE} ..."
    sudo rm -f $SCRIPT_TEMP_PATH/$SUDOERS_FILE
    sudo sed "s/GROUPNAME/$NEW_GROUP/g" > $SCRIPT_TEMP_PATH/$SUDOERS_FILE << '#EOF'

### Elevate moonraker API rights
### Do NOT allow Command Parts, only Full Commands,
### for example:
###
### /sbin/systemctl "reboot", /sbin/apt "update", .....

Defaults!/usr/bin/apt-get env_keep +="DEBIAN_FRONTEND"

Cmnd_Alias REBOOT = /sbin/shutdown -r now, /bin/systemctl "reboot"
Cmnd_Alias SHUTDOWN = /sbin/shutdown now, /sbin/shutdown -h now, /bin/systemctl "poweroff"
Cmnd_Alias APT = /usr/bin/apt-get
Cmnd_Alias SYSTEMCTL = /bin/systemctl



%GROUPNAME ALL=(ALL) NOPASSWD: REBOOT, SHUTDOWN, APT, SYSTEMCTL

#EOF
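# With the default NEW_GROUP=mnrkrsudo, the generated file lets group members
# run, for example, "sudo systemctl restart klipper" or "sudo apt-get upgrade"
# without a password prompt (illustrative commands permitted by the aliases
# in the heredoc above).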

    report_status "\e[1;32m...done\e[0m"
}

update_env()
{
    report_status "Export System Variable: DEBIAN_FRONTEND=noninteractive"
    sudo /bin/sh -c 'echo "DEBIAN_FRONTEND=noninteractive" >> /etc/environment'
}

verify_syntax()
{
    if [ -n "$(whereis -b visudo | awk '{print $2}')" ]; then

        report_status "\e[1;33mVerifying Syntax of ${SUDOERS_FILE}\e[0m\n"

        if [ $(LANG=C sudo visudo -cf $SCRIPT_TEMP_PATH/$SUDOERS_FILE | grep -c "OK" ) -eq 1 ];
        then
            VERIFY_STATUS=0
            report_status "\e[1;32m$(LANG=C sudo visudo -cf $SCRIPT_TEMP_PATH/$SUDOERS_FILE)\e[0m"
        else
            report_status "\e[1;31mSyntax Error:\e[0m Check File: $SCRIPT_TEMP_PATH/$SUDOERS_FILE"
            exit 1
        fi
    else
        VERIFY_STATUS=0
        report_status "\e[1;31mCommand 'visudo' not found. Skipping sudoers file verification.\e[0m"
    fi
}

install_sudoers_file()
{
    verify_syntax
    if [ $VERIFY_STATUS -eq 0 ];
    then
        report_status "Copying $SCRIPT_TEMP_PATH/$SUDOERS_FILE to $SUDOERS_DIR/$SUDOERS_FILE"
        sudo chmod 0440 $SCRIPT_TEMP_PATH/$SUDOERS_FILE
        sudo cp --preserve=mode $SCRIPT_TEMP_PATH/$SUDOERS_FILE $SUDOERS_DIR/$SUDOERS_FILE
    else
        exit 1
    fi
}

check_update_sudoers_file()
{
    if [ -e "$SUDOERS_DIR/$SUDOERS_FILE" ];
    then
        create_sudoers_file
        if [ -z $(sudo diff $SCRIPT_TEMP_PATH/$SUDOERS_FILE $SUDOERS_DIR/$SUDOERS_FILE) ]
        then
            report_status "No need to update $SUDOERS_DIR/$SUDOERS_FILE"
        else
            report_status "$SUDOERS_DIR/$SUDOERS_FILE needs to be updated."
            install_sudoers_file
        fi
    fi
}


add_new_group()
{
    sudo addgroup --system $NEW_GROUP &> /dev/null
    report_status "\e[1;32m...done\e[0m"
}

add_user_to_group()
{
    sudo usermod -aG $NEW_GROUP $USER &> /dev/null
    report_status "\e[1;32m...done\e[0m"
}

adduser_hint()
{
    report_status "\e[1;31mYou must REBOOT for the changes to take effect!\e[0m"
}

# Helper functions
report_status()
{
    echo -e "\n\n###### $1"
}

clean_temp()
{
    sudo rm -f $SCRIPT_TEMP_PATH/$SUDOERS_FILE
}
### Main

verify_ready

if [ -e "$SUDOERS_DIR/$SUDOERS_FILE" ] && [ $(sudo cat /etc/gshadow | grep -c "${NEW_GROUP}") -eq 1 ] && [ $(groups | grep -c "$NEW_GROUP") -eq 1 ];
then
    check_update_sudoers_file
    report_status "\e[1;32mEverything is setup, nothing to do...\e[0m\n"
    exit 0

else

    if [ -e "$SUDOERS_DIR/$SUDOERS_FILE" ];
    then
        report_status "\e[1;32mFile exists:\e[0m ${SUDOERS_FILE}"
        check_update_sudoers_file
    else
        report_status "\e[1;31mFile not found:\e[0m ${SUDOERS_FILE}\n"
        create_sudoers_file
        install_sudoers_file
    fi

    if [ $(sudo cat /etc/gshadow | grep -c "${NEW_GROUP}") -eq 1 ];
    then
        report_status "Group ${NEW_GROUP} already exists..."
    else
        report_status "Group ${NEW_GROUP} will be added..."
        add_new_group
    fi

    if [ $(groups | grep -c "$NEW_GROUP") -eq 1 ];
    then
        report_status "User ${USER} is already in $NEW_GROUP..."
    else
        report_status "Adding User ${USER} to Group $NEW_GROUP..."
        add_user_to_group
        adduser_hint
    fi
fi

update_env
clean_temp
exit 0
71
scripts/tag-release.sh
Normal file
@@ -0,0 +1,71 @@
#! /bin/bash
# Helper Script for Tagging Moonraker Releases

PRINT_ONLY="n"
KLIPPER_PATH="$HOME/klipper"
REMOTE=""
DESCRIBE="describe --always --tags --long"

# Get Tag and Klipper Path
TAG=$1
shift
while :; do
    case $1 in
        -k|--klipper-path)
            shift
            KLIPPER_PATH=$1
            ;;
        -p|--print)
            PRINT_ONLY="y"
            ;;
        *)
            break
    esac

    shift
done


if [ ! -d "$KLIPPER_PATH/.git" ]; then
    echo "Invalid Klipper Path: $KLIPPER_PATH"
    exit 1
fi
echo "Klipper found at $KLIPPER_PATH"
GIT_CMD="git -C $KLIPPER_PATH"

ALL_REMOTES="$( $GIT_CMD remote | tr '\n' ' ' | awk '{gsub(/^ +| +$/,"")} {print $0}' )"
echo "Found Klipper Remotes: $ALL_REMOTES"
for val in $ALL_REMOTES; do
    REMOTE_URL="$( $GIT_CMD remote get-url $val | awk '{gsub(/^ +| +$/,"")} {print tolower($0)}' )"
    match="$( echo $REMOTE_URL | grep -Ecm1 '(klipper3d|kevinoconnor)/klipper' || true )"
    if [ "$match" -eq 1 ]; then
        echo "Found Remote $val"
        REMOTE="$val"
        break
    fi
done

[ "$REMOTE" = "" ] && echo "Unable to find a valid remote" && exit 1

$GIT_CMD fetch $REMOTE

DESC="$( $GIT_CMD $DESCRIBE $REMOTE/master | awk '{gsub(/^ +| +$/,"")} {print $0}' )"
HASH="$( $GIT_CMD rev-parse $REMOTE/master | awk '{gsub(/^ +| +$/,"")} {print $0}' )"

if [ "$PRINT_ONLY" = "y" ]; then
    echo "
Tag: $TAG
Repo: Klipper
Branch: Master
Version: $DESC
Commit: $HASH
"
else
    echo "Adding Tag $TAG"
    git tag -a $TAG -m "Moonraker Version $TAG
Klipper Tag Data
repo: klipper
branch: master
version: $DESC
commit: $HASH
"
fi
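# Example usage (tag name illustrative):
#   ./scripts/tag-release.sh v0.7.1 -k ~/klipper -p   # preview the tag data only
#   ./scripts/tag-release.sh v0.7.1 -k ~/klipper      # create the annotated tag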
71
scripts/uninstall-moonraker.sh
Normal file
@@ -0,0 +1,71 @@
#!/bin/bash
# Moonraker uninstall script for Raspbian/Raspberry Pi OS

stop_service() {
    # Stop Moonraker Service
    echo "#### Stopping Moonraker Service.."
    sudo systemctl stop moonraker
}

remove_service() {
    # Remove Moonraker LSB/systemd service
    echo
    echo "#### Removing Moonraker Service.."
    if [ -f "/etc/init.d/moonraker" ]; then
        # legacy installation, remove the LSB service
        sudo update-rc.d -f moonraker remove
        sudo rm -f /etc/init.d/moonraker
        sudo rm -f /etc/default/moonraker
    else
        # Remove systemd installation
        sudo systemctl disable moonraker
        sudo rm -f /etc/systemd/system/moonraker.service
        sudo systemctl daemon-reload
        sudo systemctl reset-failed
    fi
}

remove_files() {
    # Remove API Key file from older versions
    if [ -e ~/.klippy_api_key ]; then
        echo "Removing legacy API Key"
        rm ~/.klippy_api_key
    fi

    # Remove API Key file from recent versions
    if [ -e ~/.moonraker_api_key ]; then
        echo "Removing API Key"
        rm ~/.moonraker_api_key
    fi

    # Remove virtualenv
    if [ -d ~/moonraker-env ]; then
        echo "Removing virtualenv..."
        rm -rf ~/moonraker-env
    else
        echo "No moonraker virtualenv found"
    fi

    # Notify user of method to remove Moonraker source code
    echo
    echo "The Moonraker system files and virtualenv have been removed."
    echo
    echo "The following command is typically used to remove source files:"
    echo "  rm -rf ~/moonraker"
    echo
    echo "You may also wish to uninstall nginx:"
    echo "  sudo apt-get remove nginx"
}

verify_ready()
{
    if [ "$EUID" -eq 0 ]; then
        echo "This script must not run as root"
        exit -1
    fi
}

verify_ready
stop_service
remove_service
remove_files
403
tests/assets/klipper/base_printer.cfg
Normal file
@@ -0,0 +1,403 @@
[mcu]
serial: /dev/serial/by-id/usb

[printer]
kinematics: cartesian
max_velocity: 300
max_accel: 1500
max_z_velocity: 15
max_z_accel: 200

[stepper_x]
microsteps: 16
step_pin: PC0
dir_pin: !PL0
enable_pin: !PA7
rotation_distance: 32
endstop_pin: tmc2130_stepper_x:virtual_endstop
position_endstop: 0
position_min: 0
position_max: 250
homing_speed: 50
homing_retract_dist: 0

[tmc2130 stepper_x]
cs_pin: PG0
interpolate: True
run_current: .281738
hold_current: .281738
sense_resistor: 0.220
diag1_pin: !PK2
driver_IHOLDDELAY: 8
driver_TPOWERDOWN: 0
driver_TBL: 2
driver_TOFF: 3
driver_HEND: 1
driver_HSTRT: 5
driver_PWM_FREQ: 2
driver_PWM_GRAD: 2
driver_PWM_AMPL: 230
driver_PWM_AUTOSCALE: True
driver_SGT: 3

[stepper_y]
microsteps: 16
step_pin: PC1
dir_pin: PL1
enable_pin: !PA6
rotation_distance: 32
endstop_pin: tmc2130_stepper_y:virtual_endstop
position_endstop: -4
position_max: 210
position_min: -4
homing_speed: 50
homing_retract_dist: 0

[tmc2130 stepper_y]
cs_pin: PG2
interpolate: True
run_current: .3480291
hold_current: .3480291
sense_resistor: 0.220
diag1_pin: !PK7
driver_IHOLDDELAY: 8
driver_TPOWERDOWN: 0
driver_TBL: 2
driver_TOFF: 3
driver_HEND: 1
driver_HSTRT: 5
driver_PWM_FREQ: 2
driver_PWM_GRAD: 2
driver_PWM_AMPL: 235
driver_PWM_AUTOSCALE: True
driver_SGT: 3

[stepper_z]
microsteps: 16
step_pin: PC2
dir_pin: !PL2
enable_pin: !PA5
rotation_distance: 8
endstop_pin: probe:z_virtual_endstop
position_max: 220
position_min: -2
homing_speed: 13.333

[tmc2130 stepper_z]
cs_pin: PK5
interpolate: True
run_current: .53033
hold_current: .53033
sense_resistor: 0.220
diag1_pin: !PK6
driver_IHOLDDELAY: 8
driver_TPOWERDOWN: 0
driver_TBL: 2
driver_TOFF: 3
driver_HEND: 1
driver_HSTRT: 5
driver_PWM_FREQ: 2
driver_PWM_GRAD: 4
driver_PWM_AMPL: 200
driver_PWM_AUTOSCALE: True
driver_SGT: 4

[extruder]
microsteps: 8
step_pin: PC3
dir_pin: PL6
enable_pin: !PA4
rotation_distance: 6.53061216
full_steps_per_rotation: 400
nozzle_diameter: 0.4
filament_diameter: 1.750
max_extrude_cross_section: 50.0
# Allows loading filament and purging up to 500mm
max_extrude_only_distance: 500.0
max_extrude_only_velocity: 120.0
max_extrude_only_accel: 1250.0
heater_pin: PE5
sensor_type: ATC Semitec 104GT-2
sensor_pin: PF0
control: pid
pid_Kp: 16.13
pid_Ki: 1.1625
pid_Kd: 56.23
min_temp: 0
max_temp: 305

[tmc2130 extruder]
cs_pin: PK4
interpolate: True
run_current: 0.41432
hold_current: 0.3
sense_resistor: 0.220
diag1_pin: !PK3
driver_IHOLDDELAY: 8
driver_TPOWERDOWN: 0
driver_TBL: 2
driver_TOFF: 3
driver_HEND: 1
driver_HSTRT: 5
driver_PWM_FREQ: 2
driver_PWM_GRAD: 4
driver_PWM_AMPL: 240
driver_PWM_AUTOSCALE: True
driver_SGT: 3

[heater_bed]
heater_pin: PG5
sensor_type: EPCOS 100K B57560G104F
sensor_pin: PF2
control: pid
pid_Kp: 126.13
pid_Ki: 4.3
pid_Kd: 924.76
min_temp: 0
max_temp: 125

[verify_heater heater_bed]
max_error: 240
check_gain_time: 120

[heater_fan nozzle_cooling_fan]
pin: PH5
heater: extruder
heater_temp: 50.0

[fan]
pin: PH3

[display]
lcd_type: hd44780
rs_pin: PD5
e_pin: PF7
d4_pin: PF5
d5_pin: PG4
d6_pin: PH7
d7_pin: PG3
encoder_pins: ^PJ1,^PJ2
click_pin: ^!PH6

[pause_resume]

[virtual_sdcard]
path: ${gcode_path}

[respond]
default_type: command

[probe]
pin: PB4
x_offset: 23
y_offset: 5
z_offset: 0.8
speed: 12.0

[bed_mesh]
speed: 140
horizontal_move_z: 2
mesh_min: 24, 6
mesh_max: 238, 210
probe_count: 7
mesh_pps: 2
fade_start: 1
fade_end: 10
fade_target: 0
move_check_distance: 15
algorithm: bicubic
bicubic_tension: .2
relative_reference_index: 24
faulty_region_1_min: 116.75, 41.81
faulty_region_1_max: 133.25, 78.81
faulty_region_2_min: 156.5, 99.31
faulty_region_2_max: 193.5, 115.81
faulty_region_3_min: 116.75, 136.21
faulty_region_3_max: 133.25, 173.31

[homing_override]
gcode:
    G1 Z3 F600
    G28 X0 Y0
    G1 X131 Y108 F5000
    G28 Z0
axes: Z
set_position_x: 0
set_position_y: 0
set_position_z: 0

[output_pin BEEPER_pin]
pin: PH2
pwm: True
value: 0
shutdown_value: 0
cycle_time: 0.001
scale: 1000

[force_move]
enable_force_move: True

[idle_timeout]
gcode:
    M104 S0
    M84

[gcode_macro PAUSE]
rename_existing: BASE_PAUSE
gcode:
    {% if not printer.pause_resume.is_paused %}
        M600
    {% endif %}

[gcode_macro M600]
variable_extr_temp: 0
gcode:
    {% set X = params.X|default(100) %}
    {% set Y = params.Y|default(100) %}
    {% set Z = params.Z|default(100) %}
    BASE_PAUSE
    SET_GCODE_VARIABLE MACRO=M600 VARIABLE=extr_temp VALUE={printer.extruder.target}
    G91
    {% if printer.extruder.temperature|float > 180 %}
        G1 E-.8 F2700
    {% endif %}
    G1 Z{Z}
    G90
    G1 X{X} Y{Y} F3000

[gcode_macro RESUME]
rename_existing: BASE_RESUME
gcode:
    {% if printer.pause_resume.is_paused %}
        {% if printer["gcode_macro M600"].extr_temp %}
            M109 S{printer["gcode_macro M600"].extr_temp}
        {% endif %}
        BASE_RESUME
    {% endif %}

[gcode_macro LOAD_FILAMENT]
gcode:
    M117 Loading Filament...
    G92 E0.0
    G91
    G1 E50 F400
    G1 E25 F100
    G90
    G92 E0.0
    M400
    M117 Load Complete
    UPDATE_DELAYED_GCODE ID=clear_display DURATION=5

[gcode_macro UNLOAD_FILAMENT]
gcode:
    M117 Unloading Filament...
    G92 E0.0
    G91
    G1 E-32 F5200
    G1 E-10 F100
    G1 E-38 F1000
    G90
    G92 E0.0
    M400
    M300 S300 P1000
    M117 Remove Filament Now!
    UPDATE_DELAYED_GCODE ID=clear_display DURATION=5

[gcode_macro G80]
gcode:
    G28
    BED_MESH_CALIBRATE
    G1 X0 Y0 F4000

[gcode_macro G81]
gcode:
    {% set S = params.S|default(0) %}
    BED_MESH_OUTPUT CENTER_ZERO={S}

[gcode_macro M300]
gcode:
    {% set S = params.S|default(1000) %}
    {% set P = params.P|default(100) %}
    SET_PIN PIN=BEEPER_pin VALUE={S}
    G4 P{P}
    SET_PIN PIN=BEEPER_pin VALUE=0

[gcode_macro PRINT_START]
gcode:
    {% set MATERIAL = params.MATERIAL|default("Unknown") %}
    {% set LAYER_HEIGHT = params.LAYER_HEIGHT|default(0) %}
    M83
    CLEAR_PAUSE
    SET_IDLE_TIMEOUT TIMEOUT=600
    SET_PRESSURE_ADVANCE ADVANCE=0
    SET_GCODE_OFFSET Z=0
    G90
    M104 S170
    M190 S{params.BTMP}
    M109 S170
    G80
    M104 S{params.ETMP}
    G1 X1 Y-3.0 Z20 F1000.0 ; go outside print area
    M109 S{params.ETMP}
    G1 Z.4
    G92 E0.0
    G91
    G1 X60.0 E9.0 F1000.0 ; intro line
    G1 X40.0 E12.5 F1000.0 ; intro line
    G90
    G92 E0.0
    {% if MATERIAL != "PLA" %}
        SET_VELOCITY_LIMIT SQUARE_CORNER_VELOCITY=1
    {% endif %}
    {% if LAYER_HEIGHT|float < 0.051 %}
        M221 S100
    {% else %}
        M221 S95
    {% endif %}
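# Illustrative slicer "start gcode" invocation for the macro above
# (parameter values hypothetical):
#   PRINT_START ETMP=215 BTMP=60 MATERIAL=PETG LAYER_HEIGHT=0.15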

[gcode_macro PRINT_END]
gcode:
    CLEAR_PAUSE
    M400
    BED_MESH_CLEAR
    G92 E0.0
    G91
    {% if printer.gcode_move.gcode_position.x > 20 %}
        {% if printer.gcode_move.gcode_position.y > 20 %}
            G1 Z+1.00 X-20.0 Y-20.0 F20000 ;short quick move to disengage from print
        {% else %}
            G1 Z+1.00 X-20.0 F20000 ;short quick move to disengage from print
        {% endif %}
    {% elif printer.gcode_move.gcode_position.y > 20 %}
        G1 Z+1.00 Y-20.0 F20000 ;short quick move to disengage from print
    {% endif %}
    G1 E-8.00 F500 ;retract additional filament to prevent oozing
    G90
    {% if printer.gcode_move.gcode_position.z < 100 %}
        G0 Z100 F1500
    {% elif printer.gcode_move.gcode_position.z < 190 %}
        G91
        G0 Z10 F1500
        G90
    {% endif %}
    G0 X10 Y200 F6000
    SET_GCODE_OFFSET Z=0 MOVE=1
    TURN_OFF_HEATERS
    SET_VELOCITY_LIMIT VELOCITY=300 SQUARE_CORNER_VELOCITY=5
    M84
    M107
    M204 S3000
    M221 S100

[gcode_macro CANCEL_PRINT]
rename_existing: BASE_CANCEL_PRINT
gcode:
    PAUSE
    SDCARD_RESET_FILE
    PRINT_END
    CLEAR_PAUSE

[gcode_macro TEST_REMOTE_METHOD]
gcode:
    {action_call_remote_method(method="moonraker_test",
                               result="test")}
403
tests/assets/klipper/error_printer.cfg
Normal file
@@ -0,0 +1,403 @@
[mcu]
serial: /dev/serial/by-id/usb

printer]
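# NOTE: the malformed section header above ("printer]" missing its opening
# bracket) appears to be intentional; this asset exercises Klipper config
# error handling (an assumption based on the file name "error_printer.cfg").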
kinematics: cartesian
max_velocity: 300
max_accel: 1500
max_z_velocity: 15
max_z_accel: 200

[stepper_x]
microsteps: 16
step_pin: PC0
dir_pin: !PL0
enable_pin: !PA7
rotation_distance: 32
endstop_pin: tmc2130_stepper_x:virtual_endstop
position_endstop: 0
position_min: 0
position_max: 250
homing_speed: 50
homing_retract_dist: 0

[tmc2130 stepper_x]
cs_pin: PG0
interpolate: True
run_current: .281738
hold_current: .281738
sense_resistor: 0.220
diag1_pin: !PK2
driver_IHOLDDELAY: 8
driver_TPOWERDOWN: 0
driver_TBL: 2
driver_TOFF: 3
driver_HEND: 1
driver_HSTRT: 5
driver_PWM_FREQ: 2
driver_PWM_GRAD: 2
driver_PWM_AMPL: 230
driver_PWM_AUTOSCALE: True
driver_SGT: 3

[stepper_y]
microsteps: 16
step_pin: PC1
dir_pin: PL1
enable_pin: !PA6
rotation_distance: 32
endstop_pin: tmc2130_stepper_y:virtual_endstop
position_endstop: -4
position_max: 210
position_min: -4
homing_speed: 50
homing_retract_dist: 0

[tmc2130 stepper_y]
cs_pin: PG2
interpolate: True
run_current: .3480291
hold_current: .3480291
sense_resistor: 0.220
diag1_pin: !PK7
driver_IHOLDDELAY: 8
driver_TPOWERDOWN: 0
driver_TBL: 2
driver_TOFF: 3
driver_HEND: 1
driver_HSTRT: 5
driver_PWM_FREQ: 2
driver_PWM_GRAD: 2
driver_PWM_AMPL: 235
driver_PWM_AUTOSCALE: True
driver_SGT: 3

[stepper_z]
microsteps: 16
step_pin: PC2
dir_pin: !PL2
enable_pin: !PA5
rotation_distance: 8
endstop_pin: probe:z_virtual_endstop
position_max: 220
position_min: -2
homing_speed: 13.333

[tmc2130 stepper_z]
cs_pin: PK5
interpolate: True
run_current: .53033
hold_current: .53033
sense_resistor: 0.220
diag1_pin: !PK6
driver_IHOLDDELAY: 8
driver_TPOWERDOWN: 0
driver_TBL: 2
driver_TOFF: 3
driver_HEND: 1
driver_HSTRT: 5
driver_PWM_FREQ: 2
driver_PWM_GRAD: 4
driver_PWM_AMPL: 200
driver_PWM_AUTOSCALE: True
driver_SGT: 4

[extruder]
microsteps: 8
step_pin: PC3
dir_pin: PL6
enable_pin: !PA4
rotation_distance: 6.53061216
full_steps_per_rotation: 400
nozzle_diameter: 0.4
filament_diameter: 1.750
max_extrude_cross_section: 50.0
# Allows loading filament and purging up to 500mm
max_extrude_only_distance: 500.0
max_extrude_only_velocity: 120.0
max_extrude_only_accel: 1250.0
heater_pin: PE5
sensor_type: ATC Semitec 104GT-2
sensor_pin: PF0
control: pid
pid_Kp: 16.13
pid_Ki: 1.1625
pid_Kd: 56.23
min_temp: 0
max_temp: 305

[tmc2130 extruder]
cs_pin: PK4
interpolate: True
run_current: 0.41432
hold_current: 0.3
sense_resistor: 0.220
diag1_pin: !PK3
driver_IHOLDDELAY: 8
driver_TPOWERDOWN: 0
driver_TBL: 2
driver_TOFF: 3
driver_HEND: 1
driver_HSTRT: 5
driver_PWM_FREQ: 2
driver_PWM_GRAD: 4
driver_PWM_AMPL: 240
driver_PWM_AUTOSCALE: True
driver_SGT: 3

[heater_bed]
heater_pin: PG5
sensor_type: EPCOS 100K B57560G104F
sensor_pin: PF2
control: pid
pid_Kp: 126.13
pid_Ki: 4.3
pid_Kd: 924.76
min_temp: 0
max_temp: 125

[verify_heater heater_bed]
max_error: 240
check_gain_time: 120

[heater_fan nozzle_cooling_fan]
pin: PH5
heater: extruder
heater_temp: 50.0

[fan]
pin: PH3

[display]
lcd_type: hd44780
rs_pin: PD5
e_pin: PF7
d4_pin: PF5
d5_pin: PG4
d6_pin: PH7
d7_pin: PG3
encoder_pins: ^PJ1,^PJ2
click_pin: ^!PH6

[pause_resume]

[virtual_sdcard]
path: ${gcode_path}

[respond]
default_type: command

[probe]
pin: PB4
x_offset: 23
y_offset: 5
z_offset: 0.8
speed: 12.0

[bed_mesh]
speed: 140
horizontal_move_z: 2
mesh_min: 24, 6
mesh_max: 238, 210
probe_count: 7
mesh_pps: 2
fade_start: 1
fade_end: 10
fade_target: 0
move_check_distance: 15
algorithm: bicubic
bicubic_tension: .2
relative_reference_index: 24
faulty_region_1_min: 116.75, 41.81
faulty_region_1_max: 133.25, 78.81
faulty_region_2_min: 156.5, 99.31
faulty_region_2_max: 193.5, 115.81
faulty_region_3_min: 116.75, 136.21
faulty_region_3_max: 133.25, 173.31

[homing_override]
gcode:
    G1 Z3 F600
    G28 X0 Y0
    G1 X131 Y108 F5000
    G28 Z0
axes: Z
set_position_x: 0
set_position_y: 0
set_position_z: 0

[output_pin BEEPER_pin]
pin: PH2
pwm: True
value: 0
shutdown_value: 0
cycle_time: 0.001
scale: 1000

[force_move]
enable_force_move: True

[idle_timeout]
gcode:
    M104 S0
    M84

[gcode_macro PAUSE]
rename_existing: BASE_PAUSE
gcode:
    {% if not printer.pause_resume.is_paused %}
        M600
    {% endif %}

[gcode_macro M600]
variable_extr_temp: 0
gcode:
    {% set X = params.X|default(100) %}
    {% set Y = params.Y|default(100) %}
    {% set Z = params.Z|default(100) %}
    BASE_PAUSE
    SET_GCODE_VARIABLE MACRO=M600 VARIABLE=extr_temp VALUE={printer.extruder.target}
    G91
    {% if printer.extruder.temperature|float > 180 %}
        G1 E-.8 F2700
    {% endif %}
    G1 Z{Z}
    G90
    G1 X{X} Y{Y} F3000

[gcode_macro RESUME]
rename_existing: BASE_RESUME
gcode:
    {% if printer.pause_resume.is_paused %}
        {% if printer["gcode_macro M600"].extr_temp %}
            M109 S{printer["gcode_macro M600"].extr_temp}
        {% endif %}
        BASE_RESUME
    {% endif %}

[gcode_macro LOAD_FILAMENT]
gcode:
    M117 Loading Filament...
    G92 E0.0
    G91
    G1 E50 F400
    G1 E25 F100
    G90
    G92 E0.0
    M400
    M117 Load Complete
    UPDATE_DELAYED_GCODE ID=clear_display DURATION=5

[gcode_macro UNLOAD_FILAMENT]
gcode:
    M117 Unloading Filament...
    G92 E0.0
    G91
    G1 E-32 F5200
    G1 E-10 F100
    G1 E-38 F1000
    G90
    G92 E0.0
    M400
    M300 S300 P1000
    M117 Remove Filament Now!
    UPDATE_DELAYED_GCODE ID=clear_display DURATION=5

[gcode_macro G80]
gcode:
    G28
    BED_MESH_CALIBRATE
    G1 X0 Y0 F4000

[gcode_macro G81]
gcode:
    {% set S = params.S|default(0) %}
    BED_MESH_OUTPUT CENTER_ZERO={S}

[gcode_macro M300]
gcode:
    {% set S = params.S|default(1000) %}
    {% set P = params.P|default(100) %}
    SET_PIN PIN=BEEPER_pin VALUE={S}
    G4 P{P}
    SET_PIN PIN=BEEPER_pin VALUE=0

[gcode_macro PRINT_START]
gcode:
    {% set MATERIAL = params.MATERIAL|default("Unknown") %}
    {% set LAYER_HEIGHT = params.LAYER_HEIGHT|default(0) %}
    M83
    CLEAR_PAUSE
    SET_IDLE_TIMEOUT TIMEOUT=600
    SET_PRESSURE_ADVANCE ADVANCE=0
    SET_GCODE_OFFSET Z=0
    G90
    M104 S170
    M190 S{params.BTMP}
    M109 S170
    G80
    M104 S{params.ETMP}
    G1 X1 Y-3.0 Z20 F1000.0 ; go outside print area
    M109 S{params.ETMP}
    G1 Z.4
    G92 E0.0
    G91
    G1 X60.0 E9.0 F1000.0 ; intro line
    G1 X40.0 E12.5 F1000.0 ; intro line
    G90
    G92 E0.0
    {% if MATERIAL != "PLA" %}
        SET_VELOCITY_LIMIT SQUARE_CORNER_VELOCITY=1
    {% endif %}
    {% if LAYER_HEIGHT|float < 0.051 %}
        M221 S100
    {% else %}
        M221 S95
    {% endif %}

[gcode_macro PRINT_END]
gcode:
    CLEAR_PAUSE
    M400
    BED_MESH_CLEAR
    G92 E0.0
    G91
    {% if printer.gcode_move.gcode_position.x > 20 %}
        {% if printer.gcode_move.gcode_position.y > 20 %}
            G1 Z+1.00 X-20.0 Y-20.0 F20000 ;short quick move to disengage from print
        {% else %}
            G1 Z+1.00 X-20.0 F20000 ;short quick move to disengage from print
        {% endif %}
    {% elif printer.gcode_move.gcode_position.y > 20 %}
        G1 Z+1.00 Y-20.0 F20000 ;short quick move to disengage from print
    {% endif %}
    G1 E-8.00 F500 ;retract additional filament to prevent oozing
    G90
    {% if printer.gcode_move.gcode_position.z < 100 %}
        G0 Z100 F1500
    {% elif printer.gcode_move.gcode_position.z < 190 %}
        G91
        G0 Z10 F1500
        G90
    {% endif %}
    G0 X10 Y200 F6000
    SET_GCODE_OFFSET Z=0 MOVE=1
    TURN_OFF_HEATERS
    SET_VELOCITY_LIMIT VELOCITY=300 SQUARE_CORNER_VELOCITY=5
    M84
    M107
    M204 S3000
    M221 S100

[gcode_macro CANCEL_PRINT]
rename_existing: BASE_CANCEL_PRINT
gcode:
    PAUSE
    SDCARD_RESET_FILE
    PRINT_END
    CLEAR_PAUSE

[gcode_macro TEST_REMOTE_METHOD]
gcode:
    {action_call_remote_method(method="moonraker_test",
                               result="test")}
1
tests/assets/klipper/klipper.dict
Normal file
File diff suppressed because one or more lines are too long
347
tests/assets/klipper/missing_reqs.cfg
Normal file
@@ -0,0 +1,347 @@
[mcu]
serial: /dev/serial/by-id/usb

[printer]
kinematics: cartesian
max_velocity: 300
max_accel: 1500
max_z_velocity: 15
max_z_accel: 200

[stepper_x]
microsteps: 16
step_pin: PC0
dir_pin: !PL0
enable_pin: !PA7
rotation_distance: 32
endstop_pin: tmc2130_stepper_x:virtual_endstop
position_endstop: 0
position_min: 0
position_max: 250
homing_speed: 50
homing_retract_dist: 0

[tmc2130 stepper_x]
cs_pin: PG0
interpolate: True
run_current: .281738
hold_current: .281738
sense_resistor: 0.220
diag1_pin: !PK2
driver_IHOLDDELAY: 8
driver_TPOWERDOWN: 0
driver_TBL: 2
driver_TOFF: 3
driver_HEND: 1
driver_HSTRT: 5
driver_PWM_FREQ: 2
driver_PWM_GRAD: 2
driver_PWM_AMPL: 230
driver_PWM_AUTOSCALE: True
driver_SGT: 3

[stepper_y]
microsteps: 16
step_pin: PC1
dir_pin: PL1
enable_pin: !PA6
rotation_distance: 32
endstop_pin: tmc2130_stepper_y:virtual_endstop
position_endstop: -4
position_max: 210
position_min: -4
homing_speed: 50
homing_retract_dist: 0

[tmc2130 stepper_y]
cs_pin: PG2
interpolate: True
run_current: .3480291
hold_current: .3480291
sense_resistor: 0.220
diag1_pin: !PK7
driver_IHOLDDELAY: 8
driver_TPOWERDOWN: 0
driver_TBL: 2
driver_TOFF: 3
driver_HEND: 1
driver_HSTRT: 5
driver_PWM_FREQ: 2
driver_PWM_GRAD: 2
driver_PWM_AMPL: 235
driver_PWM_AUTOSCALE: True
driver_SGT: 3

[stepper_z]
microsteps: 16
step_pin: PC2
dir_pin: !PL2
enable_pin: !PA5
rotation_distance: 8
endstop_pin: probe:z_virtual_endstop
position_max: 220
position_min: -2
homing_speed: 13.333

[tmc2130 stepper_z]
cs_pin: PK5
interpolate: True
run_current: .53033
hold_current: .53033
sense_resistor: 0.220
diag1_pin: !PK6
driver_IHOLDDELAY: 8
driver_TPOWERDOWN: 0
driver_TBL: 2
driver_TOFF: 3
driver_HEND: 1
driver_HSTRT: 5
driver_PWM_FREQ: 2
driver_PWM_GRAD: 4
driver_PWM_AMPL: 200
driver_PWM_AUTOSCALE: True
driver_SGT: 4

[extruder]
microsteps: 8
step_pin: PC3
dir_pin: PL6
enable_pin: !PA4
rotation_distance: 6.53061216
full_steps_per_rotation: 400
nozzle_diameter: 0.4
filament_diameter: 1.750
max_extrude_cross_section: 50.0
# Allows loading filament and purging up to 500mm
max_extrude_only_distance: 500.0
max_extrude_only_velocity: 120.0
max_extrude_only_accel: 1250.0
heater_pin: PE5
sensor_type: ATC Semitec 104GT-2
sensor_pin: PF0
control: pid
pid_Kp: 16.13
pid_Ki: 1.1625
pid_Kd: 56.23
min_temp: 0
max_temp: 305

[tmc2130 extruder]
cs_pin: PK4
interpolate: True
run_current: 0.41432
hold_current: 0.3
sense_resistor: 0.220
diag1_pin: !PK3
driver_IHOLDDELAY: 8
driver_TPOWERDOWN: 0
driver_TBL: 2
driver_TOFF: 3
driver_HEND: 1
driver_HSTRT: 5
driver_PWM_FREQ: 2
driver_PWM_GRAD: 4
driver_PWM_AMPL: 240
driver_PWM_AUTOSCALE: True
driver_SGT: 3

[heater_bed]
heater_pin: PG5
sensor_type: EPCOS 100K B57560G104F
sensor_pin: PF2
control: pid
pid_Kp: 126.13
pid_Ki: 4.3
pid_Kd: 924.76
min_temp: 0
max_temp: 125

[verify_heater heater_bed]
max_error: 240
check_gain_time: 120

[heater_fan nozzle_cooling_fan]
pin: PH5
heater: extruder
heater_temp: 50.0

[fan]
pin: PH3

[respond]
default_type: command

[probe]
pin: PB4
x_offset: 23
y_offset: 5
z_offset: 0.8
speed: 12.0

[bed_mesh]
speed: 140
horizontal_move_z: 2
mesh_min: 24, 6
mesh_max: 238, 210
probe_count: 7
mesh_pps: 2
fade_start: 1
fade_end: 10
fade_target: 0
move_check_distance: 15
algorithm: bicubic
bicubic_tension: .2
relative_reference_index: 24
faulty_region_1_min: 116.75, 41.81
faulty_region_1_max: 133.25, 78.81
faulty_region_2_min: 156.5, 99.31
faulty_region_2_max: 193.5, 115.81
faulty_region_3_min: 116.75, 136.21
faulty_region_3_max: 133.25, 173.31

[homing_override]
gcode:
    G1 Z3 F600
    G28 X0 Y0
    G1 X131 Y108 F5000
    G28 Z0
axes: Z
set_position_x: 0
set_position_y: 0
set_position_z: 0

[output_pin BEEPER_pin]
pin: PH2
pwm: True
value: 0
shutdown_value: 0
cycle_time: 0.001
scale: 1000

[force_move]
enable_force_move: True

[idle_timeout]
gcode:
    M104 S0
    M84

[gcode_macro LOAD_FILAMENT]
gcode:
    M117 Loading Filament...
    G92 E0.0
    G91
    G1 E50 F400
    G1 E25 F100
    G90
    G92 E0.0
    M400
    M117 Load Complete
    UPDATE_DELAYED_GCODE ID=clear_display DURATION=5

[gcode_macro UNLOAD_FILAMENT]
gcode:
    M117 Unloading Filament...
    G92 E0.0
    G91
    G1 E-32 F5200
    G1 E-10 F100
    G1 E-38 F1000
    G90
    G92 E0.0
    M400
    M300 S300 P1000
    M117 Remove Filament Now!
    UPDATE_DELAYED_GCODE ID=clear_display DURATION=5

[gcode_macro G80]
gcode:
    G28
    BED_MESH_CALIBRATE
    G1 X0 Y0 F4000

[gcode_macro G81]
gcode:
    {% set S = params.S|default(0) %}
    BED_MESH_OUTPUT CENTER_ZERO={S}

[gcode_macro M300]
gcode:
    {% set S = params.S|default(1000) %}
    {% set P = params.P|default(100) %}
    SET_PIN PIN=BEEPER_pin VALUE={S}
    G4 P{P}
    SET_PIN PIN=BEEPER_pin VALUE=0

[gcode_macro PRINT_START]
gcode:
    {% set MATERIAL = params.MATERIAL|default("Unknown") %}
    {% set LAYER_HEIGHT = params.LAYER_HEIGHT|default(0) %}
    M83
    CLEAR_PAUSE
    SET_IDLE_TIMEOUT TIMEOUT=600
    SET_PRESSURE_ADVANCE ADVANCE=0
    SET_GCODE_OFFSET Z=0
    G90
    M104 S170
    M190 S{params.BTMP}
    M109 S170
    G80
    M104 S{params.ETMP}
    G1 X1 Y-3.0 Z20 F1000.0 ; go outside print area
    M109 S{params.ETMP}
    G1 Z.4
    G92 E0.0
    G91
    G1 X60.0 E9.0 F1000.0 ; intro line
    G1 X40.0 E12.5 F1000.0 ; intro line
    G90
    G92 E0.0
    {% if MATERIAL != "PLA" %}
    SET_VELOCITY_LIMIT SQUARE_CORNER_VELOCITY=1
    {% endif %}
    {% if LAYER_HEIGHT|float < 0.051 %}
    M221 S100
    {% else %}
    M221 S95
    {% endif %}

[gcode_macro PRINT_END]
gcode:
    CLEAR_PAUSE
    M400
    BED_MESH_CLEAR
    G92 E0.0
    G91
    {% if printer.gcode_move.gcode_position.x > 20 %}
    {% if printer.gcode_move.gcode_position.y > 20 %}
    G1 Z+1.00 X-20.0 Y-20.0 F20000 ; short quick move to disengage from print
    {% else %}
    G1 Z+1.00 X-20.0 F20000 ; short quick move to disengage from print
    {% endif %}
    {% elif printer.gcode_move.gcode_position.y > 20 %}
    G1 Z+1.00 Y-20.0 F20000 ; short quick move to disengage from print
    {% endif %}
    G1 E-8.00 F500 ; retract additional filament to prevent oozing
    G90
    {% if printer.gcode_move.gcode_position.z < 100 %}
    G0 Z100 F1500
    {% elif printer.gcode_move.gcode_position.z < 190 %}
    G91
    G0 Z10 F1500
    G90
    {% endif %}
    G0 X10 Y200 F6000
    SET_GCODE_OFFSET Z=0 MOVE=1
    TURN_OFF_HEATERS
    SET_VELOCITY_LIMIT VELOCITY=300 SQUARE_CORNER_VELOCITY=5
    M84
    M107
    M204 S3000
    M221 S100

[gcode_macro TEST_REMOTE_METHOD]
gcode:
    {action_call_remote_method(method="moonraker_test",
                               result="test")}
16
tests/assets/moonraker/bare_db.cdb
Normal file
@@ -0,0 +1,16 @@
+32,24:TU9PTlJBS0VSX0RBVEFCQVNFX1NUQVJU->bmFtZXNwYWNlX2NvdW50PTU=
+36,12:bmFtZXNwYWNlX2F1dGhvcml6ZWRfdXNlcnM=->ZW50cmllcz0x
+20,148:X0FQSV9LRVlfVVNFUl8=->eyJ1c2VybmFtZSI6ICJfQVBJX0tFWV9VU0VSXyIsICJhcGlfa2V5IjogIjg4ZTdlMjA0MDU3YjQzYTdiNTI3ZGEwZDQzNjQ1MDg5IiwgImNyZWF0ZWRfb24iOiAxNjQ1NDkwOTExLjM5NzI1OTd9
+32,12:bmFtZXNwYWNlX2djb2RlX21ldGFkYXRh->ZW50cmllcz0w
+24,12:bmFtZXNwYWNlX2hpc3Rvcnk=->ZW50cmllcz0w
+28,12:bmFtZXNwYWNlX21vb25yYWtlcg==->ZW50cmllcz0z
+12,236:ZGF0YWJhc2U=->eyJkZWJ1Z19jb3VudGVyIjogMiwgInVuc2FmZV9zaHV0ZG93bnMiOiAxLCAicHJvdGVjdGVkX25hbWVzcGFjZXMiOiBbImdjb2RlX21ldGFkYXRhIiwgImhpc3RvcnkiLCAibW9vbnJha2VyIiwgInVwZGF0ZV9tYW5hZ2VyIl0sICJmb3JiaWRkZW5fbmFtZXNwYWNlcyI6IFsiYXV0aG9yaXplZF91c2VycyJdfQ==
+24,12:ZGF0YWJhc2VfdmVyc2lvbg==->cQEAAAAAAAAA
+16,84:ZmlsZV9tYW5hZ2Vy->eyJtZXRhZGF0YV92ZXJzaW9uIjogMywgImdjb2RlX3BhdGgiOiAiL2hvbWUvcGkvZ2NvZGVfZmlsZXMifQ==
+32,12:bmFtZXNwYWNlX3VwZGF0ZV9tYW5hZ2Vy->ZW50cmllcz02
+8,400:Zmx1aWRk->eyJsYXN0X2NvbmZpZ19oYXNoIjogImIyNDE4OTgyZmVhOTg1ZmZlN2ZlODFhOWQ4MWI0MDUwMThmMDFhYjM5MTNmNTk4MmJhMzllZjY4NzFiZjE3NDkiLCAibGFzdF9yZWZyZXNoX3RpbWUiOiAxNjQ1NDkwOTI2LjkwOTYzOTEsICJ2ZXJzaW9uIjogInYxLjE2LjIiLCAicmVtb3RlX3ZlcnNpb24iOiAidjEuMTYuMiIsICJkbF9pbmZvIjogWyJodHRwczovL2dpdGh1Yi5jb20vZmx1aWRkLWNvcmUvZmx1aWRkL3JlbGVhc2VzL2Rvd25sb2FkL3YxLjE2LjIvZmx1aWRkLnppcCIsICJhcHBsaWNhdGlvbi96aXAiLCA5NTE4NjU1XX0=
+12,3072:a2xpcHBlcg==->eyJsYXN0X2NvbmZpZ19oYXNoIjogIjg4OTMzZjgyNTVhMTQyNDI2YjM1ODdhYTY0MDdlNTZmNDllZDlmZWM2MWZhOTViMTVmY2Q2NmQ1ZDE3MGU5MGEiLCAibGFzdF9yZWZyZXNoX3RpbWUiOiAxNjQ1NDkwOTIyLjQxNTY0OTIsICJpc192YWxpZCI6IHRydWUsICJuZWVkX2NoYW5uZWxfdXBkYXRlIjogZmFsc2UsICJyZXBvX3ZhbGlkIjogdHJ1ZSwgImdpdF9vd25lciI6ICJLbGlwcGVyM2QiLCAiZ2l0X3JlcG9fbmFtZSI6ICJrbGlwcGVyIiwgImdpdF9yZW1vdGUiOiAib3JpZ2luIiwgImdpdF9icmFuY2giOiAibWFzdGVyIiwgImN1cnJlbnRfdmVyc2lvbiI6ICJ2MC4xMC4wLTI3MSIsICJ1cHN0cmVhbV92ZXJzaW9uIjogInYwLjEwLjAtMjc2IiwgImN1cnJlbnRfY29tbWl0IjogIjhiMGM2ZmNiMDg5NzY5ZjcwZWNiYjExY2MzNzkzZGNkNjFmNDQ1ZGQiLCAidXBzdHJlYW1fY29tbWl0IjogIjJiMmNhYThmMDUwZDMyZWZlMTY1OWU4ZDdjNzQzMWQwN2U5ZTY3YTAiLCAidXBzdHJlYW1fdXJsIjogImh0dHBzOi8vZ2l0aHViLmNvbS9LbGlwcGVyM2Qva2xpcHBlci5naXQiLCAiZnVsbF92ZXJzaW9uX3N0cmluZyI6ICJ2MC4xMC4wLTI3MS1nOGIwYzZmY2ItZGlydHkiLCAiYnJhbmNoZXMiOiBbImRldi13ZWJob29rcy0yMDIxMTExNCIsICJkZXYtd2ViaG9va3MtZml4IiwgIm1hc3RlciJdLCAiZGlydHkiOiB0cnVlLCAiaGVhZF9kZXRhY2hlZCI6IGZhbHNlLCAiZ2l0X21lc3NhZ2VzIjogW10sICJjb21taXRzX2JlaGluZCI6IFt7InNoYSI6ICIyYjJjYWE4ZjA1MGQzMmVmZTE2NTllOGQ3Yzc0MzFkMDdlOWU2N2EwIiwgImF1dGhvciI6ICJGcmFuayBUYWNraXR0IiwgImRhdGUiOiAiMTY0NTQ2Nzk3OCIsICJzdWJqZWN0IjogImtsaXBweS1yZXF1aXJlbWVudHM6IFBpbiBtYXJrdXBzYWZlPT0xLjEuMSB0byBmaXggcHl0aG9uMyAoIzUyODYpIiwgIm1lc3NhZ2UiOiAiTWFya3Vwc2FmZSB1cGRhdGVkIGFuZCB0aGUgbGF0ZXN0IHZlcnNpb24gbm8gbG9uZ2VyIGluY2x1ZGVzIGBzb2Z0X3VuaWNvZGVgXHJcblxyXG5TaWduZWQtb2ZmLWJ5OiBGcmFua2x5biBUYWNraXR0IDxnaXRAZnJhbmsuYWY+IiwgInRhZyI6IG51bGx9LCB7InNoYSI6ICI5ZTE1MzIxNDE4OWQxMDdmZWRjMTJiODNhZWZkYzQyZWZkOTE5NmY5IiwgImF1dGhvciI6ICJLZXZpbiBPJ0Nvbm5vciIsICJkYXRlIjogIjE2NDU0NjQwMjEiLCAic3ViamVjdCI6ICJkb2NzOiBNaW5vciB3b3JkaW5nIGNoYW5nZSB0byBFeGFtcGxlX0NvbmZpZ3MubWQiLCAibWVzc2FnZSI6ICJTaWduZWQtb2ZmLWJ5OiBLZXZpbiBPJ0Nvbm5vciA8a2V2aW5Aa29jb25ub3IubmV0PiIsICJ0YWciOiBudWxsfSwgeyJzaGEiOiAiNzIwMmE1ZGE4ZTIzZGJkZTI4YTE3Y2I2MDNlZWUzMDM4NWZkZDk1MSIsICJhdXRob3IiOiAiS2V2aW4gTydDb25ub3IiLCAiZGF0ZSI6ICIxNjQ1NDYzODUwIiwgInN1YmplY3QiOiAiZG9jczogTWlub3Igd29yZGluZyBjaGFuZ2UgaW4gRXhhbXBsZV9Db25maWdzLm1kIiwgIm1lc3NhZ2UiOiAiU2lnbmVkLW9mZi1ieTogS2V2aW4gTydDb25ub3IgPGtldmluQGtvY29ubm9yLm5ldD4iLCAidGFnIjogbnVsbH0sIHsic2hhIjogIjc0ZGJkOGE4ZTQxYmM5ZDJiMDk3Yzk5N2Y0ZTk3NWI2MGVmZTY4MTEiLCAiYXV0aG9yIjogIktldmluIE8nQ29ubm9yIiwgImRhdGUiOiAiMTY0NTQ2MzY5OSIsICJzdWJqZWN0IjogImRvY3M6IEZpeCBFeGFtcGxlX0NvbmZpZ3MubWQgbGlzdCByZW5kZXJpbmciLCAibWVzc2FnZSI6ICJNa2RvY3MgZG9lc24ndCBzdXBwb3J0IGEgdGhpcmQgbGV2ZWwgb2YgbGlzdCBuZXN0aW5nLlxuXG5TaWduZWQtb2ZmLWJ5OiBLZXZpbiBPJ0Nvbm5vciA8a2V2aW5Aa29jb25ub3IubmV0PiIsICJ0YWciOiBudWxsfSwgeyJzaGEiOiAiYzNiYWE2NzFhNWY0YjZkNjk5YjdiY2FiNTdkZmEwNWJhZWMwYmNlMCIsICJhdXRob3IiOiAiS2V2aW4gTydDb25ub3IiLCAiZGF0ZSI6ICIxNjQ1NDYzMDg1IiwgInN1YmplY3QiOiAiZG9jczogVXBkYXRlIEV4YW1wbGVfQ29uZmlncy5tZCIsICJtZXNzYWdlIjogIkRvY3VtZW50IHRoYXQgc3BhY2VzIGFuZCBzcGVjaWFsIGNoYXJhY3RlcnMgc2hvdWxkIG5vdCBiZSBpbiB0aGVcbmNvbmZpZyBmaWxlbmFtZS5cblxuUmVtb3ZlIHJlZmVyZW5jZSB0byBzdGVwX2Rpc3RhbmNlIGFuZCBwaW5fbWFwIGRlcHJlY2F0ZWQgZmVhdHVyZXMsIGFzXG50aG9zZSBmZWF0dXJlcyBhcmUgbm93IGZ1bGx5IHJlbW92ZWQuXG5cblNpZ25lZC1vZmYtYnk6IEtldmluIE8nQ29ubm9yIDxrZXZpbkBrb2Nvbm5vci5uZXQ+IiwgInRhZyI6IG51bGx9XX0=
+12,404:bWFpbnNhaWw=->eyJsYXN0X2NvbmZpZ19oYXNoIjogIjFlNDRlOWZkZDQ2YmI1MzYxN2IwZjJkNjg1YmNhODBkM2MzMzUxYTA3YzA5YmM2NzQyMDA0NWFjNTQxMzAyZjQiLCAibGFzdF9yZWZyZXNoX3RpbWUiOiAxNjQ1NDkwOTI2LjQ1ODIzOTMsICJ2ZXJzaW9uIjogInYyLjAuMSIsICJyZW1vdGVfdmVyc2lvbiI6ICJ2Mi4xLjIiLCAiZGxfaW5mbyI6IFsiaHR0cHM6Ly9naXRodWIuY29tL21haW5zYWlsLWNyZXcvbWFpbnNhaWwvcmVsZWFzZXMvZG93bmxvYWQvdjIuMS4yL21haW5zYWlsLnppcCIsICJhcHBsaWNhdGlvbi96aXAiLCAzNTEyNzYxXX0=
+12,1636:bW9vbnJha2Vy->eyJsYXN0X2NvbmZpZ19oYXNoIjogImVjNDEwMWQ4MWIzYzc5MzgzYjIyN2MwOGQwYTg4NDk5Mjg5NDM1ZmFlMGI0MTc5N2U2MWU1NjdjZWEyM2MyNjkiLCAibGFzdF9yZWZyZXNoX3RpbWUiOiAxNjQ1NDkwOTI0LjI4MzY1NTYsICJpc192YWxpZCI6IHRydWUsICJuZWVkX2NoYW5uZWxfdXBkYXRlIjogZmFsc2UsICJyZXBvX3ZhbGlkIjogdHJ1ZSwgImdpdF9vd25lciI6ICI/IiwgImdpdF9yZXBvX25hbWUiOiAibW9vbnJha2VyIiwgImdpdF9yZW1vdGUiOiAiYXJrc2luZSIsICJnaXRfYnJhbmNoIjogImRldi1kYXRhYmFzZS1hc3luYy0yNTAxMjAyMiIsICJjdXJyZW50X3ZlcnNpb24iOiAidjAuNy4xLTQxOCIsICJ1cHN0cmVhbV92ZXJzaW9uIjogInYwLjcuMS00MTUiLCAiY3VycmVudF9jb21taXQiOiAiODRiOGZkNDZmOWEzNjVhYmI5ZWIzY2NiNThjZjdlOWFhZGRmNjJmMiIsICJ1cHN0cmVhbV9jb21taXQiOiAiODA2OTA1MmRmYmU3OTY2ZjljNWY2ZjkwMWU3ZmIyMWFjMmFkMjJjOSIsICJ1cHN0cmVhbV91cmwiOiAiZ2l0Oi8vZXJpYy13b3JrLmhvbWUvbW9vbnJha2VyIiwgImZ1bGxfdmVyc2lvbl9zdHJpbmciOiAidjAuNy4xLTQxOC1nODRiOGZkNCIsICJicmFuY2hlcyI6IFsiZGV2LWNwdV90aHJvdHRsZWRfcGVyZi0yMDIxMTEwMyIsICJkZXYtdXBkYXRlLW1hbmFnZXItbXVsdGljbGllbnQiLCAibWFzdGVyIl0sICJkaXJ0eSI6IGZhbHNlLCAiaGVhZF9kZXRhY2hlZCI6IHRydWUsICJnaXRfbWVzc2FnZXMiOiBbXSwgImNvbW1pdHNfYmVoaW5kIjogW3sic2hhIjogIjgwNjkwNTJkZmJlNzk2NmY5YzVmNmY5MDFlN2ZiMjFhYzJhZDIyYzkiLCAiYXV0aG9yIjogIkVyaWMgQ2FsbGFoYW4iLCAiZGF0ZSI6ICIxNjQ1NDgyNDA5IiwgInN1YmplY3QiOiAic2NyaXB0czogaW50cm9kdWNlIGRidG9vbCIsICJtZXNzYWdlIjogIlRoaXMgdG9vbCBtYXkgYmUgdXNlZCB0byBiYWNrdXAgYW5kIHJlc3RvcmUgTW9vbnJha2VyJ3MgbG1kYlxuZGF0YWJhc2Ugd2l0aG91dCBkZXBlbmRpbmcgb24gdGhlIFwibG1kYi11dGlsc1wiIHBhY2thZ2UuICBUaGVcbmJhY2t1cCBpcyBkb25lIHRvIGEgcGxhaW4gdGV4dCBmaWxlIGluIGNkYiBmb3JtYXQsIHNvIGEgYmFja3VwXG5tYXkgYmUgcmVzdG9yZWQgb24gYW55IHBsYXRmb3JtLlxuXG5TaWduZWQtb2ZmLWJ5OiAgRXJpYyBDYWxsYWhhbiA8YXJrc2luZS5jb2RlQGdtYWlsLmNvbT4iLCAidGFnIjogbnVsbH1dfQ==
+12,928:bW9vbnRlc3Q=->eyJsYXN0X2NvbmZpZ19oYXNoIjogIjgwYzY5NjgwNWU3MTczOWIyNWEzYTFiNjNhMTc1YmM5Y2Q1NGVkM2U5YTBiMzBhNDhhNzAzYWFkZWI2YjNmNmMiLCAibGFzdF9yZWZyZXNoX3RpbWUiOiAxNjQ1NDkwOTI3LjgzMDQ0NzQsICJpc192YWxpZCI6IHRydWUsICJuZWVkX2NoYW5uZWxfdXBkYXRlIjogZmFsc2UsICJyZXBvX3ZhbGlkIjogdHJ1ZSwgImdpdF9vd25lciI6ICJhcmtzaW5lIiwgImdpdF9yZXBvX25hbWUiOiAibW9vbnRlc3QiLCAiZ2l0X3JlbW90ZSI6ICJvcmlnaW4iLCAiZ2l0X2JyYW5jaCI6ICJtYXN0ZXIiLCAiY3VycmVudF92ZXJzaW9uIjogInYwLjAuMS0yIiwgInVwc3RyZWFtX3ZlcnNpb24iOiAidjAuMC4xLTIiLCAiY3VycmVudF9jb21taXQiOiAiNWI0Yjk0ODBkYmQxODZiMTY2ZDM2NTVjMTFiNGY2NDBkYzEzNTA5YiIsICJ1cHN0cmVhbV9jb21taXQiOiAiNWI0Yjk0ODBkYmQxODZiMTY2ZDM2NTVjMTFiNGY2NDBkYzEzNTA5YiIsICJ1cHN0cmVhbV91cmwiOiAiaHR0cHM6Ly9naXRodWIuY29tL2Fya3NpbmUvbW9vbnRlc3QuZ2l0IiwgImZ1bGxfdmVyc2lvbl9zdHJpbmciOiAidjAuMC4xLTItZzViNGI5NDgiLCAiYnJhbmNoZXMiOiBbIm1hc3RlciJdLCAiZGlydHkiOiBmYWxzZSwgImhlYWRfZGV0YWNoZWQiOiBmYWxzZSwgImdpdF9tZXNzYWdlcyI6IFtdLCAiY29tbWl0c19iZWhpbmQiOiBbXX0=
+8,108:c3lzdGVt->eyJsYXN0X2NvbmZpZ19oYXNoIjogIiIsICJsYXN0X3JlZnJlc2hfdGltZSI6IDE2NDU0OTA5MTQuMTU3MzQwOCwgInBhY2thZ2VzIjogW119
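
The records above follow a cdbmake-style dump format (the format is inferred from the lines themselves): each line reads "+klen,vlen:key->value", where key and value are base64 strings and the leading lengths count the encoded characters. A minimal decoding sketch, offered as an illustration rather than part of this commit; it assumes the values happen to decode to text, as they do in this asset:

import base64
import re
from typing import Tuple

RECORD_RE = re.compile(r"^\+(\d+),(\d+):(.+?)->(.+)$")

def decode_record(line: str) -> Tuple[str, str]:
    """Decode one '+klen,vlen:b64key->b64value' record into (key, value)."""
    match = RECORD_RE.match(line.strip())
    if match is None:
        raise ValueError(f"not a cdb record: {line!r}")
    klen, vlen, b64key, b64val = match.groups()
    # The leading lengths count base64 characters, not decoded bytes
    assert len(b64key) == int(klen) and len(b64val) == int(vlen)
    return (base64.b64decode(b64key).decode(),
            base64.b64decode(b64val).decode())

print(decode_record(
    "+32,24:TU9PTlJBS0VSX0RBVEFCQVNFX1NUQVJU->bmFtZXNwYWNlX2NvdW50PTU="))
# -> ('MOONRAKER_DATABASE_START', 'namespace_count=5')
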
18
tests/assets/moonraker/base_server.conf
Normal file
@@ -0,0 +1,18 @@
[server]
host: 0.0.0.0
port: 7010
ssl_port: 7011
klippy_uds_address: ${klippy_uds_path}

[database]
database_path: ${database_path}

[machine]
provider: none

[file_manager]
config_path: ${config_path}
log_path: ${log_path}

[secrets]
secrets_path: ${secrets_path}
20
tests/assets/moonraker/base_server_ssl.conf
Normal file
@@ -0,0 +1,20 @@
[server]
host: 0.0.0.0
port: 7010
ssl_port: 7011
ssl_certificate_path: ${ssl_certificate_path}
ssl_key_path: ${ssl_key_path}
klippy_uds_address: ${klippy_uds_path}

[database]
database_path: ${database_path}

[machine]
provider: none

[file_manager]
config_path: ${config_path}
log_path: ${log_path}

[secrets]
secrets_path: ${secrets_path}
18
tests/assets/moonraker/invalid_config.conf
Normal file
@@ -0,0 +1,18 @@
[server]
host: 0.0.0.0
port: 7010
klippy_uds_address: ${klippy_uds_path}

# Syntax error
database]
database_path: ${database_path}

[machine]
provider: none

[file_manager]
config_path: ${config_path}
log_path: ${log_path}

[secrets]
secrets_path: ${secrets_path}
3
tests/assets/moonraker/secrets.ini
Normal file
@@ -0,0 +1,3 @@
[mqtt_credentials]
username: mqttuser
password: mqttpass
6
tests/assets/moonraker/secrets.json
Normal file
@@ -0,0 +1,6 @@
{
    "mqtt_credentials": {
        "username": "mqttuser",
        "password": "mqttpass"
    }
}
39
tests/assets/moonraker/supplemental.conf
Normal file
@@ -0,0 +1,39 @@
[prefix_sec one]

[prefix_sec two]

[prefix_sec three]

[test_options]
test_int: 1
test_float: 3.5
test_bool: True
test_string: Hello World
test_list:
    one
    two
    three
test_int_list: 1,2,3
test_float_list: 1.5,2.8,3.2
test_multi_list:
    1,2,3
    4,5,6
test_dict:
    one=1
    two=2
    three=3
test_dict_empty_field:
    one=test
    two
    three
test_template: {secrets.mqtt_credentials.username}
test_gpio: gpiochip0/gpio26
test_gpio_no_chip: gpio26
test_gpio_invert: !gpiochip0/gpio26
test_gpio_no_chip_invert: !gpio26
# The following four options should result in an error; can't
# pullup/pulldown an output pin
test_gpio_pullup: ^gpiochip0/gpio26
test_gpio_pullup_no_chip: ^gpio26
test_gpio_pulldown: ~gpiochip0/gpio26
test_gpio_pulldown_no_chip: ~gpio26
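
The test_template option above exercises secrets templating: the marker names a section and option in the secrets.ini asset shown earlier. A hedged sketch of how such a lookup could be resolved, offered as an illustration only and not as Moonraker's actual implementation:

import configparser
import re

def resolve_secrets(value: str, secrets_file: str) -> str:
    """Replace {secrets.<section>.<option>} markers with ini values."""
    parser = configparser.ConfigParser()
    parser.read(secrets_file)

    def lookup(match: re.Match) -> str:
        _, section, option = match.group(1).split(".")
        return parser[section][option]

    return re.sub(r"\{(secrets\.[^}]+)\}", lookup, value)

# resolve_secrets("{secrets.mqtt_credentials.username}", "secrets.ini")
# -> "mqttuser"
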
22
tests/assets/moonraker/unparsed_server.conf
Normal file
@@ -0,0 +1,22 @@
[server]
host: 0.0.0.0
port: 7010
klippy_uds_address: ${klippy_uds_path}
# Add an option that is not registered, which should
# generate a warning
unknown_option: True

[machine]
provider: none

[database]
database_path: ${database_path}

[file_manager]
config_path: ${config_path}
log_path: ${log_path}

[secrets]
secrets_path: ${secrets_path}

[machine unparsed]
244
tests/conftest.py
Normal file
@@ -0,0 +1,244 @@
from __future__ import annotations
import pytest
import pytest_asyncio
import asyncio
import shutil
import re
import pathlib
import sys
import shlex
import tempfile
import subprocess
from typing import Iterator, Dict, AsyncIterator, Any
from moonraker import Server
from eventloop import EventLoop
import utils
import dbtool
from fixtures import KlippyProcess, HttpClient, WebsocketClient

ASSETS = pathlib.Path(__file__).parent.joinpath("assets")

need_klippy_restart = pytest.StashKey[bool]()

def pytest_addoption(parser: pytest.Parser, pluginmanager):
    parser.addoption("--klipper-path", action="store", dest="klipper_path")
    parser.addoption("--klipper-exec", action="store", dest="klipper_exec")

def interpolate_config(source_path: pathlib.Path,
                       dest_path: pathlib.Path,
                       keys: Dict[str, Any]
                       ) -> None:
    def interp(match):
        return str(keys[match.group(1)])
    sub_data = re.sub(r"\${([^}]+)}", interp, source_path.read_text())
    dest_path.write_text(sub_data)

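# A hedged usage sketch for interpolate_config() above (the concrete
# paths are hypothetical; in the suite the session fixtures supply them):
#
#     keys = {"database_path": "/tmp/db", "config_path": "/tmp/config",
#             "log_path": "/tmp/logs", "secrets_path": "/tmp/secrets.ini",
#             "klippy_uds_path": "/tmp/klippy_uds"}
#     interpolate_config(pathlib.Path("assets/moonraker/base_server.conf"),
#                        pathlib.Path("/tmp/moonraker.conf"), keys)
#
# Every ${name} placeholder in the source asset is replaced with
# str(keys[name]) before the result is written to the destination.
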
@pytest.fixture(scope="session", autouse=True)
|
||||
def ssl_certs() -> Iterator[Dict[str, pathlib.Path]]:
|
||||
with tempfile.TemporaryDirectory(prefix="moonraker-certs-") as tmpdir:
|
||||
tmp_path = pathlib.Path(tmpdir)
|
||||
cert_path = tmp_path.joinpath("certificate.pem")
|
||||
key_path = tmp_path.joinpath("privkey.pem")
|
||||
cmd = (
|
||||
f"openssl req -newkey rsa:4096 -nodes -keyout {key_path} "
|
||||
f"-x509 -days 365 -out {cert_path} -sha256 "
|
||||
"-subj '/C=US/ST=NRW/L=Earth/O=Moonraker/OU=IT/"
|
||||
"CN=www.moonraker-test.com/emailAddress=mail@moonraker-test.com'"
|
||||
)
|
||||
args = shlex.split(cmd)
|
||||
subprocess.run(args, check=True)
|
||||
yield {
|
||||
"ssl_certificate_path": cert_path,
|
||||
"ssl_key_path": key_path,
|
||||
}
|
||||
|
||||
@pytest.fixture(scope="class")
|
||||
def event_loop() -> Iterator[asyncio.AbstractEventLoop]:
|
||||
loop = asyncio.get_event_loop_policy().new_event_loop()
|
||||
yield loop
|
||||
loop.close()
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def session_args(ssl_certs: Dict[str, pathlib.Path]
|
||||
) -> Iterator[Dict[str, pathlib.Path]]:
|
||||
mconf_asset = ASSETS.joinpath(f"moonraker/base_server.conf")
|
||||
secrets_asset = ASSETS.joinpath(f"moonraker/secrets.ini")
|
||||
pcfg_asset = ASSETS.joinpath(f"klipper/base_printer.cfg")
|
||||
with tempfile.TemporaryDirectory(prefix="moonraker-test") as tmpdir:
|
||||
tmp_path = pathlib.Path(tmpdir)
|
||||
secrets_dest = tmp_path.joinpath("secrets.ini")
|
||||
shutil.copy(secrets_asset, secrets_dest)
|
||||
cfg_path = tmp_path.joinpath("config")
|
||||
cfg_path.mkdir()
|
||||
log_path = tmp_path.joinpath("logs")
|
||||
log_path.mkdir()
|
||||
db_path = tmp_path.joinpath("database")
|
||||
db_path.mkdir()
|
||||
gcode_path = tmp_path.joinpath("gcode_files")
|
||||
gcode_path.mkdir()
|
||||
dest_paths = {
|
||||
"temp_path": tmp_path,
|
||||
"asset_path": ASSETS,
|
||||
"config_path": cfg_path,
|
||||
"database_path": db_path,
|
||||
"log_path": log_path,
|
||||
"gcode_path": gcode_path,
|
||||
"secrets_path": secrets_dest,
|
||||
"klippy_uds_path": tmp_path.joinpath("klippy_uds"),
|
||||
"klippy_pty_path": tmp_path.joinpath("klippy_pty"),
|
||||
"klipper.dict": ASSETS.joinpath("klipper/klipper.dict"),
|
||||
"mconf_asset": mconf_asset,
|
||||
"pcfg_asset": pcfg_asset,
|
||||
}
|
||||
dest_paths.update(ssl_certs)
|
||||
mconf_dest = cfg_path.joinpath("moonraker.conf")
|
||||
dest_paths["moonraker.conf"] = mconf_dest
|
||||
interpolate_config(mconf_asset, mconf_dest, dest_paths)
|
||||
pcfg_dest = cfg_path.joinpath("printer.cfg")
|
||||
dest_paths["printer.cfg"] = pcfg_dest
|
||||
interpolate_config(pcfg_asset, pcfg_dest, dest_paths)
|
||||
yield dest_paths
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def klippy_session(session_args: Dict[str, pathlib.Path],
|
||||
pytestconfig: pytest.Config) -> Iterator[KlippyProcess]:
|
||||
pytestconfig.stash[need_klippy_restart] = False
|
||||
kpath = pytestconfig.getoption('klipper_path', "~/klipper")
|
||||
kexec = pytestconfig.getoption('klipper_exec', None)
|
||||
if kexec is None:
|
||||
kexec = sys.executable
|
||||
exec = pathlib.Path(kexec).expanduser()
|
||||
klipper_path = pathlib.Path(kpath).expanduser()
|
||||
base_cmd = f"{exec} {klipper_path}/klippy/klippy.py "
|
||||
kproc = KlippyProcess(base_cmd, session_args)
|
||||
kproc.start()
|
||||
yield kproc
|
||||
kproc.stop()
|
||||
|
||||
@pytest.fixture(scope="class")
|
||||
def klippy(klippy_session: KlippyProcess,
|
||||
pytestconfig: pytest.Config):
|
||||
if pytestconfig.stash[need_klippy_restart]:
|
||||
pytestconfig.stash[need_klippy_restart] = False
|
||||
klippy_session.restart()
|
||||
return klippy_session
|
||||
|
||||
@pytest.fixture(scope="class")
|
||||
def path_args(request: pytest.FixtureRequest,
|
||||
session_args: Dict[str, pathlib.Path],
|
||||
pytestconfig: pytest.Config
|
||||
) -> Iterator[Dict[str, pathlib.Path]]:
|
||||
path_marker = request.node.get_closest_marker("run_paths")
|
||||
paths: Dict[str, Any] = {
|
||||
"moonraker_conf": "base_server.conf",
|
||||
"secrets": "secrets.ini",
|
||||
"printer_cfg": "base_printer.cfg",
|
||||
"klippy_uds": None,
|
||||
}
|
||||
if path_marker is not None:
|
||||
paths.update(path_marker.kwargs)
|
||||
tmp_path = session_args["temp_path"]
|
||||
cfg_path = session_args["config_path"]
|
||||
mconf_dest = session_args["moonraker.conf"]
|
||||
mconf_asset = ASSETS.joinpath(f"moonraker/{paths['moonraker_conf']}")
|
||||
pcfg_asset = ASSETS.joinpath(f"klipper/{paths['printer_cfg']}")
|
||||
last_uds = session_args["klippy_uds_path"]
|
||||
if paths["klippy_uds"] is not None:
|
||||
tmp_uds = tmp_path.joinpath(paths["klippy_uds"])
|
||||
session_args["klippy_uds_path"] = tmp_uds
|
||||
if (
|
||||
not mconf_asset.samefile(session_args["mconf_asset"]) or
|
||||
paths["klippy_uds"] is not None
|
||||
):
|
||||
session_args['mconf_asset'] = mconf_asset
|
||||
interpolate_config(mconf_asset, mconf_dest, session_args)
|
||||
if not pcfg_asset.samefile(session_args["pcfg_asset"]):
|
||||
pcfg_dest = session_args["printer.cfg"]
|
||||
session_args["pcfg_asset"] = pcfg_asset
|
||||
interpolate_config(pcfg_asset, pcfg_dest, session_args)
|
||||
pytestconfig.stash[need_klippy_restart] = True
|
||||
if paths["secrets"] != session_args["secrets_path"].name:
|
||||
secrets_asset = ASSETS.joinpath(f"moonraker/{paths['secrets']}")
|
||||
secrets_dest = tmp_path.joinpath(paths['secrets'])
|
||||
shutil.copy(secrets_asset, secrets_dest)
|
||||
session_args["secrets_path"] = secrets_dest
|
||||
if "moonraker_log" in paths:
|
||||
log_path = session_args["log_path"]
|
||||
session_args['moonraker.log'] = log_path.joinpath(
|
||||
paths["moonraker_log"])
|
||||
bkp_dest: pathlib.Path = cfg_path.joinpath(".moonraker.conf.bkp")
|
||||
if "moonraker_bkp" in paths:
|
||||
bkp_source = ASSETS.joinpath("moonraker/base_server.conf")
|
||||
bkp_dest = cfg_path.joinpath(paths["moonraker_bkp"])
|
||||
interpolate_config(bkp_source, bkp_dest, session_args)
|
||||
if "database" in paths:
|
||||
db_source = ASSETS.joinpath(f"moonraker/{paths['database']}")
|
||||
db_dest = session_args["database_path"]
|
||||
db_args = {"input": str(db_source), "destination": db_dest}
|
||||
dbtool.restore(db_args)
|
||||
yield session_args
|
||||
log = session_args.pop("moonraker.log", None)
|
||||
if log is not None and log.is_file():
|
||||
log.unlink()
|
||||
if bkp_dest.is_file():
|
||||
bkp_dest.unlink()
|
||||
for item in session_args["database_path"].iterdir():
|
||||
if item.is_file():
|
||||
item.unlink()
|
||||
session_args["klippy_uds_path"] = last_uds
|
||||
if paths["klippy_uds"] is not None:
|
||||
# restore the original uds path
|
||||
interpolate_config(mconf_asset, mconf_dest, session_args)
|
||||
|
||||
@pytest.fixture(scope="class")
|
||||
def base_server(path_args: Dict[str, pathlib.Path],
|
||||
event_loop: asyncio.AbstractEventLoop
|
||||
) -> Iterator[Server]:
|
||||
evtloop = EventLoop()
|
||||
args = {
|
||||
'config_file': str(path_args['moonraker.conf']),
|
||||
'log_file': str(path_args.get("moonraker.log", "")),
|
||||
'software_version': "moonraker-pytest"
|
||||
}
|
||||
ql = logger = None
|
||||
if args["log_file"]:
|
||||
ql, logger, warning = utils.setup_logging(args)
|
||||
if warning:
|
||||
args["log_warning"] = warning
|
||||
yield Server(args, logger, evtloop)
|
||||
if ql is not None:
|
||||
ql.stop()
|
||||
|
||||
@pytest_asyncio.fixture(scope="class")
|
||||
async def full_server(base_server: Server) -> AsyncIterator[Server]:
|
||||
base_server.load_components()
|
||||
ret = base_server.server_init(start_server=False)
|
||||
await asyncio.wait_for(ret, 4.)
|
||||
yield base_server
|
||||
if base_server.event_loop.aioloop.is_running():
|
||||
await base_server._stop_server(exit_reason="terminate")
|
||||
|
||||
@pytest_asyncio.fixture(scope="class")
|
||||
async def ready_server(full_server: Server, klippy: KlippyProcess):
|
||||
ret = full_server.start_server(connect_to_klippy=False)
|
||||
await asyncio.wait_for(ret, 4.)
|
||||
ret = full_server.klippy_connection.connect()
|
||||
await asyncio.wait_for(ret, 4.)
|
||||
yield full_server
|
||||
|
||||
@pytest_asyncio.fixture(scope="class")
|
||||
async def http_client() -> AsyncIterator[HttpClient]:
|
||||
client = HttpClient()
|
||||
yield client
|
||||
client.close()
|
||||
|
||||
@pytest_asyncio.fixture(scope="class")
|
||||
async def websocket_client(request: pytest.FixtureRequest
|
||||
) -> AsyncIterator[WebsocketClient]:
|
||||
conn_marker = request.node.get_closest_marker("no_ws_connect")
|
||||
client = WebsocketClient()
|
||||
if conn_marker is None:
|
||||
await client.connect()
|
||||
yield client
|
||||
client.close()
|
||||
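
Taken together, the fixtures above compose into a running stack: session_args builds the temporary tree, klippy_session boots Klippy, and ready_server yields a Moonraker Server connected to it. A hypothetical test class consuming them might look like the sketch below; it assumes pytest-asyncio is configured for these class-scoped fixtures, and the endpoint and assertion are illustrative:

import pytest

@pytest.mark.run_paths(moonraker_conf="base_server.conf",
                       printer_cfg="base_printer.cfg")
class TestServerInfo:
    @pytest.mark.asyncio
    async def test_info(self, ready_server, http_client):
        # GET /server/info through the HttpClient fixture; the returned
        # dict is the parsed JSON body with Moonraker's "result" wrapper.
        resp = await http_client.get("server/info")
        assert "result" in resp
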
5
tests/fixtures/__init__.py
vendored
Normal file
@@ -0,0 +1,5 @@
from .klippy_process import KlippyProcess
from .http_client import HttpClient
from .websocket_client import WebsocketClient

__all__ = ("KlippyProcess", "HttpClient", "WebsocketClient")
78
tests/fixtures/http_client.py
vendored
Normal file
@@ -0,0 +1,78 @@
from __future__ import annotations
import json
from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError
from tornado.httputil import HTTPHeaders
from tornado.escape import url_escape
from typing import Dict, Any, Optional

class HttpClient:
    error = HTTPError

    def __init__(self,
                 type: str = "http",
                 port: int = 7010
                 ) -> None:
        self.client = AsyncHTTPClient()
        assert type in ["http", "https"]
        self.prefix = f"{type}://127.0.0.1:{port}/"
        self.last_response_headers: HTTPHeaders = HTTPHeaders()

    def get_response_headers(self) -> HTTPHeaders:
        return self.last_response_headers

    async def _do_request(self,
                          method: str,
                          endpoint: str,
                          args: Dict[str, Any] = {},
                          headers: Optional[Dict[str, str]] = None
                          ) -> Dict[str, Any]:
        ep = "/".join([url_escape(part, plus=False) for part in
                       endpoint.lstrip("/").split("/")])
        url = self.prefix + ep
        method = method.upper()
        body: Optional[str] = "" if method == "POST" else None
        if args:
            if method in ["GET", "DELETE"]:
                parts = []
                for key, val in args.items():
                    if isinstance(val, list):
                        val = ",".join(val)
                    if val:
                        parts.append(f"{url_escape(key)}={url_escape(val)}")
                    else:
                        parts.append(url_escape(key))
                qs = "&".join(parts)
                url += "?" + qs
            else:
                body = json.dumps(args)
                if headers is None:
                    headers = {}
                headers["Content-Type"] = "application/json"
        request = HTTPRequest(url, method, headers, body=body,
                              request_timeout=2., connect_timeout=2.)
        ret = await self.client.fetch(request)
        self.last_response_headers = HTTPHeaders(ret.headers)
        return json.loads(ret.body)

    async def get(self,
                  endpoint: str,
                  args: Dict[str, Any] = {},
                  headers: Optional[Dict[str, str]] = None
                  ) -> Dict[str, Any]:
        return await self._do_request("GET", endpoint, args, headers)

    async def post(self,
                   endpoint: str,
                   args: Dict[str, Any] = {},
                   headers: Optional[Dict[str, str]] = None,
                   ) -> Dict[str, Any]:
        return await self._do_request("POST", endpoint, args, headers)

    async def delete(self,
                     endpoint: str,
                     args: Dict[str, Any] = {},
                     headers: Optional[Dict[str, str]] = None
                     ) -> Dict[str, Any]:
        return await self._do_request("DELETE", endpoint, args, headers)

    def close(self):
        self.client.close()
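
For orientation, a short usage sketch of HttpClient above (the endpoints are illustrative): GET and DELETE arguments are encoded into the query string, where a falsy value emits a bare key, while POST arguments are serialized as a JSON body with a Content-Type header.

async def example(client: HttpClient) -> None:
    # Query-string encoding: .../printer/objects/query?webhooks
    await client.get("printer/objects/query", {"webhooks": ""})
    # JSON body encoding: {"script": "M115"}
    await client.post("printer/gcode/script", {"script": "M115"})
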
81
tests/fixtures/klippy_process.py
vendored
Normal file
@@ -0,0 +1,81 @@
from __future__ import annotations
import pytest
import os
import subprocess
import time
import pathlib
import shlex

from typing import Dict, Optional

class KlippyProcess:
    def __init__(self,
                 base_cmd: str,
                 path_args: Dict[str, pathlib.Path],
                 ) -> None:
        self.base_cmd = base_cmd
        self.config_path = path_args['printer.cfg']
        self.orig_config = self.config_path
        self.dict_path = path_args["klipper.dict"]
        self.pty_path = path_args["klippy_pty_path"]
        self.uds_path = path_args["klippy_uds_path"]
        self.proc: Optional[subprocess.Popen] = None
        self.fd: int = -1

    def start(self):
        if self.proc is not None:
            return
        args = (
            f"{self.config_path} -o /dev/null -d {self.dict_path} "
            f"-a {self.uds_path} -I {self.pty_path}"
        )
        cmd = f"{self.base_cmd} {args}"
        cmd_parts = shlex.split(cmd)
        self.proc = subprocess.Popen(cmd_parts)
        for _ in range(250):
            if self.pty_path.exists():
                try:
                    self.fd = os.open(
                        str(self.pty_path), os.O_RDWR | os.O_NONBLOCK)
                except Exception:
                    pass
                else:
                    break
            time.sleep(.01)
        else:
            self.stop()
            pytest.fail("Unable to start Klippy process")
            return False
        return True

    def send_gcode(self, gcode: str) -> None:
        if self.fd == -1:
            return
        try:
            os.write(self.fd, f"{gcode}\n".encode())
        except Exception:
            pass

    def restart(self):
        self.stop()
        self.start()

    def stop(self):
        if self.fd != -1:
            os.close(self.fd)
            self.fd = -1
        if self.proc is not None:
            self.proc.terminate()
            try:
                self.proc.wait(2.)
            except subprocess.TimeoutExpired:
                self.proc.kill()
            self.proc = None

    def get_paths(self) -> Dict[str, pathlib.Path]:
        return {
            "printer.cfg": self.config_path,
            "klipper.dict": self.dict_path,
            "klippy_uds_path": self.uds_path,
            "klippy_pty_path": self.pty_path,
        }
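
A standalone usage sketch of KlippyProcess above; the paths below are hypothetical, since in the suite the session_args fixture supplies this mapping:

import pathlib

paths = {
    "printer.cfg": pathlib.Path("/tmp/printer.cfg"),
    "klipper.dict": pathlib.Path("/tmp/klipper.dict"),
    "klippy_uds_path": pathlib.Path("/tmp/klippy_uds"),
    "klippy_pty_path": pathlib.Path("/tmp/klippy_pty"),
}
kproc = KlippyProcess("python3 klippy/klippy.py", paths)
if kproc.start():               # polls until Klippy opens the pty
    kproc.send_gcode("STATUS")  # one line written to the pty
    kproc.stop()
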
136
tests/fixtures/websocket_client.py
vendored
Normal file
@@ -0,0 +1,136 @@
from __future__ import annotations
import pytest
import json
import asyncio
import tornado.websocket

from typing import (
    TYPE_CHECKING,
    Union,
    Tuple,
    Callable,
    Dict,
    List,
    Any,
    Optional,
)

if TYPE_CHECKING:
    from tornado.websocket import WebSocketClientConnection

class WebsocketError(Exception):
    def __init__(self, code, *args: object) -> None:
        super().__init__(*args)
        self.code = code

class WebsocketClient:
    error = WebsocketError

    def __init__(self,
                 type: str = "ws",
                 port: int = 7010
                 ) -> None:
        self.ws: Optional[WebSocketClientConnection] = None
        self.pending_requests: Dict[int, asyncio.Future] = {}
        self.notify_cbs: Dict[str, List[Callable[..., None]]] = {}
        assert type in ["ws", "wss"]
        self.url = f"{type}://127.0.0.1:{port}/websocket"

    async def connect(self, token: Optional[str] = None) -> None:
        url = self.url
        if token is not None:
            url += f"?token={token}"
        self.ws = await tornado.websocket.websocket_connect(
            url, connect_timeout=2.,
            on_message_callback=self._on_message_received)

    async def request(self,
                      remote_method: str,
                      args: Dict[str, Any] = {}
                      ) -> Dict[str, Any]:
        if self.ws is None:
            pytest.fail("Websocket Not Connected")
        loop = asyncio.get_running_loop()
        fut = loop.create_future()
        req, req_id = self._encode_request(remote_method, args)
        self.pending_requests[req_id] = fut
        await self.ws.write_message(req)
        return await asyncio.wait_for(fut, 2.)

    def _encode_request(self,
                        method: str,
                        args: Dict[str, Any]
                        ) -> Tuple[str, int]:
        request: Dict[str, Any] = {
            'jsonrpc': "2.0",
            'method': method,
        }
        if args:
            request['params'] = args
        req_id = id(request)
        request["id"] = req_id
        return json.dumps(request), req_id

    def _on_message_received(self, message: Union[str, bytes, None]) -> None:
        if isinstance(message, str):
            self._decode_jsonrpc(message)

    def _decode_jsonrpc(self, data: str) -> None:
        try:
            resp: Dict[str, Any] = json.loads(data)
        except json.JSONDecodeError:
            pytest.fail(f"Websocket JSON Decode Error: {data}")
        header = resp.get('jsonrpc', "")
        if header != "2.0":
            # Invalid Json, set error if we can get the id
            pytest.fail(f"Invalid jsonrpc header: {data}")
        req_id: Optional[int] = resp.get("id")
        method: Optional[str] = resp.get("method")
        if method is not None:
            if req_id is None:
                params = resp.get("params", [])
                if not isinstance(params, list):
                    pytest.fail("jsonrpc notification params "
                                f"should always be a list: {data}")
                if method in self.notify_cbs:
                    for func in self.notify_cbs[method]:
                        func(*params)
            else:
                # This is a request from the server (should not happen)
                pytest.fail(f"Server should not request from client: {data}")
        elif req_id is not None:
            pending_fut = self.pending_requests.pop(req_id, None)
            if pending_fut is None:
                # No future pending for this response
                return
            # This is a response
            if "result" in resp:
                pending_fut.set_result(resp["result"])
            elif "error" in resp:
                err = resp["error"]
                try:
                    code = err["code"]
                    msg = err["message"]
                except Exception:
                    pytest.fail(f"Invalid jsonrpc error: {data}")
                exc = WebsocketError(code, msg)
                pending_fut.set_exception(exc)
            else:
                pytest.fail(
                    f"Invalid jsonrpc packet, no result or error: {data}")
        else:
            # Invalid json
            pytest.fail(f"Invalid jsonrpc packet, no id: {data}")

    def register_notify_callback(self, name: str, callback) -> None:
        if name in self.notify_cbs:
            self.notify_cbs[name].append(callback)
        else:
            self.notify_cbs[name] = [callback]

    def close(self):
        for fut in self.pending_requests.values():
            if not fut.done():
                fut.set_exception(WebsocketError(
                    0, "Closing Websocket Client"))
        if self.ws is not None:
            self.ws.close(1000, "Test Complete")
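
The client frames every request as JSON-RPC 2.0, using id(request) as the request id and resolving the matching pending future when a response carrying the same id arrives. A hedged usage sketch; the method and notification names below are standard Moonraker identifiers, shown purely for illustration:

async def example(ws: WebsocketClient) -> None:
    # Sends {"jsonrpc": "2.0", "method": "server.info", "id": <id(request)>}
    # and awaits the matching "result" payload.
    info = await ws.request("server.info")
    # Notification params are dispatched positionally to each callback.
    ws.register_notify_callback("notify_klippy_ready", lambda *p: None)
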
70
tests/mocks/__init__.py
Normal file
@@ -0,0 +1,70 @@
from __future__ import annotations
import asyncio
from utils import ServerError
from .mock_gpio import MockGpiod

__all__ = ("MockReader", "MockWriter", "MockComponent", "MockWebsocket",
           "MockGpiod")

class MockWriter:
    def __init__(self, wait_drain: bool = False) -> None:
        self.wait_drain = wait_drain

    def write(self, data: str) -> None:
        pass

    async def drain(self) -> None:
        if self.wait_drain:
            evt = asyncio.Event()
            await evt.wait()
        else:
            raise ServerError("TestError")

class MockReader:
    def __init__(self, action: str = "") -> None:
        self.action = action
        self.eof = False

    def at_eof(self) -> bool:
        return self.eof

    async def readuntil(self, stop: bytes) -> bytes:
        if self.action == "wait":
            evt = asyncio.Event()
            await evt.wait()
            return b""
        elif self.action == "raise_error":
            raise ServerError("TestError")
        else:
            self.eof = True
            return b"NotJsonDecodable"

class MockComponent:
    def __init__(self,
                 err_init: bool = False,
                 err_exit: bool = False,
                 err_close: bool = False
                 ) -> None:
        self.err_init = err_init
        self.err_exit = err_exit
        self.err_close = err_close

    async def component_init(self):
        if self.err_init:
            raise ServerError("test")

    async def on_exit(self):
        if self.err_exit:
            raise ServerError("test")

    async def close(self):
        if self.err_close:
            raise ServerError("test")

class MockWebsocket:
    def __init__(self, fut: asyncio.Future) -> None:
        self.future = fut

    def queue_message(self, data: str):
        self.future.set_result(data)