From 92687fd811b1be46bf6dca2916a3407c13464480 Mon Sep 17 00:00:00 2001 From: Rainboooom <131979403+Rainboooom@users.noreply.github.com> Date: Thu, 15 Jun 2023 11:34:44 +0800 Subject: [PATCH] Delete scripts directory --- scripts/Dockerfile | 44 --- scripts/avrsim.py | 245 ------------- scripts/buildcommands.py | 618 ------------------------------- scripts/calibrate_shaper.py | 174 --------- scripts/canbus_query.py | 64 ---- scripts/check-gcc.sh | 18 - scripts/check_whitespace.py | 70 ---- scripts/check_whitespace.sh | 18 - scripts/checkstack.py | 243 ------------ scripts/ci-build.sh | 81 ---- scripts/ci-install.sh | 68 ---- scripts/flash-linux.sh | 26 -- scripts/flash-pru.sh | 20 - scripts/flash-sdcard.sh | 85 ----- scripts/flash_usb.py | 380 ------------------- scripts/graph_accelerometer.py | 259 ------------- scripts/graph_extruder.py | 193 ---------- scripts/graph_motion.py | 427 ---------------------- scripts/graph_shaper.py | 283 -------------- scripts/graph_temp_sensor.py | 168 --------- scripts/graphstats.py | 303 --------------- scripts/install-arch.sh | 102 ------ scripts/install-beaglebone.sh | 73 ---- scripts/install-centos.sh | 101 ----- scripts/install-debian.sh | 105 ------ scripts/install-octopi.sh | 103 ------ scripts/install-ubuntu-18.04.sh | 102 ------ scripts/klipper-mcu-start.sh | 78 ---- scripts/klipper-pru-start.sh | 119 ------ scripts/klipper-start.sh | 54 --- scripts/klipper-uninstall.sh | 23 -- scripts/klippy-requirements.txt | 10 - scripts/logextract.py | 610 ------------------------------- scripts/make_version.py | 31 -- scripts/motan/analyzers.py | 283 -------------- scripts/motan/data_logger.py | 204 ----------- scripts/motan/motan_graph.py | 149 -------- scripts/motan/readlog.py | 629 -------------------------------- 38 files changed, 6563 deletions(-) delete mode 100644 scripts/Dockerfile delete mode 100644 scripts/avrsim.py delete mode 100644 scripts/buildcommands.py delete mode 100644 scripts/calibrate_shaper.py delete mode 100644 scripts/canbus_query.py delete mode 100644 scripts/check-gcc.sh delete mode 100644 scripts/check_whitespace.py delete mode 100644 scripts/check_whitespace.sh delete mode 100644 scripts/checkstack.py delete mode 100644 scripts/ci-build.sh delete mode 100644 scripts/ci-install.sh delete mode 100644 scripts/flash-linux.sh delete mode 100644 scripts/flash-pru.sh delete mode 100644 scripts/flash-sdcard.sh delete mode 100644 scripts/flash_usb.py delete mode 100644 scripts/graph_accelerometer.py delete mode 100644 scripts/graph_extruder.py delete mode 100644 scripts/graph_motion.py delete mode 100644 scripts/graph_shaper.py delete mode 100644 scripts/graph_temp_sensor.py delete mode 100644 scripts/graphstats.py delete mode 100644 scripts/install-arch.sh delete mode 100644 scripts/install-beaglebone.sh delete mode 100644 scripts/install-centos.sh delete mode 100644 scripts/install-debian.sh delete mode 100644 scripts/install-octopi.sh delete mode 100644 scripts/install-ubuntu-18.04.sh delete mode 100644 scripts/klipper-mcu-start.sh delete mode 100644 scripts/klipper-pru-start.sh delete mode 100644 scripts/klipper-start.sh delete mode 100644 scripts/klipper-uninstall.sh delete mode 100644 scripts/klippy-requirements.txt delete mode 100644 scripts/logextract.py delete mode 100644 scripts/make_version.py delete mode 100644 scripts/motan/analyzers.py delete mode 100644 scripts/motan/data_logger.py delete mode 100644 scripts/motan/motan_graph.py delete mode 100644 scripts/motan/readlog.py diff --git a/scripts/Dockerfile b/scripts/Dockerfile 
deleted file mode 100644 index c178c1a..0000000 --- a/scripts/Dockerfile +++ /dev/null @@ -1,44 +0,0 @@ -# This is an example Dockerfile showing how it's possible to install Klipper in Docker. -# IMPORTANT: The docker build must be run from the root of the repo, either copy the -# Dockerfile to the root, or run docker build with "-f", for example: -# docker build . -f scripts/Dockerfile -t klipper -# Note that the host still needs to run Linux to connect the printers serial port to -# the container. -# When running, the serial port of your printer should be connected, including an -# argument such as: -# --device /dev/ttyUSB0:/dev/ttyUSB0 -# It's also required that your control program (eg: OctoPrint) be included in the same -# container as Docker does not allow sharing of the virtual serial port outside the -# container. -# The config should be in a file named "printer.cfg" in a directory mounted at: -# /home/klippy/.config/ -# For more Dockerfile examples with Klipper (and Octoprint) see: -# https://github.com/sillyfrog/OctoPrint-Klipper-mjpg-Dockerfile -FROM ubuntu:18.04 - -RUN apt-get update && \ - apt-get install -y sudo - -# Create user -RUN useradd -ms /bin/bash klippy && adduser klippy dialout -USER klippy - -#This fixes issues with the volume command setting wrong permissions -RUN mkdir /home/klippy/.config -VOLUME /home/klippy/.config - -### Klipper setup ### -WORKDIR /home/klippy - -COPY . klipper/ -USER root -RUN echo 'klippy ALL=(ALL:ALL) NOPASSWD: ALL' > /etc/sudoers.d/klippy && \ - chown klippy:klippy -R klipper -# This is to allow the install script to run without error -RUN ln -s /bin/true /bin/systemctl -USER klippy -RUN ./klipper/scripts/install-ubuntu-18.04.sh -# Clean up install script workaround -RUN sudo rm -f /bin/systemctl - -CMD ["/home/klippy/klippy-env/bin/python", "/home/klippy/klipper/klippy/klippy.py", "/home/klippy/.config/printer.cfg"] diff --git a/scripts/avrsim.py b/scripts/avrsim.py deleted file mode 100644 index e7f191e..0000000 --- a/scripts/avrsim.py +++ /dev/null @@ -1,245 +0,0 @@ -#!/usr/bin/env python3 -# Script to interact with simulavr by simulating a serial port. -# -# Copyright (C) 2015-2018 Kevin O'Connor -# -# This file may be distributed under the terms of the GNU GPLv3 license. -import sys, optparse, time, os, pty, fcntl, termios, errno -import pysimulavr - -SERIALBITS = 10 # 8N1 = 1 start, 8 data, 1 stop -SIMULAVR_FREQ = 10**9 - -# Class to read serial data from AVR serial transmit pin. -class SerialRxPin(pysimulavr.PySimulationMember, pysimulavr.Pin): - def __init__(self, baud, terminal): - pysimulavr.Pin.__init__(self) - pysimulavr.PySimulationMember.__init__(self) - self.terminal = terminal - self.sc = pysimulavr.SystemClock.Instance() - self.delay = SIMULAVR_FREQ // baud - self.current = 0 - self.pos = -1 - def SetInState(self, pin): - pysimulavr.Pin.SetInState(self, pin) - self.state = pin.outState - if self.pos < 0 and pin.outState == pin.LOW: - self.pos = 0 - self.sc.Add(self) - def DoStep(self, trueHwStep): - ishigh = self.state == self.HIGH - self.current |= ishigh << self.pos - self.pos += 1 - if self.pos == 1: - return int(self.delay * 1.5) - if self.pos >= SERIALBITS: - data = bytearray([(self.current >> 1) & 0xff]) - self.terminal.write(data) - self.pos = -1 - self.current = 0 - return -1 - return self.delay - -# Class to send serial data to AVR serial receive pin. 
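SerialRxPin above samples one bit every SIMULAVR_FREQ // baud ticks and rebuilds each byte from a 10-bit 8N1 frame with (current >> 1) & 0xff. A minimal standalone sketch of that framing arithmetic, using an assumed 250000 baud and an example frame:

SIMULAVR_FREQ = 10**9
BAUD = 250000
ticks_per_bit = SIMULAVR_FREQ // BAUD        # 4000 simulavr clock ticks per serial bit
frame = [0, 0, 1, 0, 1, 0, 0, 1, 0, 1]       # assumed frame: start, 8 data bits LSB-first, stop
current = 0
for pos, bit in enumerate(frame):
    current |= bit << pos                    # same accumulation as DoStep() above
data = (current >> 1) & 0xff                 # drop the start bit, keep the 8 data bits
assert ticks_per_bit == 4000 and data == 0x4A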
-class SerialTxPin(pysimulavr.PySimulationMember, pysimulavr.Pin): - def __init__(self, baud, terminal): - pysimulavr.Pin.__init__(self) - pysimulavr.PySimulationMember.__init__(self) - self.terminal = terminal - self.SetPin('H') - self.sc = pysimulavr.SystemClock.Instance() - self.delay = SIMULAVR_FREQ // baud - self.current = 0 - self.pos = 0 - self.queue = bytearray() - self.sc.Add(self) - def DoStep(self, trueHwStep): - if not self.pos: - if not self.queue: - data = self.terminal.read() - if not data: - return self.delay * 100 - self.queue.extend(data) - self.current = (self.queue.pop(0) << 1) | 0x200 - newstate = 'L' - if self.current & (1 << self.pos): - newstate = 'H' - self.SetPin(newstate) - self.pos += 1 - if self.pos >= SERIALBITS: - self.pos = 0 - return self.delay - -# Support for creating VCD trace files -class Tracing: - def __init__(self, filename, signals): - self.filename = filename - self.signals = signals - if not signals: - self.dman = None - return - self.dman = pysimulavr.DumpManager.Instance() - self.dman.SetSingleDeviceApp() - def show_help(self): - ostr = pysimulavr.ostringstream() - self.dman.save(ostr) - sys.stdout.write(ostr.str()) - sys.exit(1) - def load_options(self): - if self.dman is None: - return - if self.signals.strip() == '?': - self.show_help() - sigs = "\n".join(["+ " + s for s in self.signals.split(',')]) - self.dman.addDumpVCD(self.filename, sigs, "ns", False, False) - def start(self): - if self.dman is not None: - self.dman.start() - def finish(self): - if self.dman is not None: - self.dman.stopApplication() - -# Pace the simulation scaled to real time -class Pacing(pysimulavr.PySimulationMember): - def __init__(self, rate): - pysimulavr.PySimulationMember.__init__(self) - self.sc = pysimulavr.SystemClock.Instance() - self.pacing_rate = 1. / (rate * SIMULAVR_FREQ) - self.next_check_clock = 0 - self.rel_time = time.time() - self.best_offset = 0. - self.delay = SIMULAVR_FREQ // 10000 - self.sc.Add(self) - def DoStep(self, trueHwStep): - curtime = time.time() - clock = self.sc.GetCurrentTime() - offset = clock * self.pacing_rate - (curtime - self.rel_time) - self.best_offset = max(self.best_offset, offset) - if offset > 0.000050: - time.sleep(offset - 0.000040) - if clock >= self.next_check_clock: - self.rel_time -= min(self.best_offset, 0.) - self.next_check_clock = clock + self.delay * 500 - self.best_offset = -999999999. 
- return self.delay - -# Forward data from a terminal device to the serial port pins -class TerminalIO: - def __init__(self): - self.fd = -1 - def run(self, fd): - self.fd = fd - def write(self, data): - os.write(self.fd, data) - def read(self): - try: - return os.read(self.fd, 64) - except os.error as e: - if e.errno not in (errno.EAGAIN, errno.EWOULDBLOCK): - pysimulavr.SystemClock.Instance().stop() - return "" - -# Support for creating a pseudo-tty for emulating a serial port -def create_pty(ptyname): - mfd, sfd = pty.openpty() - try: - os.unlink(ptyname) - except os.error: - pass - os.symlink(os.ttyname(sfd), ptyname) - fcntl.fcntl(mfd, fcntl.F_SETFL - , fcntl.fcntl(mfd, fcntl.F_GETFL) | os.O_NONBLOCK) - tcattr = termios.tcgetattr(mfd) - tcattr[0] &= ~( - termios.IGNBRK | termios.BRKINT | termios.PARMRK | termios.ISTRIP | - termios.INLCR | termios.IGNCR | termios.ICRNL | termios.IXON) - tcattr[1] &= ~termios.OPOST - tcattr[3] &= ~( - termios.ECHO | termios.ECHONL | termios.ICANON | termios.ISIG | - termios.IEXTEN) - tcattr[2] &= ~(termios.CSIZE | termios.PARENB) - tcattr[2] |= termios.CS8 - tcattr[6][termios.VMIN] = 0 - tcattr[6][termios.VTIME] = 0 - termios.tcsetattr(mfd, termios.TCSAFLUSH, tcattr) - return mfd - -def main(): - usage = "%prog [options] " - opts = optparse.OptionParser(usage) - opts.add_option("-m", "--machine", type="string", dest="machine", - default="atmega644", help="type of AVR machine to simulate") - opts.add_option("-s", "--speed", type="int", dest="speed", default=16000000, - help="machine speed") - opts.add_option("-r", "--rate", type="float", dest="pacing_rate", - default=0., help="real-time pacing rate") - opts.add_option("-b", "--baud", type="int", dest="baud", default=250000, - help="baud rate of the emulated serial port") - opts.add_option("-t", "--trace", type="string", dest="trace", - help="signals to trace (? 
for help)") - opts.add_option("-p", "--port", type="string", dest="port", - default="/tmp/pseudoserial", - help="pseudo-tty device to create for serial port") - deffile = os.path.splitext(os.path.basename(sys.argv[0]))[0] + ".vcd" - opts.add_option("-f", "--tracefile", type="string", dest="tracefile", - default=deffile, help="filename to write signal trace to") - options, args = opts.parse_args() - if len(args) != 1: - opts.error("Incorrect number of arguments") - elffile = args[0] - proc = options.machine - ptyname = options.port - speed = options.speed - baud = options.baud - - # launch simulator - sc = pysimulavr.SystemClock.Instance() - trace = Tracing(options.tracefile, options.trace) - dev = pysimulavr.AvrFactory.instance().makeDevice(proc) - dev.Load(elffile) - dev.SetClockFreq(SIMULAVR_FREQ // speed) - sc.Add(dev) - pysimulavr.cvar.sysConHandler.SetUseExit(False) - trace.load_options() - - # Do optional real-time pacing - if options.pacing_rate: - pacing = Pacing(options.pacing_rate) - - # Setup terminal - io = TerminalIO() - - # Setup rx pin - rxpin = SerialRxPin(baud, io) - net = pysimulavr.Net() - net.Add(rxpin) - net.Add(dev.GetPin("D1")) - - # Setup tx pin - txpin = SerialTxPin(baud, io) - net2 = pysimulavr.Net() - net2.Add(dev.GetPin("D0")) - net2.Add(txpin) - - # Display start banner - msg = "Starting AVR simulation: machine=%s speed=%d\n" % (proc, speed) - msg += "Serial: port=%s baud=%d\n" % (ptyname, baud) - if options.trace: - msg += "Trace file: %s\n" % (options.tracefile,) - sys.stdout.write(msg) - sys.stdout.flush() - - # Create terminal device - fd = create_pty(ptyname) - - # Run loop - try: - io.run(fd) - trace.start() - sc.RunTimeRange(0x7fff0000ffff0000) - trace.finish() - finally: - os.unlink(ptyname) - -if __name__ == '__main__': - main() diff --git a/scripts/buildcommands.py b/scripts/buildcommands.py deleted file mode 100644 index 2acf3e8..0000000 --- a/scripts/buildcommands.py +++ /dev/null @@ -1,618 +0,0 @@ -#!/usr/bin/env python2 -# Script to handle build time requests embedded in C code. -# -# Copyright (C) 2016-2021 Kevin O'Connor -# -# This file may be distributed under the terms of the GNU GPLv3 license. -import sys, os, subprocess, optparse, logging, shlex, socket, time, traceback -import json, zlib -sys.path.append('./klippy') -import msgproto - -FILEHEADER = """ -/* DO NOT EDIT! This is an autogenerated file. See scripts/buildcommands.py. 
*/ - -#include "board/irq.h" -#include "board/pgm.h" -#include "command.h" -#include "compiler.h" -#include "initial_pins.h" -""" - -def error(msg): - sys.stderr.write(msg + "\n") - sys.exit(-1) - -Handlers = [] - - -###################################################################### -# C call list generation -###################################################################### - -# Create dynamic C functions that call a list of other C functions -class HandleCallList: - def __init__(self): - self.call_lists = {'ctr_run_initfuncs': []} - self.ctr_dispatch = { '_DECL_CALLLIST': self.decl_calllist } - def decl_calllist(self, req): - funcname, callname = req.split()[1:] - self.call_lists.setdefault(funcname, []).append(callname) - def update_data_dictionary(self, data): - pass - def generate_code(self, options): - code = [] - for funcname, funcs in self.call_lists.items(): - func_code = [' extern void %s(void);\n %s();' % (f, f) - for f in funcs] - if funcname == 'ctr_run_taskfuncs': - add_poll = ' irq_poll();\n' - func_code = [add_poll + fc for fc in func_code] - func_code.append(add_poll) - fmt = """ -void -%s(void) -{ - %s -} -""" - code.append(fmt % (funcname, "\n".join(func_code).strip())) - return "".join(code) - -Handlers.append(HandleCallList()) - - -###################################################################### -# Enumeration and static string generation -###################################################################### - -STATIC_STRING_MIN = 2 - -# Generate a dynamic string to integer mapping -class HandleEnumerations: - def __init__(self): - self.static_strings = [] - self.enumerations = {} - self.ctr_dispatch = { - '_DECL_STATIC_STR': self.decl_static_str, - 'DECL_ENUMERATION': self.decl_enumeration, - 'DECL_ENUMERATION_RANGE': self.decl_enumeration_range - } - def add_enumeration(self, enum, name, value): - enums = self.enumerations.setdefault(enum, {}) - if name in enums and enums[name] != value: - error("Conflicting definition for enumeration '%s %s'" % ( - enum, name)) - enums[name] = value - def decl_enumeration(self, req): - enum, name, value = req.split()[1:] - self.add_enumeration(enum, name, int(value, 0)) - def decl_enumeration_range(self, req): - enum, name, value, count = req.split()[1:] - self.add_enumeration(enum, name, (int(value, 0), int(count, 0))) - def decl_static_str(self, req): - msg = req.split(None, 1)[1] - if msg not in self.static_strings: - self.static_strings.append(msg) - def update_data_dictionary(self, data): - for i, s in enumerate(self.static_strings): - self.add_enumeration("static_string_id", s, i + STATIC_STRING_MIN) - data['enumerations'] = self.enumerations - def generate_code(self, options): - code = [] - for i, s in enumerate(self.static_strings): - code.append(' if (__builtin_strcmp(str, "%s") == 0)\n' - ' return %d;\n' % (s, i + STATIC_STRING_MIN)) - fmt = """ -uint8_t __always_inline -ctr_lookup_static_string(const char *str) -{ - %s - return 0xff; -} -""" - return fmt % ("".join(code).strip(),) - -HandlerEnumerations = HandleEnumerations() -Handlers.append(HandlerEnumerations) - - -###################################################################### -# Constants -###################################################################### - -# Allow adding build time constants to the data dictionary -class HandleConstants: - def __init__(self): - self.constants = {} - self.ctr_dispatch = { - 'DECL_CONSTANT': self.decl_constant, - 'DECL_CONSTANT_STR': self.decl_constant_str, - } - def set_value(self, name, value): - if name 
in self.constants and self.constants[name] != value: - error("Conflicting definition for constant '%s'" % name) - self.constants[name] = value - def decl_constant(self, req): - name, value = req.split()[1:] - self.set_value(name, int(value, 0)) - def decl_constant_str(self, req): - name, value = req.split(None, 2)[1:] - value = value.strip() - if value.startswith('"') and value.endswith('"'): - value = value[1:-1] - self.set_value(name, value) - def update_data_dictionary(self, data): - data['config'] = self.constants - def generate_code(self, options): - return "" - -HandlerConstants = HandleConstants() -Handlers.append(HandlerConstants) - - -###################################################################### -# Initial pins -###################################################################### - -class HandleInitialPins: - def __init__(self): - self.initial_pins = [] - self.ctr_dispatch = { 'DECL_INITIAL_PINS': self.decl_initial_pins } - def decl_initial_pins(self, req): - pins = req.split(None, 1)[1].strip() - if pins.startswith('"') and pins.endswith('"'): - pins = pins[1:-1] - if pins: - self.initial_pins = [p.strip() for p in pins.split(',')] - HandlerConstants.decl_constant_str( - "_DECL_CONSTANT_STR INITIAL_PINS " - + ','.join(self.initial_pins)) - def update_data_dictionary(self, data): - pass - def map_pins(self): - if not self.initial_pins: - return [] - mp = msgproto.MessageParser() - mp.fill_enumerations(HandlerEnumerations.enumerations) - pinmap = mp.get_enumerations().get('pin', {}) - out = [] - for p in self.initial_pins: - flag = "IP_OUT_HIGH" - if p.startswith('!'): - flag = "0" - p = p[1:].strip() - if p not in pinmap: - error("Unknown initial pin '%s'" % (p,)) - out.append("\n {%d, %s}, // %s" % (pinmap[p], flag, p)) - return out - def generate_code(self, options): - out = self.map_pins() - fmt = """ -const struct initial_pin_s initial_pins[] PROGMEM = {%s -}; -const int initial_pins_size PROGMEM = ARRAY_SIZE(initial_pins); -""" - return fmt % (''.join(out),) - -Handlers.append(HandleInitialPins()) - - -###################################################################### -# ARM IRQ vector table generation -###################################################################### - -# Create ARM IRQ vector table from interrupt handler declarations -class Handle_arm_irq: - def __init__(self): - self.irqs = {} - self.ctr_dispatch = { 'DECL_ARMCM_IRQ': self.decl_armcm_irq } - def decl_armcm_irq(self, req): - func, num = req.split()[1:] - num = int(num, 0) - if num in self.irqs and self.irqs[num] != func: - error("Conflicting IRQ definition %d (old %s new %s)" - % (num, self.irqs[num], func)) - self.irqs[num] = func - def update_data_dictionary(self, data): - pass - def generate_code(self, options): - armcm_offset = 16 - if 1 - armcm_offset not in self.irqs: - # The ResetHandler was not defined - don't build VectorTable - return "" - max_irq = max(self.irqs.keys()) - table = [" DefaultHandler,\n"] * (max_irq + armcm_offset + 1) - defs = [] - for num, func in self.irqs.items(): - if num < 1 - armcm_offset: - error("Invalid IRQ %d (%s)" % (num, func)) - defs.append("extern void %s(void);\n" % (func,)) - table[num + armcm_offset] = " %s,\n" % (func,) - table[0] = " &_stack_end,\n" - fmt = """ -extern void DefaultHandler(void); -extern uint32_t _stack_end; -%s -const void *VectorTable[] __visible __section(".vector_table") = { -%s}; -""" - return fmt % (''.join(defs), ''.join(table)) - -Handlers.append(Handle_arm_irq()) - - 
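Each handler above registers the request keywords it understands in its ctr_dispatch table and parses the remainder of the request line with split(). A minimal sketch of that step for DECL_ENUMERATION, using assumed example request lines:

enumerations = {}

def decl_enumeration(req):
    # Mirrors HandleEnumerations.decl_enumeration() above:
    # "DECL_ENUMERATION <enum> <name> <value>", value in any integer base.
    enum, name, value = req.split()[1:]
    enumerations.setdefault(enum, {})[name] = int(value, 0)

decl_enumeration("DECL_ENUMERATION spi_bus spi1 0x1")    # assumed example requests
decl_enumeration("DECL_ENUMERATION spi_bus spi2 0x2")
assert enumerations == {'spi_bus': {'spi1': 1, 'spi2': 2}}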
-###################################################################### -# Wire protocol commands and responses -###################################################################### - -# Dynamic command and response registration -class HandleCommandGeneration: - def __init__(self): - self.commands = {} - self.encoders = [] - self.msg_to_id = dict(msgproto.DefaultMessages) - self.messages_by_name = { m.split()[0]: m for m in self.msg_to_id } - self.all_param_types = {} - self.ctr_dispatch = { - 'DECL_COMMAND_FLAGS': self.decl_command, - '_DECL_ENCODER': self.decl_encoder, - '_DECL_OUTPUT': self.decl_output - } - def decl_command(self, req): - funcname, flags, msgname = req.split()[1:4] - if msgname in self.commands: - error("Multiple definitions for command '%s'" % msgname) - self.commands[msgname] = (funcname, flags, msgname) - msg = req.split(None, 3)[3] - m = self.messages_by_name.get(msgname) - if m is not None and m != msg: - error("Conflicting definition for command '%s'" % msgname) - self.messages_by_name[msgname] = msg - def decl_encoder(self, req): - msg = req.split(None, 1)[1] - msgname = msg.split()[0] - m = self.messages_by_name.get(msgname) - if m is not None and m != msg: - error("Conflicting definition for message '%s'" % msgname) - self.messages_by_name[msgname] = msg - self.encoders.append((msgname, msg)) - def decl_output(self, req): - msg = req.split(None, 1)[1] - self.encoders.append((None, msg)) - def create_message_ids(self): - # Create unique ids for each message type - msgid = max(self.msg_to_id.values()) - mlist = list(self.commands.keys()) + [m for n, m in self.encoders] - for msgname in mlist: - msg = self.messages_by_name.get(msgname, msgname) - if msg not in self.msg_to_id: - msgid += 1 - self.msg_to_id[msg] = msgid - if msgid >= 128: - # The mcu currently assumes all message ids encode to one byte - error("Too many message ids") - def update_data_dictionary(self, data): - # Handle message ids over 96 (they are decoded as negative numbers) - msg_to_tag = {msg: msgid if msgid < 96 else msgid - 128 - for msg, msgid in self.msg_to_id.items()} - command_tags = [msg_to_tag[msg] - for msgname, msg in self.messages_by_name.items() - if msgname in self.commands] - response_tags = [msg_to_tag[msg] - for msgname, msg in self.messages_by_name.items() - if msgname not in self.commands] - data['commands'] = { msg: msgtag for msg, msgtag in msg_to_tag.items() - if msgtag in command_tags } - data['responses'] = { msg: msgtag for msg, msgtag in msg_to_tag.items() - if msgtag in response_tags } - output = {msg: msgtag for msg, msgtag in msg_to_tag.items() - if msgtag not in command_tags and msgtag not in response_tags} - if output: - data['output'] = output - def build_parser(self, msgid, msgformat, msgtype): - if msgtype == "output": - param_types = msgproto.lookup_output_params(msgformat) - comment = "Output: " + msgformat - else: - param_types = [t for name, t in msgproto.lookup_params(msgformat)] - comment = msgformat - params = '0' - types = tuple([t.__class__.__name__ for t in param_types]) - if types: - paramid = self.all_param_types.get(types) - if paramid is None: - paramid = len(self.all_param_types) - self.all_param_types[types] = paramid - params = 'command_parameters%d' % (paramid,) - out = """ - // %s - .msg_id=%d, - .num_params=%d, - .param_types = %s, -""" % (comment, msgid, len(types), params) - if msgtype == 'response': - num_args = (len(types) + types.count('PT_progmem_buffer') - + types.count('PT_buffer')) - out += " .num_args=%d," % (num_args,) - else: - 
max_size = min(msgproto.MESSAGE_MAX, - (msgproto.MESSAGE_MIN + 1 - + sum([t.max_length for t in param_types]))) - out += " .max_size=%d," % (max_size,) - return out - def generate_responses_code(self): - encoder_defs = [] - output_code = [] - encoder_code = [] - did_output = {} - for msgname, msg in self.encoders: - msgid = self.msg_to_id[msg] - if msgid in did_output: - continue - did_output[msgid] = True - code = (' if (__builtin_strcmp(str, "%s") == 0)\n' - ' return &command_encoder_%s;\n' % (msg, msgid)) - if msgname is None: - parsercode = self.build_parser(msgid, msg, 'output') - output_code.append(code) - else: - parsercode = self.build_parser(msgid, msg, 'command') - encoder_code.append(code) - encoder_defs.append( - "const struct command_encoder command_encoder_%s PROGMEM = {" - " %s\n};\n" % ( - msgid, parsercode)) - fmt = """ -%s - -const __always_inline struct command_encoder * -ctr_lookup_encoder(const char *str) -{ - %s - return NULL; -} - -const __always_inline struct command_encoder * -ctr_lookup_output(const char *str) -{ - %s - return NULL; -} -""" - return fmt % ("".join(encoder_defs).strip(), - "".join(encoder_code).strip(), - "".join(output_code).strip()) - def generate_commands_code(self): - cmd_by_id = { - self.msg_to_id[self.messages_by_name.get(msgname, msgname)]: cmd - for msgname, cmd in self.commands.items() - } - max_cmd_msgid = max(cmd_by_id.keys()) - index = [] - externs = {} - for msgid in range(max_cmd_msgid+1): - if msgid not in cmd_by_id: - index.append(" {\n},") - continue - funcname, flags, msgname = cmd_by_id[msgid] - msg = self.messages_by_name[msgname] - externs[funcname] = 1 - parsercode = self.build_parser(msgid, msg, 'response') - index.append(" {%s\n .flags=%s,\n .func=%s\n}," % ( - parsercode, flags, funcname)) - index = "".join(index).strip() - externs = "\n".join(["extern void "+funcname+"(uint32_t*);" - for funcname in sorted(externs)]) - fmt = """ -%s - -const struct command_parser command_index[] PROGMEM = { -%s -}; - -const uint8_t command_index_size PROGMEM = ARRAY_SIZE(command_index); -""" - return fmt % (externs, index) - def generate_param_code(self): - sorted_param_types = sorted( - [(i, a) for a, i in self.all_param_types.items()]) - params = [''] - for paramid, argtypes in sorted_param_types: - params.append( - 'static const uint8_t command_parameters%d[] PROGMEM = {\n' - ' %s };' % ( - paramid, ', '.join(argtypes),)) - params.append('') - return "\n".join(params) - def generate_code(self, options): - self.create_message_ids() - parsercode = self.generate_responses_code() - cmdcode = self.generate_commands_code() - paramcode = self.generate_param_code() - return paramcode + parsercode + cmdcode - -Handlers.append(HandleCommandGeneration()) - - -###################################################################### -# Version generation -###################################################################### - -# Run program and return the specified output -def check_output(prog): - logging.debug("Running %s" % (repr(prog),)) - try: - process = subprocess.Popen(shlex.split(prog), stdout=subprocess.PIPE) - output = process.communicate()[0] - retcode = process.poll() - except OSError: - logging.debug("Exception on run: %s" % (traceback.format_exc(),)) - return "" - logging.debug("Got (code=%s): %s" % (retcode, repr(output))) - if retcode: - return "" - try: - return str(output.decode('utf8')) - except UnicodeError: - logging.debug("Exception on decode: %s" % (traceback.format_exc(),)) - return "" - -# Obtain version info from "git" 
program -def git_version(): - if not os.path.exists('.git'): - logging.debug("No '.git' file/directory found") - return "" - ver = check_output("git describe --always --tags --long --dirty").strip() - logging.debug("Got git version: %s" % (repr(ver),)) - return ver - -def build_version(extra, cleanbuild): - version = git_version() - if not version: - cleanbuild = False - version = "?" - elif 'dirty' in version: - cleanbuild = False - if not cleanbuild: - btime = time.strftime("%Y%m%d_%H%M%S") - hostname = socket.gethostname() - version = "%s-%s-%s" % (version, btime, hostname) - return version + extra - -# Run "tool --version" for each specified tool and extract versions -def tool_versions(tools): - tools = [t.strip() for t in tools.split(';')] - versions = ['', ''] - success = 0 - for tool in tools: - # Extract first line from "tool --version" output - verstr = check_output("%s --version" % (tool,)).split('\n')[0] - # Check if this tool looks like a binutils program - isbinutils = 0 - if verstr.startswith('GNU '): - isbinutils = 1 - verstr = verstr[4:] - # Extract version information and exclude program name - if ' ' not in verstr: - continue - prog, ver = verstr.split(' ', 1) - if not prog or not ver: - continue - # Check for any version conflicts - if versions[isbinutils] and versions[isbinutils] != ver: - logging.debug("Mixed version %s vs %s" % ( - repr(versions[isbinutils]), repr(ver))) - versions[isbinutils] = "mixed" - continue - versions[isbinutils] = ver - success += 1 - cleanbuild = versions[0] and versions[1] and success == len(tools) - return cleanbuild, "gcc: %s binutils: %s" % (versions[0], versions[1]) - -# Add version information to the data dictionary -class HandleVersions: - def __init__(self): - self.ctr_dispatch = {} - self.toolstr = self.version = "" - def update_data_dictionary(self, data): - data['version'] = self.version - data['build_versions'] = self.toolstr - def generate_code(self, options): - cleanbuild, self.toolstr = tool_versions(options.tools) - self.version = build_version(options.extra, cleanbuild) - sys.stdout.write("Version: %s\n" % (self.version,)) - return "\n// version: %s\n// build_versions: %s\n" % ( - self.version, self.toolstr) - -Handlers.append(HandleVersions()) - - -###################################################################### -# Identify data dictionary generation -###################################################################### - -# Automatically generate the wire protocol data dictionary -class HandleIdentify: - def __init__(self): - self.ctr_dispatch = {} - def update_data_dictionary(self, data): - pass - def generate_code(self, options): - # Generate data dictionary - data = {} - for h in Handlers: - h.update_data_dictionary(data) - datadict = json.dumps(data, separators=(',', ':'), sort_keys=True) - - # Write data dictionary - if options.write_dictionary: - f = open(options.write_dictionary, 'w') - f.write(datadict) - f.close() - - # Format compressed info into C code - zdatadict = bytearray(zlib.compress(datadict.encode(), 9)) - out = [] - for i in range(len(zdatadict)): - if i % 8 == 0: - out.append('\n ') - out.append(" 0x%02x," % (zdatadict[i],)) - fmt = """ -const uint8_t command_identify_data[] PROGMEM = {%s -}; - -// Identify size = %d (%d uncompressed) -const uint32_t command_identify_size PROGMEM - = ARRAY_SIZE(command_identify_data); -""" - return fmt % (''.join(out), len(zdatadict), len(datadict)) - -Handlers.append(HandleIdentify()) - - -###################################################################### -# 
Main code -###################################################################### - -def main(): - usage = "%prog [options] " - opts = optparse.OptionParser(usage) - opts.add_option("-e", "--extra", dest="extra", default="", - help="extra version string to append to version") - opts.add_option("-d", dest="write_dictionary", - help="file to write mcu protocol dictionary") - opts.add_option("-t", "--tools", dest="tools", default="", - help="list of build programs to extract version from") - opts.add_option("-v", action="store_true", dest="verbose", - help="enable debug messages") - - options, args = opts.parse_args() - if len(args) != 2: - opts.error("Incorrect arguments") - incmdfile, outcfile = args - if options.verbose: - logging.basicConfig(level=logging.DEBUG) - - # Parse request file - ctr_dispatch = { k: v for h in Handlers for k, v in h.ctr_dispatch.items() } - f = open(incmdfile, 'r') - data = f.read() - f.close() - for req in data.split('\n'): - req = req.lstrip() - if not req: - continue - cmd = req.split()[0] - if cmd not in ctr_dispatch: - error("Unknown build time command '%s'" % cmd) - ctr_dispatch[cmd](req) - - # Write output - code = "".join([FILEHEADER] + [h.generate_code(options) for h in Handlers]) - f = open(outcfile, 'w') - f.write(code) - f.close() - -if __name__ == '__main__': - main() diff --git a/scripts/calibrate_shaper.py b/scripts/calibrate_shaper.py deleted file mode 100644 index 8a0fcdf..0000000 --- a/scripts/calibrate_shaper.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/env python3 -# Shaper auto-calibration script -# -# Copyright (C) 2020 Dmitry Butyugin -# Copyright (C) 2020 Kevin O'Connor -# -# This file may be distributed under the terms of the GNU GPLv3 license. -from __future__ import print_function -import importlib, optparse, os, sys -from textwrap import wrap -import numpy as np, matplotlib -sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), - '..', 'klippy')) -shaper_calibrate = importlib.import_module('.shaper_calibrate', 'extras') - -MAX_TITLE_LENGTH=65 - -def parse_log(logname): - with open(logname) as f: - for header in f: - if not header.startswith('#'): - break - if not header.startswith('freq,psd_x,psd_y,psd_z,psd_xyz'): - # Raw accelerometer data - return np.loadtxt(logname, comments='#', delimiter=',') - # Parse power spectral density data - data = np.loadtxt(logname, skiprows=1, comments='#', delimiter=',') - calibration_data = shaper_calibrate.CalibrationData( - freq_bins=data[:,0], psd_sum=data[:,4], - psd_x=data[:,1], psd_y=data[:,2], psd_z=data[:,3]) - calibration_data.set_numpy(np) - # If input shapers are present in the CSV file, the frequency - # response is already normalized to input frequencies - if 'mzv' not in header: - calibration_data.normalize_to_frequencies() - return calibration_data - -###################################################################### -# Shaper calibration -###################################################################### - -# Find the best shaper parameters -def calibrate_shaper(datas, csv_output, max_smoothing): - helper = shaper_calibrate.ShaperCalibrate(printer=None) - if isinstance(datas[0], shaper_calibrate.CalibrationData): - calibration_data = datas[0] - for data in datas[1:]: - calibration_data.add_data(data) - else: - # Process accelerometer data - calibration_data = helper.process_accelerometer_data(datas[0]) - for data in datas[1:]: - calibration_data.add_data(helper.process_accelerometer_data(data)) - calibration_data.normalize_to_frequencies() - shaper, 
all_shapers = helper.find_best_shaper( - calibration_data, max_smoothing, print) - print("Recommended shaper is %s @ %.1f Hz" % (shaper.name, shaper.freq)) - if csv_output is not None: - helper.save_calibration_data( - csv_output, calibration_data, all_shapers) - return shaper.name, all_shapers, calibration_data - -###################################################################### -# Plot frequency response and suggested input shapers -###################################################################### - -def plot_freq_response(lognames, calibration_data, shapers, - selected_shaper, max_freq): - freqs = calibration_data.freq_bins - psd = calibration_data.psd_sum[freqs <= max_freq] - px = calibration_data.psd_x[freqs <= max_freq] - py = calibration_data.psd_y[freqs <= max_freq] - pz = calibration_data.psd_z[freqs <= max_freq] - freqs = freqs[freqs <= max_freq] - - fontP = matplotlib.font_manager.FontProperties() - fontP.set_size('x-small') - - fig, ax = matplotlib.pyplot.subplots() - ax.set_xlabel('Frequency, Hz') - ax.set_xlim([0, max_freq]) - ax.set_ylabel('Power spectral density') - - ax.plot(freqs, psd, label='X+Y+Z', color='purple') - ax.plot(freqs, px, label='X', color='red') - ax.plot(freqs, py, label='Y', color='green') - ax.plot(freqs, pz, label='Z', color='blue') - - title = "Frequency response and shapers (%s)" % (', '.join(lognames)) - ax.set_title("\n".join(wrap(title, MAX_TITLE_LENGTH))) - ax.xaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(5)) - ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator()) - ax.ticklabel_format(axis='y', style='scientific', scilimits=(0,0)) - ax.grid(which='major', color='grey') - ax.grid(which='minor', color='lightgrey') - - ax2 = ax.twinx() - ax2.set_ylabel('Shaper vibration reduction (ratio)') - best_shaper_vals = None - for shaper in shapers: - label = "%s (%.1f Hz, vibr=%.1f%%, sm~=%.2f, accel<=%.f)" % ( - shaper.name.upper(), shaper.freq, - shaper.vibrs * 100., shaper.smoothing, - round(shaper.max_accel / 100.) * 100.) 
- linestyle = 'dotted' - if shaper.name == selected_shaper: - linestyle = 'dashdot' - best_shaper_vals = shaper.vals - ax2.plot(freqs, shaper.vals, label=label, linestyle=linestyle) - ax.plot(freqs, psd * best_shaper_vals, - label='After\nshaper', color='cyan') - # A hack to add a human-readable shaper recommendation to legend - ax2.plot([], [], ' ', - label="Recommended shaper: %s" % (selected_shaper.upper())) - - ax.legend(loc='upper left', prop=fontP) - ax2.legend(loc='upper right', prop=fontP) - - fig.tight_layout() - return fig - -###################################################################### -# Startup -###################################################################### - -def setup_matplotlib(output_to_file): - global matplotlib - if output_to_file: - matplotlib.rcParams.update({'figure.autolayout': True}) - matplotlib.use('Agg') - import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager - import matplotlib.ticker - -def main(): - # Parse command-line arguments - usage = "%prog [options] " - opts = optparse.OptionParser(usage) - opts.add_option("-o", "--output", type="string", dest="output", - default=None, help="filename of output graph") - opts.add_option("-c", "--csv", type="string", dest="csv", - default=None, help="filename of output csv file") - opts.add_option("-f", "--max_freq", type="float", default=200., - help="maximum frequency to graph") - opts.add_option("-s", "--max_smoothing", type="float", default=None, - help="maximum shaper smoothing to allow") - options, args = opts.parse_args() - if len(args) < 1: - opts.error("Incorrect number of arguments") - if options.max_smoothing is not None and options.max_smoothing < 0.05: - opts.error("Too small max_smoothing specified (must be at least 0.05)") - - # Parse data - datas = [parse_log(fn) for fn in args] - - # Calibrate shaper and generate outputs - selected_shaper, shapers, calibration_data = calibrate_shaper( - datas, options.csv, options.max_smoothing) - - if not options.csv or options.output: - # Draw graph - setup_matplotlib(options.output is not None) - - fig = plot_freq_response(args, calibration_data, shapers, - selected_shaper, options.max_freq) - - # Show graph - if options.output is None: - matplotlib.pyplot.show() - else: - fig.set_size_inches(8, 6) - fig.savefig(options.output) - -if __name__ == '__main__': - main() diff --git a/scripts/canbus_query.py b/scripts/canbus_query.py deleted file mode 100644 index 52dd470..0000000 --- a/scripts/canbus_query.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python2 -# Tool to query CAN bus uuids -# -# Copyright (C) 2021 Kevin O'Connor -# -# This file may be distributed under the terms of the GNU GPLv3 license. -import sys, os, optparse, time -import can - -CANBUS_ID_ADMIN = 0x3f0 -CMD_QUERY_UNASSIGNED = 0x00 -RESP_NEED_NODEID = 0x20 -CMD_SET_KLIPPER_NODEID = 0x01 -CMD_SET_CANBOOT_NODEID = 0x11 - -def query_unassigned(canbus_iface): - # Open CAN socket - filters = [{"can_id": CANBUS_ID_ADMIN + 1, "can_mask": 0x7ff, - "extended": False}] - bus = can.interface.Bus(channel=canbus_iface, can_filters=filters, - bustype='socketcan') - # Send query - msg = can.Message(arbitration_id=CANBUS_ID_ADMIN, - data=[CMD_QUERY_UNASSIGNED], is_extended_id=False) - bus.send(msg) - # Read responses - found_ids = {} - start_time = curtime = time.time() - while 1: - tdiff = start_time + 2. 
- curtime - if tdiff <= 0.: - break - msg = bus.recv(tdiff) - curtime = time.time() - if (msg is None or msg.arbitration_id != CANBUS_ID_ADMIN + 1 - or msg.dlc < 7 or msg.data[0] != RESP_NEED_NODEID): - continue - uuid = sum([v << ((5-i)*8) for i, v in enumerate(msg.data[1:7])]) - if uuid in found_ids: - continue - found_ids[uuid] = 1 - AppNames = { - CMD_SET_KLIPPER_NODEID: "Klipper", - CMD_SET_CANBOOT_NODEID: "CanBoot" - } - app_id = CMD_SET_KLIPPER_NODEID - if msg.dlc > 7: - app_id = msg.data[7] - app_name = AppNames.get(app_id, "Unknown") - sys.stdout.write("Found canbus_uuid=%012x, Application: %s\n" - % (uuid, app_name)) - sys.stdout.write("Total %d uuids found\n" % (len(found_ids,))) - -def main(): - usage = "%prog [options] " - opts = optparse.OptionParser(usage) - options, args = opts.parse_args() - if len(args) != 1: - opts.error("Incorrect number of arguments") - canbus_iface = args[0] - query_unassigned(canbus_iface) - -if __name__ == '__main__': - main() diff --git a/scripts/check-gcc.sh b/scripts/check-gcc.sh deleted file mode 100644 index 2d89d98..0000000 --- a/scripts/check-gcc.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh -# This script checks for a broken Ubuntu 18.04 arm-none-eabi-gcc compile - -f1="$1" -f2="$2" - -s1=`readelf -A "$f1" | grep "Tag_ARM_ISA_use"` -s2=`readelf -A "$f2" | grep "Tag_ARM_ISA_use"` - -if [ "$s1" != "$s2" ]; then - echo "" - echo "ERROR: The compiler failed to correctly compile Klipper" - echo "It will be necessary to upgrade the compiler" - echo "See: https://bugs.launchpad.net/ubuntu/+source/newlib/+bug/1767223" - echo "" - rm -f "$f1" - exit 99 -fi diff --git a/scripts/check_whitespace.py b/scripts/check_whitespace.py deleted file mode 100644 index fe8c7ae..0000000 --- a/scripts/check_whitespace.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python3 -# Check files for whitespace problems -# -# Copyright (C) 2018 Kevin O'Connor -# -# This file may be distributed under the terms of the GNU GPLv3 license. 
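check_whitespace.py (whose header appears above and whose body follows) rejects control characters by their Unicode category, allowing tabs only in Makefiles. A minimal sketch of that category test, with assumed sample lines:

import unicodedata

def has_control_chars(line):
    # Same test as in check_file() below: any character whose Unicode
    # category starts with 'C' (control/format) is flagged.
    return any(unicodedata.category(c).startswith('C') for c in line)

assert has_control_chars("bad\tline")        # '\t' is category 'Cc'
assert not has_control_chars("good line")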
-import sys, os.path, unicodedata - -HaveError = False - -def report_error(filename, lineno, msg): - global HaveError - if not HaveError: - sys.stderr.write("\n\nERROR:\nERROR: White space errors\nERROR:\n") - HaveError = True - sys.stderr.write("%s:%d: %s\n" % (filename, lineno + 1, msg)) - -def check_file(filename): - # Open and read file - try: - f = open(filename, 'rb') - data = f.read() - f.close() - except IOError: - return - if not data: - # Empty files are okay - return - # Do checks - is_source_code = any([filename.endswith(s) for s in ['.c', '.h', '.py']]) - lineno = 0 - for lineno, line in enumerate(data.split(b'\n')): - # Verify line is valid utf-8 - try: - line = line.decode('utf-8') - except UnicodeDecodeError: - report_error(filename, lineno, "Found non utf-8 character") - continue - # Check for control characters - for c in line: - if unicodedata.category(c).startswith('C'): - char_name = repr(c) - if c == '\t': - if os.path.basename(filename).lower() == 'makefile': - continue - char_name = 'tab' - report_error(filename, lineno, "Invalid %s character" % ( - char_name,)) - break - # Check for trailing space - if line.endswith(' ') or line.endswith('\t'): - report_error(filename, lineno, "Line has trailing spaces") - # Check for more than 80 characters - if is_source_code and len(line) > 80: - report_error(filename, lineno, "Line longer than 80 characters") - if not data.endswith(b'\n'): - report_error(filename, lineno, "No newline at end of file") - if data.endswith(b'\n\n'): - report_error(filename, lineno, "Extra newlines at end of file") - -def main(): - files = sys.argv[1:] - for filename in files: - check_file(filename) - if HaveError: - sys.stderr.write("\n\n") - sys.exit(-1) - -if __name__ == '__main__': - main() diff --git a/scripts/check_whitespace.sh b/scripts/check_whitespace.sh deleted file mode 100644 index 4fe2146..0000000 --- a/scripts/check_whitespace.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# Script to check whitespace in Klipper source code. - -# Find SRCDIR from the pathname of this script -SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )" -cd ${SRCDIR} - -# Run whitespace tool on all source files -WS_DIRS="config/ docs/ klippy/ scripts/ src/ test/" -WS_EXCLUDE="-path scripts/kconfig -prune" -WS_FILES="-o -iname '*.[csh]' -o -name '*.py' -o -name '*.sh'" -WS_FILES="$WS_FILES -o -name '*.md' -o -name '*.cfg' -o -name '*.txt'" -WS_FILES="$WS_FILES -o -name '*.html' -o -name '*.css'" -WS_FILES="$WS_FILES -o -name '*.yaml' -o -name '*.yml'" -WS_FILES="$WS_FILES -o -name '*.css' -o -name '*.yaml' -o -name '*.yml'" -WS_FILES="$WS_FILES -o -name '*.test' -o -name '*.config'" -WS_FILES="$WS_FILES -o -iname '*.lds' -o -iname 'Makefile' -o -iname 'Kconfig'" -eval find $WS_DIRS $WS_EXCLUDE $WS_FILES | xargs ./scripts/check_whitespace.py diff --git a/scripts/checkstack.py b/scripts/checkstack.py deleted file mode 100644 index 1a6605f..0000000 --- a/scripts/checkstack.py +++ /dev/null @@ -1,243 +0,0 @@ -#!/usr/bin/env python2 -# Script that tries to find how much stack space each function in an -# object is using. -# -# Copyright (C) 2015 Kevin O'Connor -# -# This file may be distributed under the terms of the GNU GPLv3 license. - -# Usage: -# avr-objdump -d out/klipper.elf | scripts/checkstack.py - -import sys -import re - -# Functions that change stacks -STACKHOP = [] -# List of functions we can assume are never called. 
-IGNORE = [] - -OUTPUTDESC = """ -#funcname1[preamble_stack_usage,max_usage_with_callers]: -# insn_addr:called_function [usage_at_call_point+caller_preamble,total_usage] -# -#funcname2[p,m,max_usage_to_yield_point]: -# insn_addr:called_function [u+c,t,usage_to_yield_point] -""" - -class function: - def __init__(self, funcaddr, funcname): - self.funcaddr = funcaddr - self.funcname = funcname - self.basic_stack_usage = 0 - self.max_stack_usage = None - self.yield_usage = -1 - self.max_yield_usage = None - self.total_calls = 0 - # called_funcs = [(insnaddr, calladdr, stackusage), ...] - self.called_funcs = [] - self.subfuncs = {} - # Update function info with a found "yield" point. - def noteYield(self, stackusage): - if self.yield_usage < stackusage: - self.yield_usage = stackusage - # Update function info with a found "call" point. - def noteCall(self, insnaddr, calladdr, stackusage): - if (calladdr, stackusage) in self.subfuncs: - # Already noted a nearly identical call - ignore this one. - return - self.called_funcs.append((insnaddr, calladdr, stackusage)) - self.subfuncs[(calladdr, stackusage)] = 1 - -# Find out maximum stack usage for a function -def calcmaxstack(info, funcs): - if info.max_stack_usage is not None: - return - info.max_stack_usage = max_stack_usage = info.basic_stack_usage - info.max_yield_usage = max_yield_usage = info.yield_usage - total_calls = 0 - seenbefore = {} - # Find max of all nested calls. - for insnaddr, calladdr, usage in info.called_funcs: - callinfo = funcs.get(calladdr) - if callinfo is None: - continue - calcmaxstack(callinfo, funcs) - if callinfo.funcname not in seenbefore: - seenbefore[callinfo.funcname] = 1 - total_calls += callinfo.total_calls + 1 - funcnameroot = callinfo.funcname.split('.')[0] - if funcnameroot in IGNORE: - # This called function is ignored - don't contribute it to - # the max stack. - continue - totusage = usage + callinfo.max_stack_usage - totyieldusage = usage + callinfo.max_yield_usage - if funcnameroot in STACKHOP: - # Don't count children of this function - totusage = totyieldusage = usage - if totusage > max_stack_usage: - max_stack_usage = totusage - if callinfo.max_yield_usage >= 0 and totyieldusage > max_yield_usage: - max_yield_usage = totyieldusage - info.max_stack_usage = max_stack_usage - info.max_yield_usage = max_yield_usage - info.total_calls = total_calls - -# Try to arrange output so that functions that call each other are -# near each other. -def orderfuncs(funcaddrs, availfuncs): - l = [(availfuncs[funcaddr].total_calls - , availfuncs[funcaddr].funcname, funcaddr) - for funcaddr in funcaddrs if funcaddr in availfuncs] - l.sort() - l.reverse() - out = [] - while l: - count, name, funcaddr = l.pop(0) - info = availfuncs.get(funcaddr) - if info is None: - continue - calladdrs = [calls[1] for calls in info.called_funcs] - del availfuncs[funcaddr] - out = out + orderfuncs(calladdrs, availfuncs) + [info] - return out - -hex_s = r'[0-9a-f]+' -re_func = re.compile(r'^(?P' + hex_s + r') <(?P.*)>:$') -re_asm = re.compile( - r'^[ ]*(?P' + hex_s - + r'):\t[^\t]*\t(?P[^\t]+?)(?P\t[^;]*)?' 
- + r'[ ]*(; (?P0x' + hex_s - + r') <(?P.*)>)?$') - -def main(): - unknownfunc = function(None, "") - indirectfunc = function(-1, '') - unknownfunc.max_stack_usage = indirectfunc.max_stack_usage = 0 - unknownfunc.max_yield_usage = indirectfunc.max_yield_usage = -1 - funcs = {-1: indirectfunc} - funcaddr = None - datalines = {} - cur = None - atstart = 0 - stackusage = 0 - - # Parse input lines - for line in sys.stdin.readlines(): - m = re_func.match(line) - if m is not None: - # Found function - funcaddr = int(m.group('funcaddr'), 16) - funcs[funcaddr] = cur = function(funcaddr, m.group('func')) - stackusage = 0 - atstart = 1 - continue - m = re_asm.match(line) - if m is None: - datalines.setdefault(funcaddr, []).append(line) - #print("other", repr(line)) - continue - insn = m.group('insn') - - if insn == 'push': - stackusage += 1 - continue - if insn == 'rcall' and m.group('params').strip() == '.+0': - stackusage += 2 - continue - - if atstart: - if insn in ['in', 'eor']: - continue - cur.basic_stack_usage = stackusage - atstart = 0 - - insnaddr = m.group('insnaddr') - calladdr = m.group('calladdr') - if calladdr is None: - if insn == 'ijmp': - # Indirect tail call - cur.noteCall(insnaddr, -1, 0) - elif insn == 'icall': - cur.noteCall(insnaddr, -1, stackusage + 2) - else: - # misc instruction - continue - else: - # Jump or call insn - calladdr = int(calladdr, 16) - ref = m.group('ref') - if '+' in ref: - # Inter-function jump. - continue - elif insn.startswith('ld') or insn.startswith('st'): - # memory access - continue - elif insn in ('rjmp', 'jmp', 'brne', 'brcs'): - # Tail call - cur.noteCall(insnaddr, calladdr, 0) - elif insn in ('rcall', 'call'): - cur.noteCall(insnaddr, calladdr, stackusage + 2) - else: - print("unknown call", ref) - cur.noteCall(insnaddr, calladdr, stackusage) - # Reset stack usage to preamble usage - stackusage = cur.basic_stack_usage - - # Update for known indirect functions - funcsbyname = {} - for info in funcs.values(): - funcnameroot = info.funcname.split('.')[0] - funcsbyname[funcnameroot] = info - cmdfunc = funcsbyname.get('sched_main') - command_index = funcsbyname.get('command_index') - if command_index is not None and cmdfunc is not None: - for line in datalines[command_index.funcaddr]: - parts = line.split() - if len(parts) < 9: - continue - calladdr = int(parts[8]+parts[7], 16) * 2 - numparams = int(parts[2], 16) - stackusage = cmdfunc.basic_stack_usage + 2 + numparams * 4 - cmdfunc.noteCall(0, calladdr, stackusage) - if len(parts) < 17: - continue - calladdr = int(parts[16]+parts[15], 16) * 2 - numparams = int(parts[10], 16) - stackusage = cmdfunc.basic_stack_usage + 2 + numparams * 4 - cmdfunc.noteCall(0, calladdr, stackusage) - eventfunc = funcsbyname.get('__vector_13', funcsbyname.get('__vector_17')) - for funcnameroot, info in funcsbyname.items(): - if funcnameroot.endswith('_event') and eventfunc is not None: - eventfunc.noteCall(0, info.funcaddr, eventfunc.basic_stack_usage+2) - - # Calculate maxstackusage - for info in funcs.values(): - calcmaxstack(info, funcs) - - # Sort functions for output - funcinfos = orderfuncs(funcs.keys(), funcs.copy()) - - # Show all functions - print(OUTPUTDESC) - for info in funcinfos: - if info.max_stack_usage == 0 and info.max_yield_usage < 0: - continue - yieldstr = "" - if info.max_yield_usage >= 0: - yieldstr = ",%d" % info.max_yield_usage - print("\n%s[%d,%d%s]:" % (info.funcname, info.basic_stack_usage - , info.max_stack_usage, yieldstr)) - for insnaddr, calladdr, stackusage in info.called_funcs: - callinfo = 
funcs.get(calladdr, unknownfunc) - yieldstr = "" - if callinfo.max_yield_usage >= 0: - yieldstr = ",%d" % (stackusage + callinfo.max_yield_usage) - print(" %04s:%-40s [%d+%d,%d%s]" % ( - insnaddr, callinfo.funcname, stackusage - , callinfo.basic_stack_usage - , stackusage+callinfo.max_stack_usage, yieldstr)) - -if __name__ == '__main__': - main() diff --git a/scripts/ci-build.sh b/scripts/ci-build.sh deleted file mode 100644 index 9d42396..0000000 --- a/scripts/ci-build.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/bin/bash -# Test script for continuous integration. - -# Stop script early on any error; check variables -set -eu - -# Paths to tools installed by ci-install.sh -MAIN_DIR=${PWD} -BUILD_DIR=${PWD}/ci_build -export PATH=${BUILD_DIR}/pru-gcc/bin:${PATH} -PYTHON=${BUILD_DIR}/python-env/bin/python -PYTHON2=${BUILD_DIR}/python2-env/bin/python - - -###################################################################### -# Section grouping output message helpers -###################################################################### - -start_test() -{ - echo "::group::=============== $1 $2" - set -x -} - -finish_test() -{ - set +x - echo "=============== Finished $2" - echo "::endgroup::" -} - - -###################################################################### -# Check for whitespace errors -###################################################################### - -start_test check_whitespace "Check whitespace" -./scripts/check_whitespace.sh -finish_test check_whitespace "Check whitespace" - - -###################################################################### -# Run compile tests for several different MCU types -###################################################################### - -DICTDIR=${BUILD_DIR}/dict -mkdir -p ${DICTDIR} - -for TARGET in test/configs/*.config ; do - start_test mcu_compile "$TARGET" - make clean - make distclean - unset CC - cp ${TARGET} .config - make olddefconfig - make V=1 - size out/*.elf - finish_test mcu_compile "$TARGET" - cp out/klipper.dict ${DICTDIR}/$(basename ${TARGET} .config).dict -done - - -###################################################################### -# Verify klippy host software -###################################################################### - -start_test klippy "Test klippy import (Python3)" -$PYTHON klippy/klippy.py --import-test -finish_test klippy "Test klippy import (Python3)" - -start_test klippy "Test klippy import (Python2)" -$PYTHON2 klippy/klippy.py --import-test -finish_test klippy "Test klippy import (Python2)" - -start_test klippy "Test invoke klippy (Python3)" -$PYTHON scripts/test_klippy.py -d ${DICTDIR} test/klippy/*.test -finish_test klippy "Test invoke klippy (Python3)" - -start_test klippy "Test invoke klippy (Python2)" -$PYTHON2 scripts/test_klippy.py -d ${DICTDIR} test/klippy/*.test -finish_test klippy "Test invoke klippy (Python2)" diff --git a/scripts/ci-install.sh b/scripts/ci-install.sh deleted file mode 100644 index 031b100..0000000 --- a/scripts/ci-install.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash -# Build setup script for continuous integration testing. -# See ci-build.sh for the actual test steps. 
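The per-target loop in ci-build.sh above copies each test/configs/*.config into place, builds it, and stashes the resulting klipper.dict for the klippy tests. A rough Python restatement of that flow, assuming it is run from the repository root with the required toolchains installed:

import glob, os, shutil, subprocess

dictdir = "ci_build/dict"
os.makedirs(dictdir, exist_ok=True)
for target in sorted(glob.glob("test/configs/*.config")):
    subprocess.check_call(["make", "clean"])
    subprocess.check_call(["make", "distclean"])
    shutil.copy(target, ".config")
    subprocess.check_call(["make", "olddefconfig"])
    subprocess.check_call(["make"])
    name = os.path.basename(target)[:-len(".config")] + ".dict"
    shutil.copy("out/klipper.dict", os.path.join(dictdir, name))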
- -# Stop script early on any error; check variables; be verbose -set -eux - -MAIN_DIR=${PWD} -BUILD_DIR=${PWD}/ci_build -CACHE_DIR=${PWD}/ci_cache -mkdir -p ${BUILD_DIR} ${CACHE_DIR} - - -###################################################################### -# Install system dependencies -###################################################################### - -echo -e "\n\n=============== Install system dependencies\n\n" -PKGS="virtualenv python-dev libffi-dev build-essential" -PKGS="${PKGS} gcc-avr avr-libc" -PKGS="${PKGS} libnewlib-arm-none-eabi gcc-arm-none-eabi binutils-arm-none-eabi" -PKGS="${PKGS} pv libmpfr-dev libgmp-dev libmpc-dev texinfo bison flex" -sudo apt-get install ${PKGS} - - -###################################################################### -# Install (or build) pru gcc -###################################################################### - -echo -e "\n\n=============== Install embedded pru gcc\n\n" -PRU_FILE=${CACHE_DIR}/gnupru.tar.gz -PRU_DIR=${BUILD_DIR}/pru-gcc - -if [ ! -f ${PRU_FILE} ]; then - cd ${BUILD_DIR} - git config --global user.email "you@example.com" - git config --global user.name "Your Name" - git clone https://github.com/dinuxbg/gnupru -b 2018.03-beta-rc3 --depth 1 - cd gnupru - export PREFIX=${PRU_DIR} - ./download-and-patch.sh 2>&1 | pv -nli 30 > ${BUILD_DIR}/gnupru-build.log - ./build.sh 2>&1 | pv -nli 30 >> ${BUILD_DIR}/gnupru-build.log - cd ${BUILD_DIR} - tar cfz ${PRU_FILE} pru-gcc/ -else - cd ${BUILD_DIR} - tar xfz ${PRU_FILE} -fi - - -###################################################################### -# Create python3 virtualenv environment -###################################################################### - -echo -e "\n\n=============== Install python3 virtualenv\n\n" -cd ${MAIN_DIR} -virtualenv -p python3 ${BUILD_DIR}/python-env -${BUILD_DIR}/python-env/bin/pip install -r ${MAIN_DIR}/scripts/klippy-requirements.txt - - -###################################################################### -# Create python2 virtualenv environment -###################################################################### - -echo -e "\n\n=============== Install python2 virtualenv\n\n" -cd ${MAIN_DIR} -virtualenv -p python2 ${BUILD_DIR}/python2-env -${BUILD_DIR}/python2-env/bin/pip install -r ${MAIN_DIR}/scripts/klippy-requirements.txt diff --git a/scripts/flash-linux.sh b/scripts/flash-linux.sh deleted file mode 100644 index 032b705..0000000 --- a/scripts/flash-linux.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -# This script installs the Linux MCU code to /usr/local/bin/ - -if [ "$EUID" -ne 0 ]; then - echo "This script must be run as root" - exit -1 -fi -set -e - -# Install new micro-controller code -echo "Installing micro-controller code to /usr/local/bin/" -rm -f /usr/local/bin/klipper_mcu -cp out/klipper.elf /usr/local/bin/klipper_mcu -sync - -# Restart (if system install script present) -if [ -f /etc/init.d/klipper_pru ]; then - echo "Attempting host PRU restart..." - service klipper_pru restart -fi - -# Restart (if system install script present) -if [ -f /etc/init.d/klipper_mcu ]; then - echo "Attempting host MCU restart..." - service klipper_mcu restart -fi diff --git a/scripts/flash-pru.sh b/scripts/flash-pru.sh deleted file mode 100644 index c508606..0000000 --- a/scripts/flash-pru.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# This script installs the PRU firmware on a beaglebone machine. 
- -if [ "$EUID" -ne 0 ]; then - echo "This script must be run as root" - exit -1 -fi -set -e - -# Install new firmware -echo "Installing firmware to /lib/firmware/" -cp out/pru0.elf /lib/firmware/am335x-pru0-fw -cp out/pru1.elf /lib/firmware/am335x-pru1-fw -sync - -# Restart (if system install script present) -if [ -f /etc/init.d/klipper_pru ]; then - echo "Attempting PRU restart..." - service klipper_pru restart -fi diff --git a/scripts/flash-sdcard.sh b/scripts/flash-sdcard.sh deleted file mode 100644 index 7ee03cc..0000000 --- a/scripts/flash-sdcard.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash -# This script launches flash_sdcard.py, a utitlity that enables -# unattended firmware updates on boards with "SD Card" bootloaders - -# Non-standard installations may need to change this location -KLIPPY_ENV="${HOME}/klippy-env/bin/python" -SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )" -KLIPPER_BIN="${SRCDIR}/out/klipper.bin" -KLIPPER_BIN_DEFAULT=$KLIPPER_BIN -KLIPPER_DICT_DEFAULT="${SRCDIR}/out/klipper.dict" -SPI_FLASH="${SRCDIR}/scripts/spi_flash/spi_flash.py" -BAUD_ARG="" -# Force script to exit if an error occurs -set -e - -print_help_message() -{ - echo "SD Card upload utility for Klipper" - echo - echo "usage: flash_sdcard.sh [-h] [-l] [-b ] [-f ] [-d ]" - echo " " - echo - echo "positional arguments:" - echo " device serial port" - echo " board type" - echo - echo "optional arguments:" - echo " -h show this message" - echo " -l list available boards" - echo " -b serial baud rate (default is 250000)" - echo " -f path to klipper.bin" - echo " -d path to klipper.dict for firmware validation" -} - -# Parse command line "optional args" -while getopts "hlb:f:d:" arg; do - case $arg in - h) - print_help_message - exit 0 - ;; - l) - ${KLIPPY_ENV} ${SPI_FLASH} -l - exit 0 - ;; - b) BAUD_ARG="-b ${OPTARG}";; - f) KLIPPER_BIN=$OPTARG;; - d) KLIPPER_DICT=$OPTARG;; - esac -done - -# Make sure that we have the correct number of positional args -if [ $(($# - $OPTIND + 1)) -ne 2 ]; then - echo "Invalid number of args: $(($# - $OPTIND + 1))" - exit -1 -fi - -DEVICE=${@:$OPTIND:1} -BOARD=${@:$OPTIND+1:1} - -if [ ! -f $KLIPPER_BIN ]; then - echo "No file found at '${KLIPPER_BIN}'" - exit -1 -fi - -if [ ! -e $DEVICE ]; then - echo "No device found at '${DEVICE}'" - exit -1 -fi - -if [ ! $KLIPPER_DICT ] && [ $KLIPPER_BIN == $KLIPPER_BIN_DEFAULT ] ; then - KLIPPER_DICT=$KLIPPER_DICT_DEFAULT -fi - -if [ $KLIPPER_DICT ]; then - if [ ! -f $KLIPPER_DICT ]; then - echo "No file found at '${KLIPPER_BIN}'" - exit -1 - fi - KLIPPER_DICT="-d ${KLIPPER_DICT}" -fi - -# Run Script -echo "Flashing ${KLIPPER_BIN} to ${DEVICE}" -${KLIPPY_ENV} ${SPI_FLASH} ${BAUD_ARG} ${KLIPPER_DICT} ${DEVICE} ${BOARD} ${KLIPPER_BIN} diff --git a/scripts/flash_usb.py b/scripts/flash_usb.py deleted file mode 100644 index 3d62a64..0000000 --- a/scripts/flash_usb.py +++ /dev/null @@ -1,380 +0,0 @@ -#!/usr/bin/env python3 -# Tool to enter a USB bootloader and flash Klipper -# -# Copyright (C) 2019 Kevin O'Connor -# -# This file may be distributed under the terms of the GNU GPLv3 license. 
-import sys, os, re, subprocess, optparse, time, fcntl, termios, struct
-
-class error(Exception):
-    pass
-
-# Attempt to enter bootloader via 1200 baud request
-def enter_bootloader(device):
-    try:
-        f = open(device, 'rb')
-        fd = f.fileno()
-        fcntl.ioctl(fd, termios.TIOCMBIS, struct.pack('I', termios.TIOCM_DTR))
-        t = termios.tcgetattr(fd)
-        t[4] = t[5] = termios.B1200
-        sys.stderr.write("Entering bootloader on %s\n" % (device,))
-        termios.tcsetattr(fd, termios.TCSANOW, t)
-        fcntl.ioctl(fd, termios.TIOCMBIC, struct.pack('I', termios.TIOCM_DTR))
-        f.close()
-    except (IOError, OSError) as e:
-        pass
-
-# Translate a serial device name to a stable serial name in /dev/serial/by-path/
-def translate_serial_to_tty(device):
-    ttyname = os.path.realpath(device)
-    if not os.path.exists('/dev/serial/by-path/'):
-        raise error("Unable to find serial 'by-path' folder")
-    for fname in os.listdir('/dev/serial/by-path/'):
-        fname = '/dev/serial/by-path/' + fname
-        if os.path.realpath(fname) == ttyname:
-            return ttyname, fname
-    return ttyname, ttyname
-
-# Translate a serial device name to a usb path (suitable for dfu-util)
-def translate_serial_to_usb_path(device):
-    realdev = os.path.realpath(device)
-    fname = os.path.basename(realdev)
-    try:
-        lname = os.readlink("/sys/class/tty/" + fname)
-    except OSError as e:
-        raise error("Unable to find tty device")
-    ttypath_r = re.compile(r".*/usb\d+.*/(?P<path>\d+-[0-9.]+):\d+\.\d+/.*")
-    m = ttypath_r.match(lname)
-    if m is None:
-        raise error("Unable to find tty usb device")
-    devpath = os.path.realpath("/sys/class/tty/%s/device" % (fname,))
-    return m.group("path"), devpath
-
-# Wait for a given path to appear
-def wait_path(path, alt_path=None):
-    time.sleep(.100)
-    start_alt_path = None
-    end_time = time.time() + 4.0
-    while 1:
-        time.sleep(0.100)
-        cur_time = time.time()
-        if os.path.exists(path):
-            sys.stderr.write("Device reconnect on %s\n" % (path,))
-            time.sleep(0.100)
-            return path
-        if alt_path is not None and os.path.exists(alt_path):
-            if start_alt_path is None:
-                start_alt_path = cur_time
-                continue
-            if cur_time >= start_alt_path + 0.300:
-                sys.stderr.write("Device reconnect on alt path %s\n" % (
-                    alt_path,))
-                return alt_path
-        if cur_time > end_time:
-            return path
-
-CANBOOT_ID ="1d50:6177"
-
-def detect_canboot(devpath):
-    usbdir = os.path.dirname(devpath)
-    try:
-        with open(os.path.join(usbdir, "idVendor")) as f:
-            vid = f.read().strip().lower()
-        with open(os.path.join(usbdir, "idProduct")) as f:
-            pid = f.read().strip().lower()
-    except Exception:
-        return False
-    usbid = "%s:%s" % (vid, pid)
-    return usbid == CANBOOT_ID
-
-def call_flashcan(device, binfile):
-    try:
-        import serial
-    except ModuleNotFoundError:
-        sys.stderr.write(
-            "Python's pyserial module is required to update. 
Install\n" - "with the following command:\n" - " %s -m pip install pyserial\n\n" % (sys.executable,) - ) - sys.exit(-1) - args = [sys.executable, "lib/canboot/flash_can.py", "-d", - device, "-f", binfile] - sys.stderr.write(" ".join(args) + '\n\n') - res = subprocess.call(args) - if res != 0: - sys.stderr.write("Error running flash_can.py\n") - sys.exit(-1) - -def flash_canboot(options, binfile): - ttyname, pathname = translate_serial_to_tty(options.device) - call_flashcan(pathname, binfile) - -# Flash via a call to bossac -def flash_bossac(device, binfile, extra_flags=[]): - ttyname, pathname = translate_serial_to_tty(device) - enter_bootloader(pathname) - pathname = wait_path(pathname, ttyname) - baseargs = ["lib/bossac/bin/bossac", "-U", "-p", pathname] - args = baseargs + extra_flags + ["-w", binfile, "-v"] - sys.stderr.write(" ".join(args) + '\n\n') - res = subprocess.call(args) - if res != 0: - raise error("Error running bossac") - if "-R" not in extra_flags: - args = baseargs + ["-b", "-R"] - try: - subprocess.check_output(args, stderr=subprocess.STDOUT) - if "-b" not in extra_flags: - wait_path(pathname) - subprocess.check_output(args, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - pass - -# Invoke the dfu-util program -def call_dfuutil(flags, binfile, sudo): - args = ["dfu-util"] + flags + ["-D", binfile] - if sudo: - args.insert(0, "sudo") - sys.stderr.write(" ".join(args) + '\n\n') - res = subprocess.call(args) - if res != 0: - raise error("Error running dfu-util") - -# Flash via a call to dfu-util -def flash_dfuutil(device, binfile, extra_flags=[], sudo=True): - hexfmt_r = re.compile(r"^[a-fA-F0-9]{4}:[a-fA-F0-9]{4}$") - if hexfmt_r.match(device.strip()): - call_dfuutil(["-d", ","+device.strip()] + extra_flags, binfile, sudo) - return - ttyname, serbypath = translate_serial_to_tty(device) - buspath, devpath = translate_serial_to_usb_path(device) - enter_bootloader(device) - pathname = wait_path(devpath) - if detect_canboot(devpath): - call_flashcan(serbypath, binfile) - else: - call_dfuutil(["-p", buspath] + extra_flags, binfile, sudo) - -def call_hidflash(binfile, sudo): - args = ["lib/hidflash/hid-flash", binfile] - if sudo: - args.insert(0, "sudo") - sys.stderr.write(" ".join(args) + '\n\n') - res = subprocess.call(args) - if res != 0: - raise error("Error running hid-flash") - -# Flash via call to hid-flash -def flash_hidflash(device, binfile, sudo=True): - hexfmt_r = re.compile(r"^[a-fA-F0-9]{4}:[a-fA-F0-9]{4}$") - if hexfmt_r.match(device.strip()): - call_hidflash(binfile, sudo) - return - ttyname, serbypath = translate_serial_to_tty(device) - buspath, devpath = translate_serial_to_usb_path(device) - enter_bootloader(device) - pathname = wait_path(devpath) - if detect_canboot(devpath): - call_flashcan(serbypath, binfile) - else: - call_hidflash(binfile, sudo) - -# Call Klipper modified "picoboot" -def call_picoboot(bus, addr, binfile, sudo): - args = ["lib/rp2040_flash/rp2040_flash", binfile] - if bus is not None: - args.extend([bus, addr]) - if sudo: - args.insert(0, "sudo") - sys.stderr.write(" ".join(args) + '\n\n') - res = subprocess.call(args) - if res != 0: - raise error("Error running rp2040_flash") - -# Flash via Klipper modified "picoboot" -def flash_picoboot(device, binfile, sudo): - buspath, devpath = translate_serial_to_usb_path(device) - # We need one level up to get access to busnum/devnum files - usbdir = os.path.dirname(devpath) - enter_bootloader(device) - wait_path(usbdir) - with open(usbdir + "/busnum") as f: - bus = 
f.read().strip() - with open(usbdir + "/devnum") as f: - addr = f.read().strip() - call_picoboot(bus, addr, binfile, sudo) - - -###################################################################### -# Device specific helpers -###################################################################### - -def flash_atsam3(options, binfile): - try: - flash_bossac(options.device, binfile, ["-e", "-b"]) - except error as e: - sys.stderr.write("Failed to flash to %s: %s\n" % ( - options.device, str(e))) - sys.exit(-1) - -def flash_atsam4(options, binfile): - try: - flash_bossac(options.device, binfile, ["-e"]) - except error as e: - sys.stderr.write("Failed to flash to %s: %s\n" % ( - options.device, str(e))) - sys.exit(-1) - -def flash_atsamd(options, binfile): - extra_flags = ["--offset=0x%x" % (options.start,), "-b", "-R"] - try: - flash_bossac(options.device, binfile, extra_flags) - except error as e: - sys.stderr.write("Failed to flash to %s: %s\n" % ( - options.device, str(e))) - sys.exit(-1) - -SMOOTHIE_HELP = """ -Failed to flash to %s: %s - -If flashing Klipper to a Smoothieboard for the first time it may be -necessary to manually place the board into "bootloader mode" - press -and hold the "Play button" and then press and release the "Reset -button". - -When a Smoothieboard is in bootloader mode it can be flashed with the -following command: - make flash FLASH_DEVICE=1d50:6015 - -Alternatively, one can flash a Smoothieboard via SD card - copy the -"out/klipper.bin" file to a file named "firmware.bin" on an SD card -and then restart the Smoothieboard with that SD card. - -""" - -def flash_lpc176x(options, binfile): - try: - flash_dfuutil(options.device, binfile, [], options.sudo) - except error as e: - sys.stderr.write(SMOOTHIE_HELP % (options.device, str(e))) - sys.exit(-1) - -STM32F1_HELP = """ -Failed to flash to %s: %s - -If the device is already in bootloader mode it can be flashed with the -following command: - make flash FLASH_DEVICE=1eaf:0003 - OR - make flash FLASH_DEVICE=1209:beba - -If attempting to flash via 3.3V serial, then use: - make serialflash FLASH_DEVICE=%s - -""" - -def flash_stm32f1(options, binfile): - try: - if options.start == 0x8000800: - flash_hidflash(options.device, binfile, options.sudo) - else: - flash_dfuutil(options.device, binfile, ["-R", "-a", "2"], - options.sudo) - except error as e: - sys.stderr.write(STM32F1_HELP % ( - options.device, str(e), options.device)) - sys.exit(-1) - -STM32F4_HELP = """ -Failed to flash to %s: %s - -If the device is already in bootloader mode it can be flashed with the -following command: - make flash FLASH_DEVICE=0483:df11 - OR - make flash FLASH_DEVICE=1209:beba - -If attempting to flash via 3.3V serial, then use: - make serialflash FLASH_DEVICE=%s - -""" - -def flash_stm32f4(options, binfile): - start = "0x%x:leave" % (options.start,) - try: - if options.start == 0x8004000: - flash_hidflash(options.device, binfile, options.sudo) - else: - flash_dfuutil(options.device, binfile, - ["-R", "-a", "0", "-s", start], options.sudo) - except error as e: - sys.stderr.write(STM32F4_HELP % ( - options.device, str(e), options.device)) - sys.exit(-1) - -RP2040_HELP = """ -Failed to flash to %s: %s - -If the device is already in bootloader mode it can be flashed with the -following command: - make flash FLASH_DEVICE=2e8a:0003 - -Alternatively, one can flash rp2040 boards like the Pico by manually -entering bootloader mode(hold bootsel button during powerup), mount the -device as a usb drive, and copy klipper.uf2 to the device. 
- -""" - -def flash_rp2040(options, binfile): - try: - if options.device.lower() == "2e8a:0003": - call_picoboot(None, None, binfile, options.sudo) - else: - flash_picoboot(options.device, binfile, options.sudo) - except error as e: - sys.stderr.write(RP2040_HELP % (options.device, str(e))) - sys.exit(-1) - -MCUTYPES = { - 'sam3': flash_atsam3, 'sam4': flash_atsam4, 'samd': flash_atsamd, - 'same70': flash_atsam4, 'lpc176': flash_lpc176x, 'stm32f103': flash_stm32f1, - 'stm32f4': flash_stm32f4, 'stm32f042': flash_stm32f4, - 'stm32f072': flash_stm32f4, 'stm32g0b1': flash_stm32f4, - 'stm32h7': flash_stm32f4, 'rp2040': flash_rp2040 -} - - -###################################################################### -# Startup -###################################################################### - -def main(): - usage = "%prog [options] -t -d " - opts = optparse.OptionParser(usage) - opts.add_option("-t", "--type", type="string", dest="mcutype", - help="micro-controller type") - opts.add_option("-d", "--device", type="string", dest="device", - help="serial port device") - opts.add_option("-s", "--start", type="int", dest="start", - help="start address in flash") - opts.add_option("--no-sudo", action="store_false", dest="sudo", - default=True, help="do not run sudo") - options, args = opts.parse_args() - if len(args) != 1: - opts.error("Incorrect number of arguments") - flash_func = None - if options.mcutype: - for prefix, func in MCUTYPES.items(): - if options.mcutype.startswith(prefix): - flash_func = func - break - if flash_func is None: - opts.error("USB flashing is not supported for MCU '%s'" - % (options.mcutype,)) - if not options.device: - sys.stderr.write("\nPlease specify FLASH_DEVICE\n\n") - sys.exit(-1) - flash_func(options, args[0]) - -if __name__ == '__main__': - main() diff --git a/scripts/graph_accelerometer.py b/scripts/graph_accelerometer.py deleted file mode 100644 index 8c09e84..0000000 --- a/scripts/graph_accelerometer.py +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env python3 -# Generate adxl345 accelerometer graphs -# -# Copyright (C) 2020 Kevin O'Connor -# Copyright (C) 2020 Dmitry Butyugin -# -# This file may be distributed under the terms of the GNU GPLv3 license. -import importlib, optparse, os, sys -from textwrap import wrap -import numpy as np, matplotlib -sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), - '..', 'klippy')) -shaper_calibrate = importlib.import_module('.shaper_calibrate', 'extras') - -MAX_TITLE_LENGTH=65 - -def parse_log(logname, opts): - with open(logname) as f: - for header in f: - if not header.startswith('#'): - break - if not header.startswith('freq,psd_x,psd_y,psd_z,psd_xyz'): - # Raw accelerometer data - return np.loadtxt(logname, comments='#', delimiter=',') - # Power spectral density data or shaper calibration data - opts.error("File %s does not contain raw accelerometer data and therefore " - "is not supported by graph_accelerometer.py script. Please use " - "calibrate_shaper.py script to process it instead." 
% (logname,)) - -###################################################################### -# Raw accelerometer graphing -###################################################################### - -def plot_accel(data, logname): - first_time = data[0, 0] - times = data[:,0] - first_time - fig, axes = matplotlib.pyplot.subplots(nrows=3, sharex=True) - axes[0].set_title("\n".join(wrap("Accelerometer data (%s)" % (logname,), - MAX_TITLE_LENGTH))) - axis_names = ['x', 'y', 'z'] - for i in range(len(axis_names)): - avg = data[:,i+1].mean() - adata = data[:,i+1] - data[:,i+1].mean() - ax = axes[i] - ax.plot(times, adata, alpha=0.8) - ax.grid(True) - ax.set_ylabel('%s accel (%+.3f)\n(mm/s^2)' % (axis_names[i], -avg)) - axes[-1].set_xlabel('Time (%+.3f)\n(s)' % (-first_time,)) - fig.tight_layout() - return fig - - -###################################################################### -# Frequency graphing -###################################################################### - -# Calculate estimated "power spectral density" -def calc_freq_response(data, max_freq): - helper = shaper_calibrate.ShaperCalibrate(printer=None) - return helper.process_accelerometer_data(data) - -def calc_specgram(data, axis): - N = data.shape[0] - Fs = N / (data[-1,0] - data[0,0]) - # Round up to a power of 2 for faster FFT - M = 1 << int(.5 * Fs - 1).bit_length() - window = np.kaiser(M, 6.) - def _specgram(x): - return matplotlib.mlab.specgram( - x, Fs=Fs, NFFT=M, noverlap=M//2, window=window, - mode='psd', detrend='mean', scale_by_freq=False) - - d = {'x': data[:,1], 'y': data[:,2], 'z': data[:,3]} - if axis != 'all': - pdata, bins, t = _specgram(d[axis]) - else: - pdata, bins, t = _specgram(d['x']) - for ax in 'yz': - pdata += _specgram(d[ax])[0] - return pdata, bins, t - -def plot_frequency(datas, lognames, max_freq): - calibration_data = calc_freq_response(datas[0], max_freq) - for data in datas[1:]: - calibration_data.add_data(calc_freq_response(data, max_freq)) - freqs = calibration_data.freq_bins - psd = calibration_data.psd_sum[freqs <= max_freq] - px = calibration_data.psd_x[freqs <= max_freq] - py = calibration_data.psd_y[freqs <= max_freq] - pz = calibration_data.psd_z[freqs <= max_freq] - freqs = freqs[freqs <= max_freq] - - fig, ax = matplotlib.pyplot.subplots() - ax.set_title("\n".join(wrap( - "Frequency response (%s)" % (', '.join(lognames)), MAX_TITLE_LENGTH))) - ax.set_xlabel('Frequency (Hz)') - ax.set_ylabel('Power spectral density') - - ax.plot(freqs, psd, label='X+Y+Z', alpha=0.6) - ax.plot(freqs, px, label='X', alpha=0.6) - ax.plot(freqs, py, label='Y', alpha=0.6) - ax.plot(freqs, pz, label='Z', alpha=0.6) - - ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator()) - ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator()) - ax.grid(which='major', color='grey') - ax.grid(which='minor', color='lightgrey') - ax.ticklabel_format(axis='y', style='scientific', scilimits=(0,0)) - - fontP = matplotlib.font_manager.FontProperties() - fontP.set_size('x-small') - ax.legend(loc='best', prop=fontP) - fig.tight_layout() - return fig - -def plot_compare_frequency(datas, lognames, max_freq, axis): - fig, ax = matplotlib.pyplot.subplots() - ax.set_title('Frequency responses comparison') - ax.set_xlabel('Frequency (Hz)') - ax.set_ylabel('Power spectral density') - - for data, logname in zip(datas, lognames): - calibration_data = calc_freq_response(data, max_freq) - freqs = calibration_data.freq_bins - psd = calibration_data.get_psd(axis)[freqs <= max_freq] - freqs = freqs[freqs <= max_freq] - 
ax.plot(freqs, psd, label="\n".join(wrap(logname, 60)), alpha=0.6) - - ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator()) - ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator()) - ax.grid(which='major', color='grey') - ax.grid(which='minor', color='lightgrey') - fontP = matplotlib.font_manager.FontProperties() - fontP.set_size('x-small') - ax.legend(loc='best', prop=fontP) - fig.tight_layout() - return fig - -# Plot data in a "spectrogram colormap" -def plot_specgram(data, logname, max_freq, axis): - pdata, bins, t = calc_specgram(data, axis) - - fig, ax = matplotlib.pyplot.subplots() - ax.set_title("\n".join(wrap("Spectrogram %s (%s)" % (axis, logname), - MAX_TITLE_LENGTH))) - ax.pcolormesh(t, bins, pdata, norm=matplotlib.colors.LogNorm()) - ax.set_ylim([0., max_freq]) - ax.set_ylabel('frequency (hz)') - ax.set_xlabel('Time (s)') - fig.tight_layout() - return fig - -###################################################################### -# CSV output -###################################################################### - -def write_frequency_response(datas, output): - helper = shaper_calibrate.ShaperCalibrate(printer=None) - calibration_data = helper.process_accelerometer_data(datas[0]) - for data in datas[1:]: - calibration_data.add_data(helper.process_accelerometer_data(data)) - helper.save_calibration_data(output, calibration_data) - -def write_specgram(psd, freq_bins, time, output): - M = freq_bins.shape[0] - with open(output, "w") as csvfile: - csvfile.write("freq\\t") - for ts in time: - csvfile.write(",%.6f" % (ts,)) - csvfile.write("\n") - for i in range(M): - csvfile.write("%.1f" % (freq_bins[i],)) - for value in psd[i,:]: - csvfile.write(",%.6e" % (value,)) - csvfile.write("\n") - -###################################################################### -# Startup -###################################################################### - -def is_csv_output(output): - return output and os.path.splitext(output)[1].lower() == '.csv' - -def setup_matplotlib(output): - global matplotlib - if is_csv_output(output): - # Only mlab may be necessary with CSV output - import matplotlib.mlab - return - if output: - matplotlib.rcParams.update({'figure.autolayout': True}) - matplotlib.use('Agg') - import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager - import matplotlib.ticker - -def main(): - # Parse command-line arguments - usage = "%prog [options] " - opts = optparse.OptionParser(usage) - opts.add_option("-o", "--output", type="string", dest="output", - default=None, help="filename of output graph") - opts.add_option("-f", "--max_freq", type="float", default=200., - help="maximum frequency to graph") - opts.add_option("-r", "--raw", action="store_true", - help="graph raw accelerometer data") - opts.add_option("-c", "--compare", action="store_true", - help="graph comparison of power spectral density " - "between different accelerometer data files") - opts.add_option("-s", "--specgram", action="store_true", - help="graph spectrogram of accelerometer data") - opts.add_option("-a", type="string", dest="axis", default="all", - help="axis to graph (one of 'all', 'x', 'y', or 'z')") - options, args = opts.parse_args() - if len(args) < 1: - opts.error("Incorrect number of arguments") - - # Parse data - datas = [parse_log(fn, opts) for fn in args] - - setup_matplotlib(options.output) - - if is_csv_output(options.output): - if options.raw: - opts.error("raw mode is not supported with csv output") - if options.compare: - opts.error("comparison mode is not 
supported with csv output") - if options.specgram: - if len(args) > 1: - opts.error("Only 1 input is supported in specgram mode") - pdata, bins, t = calc_specgram(datas[0], options.axis) - write_specgram(pdata, bins, t, options.output) - else: - write_frequency_response(datas, options.output) - return - - # Draw graph - if options.raw: - if len(args) > 1: - opts.error("Only 1 input is supported in raw mode") - fig = plot_accel(datas[0], args[0]) - elif options.specgram: - if len(args) > 1: - opts.error("Only 1 input is supported in specgram mode") - fig = plot_specgram(datas[0], args[0], options.max_freq, options.axis) - elif options.compare: - fig = plot_compare_frequency(datas, args, options.max_freq, - options.axis) - else: - fig = plot_frequency(datas, args, options.max_freq) - - # Show graph - if options.output is None: - matplotlib.pyplot.show() - else: - fig.set_size_inches(8, 6) - fig.savefig(options.output) - -if __name__ == '__main__': - main() diff --git a/scripts/graph_extruder.py b/scripts/graph_extruder.py deleted file mode 100644 index 6f31ed3..0000000 --- a/scripts/graph_extruder.py +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env python -# Generate extruder pressure advance motion graphs -# -# Copyright (C) 2019-2021 Kevin O'Connor -# -# This file may be distributed under the terms of the GNU GPLv3 license. -import math, optparse, datetime -import matplotlib - -SEG_TIME = .000100 -INV_SEG_TIME = 1. / SEG_TIME - - -###################################################################### -# Basic trapezoid motion -###################################################################### - -# List of moves: [(start_v, end_v, move_t), ...] -Moves = [ - (0., 0., .100), - (0., 100., None), (100., 100., .200), (100., 60., None), - (60., 100., None), (100., 100., .200), (100., 0., None), - (0., 0., .300) -] -EXTRUDE_R = (.4 * .4 * .75) / (math.pi * (1.75 / 2.)**2) -ACCEL = 3000. * EXTRUDE_R - -def gen_positions(): - out = [] - start_d = start_t = t = 0. - for start_v, end_v, move_t in Moves: - start_v *= EXTRUDE_R - end_v *= EXTRUDE_R - if move_t is None: - move_t = abs(end_v - start_v) / ACCEL - half_accel = 0. - if end_v > start_v: - half_accel = .5 * ACCEL - elif start_v > end_v: - half_accel = -.5 * ACCEL - end_t = start_t + move_t - while t <= end_t: - rel_t = t - start_t - out.append(start_d + (start_v + half_accel * rel_t) * rel_t) - t += SEG_TIME - start_d += (start_v + half_accel * move_t) * move_t - start_t = end_t - return out - - -###################################################################### -# List helper functions -###################################################################### - -MARGIN_TIME = 0.050 - -def time_to_index(t): - return int(t * INV_SEG_TIME + .5) - -def indexes(positions): - drop = time_to_index(MARGIN_TIME) - return range(drop, len(positions)-drop) - -def trim_lists(*lists): - keep = len(lists[0]) - time_to_index(2. * MARGIN_TIME) - for l in lists: - del l[keep:] - - -###################################################################### -# Common data filters -###################################################################### - -# Generate estimated first order derivative -def gen_deriv(data): - return [0.] + [(data[i+1] - data[i]) * INV_SEG_TIME - for i in range(len(data)-1)] - -# Simple average between two points smooth_time away -def calc_average(positions, smooth_time): - offset = time_to_index(smooth_time * .5) - out = [0.] 
* len(positions) - for i in indexes(positions): - out[i] = .5 * (positions[i-offset] + positions[i+offset]) - return out - -# Average (via integration) of smooth_time range -def calc_smooth(positions, smooth_time): - offset = time_to_index(smooth_time * .5) - weight = 1. / (2*offset - 1) - out = [0.] * len(positions) - for i in indexes(positions): - out[i] = sum(positions[i-offset+1:i+offset]) * weight - return out - -# Time weighted average (via integration) of smooth_time range -def calc_weighted(positions, smooth_time): - offset = time_to_index(smooth_time * .5) - weight = 1. / offset**2 - out = [0.] * len(positions) - for i in indexes(positions): - weighted_data = [positions[j] * (offset - abs(j-i)) - for j in range(i-offset, i+offset)] - out[i] = sum(weighted_data) * weight - return out - - -###################################################################### -# Pressure advance -###################################################################### - -SMOOTH_TIME = .040 -PRESSURE_ADVANCE = .045 - -# Calculate raw pressure advance positions -def calc_pa_raw(positions): - pa = PRESSURE_ADVANCE * INV_SEG_TIME - out = [0.] * len(positions) - for i in indexes(positions): - out[i] = positions[i] + pa * (positions[i+1] - positions[i]) - return out - -# Pressure advance after smoothing -def calc_pa(positions): - return calc_weighted(calc_pa_raw(positions), SMOOTH_TIME) - - -###################################################################### -# Plotting and startup -###################################################################### - -def plot_motion(): - # Nominal motion - positions = gen_positions() - velocities = gen_deriv(positions) - accels = gen_deriv(velocities) - # Motion with pressure advance - pa_positions = calc_pa_raw(positions) - pa_velocities = gen_deriv(pa_positions) - # Smoothed motion - sm_positions = calc_pa(positions) - sm_velocities = gen_deriv(sm_positions) - # Build plot - times = [SEG_TIME * i for i in range(len(positions))] - trim_lists(times, velocities, accels, - pa_positions, pa_velocities, - sm_positions, sm_velocities) - fig, ax1 = matplotlib.pyplot.subplots(nrows=1, sharex=True) - ax1.set_title("Extruder Velocity") - ax1.set_ylabel('Velocity (mm/s)') - pa_plot, = ax1.plot(times, pa_velocities, 'r', - label='Pressure Advance', alpha=0.3) - nom_plot, = ax1.plot(times, velocities, 'black', label='Nominal') - sm_plot, = ax1.plot(times, sm_velocities, 'g', label='Smooth PA', alpha=0.9) - fontP = matplotlib.font_manager.FontProperties() - fontP.set_size('x-small') - ax1.legend(handles=[nom_plot, pa_plot, sm_plot], loc='best', prop=fontP) - ax1.set_xlabel('Time (s)') - ax1.grid(True) - fig.tight_layout() - return fig - -def setup_matplotlib(output_to_file): - global matplotlib - if output_to_file: - matplotlib.rcParams.update({'figure.autolayout': True}) - matplotlib.use('Agg') - import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager - import matplotlib.ticker - -def main(): - # Parse command-line arguments - usage = "%prog [options]" - opts = optparse.OptionParser(usage) - opts.add_option("-o", "--output", type="string", dest="output", - default=None, help="filename of output graph") - options, args = opts.parse_args() - if len(args) != 0: - opts.error("Incorrect number of arguments") - - # Draw graph - setup_matplotlib(options.output is not None) - fig = plot_motion() - - # Show graph - if options.output is None: - matplotlib.pyplot.show() - else: - fig.set_size_inches(6, 2.5) - fig.savefig(options.output) - -if __name__ == '__main__': - main() 
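Editor's note: the deleted graph_extruder.py above models pressure advance by adding PRESSURE_ADVANCE times the instantaneous extrusion velocity to the nominal filament position (calc_pa_raw) and then smoothing the result with a time-weighted average over SMOOTH_TIME (calc_weighted). The sketch below restates only the raw transform outside the patch; the constants mirror the script, while the sample velocity is a hypothetical value chosen for illustration.

# Minimal sketch (not part of the original patch) of the raw pressure-advance
# transform from graph_extruder.py: advance each position sample by
# PRESSURE_ADVANCE times the discrete extrusion velocity.
SEG_TIME = 0.000100          # sample interval used by the script (seconds)
PRESSURE_ADVANCE = 0.045     # pressure advance constant (seconds)

def apply_pressure_advance(positions):
    # positions: filament position sampled every SEG_TIME seconds
    scale = PRESSURE_ADVANCE / SEG_TIME
    return [p + scale * (nxt - p) for p, nxt in zip(positions, positions[1:])]

# Example: a constant-velocity segment is simply shifted by PRESSURE_ADVANCE * velocity
velocity = 2.5               # hypothetical filament velocity (mm/s)
pos = [velocity * SEG_TIME * i for i in range(5)]
print(apply_pressure_advance(pos))  # each sample offset by ~0.045 * 2.5 mm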
diff --git a/scripts/graph_motion.py b/scripts/graph_motion.py deleted file mode 100644 index 0520343..0000000 --- a/scripts/graph_motion.py +++ /dev/null @@ -1,427 +0,0 @@ -#!/usr/bin/env python -# Script to graph motion results -# -# Copyright (C) 2019-2021 Kevin O'Connor -# Copyright (C) 2020 Dmitry Butyugin -# -# This file may be distributed under the terms of the GNU GPLv3 license. -import optparse, datetime, math -import matplotlib - -SEG_TIME = .000100 -INV_SEG_TIME = 1. / SEG_TIME - -SPRING_FREQ=35.0 -DAMPING_RATIO=0.05 - -CONFIG_FREQ=40.0 -CONFIG_DAMPING_RATIO=0.1 - -###################################################################### -# Basic trapezoid motion -###################################################################### - -# List of moves: [(start_v, end_v, move_t), ...] -Moves = [ - (0., 0., .100), - (6.869, 89.443, None), (89.443, 89.443, .120), (89.443, 17.361, None), - (19.410, 120., None), (120., 120., .130), (120., 5., None), - (0., 0., 0.01), - (-5., -100., None), (-100., -100., .100), (-100., -.5, None), - (0., 0., .200) -] -ACCEL = 3000. -MAX_JERK = ACCEL * 0.6 * SPRING_FREQ - -def get_accel(start_v, end_v): - return ACCEL - -def get_accel_jerk_limit(start_v, end_v): - effective_accel = math.sqrt(MAX_JERK * abs(end_v - start_v) / 6.) - return min(effective_accel, ACCEL) - -# Standard constant acceleration generator -def get_acc_pos_ao2(rel_t, start_v, accel, move_t): - return (start_v + 0.5 * accel * rel_t) * rel_t - -# Bezier curve "accel_order=4" generator -def get_acc_pos_ao4(rel_t, start_v, accel, move_t): - inv_accel_t = 1. / move_t - accel_div_accel_t = accel * inv_accel_t - accel_div_accel_t2 = accel_div_accel_t * inv_accel_t - - c4 = -.5 * accel_div_accel_t2; - c3 = accel_div_accel_t; - c1 = start_v - return ((c4 * rel_t + c3) * rel_t * rel_t + c1) * rel_t - -# Bezier curve "accel_order=6" generator -def get_acc_pos_ao6(rel_t, start_v, accel, move_t): - inv_accel_t = 1. / move_t - accel_div_accel_t = accel * inv_accel_t - accel_div_accel_t2 = accel_div_accel_t * inv_accel_t - accel_div_accel_t3 = accel_div_accel_t2 * inv_accel_t - accel_div_accel_t4 = accel_div_accel_t3 * inv_accel_t - - c6 = accel_div_accel_t4; - c5 = -3. * accel_div_accel_t3; - c4 = 2.5 * accel_div_accel_t2; - c1 = start_v; - return (((c6 * rel_t + c5) * rel_t + c4) - * rel_t * rel_t * rel_t + c1) * rel_t - -get_acc_pos = get_acc_pos_ao2 -get_acc = get_accel - -# Calculate positions based on 'Moves' list -def gen_positions(): - out = [] - start_d = start_t = t = 0. - for start_v, end_v, move_t in Moves: - if move_t is None: - move_t = abs(end_v - start_v) / get_acc(start_v, end_v) - accel = (end_v - start_v) / move_t - end_t = start_t + move_t - while t <= end_t: - rel_t = t - start_t - out.append(start_d + get_acc_pos(rel_t, start_v, accel, move_t)) - t += SEG_TIME - start_d += get_acc_pos(move_t, start_v, accel, move_t) - start_t = end_t - return out - - -###################################################################### -# Estimated motion with belt as spring -###################################################################### - -def estimate_spring(positions): - ang_freq2 = (SPRING_FREQ * 2. * math.pi)**2 - damping_factor = 4. * math.pi * DAMPING_RATIO * SPRING_FREQ - head_pos = head_v = 0. 
- out = [] - for stepper_pos in positions: - head_pos += head_v * SEG_TIME - head_a = (stepper_pos - head_pos) * ang_freq2 - head_v += head_a * SEG_TIME - head_v -= head_v * damping_factor * SEG_TIME - out.append(head_pos) - return out - - -###################################################################### -# List helper functions -###################################################################### - -MARGIN_TIME = 0.050 - -def time_to_index(t): - return int(t * INV_SEG_TIME + .5) - -def indexes(positions): - drop = time_to_index(MARGIN_TIME) - return range(drop, len(positions)-drop) - -def trim_lists(*lists): - keep = len(lists[0]) - time_to_index(2. * MARGIN_TIME) - for l in lists: - del l[keep:] - - -###################################################################### -# Common data filters -###################################################################### - -# Generate estimated first order derivative -def gen_deriv(data): - return [0.] + [(data[i+1] - data[i]) * INV_SEG_TIME - for i in range(len(data)-1)] - -# Simple average between two points smooth_time away -def calc_average(positions, smooth_time): - offset = time_to_index(smooth_time * .5) - out = [0.] * len(positions) - for i in indexes(positions): - out[i] = .5 * (positions[i-offset] + positions[i+offset]) - return out - -# Average (via integration) of smooth_time range -def calc_smooth(positions, smooth_time): - offset = time_to_index(smooth_time * .5) - weight = 1. / (2*offset - 1) - out = [0.] * len(positions) - for i in indexes(positions): - out[i] = sum(positions[i-offset+1:i+offset]) * weight - return out - -# Time weighted average (via integration) of smooth_time range -def calc_weighted(positions, smooth_time): - offset = time_to_index(smooth_time * .5) - weight = 1. / offset**2 - out = [0.] * len(positions) - for i in indexes(positions): - weighted_data = [positions[j] * (offset - abs(j-i)) - for j in range(i-offset, i+offset)] - out[i] = sum(weighted_data) * weight - return out - -# Weighted average (`h**2 - (t-T)**2`) of smooth_time range -def calc_weighted2(positions, smooth_time): - offset = time_to_index(smooth_time * .5) - weight = .75 / offset**3 - out = [0.] * len(positions) - for i in indexes(positions): - weighted_data = [positions[j] * (offset**2 - (j-i)**2) - for j in range(i-offset, i+offset)] - out[i] = sum(weighted_data) * weight - return out - -# Weighted average (`(h**2 - (t-T)**2)**2`) of smooth_time range -def calc_weighted4(positions, smooth_time): - offset = time_to_index(smooth_time * .5) - weight = 15 / (16. * offset**5) - out = [0.] * len(positions) - for i in indexes(positions): - weighted_data = [positions[j] * ((offset**2 - (j-i)**2))**2 - for j in range(i-offset, i+offset)] - out[i] = sum(weighted_data) * weight - return out - -# Weighted average (`(h - abs(t-T))**2 * (2 * abs(t-T) + h)`) of range -def calc_weighted3(positions, smooth_time): - offset = time_to_index(smooth_time * .5) - weight = 1. / offset**4 - out = [0.] * len(positions) - for i in indexes(positions): - weighted_data = [positions[j] * (offset - abs(j-i))**2 - * (2. * abs(j-i) + offset) - for j in range(i-offset, i+offset)] - out[i] = sum(weighted_data) * weight - return out - - -###################################################################### -# Spring motion estimation -###################################################################### - -def calc_spring_raw(positions): - sa = (INV_SEG_TIME / (CONFIG_FREQ * 2. * math.pi))**2 - ra = 2. * CONFIG_DAMPING_RATIO * math.sqrt(sa) - out = [0.] 
* len(positions) - for i in indexes(positions): - out[i] = (positions[i] - + sa * (positions[i-1] - 2.*positions[i] + positions[i+1]) - + ra * (positions[i+1] - positions[i])) - return out - -def calc_spring_double_weighted(positions, smooth_time): - offset = time_to_index(smooth_time * .25) - sa = (INV_SEG_TIME / (offset * CONFIG_FREQ * 2. * math.pi))**2 - ra = 2. * CONFIG_DAMPING_RATIO * math.sqrt(sa) - out = [0.] * len(positions) - for i in indexes(positions): - out[i] = (positions[i] - + sa * (positions[i-offset] - 2.*positions[i] - + positions[i+offset]) - + ra * (positions[i+1] - positions[i])) - return calc_weighted(out, smooth_time=.5 * smooth_time) - -###################################################################### -# Input shapers -###################################################################### - -def get_zv_shaper(): - df = math.sqrt(1. - CONFIG_DAMPING_RATIO**2) - K = math.exp(-CONFIG_DAMPING_RATIO * math.pi / df) - t_d = 1. / (CONFIG_FREQ * df) - A = [1., K] - T = [0., .5*t_d] - return (A, T, "ZV") - -def get_zvd_shaper(): - df = math.sqrt(1. - CONFIG_DAMPING_RATIO**2) - K = math.exp(-CONFIG_DAMPING_RATIO * math.pi / df) - t_d = 1. / (CONFIG_FREQ * df) - A = [1., 2.*K, K**2] - T = [0., .5*t_d, t_d] - return (A, T, "ZVD") - -def get_mzv_shaper(): - df = math.sqrt(1. - CONFIG_DAMPING_RATIO**2) - K = math.exp(-.75 * CONFIG_DAMPING_RATIO * math.pi / df) - t_d = 1. / (CONFIG_FREQ * df) - - a1 = 1. - 1. / math.sqrt(2.) - a2 = (math.sqrt(2.) - 1.) * K - a3 = a1 * K * K - - A = [a1, a2, a3] - T = [0., .375*t_d, .75*t_d] - return (A, T, "MZV") - -def get_ei_shaper(): - v_tol = 0.05 # vibration tolerance - df = math.sqrt(1. - CONFIG_DAMPING_RATIO**2) - K = math.exp(-CONFIG_DAMPING_RATIO * math.pi / df) - t_d = 1. / (CONFIG_FREQ * df) - - a1 = .25 * (1. + v_tol) - a2 = .5 * (1. - v_tol) * K - a3 = a1 * K * K - - A = [a1, a2, a3] - T = [0., .5*t_d, t_d] - return (A, T, "EI") - -def get_2hump_ei_shaper(): - v_tol = 0.05 # vibration tolerance - df = math.sqrt(1. - CONFIG_DAMPING_RATIO**2) - K = math.exp(-CONFIG_DAMPING_RATIO * math.pi / df) - t_d = 1. / (CONFIG_FREQ * df) - - V2 = v_tol**2 - X = pow(V2 * (math.sqrt(1. - V2) + 1.), 1./3.) - a1 = (3.*X*X + 2.*X + 3.*V2) / (16.*X) - a2 = (.5 - a1) * K - a3 = a2 * K - a4 = a1 * K * K * K - - A = [a1, a2, a3, a4] - T = [0., .5*t_d, t_d, 1.5*t_d] - return (A, T, "2-hump EI") - -def get_3hump_ei_shaper(): - v_tol = 0.05 # vibration tolerance - df = math.sqrt(1. - CONFIG_DAMPING_RATIO**2) - K = math.exp(-CONFIG_DAMPING_RATIO * math.pi / df) - t_d = 1. / (CONFIG_FREQ * df) - - K2 = K*K - a1 = 0.0625 * (1. + 3. * v_tol + 2. * math.sqrt(2. * (v_tol + 1.) * v_tol)) - a2 = 0.25 * (1. - v_tol) * K - a3 = (0.5 * (1. + v_tol) - 2. * a1) * K2 - a4 = a2 * K2 - a5 = a1 * K2 * K2 - - A = [a1, a2, a3, a4, a5] - T = [0., .5*t_d, t_d, 1.5*t_d, 2.*t_d] - return (A, T, "3-hump EI") - - -def shift_pulses(shaper): - A, T, name = shaper - n = len(T) - ts = (sum([A[i] * T[i] for i in range(n)])) / sum(A) - for i in range(n): - T[i] -= ts - -def calc_shaper(shaper, positions): - shift_pulses(shaper) - A = shaper[0] - inv_D = 1. / sum(A) - n = len(A) - T = [time_to_index(-shaper[1][j]) for j in range(n)] - out = [0.] * len(positions) - for i in indexes(positions): - out[i] = sum([positions[i + T[j]] * A[j] for j in range(n)]) * inv_D - return out - -# Ideal values -SMOOTH_TIME = (2./3.) 
/ CONFIG_FREQ - -def gen_updated_position(positions): - #return calc_weighted(positions, 0.040) - #return calc_spring_double_weighted(positions, SMOOTH_TIME) - #return calc_weighted4(calc_spring_raw(positions), SMOOTH_TIME) - return calc_shaper(get_ei_shaper(), positions) - - -###################################################################### -# Plotting and startup -###################################################################### - -def plot_motion(): - # Nominal motion - positions = gen_positions() - velocities = gen_deriv(positions) - accels = gen_deriv(velocities) - # Updated motion - upd_positions = gen_updated_position(positions) - upd_velocities = gen_deriv(upd_positions) - upd_accels = gen_deriv(upd_velocities) - # Estimated position with model of belt as spring - spring_orig = estimate_spring(positions) - spring_upd = estimate_spring(upd_positions) - spring_diff_orig = [n-o for n, o in zip(spring_orig, positions)] - spring_diff_upd = [n-o for n, o in zip(spring_upd, positions)] - head_velocities = gen_deriv(spring_orig) - head_accels = gen_deriv(head_velocities) - head_upd_velocities = gen_deriv(spring_upd) - head_upd_accels = gen_deriv(head_upd_velocities) - # Build plot - times = [SEG_TIME * i for i in range(len(positions))] - trim_lists(times, velocities, accels, - upd_velocities, upd_velocities, upd_accels, - spring_diff_orig, spring_diff_upd, - head_velocities, head_upd_velocities, - head_accels, head_upd_accels) - fig, (ax1, ax2, ax3) = matplotlib.pyplot.subplots(nrows=3, sharex=True) - ax1.set_title("Simulation: resonance freq=%.1f Hz, damping_ratio=%.3f,\n" - "configured freq=%.1f Hz, damping_ratio = %.3f" - % (SPRING_FREQ, DAMPING_RATIO, CONFIG_FREQ - , CONFIG_DAMPING_RATIO)) - ax1.set_ylabel('Velocity (mm/s)') - ax1.plot(times, upd_velocities, 'r', label='New Velocity', alpha=0.8) - ax1.plot(times, velocities, 'g', label='Nominal Velocity', alpha=0.8) - ax1.plot(times, head_velocities, label='Head Velocity', alpha=0.4) - ax1.plot(times, head_upd_velocities, label='New Head Velocity', alpha=0.4) - fontP = matplotlib.font_manager.FontProperties() - fontP.set_size('x-small') - ax1.legend(loc='best', prop=fontP) - ax1.grid(True) - ax2.set_ylabel('Acceleration (mm/s^2)') - ax2.plot(times, upd_accels, 'r', label='New Accel', alpha=0.8) - ax2.plot(times, accels, 'g', label='Nominal Accel', alpha=0.8) - ax2.plot(times, head_accels, alpha=0.4) - ax2.plot(times, head_upd_accels, alpha=0.4) - ax2.set_ylim([-5. * ACCEL, 5. 
* ACCEL]) - ax2.legend(loc='best', prop=fontP) - ax2.grid(True) - ax3.set_ylabel('Deviation (mm)') - ax3.plot(times, spring_diff_upd, 'r', label='New', alpha=0.8) - ax3.plot(times, spring_diff_orig, 'g', label='Nominal', alpha=0.8) - ax3.grid(True) - ax3.legend(loc='best', prop=fontP) - ax3.set_xlabel('Time (s)') - return fig - -def setup_matplotlib(output_to_file): - global matplotlib - if output_to_file: - matplotlib.use('Agg') - import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager - import matplotlib.ticker - -def main(): - # Parse command-line arguments - usage = "%prog [options]" - opts = optparse.OptionParser(usage) - opts.add_option("-o", "--output", type="string", dest="output", - default=None, help="filename of output graph") - options, args = opts.parse_args() - if len(args) != 0: - opts.error("Incorrect number of arguments") - - # Draw graph - setup_matplotlib(options.output is not None) - fig = plot_motion() - - # Show graph - if options.output is None: - matplotlib.pyplot.show() - else: - fig.set_size_inches(8, 6) - fig.savefig(options.output) - -if __name__ == '__main__': - main() diff --git a/scripts/graph_shaper.py b/scripts/graph_shaper.py deleted file mode 100644 index 0ea1945..0000000 --- a/scripts/graph_shaper.py +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/env python -# Script to plot input shapers -# -# Copyright (C) 2020 Kevin O'Connor -# Copyright (C) 2020 Dmitry Butyugin -# -# This file may be distributed under the terms of the GNU GPLv3 license. -import optparse, math -import matplotlib - -# A set of damping ratios to calculate shaper response for -DAMPING_RATIOS=[0.05, 0.1, 0.2] - -# Parameters of the input shaper -SHAPER_FREQ=50.0 -SHAPER_DAMPING_RATIO=0.1 - -# Simulate input shaping of step function for these true resonance frequency -# and damping ratio -STEP_SIMULATION_RESONANCE_FREQ=60. -STEP_SIMULATION_DAMPING_RATIO=0.15 - -# If set, defines which range of frequencies to plot shaper frequency responce -PLOT_FREQ_RANGE = [] # If empty, will be automatically determined -#PLOT_FREQ_RANGE = [10., 100.] - -PLOT_FREQ_STEP = .01 - -###################################################################### -# Input shapers -###################################################################### - -def get_zv_shaper(): - df = math.sqrt(1. - SHAPER_DAMPING_RATIO**2) - K = math.exp(-SHAPER_DAMPING_RATIO * math.pi / df) - t_d = 1. / (SHAPER_FREQ * df) - A = [1., K] - T = [0., .5*t_d] - return (A, T, "ZV") - -def get_zvd_shaper(): - df = math.sqrt(1. - SHAPER_DAMPING_RATIO**2) - K = math.exp(-SHAPER_DAMPING_RATIO * math.pi / df) - t_d = 1. / (SHAPER_FREQ * df) - A = [1., 2.*K, K**2] - T = [0., .5*t_d, t_d] - return (A, T, "ZVD") - -def get_mzv_shaper(): - df = math.sqrt(1. - SHAPER_DAMPING_RATIO**2) - K = math.exp(-.75 * SHAPER_DAMPING_RATIO * math.pi / df) - t_d = 1. / (SHAPER_FREQ * df) - - a1 = 1. - 1. / math.sqrt(2.) - a2 = (math.sqrt(2.) - 1.) * K - a3 = a1 * K * K - - A = [a1, a2, a3] - T = [0., .375*t_d, .75*t_d] - return (A, T, "MZV") - -def get_ei_shaper(): - v_tol = 0.05 # vibration tolerance - df = math.sqrt(1. - SHAPER_DAMPING_RATIO**2) - K = math.exp(-SHAPER_DAMPING_RATIO * math.pi / df) - t_d = 1. / (SHAPER_FREQ * df) - - a1 = .25 * (1. + v_tol) - a2 = .5 * (1. - v_tol) * K - a3 = a1 * K * K - - A = [a1, a2, a3] - T = [0., .5*t_d, t_d] - return (A, T, "EI") - -def get_2hump_ei_shaper(): - v_tol = 0.05 # vibration tolerance - df = math.sqrt(1. - SHAPER_DAMPING_RATIO**2) - K = math.exp(-SHAPER_DAMPING_RATIO * math.pi / df) - t_d = 1. 
/ (SHAPER_FREQ * df) - - V2 = v_tol**2 - X = pow(V2 * (math.sqrt(1. - V2) + 1.), 1./3.) - a1 = (3.*X*X + 2.*X + 3.*V2) / (16.*X) - a2 = (.5 - a1) * K - a3 = a2 * K - a4 = a1 * K * K * K - - A = [a1, a2, a3, a4] - T = [0., .5*t_d, t_d, 1.5*t_d] - return (A, T, "2-hump EI") - -def get_3hump_ei_shaper(): - v_tol = 0.05 # vibration tolerance - df = math.sqrt(1. - SHAPER_DAMPING_RATIO**2) - K = math.exp(-SHAPER_DAMPING_RATIO * math.pi / df) - t_d = 1. / (SHAPER_FREQ * df) - - K2 = K*K - a1 = 0.0625 * (1. + 3. * v_tol + 2. * math.sqrt(2. * (v_tol + 1.) * v_tol)) - a2 = 0.25 * (1. - v_tol) * K - a3 = (0.5 * (1. + v_tol) - 2. * a1) * K2 - a4 = a2 * K2 - a5 = a1 * K2 * K2 - - A = [a1, a2, a3, a4, a5] - T = [0., .5*t_d, t_d, 1.5*t_d, 2.*t_d] - return (A, T, "3-hump EI") - - -def estimate_shaper(shaper, freq, damping_ratio): - A, T, _ = shaper - n = len(T) - inv_D = 1. / sum(A) - omega = 2. * math.pi * freq - damping = damping_ratio * omega - omega_d = omega * math.sqrt(1. - damping_ratio**2) - S = C = 0 - for i in range(n): - W = A[i] * math.exp(-damping * (T[-1] - T[i])) - S += W * math.sin(omega_d * T[i]) - C += W * math.cos(omega_d * T[i]) - return math.sqrt(S*S + C*C) * inv_D - -def shift_pulses(shaper): - A, T, name = shaper - n = len(T) - ts = sum([A[i] * T[i] for i in range(n)]) / sum(A) - for i in range(n): - T[i] -= ts - -# Shaper selection -get_shaper = get_ei_shaper - - -###################################################################### -# Plotting and startup -###################################################################### - -def bisect(func, left, right): - lhs_sign = math.copysign(1., func(left)) - while right-left > 1e-8: - mid = .5 * (left + right) - val = func(mid) - if math.copysign(1., val) == lhs_sign: - left = mid - else: - right = mid - return .5 * (left + right) - -def find_shaper_plot_range(shaper, vib_tol): - def eval_shaper(freq): - return estimate_shaper(shaper, freq, DAMPING_RATIOS[0]) - vib_tol - if not PLOT_FREQ_RANGE: - left = bisect(eval_shaper, 0., SHAPER_FREQ) - right = bisect(eval_shaper, SHAPER_FREQ, 2.4 * SHAPER_FREQ) - else: - left, right = PLOT_FREQ_RANGE - return (left, right) - -def gen_shaper_response(shaper): - # Calculate shaper vibration responce on a range of requencies - response = [] - freqs = [] - freq, freq_end = find_shaper_plot_range(shaper, vib_tol=0.25) - while freq <= freq_end: - vals = [] - for damping_ratio in DAMPING_RATIOS: - vals.append(estimate_shaper(shaper, freq, damping_ratio)) - response.append(vals) - freqs.append(freq) - freq += PLOT_FREQ_STEP - legend = ['damping ratio = %.3f' % d_r for d_r in DAMPING_RATIOS] - return freqs, response, legend - -def gen_shaped_step_function(shaper): - # Calculate shaping of a step function - A, T, _ = shaper - inv_D = 1. / sum(A) - n = len(T) - - omega = 2. * math.pi * STEP_SIMULATION_RESONANCE_FREQ - damping = STEP_SIMULATION_DAMPING_RATIO * omega - omega_d = omega * math.sqrt(1. - STEP_SIMULATION_DAMPING_RATIO**2) - phase = math.acos(STEP_SIMULATION_DAMPING_RATIO) - - t_start = T[0] - .5 / SHAPER_FREQ - t_end = T[-1] + 1.5 / STEP_SIMULATION_RESONANCE_FREQ - result = [] - time = [] - t = t_start - - def step_response(t): - if t < 0.: - return 0. - return 1. - math.exp(-damping * t) * math.sin(omega_d * t - + phase) / math.sin(phase) - - while t <= t_end: - val = [] - val.append(1. if t >= 0. else 0.) - #val.append(step_response(t)) - - commanded = 0. - response = 0. 
- S = C = 0 - for i in range(n): - if t < T[i]: - continue - commanded += A[i] - response += A[i] * step_response(t - T[i]) - val.append(commanded * inv_D) - val.append(response * inv_D) - - result.append(val) - time.append(t) - t += .01 / SHAPER_FREQ - legend = ['step', 'shaper commanded', 'system response'] - return time, result, legend - - -def plot_shaper(shaper): - shift_pulses(shaper) - freqs, response, response_legend = gen_shaper_response(shaper) - time, step_vals, step_legend = gen_shaped_step_function(shaper) - - fig, (ax1, ax2) = matplotlib.pyplot.subplots(nrows=2, figsize=(10,9)) - ax1.set_title("Vibration response simulation for shaper '%s',\n" - "shaper_freq=%.1f Hz, damping_ratio=%.3f" - % (shaper[-1], SHAPER_FREQ, SHAPER_DAMPING_RATIO)) - ax1.plot(freqs, response) - ax1.set_ylim(bottom=0.) - fontP = matplotlib.font_manager.FontProperties() - fontP.set_size('x-small') - ax1.legend(response_legend, loc='best', prop=fontP) - ax1.set_xlabel('Resonance frequency, Hz') - ax1.set_ylabel('Remaining vibrations, ratio') - ax1.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator()) - ax1.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator()) - ax1.grid(which='major', color='grey') - ax1.grid(which='minor', color='lightgrey') - - ax2.set_title("Unit step input, resonance frequency=%.1f Hz, " - "damping ratio=%.3f" % (STEP_SIMULATION_RESONANCE_FREQ, - STEP_SIMULATION_DAMPING_RATIO)) - ax2.plot(time, step_vals) - ax2.legend(step_legend, loc='best', prop=fontP) - ax2.set_xlabel('Time, sec') - ax2.set_ylabel('Amplitude') - ax2.grid() - fig.tight_layout() - return fig - -def setup_matplotlib(output_to_file): - global matplotlib - if output_to_file: - matplotlib.use('Agg') - import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager - import matplotlib.ticker - -def main(): - # Parse command-line arguments - usage = "%prog [options]" - opts = optparse.OptionParser(usage) - opts.add_option("-o", "--output", type="string", dest="output", - default=None, help="filename of output graph") - options, args = opts.parse_args() - if len(args) != 0: - opts.error("Incorrect number of arguments") - - # Draw graph - setup_matplotlib(options.output is not None) - fig = plot_shaper(get_shaper()) - - # Show graph - if options.output is None: - matplotlib.pyplot.show() - else: - fig.set_size_inches(8, 6) - fig.savefig(options.output) - -if __name__ == '__main__': - main() diff --git a/scripts/graph_temp_sensor.py b/scripts/graph_temp_sensor.py deleted file mode 100644 index c1d1852..0000000 --- a/scripts/graph_temp_sensor.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env python2 -# Tool to graph temperature sensor ADC resolution -# -# Copyright (C) 2020 Kevin O'Connor -# -# This file may be distributed under the terms of the GNU GPLv3 license. -import sys, os, optparse -import matplotlib - - -###################################################################### -# Dummy config / printer / etc. 
class emulation -###################################################################### - -class DummyConfig: - def __init__(self, config_settings): - self.config_settings = config_settings - self.sensor_factories = {} - # Emulate config class - def getfloat(self, option, default, **kw): - return self.config_settings.get(option, default) - def get(self, option, default=None): - return default - def get_printer(self): - return self - def get_name(self): - return "dummy" - # Emulate printer class - def load_object(self, config, name): - return self - def lookup_object(self, name): - return self - # Emulate heaters class - def add_sensor_factory(self, name, factory): - self.sensor_factories[name] = factory - def do_create_sensor(self, sensor_type): - return self.sensor_factories[sensor_type](self).adc_convert - # Emulate query_adc class - def register_adc(self, name, klass): - pass - # Emulate pins class - def setup_pin(self, pin_type, pin_name): - return self - # Emulate mcu_adc class - def setup_adc_callback(self, time, callback): - pass - - -###################################################################### -# Plotting -###################################################################### - -def plot_adc_resolution(config, sensors): - # Temperature list - all_temps = [float(i) for i in range(1, 351)] - temps = all_temps[:-1] - # Build plot - fig, (ax1, ax2) = matplotlib.pyplot.subplots(nrows=2, sharex=True) - pullup = config.getfloat('pullup_resistor', 0.) - adc_voltage = config.getfloat('adc_voltage', 0.) - ax1.set_title("Temperature Sensor (pullup=%.0f, adc_voltage=%.3f)" - % (pullup, adc_voltage)) - ax1.set_ylabel('ADC') - ax2.set_ylabel('ADC change per 1C') - for sensor in sensors: - sc = config.do_create_sensor(sensor) - adcs = [sc.calc_adc(t) for t in all_temps] - ax1.plot(temps, adcs[:-1], label=sensor, alpha=0.6) - adc_deltas = [abs(adcs[i+1] - adcs[i]) for i in range(len(temps))] - ax2.plot(temps, adc_deltas, alpha=0.6) - fontP = matplotlib.font_manager.FontProperties() - fontP.set_size('x-small') - ax1.legend(loc='best', prop=fontP) - ax2.set_xlabel('Temperature (C)') - ax1.grid(True) - ax2.grid(True) - fig.tight_layout() - return fig - -def plot_resistance(config, sensors): - # Temperature list - all_temps = [float(i) for i in range(1, 351)] - # Build plot - fig, ax = matplotlib.pyplot.subplots() - pullup = config.getfloat('pullup_resistor', 0.) 
- ax.set_title("Temperature Sensor (pullup=%.0f)" % (pullup,)) - ax.set_ylabel('Resistance (Ohms)') - for sensor in sensors: - sc = config.do_create_sensor(sensor) - adcs = [sc.calc_adc(t) for t in all_temps] - rs = [pullup * adc / (1.0 - adc) for adc in adcs] - ax.plot(all_temps, rs, label=sensor, alpha=0.6) - fontP = matplotlib.font_manager.FontProperties() - fontP.set_size('x-small') - ax.legend(loc='best', prop=fontP) - ax.set_xlabel('Temperature (C)') - ax.grid(True) - fig.tight_layout() - return fig - - -###################################################################### -# Startup -###################################################################### - -def setup_matplotlib(output_to_file): - global matplotlib - if output_to_file: - matplotlib.rcParams.update({'figure.autolayout': True}) - matplotlib.use('Agg') - import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager - import matplotlib.ticker - -def import_sensors(config): - global extras - # Load adc_temperature.py and thermistor.py modules - kdir = os.path.join(os.path.dirname(__file__), '..', 'klippy') - sys.path.append(kdir) - import extras.adc_temperature, extras.thermistor - extras.thermistor.load_config(config) - extras.adc_temperature.load_config(config) - -def main(): - # Parse command-line arguments - usage = "%prog [options]" - opts = optparse.OptionParser(usage) - opts.add_option("-o", "--output", type="string", dest="output", - default=None, help="filename of output graph") - opts.add_option("-p", "--pullup", type="float", dest="pullup", - default=4700., help="pullup resistor") - opts.add_option("-v", "--voltage", type="float", dest="voltage", - default=5., help="pullup resistor") - opts.add_option("-s", "--sensors", type="string", dest="sensors", - default="", help="list of sensors (comma separated)") - opts.add_option("-r", "--resistance", action="store_true", - help="graph sensor resistance") - options, args = opts.parse_args() - if len(args) != 0: - opts.error("Incorrect number of arguments") - - # Import sensors - config_settings = {'pullup_resistor': options.pullup, - 'adc_voltage': options.voltage} - config = DummyConfig(config_settings) - import_sensors(config) - - # Determine sensors to graph - if options.sensors: - sensors = [s.strip() for s in options.sensors.split(',')] - else: - sensors = sorted(config.sensor_factories.keys()) - - # Draw graph - setup_matplotlib(options.output is not None) - if options.resistance: - fig = plot_resistance(config, sensors) - else: - fig = plot_adc_resolution(config, sensors) - - # Show graph - if options.output is None: - matplotlib.pyplot.show() - else: - fig.set_size_inches(8, 6) - fig.savefig(options.output) - -if __name__ == '__main__': - main() diff --git a/scripts/graphstats.py b/scripts/graphstats.py deleted file mode 100644 index 7cd5296..0000000 --- a/scripts/graphstats.py +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/env python -# Script to parse a logging file, extract the stats, and graph them -# -# Copyright (C) 2016-2021 Kevin O'Connor -# -# This file may be distributed under the terms of the GNU GPLv3 license. -import optparse, datetime -import matplotlib - -MAXBANDWIDTH=25000. -MAXBUFFER=2. -STATS_INTERVAL=5. 
-TASK_MAX=0.0025 - -APPLY_PREFIX = [ - 'mcu_awake', 'mcu_task_avg', 'mcu_task_stddev', 'bytes_write', - 'bytes_read', 'bytes_retransmit', 'freq', 'adj', - 'target', 'temp', 'pwm' -] - -def parse_log(logname, mcu): - if mcu is None: - mcu = "mcu" - mcu_prefix = mcu + ":" - apply_prefix = { p: 1 for p in APPLY_PREFIX } - f = open(logname, 'r') - out = [] - for line in f: - parts = line.split() - if not parts or parts[0] not in ('Stats', 'INFO:root:Stats'): - #if parts and parts[0] == 'INFO:root:shutdown:': - # break - continue - prefix = "" - keyparts = {} - for p in parts[2:]: - if '=' not in p: - prefix = p - if prefix == mcu_prefix: - prefix = '' - continue - name, val = p.split('=', 1) - if name in apply_prefix: - name = prefix + name - keyparts[name] = val - if 'print_time' not in keyparts: - continue - keyparts['#sampletime'] = float(parts[1][:-1]) - out.append(keyparts) - f.close() - return out - -def setup_matplotlib(output_to_file): - global matplotlib - if output_to_file: - matplotlib.use('Agg') - import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager - import matplotlib.ticker - -def find_print_restarts(data): - runoff_samples = {} - last_runoff_start = last_buffer_time = last_sampletime = 0. - last_print_stall = 0 - for d in reversed(data): - # Check for buffer runoff - sampletime = d['#sampletime'] - buffer_time = float(d.get('buffer_time', 0.)) - if (last_runoff_start and last_sampletime - sampletime < 5 - and buffer_time > last_buffer_time): - runoff_samples[last_runoff_start][1].append(sampletime) - elif buffer_time < 1.: - last_runoff_start = sampletime - runoff_samples[last_runoff_start] = [False, [sampletime]] - else: - last_runoff_start = 0. - last_buffer_time = buffer_time - last_sampletime = sampletime - # Check for print stall - print_stall = int(d['print_stall']) - if print_stall < last_print_stall: - if last_runoff_start: - runoff_samples[last_runoff_start][0] = True - last_print_stall = print_stall - sample_resets = {sampletime: 1 for stall, samples in runoff_samples.values() - for sampletime in samples if not stall} - return sample_resets - -def plot_mcu(data, maxbw): - # Generate data for plot - basetime = lasttime = data[0]['#sampletime'] - lastbw = float(data[0]['bytes_write']) + float(data[0]['bytes_retransmit']) - sample_resets = find_print_restarts(data) - times = [] - bwdeltas = [] - loads = [] - awake = [] - hostbuffers = [] - for d in data: - st = d['#sampletime'] - timedelta = st - lasttime - if timedelta <= 0.: - continue - bw = float(d['bytes_write']) + float(d['bytes_retransmit']) - if bw < lastbw: - lastbw = bw - continue - load = float(d['mcu_task_avg']) + 3*float(d['mcu_task_stddev']) - if st - basetime < 15.: - load = 0. - pt = float(d['print_time']) - hb = float(d['buffer_time']) - if hb >= MAXBUFFER or st in sample_resets: - hb = 0. - else: - hb = 100. * (MAXBUFFER - hb) / MAXBUFFER - hostbuffers.append(hb) - times.append(datetime.datetime.utcfromtimestamp(st)) - bwdeltas.append(100. * (bw - lastbw) / (maxbw * timedelta)) - loads.append(100. * load / TASK_MAX) - awake.append(100. 
* float(d.get('mcu_awake', 0.)) / STATS_INTERVAL) - lasttime = st - lastbw = bw - - # Build plot - fig, ax1 = matplotlib.pyplot.subplots() - ax1.set_title("MCU bandwidth and load utilization") - ax1.set_xlabel('Time') - ax1.set_ylabel('Usage (%)') - ax1.plot_date(times, bwdeltas, 'g', label='Bandwidth', alpha=0.8) - ax1.plot_date(times, loads, 'r', label='MCU load', alpha=0.8) - ax1.plot_date(times, hostbuffers, 'c', label='Host buffer', alpha=0.8) - ax1.plot_date(times, awake, 'y', label='Awake time', alpha=0.6) - fontP = matplotlib.font_manager.FontProperties() - fontP.set_size('x-small') - ax1.legend(loc='best', prop=fontP) - ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M')) - ax1.grid(True) - return fig - -def plot_system(data): - # Generate data for plot - lasttime = data[0]['#sampletime'] - lastcputime = float(data[0]['cputime']) - times = [] - sysloads = [] - cputimes = [] - memavails = [] - for d in data: - st = d['#sampletime'] - timedelta = st - lasttime - if timedelta <= 0.: - continue - lasttime = st - times.append(datetime.datetime.utcfromtimestamp(st)) - cputime = float(d['cputime']) - cpudelta = max(0., min(1.5, (cputime - lastcputime) / timedelta)) - lastcputime = cputime - cputimes.append(cpudelta * 100.) - sysloads.append(float(d['sysload']) * 100.) - memavails.append(float(d['memavail'])) - - # Build plot - fig, ax1 = matplotlib.pyplot.subplots() - ax1.set_title("System load utilization") - ax1.set_xlabel('Time') - ax1.set_ylabel('Load (% of a core)') - ax1.plot_date(times, sysloads, '-', label='system load', - color='cyan', alpha=0.8) - ax1.plot_date(times, cputimes, '-', label='process time', - color='red', alpha=0.8) - ax2 = ax1.twinx() - ax2.set_ylabel('Available memory (KB)') - ax2.plot_date(times, memavails, '-', label='system memory', - color='yellow', alpha=0.3) - fontP = matplotlib.font_manager.FontProperties() - fontP.set_size('x-small') - ax1li, ax1la = ax1.get_legend_handles_labels() - ax2li, ax2la = ax2.get_legend_handles_labels() - ax1.legend(ax1li + ax2li, ax1la + ax2la, loc='best', prop=fontP) - ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M')) - ax1.grid(True) - return fig - -def plot_frequency(data, mcu): - all_keys = {} - for d in data: - all_keys.update(d) - one_mcu = mcu is not None - graph_keys = { key: ([], []) for key in all_keys - if (key in ("freq", "adj") or (not one_mcu and ( - key.endswith(":freq") or key.endswith(":adj")))) } - for d in data: - st = datetime.datetime.utcfromtimestamp(d['#sampletime']) - for key, (times, values) in graph_keys.items(): - val = d.get(key) - if val not in (None, '0', '1'): - times.append(st) - values.append(float(val)) - - # Build plot - fig, ax1 = matplotlib.pyplot.subplots() - if one_mcu: - ax1.set_title("MCU '%s' frequency" % (mcu,)) - else: - ax1.set_title("MCU frequency") - ax1.set_xlabel('Time') - ax1.set_ylabel('Frequency') - for key in sorted(graph_keys): - times, values = graph_keys[key] - ax1.plot_date(times, values, '.', label=key) - fontP = matplotlib.font_manager.FontProperties() - fontP.set_size('x-small') - ax1.legend(loc='best', prop=fontP) - ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M')) - ax1.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%d')) - ax1.grid(True) - return fig - -def plot_temperature(data, heaters): - fig, ax1 = matplotlib.pyplot.subplots() - ax2 = ax1.twinx() - for heater in heaters.split(','): - heater = heater.strip() - temp_key = heater + ':' + 'temp' - target_key = heater + ':' + 'target' - 
pwm_key = heater + ':' + 'pwm' - times = [] - temps = [] - targets = [] - pwm = [] - for d in data: - temp = d.get(temp_key) - if temp is None: - continue - times.append(datetime.datetime.utcfromtimestamp(d['#sampletime'])) - temps.append(float(temp)) - pwm.append(float(d.get(pwm_key, 0.))) - targets.append(float(d.get(target_key, 0.))) - ax1.plot_date(times, temps, '-', label='%s temp' % (heater,), alpha=0.8) - if any(targets): - label = '%s target' % (heater,) - ax1.plot_date(times, targets, '-', label=label, alpha=0.3) - if any(pwm): - label = '%s pwm' % (heater,) - ax2.plot_date(times, pwm, '-', label=label, alpha=0.2) - # Build plot - ax1.set_title("Temperature of %s" % (heaters,)) - ax1.set_xlabel('Time') - ax1.set_ylabel('Temperature') - ax2.set_ylabel('pwm') - fontP = matplotlib.font_manager.FontProperties() - fontP.set_size('x-small') - ax1li, ax1la = ax1.get_legend_handles_labels() - ax2li, ax2la = ax2.get_legend_handles_labels() - ax1.legend(ax1li + ax2li, ax1la + ax2la, loc='best', prop=fontP) - ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M')) - ax1.grid(True) - return fig - -def main(): - # Parse command-line arguments - usage = "%prog [options] " - opts = optparse.OptionParser(usage) - opts.add_option("-f", "--frequency", action="store_true", - help="graph mcu frequency") - opts.add_option("-s", "--system", action="store_true", - help="graph system load") - opts.add_option("-o", "--output", type="string", dest="output", - default=None, help="filename of output graph") - opts.add_option("-t", "--temperature", type="string", dest="heater", - default=None, help="graph heater temperature") - opts.add_option("-m", "--mcu", type="string", dest="mcu", default=None, - help="limit stats to the given mcu") - options, args = opts.parse_args() - if len(args) != 1: - opts.error("Incorrect number of arguments") - logname = args[0] - - # Parse data - data = parse_log(logname, options.mcu) - if not data: - return - - # Draw graph - setup_matplotlib(options.output is not None) - if options.heater is not None: - fig = plot_temperature(data, options.heater) - elif options.frequency: - fig = plot_frequency(data, options.mcu) - elif options.system: - fig = plot_system(data) - else: - fig = plot_mcu(data, MAXBANDWIDTH) - - # Show graph - if options.output is None: - matplotlib.pyplot.show() - else: - fig.set_size_inches(8, 6) - fig.savefig(options.output) - -if __name__ == '__main__': - main() diff --git a/scripts/install-arch.sh b/scripts/install-arch.sh deleted file mode 100644 index ad5820c..0000000 --- a/scripts/install-arch.sh +++ /dev/null @@ -1,102 +0,0 @@ -#!/bin/bash -# This script installs Klipper on an Arch Linux system - -PYTHONDIR="${HOME}/klippy-env" -SYSTEMDDIR="/etc/systemd/system" -AURCLIENT="pamac" -KLIPPER_USER=$USER -KLIPPER_GROUP=$KLIPPER_USER - -# Step 1: Install system packages -install_packages() -{ - # Packages for python cffi - PKGLIST="python2-virtualenv libffi base-devel" - # kconfig requirements - PKGLIST="${PKGLIST} ncurses" - # hub-ctrl - PKGLIST="${PKGLIST} libusb" - # AVR chip installation and building - PKGLIST="${PKGLIST} avrdude avr-gcc avr-binutils avr-libc" - # ARM chip installation and building - AURLIST="stm32flash" - PKGLIST="${PKGLIST} arm-none-eabi-newlib" - PKGLIST="${PKGLIST} arm-none-eabi-gcc arm-none-eabi-binutils" - - # Install desired packages - report_status "Installing packages..." 
- sudo pacman -S ${PKGLIST} - $AURCLIENT build ${AURLIST} -} - -# Step 2: Create python virtual environment -create_virtualenv() -{ - report_status "Updating python virtual environment..." - - # Create virtualenv if it doesn't already exist - [ ! -d ${PYTHONDIR} ] && virtualenv2 ${PYTHONDIR} - - # Install/update dependencies - ${PYTHONDIR}/bin/pip install -r ${SRCDIR}/scripts/klippy-requirements.txt -} - -# Step 3: Install startup script -install_script() -{ -# Create systemd service file - KLIPPER_LOG=/tmp/klippy.log - report_status "Installing system start script..." - sudo /bin/sh -c "cat > $SYSTEMDDIR/klipper.service" << EOF -#Systemd service file for klipper -[Unit] -Description=Starts klipper on startup -After=network.target - -[Install] -WantedBy=multi-user.target - -[Service] -Type=simple -User=$KLIPPER_USER -RemainAfterExit=yes -ExecStart=${PYTHONDIR}/bin/python ${SRCDIR}/klippy/klippy.py ${HOME}/printer.cfg -l ${KLIPPER_LOG} -EOF -# Use systemctl to enable the klipper systemd service script - sudo systemctl enable klipper.service - report_status "Make sure to add $KLIPPER_USER to the user group controlling your serial printer port" -} - -# Step 4: Start host software -start_software() -{ - report_status "Launching Klipper host software..." - sudo systemctl start klipper -} - -# Helper functions -report_status() -{ - echo -e "\n\n###### $1" -} - -verify_ready() -{ - if [ "$EUID" -eq 0 ]; then - echo "This script must not run as root" - exit -1 - fi -} - -# Force script to exit if an error occurs -set -e - -# Find SRCDIR from the pathname of this script -SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )" - -# Run installation steps defined above -verify_ready -install_packages -create_virtualenv -install_script -start_software diff --git a/scripts/install-beaglebone.sh b/scripts/install-beaglebone.sh deleted file mode 100644 index a68c8d0..0000000 --- a/scripts/install-beaglebone.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/bash -# This script installs Klipper on a Beaglebone running Debian Jessie -# for use with its PRU micro-controller. - -# Step 1: Do main install -install_main() -{ - # Run the debian script - should - # work. - ${SRCDIR}/scripts/install-debian.sh -} - -# Step 2: Install additional system packages -install_packages() -{ - # Install desired packages - PKGLIST="gcc-pru" - - report_status "Installing beaglebone packages..." - sudo apt-get install --yes ${PKGLIST} -} - -# Step 3: Install startup script -install_script() -{ - report_status "Installing pru start script..." - sudo cp "${SRCDIR}/scripts/klipper-pru-start.sh" /etc/init.d/klipper_pru - sudo update-rc.d klipper_pru defaults -} - -# Step 4: Install pru udev rule -install_udev() -{ - report_status "Installing pru udev rule..." - sudo /bin/sh -c "cat > /etc/udev/rules.d/pru.rules" < $SYSTEMDDIR/klipper.service" << EOF -#Systemd service file for klipper -[Unit] -Description=Starts klipper on startup -After=network.target - -[Install] -WantedBy=multi-user.target - -[Service] -Type=simple -User=$USER -RemainAfterExit=yes -ExecStart=${PYTHONDIR}/bin/python ${SRCDIR}/klippy/klippy.py ${HOME}/printer.cfg -l /var/log/klippy.log -EOF -# Use systemctl to enable the klipper systemd service script - sudo systemctl enable klipper.service -} - -# Configuration for systemctl klipper - -KLIPPY_USER=$USER - - -# Step 5: Start host software -start_software() -{ - report_status "Launching Klipper host software..." 
- sudo systemctl restart klipper -} - -# Helper functions -report_status() -{ - echo -e "\n\n###### $1" -} - -verify_ready() -{ - if [ "$EUID" -eq 0 ]; then - echo "This script must not run as root" - exit -1 - fi -} - -# Force script to exit if an error occurs -set -e - -# Find SRCDIR from the pathname of this script -SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )" - -# Run installation steps defined above -verify_ready -install_packages -create_virtualenv -install_script -start_software diff --git a/scripts/install-debian.sh b/scripts/install-debian.sh deleted file mode 100644 index d1d910b..0000000 --- a/scripts/install-debian.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/bin/bash -# This script installs Klipper on an debian -# - -PYTHONDIR="${HOME}/klippy-env" -SYSTEMDDIR="/etc/systemd/system" -KLIPPER_USER=$USER -KLIPPER_GROUP=$KLIPPER_USER - -# Step 1: Install system packages -install_packages() -{ - # Packages for python cffi - PKGLIST="virtualenv python-dev libffi-dev build-essential" - # kconfig requirements - PKGLIST="${PKGLIST} libncurses-dev" - # hub-ctrl - PKGLIST="${PKGLIST} libusb-dev" - # AVR chip installation and building - PKGLIST="${PKGLIST} avrdude gcc-avr binutils-avr avr-libc" - # ARM chip installation and building - PKGLIST="${PKGLIST} stm32flash libnewlib-arm-none-eabi" - PKGLIST="${PKGLIST} gcc-arm-none-eabi binutils-arm-none-eabi libusb-1.0" - - # Update system package info - report_status "Running apt-get update..." - sudo apt-get update - - # Install desired packages - report_status "Installing packages..." - sudo apt-get install --yes ${PKGLIST} -} - -# Step 2: Create python virtual environment -create_virtualenv() -{ - report_status "Updating python virtual environment..." - - # Create virtualenv if it doesn't already exist - [ ! -d ${PYTHONDIR} ] && virtualenv -p python2 ${PYTHONDIR} - - # Install/update dependencies - ${PYTHONDIR}/bin/pip install -r ${SRCDIR}/scripts/klippy-requirements.txt -} - -# Step 3: Install startup script -install_script() -{ -# Create systemd service file - KLIPPER_LOG=/tmp/klippy.log - report_status "Installing system start script..." - sudo /bin/sh -c "cat > $SYSTEMDDIR/klipper.service" << EOF -#Systemd service file for klipper -[Unit] -Description=Starts klipper on startup -After=network.target - -[Install] -WantedBy=multi-user.target - -[Service] -Type=simple -User=$KLIPPER_USER -RemainAfterExit=yes -ExecStart=${PYTHONDIR}/bin/python ${SRCDIR}/klippy/klippy.py ${HOME}/printer.cfg -l ${KLIPPER_LOG} -Restart=always -RestartSec=10 -EOF -# Use systemctl to enable the klipper systemd service script - sudo systemctl enable klipper.service -} - -# Step 4: Start host software -start_software() -{ - report_status "Launching Klipper host software..." - sudo systemctl start klipper -} - -# Helper functions -report_status() -{ - echo -e "\n\n###### $1" -} - -verify_ready() -{ - if [ "$EUID" -eq 0 ]; then - echo "This script must not run as root" - exit -1 - fi -} - -# Force script to exit if an error occurs -set -e - -# Find SRCDIR from the pathname of this script -SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )" - -# Run installation steps defined above -verify_ready -install_packages -create_virtualenv -install_script -start_software diff --git a/scripts/install-octopi.sh b/scripts/install-octopi.sh deleted file mode 100644 index a7fb78c..0000000 --- a/scripts/install-octopi.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash -# This script installs Klipper on a Raspberry Pi machine running the -# OctoPi distribution. 
- -PYTHONDIR="${HOME}/klippy-env" - -# Step 1: Install system packages -install_packages() -{ - # Packages for python cffi - PKGLIST="virtualenv python-dev libffi-dev build-essential" - # kconfig requirements - PKGLIST="${PKGLIST} libncurses-dev" - # hub-ctrl - PKGLIST="${PKGLIST} libusb-dev" - # AVR chip installation and building - PKGLIST="${PKGLIST} avrdude gcc-avr binutils-avr avr-libc" - # ARM chip installation and building - PKGLIST="${PKGLIST} stm32flash dfu-util libnewlib-arm-none-eabi" - PKGLIST="${PKGLIST} gcc-arm-none-eabi binutils-arm-none-eabi libusb-1.0" - - # Update system package info - report_status "Running apt-get update..." - sudo apt-get update - - # Install desired packages - report_status "Installing packages..." - sudo apt-get install --yes ${PKGLIST} -} - -# Step 2: Create python virtual environment -create_virtualenv() -{ - report_status "Updating python virtual environment..." - - # Create virtualenv if it doesn't already exist - [ ! -d ${PYTHONDIR} ] && virtualenv -p python2 ${PYTHONDIR} - - # Install/update dependencies - ${PYTHONDIR}/bin/pip install -r ${SRCDIR}/scripts/klippy-requirements.txt -} - -# Step 3: Install startup script -install_script() -{ - report_status "Installing system start script..." - sudo cp "${SRCDIR}/scripts/klipper-start.sh" /etc/init.d/klipper - sudo update-rc.d klipper defaults -} - -# Step 4: Install startup script config -install_config() -{ - DEFAULTS_FILE=/etc/default/klipper - [ -f $DEFAULTS_FILE ] && return - - report_status "Installing system start configuration..." - sudo /bin/sh -c "cat > $DEFAULTS_FILE" < $SYSTEMDDIR/klipper.service" << EOF -#Systemd service file for klipper -[Unit] -Description=Starts klipper on startup -After=network.target - -[Install] -WantedBy=multi-user.target - -[Service] -Type=simple -User=$KLIPPER_USER -RemainAfterExit=yes -ExecStart=${PYTHONDIR}/bin/python ${SRCDIR}/klippy/klippy.py ${HOME}/printer.cfg -l ${KLIPPER_LOG} -EOF -# Use systemctl to enable the klipper systemd service script - sudo systemctl enable klipper.service -} - -# Step 4: Start host software -start_software() -{ - report_status "Launching Klipper host software..." - sudo systemctl start klipper -} - -# Helper functions -report_status() -{ - echo -e "\n\n###### $1" -} - -verify_ready() -{ - if [ "$EUID" -eq 0 ]; then - echo "This script must not run as root" - exit -1 - fi -} - -# Force script to exit if an error occurs -set -e - -# Find SRCDIR from the pathname of this script -SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )" - -# Run installation steps defined above -verify_ready -install_packages -create_virtualenv -install_script -start_software diff --git a/scripts/klipper-mcu-start.sh b/scripts/klipper-mcu-start.sh deleted file mode 100644 index f58a629..0000000 --- a/scripts/klipper-mcu-start.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/sh -# System startup script to start the MCU Linux firmware - -### BEGIN INIT INFO -# Provides: klipper_mcu -# Required-Start: $local_fs -# Required-Stop: -# Default-Start: 3 4 5 -# Default-Stop: 0 1 2 6 -# Short-Description: Klipper_MCU daemon -# Description: Starts the MCU for Klipper. -### END INIT INFO - -PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -DESC="klipper_mcu startup" -NAME="klipper_mcu" -KLIPPER_HOST_MCU=/usr/local/bin/klipper_mcu -KLIPPER_HOST_ARGS="-r" -PIDFILE=/var/run/klipper_mcu.pid - -. /lib/lsb/init-functions - -mcu_host_stop() -{ - # Shutdown existing Klipper instance (if applicable). The goal is to - # put the GPIO pins in a safe state. 
- if [ -c /tmp/klipper_host_mcu ]; then - log_daemon_msg "Attempting to shutdown host mcu..." - set -e - ( echo "FORCE_SHUTDOWN" > /tmp/klipper_host_mcu ) 2> /dev/null || ( log_action_msg "Firmware busy! Please shutdown Klipper and then retry." && exit 1 ) - sleep 1 - ( echo "FORCE_SHUTDOWN" > /tmp/klipper_host_mcu ) 2> /dev/null || ( log_action_msg "Firmware busy! Please shutdown Klipper and then retry." && exit 1 ) - sleep 1 - set +e - fi - - log_daemon_msg "Stopping klipper host mcu" $NAME - killproc -p $PIDFILE $KLIPPER_HOST_MCU -} - -mcu_host_start() -{ - [ -x $KLIPPER_HOST_MCU ] || return - - if [ -c /tmp/klipper_host_mcu ]; then - mcu_host_stop - fi - - log_daemon_msg "Starting klipper MCU" $NAME - start-stop-daemon --start --quiet --exec $KLIPPER_HOST_MCU \ - --background --pidfile $PIDFILE --make-pidfile \ - -- $KLIPPER_HOST_ARGS - log_end_msg $? -} - -case "$1" in -start) - mcu_host_start - ;; -stop) - mcu_host_stop - ;; -restart) - $0 stop - $0 start - ;; -reload|force-reload) - log_daemon_msg "Reloading configuration not supported" $NAME - log_end_msg 1 - ;; -status) - status_of_proc -p $PIDFILE $KLIPPER_HOST_MCU $NAME && exit 0 || exit $? - ;; -*) log_action_msg "Usage: /etc/init.d/klipper_mcu {start|stop|status|restart|reload|force-reload}" - exit 2 - ;; -esac -exit 0 diff --git a/scripts/klipper-pru-start.sh b/scripts/klipper-pru-start.sh deleted file mode 100644 index 5d9af52..0000000 --- a/scripts/klipper-pru-start.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/bin/sh -# System startup script to start the PRU firmware - -### BEGIN INIT INFO -# Provides: klipper_pru -# Required-Start: $local_fs -# Required-Stop: -# Default-Start: 3 4 5 -# Default-Stop: 0 1 2 6 -# Short-Description: Klipper_PRU daemon -# Description: Starts the PRU for Klipper. -### END INIT INFO - -PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -DESC="klipper_pru startup" -NAME="klipper_pru" -KLIPPER_HOST_MCU=/usr/local/bin/klipper_mcu -KLIPPER_HOST_ARGS="-w -r" -PIDFILE=/var/run/klipper_mcu.pid -RPROC0=/sys/class/remoteproc/remoteproc1 -RPROC1=/sys/class/remoteproc/remoteproc2 - -. /lib/lsb/init-functions - -pru_stop() -{ - # Shutdown existing Klipper instance (if applicable). The goal is to - # put the GPIO pins in a safe state. - if [ -c /dev/rpmsg_pru30 ]; then - log_daemon_msg "Attempting to shutdown PRU..." - set -e - ( echo "FORCE_SHUTDOWN" > /dev/rpmsg_pru30 ) 2> /dev/null || ( log_action_msg "Firmware busy! Please shutdown Klipper and then retry." && exit 1 ) - sleep 1 - ( echo "FORCE_SHUTDOWN" > /dev/rpmsg_pru30 ) 2> /dev/null || ( log_action_msg "Firmware busy! Please shutdown Klipper and then retry." && exit 1 ) - sleep 1 - set +e - fi - - log_daemon_msg "Stopping pru" - echo 'stop' > $RPROC0/state - echo 'stop' > $RPROC1/state -} - -pru_start() -{ - if [ -c /dev/rpmsg_pru30 ]; then - pru_stop - else - echo 'stop' > $RPROC0/state - echo 'stop' > $RPROC1/state - fi - sleep 1 - - log_daemon_msg "Starting pru" - echo 'start' > $RPROC0/state - echo 'start' > $RPROC1/state - - # log_daemon_msg "Loading ADC module" - # echo 'BB-ADC' > /sys/devices/platform/bone_capemgr/slots -} - -mcu_host_stop() -{ - # Shutdown existing Klipper instance (if applicable). The goal is to - # put the GPIO pins in a safe state. - if [ -c /tmp/klipper_host_mcu ]; then - log_daemon_msg "Attempting to shutdown host mcu..." - set -e - ( echo "FORCE_SHUTDOWN" > /tmp/klipper_host_mcu ) 2> /dev/null || ( log_action_msg "Firmware busy! Please shutdown Klipper and then retry." 
&& exit 1 ) - sleep 1 - ( echo "FORCE_SHUTDOWN" > /tmp/klipper_host_mcu ) 2> /dev/null || ( log_action_msg "Firmware busy! Please shutdown Klipper and then retry." && exit 1 ) - sleep 1 - set +e - fi - - log_daemon_msg "Stopping klipper host mcu" $NAME - killproc -p $PIDFILE $KLIPPER_HOST_MCU -} - -mcu_host_start() -{ - [ -x $KLIPPER_HOST_MCU ] || return - - if [ -c /tmp/klipper_host_mcu ]; then - mcu_host_stop - fi - - log_daemon_msg "Starting klipper MCU" $NAME - start-stop-daemon --start --quiet --exec $KLIPPER_HOST_MCU \ - --background --pidfile $PIDFILE --make-pidfile \ - -- $KLIPPER_HOST_ARGS - log_end_msg $? -} - -case "$1" in -start) - pru_start - mcu_host_start - ;; -stop) - pru_stop - mcu_host_stop - ;; -restart) - $0 stop - $0 start - ;; -reload|force-reload) - log_daemon_msg "Reloading configuration not supported" $NAME - log_end_msg 1 - ;; -status) - status_of_proc -p $PIDFILE $KLIPPER_HOST_MCU $NAME && exit 0 || exit $? - ;; -*) log_action_msg "Usage: /etc/init.d/klipper {start|stop|status|restart|reload|force-reload}" - exit 2 - ;; -esac -exit 0 diff --git a/scripts/klipper-start.sh b/scripts/klipper-start.sh deleted file mode 100644 index 53b1985..0000000 --- a/scripts/klipper-start.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/sh -# System startup script for Klipper 3d-printer host code - -### BEGIN INIT INFO -# Provides: klipper -# Required-Start: $local_fs -# Required-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Klipper daemon -# Description: Starts the Klipper daemon. -### END INIT INFO - -PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -DESC="klipper daemon" -NAME="klipper" -DEFAULTS_FILE=/etc/default/klipper -PIDFILE=/var/run/klipper.pid - -. /lib/lsb/init-functions - -# Read defaults file -[ -r $DEFAULTS_FILE ] && . $DEFAULTS_FILE - -case "$1" in -start) log_daemon_msg "Starting klipper" $NAME - start-stop-daemon --start --quiet --exec $KLIPPY_EXEC \ - --background --pidfile $PIDFILE --make-pidfile \ - --chuid $KLIPPY_USER --user $KLIPPY_USER \ - -- $KLIPPY_ARGS - log_end_msg $? - ;; -stop) log_daemon_msg "Stopping klipper" $NAME - killproc -p $PIDFILE $KLIPPY_EXEC - RETVAL=$? - [ $RETVAL -eq 0 ] && [ -e "$PIDFILE" ] && rm -f $PIDFILE - log_end_msg $RETVAL - ;; -restart) log_daemon_msg "Restarting klipper" $NAME - $0 stop - $0 start - ;; -reload|force-reload) - log_daemon_msg "Reloading configuration not supported" $NAME - log_end_msg 1 - ;; -status) - status_of_proc -p $PIDFILE $KLIPPY_EXEC $NAME && exit 0 || exit $? - ;; -*) log_action_msg "Usage: /etc/init.d/klipper {start|stop|status|restart|reload|force-reload}" - exit 2 - ;; -esac -exit 0 diff --git a/scripts/klipper-uninstall.sh b/scripts/klipper-uninstall.sh deleted file mode 100644 index 60fee7c..0000000 --- a/scripts/klipper-uninstall.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# Uninstall script for raspbian/debian type installations - -# Stop Klipper Service -echo "#### Stopping Klipper Service.." -sudo service klipper stop - -# Remove Klipper from Startup -echo -echo "#### Removing Klipper from Startup.." -sudo update-rc.d -f klipper remove - -# Remove Klipper from Services -echo -echo "#### Removing Klipper Service.." -sudo rm -f /etc/init.d/klipper /etc/default/klipper - -# Notify user of method to remove Klipper source code -echo -echo "The Klipper system files have been removed." 
-echo -echo "The following command is typically used to remove local files:" -echo " rm -rf ~/klippy-env ~/klipper" diff --git a/scripts/klippy-requirements.txt b/scripts/klippy-requirements.txt deleted file mode 100644 index dd46dce..0000000 --- a/scripts/klippy-requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -# This file describes the Python virtualenv package requirements for -# the Klipper host software (Klippy). These package requirements are -# typically installed via the command: -# pip install -r klippy-requirements.txt -cffi==1.14.6 -pyserial==3.4 -greenlet==1.1.2 -Jinja2==2.11.3 -python-can==3.3.4 -markupsafe==1.1.1 diff --git a/scripts/logextract.py b/scripts/logextract.py deleted file mode 100644 index 0aad1e4..0000000 --- a/scripts/logextract.py +++ /dev/null @@ -1,610 +0,0 @@ -#!/usr/bin/env python2 -# Script to extract config and shutdown information file a klippy.log file -# -# Copyright (C) 2017 Kevin O'Connor -# -# This file may be distributed under the terms of the GNU GPLv3 license. -import sys, re, collections, ast - -def format_comment(line_num, line): - return "# %6d: %s" % (line_num, line) - - -###################################################################### -# Config file extraction -###################################################################### - -class GatherConfig: - def __init__(self, configs, line_num, recent_lines, logname): - self.configs = configs - self.line_num = line_num - self.config_num = len(configs) + 1 - self.filename = "%s.config%04d.cfg" % (logname, self.config_num) - self.config_lines = [] - self.comments = [] - def add_line(self, line_num, line): - if line != '=======================': - self.config_lines.append(line) - return True - self.finalize() - return False - def finalize(self): - lines = tuple(self.config_lines) - ch = self.configs.get(lines) - if ch is None: - self.configs[lines] = ch = self - else: - ch.comments.extend(self.comments) - ch.comments.append(format_comment(self.line_num, "config file")) - def add_comment(self, comment): - if comment is not None: - self.comments.append(comment) - def write_file(self): - f = open(self.filename, 'wb') - f.write('\n'.join(self.comments + self.config_lines).strip() + '\n') - f.close() - - -###################################################################### -# TMC UART message parsing -###################################################################### - -uart_r = re.compile(r"tmcuart_(?:send|response) oid=[0-9]+ (?:read|write)=") - -class TMCUartHelper: - def _calc_crc8(self, data): - # Generate a CRC8-ATM value for a bytearray - crc = 0 - for b in data: - for i in range(8): - if (crc >> 7) ^ (b & 0x01): - crc = (crc << 1) ^ 0x07 - else: - crc = (crc << 1) - crc &= 0xff - b >>= 1 - return crc - def _add_serial_bits(self, data): - # Add serial start and stop bits to a message in a bytearray - out = 0 - pos = 0 - for d in data: - b = (d << 1) | 0x200 - out |= (b << pos) - pos += 10 - res = bytearray() - for i in range((pos+7)//8): - res.append((out >> (i*8)) & 0xff) - return res - def _encode_read(self, sync, addr, reg): - # Generate a uart read register message - msg = bytearray([sync, addr, reg]) - msg.append(self._calc_crc8(msg)) - return self._add_serial_bits(msg) - def _encode_write(self, sync, addr, reg, val): - # Generate a uart write register message - msg = bytearray([sync, addr, reg, (val >> 24) & 0xff, - (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff]) - msg.append(self._calc_crc8(msg)) - return self._add_serial_bits(msg) - def _decode_read(self, data): - # 
Extract a uart read request message - if len(data) != 5: - return - # Convert data into a long integer for easy manipulation - mval = pos = 0 - for d in bytearray(data): - mval |= d << pos - pos += 8 - # Extract register value - addr = (mval >> 11) & 0xff - reg = (mval >> 21) & 0xff - # Verify start/stop bits and crc - encoded_data = self._encode_read(0xf5, addr, reg) - if data != encoded_data: - return "Invalid: %s" % (self.pretty_print(addr, reg),) - return self.pretty_print(addr, reg) - def _decode_reg(self, data): - # Extract a uart read response message - if len(data) != 10: - return - # Convert data into a long integer for easy manipulation - mval = pos = 0 - for d in bytearray(data): - mval |= d << pos - pos += 8 - # Extract register value - addr = (mval >> 11) & 0xff - reg = (mval >> 21) & 0xff - val = ((((mval >> 31) & 0xff) << 24) | (((mval >> 41) & 0xff) << 16) - | (((mval >> 51) & 0xff) << 8) | ((mval >> 61) & 0xff)) - sync = 0xf5 - if addr == 0xff: - sync = 0x05 - # Verify start/stop bits and crc - encoded_data = self._encode_write(sync, addr, reg, val) - if data != encoded_data: - #print("Got %s vs %s" % (repr(data), repr(encoded_data))) - return "Invalid:%s" % (self.pretty_print(addr, reg, val),) - return self.pretty_print(addr, reg, val) - def pretty_print(self, addr, reg, val=None): - if val is None: - return "(%x@%x)" % (reg, addr) - if reg & 0x80: - return "(%x@%x=%08x)" % (reg & ~0x80, addr, val) - return "(%x@%x==%08x)" % (reg, addr, val) - def parse_msg(self, msg): - data = bytearray(msg) - if len(data) == 10: - return self._decode_reg(data) - elif len(data) == 5: - return self._decode_read(data) - elif len(data) == 0: - return "" - return "(length?)" - - -###################################################################### -# Shutdown extraction -###################################################################### - -def add_high_bits(val, ref, mask): - half = (mask + 1) // 2 - return ref + ((val - (ref & mask) + half) & mask) - half - -count_s = r"(?P[0-9]+)" -time_s = r"(?P
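As a companion to the TMCUartHelper class above, the short standalone sketch below mirrors its CRC8-ATM checksum (polynomial x^8 + x^2 + x + 1, bytes processed least-significant bit first) so the framing logic can be tried outside of logextract.py; the sync/address/register bytes are made-up illustration values, not taken from a real log.

def crc8_atm(data):
    # Mirrors TMCUartHelper._calc_crc8: shift the CRC left and xor with 0x07
    # whenever the top CRC bit differs from the next (LSB-first) data bit.
    crc = 0
    for b in data:
        for _ in range(8):
            if (crc >> 7) ^ (b & 0x01):
                crc = ((crc << 1) ^ 0x07) & 0xff
            else:
                crc = (crc << 1) & 0xff
            b >>= 1
    return crc

# Build a 4-byte read request (sync, slave address, register, crc); the
# helper's _add_serial_bits() would then wrap each byte in a start bit (0)
# and a stop bit (1) before comparing it against the logged tmcuart data.
msg = bytearray([0xf5, 0x00, 0x6f])   # illustrative values only
msg.append(crc8_atm(msg))
print(" ".join("%02x" % b for b in msg))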