Add files via upload

This commit is contained in:
Rainboooom
2023-06-15 11:27:03 +08:00
committed by GitHub
parent 7e4691248d
commit c1cf04e8f5
100 changed files with 8640 additions and 0 deletions

44
scripts/Dockerfile Normal file
View File

@@ -0,0 +1,44 @@
# This is an example Dockerfile showing how it's possible to install Klipper in Docker.
# IMPORTANT: The docker build must be run from the root of the repo, either copy the
# Dockerfile to the root, or run docker build with "-f", for example:
# docker build . -f scripts/Dockerfile -t klipper
# Note that the host still needs to run Linux to connect the printers serial port to
# the container.
# When running, the serial port of your printer should be connected, including an
# argument such as:
# --device /dev/ttyUSB0:/dev/ttyUSB0
# It's also required that your control program (eg: OctoPrint) be included in the same
# container as Docker does not allow sharing of the virtual serial port outside the
# container.
# The config should be in a file named "printer.cfg" in a directory mounted at:
# /home/klippy/.config/
# For more Dockerfile examples with Klipper (and Octoprint) see:
# https://github.com/sillyfrog/OctoPrint-Klipper-mjpg-Dockerfile
FROM ubuntu:18.04
# sudo is required by the Klipper install script run below
RUN apt-get update && \
    apt-get install -y sudo
# Create user (dialout group grants access to serial devices)
RUN useradd -ms /bin/bash klippy && adduser klippy dialout
USER klippy
#This fixes issues with the volume command setting wrong permissions
RUN mkdir /home/klippy/.config
VOLUME /home/klippy/.config
### Klipper setup ###
WORKDIR /home/klippy
COPY . klipper/
USER root
# Passwordless sudo for klippy so the install script can run unattended
RUN echo 'klippy ALL=(ALL:ALL) NOPASSWD: ALL' > /etc/sudoers.d/klippy && \
    chown klippy:klippy -R klipper
# This is to allow the install script to run without error
RUN ln -s /bin/true /bin/systemctl
USER klippy
RUN ./klipper/scripts/install-ubuntu-18.04.sh
# Clean up install script workaround
RUN sudo rm -f /bin/systemctl
CMD ["/home/klippy/klippy-env/bin/python", "/home/klippy/klipper/klippy/klippy.py", "/home/klippy/.config/printer.cfg"]

245
scripts/avrsim.py Normal file
View File

@@ -0,0 +1,245 @@
#!/usr/bin/env python3
# Script to interact with simulavr by simulating a serial port.
#
# Copyright (C) 2015-2018 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, optparse, time, os, pty, fcntl, termios, errno
import pysimulavr
SERIALBITS = 10 # 8N1 = 1 start, 8 data, 1 stop
SIMULAVR_FREQ = 10**9
# Class to read serial data from AVR serial transmit pin.
class SerialRxPin(pysimulavr.PySimulationMember, pysimulavr.Pin):
    """Sample the AVR TX pin and decode 8N1 serial frames into bytes."""
    def __init__(self, baud, terminal):
        pysimulavr.Pin.__init__(self)
        pysimulavr.PySimulationMember.__init__(self)
        self.terminal = terminal
        self.sc = pysimulavr.SystemClock.Instance()
        # Simulation ticks per serial bit
        self.delay = SIMULAVR_FREQ // baud
        self.current = 0  # bits accumulated for the frame in progress
        self.pos = -1     # bit index within frame; -1 = idle (await start bit)
    def SetInState(self, pin):
        # Invoked by simulavr on each transition of the connected pin
        pysimulavr.Pin.SetInState(self, pin)
        self.state = pin.outState
        if self.pos < 0 and pin.outState == pin.LOW:
            # Falling edge while idle: start bit - begin scheduled sampling
            self.pos = 0
            self.sc.Add(self)
    def DoStep(self, trueHwStep):
        # Sample current line level into the frame accumulator
        ishigh = self.state == self.HIGH
        self.current |= ishigh << self.pos
        self.pos += 1
        if self.pos == 1:
            # Wait 1.5 bit times so subsequent samples land mid-bit
            return int(self.delay * 1.5)
        if self.pos >= SERIALBITS:
            # Full frame received - strip start bit and emit the data byte
            data = bytearray([(self.current >> 1) & 0xff])
            self.terminal.write(data)
            self.pos = -1
            self.current = 0
            return -1  # stop scheduling until the next start bit
        return self.delay
# Class to send serial data to AVR serial receive pin.
class SerialTxPin(pysimulavr.PySimulationMember, pysimulavr.Pin):
    """Drive the AVR RX pin, encoding queued bytes as 8N1 serial frames."""
    def __init__(self, baud, terminal):
        pysimulavr.Pin.__init__(self)
        pysimulavr.PySimulationMember.__init__(self)
        self.terminal = terminal
        self.SetPin('H')  # serial line idles high
        self.sc = pysimulavr.SystemClock.Instance()
        self.delay = SIMULAVR_FREQ // baud  # simulation ticks per bit
        self.current = 0          # frame currently being shifted out
        self.pos = 0              # bit index within current frame
        self.queue = bytearray()  # bytes awaiting transmission
        self.sc.Add(self)
    def DoStep(self, trueHwStep):
        if not self.pos:
            # Between frames - try to fetch more data from the terminal
            if not self.queue:
                data = self.terminal.read()
                if not data:
                    # Nothing pending; poll again much later (100 bit times)
                    return self.delay * 100
                self.queue.extend(data)
            # Build frame: start bit (0) + 8 data bits + stop bit (via 0x200)
            self.current = (self.queue.pop(0) << 1) | 0x200
        # Drive the line to the level of the current bit
        newstate = 'L'
        if self.current & (1 << self.pos):
            newstate = 'H'
        self.SetPin(newstate)
        self.pos += 1
        if self.pos >= SERIALBITS:
            self.pos = 0
        return self.delay
# Support for creating VCD trace files
class Tracing:
    """Optional signal tracing to a VCD file via simulavr's DumpManager.

    When no signals are requested all methods are no-ops."""
    def __init__(self, filename, signals):
        self.filename = filename
        self.signals = signals
        if not signals:
            # Tracing disabled
            self.dman = None
            return
        self.dman = pysimulavr.DumpManager.Instance()
        self.dman.SetSingleDeviceApp()
    def show_help(self):
        # Print the list of traceable signals and exit
        ostr = pysimulavr.ostringstream()
        self.dman.save(ostr)
        sys.stdout.write(ostr.str())
        sys.exit(1)
    def load_options(self):
        if self.dman is None:
            return
        if self.signals.strip() == '?':
            self.show_help()
        # DumpManager expects one "+ <signal>" per line
        sigs = "\n".join(["+ " + s for s in self.signals.split(',')])
        self.dman.addDumpVCD(self.filename, sigs, "ns", False, False)
    def start(self):
        if self.dman is not None:
            self.dman.start()
    def finish(self):
        if self.dman is not None:
            self.dman.stopApplication()
# Pace the simulation scaled to real time
class Pacing(pysimulavr.PySimulationMember):
    """Periodically sleep so simulated time tracks wall-clock time."""
    def __init__(self, rate):
        pysimulavr.PySimulationMember.__init__(self)
        self.sc = pysimulavr.SystemClock.Instance()
        # Wall-clock seconds per simulation tick at the requested rate
        self.pacing_rate = 1. / (rate * SIMULAVR_FREQ)
        self.next_check_clock = 0
        self.rel_time = time.time()  # wall-clock anchor of sim time zero
        self.best_offset = 0.
        self.delay = SIMULAVR_FREQ // 10000  # pacing check interval (ticks)
        self.sc.Add(self)
    def DoStep(self, trueHwStep):
        curtime = time.time()
        clock = self.sc.GetCurrentTime()
        # How far simulated time is ahead of real time (seconds)
        offset = clock * self.pacing_rate - (curtime - self.rel_time)
        self.best_offset = max(self.best_offset, offset)
        if offset > 0.000050:
            # Simulation is ahead - sleep so real time can catch up
            time.sleep(offset - 0.000040)
        if clock >= self.next_check_clock:
            # Periodically re-anchor rel_time when the simulation could not
            # keep up (best_offset stayed negative over the whole window)
            self.rel_time -= min(self.best_offset, 0.)
            self.next_check_clock = clock + self.delay * 500
            self.best_offset = -999999999.
        return self.delay
# Forward data from a terminal device to the serial port pins
class TerminalIO:
    """Bridge between a (non-blocking) terminal fd and the serial pins.

    write() forwards bytes decoded from the AVR TX pin out to the
    terminal; read() polls the terminal for bytes to feed the AVR RX pin.
    """
    def __init__(self):
        self.fd = -1
    def run(self, fd):
        # Record the terminal file descriptor to forward data through
        self.fd = fd
    def write(self, data):
        os.write(self.fd, data)
    def read(self):
        try:
            return os.read(self.fd, 64)
        except OSError as e:
            if e.errno not in (errno.EAGAIN, errno.EWOULDBLOCK):
                # Terminal went away - halt the simulation
                pysimulavr.SystemClock.Instance().stop()
            # Return empty bytes (not str) so callers always receive a
            # bytes-like value, matching the os.read() success path
            return b""
# Support for creating a pseudo-tty for emulating a serial port
def create_pty(ptyname):
    """Create a pty, symlink its slave end at ptyname, and return the
    master fd configured non-blocking in raw 8N1 mode (no echo)."""
    mfd, sfd = pty.openpty()
    try:
        # Remove any stale symlink left from a previous run
        os.unlink(ptyname)
    except os.error:
        pass
    os.symlink(os.ttyname(sfd), ptyname)
    # Make reads on the master side non-blocking
    fcntl.fcntl(mfd, fcntl.F_SETFL
                , fcntl.fcntl(mfd, fcntl.F_GETFL) | os.O_NONBLOCK)
    tcattr = termios.tcgetattr(mfd)
    # Raw mode: disable input translation and flow control ...
    tcattr[0] &= ~(
        termios.IGNBRK | termios.BRKINT | termios.PARMRK | termios.ISTRIP |
        termios.INLCR | termios.IGNCR | termios.ICRNL | termios.IXON)
    # ... disable output post-processing ...
    tcattr[1] &= ~termios.OPOST
    # ... disable echo, canonical line editing and signal generation
    tcattr[3] &= ~(
        termios.ECHO | termios.ECHONL | termios.ICANON | termios.ISIG |
        termios.IEXTEN)
    # 8 data bits, no parity
    tcattr[2] &= ~(termios.CSIZE | termios.PARENB)
    tcattr[2] |= termios.CS8
    # VMIN=0/VTIME=0: read() returns immediately with whatever is available
    tcattr[6][termios.VMIN] = 0
    tcattr[6][termios.VTIME] = 0
    termios.tcsetattr(mfd, termios.TCSAFLUSH, tcattr)
    return mfd
def main():
    """Parse options, wire the simulated AVR uart to a pty, and run."""
    usage = "%prog [options] <program.elf>"
    opts = optparse.OptionParser(usage)
    opts.add_option("-m", "--machine", type="string", dest="machine",
                    default="atmega644", help="type of AVR machine to simulate")
    opts.add_option("-s", "--speed", type="int", dest="speed", default=16000000,
                    help="machine speed")
    opts.add_option("-r", "--rate", type="float", dest="pacing_rate",
                    default=0., help="real-time pacing rate")
    opts.add_option("-b", "--baud", type="int", dest="baud", default=250000,
                    help="baud rate of the emulated serial port")
    opts.add_option("-t", "--trace", type="string", dest="trace",
                    help="signals to trace (? for help)")
    opts.add_option("-p", "--port", type="string", dest="port",
                    default="/tmp/pseudoserial",
                    help="pseudo-tty device to create for serial port")
    # Default trace file name is derived from this script's own name
    deffile = os.path.splitext(os.path.basename(sys.argv[0]))[0] + ".vcd"
    opts.add_option("-f", "--tracefile", type="string", dest="tracefile",
                    default=deffile, help="filename to write signal trace to")
    options, args = opts.parse_args()
    if len(args) != 1:
        opts.error("Incorrect number of arguments")
    elffile = args[0]
    proc = options.machine
    ptyname = options.port
    speed = options.speed
    baud = options.baud
    # launch simulator
    sc = pysimulavr.SystemClock.Instance()
    trace = Tracing(options.tracefile, options.trace)
    dev = pysimulavr.AvrFactory.instance().makeDevice(proc)
    dev.Load(elffile)
    dev.SetClockFreq(SIMULAVR_FREQ // speed)
    sc.Add(dev)
    # Keep the process alive on simulated-program console output
    pysimulavr.cvar.sysConHandler.SetUseExit(False)
    trace.load_options()
    # Do optional real-time pacing
    if options.pacing_rate:
        pacing = Pacing(options.pacing_rate)
    # Setup terminal
    io = TerminalIO()
    # Setup rx pin - decodes what the AVR transmits on pin D1
    rxpin = SerialRxPin(baud, io)
    net = pysimulavr.Net()
    net.Add(rxpin)
    net.Add(dev.GetPin("D1"))
    # Setup tx pin - feeds terminal data into AVR receive pin D0
    txpin = SerialTxPin(baud, io)
    net2 = pysimulavr.Net()
    net2.Add(dev.GetPin("D0"))
    net2.Add(txpin)
    # Display start banner
    msg = "Starting AVR simulation: machine=%s speed=%d\n" % (proc, speed)
    msg += "Serial: port=%s baud=%d\n" % (ptyname, baud)
    if options.trace:
        msg += "Trace file: %s\n" % (options.tracefile,)
    sys.stdout.write(msg)
    sys.stdout.flush()
    # Create terminal device
    fd = create_pty(ptyname)
    # Run loop
    try:
        io.run(fd)
        trace.start()
        # Run (effectively) forever
        sc.RunTimeRange(0x7fff0000ffff0000)
        trace.finish()
    finally:
        # Always remove the pty symlink, even on error/interrupt
        os.unlink(ptyname)

618
scripts/buildcommands.py Normal file
View File

@@ -0,0 +1,618 @@
#!/usr/bin/env python2
# Script to handle build time requests embedded in C code.
#
# Copyright (C) 2016-2021 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, os, subprocess, optparse, logging, shlex, socket, time, traceback
import json, zlib
sys.path.append('./klippy')
import msgproto
# Header emitted at the top of the generated C source file
FILEHEADER = """
/* DO NOT EDIT! This is an autogenerated file. See scripts/buildcommands.py. */
#include "board/irq.h"
#include "board/pgm.h"
#include "command.h"
#include "compiler.h"
#include "initial_pins.h"
"""

# Report a fatal error message and exit the program
def error(msg):
    sys.stderr.write(msg + "\n")
    sys.exit(-1)

# Global registry of build-request handler objects (populated below)
Handlers = []
######################################################################
# C call list generation
######################################################################
# Create dynamic C functions that call a list of other C functions
class HandleCallList:
    """Collect _DECL_CALLLIST requests and emit one C wrapper per list
    that invokes each registered function in declaration order."""
    def __init__(self):
        self.call_lists = {'ctr_run_initfuncs': []}
        self.ctr_dispatch = { '_DECL_CALLLIST': self.decl_calllist }
    def decl_calllist(self, req):
        # Request format: "_DECL_CALLLIST <listname> <funcname>"
        funcname, callname = req.split()[1:]
        self.call_lists.setdefault(funcname, []).append(callname)
    def update_data_dictionary(self, data):
        pass
    def generate_code(self, options):
        chunks = []
        for listname, callees in self.call_lists.items():
            body = ['    extern void %s(void);\n    %s();' % (f, f)
                    for f in callees]
            if listname == 'ctr_run_taskfuncs':
                # Service pending interrupts between (and after) each task
                poll = '    irq_poll();\n'
                body = [poll + entry for entry in body] + [poll]
            fmt = """
void
%s(void)
{
    %s
}
"""
            chunks.append(fmt % (listname, "\n".join(body).strip()))
        return "".join(chunks)
# Register the call-list handler in the global handler registry
Handlers.append(HandleCallList())
######################################################################
# Enumeration and static string generation
######################################################################
# Static string ids 0 and 1 are reserved by the protocol
STATIC_STRING_MIN = 2
# Generate a dynamic string to integer mapping
class HandleEnumerations:
    """Track DECL_ENUMERATION requests and static strings; emit the C
    helper that maps a static string to its assigned id."""
    def __init__(self):
        self.static_strings = []
        self.enumerations = {}
        self.ctr_dispatch = {
            '_DECL_STATIC_STR': self.decl_static_str,
            'DECL_ENUMERATION': self.decl_enumeration,
            'DECL_ENUMERATION_RANGE': self.decl_enumeration_range
        }
    def add_enumeration(self, enum, name, value):
        # A name may be declared twice only with an identical value
        known = self.enumerations.setdefault(enum, {})
        if known.get(name, value) != value:
            error("Conflicting definition for enumeration '%s %s'" % (
                enum, name))
        known[name] = value
    def decl_enumeration(self, req):
        enum, name, value = req.split()[1:]
        self.add_enumeration(enum, name, int(value, 0))
    def decl_enumeration_range(self, req):
        # A range maps name -> (first_value, count)
        enum, name, value, count = req.split()[1:]
        self.add_enumeration(enum, name, (int(value, 0), int(count, 0)))
    def decl_static_str(self, req):
        msg = req.split(None, 1)[1]
        if msg not in self.static_strings:
            self.static_strings.append(msg)
    def update_data_dictionary(self, data):
        # Static strings are exported as the static_string_id enumeration
        for idx, msg in enumerate(self.static_strings):
            self.add_enumeration("static_string_id", msg,
                                 idx + STATIC_STRING_MIN)
        data['enumerations'] = self.enumerations
    def generate_code(self, options):
        lookups = []
        for idx, msg in enumerate(self.static_strings):
            lookups.append('    if (__builtin_strcmp(str, "%s") == 0)\n'
                           '        return %d;\n'
                           % (msg, idx + STATIC_STRING_MIN))
        fmt = """
uint8_t __always_inline
ctr_lookup_static_string(const char *str)
{
    %s
    return 0xff;
}
"""
        return fmt % ("".join(lookups).strip(),)
# Kept in a module-level name as well - other handlers read .enumerations
HandlerEnumerations = HandleEnumerations()
Handlers.append(HandlerEnumerations)
######################################################################
# Constants
######################################################################
# Allow adding build time constants to the data dictionary
class HandleConstants:
    """Collect DECL_CONSTANT / DECL_CONSTANT_STR values for the data
    dictionary; no C code is generated for them."""
    def __init__(self):
        self.constants = {}
        self.ctr_dispatch = {
            'DECL_CONSTANT': self.decl_constant,
            'DECL_CONSTANT_STR': self.decl_constant_str,
        }
    def set_value(self, name, value):
        # A constant may be declared twice only with an identical value
        if self.constants.get(name, value) != value:
            error("Conflicting definition for constant '%s'" % name)
        self.constants[name] = value
    def decl_constant(self, req):
        name, value = req.split()[1:]
        self.set_value(name, int(value, 0))
    def decl_constant_str(self, req):
        name, value = req.split(None, 2)[1:]
        value = value.strip()
        # Drop surrounding double quotes when present
        if value.startswith('"') and value.endswith('"'):
            value = value[1:-1]
        self.set_value(name, value)
    def update_data_dictionary(self, data):
        data['config'] = self.constants
    def generate_code(self, options):
        # Constants only affect the data dictionary
        return ""
# Kept in a module-level name as well - HandleInitialPins calls into it
HandlerConstants = HandleConstants()
Handlers.append(HandlerConstants)
######################################################################
# Initial pins
######################################################################
class HandleInitialPins:
    """Handle DECL_INITIAL_PINS - pins the mcu drives at startup."""
    def __init__(self):
        self.initial_pins = []
        self.ctr_dispatch = { 'DECL_INITIAL_PINS': self.decl_initial_pins }
    def decl_initial_pins(self, req):
        # Request format: DECL_INITIAL_PINS "<pin>[,<pin>...]"
        pins = req.split(None, 1)[1].strip()
        if pins.startswith('"') and pins.endswith('"'):
            pins = pins[1:-1]
        if pins:
            self.initial_pins = [p.strip() for p in pins.split(',')]
            # Also export the raw pin list via the constants handler
            HandlerConstants.decl_constant_str(
                "_DECL_CONSTANT_STR INITIAL_PINS "
                + ','.join(self.initial_pins))
    def update_data_dictionary(self, data):
        pass
    def map_pins(self):
        # Translate pin names to their 'pin' enumeration values
        if not self.initial_pins:
            return []
        mp = msgproto.MessageParser()
        mp.fill_enumerations(HandlerEnumerations.enumerations)
        pinmap = mp.get_enumerations().get('pin', {})
        out = []
        for p in self.initial_pins:
            flag = "IP_OUT_HIGH"
            if p.startswith('!'):
                # Leading '!' requests driving the pin low instead of high
                flag = "0"
                p = p[1:].strip()
            if p not in pinmap:
                error("Unknown initial pin '%s'" % (p,))
            out.append("\n    {%d, %s}, // %s" % (pinmap[p], flag, p))
        return out
    def generate_code(self, options):
        out = self.map_pins()
        fmt = """
const struct initial_pin_s initial_pins[] PROGMEM = {%s
};
const int initial_pins_size PROGMEM = ARRAY_SIZE(initial_pins);
"""
        return fmt % (''.join(out),)
Handlers.append(HandleInitialPins())
######################################################################
# ARM IRQ vector table generation
######################################################################
# Create ARM IRQ vector table from interrupt handler declarations
class Handle_arm_irq:
    """Collect DECL_ARMCM_IRQ requests and emit the Cortex-M VectorTable."""
    def __init__(self):
        self.irqs = {}
        self.ctr_dispatch = { 'DECL_ARMCM_IRQ': self.decl_armcm_irq }
    def decl_armcm_irq(self, req):
        # Request format: DECL_ARMCM_IRQ <handler_func> <irq_number>
        func, num = req.split()[1:]
        num = int(num, 0)
        prev = self.irqs.get(num)
        if prev is not None and prev != func:
            error("Conflicting IRQ definition %d (old %s new %s)"
                  % (num, prev, func))
        self.irqs[num] = func
    def update_data_dictionary(self, data):
        pass
    def generate_code(self, options):
        # Cortex-M: the first 16 table slots are system exceptions;
        # exception 1 (table index 1) is the reset handler
        armcm_offset = 16
        if 1 - armcm_offset not in self.irqs:
            # The ResetHandler was not defined - don't build VectorTable
            return ""
        slots = max(self.irqs.keys()) + armcm_offset + 1
        table = ["    DefaultHandler,\n"] * slots
        decls = []
        for num, func in self.irqs.items():
            if num < 1 - armcm_offset:
                error("Invalid IRQ %d (%s)" % (num, func))
            decls.append("extern void %s(void);\n" % (func,))
            table[num + armcm_offset] = "    %s,\n" % (func,)
        # Slot 0 holds the initial stack pointer
        table[0] = "    &_stack_end,\n"
        fmt = """
extern void DefaultHandler(void);
extern uint32_t _stack_end;
%s
const void *VectorTable[] __visible __section(".vector_table") = {
%s};
"""
        return fmt % (''.join(decls), ''.join(table))
# Register the ARM IRQ vector-table handler
Handlers.append(Handle_arm_irq())
######################################################################
# Wire protocol commands and responses
######################################################################
# Dynamic command and response registration
class HandleCommandGeneration:
    """Assign wire-protocol message ids and generate the C command,
    encoder and parameter tables."""
    def __init__(self):
        self.commands = {}         # msgname -> (funcname, flags, msgname)
        self.encoders = []         # (msgname or None, format string)
        self.msg_to_id = dict(msgproto.DefaultMessages)
        self.messages_by_name = { m.split()[0]: m for m in self.msg_to_id }
        self.all_param_types = {}  # tuple of param type names -> paramid
        self.ctr_dispatch = {
            'DECL_COMMAND_FLAGS': self.decl_command,
            '_DECL_ENCODER': self.decl_encoder,
            '_DECL_OUTPUT': self.decl_output
        }
    def decl_command(self, req):
        # Request: DECL_COMMAND_FLAGS <func> <flags> <format...>
        funcname, flags, msgname = req.split()[1:4]
        if msgname in self.commands:
            error("Multiple definitions for command '%s'" % msgname)
        self.commands[msgname] = (funcname, flags, msgname)
        msg = req.split(None, 3)[3]
        m = self.messages_by_name.get(msgname)
        if m is not None and m != msg:
            error("Conflicting definition for command '%s'" % msgname)
        self.messages_by_name[msgname] = msg
    def decl_encoder(self, req):
        msg = req.split(None, 1)[1]
        msgname = msg.split()[0]
        m = self.messages_by_name.get(msgname)
        if m is not None and m != msg:
            error("Conflicting definition for message '%s'" % msgname)
        self.messages_by_name[msgname] = msg
        self.encoders.append((msgname, msg))
    def decl_output(self, req):
        # Output-only (debug) messages carry no message name
        msg = req.split(None, 1)[1]
        self.encoders.append((None, msg))
    def create_message_ids(self):
        # Create unique ids for each message type
        msgid = max(self.msg_to_id.values())
        mlist = list(self.commands.keys()) + [m for n, m in self.encoders]
        for msgname in mlist:
            msg = self.messages_by_name.get(msgname, msgname)
            if msg not in self.msg_to_id:
                msgid += 1
                self.msg_to_id[msg] = msgid
        if msgid >= 128:
            # The mcu currently assumes all message ids encode to one byte
            error("Too many message ids")
    def update_data_dictionary(self, data):
        # Handle message ids over 96 (they are decoded as negative numbers)
        msg_to_tag = {msg: msgid if msgid < 96 else msgid - 128
                      for msg, msgid in self.msg_to_id.items()}
        command_tags = [msg_to_tag[msg]
                        for msgname, msg in self.messages_by_name.items()
                        if msgname in self.commands]
        response_tags = [msg_to_tag[msg]
                         for msgname, msg in self.messages_by_name.items()
                         if msgname not in self.commands]
        data['commands'] = { msg: msgtag for msg, msgtag in msg_to_tag.items()
                             if msgtag in command_tags }
        data['responses'] = { msg: msgtag for msg, msgtag in msg_to_tag.items()
                              if msgtag in response_tags }
        output = {msg: msgtag for msg, msgtag in msg_to_tag.items()
                  if msgtag not in command_tags and msgtag not in response_tags}
        if output:
            data['output'] = output
    def build_parser(self, msgid, msgformat, msgtype):
        # Emit the common fields of a command_parser/command_encoder struct
        if msgtype == "output":
            param_types = msgproto.lookup_output_params(msgformat)
            comment = "Output: " + msgformat
        else:
            param_types = [t for name, t in msgproto.lookup_params(msgformat)]
            comment = msgformat
        params = '0'
        types = tuple([t.__class__.__name__ for t in param_types])
        if types:
            # Identical parameter type tuples share one C table
            paramid = self.all_param_types.get(types)
            if paramid is None:
                paramid = len(self.all_param_types)
                self.all_param_types[types] = paramid
            params = 'command_parameters%d' % (paramid,)
        out = """
    // %s
    .msg_id=%d,
    .num_params=%d,
    .param_types = %s,
""" % (comment, msgid, len(types), params)
        if msgtype == 'response':
            # Buffer parameters expand to two args (pointer + length)
            num_args = (len(types) + types.count('PT_progmem_buffer')
                        + types.count('PT_buffer'))
            out += "    .num_args=%d," % (num_args,)
        else:
            max_size = min(msgproto.MESSAGE_MAX,
                           (msgproto.MESSAGE_MIN + 1
                            + sum([t.max_length for t in param_types])))
            out += "    .max_size=%d," % (max_size,)
        return out
    def generate_responses_code(self):
        # Emit encoder structs plus the name->encoder lookup helpers
        encoder_defs = []
        output_code = []
        encoder_code = []
        did_output = {}
        for msgname, msg in self.encoders:
            msgid = self.msg_to_id[msg]
            if msgid in did_output:
                continue
            did_output[msgid] = True
            code = ('    if (__builtin_strcmp(str, "%s") == 0)\n'
                    '        return &command_encoder_%s;\n' % (msg, msgid))
            if msgname is None:
                parsercode = self.build_parser(msgid, msg, 'output')
                output_code.append(code)
            else:
                parsercode = self.build_parser(msgid, msg, 'command')
                encoder_code.append(code)
            encoder_defs.append(
                "const struct command_encoder command_encoder_%s PROGMEM = {"
                "    %s\n};\n" % (
                    msgid, parsercode))
        fmt = """
%s
const __always_inline struct command_encoder *
ctr_lookup_encoder(const char *str)
{
    %s
    return NULL;
}
const __always_inline struct command_encoder *
ctr_lookup_output(const char *str)
{
    %s
    return NULL;
}
"""
        return fmt % ("".join(encoder_defs).strip(),
                      "".join(encoder_code).strip(),
                      "".join(output_code).strip())
    def generate_commands_code(self):
        # Emit the command dispatch table indexed directly by message id
        cmd_by_id = {
            self.msg_to_id[self.messages_by_name.get(msgname, msgname)]: cmd
            for msgname, cmd in self.commands.items()
        }
        max_cmd_msgid = max(cmd_by_id.keys())
        index = []
        externs = {}
        for msgid in range(max_cmd_msgid+1):
            if msgid not in cmd_by_id:
                # Gap in the id space - emit an empty table entry
                index.append(" {\n},")
                continue
            funcname, flags, msgname = cmd_by_id[msgid]
            msg = self.messages_by_name[msgname]
            externs[funcname] = 1
            parsercode = self.build_parser(msgid, msg, 'response')
            index.append(" {%s\n    .flags=%s,\n    .func=%s\n}," % (
                parsercode, flags, funcname))
        index = "".join(index).strip()
        externs = "\n".join(["extern void "+funcname+"(uint32_t*);"
                             for funcname in sorted(externs)])
        fmt = """
%s
const struct command_parser command_index[] PROGMEM = {
%s
};
const uint8_t command_index_size PROGMEM = ARRAY_SIZE(command_index);
"""
        return fmt % (externs, index)
    def generate_param_code(self):
        # Emit the shared parameter-type tables referenced by build_parser()
        sorted_param_types = sorted(
            [(i, a) for a, i in self.all_param_types.items()])
        params = ['']
        for paramid, argtypes in sorted_param_types:
            params.append(
                'static const uint8_t command_parameters%d[] PROGMEM = {\n'
                '    %s };' % (
                    paramid, ', '.join(argtypes),))
        params.append('')
        return "\n".join(params)
    def generate_code(self, options):
        self.create_message_ids()
        # Note: responses/commands must be generated before param tables
        # are emitted, as build_parser() populates all_param_types
        parsercode = self.generate_responses_code()
        cmdcode = self.generate_commands_code()
        paramcode = self.generate_param_code()
        return paramcode + parsercode + cmdcode
Handlers.append(HandleCommandGeneration())
######################################################################
# Version generation
######################################################################
# Run program and return the specified output
def check_output(prog):
    # Returns the program's stdout as a string, or "" on launch failure,
    # non-zero exit status, or undecodable output.
    logging.debug("Running %s" % (repr(prog),))
    try:
        proc = subprocess.Popen(shlex.split(prog), stdout=subprocess.PIPE)
        stdout = proc.communicate()[0]
        retcode = proc.poll()
    except OSError:
        logging.debug("Exception on run: %s" % (traceback.format_exc(),))
        return ""
    logging.debug("Got (code=%s): %s" % (retcode, repr(stdout)))
    if retcode:
        return ""
    try:
        return str(stdout.decode('utf8'))
    except UnicodeError:
        logging.debug("Exception on decode: %s" % (traceback.format_exc(),))
        return ""
# Obtain version info from "git" program
def git_version():
    # Only query git when run from a repository checkout
    if not os.path.exists('.git'):
        logging.debug("No '.git' file/directory found")
        return ""
    version = check_output("git describe --always --tags --long --dirty")
    version = version.strip()
    logging.debug("Got git version: %s" % (repr(version),))
    return version
# Build the full version string reported by the firmware
def build_version(extra, cleanbuild):
    version = git_version()
    if not version:
        # No git info available - can't claim a reproducible build
        version = "?"
        cleanbuild = False
    elif 'dirty' in version:
        cleanbuild = False
    if not cleanbuild:
        # Tag non-reproducible builds with a timestamp and hostname
        stamp = time.strftime("%Y%m%d_%H%M%S")
        version = "%s-%s-%s" % (version, stamp, socket.gethostname())
    return version + extra
# Run "tool --version" for each specified tool and extract versions
def tool_versions(tools):
    """Return (cleanbuild, description) for the semicolon separated build
    tools in `tools`.  cleanbuild is true only when a consistent gcc and
    binutils version was obtained from every tool."""
    tools = [t.strip() for t in tools.split(';')]
    versions = ['', '']  # [gcc_version, binutils_version]
    success = 0
    for tool in tools:
        # Extract first line from "tool --version" output
        verstr = check_output("%s --version" % (tool,)).split('\n')[0]
        # Check if this tool looks like a binutils program
        isbinutils = 0
        if verstr.startswith('GNU '):
            isbinutils = 1
            verstr = verstr[4:]
        # Extract version information and exclude program name
        if ' ' not in verstr:
            continue
        prog, ver = verstr.split(' ', 1)
        if not prog or not ver:
            continue
        # Check for any version conflicts
        if versions[isbinutils] and versions[isbinutils] != ver:
            logging.debug("Mixed version %s vs %s" % (
                repr(versions[isbinutils]), repr(ver)))
            versions[isbinutils] = "mixed"
            continue
        versions[isbinutils] = ver
        success += 1
    # Clean only if both families reported and every tool was parsed
    cleanbuild = versions[0] and versions[1] and success == len(tools)
    return cleanbuild, "gcc: %s binutils: %s" % (versions[0], versions[1])
# Add version information to the data dictionary
class HandleVersions:
    """Record software and toolchain version strings and embed them in
    both the data dictionary and the generated C file."""
    def __init__(self):
        self.ctr_dispatch = {}
        self.toolstr = self.version = ""
    def update_data_dictionary(self, data):
        data.update(version=self.version, build_versions=self.toolstr)
    def generate_code(self, options):
        # Determine toolchain versions, then derive the build version
        cleanbuild, self.toolstr = tool_versions(options.tools)
        self.version = build_version(options.extra, cleanbuild)
        sys.stdout.write("Version: %s\n" % (self.version,))
        return "\n// version: %s\n// build_versions: %s\n" % (
            self.version, self.toolstr)
# Register the version handler
Handlers.append(HandleVersions())
######################################################################
# Identify data dictionary generation
######################################################################
# Automatically generate the wire protocol data dictionary
class HandleIdentify:
    """Build the compressed "data dictionary" the host downloads from the
    mcu to identify its protocol.  Must run after all other handlers have
    contributed their data."""
    def __init__(self):
        self.ctr_dispatch = {}
    def update_data_dictionary(self, data):
        pass
    def generate_code(self, options):
        # Generate data dictionary by polling every registered handler
        data = {}
        for h in Handlers:
            h.update_data_dictionary(data)
        datadict = json.dumps(data, separators=(',', ':'), sort_keys=True)
        # Write data dictionary
        if options.write_dictionary:
            f = open(options.write_dictionary, 'w')
            f.write(datadict)
            f.close()
        # Format compressed info into C code (8 bytes per line)
        zdatadict = bytearray(zlib.compress(datadict.encode(), 9))
        out = []
        for i in range(len(zdatadict)):
            if i % 8 == 0:
                out.append('\n   ')
            out.append(" 0x%02x," % (zdatadict[i],))
        fmt = """
const uint8_t command_identify_data[] PROGMEM = {%s
};
// Identify size = %d (%d uncompressed)
const uint32_t command_identify_size PROGMEM
    = ARRAY_SIZE(command_identify_data);
"""
        return fmt % (''.join(out), len(zdatadict), len(datadict))
Handlers.append(HandleIdentify())
######################################################################
# Main code
######################################################################
def main():
    """Parse the compiler-emitted build request file and write the
    autogenerated C source."""
    usage = "%prog [options] <cmd section file> <output.c>"
    opts = optparse.OptionParser(usage)
    opts.add_option("-e", "--extra", dest="extra", default="",
                    help="extra version string to append to version")
    opts.add_option("-d", dest="write_dictionary",
                    help="file to write mcu protocol dictionary")
    opts.add_option("-t", "--tools", dest="tools", default="",
                    help="list of build programs to extract version from")
    opts.add_option("-v", action="store_true", dest="verbose",
                    help="enable debug messages")
    options, args = opts.parse_args()
    if len(args) != 2:
        opts.error("Incorrect arguments")
    incmdfile, outcfile = args
    if options.verbose:
        logging.basicConfig(level=logging.DEBUG)
    # Parse request file
    # Merge the per-handler dispatch tables into one command map
    ctr_dispatch = { k: v for h in Handlers for k, v in h.ctr_dispatch.items() }
    f = open(incmdfile, 'r')
    data = f.read()
    f.close()
    for req in data.split('\n'):
        req = req.lstrip()
        if not req:
            continue
        # First token selects the handler method for this request
        cmd = req.split()[0]
        if cmd not in ctr_dispatch:
            error("Unknown build time command '%s'" % cmd)
        ctr_dispatch[cmd](req)
    # Write output
    code = "".join([FILEHEADER] + [h.generate_code(options) for h in Handlers])
    f = open(outcfile, 'w')
    f.write(code)
    f.close()

174
scripts/calibrate_shaper.py Normal file
View File

@@ -0,0 +1,174 @@
#!/usr/bin/env python3
# Shaper auto-calibration script
#
# Copyright (C) 2020 Dmitry Butyugin <dmbutyugin@google.com>
# Copyright (C) 2020 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
from __future__ import print_function
import importlib, optparse, os, sys
from textwrap import wrap
import numpy as np, matplotlib
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', 'klippy'))
shaper_calibrate = importlib.import_module('.shaper_calibrate', 'extras')
MAX_TITLE_LENGTH=65
def parse_log(logname):
    """Load either raw accelerometer samples or precomputed PSD data.

    Returns a numpy array for raw data, or a CalibrationData instance
    when the csv already contains power spectral density columns."""
    # NOTE(review): `header` is unbound if the file is empty and is a
    # comment line if every line starts with '#' - confirm inputs always
    # contain at least one data/header line
    with open(logname) as f:
        for header in f:
            if not header.startswith('#'):
                break
        if not header.startswith('freq,psd_x,psd_y,psd_z,psd_xyz'):
            # Raw accelerometer data
            return np.loadtxt(logname, comments='#', delimiter=',')
    # Parse power spectral density data
    data = np.loadtxt(logname, skiprows=1, comments='#', delimiter=',')
    calibration_data = shaper_calibrate.CalibrationData(
        freq_bins=data[:,0], psd_sum=data[:,4],
        psd_x=data[:,1], psd_y=data[:,2], psd_z=data[:,3])
    calibration_data.set_numpy(np)
    # If input shapers are present in the CSV file, the frequency
    # response is already normalized to input frequencies
    if 'mzv' not in header:
        calibration_data.normalize_to_frequencies()
    return calibration_data
######################################################################
# Shaper calibration
######################################################################
# Find the best shaper parameters
def calibrate_shaper(datas, csv_output, max_smoothing):
helper = shaper_calibrate.ShaperCalibrate(printer=None)
if isinstance(datas[0], shaper_calibrate.CalibrationData):
calibration_data = datas[0]
for data in datas[1:]:
calibration_data.add_data(data)
else:
# Process accelerometer data
calibration_data = helper.process_accelerometer_data(datas[0])
for data in datas[1:]:
calibration_data.add_data(helper.process_accelerometer_data(data))
calibration_data.normalize_to_frequencies()
shaper, all_shapers = helper.find_best_shaper(
calibration_data, max_smoothing, print)
print("Recommended shaper is %s @ %.1f Hz" % (shaper.name, shaper.freq))
if csv_output is not None:
helper.save_calibration_data(
csv_output, calibration_data, all_shapers)
return shaper.name, all_shapers, calibration_data
######################################################################
# Plot frequency response and suggested input shapers
######################################################################
def plot_freq_response(lognames, calibration_data, shapers,
                       selected_shaper, max_freq):
    """Plot measured PSDs plus each shaper's vibration-reduction curve.

    Returns the matplotlib figure (not yet shown or saved)."""
    # Clip all series to the requested frequency range
    freqs = calibration_data.freq_bins
    psd = calibration_data.psd_sum[freqs <= max_freq]
    px = calibration_data.psd_x[freqs <= max_freq]
    py = calibration_data.psd_y[freqs <= max_freq]
    pz = calibration_data.psd_z[freqs <= max_freq]
    freqs = freqs[freqs <= max_freq]
    fontP = matplotlib.font_manager.FontProperties()
    fontP.set_size('x-small')
    fig, ax = matplotlib.pyplot.subplots()
    ax.set_xlabel('Frequency, Hz')
    ax.set_xlim([0, max_freq])
    ax.set_ylabel('Power spectral density')
    ax.plot(freqs, psd, label='X+Y+Z', color='purple')
    ax.plot(freqs, px, label='X', color='red')
    ax.plot(freqs, py, label='Y', color='green')
    ax.plot(freqs, pz, label='Z', color='blue')
    title = "Frequency response and shapers (%s)" % (', '.join(lognames))
    ax.set_title("\n".join(wrap(title, MAX_TITLE_LENGTH)))
    ax.xaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(5))
    ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
    ax.ticklabel_format(axis='y', style='scientific', scilimits=(0,0))
    ax.grid(which='major', color='grey')
    ax.grid(which='minor', color='lightgrey')
    # Second y-axis for the shaper vibration-reduction ratio curves
    ax2 = ax.twinx()
    ax2.set_ylabel('Shaper vibration reduction (ratio)')
    # NOTE(review): assumes selected_shaper names one of `shapers`;
    # otherwise best_shaper_vals stays None and the plot below fails
    best_shaper_vals = None
    for shaper in shapers:
        label = "%s (%.1f Hz, vibr=%.1f%%, sm~=%.2f, accel<=%.f)" % (
            shaper.name.upper(), shaper.freq,
            shaper.vibrs * 100., shaper.smoothing,
            round(shaper.max_accel / 100.) * 100.)
        linestyle = 'dotted'
        if shaper.name == selected_shaper:
            # Highlight the recommended shaper
            linestyle = 'dashdot'
            best_shaper_vals = shaper.vals
        ax2.plot(freqs, shaper.vals, label=label, linestyle=linestyle)
    ax.plot(freqs, psd * best_shaper_vals,
            label='After\nshaper', color='cyan')
    # A hack to add a human-readable shaper recommendation to legend
    ax2.plot([], [], ' ',
             label="Recommended shaper: %s" % (selected_shaper.upper()))
    ax.legend(loc='upper left', prop=fontP)
    ax2.legend(loc='upper right', prop=fontP)
    fig.tight_layout()
    return fig
######################################################################
# Startup
######################################################################
def setup_matplotlib(output_to_file):
    """Configure matplotlib and import the submodules used for plotting.

    When rendering to a file, enable automatic layout and select the
    non-interactive 'Agg' backend - backend selection must happen
    before matplotlib.pyplot is imported.
    """
    global matplotlib
    if output_to_file:
        matplotlib.rcParams.update({'figure.autolayout': True})
        matplotlib.use('Agg')
    import matplotlib.pyplot
    import matplotlib.dates
    import matplotlib.font_manager
    import matplotlib.ticker
def main():
    """Command-line entry point: parse options, calibrate, emit outputs."""
    usage = "%prog [options] <logs>"
    parser = optparse.OptionParser(usage)
    parser.add_option("-o", "--output", type="string", dest="output",
                      default=None, help="filename of output graph")
    parser.add_option("-c", "--csv", type="string", dest="csv",
                      default=None, help="filename of output csv file")
    parser.add_option("-f", "--max_freq", type="float", default=200.,
                      help="maximum frequency to graph")
    parser.add_option("-s", "--max_smoothing", type="float", default=None,
                      help="maximum shaper smoothing to allow")
    opts, lognames = parser.parse_args()
    if not lognames:
        parser.error("Incorrect number of arguments")
    if opts.max_smoothing is not None and opts.max_smoothing < 0.05:
        parser.error("Too small max_smoothing specified (must be at least 0.05)")
    # Load the accelerometer measurements from each log file
    measurements = [parse_log(fn) for fn in lognames]
    # Run the input shaper auto-calibration (optionally writing a CSV)
    selected_shaper, shapers, calibration_data = calibrate_shaper(
        measurements, opts.csv, opts.max_smoothing)
    # Draw a graph unless only CSV output was requested
    if opts.output or not opts.csv:
        setup_matplotlib(opts.output is not None)
        fig = plot_freq_response(lognames, calibration_data, shapers,
                                 selected_shaper, opts.max_freq)
        if opts.output is None:
            # Interactive display
            matplotlib.pyplot.show()
        else:
            fig.set_size_inches(8, 6)
            fig.savefig(opts.output)

if __name__ == '__main__':
    main()

64
scripts/canbus_query.py Normal file
View File

@@ -0,0 +1,64 @@
#!/usr/bin/env python2
# Tool to query CAN bus uuids
#
# Copyright (C) 2021 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, os, optparse, time
import can
# CAN id used to send admin/discovery commands (responses arrive on id + 1)
CANBUS_ID_ADMIN = 0x3f0
# Admin command: ask all nodes without a node id to announce themselves
CMD_QUERY_UNASSIGNED = 0x00
# Response byte sent by a node that still needs a node id
RESP_NEED_NODEID = 0x20
# Admin commands for assigning a node id (also identify the application)
CMD_SET_KLIPPER_NODEID = 0x01
CMD_SET_CANBOOT_NODEID = 0x11
def query_unassigned(canbus_iface):
    """Broadcast a query on the CAN bus and report unassigned node uuids.

    Listens for two seconds and prints one line per unique uuid found,
    followed by a summary count.
    """
    # Open CAN socket, listening only for admin responses (admin id + 1)
    can_filters = [{"can_id": CANBUS_ID_ADMIN + 1, "can_mask": 0x7ff,
                    "extended": False}]
    bus = can.interface.Bus(channel=canbus_iface, can_filters=can_filters,
                            bustype='socketcan')
    # Broadcast the "query unassigned" admin command
    query = can.Message(arbitration_id=CANBUS_ID_ADMIN,
                        data=[CMD_QUERY_UNASSIGNED], is_extended_id=False)
    bus.send(query)
    # Collect responses until the two second deadline expires
    app_names = {
        CMD_SET_KLIPPER_NODEID: "Klipper",
        CMD_SET_CANBOOT_NODEID: "CanBoot"
    }
    uuids = {}
    deadline = time.time() + 2.
    while True:
        timeout = deadline - time.time()
        if timeout <= 0.:
            break
        resp = bus.recv(timeout)
        if resp is None or resp.arbitration_id != CANBUS_ID_ADMIN + 1:
            continue
        if resp.dlc < 7 or resp.data[0] != RESP_NEED_NODEID:
            continue
        # The 48-bit uuid is sent big-endian in data bytes 1-6
        uuid = sum([v << ((5 - i) * 8) for i, v in enumerate(resp.data[1:7])])
        if uuid in uuids:
            continue
        uuids[uuid] = 1
        # An optional eighth byte identifies the running application
        app_id = resp.data[7] if resp.dlc > 7 else CMD_SET_KLIPPER_NODEID
        app_name = app_names.get(app_id, "Unknown")
        sys.stdout.write("Found canbus_uuid=%012x, Application: %s\n"
                         % (uuid, app_name))
    sys.stdout.write("Total %d uuids found\n" % (len(uuids),))
def main():
    """Parse command line arguments and run the CAN bus uuid query."""
    usage = "%prog [options] <can interface>"
    parser = optparse.OptionParser(usage)
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.error("Incorrect number of arguments")
    query_unassigned(args[0])

if __name__ == '__main__':
    main()

18
scripts/check-gcc.sh Normal file
View File

@@ -0,0 +1,18 @@
#!/bin/sh
# Detect the broken Ubuntu 18.04 arm-none-eabi-gcc compile by comparing
# the ARM ISA attribute of two generated ELF files.
file_a="$1"
file_b="$2"
isa_a=$(readelf -A "$file_a" | grep "Tag_ARM_ISA_use")
isa_b=$(readelf -A "$file_b" | grep "Tag_ARM_ISA_use")
if [ "$isa_a" != "$isa_b" ]; then
    echo ""
    echo "ERROR: The compiler failed to correctly compile Klipper"
    echo "It will be necessary to upgrade the compiler"
    echo "See: https://bugs.launchpad.net/ubuntu/+source/newlib/+bug/1767223"
    echo ""
    # Remove the mis-compiled output so the next build regenerates it
    rm -f "$file_a"
    exit 99
fi

View File

@@ -0,0 +1,70 @@
#!/usr/bin/env python3
# Check files for whitespace problems
#
# Copyright (C) 2018 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, os.path, unicodedata
# Set once the first problem is reported (controls banner and exit status)
HaveError = False

def report_error(filename, lineno, msg):
    """Report one whitespace problem, printing a banner before the first."""
    global HaveError
    if not HaveError:
        HaveError = True
        sys.stderr.write("\n\nERROR:\nERROR: White space errors\nERROR:\n")
    # lineno is zero based internally; report one based line numbers
    sys.stderr.write("%s:%d: %s\n" % (filename, lineno + 1, msg))
def check_file(filename):
    """Check one file for whitespace problems, reporting via report_error().

    Checks performed: valid utf-8, no control characters (tabs are
    allowed only in Makefiles), no trailing whitespace, an 80 column
    limit for source code, and exactly one newline at end of file.
    Unreadable and empty files are silently accepted.
    """
    # Open and read file (context manager guarantees the handle is closed)
    try:
        with open(filename, 'rb') as f:
            data = f.read()
    except IOError:
        return
    if not data:
        # Empty files are okay
        return
    # Do checks
    is_source_code = any(filename.endswith(s) for s in ['.c', '.h', '.py'])
    lineno = 0
    for lineno, line in enumerate(data.split(b'\n')):
        # Verify line is valid utf-8
        try:
            line = line.decode('utf-8')
        except UnicodeDecodeError:
            report_error(filename, lineno, "Found non utf-8 character")
            continue
        # Check for control characters
        for c in line:
            if unicodedata.category(c).startswith('C'):
                char_name = repr(c)
                if c == '\t':
                    # Tabs are required syntax in Makefiles
                    if os.path.basename(filename).lower() == 'makefile':
                        continue
                    char_name = 'tab'
                report_error(filename, lineno, "Invalid %s character" % (
                    char_name,))
                break
        # Check for trailing space
        if line.endswith(' ') or line.endswith('\t'):
            report_error(filename, lineno, "Line has trailing spaces")
        # Check for more than 80 characters
        if is_source_code and len(line) > 80:
            report_error(filename, lineno, "Line longer than 80 characters")
    # 'lineno' is left at the final line for end-of-file reports
    if not data.endswith(b'\n'):
        report_error(filename, lineno, "No newline at end of file")
    if data.endswith(b'\n\n'):
        report_error(filename, lineno, "Extra newlines at end of file")
def main():
    """Check each file named on the command line; exit non-zero on errors."""
    for filename in sys.argv[1:]:
        check_file(filename)
    if HaveError:
        sys.stderr.write("\n\n")
        sys.exit(-1)

if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Script to check whitespace in Klipper source code.

# Find SRCDIR from the pathname of this script
SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
cd ${SRCDIR}

# Run whitespace tool on all source files
WS_DIRS="config/ docs/ klippy/ scripts/ src/ test/"
WS_EXCLUDE="-path scripts/kconfig -prune"
# Note: the original list repeated '*.css', '*.yaml' and '*.yml'; the
# duplicate predicates have been removed (the matched file set is the same).
WS_FILES="-o -iname '*.[csh]' -o -name '*.py' -o -name '*.sh'"
WS_FILES="$WS_FILES -o -name '*.md' -o -name '*.cfg' -o -name '*.txt'"
WS_FILES="$WS_FILES -o -name '*.html' -o -name '*.css'"
WS_FILES="$WS_FILES -o -name '*.yaml' -o -name '*.yml'"
WS_FILES="$WS_FILES -o -name '*.test' -o -name '*.config'"
WS_FILES="$WS_FILES -o -iname '*.lds' -o -iname 'Makefile' -o -iname 'Kconfig'"
eval find $WS_DIRS $WS_EXCLUDE $WS_FILES | xargs ./scripts/check_whitespace.py

243
scripts/checkstack.py Normal file
View File

@@ -0,0 +1,243 @@
#!/usr/bin/env python2
# Script that tries to find how much stack space each function in an
# object is using.
#
# Copyright (C) 2015 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
# Usage:
# avr-objdump -d out/klipper.elf | scripts/checkstack.py
import sys
import re
# Functions that change stacks
STACKHOP = []
# List of functions we can assume are never called.
IGNORE = []

# Legend printed before the per-function report produced by main()
OUTPUTDESC = """
#funcname1[preamble_stack_usage,max_usage_with_callers]:
# insn_addr:called_function [usage_at_call_point+caller_preamble,total_usage]
#
#funcname2[p,m,max_usage_to_yield_point]:
# insn_addr:called_function [u+c,t,usage_to_yield_point]
"""
class function:
    """Per-function stack accounting gathered from the disassembly."""
    def __init__(self, funcaddr, funcname):
        self.funcaddr = funcaddr
        self.funcname = funcname
        # Stack used by the function preamble alone
        self.basic_stack_usage = 0
        # Worst case usage including callees (filled in by calcmaxstack)
        self.max_stack_usage = None
        # Largest usage seen at a "yield" point (-1 when none seen)
        self.yield_usage = -1
        self.max_yield_usage = None
        self.total_calls = 0
        # called_funcs = [(insnaddr, calladdr, stackusage), ...]
        self.called_funcs = []
        self.subfuncs = {}
    def noteYield(self, stackusage):
        # Record a "yield" point, keeping the maximum usage seen
        self.yield_usage = max(self.yield_usage, stackusage)
    def noteCall(self, insnaddr, calladdr, stackusage):
        # Record a call site, ignoring duplicate (target, usage) pairs
        key = (calladdr, stackusage)
        if key in self.subfuncs:
            return
        self.subfuncs[key] = 1
        self.called_funcs.append((insnaddr, calladdr, stackusage))
# Find out maximum stack usage for a function
def calcmaxstack(info, funcs):
    """Recursively fill in max_stack_usage / max_yield_usage / total_calls."""
    if info.max_stack_usage is not None:
        # Already computed
        return
    # Seed results before recursing so that call cycles terminate
    info.max_stack_usage = max_usage = info.basic_stack_usage
    info.max_yield_usage = max_yield = info.yield_usage
    calls = 0
    seen = {}
    # Find max of all nested calls.
    for insnaddr, calladdr, usage in info.called_funcs:
        callee = funcs.get(calladdr)
        if callee is None:
            continue
        calcmaxstack(callee, funcs)
        if callee.funcname not in seen:
            seen[callee.funcname] = 1
            calls += callee.total_calls + 1
        nameroot = callee.funcname.split('.')[0]
        if nameroot in IGNORE:
            # This called function is ignored - don't contribute it to
            # the max stack.
            continue
        tot = usage + callee.max_stack_usage
        tot_yield = usage + callee.max_yield_usage
        if nameroot in STACKHOP:
            # Don't count children of this function
            tot = tot_yield = usage
        max_usage = max(max_usage, tot)
        if callee.max_yield_usage >= 0:
            max_yield = max(max_yield, tot_yield)
    info.max_stack_usage = max_usage
    info.max_yield_usage = max_yield
    info.total_calls = calls
# Try to arrange output so that functions that call each other are
# near each other.
def orderfuncs(funcaddrs, availfuncs):
    """Return function infos ordered so callees appear before callers.

    Candidates are taken most-called first; each selected function pulls
    in (and removes from availfuncs) everything it calls before itself.
    """
    candidates = [(availfuncs[addr].total_calls, availfuncs[addr].funcname,
                   addr)
                  for addr in funcaddrs if addr in availfuncs]
    candidates.sort(reverse=True)
    ordered = []
    while candidates:
        count, name, addr = candidates.pop(0)
        info = availfuncs.get(addr)
        if info is None:
            # Already emitted via an earlier function's call chain
            continue
        del availfuncs[addr]
        callees = [call[1] for call in info.called_funcs]
        ordered = ordered + orderfuncs(callees, availfuncs) + [info]
    return ordered
# Matches a hexadecimal number
hex_s = r'[0-9a-f]+'
# Matches an objdump function header line, eg "00000100 <foo>:"
re_func = re.compile(r'^(?P<funcaddr>' + hex_s + r') <(?P<func>.*)>:$')
# Matches an objdump instruction line, capturing the address, mnemonic,
# parameters, and (for jumps/calls) the target address and symbol
re_asm = re.compile(
    r'^[ ]*(?P<insnaddr>' + hex_s
    + r'):\t[^\t]*\t(?P<insn>[^\t]+?)(?P<params>\t[^;]*)?'
    + r'[ ]*(; (?P<calladdr>0x' + hex_s
    + r') <(?P<ref>.*)>)?$')
def main():
    """Parse objdump disassembly on stdin and print per-function stack usage."""
    # Placeholder entries for calls that cannot be resolved
    unknownfunc = function(None, "<unknown>")
    indirectfunc = function(-1, '<indirect>')
    unknownfunc.max_stack_usage = indirectfunc.max_stack_usage = 0
    unknownfunc.max_yield_usage = indirectfunc.max_yield_usage = -1
    funcs = {-1: indirectfunc}
    funcaddr = None
    datalines = {}
    cur = None
    atstart = 0
    stackusage = 0
    # Parse input lines
    for line in sys.stdin.readlines():
        m = re_func.match(line)
        if m is not None:
            # Found function
            funcaddr = int(m.group('funcaddr'), 16)
            funcs[funcaddr] = cur = function(funcaddr, m.group('func'))
            stackusage = 0
            atstart = 1
            continue
        m = re_asm.match(line)
        if m is None:
            # Non-instruction line (eg, data tables) - keep for later scans
            datalines.setdefault(funcaddr, []).append(line)
            #print("other", repr(line))
            continue
        insn = m.group('insn')
        if insn == 'push':
            # Each push grows the stack by one byte
            stackusage += 1
            continue
        if insn == 'rcall' and m.group('params').strip() == '.+0':
            # "rcall .+0" - counted as reserving two bytes of stack
            stackusage += 2
            continue
        if atstart:
            if insn in ['in', 'eor']:
                continue
            # First "real" instruction marks the end of the preamble
            cur.basic_stack_usage = stackusage
            atstart = 0
        insnaddr = m.group('insnaddr')
        calladdr = m.group('calladdr')
        if calladdr is None:
            if insn == 'ijmp':
                # Indirect tail call
                cur.noteCall(insnaddr, -1, 0)
            elif insn == 'icall':
                # Indirect call - return address adds two bytes
                cur.noteCall(insnaddr, -1, stackusage + 2)
            else:
                # misc instruction
                continue
        else:
            # Jump or call insn
            calladdr = int(calladdr, 16)
            ref = m.group('ref')
            if '+' in ref:
                # Inter-function jump.
                continue
            elif insn.startswith('ld') or insn.startswith('st'):
                # memory access
                continue
            elif insn in ('rjmp', 'jmp', 'brne', 'brcs'):
                # Tail call
                cur.noteCall(insnaddr, calladdr, 0)
            elif insn in ('rcall', 'call'):
                cur.noteCall(insnaddr, calladdr, stackusage + 2)
            else:
                print("unknown call", ref)
                cur.noteCall(insnaddr, calladdr, stackusage)
        # Reset stack usage to preamble usage
        stackusage = cur.basic_stack_usage
    # Update for known indirect functions
    funcsbyname = {}
    for info in funcs.values():
        funcnameroot = info.funcname.split('.')[0]
        funcsbyname[funcnameroot] = info
    cmdfunc = funcsbyname.get('sched_main')
    command_index = funcsbyname.get('command_index')
    if command_index is not None and cmdfunc is not None:
        # The command_index data table holds handlers invoked from sched_main
        for line in datalines[command_index.funcaddr]:
            parts = line.split()
            if len(parts) < 9:
                continue
            calladdr = int(parts[8]+parts[7], 16) * 2
            numparams = int(parts[2], 16)
            stackusage = cmdfunc.basic_stack_usage + 2 + numparams * 4
            cmdfunc.noteCall(0, calladdr, stackusage)
            if len(parts) < 17:
                continue
            # A second table entry may be present on the same line
            calladdr = int(parts[16]+parts[15], 16) * 2
            numparams = int(parts[10], 16)
            stackusage = cmdfunc.basic_stack_usage + 2 + numparams * 4
            cmdfunc.noteCall(0, calladdr, stackusage)
    # "*_event" handlers are treated as called from the interrupt vector
    eventfunc = funcsbyname.get('__vector_13', funcsbyname.get('__vector_17'))
    for funcnameroot, info in funcsbyname.items():
        if funcnameroot.endswith('_event') and eventfunc is not None:
            eventfunc.noteCall(0, info.funcaddr, eventfunc.basic_stack_usage+2)
    # Calculate maxstackusage
    for info in funcs.values():
        calcmaxstack(info, funcs)
    # Sort functions for output
    funcinfos = orderfuncs(funcs.keys(), funcs.copy())
    # Show all functions
    print(OUTPUTDESC)
    for info in funcinfos:
        if info.max_stack_usage == 0 and info.max_yield_usage < 0:
            continue
        yieldstr = ""
        if info.max_yield_usage >= 0:
            yieldstr = ",%d" % info.max_yield_usage
        print("\n%s[%d,%d%s]:" % (info.funcname, info.basic_stack_usage
                                  , info.max_stack_usage, yieldstr))
        for insnaddr, calladdr, stackusage in info.called_funcs:
            callinfo = funcs.get(calladdr, unknownfunc)
            yieldstr = ""
            if callinfo.max_yield_usage >= 0:
                yieldstr = ",%d" % (stackusage + callinfo.max_yield_usage)
            print(" %04s:%-40s [%d+%d,%d%s]" % (
                insnaddr, callinfo.funcname, stackusage
                , callinfo.basic_stack_usage
                , stackusage+callinfo.max_stack_usage, yieldstr))

if __name__ == '__main__':
    main()

81
scripts/ci-build.sh Normal file
View File

@@ -0,0 +1,81 @@
#!/bin/bash
# Test script for continuous integration.

# Stop script early on any error; check variables
set -eu

# Paths to tools installed by ci-install.sh
MAIN_DIR=${PWD}
BUILD_DIR=${PWD}/ci_build
export PATH=${BUILD_DIR}/pru-gcc/bin:${PATH}
PYTHON=${BUILD_DIR}/python-env/bin/python
PYTHON2=${BUILD_DIR}/python2-env/bin/python


######################################################################
# Section grouping output message helpers
######################################################################

# Open a collapsible log section (GitHub Actions "::group::" syntax)
start_test()
{
    echo "::group::=============== $1 $2"
    set -x
}

# Close the current log section
finish_test()
{
    set +x
    echo "=============== Finished $2"
    echo "::endgroup::"
}


######################################################################
# Check for whitespace errors
######################################################################

start_test check_whitespace "Check whitespace"
./scripts/check_whitespace.sh
finish_test check_whitespace "Check whitespace"


######################################################################
# Run compile tests for several different MCU types
######################################################################

# Each target's data dictionary is saved for the klippy tests below
DICTDIR=${BUILD_DIR}/dict
mkdir -p ${DICTDIR}

for TARGET in test/configs/*.config ; do
    start_test mcu_compile "$TARGET"
    make clean
    make distclean
    unset CC
    cp ${TARGET} .config
    make olddefconfig
    make V=1
    size out/*.elf
    finish_test mcu_compile "$TARGET"
    cp out/klipper.dict ${DICTDIR}/$(basename ${TARGET} .config).dict
done


######################################################################
# Verify klippy host software
######################################################################

start_test klippy "Test klippy import (Python3)"
$PYTHON klippy/klippy.py --import-test
finish_test klippy "Test klippy import (Python3)"

start_test klippy "Test klippy import (Python2)"
$PYTHON2 klippy/klippy.py --import-test
finish_test klippy "Test klippy import (Python2)"

start_test klippy "Test invoke klippy (Python3)"
$PYTHON scripts/test_klippy.py -d ${DICTDIR} test/klippy/*.test
finish_test klippy "Test invoke klippy (Python3)"

start_test klippy "Test invoke klippy (Python2)"
$PYTHON2 scripts/test_klippy.py -d ${DICTDIR} test/klippy/*.test
finish_test klippy "Test invoke klippy (Python2)"

68
scripts/ci-install.sh Normal file
View File

@@ -0,0 +1,68 @@
#!/bin/bash
# Build setup script for continuous integration testing.
# See ci-build.sh for the actual test steps.

# Stop script early on any error; check variables; be verbose
set -eux

MAIN_DIR=${PWD}
BUILD_DIR=${PWD}/ci_build
# Built artifacts placed in the cache dir are reused between CI runs
CACHE_DIR=${PWD}/ci_cache
mkdir -p ${BUILD_DIR} ${CACHE_DIR}


######################################################################
# Install system dependencies
######################################################################

echo -e "\n\n=============== Install system dependencies\n\n"
PKGS="virtualenv python-dev libffi-dev build-essential"
PKGS="${PKGS} gcc-avr avr-libc"
PKGS="${PKGS} libnewlib-arm-none-eabi gcc-arm-none-eabi binutils-arm-none-eabi"
PKGS="${PKGS} pv libmpfr-dev libgmp-dev libmpc-dev texinfo bison flex"
sudo apt-get install ${PKGS}


######################################################################
# Install (or build) pru gcc
######################################################################

echo -e "\n\n=============== Install embedded pru gcc\n\n"
PRU_FILE=${CACHE_DIR}/gnupru.tar.gz
PRU_DIR=${BUILD_DIR}/pru-gcc
if [ ! -f ${PRU_FILE} ]; then
    # Cache miss - build the pru toolchain from source (slow)
    cd ${BUILD_DIR}
    git config --global user.email "you@example.com"
    git config --global user.name "Your Name"
    git clone https://github.com/dinuxbg/gnupru -b 2018.03-beta-rc3 --depth 1
    cd gnupru
    export PREFIX=${PRU_DIR}
    # Pipe through pv to emit periodic output (keeps CI from timing out)
    ./download-and-patch.sh 2>&1 | pv -nli 30 > ${BUILD_DIR}/gnupru-build.log
    ./build.sh 2>&1 | pv -nli 30 >> ${BUILD_DIR}/gnupru-build.log
    cd ${BUILD_DIR}
    tar cfz ${PRU_FILE} pru-gcc/
else
    # Cache hit - unpack the previously built toolchain
    cd ${BUILD_DIR}
    tar xfz ${PRU_FILE}
fi


######################################################################
# Create python3 virtualenv environment
######################################################################

echo -e "\n\n=============== Install python3 virtualenv\n\n"
cd ${MAIN_DIR}
virtualenv -p python3 ${BUILD_DIR}/python-env
${BUILD_DIR}/python-env/bin/pip install -r ${MAIN_DIR}/scripts/klippy-requirements.txt


######################################################################
# Create python2 virtualenv environment
######################################################################

echo -e "\n\n=============== Install python2 virtualenv\n\n"
cd ${MAIN_DIR}
virtualenv -p python2 ${BUILD_DIR}/python2-env
${BUILD_DIR}/python2-env/bin/pip install -r ${MAIN_DIR}/scripts/klippy-requirements.txt

26
scripts/flash-linux.sh Normal file
View File

@@ -0,0 +1,26 @@
#!/bin/bash
# This script installs the Linux MCU code to /usr/local/bin/

# Root is required to write to /usr/local/bin and restart services
if [ "$EUID" -ne 0 ]; then
    echo "This script must be run as root"
    exit -1
fi

# Stop on any subsequent error
set -e

# Install new micro-controller code
echo "Installing micro-controller code to /usr/local/bin/"
rm -f /usr/local/bin/klipper_mcu
cp out/klipper.elf /usr/local/bin/klipper_mcu
sync

# Restart (if system install script present)
if [ -f /etc/init.d/klipper_pru ]; then
    echo "Attempting host PRU restart..."
    service klipper_pru restart
fi

# Restart (if system install script present)
if [ -f /etc/init.d/klipper_mcu ]; then
    echo "Attempting host MCU restart..."
    service klipper_mcu restart
fi

20
scripts/flash-pru.sh Normal file
View File

@@ -0,0 +1,20 @@
#!/bin/bash
# This script installs the PRU firmware on a beaglebone machine.

# Root is required to write to /lib/firmware and restart services
if [ "$EUID" -ne 0 ]; then
    echo "This script must be run as root"
    exit -1
fi

# Stop on any subsequent error
set -e

# Install new firmware
echo "Installing firmware to /lib/firmware/"
cp out/pru0.elf /lib/firmware/am335x-pru0-fw
cp out/pru1.elf /lib/firmware/am335x-pru1-fw
sync

# Restart (if system install script present)
if [ -f /etc/init.d/klipper_pru ]; then
    echo "Attempting PRU restart..."
    service klipper_pru restart
fi

85
scripts/flash-sdcard.sh Normal file
View File

@@ -0,0 +1,85 @@
#!/bin/bash
# This script launches flash_sdcard.py, a utility that enables
# unattended firmware updates on boards with "SD Card" bootloaders

# Non-standard installations may need to change this location
KLIPPY_ENV="${HOME}/klippy-env/bin/python"
SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
KLIPPER_BIN="${SRCDIR}/out/klipper.bin"
KLIPPER_BIN_DEFAULT=$KLIPPER_BIN
KLIPPER_DICT_DEFAULT="${SRCDIR}/out/klipper.dict"
SPI_FLASH="${SRCDIR}/scripts/spi_flash/spi_flash.py"
BAUD_ARG=""

# Force script to exit if an error occurs
set -e

print_help_message()
{
    echo "SD Card upload utility for Klipper"
    echo
    echo "usage: flash_sdcard.sh [-h] [-l] [-b <baud>] [-f <firmware>] [-d <dictionary>]"
    echo " <device> <board>"
    echo
    echo "positional arguments:"
    echo " <device> device serial port"
    echo " <board> board type"
    echo
    echo "optional arguments:"
    echo " -h show this message"
    echo " -l list available boards"
    echo " -b <baud> serial baud rate (default is 250000)"
    echo " -f <firmware> path to klipper.bin"
    echo " -d <dictionary> path to klipper.dict for firmware validation"
}

# Parse command line "optional args"
while getopts "hlb:f:d:" arg; do
    case $arg in
        h)
            print_help_message
            exit 0
            ;;
        l)
            # Ask the spi_flash utility for its list of supported boards
            ${KLIPPY_ENV} ${SPI_FLASH} -l
            exit 0
            ;;
        b) BAUD_ARG="-b ${OPTARG}";;
        f) KLIPPER_BIN=$OPTARG;;
        d) KLIPPER_DICT=$OPTARG;;
    esac
done

# Make sure that we have the correct number of positional args
if [ $(($# - $OPTIND + 1)) -ne 2 ]; then
    echo "Invalid number of args: $(($# - $OPTIND + 1))"
    exit -1
fi

DEVICE=${@:$OPTIND:1}
BOARD=${@:$OPTIND+1:1}

if [ ! -f $KLIPPER_BIN ]; then
    echo "No file found at '${KLIPPER_BIN}'"
    exit -1
fi

if [ ! -e $DEVICE ]; then
    echo "No device found at '${DEVICE}'"
    exit -1
fi

# Use the default dictionary only when flashing the default binary
if [ ! $KLIPPER_DICT ] && [ $KLIPPER_BIN == $KLIPPER_BIN_DEFAULT ] ; then
    KLIPPER_DICT=$KLIPPER_DICT_DEFAULT
fi

if [ $KLIPPER_DICT ]; then
    if [ ! -f $KLIPPER_DICT ]; then
        # Bug fix: report the missing dictionary path (the original
        # message incorrectly printed ${KLIPPER_BIN} here)
        echo "No file found at '${KLIPPER_DICT}'"
        exit -1
    fi
    KLIPPER_DICT="-d ${KLIPPER_DICT}"
fi

# Run Script
echo "Flashing ${KLIPPER_BIN} to ${DEVICE}"
${KLIPPY_ENV} ${SPI_FLASH} ${BAUD_ARG} ${KLIPPER_DICT} ${DEVICE} ${BOARD} ${KLIPPER_BIN}

380
scripts/flash_usb.py Normal file
View File

@@ -0,0 +1,380 @@
#!/usr/bin/env python3
# Tool to enter a USB bootloader and flash Klipper
#
# Copyright (C) 2019 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, os, re, subprocess, optparse, time, fcntl, termios, struct
class error(Exception):
    """Flashing failure reported to the user by the helpers below."""
# Attempt to enter bootloader via 1200 baud request
def enter_bootloader(device):
    """Request the bootloader by toggling DTR at 1200 baud (best effort)."""
    try:
        dev_file = open(device, 'rb')
        fd = dev_file.fileno()
        dtr_flag = struct.pack('I', termios.TIOCM_DTR)
        # Assert DTR
        fcntl.ioctl(fd, termios.TIOCMBIS, dtr_flag)
        attrs = termios.tcgetattr(fd)
        # Switch input and output speeds to the magic 1200 baud
        attrs[4] = attrs[5] = termios.B1200
        sys.stderr.write("Entering bootloader on %s\n" % (device,))
        termios.tcsetattr(fd, termios.TCSANOW, attrs)
        # Release DTR
        fcntl.ioctl(fd, termios.TIOCMBIC, dtr_flag)
        dev_file.close()
    except (IOError, OSError) as e:
        # Best effort - the device may already be in its bootloader
        pass
# Translate a serial device name to a stable serial name in /dev/serial/by-path/
def translate_serial_to_tty(device):
    """Return (realpath, stable by-path alias) for a serial device.

    Falls back to the realpath itself when no by-path alias matches.
    """
    ttyname = os.path.realpath(device)
    by_path_dir = '/dev/serial/by-path/'
    if not os.path.exists(by_path_dir):
        raise error("Unable to find serial 'by-path' folder")
    for entry in os.listdir(by_path_dir):
        candidate = by_path_dir + entry
        if os.path.realpath(candidate) == ttyname:
            return ttyname, candidate
    return ttyname, ttyname
# Translate a serial device name to a usb path (suitable for dfu-util)
def translate_serial_to_usb_path(device):
    """Return (usb bus path, sysfs device path) for a serial device."""
    tty_real = os.path.realpath(device)
    tty_name = os.path.basename(tty_real)
    try:
        sys_link = os.readlink("/sys/class/tty/" + tty_name)
    except OSError as e:
        raise error("Unable to find tty device")
    usb_path_re = re.compile(r".*/usb\d+.*/(?P<path>\d+-[0-9.]+):\d+\.\d+/.*")
    match = usb_path_re.match(sys_link)
    if match is None:
        raise error("Unable to find tty usb device")
    devpath = os.path.realpath("/sys/class/tty/%s/device" % (tty_name,))
    return match.group("path"), devpath
# Wait for a given path to appear
def wait_path(path, alt_path=None):
    """Wait up to ~4 seconds for a device path to (re)appear.

    If alt_path is given and remains present for 300ms while path is
    still absent, return alt_path instead.  On timeout the primary
    path is returned regardless of its existence.
    """
    time.sleep(.100)
    alt_path_seen = None
    deadline = time.time() + 4.0
    while True:
        time.sleep(0.100)
        now = time.time()
        if os.path.exists(path):
            sys.stderr.write("Device reconnect on %s\n" % (path,))
            # Give the device a moment to settle
            time.sleep(0.100)
            return path
        if alt_path is not None and os.path.exists(alt_path):
            if alt_path_seen is None:
                alt_path_seen = now
                continue
            if now >= alt_path_seen + 0.300:
                sys.stderr.write("Device reconnect on alt path %s\n" % (
                    alt_path,))
                return alt_path
        if now > deadline:
            return path
# USB vendor:product id reported by the CanBoot bootloader
CANBOOT_ID = "1d50:6177"

def detect_canboot(devpath):
    """Return True if the sysfs device at devpath identifies as CanBoot."""
    usbdir = os.path.dirname(devpath)
    try:
        with open(os.path.join(usbdir, "idVendor")) as f:
            vid = f.read().strip().lower()
        with open(os.path.join(usbdir, "idProduct")) as f:
            pid = f.read().strip().lower()
    except Exception:
        # Missing/unreadable sysfs entries mean it is not CanBoot
        return False
    return "%s:%s" % (vid, pid) == CANBOOT_ID
def call_flashcan(device, binfile):
    """Invoke the CanBoot flash_can.py helper to upload binfile."""
    # flash_can.py needs pyserial - fail with install instructions if absent
    try:
        import serial
    except ModuleNotFoundError:
        sys.stderr.write(
            "Python's pyserial module is required to update. Install\n"
            "with the following command:\n"
            " %s -m pip install pyserial\n\n" % (sys.executable,)
        )
        sys.exit(-1)
    cmd = [sys.executable, "lib/canboot/flash_can.py", "-d",
           device, "-f", binfile]
    sys.stderr.write(" ".join(cmd) + '\n\n')
    if subprocess.call(cmd) != 0:
        sys.stderr.write("Error running flash_can.py\n")
        sys.exit(-1)
def flash_canboot(options, binfile):
    """Flash a device running the CanBoot bootloader."""
    # Prefer the stable /dev/serial/by-path alias for the device
    realname, stable_name = translate_serial_to_tty(options.device)
    call_flashcan(stable_name, binfile)
# Flash via a call to bossac
def flash_bossac(device, binfile, extra_flags=None):
    """Flash binfile via the bossac tool.

    device: serial port of the target board.
    extra_flags: optional list of additional bossac arguments.
    Raises 'error' if the bossac write fails.  After a successful
    write, attempts a boot-from-flash + reset unless the caller
    already requested a reset via "-R".
    """
    # Avoid a mutable default argument (shared list across calls)
    if extra_flags is None:
        extra_flags = []
    ttyname, pathname = translate_serial_to_tty(device)
    enter_bootloader(pathname)
    pathname = wait_path(pathname, ttyname)
    baseargs = ["lib/bossac/bin/bossac", "-U", "-p", pathname]
    args = baseargs + extra_flags + ["-w", binfile, "-v"]
    sys.stderr.write(" ".join(args) + '\n\n')
    res = subprocess.call(args)
    if res != 0:
        raise error("Error running bossac")
    if "-R" not in extra_flags:
        # Request boot-from-flash and reset; failures here are ignored
        args = baseargs + ["-b", "-R"]
        try:
            subprocess.check_output(args, stderr=subprocess.STDOUT)
            if "-b" not in extra_flags:
                wait_path(pathname)
                subprocess.check_output(args, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            pass
# Invoke the dfu-util program
def call_dfuutil(flags, binfile, sudo):
    """Run dfu-util with the given flags to download binfile."""
    cmd = ["dfu-util"] + flags + ["-D", binfile]
    if sudo:
        cmd.insert(0, "sudo")
    sys.stderr.write(" ".join(cmd) + '\n\n')
    if subprocess.call(cmd) != 0:
        raise error("Error running dfu-util")
# Flash via a call to dfu-util
def flash_dfuutil(device, binfile, extra_flags=None, sudo=True):
    """Flash binfile via dfu-util.

    device: either a serial port path or a hex "vvvv:pppp" usb id (the
    latter indicates the device is already in bootloader mode).
    extra_flags: optional list of additional dfu-util arguments.
    """
    # Avoid a mutable default argument (shared list across calls)
    if extra_flags is None:
        extra_flags = []
    hexfmt_r = re.compile(r"^[a-fA-F0-9]{4}:[a-fA-F0-9]{4}$")
    if hexfmt_r.match(device.strip()):
        # Already in the bootloader - flash directly by usb id
        call_dfuutil(["-d", ","+device.strip()] + extra_flags, binfile, sudo)
        return
    ttyname, serbypath = translate_serial_to_tty(device)
    buspath, devpath = translate_serial_to_usb_path(device)
    enter_bootloader(device)
    pathname = wait_path(devpath)
    # CanBoot devices are updated via flash_can rather than dfu-util
    if detect_canboot(devpath):
        call_flashcan(serbypath, binfile)
    else:
        call_dfuutil(["-p", buspath] + extra_flags, binfile, sudo)
def call_hidflash(binfile, sudo):
    """Run the hid-flash tool to upload binfile."""
    cmd = ["lib/hidflash/hid-flash", binfile]
    if sudo:
        cmd.insert(0, "sudo")
    sys.stderr.write(" ".join(cmd) + '\n\n')
    if subprocess.call(cmd) != 0:
        raise error("Error running hid-flash")
# Flash via call to hid-flash
def flash_hidflash(device, binfile, sudo=True):
    """Flash binfile via hid-flash.

    device: either a serial port path or a hex "vvvv:pppp" usb id (the
    latter indicates the device is already in bootloader mode).
    """
    usbid_re = re.compile(r"^[a-fA-F0-9]{4}:[a-fA-F0-9]{4}$")
    if usbid_re.match(device.strip()):
        # Already in the bootloader - flash immediately
        call_hidflash(binfile, sudo)
        return
    ttyname, serbypath = translate_serial_to_tty(device)
    buspath, devpath = translate_serial_to_usb_path(device)
    enter_bootloader(device)
    pathname = wait_path(devpath)
    # CanBoot devices are updated via flash_can rather than hid-flash
    if detect_canboot(devpath):
        call_flashcan(serbypath, binfile)
    else:
        call_hidflash(binfile, sudo)
# Call Klipper modified "picoboot"
def call_picoboot(bus, addr, binfile, sudo):
    """Run the rp2040_flash tool, optionally targeting a specific bus/addr."""
    cmd = ["lib/rp2040_flash/rp2040_flash", binfile]
    if bus is not None:
        cmd.extend([bus, addr])
    if sudo:
        cmd.insert(0, "sudo")
    sys.stderr.write(" ".join(cmd) + '\n\n')
    if subprocess.call(cmd) != 0:
        raise error("Error running rp2040_flash")
# Flash via Klipper modified "picoboot"
def flash_picoboot(device, binfile, sudo):
    """Flash an rp2040 via the picoboot protocol."""
    buspath, devpath = translate_serial_to_usb_path(device)
    # We need one level up to get access to busnum/devnum files
    usbdir = os.path.dirname(devpath)
    enter_bootloader(device)
    wait_path(usbdir)
    with open(usbdir + "/busnum") as f:
        bus = f.read().strip()
    with open(usbdir + "/devnum") as f:
        addr = f.read().strip()
    call_picoboot(bus, addr, binfile, sudo)
######################################################################
# Device specific helpers
######################################################################
def flash_atsam3(options, binfile):
    """Flash an atsam3 mcu via bossac (erase and set boot-from-flash)."""
    try:
        flash_bossac(options.device, binfile, ["-e", "-b"])
    except error as e:
        msg = "Failed to flash to %s: %s\n" % (options.device, str(e))
        sys.stderr.write(msg)
        sys.exit(-1)
def flash_atsam4(options, binfile):
    """Flash an atsam4 mcu via bossac (erase before write)."""
    try:
        flash_bossac(options.device, binfile, ["-e"])
    except error as e:
        msg = "Failed to flash to %s: %s\n" % (options.device, str(e))
        sys.stderr.write(msg)
        sys.exit(-1)
def flash_atsamd(options, binfile):
    """Flash an atsamd mcu via bossac at the configured flash offset."""
    bossac_flags = ["--offset=0x%x" % (options.start,), "-b", "-R"]
    try:
        flash_bossac(options.device, binfile, bossac_flags)
    except error as e:
        msg = "Failed to flash to %s: %s\n" % (options.device, str(e))
        sys.stderr.write(msg)
        sys.exit(-1)
# Help text shown when flashing a Smoothieboard fails (takes device, error)
SMOOTHIE_HELP = """
Failed to flash to %s: %s
If flashing Klipper to a Smoothieboard for the first time it may be
necessary to manually place the board into "bootloader mode" - press
and hold the "Play button" and then press and release the "Reset
button".
When a Smoothieboard is in bootloader mode it can be flashed with the
following command:
make flash FLASH_DEVICE=1d50:6015
Alternatively, one can flash a Smoothieboard via SD card - copy the
"out/klipper.bin" file to a file named "firmware.bin" on an SD card
and then restart the Smoothieboard with that SD card.
"""

def flash_lpc176x(options, binfile):
    """Flash an lpc176x (eg, Smoothieboard) via dfu-util."""
    try:
        flash_dfuutil(options.device, binfile, [], options.sudo)
    except error as e:
        # Most common failure is the board not being in bootloader mode
        sys.stderr.write(SMOOTHIE_HELP % (options.device, str(e)))
        sys.exit(-1)
# Help text shown when flashing an stm32f1 fails (takes device, error, device)
STM32F1_HELP = """
Failed to flash to %s: %s
If the device is already in bootloader mode it can be flashed with the
following command:
make flash FLASH_DEVICE=1eaf:0003
OR
make flash FLASH_DEVICE=1209:beba
If attempting to flash via 3.3V serial, then use:
make serialflash FLASH_DEVICE=%s
"""

def flash_stm32f1(options, binfile):
    """Flash an stm32f1 via hid-flash or dfu-util, based on app start address."""
    try:
        if options.start == 0x8000800:
            # This application offset corresponds to the hid bootloader
            flash_hidflash(options.device, binfile, options.sudo)
        else:
            flash_dfuutil(options.device, binfile, ["-R", "-a", "2"],
                          options.sudo)
    except error as e:
        sys.stderr.write(STM32F1_HELP % (
            options.device, str(e), options.device))
        sys.exit(-1)
# Help text shown when flashing an stm32f4 fails (takes device, error, device)
STM32F4_HELP = """
Failed to flash to %s: %s
If the device is already in bootloader mode it can be flashed with the
following command:
make flash FLASH_DEVICE=0483:df11
OR
make flash FLASH_DEVICE=1209:beba
If attempting to flash via 3.3V serial, then use:
make serialflash FLASH_DEVICE=%s
"""

def flash_stm32f4(options, binfile):
    """Flash an stm32f4 via hid-flash or dfu-util, based on app start address."""
    # Tell dfu-util to start the app at the given address after flashing
    start = "0x%x:leave" % (options.start,)
    try:
        if options.start == 0x8004000:
            # This application offset corresponds to the hid bootloader
            flash_hidflash(options.device, binfile, options.sudo)
        else:
            flash_dfuutil(options.device, binfile,
                          ["-R", "-a", "0", "-s", start], options.sudo)
    except error as e:
        sys.stderr.write(STM32F4_HELP % (
            options.device, str(e), options.device))
        sys.exit(-1)
# Help text shown when flashing an rp2040 fails (takes device, error)
RP2040_HELP = """
Failed to flash to %s: %s
If the device is already in bootloader mode it can be flashed with the
following command:
make flash FLASH_DEVICE=2e8a:0003
Alternatively, one can flash rp2040 boards like the Pico by manually
entering bootloader mode(hold bootsel button during powerup), mount the
device as a usb drive, and copy klipper.uf2 to the device.
"""

def flash_rp2040(options, binfile):
    """Flash an rp2040 via the picoboot protocol."""
    try:
        if options.device.lower() == "2e8a:0003":
            # Device already in bootloader mode - flash by usb id
            call_picoboot(None, None, binfile, options.sudo)
        else:
            flash_picoboot(options.device, binfile, options.sudo)
    except error as e:
        sys.stderr.write(RP2040_HELP % (options.device, str(e)))
        sys.exit(-1)
# Map from mcu type prefix (as passed to --type) to its flashing helper
MCUTYPES = {
    'sam3': flash_atsam3, 'sam4': flash_atsam4, 'samd': flash_atsamd,
    'same70': flash_atsam4, 'lpc176': flash_lpc176x, 'stm32f103': flash_stm32f1,
    'stm32f4': flash_stm32f4, 'stm32f042': flash_stm32f4,
    'stm32f072': flash_stm32f4, 'stm32g0b1': flash_stm32f4,
    'stm32h7': flash_stm32f4, 'rp2040': flash_rp2040
}
######################################################################
# Startup
######################################################################
def main():
    """Command-line entry point: parse arguments, pick the flash handler
    for the given MCU type, and flash the supplied binary."""
    usage = "%prog [options] -t <type> -d <device> <klipper.bin>"
    opts = optparse.OptionParser(usage)
    opts.add_option("-t", "--type", type="string", dest="mcutype",
                    help="micro-controller type")
    opts.add_option("-d", "--device", type="string", dest="device",
                    help="serial port device")
    opts.add_option("-s", "--start", type="int", dest="start",
                    help="start address in flash")
    opts.add_option("--no-sudo", action="store_false", dest="sudo",
                    default=True, help="do not run sudo")
    options, args = opts.parse_args()
    if len(args) != 1:
        opts.error("Incorrect number of arguments")
    # Select the handler whose prefix matches the MCU type
    flash_func = None
    if options.mcutype:
        flash_func = next((func for prefix, func in MCUTYPES.items()
                           if options.mcutype.startswith(prefix)), None)
    if flash_func is None:
        opts.error("USB flashing is not supported for MCU '%s'"
                   % (options.mcutype,))
    if not options.device:
        sys.stderr.write("\nPlease specify FLASH_DEVICE\n\n")
        sys.exit(-1)
    flash_func(options, args[0])
# Invoke the command-line entry point when run as a script
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,259 @@
#!/usr/bin/env python3
# Generate adxl345 accelerometer graphs
#
# Copyright (C) 2020 Kevin O'Connor <kevin@koconnor.net>
# Copyright (C) 2020 Dmitry Butyugin <dmbutyugin@google.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import importlib, optparse, os, sys
from textwrap import wrap
import numpy as np, matplotlib
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', 'klippy'))
# Import klippy's extras/shaper_calibrate.py via the klippy path added
# to sys.path above
shaper_calibrate = importlib.import_module('.shaper_calibrate', 'extras')
# Maximum characters per graph-title line before wrapping
MAX_TITLE_LENGTH=65
def parse_log(logname, opts):
    """Load raw accelerometer samples from logname as a numpy 2D array.

    Files holding already-processed power-spectral-density/calibration
    data are rejected through the option parser's error handler.
    """
    with open(logname) as f:
        # Skip leading '#' comment lines; 'header' ends as the first
        # non-comment line
        for header in f:
            if not header.startswith('#'):
                break
    if header.startswith('freq,psd_x,psd_y,psd_z,psd_xyz'):
        # Power spectral density data or shaper calibration data
        opts.error("File %s does not contain raw accelerometer data and therefore "
                   "is not supported by graph_accelerometer.py script. Please use "
                   "calibrate_shaper.py script to process it instead." % (logname,))
    # Raw accelerometer data
    return np.loadtxt(logname, comments='#', delimiter=',')
######################################################################
# Raw accelerometer graphing
######################################################################
def plot_accel(data, logname):
    """Plot mean-removed x/y/z acceleration against time on three axes."""
    t0 = data[0, 0]
    times = data[:, 0] - t0
    fig, axes = matplotlib.pyplot.subplots(nrows=3, sharex=True)
    axes[0].set_title("\n".join(wrap("Accelerometer data (%s)" % (logname,),
                                     MAX_TITLE_LENGTH)))
    for i, name in enumerate(['x', 'y', 'z']):
        series = data[:, i+1]
        avg = series.mean()
        ax = axes[i]
        ax.plot(times, series - avg, alpha=0.8)
        ax.grid(True)
        ax.set_ylabel('%s accel (%+.3f)\n(mm/s^2)' % (name, -avg))
    axes[-1].set_xlabel('Time (%+.3f)\n(s)' % (-t0,))
    fig.tight_layout()
    return fig
######################################################################
# Frequency graphing
######################################################################
# Calculate estimated "power spectral density"
def calc_freq_response(data, max_freq):
    """Compute PSD calibration data via the shaper_calibrate helper.

    max_freq is accepted for call-site symmetry but not used here.
    """
    calibrator = shaper_calibrate.ShaperCalibrate(printer=None)
    return calibrator.process_accelerometer_data(data)
def calc_specgram(data, axis):
    """Compute a spectrogram (PSD over time) for one axis, or the sum of
    all three axes when axis == 'all'."""
    nsamples = data.shape[0]
    sample_rate = nsamples / (data[-1,0] - data[0,0])
    # Round up to a power of 2 for faster FFT
    nfft = 1 << int(.5 * sample_rate - 1).bit_length()
    window = np.kaiser(nfft, 6.)
    def _specgram(x):
        return matplotlib.mlab.specgram(
            x, Fs=sample_rate, NFFT=nfft, noverlap=nfft//2, window=window,
            mode='psd', detrend='mean', scale_by_freq=False)
    columns = {'x': data[:,1], 'y': data[:,2], 'z': data[:,3]}
    if axis == 'all':
        pdata, bins, t = _specgram(columns['x'])
        for other in 'yz':
            pdata += _specgram(columns[other])[0]
    else:
        pdata, bins, t = _specgram(columns[axis])
    return pdata, bins, t
def plot_frequency(datas, lognames, max_freq):
    """Plot combined and per-axis power spectral density of all inputs."""
    calibration_data = calc_freq_response(datas[0], max_freq)
    for extra in datas[1:]:
        calibration_data.add_data(calc_freq_response(extra, max_freq))
    freqs = calibration_data.freq_bins
    mask = freqs <= max_freq
    curves = [('X+Y+Z', calibration_data.psd_sum[mask]),
              ('X', calibration_data.psd_x[mask]),
              ('Y', calibration_data.psd_y[mask]),
              ('Z', calibration_data.psd_z[mask])]
    freqs = freqs[mask]
    fig, ax = matplotlib.pyplot.subplots()
    ax.set_title("\n".join(wrap(
        "Frequency response (%s)" % (', '.join(lognames)), MAX_TITLE_LENGTH)))
    ax.set_xlabel('Frequency (Hz)')
    ax.set_ylabel('Power spectral density')
    for label, psd in curves:
        ax.plot(freqs, psd, label=label, alpha=0.6)
    ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
    ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
    ax.grid(which='major', color='grey')
    ax.grid(which='minor', color='lightgrey')
    ax.ticklabel_format(axis='y', style='scientific', scilimits=(0,0))
    legend_font = matplotlib.font_manager.FontProperties()
    legend_font.set_size('x-small')
    ax.legend(loc='best', prop=legend_font)
    fig.tight_layout()
    return fig
def plot_compare_frequency(datas, lognames, max_freq, axis):
    """Overlay one axis' PSD curve for each input log to compare them."""
    fig, ax = matplotlib.pyplot.subplots()
    ax.set_title('Frequency responses comparison')
    ax.set_xlabel('Frequency (Hz)')
    ax.set_ylabel('Power spectral density')
    for data, logname in zip(datas, lognames):
        cal = calc_freq_response(data, max_freq)
        bins = cal.freq_bins
        keep = bins <= max_freq
        ax.plot(bins[keep], cal.get_psd(axis)[keep],
                label="\n".join(wrap(logname, 60)), alpha=0.6)
    ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
    ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
    ax.grid(which='major', color='grey')
    ax.grid(which='minor', color='lightgrey')
    legend_font = matplotlib.font_manager.FontProperties()
    legend_font.set_size('x-small')
    ax.legend(loc='best', prop=legend_font)
    fig.tight_layout()
    return fig
# Plot data in a "spectrogram colormap"
def plot_specgram(data, logname, max_freq, axis):
    """Render the spectrogram of one log as a log-scaled colormap."""
    power, freq_bins, times = calc_specgram(data, axis)
    fig, ax = matplotlib.pyplot.subplots()
    ax.set_title("\n".join(wrap("Spectrogram %s (%s)" % (axis, logname),
                                MAX_TITLE_LENGTH)))
    ax.pcolormesh(times, freq_bins, power, norm=matplotlib.colors.LogNorm())
    ax.set_ylim([0., max_freq])
    ax.set_ylabel('frequency (hz)')
    ax.set_xlabel('Time (s)')
    fig.tight_layout()
    return fig
######################################################################
# CSV output
######################################################################
def write_frequency_response(datas, output):
    """Merge the PSDs of every input and save them as a calibration CSV."""
    helper = shaper_calibrate.ShaperCalibrate(printer=None)
    merged = helper.process_accelerometer_data(datas[0])
    for extra in datas[1:]:
        merged.add_data(helper.process_accelerometer_data(extra))
    helper.save_calibration_data(output, merged)
def write_specgram(psd, freq_bins, time, output):
    """Write spectrogram data as CSV: one row per frequency bin, one
    column per time sample."""
    nbins = freq_bins.shape[0]
    with open(output, "w") as csvfile:
        header = "freq\\t" + "".join([",%.6f" % (ts,) for ts in time])
        csvfile.write(header + "\n")
        for row in range(nbins):
            line = "%.1f" % (freq_bins[row],)
            line += "".join([",%.6e" % (value,) for value in psd[row,:]])
            csvfile.write(line + "\n")
######################################################################
# Startup
######################################################################
def is_csv_output(output):
    """True when output names a .csv file (falsy for None/empty output)."""
    if not output:
        return output
    return os.path.splitext(output)[1].lower() == '.csv'
def setup_matplotlib(output):
    """Lazily import the matplotlib pieces needed for the chosen output."""
    global matplotlib
    if is_csv_output(output):
        # Only mlab may be necessary with CSV output
        import matplotlib.mlab
        return
    if output:
        # Writing an image file - pick the non-interactive Agg backend
        # before pyplot is imported
        matplotlib.rcParams.update({'figure.autolayout': True})
        matplotlib.use('Agg')
    import matplotlib.pyplot
    import matplotlib.dates
    import matplotlib.font_manager
    import matplotlib.ticker
def main():
    """Parse the command line, load raw accelerometer logs, then either
    export CSV data or draw the requested graph."""
    usage = "%prog [options] <raw logs>"
    opts = optparse.OptionParser(usage)
    opts.add_option("-o", "--output", type="string", dest="output",
                    default=None, help="filename of output graph")
    opts.add_option("-f", "--max_freq", type="float", default=200.,
                    help="maximum frequency to graph")
    opts.add_option("-r", "--raw", action="store_true",
                    help="graph raw accelerometer data")
    opts.add_option("-c", "--compare", action="store_true",
                    help="graph comparison of power spectral density "
                         "between different accelerometer data files")
    opts.add_option("-s", "--specgram", action="store_true",
                    help="graph spectrogram of accelerometer data")
    opts.add_option("-a", type="string", dest="axis", default="all",
                    help="axis to graph (one of 'all', 'x', 'y', or 'z')")
    options, args = opts.parse_args()
    if not args:
        opts.error("Incorrect number of arguments")
    # Load the raw data files
    datas = [parse_log(fn, opts) for fn in args]
    setup_matplotlib(options.output)
    if is_csv_output(options.output):
        # CSV export paths (no graphing)
        if options.raw:
            opts.error("raw mode is not supported with csv output")
        if options.compare:
            opts.error("comparison mode is not supported with csv output")
        if options.specgram:
            if len(args) > 1:
                opts.error("Only 1 input is supported in specgram mode")
            pdata, bins, t = calc_specgram(datas[0], options.axis)
            write_specgram(pdata, bins, t, options.output)
        else:
            write_frequency_response(datas, options.output)
        return
    # Draw the requested graph
    if options.raw:
        if len(args) > 1:
            opts.error("Only 1 input is supported in raw mode")
        fig = plot_accel(datas[0], args[0])
    elif options.specgram:
        if len(args) > 1:
            opts.error("Only 1 input is supported in specgram mode")
        fig = plot_specgram(datas[0], args[0], options.max_freq, options.axis)
    elif options.compare:
        fig = plot_compare_frequency(datas, args, options.max_freq,
                                     options.axis)
    else:
        fig = plot_frequency(datas, args, options.max_freq)
    # Show or save the graph
    if options.output is None:
        matplotlib.pyplot.show()
    else:
        fig.set_size_inches(8, 6)
        fig.savefig(options.output)
# Invoke the command-line entry point when run as a script
if __name__ == '__main__':
    main()

193
scripts/graph_extruder.py Normal file
View File

@@ -0,0 +1,193 @@
#!/usr/bin/env python
# Generate extruder pressure advance motion graphs
#
# Copyright (C) 2019-2021 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import math, optparse, datetime
import matplotlib
# Duration of one simulation sample (seconds) and its inverse
SEG_TIME = .000100
INV_SEG_TIME = 1. / SEG_TIME
######################################################################
# Basic trapezoid motion
######################################################################
# List of moves: [(start_v, end_v, move_t), ...]; a move_t of None means
# an acceleration-limited ramp between the two speeds
Moves = [
    (0., 0., .100),
    (0., 100., None), (100., 100., .200), (100., 60., None),
    (60., 100., None), (100., 100., .200), (100., 0., None),
    (0., 0., .300)
]
# Ratio of filament speed to toolhead speed: extrusion cross-section
# (.4 wide x .75 high) over 1.75mm filament cross-section
EXTRUDE_R = (.4 * .4 * .75) / (math.pi * (1.75 / 2.)**2)
# Toolhead acceleration limit translated into filament space
ACCEL = 3000. * EXTRUDE_R
def gen_positions():
    """Generate extruder filament positions sampled every SEG_TIME."""
    out = []
    start_d = start_t = t = 0.
    for start_v, end_v, move_t in Moves:
        # Convert toolhead speeds to filament speeds
        start_v *= EXTRUDE_R
        end_v *= EXTRUDE_R
        if move_t is None:
            # Acceleration-limited transition between the two speeds
            move_t = abs(end_v - start_v) / ACCEL
        if end_v > start_v:
            half_accel = .5 * ACCEL
        elif start_v > end_v:
            half_accel = -.5 * ACCEL
        else:
            half_accel = 0.
        end_t = start_t + move_t
        while t <= end_t:
            rel_t = t - start_t
            out.append(start_d + (start_v + half_accel * rel_t) * rel_t)
            t += SEG_TIME
        start_d += (start_v + half_accel * move_t) * move_t
        start_t = end_t
    return out
######################################################################
# List helper functions
######################################################################
# Amount of data to ignore at both ends of the sampled lists
MARGIN_TIME = 0.050
def time_to_index(t):
    """Convert a time in seconds to the nearest sample index."""
    scaled = t * INV_SEG_TIME
    return int(scaled + .5)
def indexes(positions):
    """Range of sample indices excluding the margin at both ends."""
    margin = time_to_index(MARGIN_TIME)
    return range(margin, len(positions) - margin)
def trim_lists(*lists):
    """Truncate every list in place, dropping the trailing margin samples."""
    keep = len(lists[0]) - time_to_index(2. * MARGIN_TIME)
    for seq in lists:
        del seq[keep:]
######################################################################
# Common data filters
######################################################################
# Generate estimated first order derivative
def gen_deriv(data):
    """Estimate the first derivative via forward differences (entry 0 is 0)."""
    derivs = [0.]
    for i in range(len(data) - 1):
        derivs.append((data[i+1] - data[i]) * INV_SEG_TIME)
    return derivs
# Simple average between two points smooth_time away
def calc_average(positions, smooth_time):
    """Two-point average of samples smooth_time/2 before and after."""
    half = time_to_index(smooth_time * .5)
    out = [0.] * len(positions)
    for idx in indexes(positions):
        out[idx] = .5 * (positions[idx-half] + positions[idx+half])
    return out
# Average (via integration) of smooth_time range
def calc_smooth(positions, smooth_time):
    """Boxcar (integrated) average over the smooth_time window."""
    half = time_to_index(smooth_time * .5)
    norm = 1. / (2*half - 1)
    out = [0.] * len(positions)
    for idx in indexes(positions):
        out[idx] = sum(positions[idx-half+1:idx+half]) * norm
    return out
# Time weighted average (via integration) of smooth_time range
def calc_weighted(positions, smooth_time):
    """Triangle (time) weighted average over the smooth_time window."""
    half = time_to_index(smooth_time * .5)
    norm = 1. / half**2
    out = [0.] * len(positions)
    for idx in indexes(positions):
        window = [positions[j] * (half - abs(j-idx))
                  for j in range(idx-half, idx+half)]
        out[idx] = sum(window) * norm
    return out
######################################################################
# Pressure advance
######################################################################
SMOOTH_TIME = .040  # width of the smoothing window (seconds)
PRESSURE_ADVANCE = .045  # pressure advance constant (seconds)
# Calculate raw pressure advance positions
def calc_pa_raw(positions):
    """Apply raw (unsmoothed) pressure advance: position + PA * velocity."""
    advance = PRESSURE_ADVANCE * INV_SEG_TIME
    out = [0.] * len(positions)
    for idx in indexes(positions):
        out[idx] = positions[idx] + advance * (positions[idx+1]
                                               - positions[idx])
    return out
# Pressure advance after smoothing
def calc_pa(positions):
    """Pressure advance with time-weighted smoothing applied."""
    raw = calc_pa_raw(positions)
    return calc_weighted(raw, SMOOTH_TIME)
######################################################################
# Plotting and startup
######################################################################
def plot_motion():
    """Plot nominal, raw-PA and smoothed-PA extruder velocities."""
    # Nominal motion
    positions = gen_positions()
    velocities = gen_deriv(positions)
    accels = gen_deriv(velocities)
    # Motion with raw pressure advance
    pa_positions = calc_pa_raw(positions)
    pa_velocities = gen_deriv(pa_positions)
    # Motion with smoothed pressure advance
    sm_positions = calc_pa(positions)
    sm_velocities = gen_deriv(sm_positions)
    # Build plot
    times = [SEG_TIME * i for i in range(len(positions))]
    trim_lists(times, velocities, accels,
               pa_positions, pa_velocities,
               sm_positions, sm_velocities)
    fig, ax = matplotlib.pyplot.subplots(nrows=1, sharex=True)
    ax.set_title("Extruder Velocity")
    ax.set_ylabel('Velocity (mm/s)')
    pa_line, = ax.plot(times, pa_velocities, 'r',
                       label='Pressure Advance', alpha=0.3)
    nominal_line, = ax.plot(times, velocities, 'black', label='Nominal')
    smooth_line, = ax.plot(times, sm_velocities, 'g', label='Smooth PA',
                           alpha=0.9)
    legend_font = matplotlib.font_manager.FontProperties()
    legend_font.set_size('x-small')
    ax.legend(handles=[nominal_line, pa_line, smooth_line], loc='best',
              prop=legend_font)
    ax.set_xlabel('Time (s)')
    ax.grid(True)
    fig.tight_layout()
    return fig
def setup_matplotlib(output_to_file):
    """Import matplotlib submodules, using the Agg backend for file output."""
    global matplotlib
    if output_to_file:
        # Backend must be selected before pyplot is imported
        matplotlib.rcParams.update({'figure.autolayout': True})
        matplotlib.use('Agg')
    import matplotlib.pyplot
    import matplotlib.dates
    import matplotlib.font_manager
    import matplotlib.ticker
def main():
    """Command-line entry point: render the extruder motion graph."""
    parser = optparse.OptionParser("%prog [options]")
    parser.add_option("-o", "--output", type="string", dest="output",
                      default=None, help="filename of output graph")
    options, args = parser.parse_args()
    if args:
        parser.error("Incorrect number of arguments")
    setup_matplotlib(options.output is not None)
    fig = plot_motion()
    if options.output is None:
        # No output file requested - open an interactive window
        matplotlib.pyplot.show()
    else:
        fig.set_size_inches(6, 2.5)
        fig.savefig(options.output)
# Invoke the command-line entry point when run as a script
if __name__ == '__main__':
    main()

427
scripts/graph_motion.py Normal file
View File

@@ -0,0 +1,427 @@
#!/usr/bin/env python
# Script to graph motion results
#
# Copyright (C) 2019-2021 Kevin O'Connor <kevin@koconnor.net>
# Copyright (C) 2020 Dmitry Butyugin <dmbutyugin@google.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import optparse, datetime, math
import matplotlib
# Duration of one simulation sample (seconds) and its inverse
SEG_TIME = .000100
INV_SEG_TIME = 1. / SEG_TIME
# "True" (simulated) resonance parameters of the machine
SPRING_FREQ=35.0
DAMPING_RATIO=0.05
# Resonance parameters the filters/shapers are configured for
CONFIG_FREQ=40.0
CONFIG_DAMPING_RATIO=0.1
######################################################################
# Basic trapezoid motion
######################################################################
# List of moves: [(start_v, end_v, move_t), ...]
# Velocity profile to simulate; a move_t of None means an
# acceleration-limited ramp between the two speeds
Moves = [
    (0., 0., .100),
    (6.869, 89.443, None), (89.443, 89.443, .120), (89.443, 17.361, None),
    (19.410, 120., None), (120., 120., .130), (120., 5., None),
    (0., 0., 0.01),
    (-5., -100., None), (-100., -100., .100), (-100., -.5, None),
    (0., 0., .200)
]
ACCEL = 3000.  # nominal acceleration (mm/s^2)
MAX_JERK = ACCEL * 0.6 * SPRING_FREQ  # jerk cap for get_accel_jerk_limit
def get_accel(start_v, end_v):
    """Return the constant acceleration limit (velocities are ignored)."""
    return ACCEL
def get_accel_jerk_limit(start_v, end_v):
    """Acceleration capped so jerk stays below MAX_JERK for this delta-v."""
    jerk_accel = math.sqrt(MAX_JERK * abs(end_v - start_v) / 6.)
    return min(jerk_accel, ACCEL)
# Standard constant acceleration generator
def get_acc_pos_ao2(rel_t, start_v, accel, move_t):
    """Position reached after rel_t at constant acceleration (move_t unused)."""
    mean_v = start_v + 0.5 * accel * rel_t
    return mean_v * rel_t
# Bezier curve "accel_order=4" generator
def get_acc_pos_ao4(rel_t, start_v, accel, move_t):
    """Position under a Bezier "accel_order=4" acceleration profile."""
    inv_t = 1. / move_t
    d1 = accel * inv_t
    d2 = d1 * inv_t
    # Polynomial coefficients of the position curve
    c4, c3, c1 = -.5 * d2, d1, start_v
    return ((c4 * rel_t + c3) * rel_t * rel_t + c1) * rel_t
# Bezier curve "accel_order=6" generator
def get_acc_pos_ao6(rel_t, start_v, accel, move_t):
    """Position under a Bezier "accel_order=6" acceleration profile."""
    inv_t = 1. / move_t
    d1 = accel * inv_t
    d2 = d1 * inv_t
    d3 = d2 * inv_t
    d4 = d3 * inv_t
    # Polynomial coefficients of the position curve
    c6, c5, c4, c1 = d4, -3. * d3, 2.5 * d2, start_v
    return (((c6 * rel_t + c5) * rel_t + c4)
            * rel_t * rel_t * rel_t + c1) * rel_t
# Select which position profile and acceleration-limit functions to use
get_acc_pos = get_acc_pos_ao2
get_acc = get_accel
# Calculate positions based on 'Moves' list
def gen_positions():
    """Sample positions for the 'Moves' profile every SEG_TIME."""
    out = []
    start_d = start_t = t = 0.
    for start_v, end_v, move_t in Moves:
        if move_t is None:
            # Acceleration-limited transition between the two speeds
            move_t = abs(end_v - start_v) / get_acc(start_v, end_v)
        accel = (end_v - start_v) / move_t
        end_t = start_t + move_t
        while t <= end_t:
            rel_t = t - start_t
            out.append(start_d + get_acc_pos(rel_t, start_v, accel, move_t))
            t += SEG_TIME
        start_d += get_acc_pos(move_t, start_v, accel, move_t)
        start_t = end_t
    return out
######################################################################
# Estimated motion with belt as spring
######################################################################
def estimate_spring(positions):
    """Simulate the toolhead as a damped spring driven by the stepper."""
    ang_freq2 = (SPRING_FREQ * 2. * math.pi)**2
    damping_factor = 4. * math.pi * DAMPING_RATIO * SPRING_FREQ
    pos = vel = 0.
    out = []
    for drive in positions:
        pos += vel * SEG_TIME
        # Spring pull toward the stepper position, then velocity damping
        vel += (drive - pos) * ang_freq2 * SEG_TIME
        vel -= vel * damping_factor * SEG_TIME
        out.append(pos)
    return out
######################################################################
# List helper functions
######################################################################
# Amount of data to ignore at both ends of the sampled lists
MARGIN_TIME = 0.050
def time_to_index(t):
    """Convert a time in seconds to the nearest sample index."""
    scaled = t * INV_SEG_TIME
    return int(scaled + .5)
def indexes(positions):
    """Range of sample indices excluding the margin at both ends."""
    margin = time_to_index(MARGIN_TIME)
    return range(margin, len(positions) - margin)
def trim_lists(*lists):
    """Truncate every list in place, dropping the trailing margin samples."""
    keep = len(lists[0]) - time_to_index(2. * MARGIN_TIME)
    for seq in lists:
        del seq[keep:]
######################################################################
# Common data filters
######################################################################
# Generate estimated first order derivative
def gen_deriv(data):
    """Estimate the first derivative via forward differences (entry 0 is 0)."""
    derivs = [0.]
    for i in range(len(data) - 1):
        derivs.append((data[i+1] - data[i]) * INV_SEG_TIME)
    return derivs
# Simple average between two points smooth_time away
def calc_average(positions, smooth_time):
    """Two-point average of samples smooth_time/2 before and after."""
    half = time_to_index(smooth_time * .5)
    out = [0.] * len(positions)
    for idx in indexes(positions):
        out[idx] = .5 * (positions[idx-half] + positions[idx+half])
    return out
# Average (via integration) of smooth_time range
def calc_smooth(positions, smooth_time):
    """Boxcar (integrated) average over the smooth_time window."""
    half = time_to_index(smooth_time * .5)
    norm = 1. / (2*half - 1)
    out = [0.] * len(positions)
    for idx in indexes(positions):
        out[idx] = sum(positions[idx-half+1:idx+half]) * norm
    return out
# Time weighted average (via integration) of smooth_time range
def calc_weighted(positions, smooth_time):
    """Triangle (time) weighted average over the smooth_time window."""
    half = time_to_index(smooth_time * .5)
    norm = 1. / half**2
    out = [0.] * len(positions)
    for idx in indexes(positions):
        window = [positions[j] * (half - abs(j-idx))
                  for j in range(idx-half, idx+half)]
        out[idx] = sum(window) * norm
    return out
# Weighted average (`h**2 - (t-T)**2`) of smooth_time range
def calc_weighted2(positions, smooth_time):
    """Parabolic-weighted (`h**2 - (t-T)**2`) average over the window."""
    half = time_to_index(smooth_time * .5)
    norm = .75 / half**3
    out = [0.] * len(positions)
    for idx in indexes(positions):
        window = [positions[j] * (half**2 - (j-idx)**2)
                  for j in range(idx-half, idx+half)]
        out[idx] = sum(window) * norm
    return out
# Weighted average (`(h**2 - (t-T)**2)**2`) of smooth_time range
def calc_weighted4(positions, smooth_time):
    """Quartic-weighted (`(h**2 - (t-T)**2)**2`) average over the window."""
    half = time_to_index(smooth_time * .5)
    norm = 15 / (16. * half**5)
    out = [0.] * len(positions)
    for idx in indexes(positions):
        window = [positions[j] * ((half**2 - (j-idx)**2))**2
                  for j in range(idx-half, idx+half)]
        out[idx] = sum(window) * norm
    return out
# Weighted average (`(h - abs(t-T))**2 * (2 * abs(t-T) + h)`) of range
def calc_weighted3(positions, smooth_time):
    """Weighted (`(h - abs(t-T))**2 * (2*abs(t-T) + h)`) average over window."""
    half = time_to_index(smooth_time * .5)
    norm = 1. / half**4
    out = [0.] * len(positions)
    for idx in indexes(positions):
        window = [positions[j] * (half - abs(j-idx))**2
                  * (2. * abs(j-idx) + half)
                  for j in range(idx-half, idx+half)]
        out[idx] = sum(window) * norm
    return out
######################################################################
# Spring motion estimation
######################################################################
def calc_spring_raw(positions):
    """Invert the configured spring model with nearest-neighbor differences."""
    sa = (INV_SEG_TIME / (CONFIG_FREQ * 2. * math.pi))**2
    ra = 2. * CONFIG_DAMPING_RATIO * math.sqrt(sa)
    out = [0.] * len(positions)
    for i in indexes(positions):
        curvature = positions[i-1] - 2.*positions[i] + positions[i+1]
        slope = positions[i+1] - positions[i]
        out[i] = positions[i] + sa * curvature + ra * slope
    return out
def calc_spring_double_weighted(positions, smooth_time):
    """Spring inversion with wide sample spacing, then weighted smoothing."""
    off = time_to_index(smooth_time * .25)
    sa = (INV_SEG_TIME / (off * CONFIG_FREQ * 2. * math.pi))**2
    ra = 2. * CONFIG_DAMPING_RATIO * math.sqrt(sa)
    out = [0.] * len(positions)
    for i in indexes(positions):
        curvature = (positions[i-off] - 2.*positions[i]
                     + positions[i+off])
        slope = positions[i+1] - positions[i]
        out[i] = positions[i] + sa * curvature + ra * slope
    return calc_weighted(out, smooth_time=.5 * smooth_time)
######################################################################
# Input shapers
######################################################################
def get_zv_shaper():
    """Zero Vibration (ZV) shaper tuned to CONFIG_FREQ."""
    damped = math.sqrt(1. - CONFIG_DAMPING_RATIO**2)
    decay = math.exp(-CONFIG_DAMPING_RATIO * math.pi / damped)
    period = 1. / (CONFIG_FREQ * damped)
    return ([1., decay], [0., .5*period], "ZV")
def get_zvd_shaper():
    """ZV-Derivative (ZVD) shaper tuned to CONFIG_FREQ."""
    damped = math.sqrt(1. - CONFIG_DAMPING_RATIO**2)
    decay = math.exp(-CONFIG_DAMPING_RATIO * math.pi / damped)
    period = 1. / (CONFIG_FREQ * damped)
    return ([1., 2.*decay, decay**2], [0., .5*period, period], "ZVD")
def get_mzv_shaper():
    """Modified ZV (MZV) shaper tuned to CONFIG_FREQ."""
    damped = math.sqrt(1. - CONFIG_DAMPING_RATIO**2)
    decay = math.exp(-.75 * CONFIG_DAMPING_RATIO * math.pi / damped)
    period = 1. / (CONFIG_FREQ * damped)
    a1 = 1. - 1. / math.sqrt(2.)
    a2 = (math.sqrt(2.) - 1.) * decay
    a3 = a1 * decay * decay
    return ([a1, a2, a3], [0., .375*period, .75*period], "MZV")
def get_ei_shaper():
    """Extra-Insensitive (EI) shaper, 5% residual vibration tolerance."""
    v_tol = 0.05 # vibration tolerance
    damped = math.sqrt(1. - CONFIG_DAMPING_RATIO**2)
    decay = math.exp(-CONFIG_DAMPING_RATIO * math.pi / damped)
    period = 1. / (CONFIG_FREQ * damped)
    a1 = .25 * (1. + v_tol)
    a2 = .5 * (1. - v_tol) * decay
    a3 = a1 * decay * decay
    return ([a1, a2, a3], [0., .5*period, period], "EI")
def get_2hump_ei_shaper():
    """Two-hump Extra-Insensitive shaper, 5% vibration tolerance."""
    v_tol = 0.05 # vibration tolerance
    damped = math.sqrt(1. - CONFIG_DAMPING_RATIO**2)
    decay = math.exp(-CONFIG_DAMPING_RATIO * math.pi / damped)
    period = 1. / (CONFIG_FREQ * damped)
    V2 = v_tol**2
    X = pow(V2 * (math.sqrt(1. - V2) + 1.), 1./3.)
    a1 = (3.*X*X + 2.*X + 3.*V2) / (16.*X)
    a2 = (.5 - a1) * decay
    a3 = a2 * decay
    a4 = a1 * decay * decay * decay
    return ([a1, a2, a3, a4], [0., .5*period, period, 1.5*period],
            "2-hump EI")
def get_3hump_ei_shaper():
    """Three-hump Extra-Insensitive shaper, 5% vibration tolerance."""
    v_tol = 0.05 # vibration tolerance
    damped = math.sqrt(1. - CONFIG_DAMPING_RATIO**2)
    decay = math.exp(-CONFIG_DAMPING_RATIO * math.pi / damped)
    period = 1. / (CONFIG_FREQ * damped)
    K2 = decay * decay
    a1 = 0.0625 * (1. + 3. * v_tol + 2. * math.sqrt(2. * (v_tol + 1.) * v_tol))
    a2 = 0.25 * (1. - v_tol) * decay
    a3 = (0.5 * (1. + v_tol) - 2. * a1) * K2
    a4 = a2 * K2
    a5 = a1 * K2 * K2
    return ([a1, a2, a3, a4, a5],
            [0., .5*period, period, 1.5*period, 2.*period], "3-hump EI")
def shift_pulses(shaper):
    """Shift impulse times in place so the weighted mean time is zero."""
    A, T, name = shaper
    mean_t = (sum([a * t for a, t in zip(A, T)])) / sum(A)
    for i in range(len(T)):
        T[i] -= mean_t
def calc_shaper(shaper, positions):
    """Convolve positions with the shaper's (normalized) impulse train."""
    shift_pulses(shaper)
    A = shaper[0]
    inv_D = 1. / sum(A)
    n = len(A)
    offsets = [time_to_index(-shaper[1][j]) for j in range(n)]
    out = [0.] * len(positions)
    for i in indexes(positions):
        out[i] = sum([positions[i + offsets[j]] * A[j]
                      for j in range(n)]) * inv_D
    return out
# Ideal values
SMOOTH_TIME = (2./3.) / CONFIG_FREQ  # smoothing window used by the filters
def gen_updated_position(positions):
    """Produce the 'commanded' positions; alternate filters kept below
    (commented out) for experimentation."""
    #return calc_weighted(positions, 0.040)
    #return calc_spring_double_weighted(positions, SMOOTH_TIME)
    #return calc_weighted4(calc_spring_raw(positions), SMOOTH_TIME)
    return calc_shaper(get_ei_shaper(), positions)
######################################################################
# Plotting and startup
######################################################################
def plot_motion():
    """Plot nominal vs input-shaped motion and the modeled toolhead error.

    Three stacked axes: velocities, accelerations and the deviation of the
    spring-modeled toolhead from the commanded position.
    """
    # Nominal motion
    positions = gen_positions()
    velocities = gen_deriv(positions)
    accels = gen_deriv(velocities)
    # Updated motion
    upd_positions = gen_updated_position(positions)
    upd_velocities = gen_deriv(upd_positions)
    upd_accels = gen_deriv(upd_velocities)
    # Estimated position with model of belt as spring
    spring_orig = estimate_spring(positions)
    spring_upd = estimate_spring(upd_positions)
    spring_diff_orig = [n-o for n, o in zip(spring_orig, positions)]
    spring_diff_upd = [n-o for n, o in zip(spring_upd, positions)]
    head_velocities = gen_deriv(spring_orig)
    head_accels = gen_deriv(head_velocities)
    head_upd_velocities = gen_deriv(spring_upd)
    head_upd_accels = gen_deriv(head_upd_velocities)
    # Build plot
    times = [SEG_TIME * i for i in range(len(positions))]
    # NOTE(review): upd_velocities appears twice below; the second entry is
    # a harmless no-op - possibly upd_positions was intended.  Confirm.
    trim_lists(times, velocities, accels,
               upd_velocities, upd_velocities, upd_accels,
               spring_diff_orig, spring_diff_upd,
               head_velocities, head_upd_velocities,
               head_accels, head_upd_accels)
    fig, (ax1, ax2, ax3) = matplotlib.pyplot.subplots(nrows=3, sharex=True)
    ax1.set_title("Simulation: resonance freq=%.1f Hz, damping_ratio=%.3f,\n"
                  "configured freq=%.1f Hz, damping_ratio = %.3f"
                  % (SPRING_FREQ, DAMPING_RATIO, CONFIG_FREQ
                     , CONFIG_DAMPING_RATIO))
    ax1.set_ylabel('Velocity (mm/s)')
    ax1.plot(times, upd_velocities, 'r', label='New Velocity', alpha=0.8)
    ax1.plot(times, velocities, 'g', label='Nominal Velocity', alpha=0.8)
    ax1.plot(times, head_velocities, label='Head Velocity', alpha=0.4)
    ax1.plot(times, head_upd_velocities, label='New Head Velocity', alpha=0.4)
    fontP = matplotlib.font_manager.FontProperties()
    fontP.set_size('x-small')
    ax1.legend(loc='best', prop=fontP)
    ax1.grid(True)
    ax2.set_ylabel('Acceleration (mm/s^2)')
    ax2.plot(times, upd_accels, 'r', label='New Accel', alpha=0.8)
    ax2.plot(times, accels, 'g', label='Nominal Accel', alpha=0.8)
    ax2.plot(times, head_accels, alpha=0.4)
    ax2.plot(times, head_upd_accels, alpha=0.4)
    ax2.set_ylim([-5. * ACCEL, 5. * ACCEL])
    ax2.legend(loc='best', prop=fontP)
    ax2.grid(True)
    ax3.set_ylabel('Deviation (mm)')
    ax3.plot(times, spring_diff_upd, 'r', label='New', alpha=0.8)
    ax3.plot(times, spring_diff_orig, 'g', label='Nominal', alpha=0.8)
    ax3.grid(True)
    ax3.legend(loc='best', prop=fontP)
    ax3.set_xlabel('Time (s)')
    return fig
def setup_matplotlib(output_to_file):
    """Import matplotlib submodules, using the Agg backend for file output."""
    global matplotlib
    if output_to_file:
        # Backend must be selected before pyplot is imported
        matplotlib.use('Agg')
    import matplotlib.pyplot
    import matplotlib.dates
    import matplotlib.font_manager
    import matplotlib.ticker
def main():
    """Command-line entry point: render the motion simulation graph."""
    parser = optparse.OptionParser("%prog [options]")
    parser.add_option("-o", "--output", type="string", dest="output",
                      default=None, help="filename of output graph")
    options, args = parser.parse_args()
    if args:
        parser.error("Incorrect number of arguments")
    setup_matplotlib(options.output is not None)
    fig = plot_motion()
    if options.output is None:
        # No output file requested - open an interactive window
        matplotlib.pyplot.show()
    else:
        fig.set_size_inches(8, 6)
        fig.savefig(options.output)
# Invoke the command-line entry point when run as a script
if __name__ == '__main__':
    main()

283
scripts/graph_shaper.py Normal file
View File

@@ -0,0 +1,283 @@
#!/usr/bin/env python
# Script to plot input shapers
#
# Copyright (C) 2020 Kevin O'Connor <kevin@koconnor.net>
# Copyright (C) 2020 Dmitry Butyugin <dmbutyugin@google.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import optparse, math
import matplotlib
# A set of damping ratios to calculate shaper response for
DAMPING_RATIOS=[0.05, 0.1, 0.2]
# Parameters of the input shaper
SHAPER_FREQ=50.0
SHAPER_DAMPING_RATIO=0.1
# Simulate input shaping of step function for these true resonance frequency
# and damping ratio
STEP_SIMULATION_RESONANCE_FREQ=60.
STEP_SIMULATION_DAMPING_RATIO=0.15
# If set, defines which range of frequencies to plot shaper frequency response
PLOT_FREQ_RANGE = [] # If empty, will be automatically determined
#PLOT_FREQ_RANGE = [10., 100.]
PLOT_FREQ_STEP = .01
######################################################################
# Input shapers
######################################################################
def get_zv_shaper():
    """Zero Vibration (ZV) shaper tuned to SHAPER_FREQ."""
    damped = math.sqrt(1. - SHAPER_DAMPING_RATIO**2)
    decay = math.exp(-SHAPER_DAMPING_RATIO * math.pi / damped)
    period = 1. / (SHAPER_FREQ * damped)
    return ([1., decay], [0., .5*period], "ZV")
def get_zvd_shaper():
    """ZV-Derivative (ZVD) shaper tuned to SHAPER_FREQ."""
    damped = math.sqrt(1. - SHAPER_DAMPING_RATIO**2)
    decay = math.exp(-SHAPER_DAMPING_RATIO * math.pi / damped)
    period = 1. / (SHAPER_FREQ * damped)
    return ([1., 2.*decay, decay**2], [0., .5*period, period], "ZVD")
def get_mzv_shaper():
    """Modified ZV (MZV) shaper tuned to SHAPER_FREQ."""
    damped = math.sqrt(1. - SHAPER_DAMPING_RATIO**2)
    decay = math.exp(-.75 * SHAPER_DAMPING_RATIO * math.pi / damped)
    period = 1. / (SHAPER_FREQ * damped)
    a1 = 1. - 1. / math.sqrt(2.)
    a2 = (math.sqrt(2.) - 1.) * decay
    a3 = a1 * decay * decay
    return ([a1, a2, a3], [0., .375*period, .75*period], "MZV")
def get_ei_shaper():
    """Extra-Insensitive (EI) shaper, 5% residual vibration tolerance."""
    v_tol = 0.05 # vibration tolerance
    damped = math.sqrt(1. - SHAPER_DAMPING_RATIO**2)
    decay = math.exp(-SHAPER_DAMPING_RATIO * math.pi / damped)
    period = 1. / (SHAPER_FREQ * damped)
    a1 = .25 * (1. + v_tol)
    a2 = .5 * (1. - v_tol) * decay
    a3 = a1 * decay * decay
    return ([a1, a2, a3], [0., .5*period, period], "EI")
def get_2hump_ei_shaper():
    """Two-hump Extra-Insensitive shaper, 5% vibration tolerance."""
    v_tol = 0.05 # vibration tolerance
    damped = math.sqrt(1. - SHAPER_DAMPING_RATIO**2)
    decay = math.exp(-SHAPER_DAMPING_RATIO * math.pi / damped)
    period = 1. / (SHAPER_FREQ * damped)
    V2 = v_tol**2
    X = pow(V2 * (math.sqrt(1. - V2) + 1.), 1./3.)
    a1 = (3.*X*X + 2.*X + 3.*V2) / (16.*X)
    a2 = (.5 - a1) * decay
    a3 = a2 * decay
    a4 = a1 * decay * decay * decay
    return ([a1, a2, a3, a4], [0., .5*period, period, 1.5*period],
            "2-hump EI")
def get_3hump_ei_shaper():
    """Three-hump Extra-Insensitive shaper, 5% vibration tolerance."""
    v_tol = 0.05 # vibration tolerance
    damped = math.sqrt(1. - SHAPER_DAMPING_RATIO**2)
    decay = math.exp(-SHAPER_DAMPING_RATIO * math.pi / damped)
    period = 1. / (SHAPER_FREQ * damped)
    K2 = decay * decay
    a1 = 0.0625 * (1. + 3. * v_tol + 2. * math.sqrt(2. * (v_tol + 1.) * v_tol))
    a2 = 0.25 * (1. - v_tol) * decay
    a3 = (0.5 * (1. + v_tol) - 2. * a1) * K2
    a4 = a2 * K2
    a5 = a1 * K2 * K2
    return ([a1, a2, a3, a4, a5],
            [0., .5*period, period, 1.5*period, 2.*period], "3-hump EI")
def estimate_shaper(shaper, freq, damping_ratio):
    """Residual vibration of the shaper at freq for a given damping ratio."""
    A, T, _ = shaper
    n = len(T)
    inv_D = 1. / sum(A)
    omega = 2. * math.pi * freq
    damping = damping_ratio * omega
    omega_d = omega * math.sqrt(1. - damping_ratio**2)
    S = C = 0
    for a, t in zip(A, T):
        # Each impulse decays from its time until the last impulse
        W = a * math.exp(-damping * (T[-1] - t))
        S += W * math.sin(omega_d * t)
        C += W * math.cos(omega_d * t)
    return math.sqrt(S*S + C*C) * inv_D
def shift_pulses(shaper):
    """Shift impulse times in place so the weighted mean time is zero."""
    A, T, name = shaper
    mean_t = sum([a * t for a, t in zip(A, T)]) / sum(A)
    for i in range(len(T)):
        T[i] -= mean_t
# Shaper selection: which get_*_shaper() constructor the plots use
get_shaper = get_ei_shaper
######################################################################
# Plotting and startup
######################################################################
def bisect(func, left, right):
    """Locate a sign change of func on [left, right] by interval
    halving; returns the midpoint of the final bracket (1e-8 wide).

    Assumes func changes sign exactly once on the interval.
    """
    left_sign = math.copysign(1., func(left))
    while right - left > 1e-8:
        midpoint = (left + right) * .5
        if math.copysign(1., func(midpoint)) == left_sign:
            left = midpoint
        else:
            right = midpoint
    return (left + right) * .5
def find_shaper_plot_range(shaper, vib_tol):
    # Frequency band to graph: the explicit PLOT_FREQ_RANGE when
    # configured, otherwise the band around SHAPER_FREQ where the
    # shaper's residual vibration exceeds vib_tol.
    def residual_minus_tol(freq):
        return estimate_shaper(shaper, freq, DAMPING_RATIOS[0]) - vib_tol
    if PLOT_FREQ_RANGE:
        lo, hi = PLOT_FREQ_RANGE
    else:
        lo = bisect(residual_minus_tol, 0., SHAPER_FREQ)
        hi = bisect(residual_minus_tol, SHAPER_FREQ, 2.4 * SHAPER_FREQ)
    return (lo, hi)
def gen_shaper_response(shaper):
    # Calculate the shaper vibration response over a range of
    # resonance frequencies, one curve per configured damping ratio.
    freqs = []
    response = []
    freq, freq_end = find_shaper_plot_range(shaper, vib_tol=0.25)
    while freq <= freq_end:
        freqs.append(freq)
        response.append([estimate_shaper(shaper, freq, dr)
                         for dr in DAMPING_RATIOS])
        freq += PLOT_FREQ_STEP
    legend = ['damping ratio = %.3f' % dr for dr in DAMPING_RATIOS]
    return freqs, response, legend
def gen_shaped_step_function(shaper):
    """Simulate feeding a unit step through the shaper into an
    underdamped second-order resonant system.

    Returns (times, values, legend) where each entry of values is
    [raw step, shaper-commanded position, system response], all
    normalized by the total pulse amplitude.
    """
    A, T, _ = shaper
    inv_D = 1. / sum(A)
    n = len(T)
    omega = 2. * math.pi * STEP_SIMULATION_RESONANCE_FREQ
    damping = STEP_SIMULATION_DAMPING_RATIO * omega
    omega_d = omega * math.sqrt(1. - STEP_SIMULATION_DAMPING_RATIO**2)
    phase = math.acos(STEP_SIMULATION_DAMPING_RATIO)
    # Start slightly before the first pulse, end after the response
    # has had time to settle
    t_start = T[0] - .5 / SHAPER_FREQ
    t_end = T[-1] + 1.5 / STEP_SIMULATION_RESONANCE_FREQ
    result = []
    time = []
    t = t_start
    def step_response(t):
        # Response of the underdamped system to a unit step at t=0
        if t < 0.:
            return 0.
        return 1. - math.exp(-damping * t) * math.sin(omega_d * t
                                                      + phase) / math.sin(phase)
    while t <= t_end:
        val = []
        val.append(1. if t >= 0. else 0.)
        commanded = 0.
        response = 0.
        # Superpose the contribution of every pulse already emitted
        for i in range(n):
            if t < T[i]:
                continue
            commanded += A[i]
            response += A[i] * step_response(t - T[i])
        val.append(commanded * inv_D)
        val.append(response * inv_D)
        result.append(val)
        time.append(t)
        t += .01 / SHAPER_FREQ
    legend = ['step', 'shaper commanded', 'system response']
    return time, result, legend
def plot_shaper(shaper):
    """Build a two-pane figure: the shaper's vibration frequency
    response on top and a shaped unit-step simulation below."""
    # Center the pulse train around t=0 before simulating/plotting
    shift_pulses(shaper)
    freqs, response, response_legend = gen_shaper_response(shaper)
    time, step_vals, step_legend = gen_shaped_step_function(shaper)
    fig, (ax1, ax2) = matplotlib.pyplot.subplots(nrows=2, figsize=(10,9))
    ax1.set_title("Vibration response simulation for shaper '%s',\n"
                  "shaper_freq=%.1f Hz, damping_ratio=%.3f"
                  % (shaper[-1], SHAPER_FREQ, SHAPER_DAMPING_RATIO))
    ax1.plot(freqs, response)
    ax1.set_ylim(bottom=0.)
    fontP = matplotlib.font_manager.FontProperties()
    fontP.set_size('x-small')
    ax1.legend(response_legend, loc='best', prop=fontP)
    ax1.set_xlabel('Resonance frequency, Hz')
    ax1.set_ylabel('Remaining vibrations, ratio')
    ax1.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
    ax1.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
    ax1.grid(which='major', color='grey')
    ax1.grid(which='minor', color='lightgrey')
    ax2.set_title("Unit step input, resonance frequency=%.1f Hz, "
                  "damping ratio=%.3f" % (STEP_SIMULATION_RESONANCE_FREQ,
                                          STEP_SIMULATION_DAMPING_RATIO))
    ax2.plot(time, step_vals)
    ax2.legend(step_legend, loc='best', prop=fontP)
    ax2.set_xlabel('Time, sec')
    ax2.set_ylabel('Amplitude')
    ax2.grid()
    fig.tight_layout()
    return fig
def setup_matplotlib(output_to_file):
    # Import matplotlib lazily so the headless 'Agg' backend can be
    # selected before pyplot is first imported (required for file-only
    # output on machines without a display).
    global matplotlib
    if output_to_file:
        matplotlib.use('Agg')
    import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
    import matplotlib.ticker
def main():
    # Command-line entry point: parse options, build the shaper graph,
    # then either display it interactively or write it to a file.
    parser = optparse.OptionParser("%prog [options]")
    parser.add_option("-o", "--output", type="string", dest="output",
                      default=None, help="filename of output graph")
    options, args = parser.parse_args()
    if args:
        parser.error("Incorrect number of arguments")
    # Draw graph
    setup_matplotlib(options.output is not None)
    fig = plot_shaper(get_shaper())
    # Show graph
    if options.output is None:
        matplotlib.pyplot.show()
    else:
        fig.set_size_inches(8, 6)
        fig.savefig(options.output)

View File

@@ -0,0 +1,168 @@
#!/usr/bin/env python2
# Tool to graph temperature sensor ADC resolution
#
# Copyright (C) 2020 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, os, optparse
import matplotlib
######################################################################
# Dummy config / printer / etc. class emulation
######################################################################
class DummyConfig:
    """Stand-in object emulating the handful of Klipper host-side
    interfaces (config, printer, heaters, query_adc, pins, mcu_adc)
    needed to instantiate sensor classes outside a real printer.

    The same instance is returned from every lookup, so the sensor
    code always talks back to this object.
    """
    def __init__(self, config_settings):
        # config_settings: dict of option name -> value served by getfloat()
        self.config_settings = config_settings
        # Populated via add_sensor_factory(); maps sensor type -> factory
        self.sensor_factories = {}
    # Emulate config class
    def getfloat(self, option, default, **kw):
        # Extra keyword constraints (minval/maxval/...) are ignored
        return self.config_settings.get(option, default)
    def get(self, option, default=None):
        # String options always fall back to their default
        return default
    def get_printer(self):
        return self
    def get_name(self):
        return "dummy"
    # Emulate printer class
    def load_object(self, config, name):
        return self
    def lookup_object(self, name):
        return self
    # Emulate heaters class
    def add_sensor_factory(self, name, factory):
        self.sensor_factories[name] = factory
    def do_create_sensor(self, sensor_type):
        # NOTE(review): assumes the factory result exposes an
        # adc_convert attribute with a calc_adc() method - confirm
        # against the extras.* sensor classes.
        return self.sensor_factories[sensor_type](self).adc_convert
    # Emulate query_adc class
    def register_adc(self, name, klass):
        pass
    # Emulate pins class
    def setup_pin(self, pin_type, pin_name):
        return self
    # Emulate mcu_adc class
    def setup_adc_callback(self, time, callback):
        pass
######################################################################
# Plotting
######################################################################
def plot_adc_resolution(config, sensors):
    """Graph, for each sensor type, the expected normalized ADC value
    versus temperature (top pane) and the ADC change per 1C step
    (bottom pane, a measure of sensor resolution)."""
    # Temperature list (1C steps from 1 to 350)
    all_temps = [float(i) for i in range(1, 351)]
    temps = all_temps[:-1]
    # Build plot
    fig, (ax1, ax2) = matplotlib.pyplot.subplots(nrows=2, sharex=True)
    pullup = config.getfloat('pullup_resistor', 0.)
    adc_voltage = config.getfloat('adc_voltage', 0.)
    ax1.set_title("Temperature Sensor (pullup=%.0f, adc_voltage=%.3f)"
                  % (pullup, adc_voltage))
    ax1.set_ylabel('ADC')
    ax2.set_ylabel('ADC change per 1C')
    for sensor in sensors:
        # sc is the sensor's adc_convert object (see DummyConfig)
        sc = config.do_create_sensor(sensor)
        adcs = [sc.calc_adc(t) for t in all_temps]
        ax1.plot(temps, adcs[:-1], label=sensor, alpha=0.6)
        # Forward finite difference between adjacent 1C samples
        adc_deltas = [abs(adcs[i+1] - adcs[i]) for i in range(len(temps))]
        ax2.plot(temps, adc_deltas, alpha=0.6)
    fontP = matplotlib.font_manager.FontProperties()
    fontP.set_size('x-small')
    ax1.legend(loc='best', prop=fontP)
    ax2.set_xlabel('Temperature (C)')
    ax1.grid(True)
    ax2.grid(True)
    fig.tight_layout()
    return fig
def plot_resistance(config, sensors):
    """Graph the implied sensor resistance versus temperature for each
    sensor type, derived from the normalized ADC value and the
    configured pullup resistor."""
    # Temperature list (1C steps from 1 to 350)
    all_temps = [float(i) for i in range(1, 351)]
    # Build plot
    fig, ax = matplotlib.pyplot.subplots()
    pullup = config.getfloat('pullup_resistor', 0.)
    ax.set_title("Temperature Sensor (pullup=%.0f)" % (pullup,))
    ax.set_ylabel('Resistance (Ohms)')
    for sensor in sensors:
        sc = config.do_create_sensor(sensor)
        adcs = [sc.calc_adc(t) for t in all_temps]
        # Invert the voltage divider: R = pullup * adc / (1 - adc).
        # NOTE(review): an adc value of exactly 1.0 would raise
        # ZeroDivisionError - presumed unreachable for real sensors.
        rs = [pullup * adc / (1.0 - adc) for adc in adcs]
        ax.plot(all_temps, rs, label=sensor, alpha=0.6)
    fontP = matplotlib.font_manager.FontProperties()
    fontP.set_size('x-small')
    ax.legend(loc='best', prop=fontP)
    ax.set_xlabel('Temperature (C)')
    ax.grid(True)
    fig.tight_layout()
    return fig
######################################################################
# Startup
######################################################################
def setup_matplotlib(output_to_file):
    # Import matplotlib lazily so the headless 'Agg' backend (and the
    # autolayout option used for file output) can be configured before
    # pyplot is first imported.
    global matplotlib
    if output_to_file:
        matplotlib.rcParams.update({'figure.autolayout': True})
        matplotlib.use('Agg')
    import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
    import matplotlib.ticker
def import_sensors(config):
    """Load Klipper's sensor definition modules against the dummy
    config object."""
    global extras
    # Load adc_temperature.py and thermistor.py modules
    # Make the sibling klippy/ directory importable so the "extras"
    # package resolves
    kdir = os.path.join(os.path.dirname(__file__), '..', 'klippy')
    sys.path.append(kdir)
    import extras.adc_temperature, extras.thermistor
    # Presumably registers the sensor factories via
    # config.add_sensor_factory() - see DummyConfig; confirm against
    # the extras modules.
    extras.thermistor.load_config(config)
    extras.adc_temperature.load_config(config)
def main():
    """Command-line entry point: build a dummy config, load the sensor
    definitions, and graph either ADC resolution or resistance."""
    # Parse command-line arguments
    usage = "%prog [options]"
    opts = optparse.OptionParser(usage)
    opts.add_option("-o", "--output", type="string", dest="output",
                    default=None, help="filename of output graph")
    opts.add_option("-p", "--pullup", type="float", dest="pullup",
                    default=4700., help="pullup resistor")
    # Fix: help text previously said "pullup resistor" (copy-paste
    # error) - this option sets the ADC reference voltage
    opts.add_option("-v", "--voltage", type="float", dest="voltage",
                    default=5., help="adc reference voltage")
    opts.add_option("-s", "--sensors", type="string", dest="sensors",
                    default="", help="list of sensors (comma separated)")
    opts.add_option("-r", "--resistance", action="store_true",
                    help="graph sensor resistance")
    options, args = opts.parse_args()
    if len(args) != 0:
        opts.error("Incorrect number of arguments")
    # Import sensors
    config_settings = {'pullup_resistor': options.pullup,
                       'adc_voltage': options.voltage}
    config = DummyConfig(config_settings)
    import_sensors(config)
    # Determine sensors to graph (default: everything registered)
    if options.sensors:
        sensors = [s.strip() for s in options.sensors.split(',')]
    else:
        sensors = sorted(config.sensor_factories.keys())
    # Draw graph
    setup_matplotlib(options.output is not None)
    if options.resistance:
        fig = plot_resistance(config, sensors)
    else:
        fig = plot_adc_resolution(config, sensors)
    # Show graph
    if options.output is None:
        matplotlib.pyplot.show()
    else:
        fig.set_size_inches(8, 6)
        fig.savefig(options.output)

303
scripts/graphstats.py Normal file
View File

@@ -0,0 +1,303 @@
#!/usr/bin/env python
# Script to parse a logging file, extract the stats, and graph them
#
# Copyright (C) 2016-2021 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import optparse, datetime
import matplotlib
MAXBANDWIDTH=25000.   # Assumed max serial bandwidth (bytes/sec) for graph scaling
MAXBUFFER=2.          # Host lookahead buffer (seconds) treated as "full"
STATS_INTERVAL=5.     # Seconds between Stats lines; scales mcu_awake readings
TASK_MAX=0.0025       # MCU task time (seconds) treated as 100% load
# Fields that are reported under a section prefix (eg "mcu:" or a
# heater name) and therefore keep that prefix to stay unique
APPLY_PREFIX = [
    'mcu_awake', 'mcu_task_avg', 'mcu_task_stddev', 'bytes_write',
    'bytes_read', 'bytes_retransmit', 'freq', 'adj',
    'target', 'temp', 'pwm'
]
def parse_log(logname, mcu):
    """Parse Klipper "Stats" lines from the log file logname.

    Returns a list of dicts (one per stats line) mapping field name to
    its raw string value, plus '#sampletime' mapped to the float log
    timestamp.  Fields listed in APPLY_PREFIX keep their reporting
    prefix (eg "mcu2:freq"), except for the selected mcu (default
    "mcu") whose prefix is stripped.
    """
    if mcu is None:
        mcu = "mcu"
    mcu_prefix = mcu + ":"
    apply_prefix = { p: 1 for p in APPLY_PREFIX }
    out = []
    # Use a context manager so the file is closed even if parsing
    # raises (original left the handle open on error)
    with open(logname, 'r') as f:
        for line in f:
            parts = line.split()
            if not parts or parts[0] not in ('Stats', 'INFO:root:Stats'):
                continue
            prefix = ""
            keyparts = {}
            for p in parts[2:]:
                if '=' not in p:
                    # A bare word introduces a new reporting section
                    prefix = p
                    if prefix == mcu_prefix:
                        prefix = ''
                    continue
                name, val = p.split('=', 1)
                if name in apply_prefix:
                    name = prefix + name
                keyparts[name] = val
            if 'print_time' not in keyparts:
                continue
            # parts[1] is the timestamp followed by a trailing ':'
            keyparts['#sampletime'] = float(parts[1][:-1])
            out.append(keyparts)
    return out
def setup_matplotlib(output_to_file):
    # Import matplotlib lazily so the headless 'Agg' backend can be
    # selected before pyplot is first imported (needed for file-only
    # output on machines without a display).
    global matplotlib
    if output_to_file:
        matplotlib.use('Agg')
    import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
    import matplotlib.ticker
def find_print_restarts(data):
    """Return a dict (sampletime -> 1) of samples in "buffer runoff"
    periods that are NOT explained by a print stall.

    Samples are scanned newest-to-oldest.  Consecutive samples where
    the host buffer drained (buffer_time < 1s) are grouped; a group
    that coincides with an increase of the 'print_stall' counter is
    treated as a genuine stall and excluded.  Callers use the result
    to zero out the host-buffer graph across print restarts.
    """
    runoff_samples = {}
    last_runoff_start = last_buffer_time = last_sampletime = 0.
    last_print_stall = 0
    for d in reversed(data):
        # Check for buffer runoff
        sampletime = d['#sampletime']
        buffer_time = float(d.get('buffer_time', 0.))
        if (last_runoff_start and last_sampletime - sampletime < 5
            and buffer_time > last_buffer_time):
            # Still draining within the current runoff group
            runoff_samples[last_runoff_start][1].append(sampletime)
        elif buffer_time < 1.:
            # Buffer nearly empty - start a new runoff group
            last_runoff_start = sampletime
            runoff_samples[last_runoff_start] = [False, [sampletime]]
        else:
            last_runoff_start = 0.
        last_buffer_time = buffer_time
        last_sampletime = sampletime
        # Check for print stall.  Walking backwards, a decrease means
        # the stall counter advanced at this point going forward.
        print_stall = int(d['print_stall'])
        if print_stall < last_print_stall:
            if last_runoff_start:
                runoff_samples[last_runoff_start][0] = True
        last_print_stall = print_stall
    sample_resets = {sampletime: 1 for stall, samples in runoff_samples.values()
                     for sampletime in samples if not stall}
    return sample_resets
def plot_mcu(data, maxbw):
    """Graph MCU bandwidth, MCU load, host buffer fill and MCU awake
    time over the log period, each expressed as a percentage."""
    # Generate data for plot
    basetime = lasttime = data[0]['#sampletime']
    lastbw = float(data[0]['bytes_write']) + float(data[0]['bytes_retransmit'])
    sample_resets = find_print_restarts(data)
    times = []
    bwdeltas = []
    loads = []
    awake = []
    hostbuffers = []
    for d in data:
        st = d['#sampletime']
        timedelta = st - lasttime
        if timedelta <= 0.:
            # Skip duplicate/out-of-order samples
            continue
        bw = float(d['bytes_write']) + float(d['bytes_retransmit'])
        if bw < lastbw:
            # Byte counter went backwards (eg, firmware restart) - resync
            lastbw = bw
            continue
        load = float(d['mcu_task_avg']) + 3*float(d['mcu_task_stddev'])
        if st - basetime < 15.:
            # Suppress load readings during the first 15s of startup
            load = 0.
        # NOTE(review): pt is computed but never used
        pt = float(d['print_time'])
        hb = float(d['buffer_time'])
        if hb >= MAXBUFFER or st in sample_resets:
            hb = 0.
        else:
            hb = 100. * (MAXBUFFER - hb) / MAXBUFFER
        hostbuffers.append(hb)
        times.append(datetime.datetime.utcfromtimestamp(st))
        bwdeltas.append(100. * (bw - lastbw) / (maxbw * timedelta))
        loads.append(100. * load / TASK_MAX)
        awake.append(100. * float(d.get('mcu_awake', 0.)) / STATS_INTERVAL)
        lasttime = st
        lastbw = bw
    # Build plot
    fig, ax1 = matplotlib.pyplot.subplots()
    ax1.set_title("MCU bandwidth and load utilization")
    ax1.set_xlabel('Time')
    ax1.set_ylabel('Usage (%)')
    ax1.plot_date(times, bwdeltas, 'g', label='Bandwidth', alpha=0.8)
    ax1.plot_date(times, loads, 'r', label='MCU load', alpha=0.8)
    ax1.plot_date(times, hostbuffers, 'c', label='Host buffer', alpha=0.8)
    ax1.plot_date(times, awake, 'y', label='Awake time', alpha=0.6)
    fontP = matplotlib.font_manager.FontProperties()
    fontP.set_size('x-small')
    ax1.legend(loc='best', prop=fontP)
    ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
    ax1.grid(True)
    return fig
def plot_system(data):
    """Graph host system load and klippy process CPU usage (left axis)
    together with available system memory (right axis)."""
    # Generate data for plot
    lasttime = data[0]['#sampletime']
    lastcputime = float(data[0]['cputime'])
    times = []
    sysloads = []
    cputimes = []
    memavails = []
    for d in data:
        st = d['#sampletime']
        timedelta = st - lasttime
        if timedelta <= 0.:
            # Skip duplicate/out-of-order samples
            continue
        lasttime = st
        times.append(datetime.datetime.utcfromtimestamp(st))
        cputime = float(d['cputime'])
        # Per-sample CPU usage, clamped to the [0%, 150%] range
        cpudelta = max(0., min(1.5, (cputime - lastcputime) / timedelta))
        lastcputime = cputime
        cputimes.append(cpudelta * 100.)
        sysloads.append(float(d['sysload']) * 100.)
        memavails.append(float(d['memavail']))
    # Build plot
    fig, ax1 = matplotlib.pyplot.subplots()
    ax1.set_title("System load utilization")
    ax1.set_xlabel('Time')
    ax1.set_ylabel('Load (% of a core)')
    ax1.plot_date(times, sysloads, '-', label='system load',
                  color='cyan', alpha=0.8)
    ax1.plot_date(times, cputimes, '-', label='process time',
                  color='red', alpha=0.8)
    # Second y-axis for the memory series
    ax2 = ax1.twinx()
    ax2.set_ylabel('Available memory (KB)')
    ax2.plot_date(times, memavails, '-', label='system memory',
                  color='yellow', alpha=0.3)
    fontP = matplotlib.font_manager.FontProperties()
    fontP.set_size('x-small')
    # Merge the legends of both axes into one box
    ax1li, ax1la = ax1.get_legend_handles_labels()
    ax2li, ax2la = ax2.get_legend_handles_labels()
    ax1.legend(ax1li + ax2li, ax1la + ax2la, loc='best', prop=fontP)
    ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
    ax1.grid(True)
    return fig
def plot_frequency(data, mcu):
    """Graph the reported clock frequency (and clock adjustment) for
    each mcu over time; restricted to one mcu when given."""
    all_keys = {}
    for d in data:
        all_keys.update(d)
    one_mcu = mcu is not None
    # Select the "freq"/"adj" fields - unprefixed for the chosen mcu,
    # or every "<name>:freq"/"<name>:adj" field otherwise
    graph_keys = { key: ([], []) for key in all_keys
                   if (key in ("freq", "adj") or (not one_mcu and (
                       key.endswith(":freq") or key.endswith(":adj")))) }
    for d in data:
        st = datetime.datetime.utcfromtimestamp(d['#sampletime'])
        for key, (times, values) in graph_keys.items():
            val = d.get(key)
            # Skip missing samples; the literal '0'/'1' values are
            # filtered out as well
            if val not in (None, '0', '1'):
                times.append(st)
                values.append(float(val))
    # Build plot
    fig, ax1 = matplotlib.pyplot.subplots()
    if one_mcu:
        ax1.set_title("MCU '%s' frequency" % (mcu,))
    else:
        ax1.set_title("MCU frequency")
    ax1.set_xlabel('Time')
    ax1.set_ylabel('Frequency')
    for key in sorted(graph_keys):
        times, values = graph_keys[key]
        ax1.plot_date(times, values, '.', label=key)
    fontP = matplotlib.font_manager.FontProperties()
    fontP.set_size('x-small')
    ax1.legend(loc='best', prop=fontP)
    ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
    ax1.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%d'))
    ax1.grid(True)
    return fig
def plot_temperature(data, heaters):
    """Graph temperature (and, when present, target and pwm) for each
    comma-separated heater/sensor name in the heaters string."""
    fig, ax1 = matplotlib.pyplot.subplots()
    # Second y-axis carries the pwm series
    ax2 = ax1.twinx()
    for heater in heaters.split(','):
        heater = heater.strip()
        temp_key = heater + ':' + 'temp'
        target_key = heater + ':' + 'target'
        pwm_key = heater + ':' + 'pwm'
        times = []
        temps = []
        targets = []
        pwm = []
        for d in data:
            temp = d.get(temp_key)
            if temp is None:
                continue
            times.append(datetime.datetime.utcfromtimestamp(d['#sampletime']))
            temps.append(float(temp))
            pwm.append(float(d.get(pwm_key, 0.)))
            targets.append(float(d.get(target_key, 0.)))
        ax1.plot_date(times, temps, '-', label='%s temp' % (heater,), alpha=0.8)
        # Only plot target/pwm series that are not identically zero
        if any(targets):
            label = '%s target' % (heater,)
            ax1.plot_date(times, targets, '-', label=label, alpha=0.3)
        if any(pwm):
            label = '%s pwm' % (heater,)
            ax2.plot_date(times, pwm, '-', label=label, alpha=0.2)
    # Build plot
    ax1.set_title("Temperature of %s" % (heaters,))
    ax1.set_xlabel('Time')
    ax1.set_ylabel('Temperature')
    ax2.set_ylabel('pwm')
    fontP = matplotlib.font_manager.FontProperties()
    fontP.set_size('x-small')
    # Merge the legends of both axes into one box
    ax1li, ax1la = ax1.get_legend_handles_labels()
    ax2li, ax2la = ax2.get_legend_handles_labels()
    ax1.legend(ax1li + ax2li, ax1la + ax2la, loc='best', prop=fontP)
    ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
    ax1.grid(True)
    return fig
def main():
    # Command-line entry point: parse the log and dispatch to the
    # requested graph type, then display or save the figure.
    parser = optparse.OptionParser("%prog [options] <logfile>")
    parser.add_option("-f", "--frequency", action="store_true",
                      help="graph mcu frequency")
    parser.add_option("-s", "--system", action="store_true",
                      help="graph system load")
    parser.add_option("-o", "--output", type="string", dest="output",
                      default=None, help="filename of output graph")
    parser.add_option("-t", "--temperature", type="string", dest="heater",
                      default=None, help="graph heater temperature")
    parser.add_option("-m", "--mcu", type="string", dest="mcu", default=None,
                      help="limit stats to the given mcu")
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.error("Incorrect number of arguments")
    # Parse data; nothing to graph if no stats lines were found
    data = parse_log(args[0], options.mcu)
    if not data:
        return
    # Draw graph
    setup_matplotlib(options.output is not None)
    if options.heater is not None:
        fig = plot_temperature(data, options.heater)
    elif options.frequency:
        fig = plot_frequency(data, options.mcu)
    elif options.system:
        fig = plot_system(data)
    else:
        fig = plot_mcu(data, MAXBANDWIDTH)
    # Show graph
    if options.output is None:
        matplotlib.pyplot.show()
    else:
        fig.set_size_inches(8, 6)
        fig.savefig(options.output)

102
scripts/install-arch.sh Normal file
View File

@@ -0,0 +1,102 @@
#!/bin/bash
# This script installs Klipper on an Arch Linux system
# Usage: run as a normal (non-root) user with sudo rights; AUR
# packages are built with the client named in AURCLIENT.
PYTHONDIR="${HOME}/klippy-env"
SYSTEMDDIR="/etc/systemd/system"
AURCLIENT="pamac"
KLIPPER_USER=$USER
KLIPPER_GROUP=$KLIPPER_USER
# Step 1: Install system packages
install_packages()
{
    # Packages for python cffi
    PKGLIST="python2-virtualenv libffi base-devel"
    # kconfig requirements
    PKGLIST="${PKGLIST} ncurses"
    # hub-ctrl
    PKGLIST="${PKGLIST} libusb"
    # AVR chip installation and building
    PKGLIST="${PKGLIST} avrdude avr-gcc avr-binutils avr-libc"
    # ARM chip installation and building
    AURLIST="stm32flash"
    PKGLIST="${PKGLIST} arm-none-eabi-newlib"
    PKGLIST="${PKGLIST} arm-none-eabi-gcc arm-none-eabi-binutils"
    # Install desired packages
    report_status "Installing packages..."
    sudo pacman -S ${PKGLIST}
    # stm32flash is only available from the AUR
    $AURCLIENT build ${AURLIST}
}
# Step 2: Create python virtual environment
create_virtualenv()
{
    report_status "Updating python virtual environment..."
    # Create virtualenv if it doesn't already exist
    [ ! -d ${PYTHONDIR} ] && virtualenv2 ${PYTHONDIR}
    # Install/update dependencies
    ${PYTHONDIR}/bin/pip install -r ${SRCDIR}/scripts/klippy-requirements.txt
}
# Step 3: Install startup script
install_script()
{
    # Create systemd service file (heredoc content is written verbatim
    # to the service file)
    KLIPPER_LOG=/tmp/klippy.log
    report_status "Installing system start script..."
    sudo /bin/sh -c "cat > $SYSTEMDDIR/klipper.service" << EOF
#Systemd service file for klipper
[Unit]
Description=Starts klipper on startup
After=network.target
[Install]
WantedBy=multi-user.target
[Service]
Type=simple
User=$KLIPPER_USER
RemainAfterExit=yes
ExecStart=${PYTHONDIR}/bin/python ${SRCDIR}/klippy/klippy.py ${HOME}/printer.cfg -l ${KLIPPER_LOG}
EOF
    # Use systemctl to enable the klipper systemd service script
    sudo systemctl enable klipper.service
    report_status "Make sure to add $KLIPPER_USER to the user group controlling your serial printer port"
}
# Step 4: Start host software
start_software()
{
    report_status "Launching Klipper host software..."
    sudo systemctl start klipper
}
# Helper functions
report_status()
{
    echo -e "\n\n###### $1"
}
verify_ready()
{
    if [ "$EUID" -eq 0 ]; then
        echo "This script must not run as root"
        # NOTE(review): "exit -1" is non-portable; bash maps it to 255
        exit -1
    fi
}
# Force script to exit if an error occurs
set -e
# Find SRCDIR from the pathname of this script
SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
# Run installation steps defined above
verify_ready
install_packages
create_virtualenv
install_script
start_software

View File

@@ -0,0 +1,73 @@
#!/bin/bash
# This script installs Klipper on a Beaglebone running Debian Jessie
# for use with its PRU micro-controller.
# Usage: run as a normal (non-root) user with sudo rights.
# Step 1: Do main install
install_main()
{
    # Run the debian script - should
    # work.
    ${SRCDIR}/scripts/install-debian.sh
}
# Step 2: Install additional system packages
install_packages()
{
    # Install desired packages (PRU cross-compiler)
    PKGLIST="gcc-pru"
    report_status "Installing beaglebone packages..."
    sudo apt-get install --yes ${PKGLIST}
}
# Step 3: Install startup script
install_script()
{
    report_status "Installing pru start script..."
    sudo cp "${SRCDIR}/scripts/klipper-pru-start.sh" /etc/init.d/klipper_pru
    sudo update-rc.d klipper_pru defaults
}
# Step 4: Install pru udev rule
install_udev()
{
    # Allow members of the tty group to access the PRU rpmsg device
    report_status "Installing pru udev rule..."
    sudo /bin/sh -c "cat > /etc/udev/rules.d/pru.rules" <<EOF
KERNEL=="rpmsg_pru30", GROUP="tty", MODE="0660"
EOF
}
# Step 5: Add user to tty group
install_groups()
{
    report_status "Adding $USER to tty group..."
    sudo adduser $USER tty
}
# Helper functions
report_status()
{
    echo -e "\n\n###### $1"
}
verify_ready()
{
    if [ "$EUID" -eq 0 ]; then
        echo "This script must not run as root"
        # NOTE(review): "exit -1" is non-portable; bash maps it to 255
        exit -1
    fi
}
# Force script to exit if an error occurs
set -e
# Find SRCDIR from the pathname of this script
SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
# Run installation steps defined above
verify_ready
install_main
install_packages
install_script
install_udev
install_groups

101
scripts/install-centos.sh Normal file
View File

@@ -0,0 +1,101 @@
#!/bin/bash
# This script installs Klipper on an x86_64 machine running the
# CentOS 7 distribution.
# Usage: run as a normal (non-root) user with sudo rights.
PYTHONDIR="${HOME}/klippy-env"
SYSTEMDDIR="/etc/systemd/system"
# Step 1: Install system packages
install_packages()
{
    # Packages for python cffi
    PKGLIST="python-virtualenv libffi-devel"
    # kconfig requirements
    PKGLIST="${PKGLIST} ncurses-devel"
    # hub-ctrl
    PKGLIST="${PKGLIST} libusb-devel"
    # AVR chip installation and building
    PKGLIST="${PKGLIST} avrdude gcc-avr32-linux-gnu binutils-avr32-linux-gnu avr-libc"
    # ARM chip installation and building
    # CentOS/Fedora do not appear to have these packages available at this time
    PKGLIST="${PKGLIST} arm-none-eabi-gcc-cs arm-none-eabi-newlib"
    # Install desired packages
    report_status "Installing packages..."
    sudo yum install -y ${PKGLIST}
}
# Step 2: Create python virtual environment
create_virtualenv()
{
    report_status "Updating python virtual environment..."
    # Create virtualenv if it doesn't already exist
    [ ! -d ${PYTHONDIR} ] && virtualenv -p python2 ${PYTHONDIR}
    # Install/update dependencies
    ${PYTHONDIR}/bin/pip install -r ${SRCDIR}/scripts/klippy-requirements.txt
}
# Step 3: Install startup script
install_script()
{
    # Create systemd service file (heredoc content is written verbatim
    # to the service file)
    report_status "Installing system start script..."
    sudo /bin/sh -c "cat > $SYSTEMDDIR/klipper.service" << EOF
#Systemd service file for klipper
[Unit]
Description=Starts klipper on startup
After=network.target
[Install]
WantedBy=multi-user.target
[Service]
Type=simple
User=$USER
RemainAfterExit=yes
ExecStart=${PYTHONDIR}/bin/python ${SRCDIR}/klippy/klippy.py ${HOME}/printer.cfg -l /var/log/klippy.log
EOF
    # Use systemctl to enable the klipper systemd service script
    sudo systemctl enable klipper.service
}
# Configuration for systemctl klipper
# NOTE(review): KLIPPY_USER appears to be a leftover from the
# init.d-style install scripts - it is not referenced anywhere in
# this script.
KLIPPY_USER=$USER
# Step 5: Start host software
start_software()
{
    report_status "Launching Klipper host software..."
    sudo systemctl restart klipper
}
# Helper functions
report_status()
{
    echo -e "\n\n###### $1"
}
verify_ready()
{
    if [ "$EUID" -eq 0 ]; then
        echo "This script must not run as root"
        # NOTE(review): "exit -1" is non-portable; bash maps it to 255
        exit -1
    fi
}
# Force script to exit if an error occurs
set -e
# Find SRCDIR from the pathname of this script
SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
# Run installation steps defined above
verify_ready
install_packages
create_virtualenv
install_script
start_software

105
scripts/install-debian.sh Normal file
View File

@@ -0,0 +1,105 @@
#!/bin/bash
# This script installs Klipper on an debian
# Usage: run as a normal (non-root) user with sudo rights.
#
PYTHONDIR="${HOME}/klippy-env"
SYSTEMDDIR="/etc/systemd/system"
KLIPPER_USER=$USER
KLIPPER_GROUP=$KLIPPER_USER
# Step 1: Install system packages
install_packages()
{
    # Packages for python cffi
    PKGLIST="virtualenv python-dev libffi-dev build-essential"
    # kconfig requirements
    PKGLIST="${PKGLIST} libncurses-dev"
    # hub-ctrl
    PKGLIST="${PKGLIST} libusb-dev"
    # AVR chip installation and building
    PKGLIST="${PKGLIST} avrdude gcc-avr binutils-avr avr-libc"
    # ARM chip installation and building
    PKGLIST="${PKGLIST} stm32flash libnewlib-arm-none-eabi"
    PKGLIST="${PKGLIST} gcc-arm-none-eabi binutils-arm-none-eabi libusb-1.0"
    # Update system package info
    report_status "Running apt-get update..."
    sudo apt-get update
    # Install desired packages
    report_status "Installing packages..."
    sudo apt-get install --yes ${PKGLIST}
}
# Step 2: Create python virtual environment
create_virtualenv()
{
    report_status "Updating python virtual environment..."
    # Create virtualenv if it doesn't already exist
    [ ! -d ${PYTHONDIR} ] && virtualenv -p python2 ${PYTHONDIR}
    # Install/update dependencies
    ${PYTHONDIR}/bin/pip install -r ${SRCDIR}/scripts/klippy-requirements.txt
}
# Step 3: Install startup script
install_script()
{
    # Create systemd service file (heredoc content is written verbatim
    # to the service file)
    KLIPPER_LOG=/tmp/klippy.log
    report_status "Installing system start script..."
    sudo /bin/sh -c "cat > $SYSTEMDDIR/klipper.service" << EOF
#Systemd service file for klipper
[Unit]
Description=Starts klipper on startup
After=network.target
[Install]
WantedBy=multi-user.target
[Service]
Type=simple
User=$KLIPPER_USER
RemainAfterExit=yes
ExecStart=${PYTHONDIR}/bin/python ${SRCDIR}/klippy/klippy.py ${HOME}/printer.cfg -l ${KLIPPER_LOG}
Restart=always
RestartSec=10
EOF
    # Use systemctl to enable the klipper systemd service script
    sudo systemctl enable klipper.service
}
# Step 4: Start host software
start_software()
{
    report_status "Launching Klipper host software..."
    sudo systemctl start klipper
}
# Helper functions
report_status()
{
    echo -e "\n\n###### $1"
}
verify_ready()
{
    if [ "$EUID" -eq 0 ]; then
        echo "This script must not run as root"
        # NOTE(review): "exit -1" is non-portable; bash maps it to 255
        exit -1
    fi
}
# Force script to exit if an error occurs
set -e
# Find SRCDIR from the pathname of this script
SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
# Run installation steps defined above
verify_ready
install_packages
create_virtualenv
install_script
start_software

103
scripts/install-octopi.sh Normal file
View File

@@ -0,0 +1,103 @@
#!/bin/bash
# This script installs Klipper on a Raspberry Pi machine running the
# OctoPi distribution.
# Usage: run as a normal (non-root) user with sudo rights.
PYTHONDIR="${HOME}/klippy-env"
# Step 1: Install system packages
install_packages()
{
    # Packages for python cffi
    PKGLIST="virtualenv python-dev libffi-dev build-essential"
    # kconfig requirements
    PKGLIST="${PKGLIST} libncurses-dev"
    # hub-ctrl
    PKGLIST="${PKGLIST} libusb-dev"
    # AVR chip installation and building
    PKGLIST="${PKGLIST} avrdude gcc-avr binutils-avr avr-libc"
    # ARM chip installation and building
    PKGLIST="${PKGLIST} stm32flash dfu-util libnewlib-arm-none-eabi"
    PKGLIST="${PKGLIST} gcc-arm-none-eabi binutils-arm-none-eabi libusb-1.0"
    # Update system package info
    report_status "Running apt-get update..."
    sudo apt-get update
    # Install desired packages
    report_status "Installing packages..."
    sudo apt-get install --yes ${PKGLIST}
}
# Step 2: Create python virtual environment
create_virtualenv()
{
    report_status "Updating python virtual environment..."
    # Create virtualenv if it doesn't already exist
    [ ! -d ${PYTHONDIR} ] && virtualenv -p python2 ${PYTHONDIR}
    # Install/update dependencies
    ${PYTHONDIR}/bin/pip install -r ${SRCDIR}/scripts/klippy-requirements.txt
}
# Step 3: Install startup script
install_script()
{
    # OctoPi uses sysvinit-style startup rather than systemd
    report_status "Installing system start script..."
    sudo cp "${SRCDIR}/scripts/klipper-start.sh" /etc/init.d/klipper
    sudo update-rc.d klipper defaults
}
# Step 4: Install startup script config
install_config()
{
    DEFAULTS_FILE=/etc/default/klipper
    # Keep any existing configuration untouched
    [ -f $DEFAULTS_FILE ] && return
    report_status "Installing system start configuration..."
    sudo /bin/sh -c "cat > $DEFAULTS_FILE" <<EOF
# Configuration for /etc/init.d/klipper
KLIPPY_USER=$USER
KLIPPY_EXEC=${PYTHONDIR}/bin/python
KLIPPY_ARGS="${SRCDIR}/klippy/klippy.py ${HOME}/printer.cfg -l /tmp/klippy.log"
EOF
}
# Step 5: Start host software
start_software()
{
    report_status "Launching Klipper host software..."
    sudo /etc/init.d/klipper restart
}
# Helper functions
report_status()
{
    echo -e "\n\n###### $1"
}
verify_ready()
{
    if [ "$EUID" -eq 0 ]; then
        echo "This script must not run as root"
        # NOTE(review): "exit -1" is non-portable; bash maps it to 255
        exit -1
    fi
}
# Force script to exit if an error occurs
set -e
# Find SRCDIR from the pathname of this script
SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
# Run installation steps defined above
verify_ready
install_packages
create_virtualenv
install_script
install_config
start_software

View File

@@ -0,0 +1,102 @@
#!/bin/bash
# This script installs Klipper on an Ubuntu 18.04 machine with Octoprint
# Usage: run as a normal (non-root) user with sudo rights.
PYTHONDIR="${HOME}/klippy-env"
SYSTEMDDIR="/etc/systemd/system"
KLIPPER_USER=$USER
KLIPPER_GROUP=$KLIPPER_USER
# Step 1: Install system packages
install_packages()
{
    # Packages for python cffi
    PKGLIST="virtualenv python-dev libffi-dev build-essential"
    # kconfig requirements
    PKGLIST="${PKGLIST} libncurses-dev"
    # hub-ctrl
    PKGLIST="${PKGLIST} libusb-dev"
    # AVR chip installation and building
    PKGLIST="${PKGLIST} avrdude gcc-avr binutils-avr avr-libc"
    # ARM chip installation and building
    PKGLIST="${PKGLIST} stm32flash libnewlib-arm-none-eabi"
    PKGLIST="${PKGLIST} gcc-arm-none-eabi binutils-arm-none-eabi libusb-1.0"
    # Update system package info
    report_status "Running apt-get update..."
    sudo apt-get update
    # Install desired packages
    report_status "Installing packages..."
    sudo apt-get install --yes ${PKGLIST}
}
# Step 2: Create python virtual environment
create_virtualenv()
{
    report_status "Updating python virtual environment..."
    # Create virtualenv if it doesn't already exist
    [ ! -d ${PYTHONDIR} ] && virtualenv -p python2 ${PYTHONDIR}
    # Install/update dependencies
    ${PYTHONDIR}/bin/pip install -r ${SRCDIR}/scripts/klippy-requirements.txt
}
# Step 3: Install startup script
install_script()
{
    # Create systemd service file (heredoc content is written verbatim
    # to the service file)
    KLIPPER_LOG=/tmp/klippy.log
    report_status "Installing system start script..."
    sudo /bin/sh -c "cat > $SYSTEMDDIR/klipper.service" << EOF
#Systemd service file for klipper
[Unit]
Description=Starts klipper on startup
After=network.target
[Install]
WantedBy=multi-user.target
[Service]
Type=simple
User=$KLIPPER_USER
RemainAfterExit=yes
ExecStart=${PYTHONDIR}/bin/python ${SRCDIR}/klippy/klippy.py ${HOME}/printer.cfg -l ${KLIPPER_LOG}
EOF
    # Use systemctl to enable the klipper systemd service script
    sudo systemctl enable klipper.service
}
# Step 4: Start host software
start_software()
{
    report_status "Launching Klipper host software..."
    sudo systemctl start klipper
}
# Helper functions
report_status()
{
    echo -e "\n\n###### $1"
}
verify_ready()
{
    if [ "$EUID" -eq 0 ]; then
        echo "This script must not run as root"
        # NOTE(review): "exit -1" is non-portable; bash maps it to 255
        exit -1
    fi
}
# Force script to exit if an error occurs
set -e
# Find SRCDIR from the pathname of this script
SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
# Run installation steps defined above
verify_ready
install_packages
create_virtualenv
install_script
start_software

View File

@@ -0,0 +1,78 @@
#!/bin/sh
# System startup script to start the MCU Linux firmware
### BEGIN INIT INFO
# Provides: klipper_mcu
# Required-Start: $local_fs
# Required-Stop:
# Default-Start: 3 4 5
# Default-Stop: 0 1 2 6
# Short-Description: Klipper_MCU daemon
# Description: Starts the MCU for Klipper.
### END INIT INFO
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
DESC="klipper_mcu startup"
NAME="klipper_mcu"
# Host-side "linux process" micro-controller binary and its arguments
# (meaning of the -r flag: see the klipper_mcu usage text - TODO confirm)
KLIPPER_HOST_MCU=/usr/local/bin/klipper_mcu
KLIPPER_HOST_ARGS="-r"
PIDFILE=/var/run/klipper_mcu.pid
# LSB logging helpers (log_daemon_msg, log_action_msg, ...)
. /lib/lsb/init-functions
mcu_host_stop()
{
    # Shutdown existing Klipper instance (if applicable). The goal is to
    # put the GPIO pins in a safe state.
    if [ -c /tmp/klipper_host_mcu ]; then
        log_daemon_msg "Attempting to shutdown host mcu..."
        set -e
        # Use brace groups (not subshells) on failure so that "exit 1"
        # aborts this script; in the original subshell form the exit only
        # terminated the subshell and the script went on to kill the
        # firmware even though it reported being busy.
        ( echo "FORCE_SHUTDOWN" > /tmp/klipper_host_mcu ) 2> /dev/null || { log_action_msg "Firmware busy! Please shutdown Klipper and then retry."; exit 1; }
        sleep 1
        ( echo "FORCE_SHUTDOWN" > /tmp/klipper_host_mcu ) 2> /dev/null || { log_action_msg "Firmware busy! Please shutdown Klipper and then retry."; exit 1; }
        sleep 1
        set +e
    fi
    log_daemon_msg "Stopping klipper host mcu" $NAME
    killproc -p $PIDFILE $KLIPPER_HOST_MCU
}
mcu_host_start()
{
    # Nothing to do if the host mcu binary is not installed
    [ -x $KLIPPER_HOST_MCU ] || return
    # If an instance is already running, shut it down cleanly first
    if [ -c /tmp/klipper_host_mcu ]; then
        mcu_host_stop
    fi
    log_daemon_msg "Starting klipper MCU" $NAME
    start-stop-daemon --start --quiet --exec $KLIPPER_HOST_MCU \
                      --background --pidfile $PIDFILE --make-pidfile \
                      -- $KLIPPER_HOST_ARGS
    log_end_msg $?
}
# Init script entry point - dispatch on the requested action
case "$1" in
start)
    mcu_host_start
    ;;
stop)
    mcu_host_stop
    ;;
restart)
    $0 stop
    $0 start
    ;;
reload|force-reload)
    # There is no configuration to reload
    log_daemon_msg "Reloading configuration not supported" $NAME
    log_end_msg 1
    ;;
status)
    status_of_proc -p $PIDFILE $KLIPPER_HOST_MCU $NAME && exit 0 || exit $?
    ;;
*)  log_action_msg "Usage: /etc/init.d/klipper_mcu {start|stop|status|restart|reload|force-reload}"
    exit 2
    ;;
esac
exit 0

View File

@@ -0,0 +1,119 @@
#!/bin/sh
# System startup script to start the PRU firmware
### BEGIN INIT INFO
# Provides: klipper_pru
# Required-Start: $local_fs
# Required-Stop:
# Default-Start: 3 4 5
# Default-Stop: 0 1 2 6
# Short-Description: Klipper_PRU daemon
# Description: Starts the PRU for Klipper.
### END INIT INFO
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
DESC="klipper_pru startup"
NAME="klipper_pru"
# Host-side "linux process" micro-controller binary and its arguments
# (meaning of the -w/-r flags: see klipper_mcu usage - TODO confirm)
KLIPPER_HOST_MCU=/usr/local/bin/klipper_mcu
KLIPPER_HOST_ARGS="-w -r"
PIDFILE=/var/run/klipper_mcu.pid
# remoteproc sysfs nodes controlling the two PRU cores
RPROC0=/sys/class/remoteproc/remoteproc1
RPROC1=/sys/class/remoteproc/remoteproc2
# LSB logging helpers (log_daemon_msg, log_action_msg, ...)
. /lib/lsb/init-functions
pru_stop()
{
    # Shutdown existing Klipper instance (if applicable). The goal is to
    # put the GPIO pins in a safe state.
    if [ -c /dev/rpmsg_pru30 ]; then
        log_daemon_msg "Attempting to shutdown PRU..."
        set -e
        # Use brace groups (not subshells) on failure so that "exit 1"
        # aborts this script; in the original subshell form the exit only
        # terminated the subshell and the PRU cores were stopped even
        # though the firmware reported being busy.
        ( echo "FORCE_SHUTDOWN" > /dev/rpmsg_pru30 ) 2> /dev/null || { log_action_msg "Firmware busy! Please shutdown Klipper and then retry."; exit 1; }
        sleep 1
        ( echo "FORCE_SHUTDOWN" > /dev/rpmsg_pru30 ) 2> /dev/null || { log_action_msg "Firmware busy! Please shutdown Klipper and then retry."; exit 1; }
        sleep 1
        set +e
    fi
    log_daemon_msg "Stopping pru"
    # Halt both PRU cores via the remoteproc sysfs interface
    echo 'stop' > $RPROC0/state
    echo 'stop' > $RPROC1/state
}
pru_start()
{
    # If PRU firmware is already running, shut it down cleanly first;
    # otherwise just make sure both PRU cores are stopped.
    if [ -c /dev/rpmsg_pru30 ]; then
        pru_stop
    else
        echo 'stop' > $RPROC0/state
        echo 'stop' > $RPROC1/state
    fi
    sleep 1
    log_daemon_msg "Starting pru"
    # Boot both PRU cores via the remoteproc sysfs interface
    echo 'start' > $RPROC0/state
    echo 'start' > $RPROC1/state
#    log_daemon_msg "Loading ADC module"
#    echo 'BB-ADC' > /sys/devices/platform/bone_capemgr/slots
}
mcu_host_stop()
{
    # Shutdown existing Klipper instance (if applicable). The goal is to
    # put the GPIO pins in a safe state.
    if [ -c /tmp/klipper_host_mcu ]; then
        log_daemon_msg "Attempting to shutdown host mcu..."
        set -e
        # Use brace groups (not subshells) on failure so that "exit 1"
        # aborts this script; in the original subshell form the exit only
        # terminated the subshell and the script went on to kill the
        # firmware even though it reported being busy.
        ( echo "FORCE_SHUTDOWN" > /tmp/klipper_host_mcu ) 2> /dev/null || { log_action_msg "Firmware busy! Please shutdown Klipper and then retry."; exit 1; }
        sleep 1
        ( echo "FORCE_SHUTDOWN" > /tmp/klipper_host_mcu ) 2> /dev/null || { log_action_msg "Firmware busy! Please shutdown Klipper and then retry."; exit 1; }
        sleep 1
        set +e
    fi
    log_daemon_msg "Stopping klipper host mcu" $NAME
    killproc -p $PIDFILE $KLIPPER_HOST_MCU
}
mcu_host_start()
{
    # Nothing to do if the host mcu binary is not installed
    [ -x $KLIPPER_HOST_MCU ] || return
    # If an instance is already running, shut it down cleanly first
    if [ -c /tmp/klipper_host_mcu ]; then
        mcu_host_stop
    fi
    log_daemon_msg "Starting klipper MCU" $NAME
    start-stop-daemon --start --quiet --exec $KLIPPER_HOST_MCU \
                      --background --pidfile $PIDFILE --make-pidfile \
                      -- $KLIPPER_HOST_ARGS
    log_end_msg $?
}
# Init script entry point - dispatch on the requested action
case "$1" in
start)
    pru_start
    mcu_host_start
    ;;
stop)
    pru_stop
    mcu_host_stop
    ;;
restart)
    $0 stop
    $0 start
    ;;
reload|force-reload)
    # There is no configuration to reload
    log_daemon_msg "Reloading configuration not supported" $NAME
    log_end_msg 1
    ;;
status)
    status_of_proc -p $PIDFILE $KLIPPER_HOST_MCU $NAME && exit 0 || exit $?
    ;;
*)  # Fix: the usage text referred to /etc/init.d/klipper, but this is
    # the klipper_pru script (see "Provides: klipper_pru" above)
    log_action_msg "Usage: /etc/init.d/klipper_pru {start|stop|status|restart|reload|force-reload}"
    exit 2
    ;;
esac
exit 0

54
scripts/klipper-start.sh Normal file
View File

@@ -0,0 +1,54 @@
#!/bin/sh
# System startup script for Klipper 3d-printer host code
### BEGIN INIT INFO
# Provides: klipper
# Required-Start: $local_fs
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Klipper daemon
# Description: Starts the Klipper daemon.
### END INIT INFO
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
DESC="klipper daemon"
NAME="klipper"
# Defaults file supplies KLIPPY_EXEC, KLIPPY_USER and KLIPPY_ARGS
# (used by the case dispatch below)
DEFAULTS_FILE=/etc/default/klipper
PIDFILE=/var/run/klipper.pid
# LSB logging helpers (log_daemon_msg, log_action_msg, ...)
. /lib/lsb/init-functions
# Read defaults file
[ -r $DEFAULTS_FILE ] && . $DEFAULTS_FILE
# Init script entry point - dispatch on the requested action
case "$1" in
start)  log_daemon_msg "Starting klipper" $NAME
        start-stop-daemon --start --quiet --exec $KLIPPY_EXEC \
                          --background --pidfile $PIDFILE --make-pidfile \
                          --chuid $KLIPPY_USER --user $KLIPPY_USER \
                          -- $KLIPPY_ARGS
        log_end_msg $?
        ;;
stop)   log_daemon_msg "Stopping klipper" $NAME
        killproc -p $PIDFILE $KLIPPY_EXEC
        RETVAL=$?
        # Remove the stale pidfile on a successful stop
        [ $RETVAL -eq 0 ] && [ -e "$PIDFILE" ] && rm -f $PIDFILE
        log_end_msg $RETVAL
        ;;
restart) log_daemon_msg "Restarting klipper" $NAME
        $0 stop
        $0 start
        ;;
reload|force-reload)
        # There is no configuration to reload
        log_daemon_msg "Reloading configuration not supported" $NAME
        log_end_msg 1
        ;;
status)
        status_of_proc -p $PIDFILE $KLIPPY_EXEC $NAME && exit 0 || exit $?
        ;;
*)      log_action_msg "Usage: /etc/init.d/klipper {start|stop|status|restart|reload|force-reload}"
        exit 2
        ;;
esac
exit 0

View File

@@ -0,0 +1,23 @@
#!/bin/bash
# Uninstall script for raspbian/debian type installations
# (assumes the init.d/update-rc.d install method, not systemd)
# Stop Klipper Service
echo "#### Stopping Klipper Service.."
sudo service klipper stop
# Remove Klipper from Startup
echo
echo "#### Removing Klipper from Startup.."
sudo update-rc.d -f klipper remove
# Remove Klipper from Services
echo
echo "#### Removing Klipper Service.."
sudo rm -f /etc/init.d/klipper /etc/default/klipper
# Notify user of method to remove Klipper source code
# (the source tree and virtualenv are deliberately left in place)
echo
echo "The Klipper system files have been removed."
echo
echo "The following command is typically used to remove local files:"
echo " rm -rf ~/klippy-env ~/klipper"

View File

@@ -0,0 +1,10 @@
# This file describes the Python virtualenv package requirements for
# the Klipper host software (Klippy). These package requirements are
# typically installed via the command:
# pip install -r klippy-requirements.txt
cffi==1.14.6
pyserial==3.4
greenlet==1.1.2
Jinja2==2.11.3
python-can==3.3.4
markupsafe==1.1.1

610
scripts/logextract.py Normal file
View File

@@ -0,0 +1,610 @@
#!/usr/bin/env python2
# Script to extract config and shutdown information from a klippy.log file
#
# Copyright (C) 2017 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, re, collections, ast
def format_comment(line_num, line):
    # Annotation placed in extracted files: "# <source line number>: <line>"
    return "# {:6d}: {}".format(line_num, line)
######################################################################
# Config file extraction
######################################################################
class GatherConfig:
    """Collect one '===== Config file =====' section from a klippy.log.

    Identical config dumps are de-duplicated: finalize() keeps a single
    representative per unique content in the shared "configs" dict and
    merges the comment annotations of any duplicates into it.
    """
    def __init__(self, configs, line_num, recent_lines, logname):
        self.configs = configs
        self.line_num = line_num
        self.config_num = len(configs) + 1
        self.filename = "%s.config%04d.cfg" % (logname, self.config_num)
        self.config_lines = []
        self.comments = []
    def add_line(self, line_num, line):
        # A line of '=' characters terminates the config section
        if line == '=======================':
            self.finalize()
            return False
        self.config_lines.append(line)
        return True
    def finalize(self):
        # De-duplicate by content; merge comments into the first instance
        key = tuple(self.config_lines)
        canonical = self.configs.get(key)
        if canonical is None:
            canonical = self.configs[key] = self
        else:
            canonical.comments.extend(self.comments)
        canonical.comments.append(format_comment(self.line_num, "config file"))
    def add_comment(self, comment):
        if comment is None:
            return
        self.comments.append(comment)
    def write_file(self):
        # 'wb' with a str payload is intentional - this script targets python2
        body = '\n'.join(self.comments + self.config_lines).strip() + '\n'
        f = open(self.filename, 'wb')
        f.write(body)
        f.close()
######################################################################
# TMC UART message parsing
######################################################################
# Matches log lines that carry a raw tmcuart send/response payload
uart_r = re.compile(r"tmcuart_(?:send|response) oid=[0-9]+ (?:read|write)=")
class TMCUartHelper:
    # Decode raw TMC stepper driver UART messages (bytes framed as 10-bit
    # serial words with start/stop bits) into a readable "(reg@addr=val)"
    # notation.  Messages are validated by re-encoding and comparing.
    def _calc_crc8(self, data):
        # Generate a CRC8-ATM value for a bytearray
        crc = 0
        for b in data:
            for i in range(8):
                if (crc >> 7) ^ (b & 0x01):
                    crc = (crc << 1) ^ 0x07
                else:
                    crc = (crc << 1)
                crc &= 0xff
                b >>= 1
        return crc
    def _add_serial_bits(self, data):
        # Add serial start and stop bits to a message in a bytearray
        out = 0
        pos = 0
        for d in data:
            # Each byte becomes 10 bits: start(0), 8 data bits, stop(1)
            b = (d << 1) | 0x200
            out |= (b << pos)
            pos += 10
        res = bytearray()
        for i in range((pos+7)//8):
            res.append((out >> (i*8)) & 0xff)
        return res
    def _encode_read(self, sync, addr, reg):
        # Generate a uart read register message
        msg = bytearray([sync, addr, reg])
        msg.append(self._calc_crc8(msg))
        return self._add_serial_bits(msg)
    def _encode_write(self, sync, addr, reg, val):
        # Generate a uart write register message
        msg = bytearray([sync, addr, reg, (val >> 24) & 0xff,
                         (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff])
        msg.append(self._calc_crc8(msg))
        return self._add_serial_bits(msg)
    def _decode_read(self, data):
        # Extract a uart read request message (5 framed bytes)
        if len(data) != 5:
            return
        # Convert data into a long integer for easy manipulation
        mval = pos = 0
        for d in bytearray(data):
            mval |= d << pos
            pos += 8
        # Extract register value (skipping the serial framing bits)
        addr = (mval >> 11) & 0xff
        reg = (mval >> 21) & 0xff
        # Verify start/stop bits and crc by re-encoding and comparing
        encoded_data = self._encode_read(0xf5, addr, reg)
        if data != encoded_data:
            return "Invalid: %s" % (self.pretty_print(addr, reg),)
        return self.pretty_print(addr, reg)
    def _decode_reg(self, data):
        # Extract a uart write or read response message (10 framed bytes)
        if len(data) != 10:
            return
        # Convert data into a long integer for easy manipulation
        mval = pos = 0
        for d in bytearray(data):
            mval |= d << pos
            pos += 8
        # Extract register value (skipping the serial framing bits)
        addr = (mval >> 11) & 0xff
        reg = (mval >> 21) & 0xff
        val = ((((mval >> 31) & 0xff) << 24) | (((mval >> 41) & 0xff) << 16)
               | (((mval >> 51) & 0xff) << 8) | ((mval >> 61) & 0xff))
        # Messages with addr 0xff use a 0x05 sync byte instead of 0xf5
        sync = 0xf5
        if addr == 0xff:
            sync = 0x05
        # Verify start/stop bits and crc
        encoded_data = self._encode_write(sync, addr, reg, val)
        if data != encoded_data:
            #print("Got %s vs %s" % (repr(data), repr(encoded_data)))
            return "Invalid:%s" % (self.pretty_print(addr, reg, val),)
        return self.pretty_print(addr, reg, val)
    def pretty_print(self, addr, reg, val=None):
        # Format: "(reg@addr)" read request, "(reg@addr=val)" write
        # (write bit 0x80 stripped), "(reg@addr==val)" read response
        if val is None:
            return "(%x@%x)" % (reg, addr)
        if reg & 0x80:
            return "(%x@%x=%08x)" % (reg & ~0x80, addr, val)
        return "(%x@%x==%08x)" % (reg, addr, val)
    def parse_msg(self, msg):
        # Dispatch on framed message length: 10 bytes = write/response,
        # 5 bytes = read request, 0 = empty payload
        data = bytearray(msg)
        if len(data) == 10:
            return self._decode_reg(data)
        elif len(data) == 5:
            return self._decode_read(data)
        elif len(data) == 0:
            return ""
        return "(length?)"
######################################################################
# Shutdown extraction
######################################################################
def add_high_bits(val, ref, mask):
    # Widen a truncated counter: return the full value closest to "ref"
    # whose low bits (selected by "mask", e.g. 0xf or 0xffffffff) are "val".
    window = mask + 1
    half = window // 2
    return ref - half + ((val - (ref & mask) + half) & mask)
# Regex fragments shared by the shutdown message parsers below
count_s = r"(?P<count>[0-9]+)"
time_s = r"(?P<time>[0-9]+[.][0-9]+)"
esttime_s = r"(?P<esttime>[0-9]+[.][0-9]+)"
shortseq_s = r"(?P<shortseq>[0-9a-f])"
# Matches serialqueue "Sent" dump lines; only the low 4 bits of the
# sequence number appear in the log (the hex digit after "seq: 1")
sent_r = re.compile(r"^Sent " + count_s + " " + esttime_s + " " + time_s
                    + " [0-9]+: seq: 1" + shortseq_s + ",")
# MCU "Sent" shutdown message parsing
class MCUSentStream:
    def __init__(self, mcu, count):
        # mcu: parent MCUStream; count: number of messages in this dump
        self.mcu = mcu
        self.sent_stream = []
        self.send_count = count
    def parse_line(self, line_num, line):
        m = sent_r.match(line)
        if m is not None:
            # Reconstruct the full sequence number from the 4-bit value in
            # the log and the mcu's sequence counter at shutdown
            shortseq = int(m.group('shortseq'), 16)
            seq = (self.mcu.shutdown_seq + int(m.group('count'))
                   - self.send_count)
            seq = add_high_bits(shortseq, seq, 0xf)
            ts = float(m.group('time'))
            esttime = float(m.group('esttime'))
            # Record mappings used to correlate "Receive" and stats lines
            self.mcu.sent_time_to_seq[(esttime, seq & 0xf)] = seq
            self.mcu.sent_seq_to_time[seq] = ts
            line = self.mcu.annotate(line, seq, ts)
            self.sent_stream.append((ts, line_num, line))
            return True, None
        # Not a "Sent" line - let the parent mcu parser try it
        return self.mcu.parse_line(line_num, line)
    def get_lines(self):
        return self.sent_stream
# Matches serialqueue "Receive" dump lines
receive_r = re.compile(r"^Receive: " + count_s + " " + time_s + " " + esttime_s
                       + " [0-9]+: seq: 1" + shortseq_s + ",")
# MCU "Receive" shutdown message parsing
class MCUReceiveStream:
    def __init__(self, mcu):
        # mcu: parent MCUStream holding the correlation tables
        self.mcu = mcu
        self.receive_stream = []
    def parse_line(self, line_num, line):
        m = receive_r.match(line)
        if m is not None:
            shortseq = int(m.group('shortseq'), 16)
            ts = float(m.group('time'))
            esttime = float(m.group('esttime'))
            # Correlate with the "Sent" entry recorded at the sender's
            # estimated time for the previous sequence number
            seq = self.mcu.sent_time_to_seq.get((esttime, (shortseq - 1) & 0xf))
            if seq is not None:
                self.mcu.receive_seq_to_time[seq + 1] = ts
            line = self.mcu.annotate(line, seq, ts)
            self.receive_stream.append((ts, line_num, line))
            return True, None
        # Not a "Receive" line - let the parent mcu parser try it
        return self.mcu.parse_line(line_num, line)
    def get_lines(self):
        return self.receive_stream
stats_seq_s = r" send_seq=(?P<sseq>[0-9]+) receive_seq=(?P<rseq>[0-9]+) "
serial_dump_r = re.compile(r"^Dumping serial stats: .*" + stats_seq_s)
send_dump_r = re.compile(r"^Dumping send queue " + count_s + " messages$")
receive_dump_r = re.compile(r"^Dumping receive queue " + count_s + " messages$")
# Extracts mcu_freq and the clock_est=(<time> <clock> <freq>) sample
clock_r = re.compile(r"^clocksync state: mcu_freq=(?P<freq>[0-9]+) .*"
                     + r" clock_est=\((?P<st>[^ ]+)"
                     + r" (?P<sc>[0-9]+) (?P<f>[^ ]+)\)")
repl_seq_r = re.compile(r": seq: 1" + shortseq_s)
clock_s = r"(?P<clock>[0-9]+)"
repl_clock_r = re.compile(r"clock=" + clock_s + r"(?: |$)")
repl_uart_r = re.compile(r"tmcuart_(?:response|send) oid=[0-9]+"
                         + r" (?:read|write)=(?P<msg>(?:'[^']*'"
                         + r'|"[^"]*"))(?: |$)')
# MCU shutdown message parsing
class MCUStream:
    def __init__(self, name):
        self.name = name
        # Correlation tables filled in by MCUSentStream/MCUReceiveStream
        self.sent_time_to_seq = {}
        self.sent_seq_to_time = {}
        self.receive_seq_to_time = {}
        self.mcu_freq = 1
        # (sample_time, sample_clock, freq) from the clocksync dump
        self.clock_est = (0., 0., 1.)
        self.shutdown_seq = None
    def trans_clock(self, clock, ts):
        # Convert a (truncated 32-bit) mcu clock value seen near system
        # time "ts" into an estimated system time
        sample_time, sample_clock, freq = self.clock_est
        exp_clock = int(sample_clock + (ts - sample_time) * freq)
        ext_clock = add_high_bits(clock, exp_clock, 0xffffffff)
        return sample_time + (ext_clock - sample_clock) / freq
    def annotate(self, line, seq, ts):
        # Rewrite a log line, appending decoded sequence numbers, clock
        # times and TMC uart payloads in parentheses
        if seq is not None:
            line = repl_seq_r.sub(r"\g<0>(%d)" % (seq,), line)
        def clock_update(m):
            return m.group(0).rstrip() + "(%.6f) " % (
                self.trans_clock(int(m.group('clock')), ts),)
        line = repl_clock_r.sub(clock_update, line)
        def uart_update(m):
            # The payload is logged as a quoted python literal
            msg = TMCUartHelper().parse_msg(ast.literal_eval(m.group('msg')))
            return m.group(0).rstrip() + "%s " % (msg,)
        line = repl_uart_r.sub(uart_update, line)
        if self.name != 'mcu':
            line = "mcu '%s': %s" % (self.name, line)
        return line
    def parse_line(self, line_num, line):
        m = clock_r.match(line)
        if m is not None:
            self.mcu_freq = int(m.group('freq'))
            st = float(m.group('st'))
            sc = int(m.group('sc'))
            f = float(m.group('f'))
            self.clock_est = (st, sc, f)
        m = serial_dump_r.match(line)
        if m is not None:
            # Sequence counter value the mcu connection had at shutdown
            self.shutdown_seq = int(m.group('rseq'))
        m = send_dump_r.match(line)
        if m is not None:
            return True, MCUSentStream(self, int(m.group('count')))
        m = receive_dump_r.match(line)
        if m is not None:
            return True, MCUReceiveStream(self)
        return False, None
    def get_lines(self):
        # Output lines are carried by the sent/receive sub-streams
        return []
stepper_move_r = re.compile(r"^queue_step " + count_s + r": t=" + clock_s
                            + r" ")
# Stepper "queue_step" shutdown message parsing
class StepperStream:
    def __init__(self, name, mcu_name, mcus):
        self.name = name
        self.stepper_stream = []
        # Use the owning mcu's clock estimate to convert clocks to systime
        self.clock_est = (0., 0., 1.)
        mcu = mcus.get(mcu_name)
        if mcu is not None:
            self.clock_est = mcu.clock_est
    def parse_line(self, line_num, line):
        m = stepper_move_r.match(line)
        if m is not None:
            # Convert clock to systime
            clock = int(m.group('clock'))
            sample_time, sample_clock, freq = self.clock_est
            ts = sample_time + (clock - sample_clock) / freq
            # Add systime to log (after the "t=<clock>" field)
            parts = line.split(' ', 4)
            parts[0] = "%s queue_step" % (self.name,)
            parts[2] += '(%.6f)' % (ts,)
            self.stepper_stream.append((ts, line_num, ' '.join(parts)))
            return True, None
        return False, None
    def get_lines(self):
        return self.stepper_stream
trapq_move_r = re.compile(r"^move " + count_s + r": pt=" + time_s)
# Kinematic "trapq" shutdown message parsing
class TrapQStream:
    def __init__(self, name, mcus):
        self.name = name
        self.trapq_stream = []
        # Use the main mcu's frequency and clock estimate to translate
        # the dump's print_time values into system times
        self.mcu_freq = 1
        self.clock_est = (0., 0., 1.)
        mcu = mcus.get("mcu")
        if mcu is not None:
            self.mcu_freq = mcu.mcu_freq
            self.clock_est = mcu.clock_est
    def parse_line(self, line_num, line):
        m = trapq_move_r.match(line)
        if m is not None:
            # Convert print_time to systime (via the mcu clock)
            pt = float(m.group('time'))
            clock = pt * self.mcu_freq
            sample_time, sample_clock, freq = self.clock_est
            ts = sample_time + (clock - sample_clock) / freq
            # Add systime to log (after the "pt=<time>" field)
            parts = line.split(' ', 4)
            parts[0] = "%s move" % (self.name,)
            parts[2] += '(%.6f)' % (ts,)
            self.trapq_stream.append((ts, line_num, ' '.join(parts)))
            return True, None
        return False, None
    def get_lines(self):
        return self.trapq_stream
# Matches logged gcode input lines: Read <time>: '<command>'
gcode_cmd_r = re.compile(r"^Read " + time_s + r": (?P<gcode>['\"].*)$")
# Splits "name=value" pairs on a "gcode state:" line
varlist_split_r = re.compile(r"([^ ]+)=")
# G-Code shutdown message parsing
class GCodeStream:
    """Collect the gcode input dump and emit a restartable .gcode file."""
    def __init__(self, shutdown_line_num, logname):
        # (timestamp, line_num, line) tuples merged into the final output
        self.gcode_stream = []
        # Raw (still quoted) gcode command strings from the dump
        self.gcode_commands = []
        # Restore header built from the "gcode state:" line (see below)
        self.gcode_state = ''
        self.gcode_filename = "%s.gcode%05d" % (logname, shutdown_line_num)
    def extract_params(self, line):
        # Parse "name=value ..." pairs; values are python literals
        parts = varlist_split_r.split(line)
        try:
            return { parts[i]: ast.literal_eval(parts[i+1].strip())
                     for i in range(1, len(parts), 2) }
        except Exception:
            # Fix: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit.  A malformed state line still
            # simply yields no parameters.
            return {}
    def handle_gcode_state(self, line):
        # Translate the logged gcode state into commands that restore it
        kv = self.extract_params(line)
        out = ['; Start g-code state restore', 'G28']
        if not kv.get('absolute_coord', kv.get('absolutecoord')):
            out.append('G91')
        if not kv.get('absolute_extrude', kv.get('absoluteextrude')):
            out.append('M83')
        lp = kv['last_position']
        out.append('G1 X%f Y%f Z%f F%f' % (
            lp[0], lp[1], lp[2], kv['speed'] * 60.))
        bp = kv['base_position']
        if bp[:3] != [0., 0., 0.]:
            out.append('; Must manually set base position...')
        out.append('G92 E%f' % (lp[3] - bp[3],))
        hp = kv['homing_position']
        if hp != [0., 0., 0., 0.]:
            out.append('; Must manually set homing position...')
        if abs(kv['speed_factor'] - 1. / 60.) > .000001:
            out.append('M220 S%f' % (kv['speed_factor'] * 60. * 100.,))
        if kv['extrude_factor'] != 1.:
            out.append('M221 S%f' % (kv['extrude_factor'] * 100.,))
        out.extend(['; End of state restore', '', ''])
        self.gcode_state = '\n'.join(out)
    def parse_line(self, line_num, line):
        m = gcode_cmd_r.match(line)
        if m is not None:
            ts = float(m.group('time'))
            self.gcode_stream.append((ts, line_num, line))
            self.gcode_commands.append(m.group('gcode'))
            return True, None
        return False, None
    def get_lines(self):
        # Produce output gcode stream (only when commands were captured)
        if self.gcode_stream:
            # Commands were logged as quoted literals - unquote them
            data = [ast.literal_eval(gc) for gc in self.gcode_commands]
            # 'wb' with a str payload is intentional (python2 script)
            f = open(self.gcode_filename, 'wb')
            f.write(self.gcode_state + ''.join(data))
            f.close()
        return self.gcode_stream
# Matches API server request log lines: Received <time>: {...}
api_cmd_r = re.compile(r"^Received " + time_s + r": \{.*\}$")
# API server shutdown message parsing
class APIStream:
    def __init__(self):
        # (timestamp, line_num, line) tuples for each parsed request
        self.api_stream = []
    def parse_line(self, line_num, line):
        # Match "Received <time>: {...}" lines emitted by the API server
        match = api_cmd_r.match(line)
        if match is None:
            return False, None
        self.api_stream.append((float(match.group('time')), line_num, line))
        return True, None
    def get_lines(self):
        return self.api_stream
# Regexes for the high-level markers in a klippy shutdown dump
stats_r = re.compile(r"^Stats " + time_s + ": ")
mcu_r = re.compile(r"MCU '(?P<mcu>[^']+)' (is_)?shutdown: (?P<reason>.*)$")
stepper_r = re.compile(r"^Dumping stepper '(?P<name>[^']*)' \((?P<mcu>[^)]+)\) "
                       + count_s + r" queue_step:$")
trapq_r = re.compile(r"^Dumping trapq '(?P<name>[^']*)' " + count_s
                     + r" moves:$")
gcode_r = re.compile(r"Dumping gcode input " + count_s + r" blocks$")
gcode_state_r = re.compile(r"^gcode state: ")
api_r = re.compile(r"Dumping " + count_s + r" requests for client "
                   + r"(?P<client>[0-9]+)" + r"$")
# Stats message parsing and high-level message dispatch
class StatsStream:
    def __init__(self, shutdown_line_num, logname):
        self.shutdown_line_num = shutdown_line_num
        self.gcode_stream = GCodeStream(shutdown_line_num, logname)
        # Map of mcu name -> MCUStream for every mcu seen in the dump
        self.mcus = {}
        self.first_stat_time = self.last_stat_time = None
        self.stats_stream = []
    def reset_first_stat_time(self):
        # Called after replaying the pre-shutdown "recent lines" so the
        # post-shutdown stats window starts at the shutdown itself
        self.first_stat_time = self.last_stat_time
    def get_stat_times(self):
        return self.first_stat_time, self.last_stat_time
    def check_stats_seq(self, ts, line):
        # Parse stats
        parts = line.split()
        mcu = ""
        keyparts = {}
        for p in parts[2:]:
            if '=' not in p:
                # A bare word introduces a new "<mcu>" key prefix
                mcu = p
                continue
            name, val = p.split('=', 1)
            keyparts[mcu + name] = val
        # Clamp the stats timestamp into the window implied by each
        # mcu's send/receive sequence numbers at that moment
        min_ts = 0
        max_ts = 999999999999
        for mcu_name, mcu in self.mcus.items():
            sname = '%s:send_seq' % (mcu_name,)
            rname = '%s:receive_seq' % (mcu_name,)
            if sname not in keyparts:
                continue
            sseq = int(keyparts[sname])
            rseq = int(keyparts[rname])
            min_ts = max(min_ts, mcu.sent_seq_to_time.get(sseq-1, 0),
                         mcu.receive_seq_to_time.get(rseq, 0))
            max_ts = min(max_ts, mcu.sent_seq_to_time.get(sseq, 999999999999),
                         mcu.receive_seq_to_time.get(rseq+1, 999999999999))
        return min(max(ts, min_ts + 0.00000001), max_ts - 0.00000001)
    def parse_line(self, line_num, line):
        # Returns (did_parse, new_stream); a non-None new_stream becomes
        # the active sub-parser for following lines (see GatherShutdown)
        m = stats_r.match(line)
        if m is not None:
            ts = float(m.group('time'))
            self.last_stat_time = ts
            if self.first_stat_time is None:
                self.first_stat_time = ts
            self.stats_stream.append((ts, line_num, line))
            return True, None
        # Non-stats lines get a None timestamp (fixed up in get_lines)
        self.stats_stream.append((None, line_num, line))
        m = mcu_r.match(line)
        if m is not None:
            mcu_name = m.group('mcu')
            mcu_stream = MCUStream(mcu_name)
            self.mcus[mcu_name] = mcu_stream
            return True, mcu_stream
        m = stepper_r.match(line)
        if m is not None:
            return True, StepperStream(m.group('name'), m.group('mcu'),
                                       self.mcus)
        m = trapq_r.match(line)
        if m is not None:
            return True, TrapQStream(m.group('name'), self.mcus)
        m = gcode_r.match(line)
        if m is not None:
            return True, self.gcode_stream
        m = gcode_state_r.match(line)
        if m is not None:
            self.gcode_stream.handle_gcode_state(line)
            return True, None
        m = api_r.match(line)
        if m is not None:
            return True, APIStream()
        return False, None
    def get_lines(self):
        # Ignore old stats (older than ~5s before the first message
        # timestamp recorded by any mcu stream)
        all_ts = []
        for mcu_name, mcu in self.mcus.items():
            all_ts.extend(mcu.sent_seq_to_time.values())
            all_ts.extend(mcu.receive_seq_to_time.values())
        if not all_ts:
            return []
        min_stream_ts = min(all_ts)
        max_stream_ts = max(all_ts)
        for i, info in enumerate(self.stats_stream):
            if info[0] is not None and info[0] >= min_stream_ts - 5.:
                del self.stats_stream[:i]
                break
        # Improve accuracy of stats timestamps
        last_ts = self.stats_stream[0][0]
        for i, (ts, line_num, line) in enumerate(self.stats_stream):
            if ts is not None:
                last_ts = self.check_stats_seq(ts, line)
            elif (line_num >= self.shutdown_line_num
                  and last_ts <= max_stream_ts):
                # Lines after the shutdown marker sort after all messages
                last_ts = max_stream_ts + 0.00000001
            self.stats_stream[i] = (last_ts, line_num, line)
        return self.stats_stream
# Main handler for creating shutdown diagnostics file
class GatherShutdown:
    def __init__(self, configs, line_num, recent_lines, logname):
        self.filename = "%s.shutdown%05d" % (logname, line_num)
        self.comments = []
        if configs:
            # Annotate and reference the most recently extracted config
            configs_by_id = {c.config_num: c for c in configs.values()}
            config = configs_by_id[max(configs_by_id.keys())]
            config.add_comment(format_comment(line_num, recent_lines[-1][1]))
            self.comments.append("# config %s" % (config.filename,))
        self.stats_stream = StatsStream(line_num, logname)
        self.active_streams = [self.stats_stream]
        self.all_streams = list(self.active_streams)
        # Replay the lines leading up to the shutdown trigger line
        for line_num, line in recent_lines:
            self.parse_line(line_num, line)
        self.stats_stream.reset_first_stat_time()
    def add_comment(self, comment):
        if comment is not None:
            self.comments.append(comment)
    def add_line(self, line_num, line):
        # Feed one log line; returns False when the shutdown dump is done
        self.parse_line(line_num, line)
        first, last = self.stats_stream.get_stat_times()
        if first is not None and last > first + 5.:
            # Stop gathering 5 seconds of stats after the shutdown
            self.finalize()
            return False
        if (line.startswith('Git version')
            or line.startswith('Start printer at')
            or line == '===== Config file ====='):
            # A printer restart marker also ends this shutdown dump
            self.finalize()
            return False
        return True
    def parse_line(self, line_num, line):
        # Offer the line to the active sub-parser first, falling back to
        # the stats/dispatch stream (always last in active_streams)
        for s in self.active_streams:
            did_parse, new_stream = s.parse_line(line_num, line)
            if did_parse:
                if new_stream is not None:
                    self.all_streams.append(new_stream)
                    self.active_streams = [new_stream, self.stats_stream]
                break
    def finalize(self):
        # Make sure no timestamp goes backwards
        streams = [p.get_lines() for p in self.all_streams]
        for s in streams:
            for i in range(1, len(s)):
                if s[i-1][0] > s[i][0]:
                    s[i] = (s[i-1][0], s[i][1], s[i][2])
        # Produce output sorted by timestamp
        out = [i for s in streams for i in s]
        out.sort()
        out = [i[2] for i in out]
        f = open(self.filename, 'wb')
        f.write('\n'.join(self.comments + out))
        f.close()
######################################################################
# Startup
######################################################################
def main():
    """Extract config files and shutdown diagnostics from a klippy.log.

    Usage: logextract.py <klippy.log>.  Output files are written to the
    current directory using the log name as a prefix.
    """
    logname = sys.argv[1]
    last_git = last_start = None
    configs = {}
    handler = None
    # Rolling window of recent lines so a new handler can inspect the
    # context that preceded its trigger line
    recent_lines = collections.deque([], 200)
    # Parse log file ("with" fixes the file handle leak; the file is
    # opened in binary mode on purpose - this script targets python2)
    with open(logname, 'rb') as f:
        # enumerate(f, 1) replaces the original manual "line_num += 1"
        for line_num, line in enumerate(f, 1):
            line = line.rstrip()
            recent_lines.append((line_num, line))
            if handler is not None:
                ret = handler.add_line(line_num, line)
                if ret:
                    continue
                recent_lines.clear()
                handler = None
            if line.startswith('Git version'):
                last_git = format_comment(line_num, line)
            elif line.startswith('Start printer at'):
                last_start = format_comment(line_num, line)
            elif line == '===== Config file =====':
                handler = GatherConfig(configs, line_num, recent_lines,
                                       logname)
                handler.add_comment(last_git)
                handler.add_comment(last_start)
            elif 'shutdown: ' in line or line.startswith('Dumping '):
                handler = GatherShutdown(configs, line_num, recent_lines,
                                         logname)
                handler.add_comment(last_git)
                handler.add_comment(last_start)
    if handler is not None:
        handler.finalize()
    # Write found config files
    for cfg in configs.values():
        cfg.write_file()
if __name__ == '__main__':
    main()

31
scripts/make_version.py Normal file
View File

@@ -0,0 +1,31 @@
#!/usr/bin/env python2
# Get the version number for klippy
#
# Copyright (C) 2018 Lucas Fink <software@lfcode.ca>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
from __future__ import print_function
import argparse
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../klippy'))
import util
def main(argv):
    """Print the klippy git version suffixed with the target distro name.

    argv: command line arguments (excluding the program name).
    """
    p = argparse.ArgumentParser()
    p.add_argument(
        'distroname',
        help='Name of distro this package is intended for'
    )
    # Fix: parse the argv the caller passed in; the original called
    # parse_args() with no arguments, silently ignoring this parameter
    # and always re-reading sys.argv.
    args = p.parse_args(argv)
    print(util.get_git_version(from_file=False),
          args.distroname.replace(' ', ''), sep='-')
if __name__ == '__main__':
    main(sys.argv[1:])

283
scripts/motan/analyzers.py Normal file
View File

@@ -0,0 +1,283 @@
# Log data analyzing functions
#
# Copyright (C) 2021 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import math, collections
import readlog
######################################################################
# Analysis code
######################################################################
# Analyzer handlers: {name: class, ...}
AHandlers = {}
# Calculate a derivative (position to velocity, or velocity to accel)
class GenDerivative:
    ParametersMin = ParametersMax = 1
    DataSets = [
        ('derivative(<dataset>)', 'Derivative of the given dataset'),
    ]
    def __init__(self, amanager, name_parts):
        self.amanager = amanager
        self.source = name_parts[1]
        amanager.setup_dataset(self.source)
    def get_label(self):
        # Map the source label/units one derivative "up" (mm -> mm/s, ...)
        src_label = self.amanager.get_label(self.source)
        name = src_label['label']
        units = src_label['units']
        if '(mm)' in units:
            swaps = [('Position', 'Velocity'), ('(mm)', '(mm/s)')]
        elif '(mm/s)' in units:
            swaps = [('Velocity', 'Acceleration'), ('(mm/s)', '(mm/s^2)')]
        else:
            return {'label': 'Derivative', 'units': 'Unknown'}
        for old, new in swaps:
            for a, b in ((old, new), (old.lower(), new.lower())):
                name = name.replace(a, b)
                units = units.replace(a, b)
        return {'label': name, 'units': units}
    def generate_data(self):
        # Finite difference of adjacent samples; the first entry is
        # duplicated so the output length matches the input length
        rate = 1. / self.amanager.get_segment_time()
        samples = self.amanager.get_datasets()[self.source]
        diffs = [(nxt - cur) * rate
                 for cur, nxt in zip(samples, samples[1:])]
        return [diffs[0]] + diffs
# Register under the "derivative(...)" dataset name
AHandlers["derivative"] = GenDerivative
# Calculate an integral (accel to velocity, or velocity to position)
class GenIntegral:
    ParametersMin = 1
    ParametersMax = 3
    DataSets = [
        ('integral(<dataset>)', 'Integral of the given dataset'),
        ('integral(<dataset1>,<dataset2>)',
         'Integral with dataset2 as reference'),
        ('integral(<dataset1>,<dataset2>,<half_life>)',
         'Integral with weighted half-life time'),
    ]
    def __init__(self, amanager, name_parts):
        self.amanager = amanager
        self.source = name_parts[1]
        amanager.setup_dataset(self.source)
        self.ref = None
        # Default smoothing half-life (seconds) used to pull the running
        # integral towards the reference dataset (when one is given)
        self.half_life = 0.015
        if len(name_parts) >= 3:
            self.ref = name_parts[2]
            amanager.setup_dataset(self.ref)
        if len(name_parts) == 4:
            self.half_life = float(name_parts[3])
    def get_label(self):
        # Map the source label/units one integral "down" (mm/s -> mm, ...)
        label = self.amanager.get_label(self.source)
        lname = label['label']
        units = label['units']
        if '(mm/s)' in units:
            rep = [('Velocity', 'Position'), ('(mm/s)', '(mm)')]
        elif '(mm/s^2)' in units:
            rep = [('Acceleration', 'Velocity'), ('(mm/s^2)', '(mm/s)')]
        else:
            return {'label': 'Integral', 'units': 'Unknown'}
        for old, new in rep:
            lname = lname.replace(old, new).replace(old.lower(), new.lower())
            units = units.replace(old, new).replace(old.lower(), new.lower())
        return {'label': lname, 'units': units}
    def generate_data(self):
        seg_time = self.amanager.get_segment_time()
        src = self.amanager.get_datasets()[self.source]
        # Subtract the mean so the integral doesn't drift; with a
        # reference, also account for its net change over the window
        offset = sum(src) / len(src)
        total = 0.
        ref = None
        if self.ref is not None:
            ref = self.amanager.get_datasets()[self.ref]
            offset -= (ref[-1] - ref[0]) / (len(src) * seg_time)
            total = ref[0]
        # Per-sample exponential decay weight for the given half-life
        src_weight = 1.
        if self.half_life:
            src_weight = math.exp(math.log(.5) * seg_time / self.half_life)
        ref_weight = 1. - src_weight
        data = [0.] * len(src)
        for i, v in enumerate(src):
            total += (v - offset) * seg_time
            if ref is not None:
                # Blend the running integral towards the reference value
                total = src_weight * total + ref_weight * ref[i]
            data[i] = total
        return data
# Register under the "integral(...)" dataset name
AHandlers["integral"] = GenIntegral
# Calculate a kinematic stepper position from the toolhead requested position
class GenKinematicPosition:
    ParametersMin = ParametersMax = 1
    DataSets = [
        ('kin(<stepper>)', 'Stepper position derived from toolhead kinematics'),
    ]
    def __init__(self, amanager, name_parts):
        self.amanager = amanager
        stepper = name_parts[1]
        # Only cartesian and corexy kinematics are supported
        status = self.amanager.get_initial_status()
        kin = status['configfile']['settings']['printer']['kinematics']
        if kin not in ['cartesian', 'corexy']:
            raise amanager.error("Unsupported kinematics '%s'" % (kin,))
        if stepper not in ['stepper_x', 'stepper_y', 'stepper_z']:
            raise amanager.error("Unknown stepper '%s'" % (stepper,))
        if kin == 'corexy' and stepper in ['stepper_x', 'stepper_y']:
            # corexy: stepper_x tracks x+y and stepper_y tracks x-y
            self.source1 = 'trapq(toolhead,x)'
            self.source2 = 'trapq(toolhead,y)'
            if stepper == 'stepper_x':
                self.generate_data = self.generate_data_corexy_plus
            else:
                self.generate_data = self.generate_data_corexy_minus
            amanager.setup_dataset(self.source1)
            amanager.setup_dataset(self.source2)
        else:
            # cartesian (or corexy z): the stepper follows its own axis
            self.source1 = 'trapq(toolhead,%s)' % (stepper[-1:],)
            self.source2 = None
            self.generate_data = self.generate_data_passthrough
            amanager.setup_dataset(self.source1)
    def get_label(self):
        return {'label': 'Position', 'units': 'Position\n(mm)'}
    def generate_data_corexy_plus(self):
        datasets = self.amanager.get_datasets()
        data1 = datasets[self.source1]
        data2 = datasets[self.source2]
        return [d1 + d2 for d1, d2 in zip(data1, data2)]
    def generate_data_corexy_minus(self):
        datasets = self.amanager.get_datasets()
        data1 = datasets[self.source1]
        data2 = datasets[self.source2]
        return [d1 - d2 for d1, d2 in zip(data1, data2)]
    def generate_data_passthrough(self):
        return self.amanager.get_datasets()[self.source1]
# Register under the "kin(...)" dataset name
AHandlers["kin"] = GenKinematicPosition
# Calculate a toolhead x/y position from corexy stepper positions
class GenCorexyPosition:
    # Analyzer takes three parameters: axis and the two stepper datasets
    ParametersMin = ParametersMax = 3
    DataSets = [
        ('corexy(x,<stepper>,<stepper>)', 'Toolhead x position from steppers'),
        ('corexy(y,<stepper>,<stepper>)', 'Toolhead y position from steppers'),
    ]
    def __init__(self, amanager, name_parts):
        # name_parts: ['corexy', 'x'|'y', <stepper ds>, <stepper ds>]
        self.amanager = amanager
        self.is_plus = name_parts[1] == 'x'
        self.source1, self.source2 = name_parts[2:]
        for src in (self.source1, self.source2):
            amanager.setup_dataset(src)
    def get_label(self):
        axis = 'x' if self.is_plus else 'y'
        return {'label': 'Derived %s position' % (axis,),
                'units': 'Position\n(mm)'}
    def generate_data(self):
        # x is half the sum of the steppers; y is half the difference
        all_data = self.amanager.get_datasets()
        pairs = zip(all_data[self.source1], all_data[self.source2])
        if self.is_plus:
            return [.5 * (a + b) for a, b in pairs]
        return [.5 * (a - b) for a, b in pairs]
AHandlers["corexy"] = GenCorexyPosition
# Calculate a position deviation
class GenDeviation:
    # Analyzer takes exactly two dataset parameters
    ParametersMin = ParametersMax = 2
    DataSets = [
        ('deviation(<dataset1>,<dataset2>)', 'Difference between datasets'),
    ]
    def __init__(self, amanager, name_parts):
        # name_parts: ['deviation', <dataset1>, <dataset2>]
        self.amanager = amanager
        self.source1, self.source2 = name_parts[1:]
        for src in (self.source1, self.source2):
            amanager.setup_dataset(src)
    def get_label(self):
        lbl1 = self.amanager.get_label(self.source1)
        lbl2 = self.amanager.get_label(self.source2)
        if lbl1['units'] != lbl2['units']:
            # Mismatched units - the difference has no meaningful unit
            return {'label': 'Deviation', 'units': 'Unknown'}
        # Insert "Deviation" after the first line of the units string
        unit_lines = lbl1['units'].split('\n')
        units = '\n'.join([unit_lines[0]] + ['Deviation'] + unit_lines[1:])
        return {'label': lbl1['label'] + ' deviation', 'units': units}
    def generate_data(self):
        # Pointwise difference of the two datasets
        all_data = self.amanager.get_datasets()
        pairs = zip(all_data[self.source1], all_data[self.source2])
        return [a - b for a, b in pairs]
AHandlers["deviation"] = GenDeviation
######################################################################
# Analyzer management and data generation
######################################################################
# Return a description of available analyzers
def list_datasets():
    # Collect the DataSets documentation from every registered handler,
    # sorted by handler name for stable output
    datasets = []
    for key in sorted(AHandlers):
        datasets.extend(AHandlers[key].DataSets)
    return datasets
# Manage raw and generated data samples
class AnalyzerManager:
    # Class-level placeholder; replaced per-instance with lmanager.error
    error = None
    def __init__(self, lmanager, segment_time):
        # lmanager: readlog.LogManager supplying the raw log data
        # segment_time: sampling interval (seconds) for generated data
        self.lmanager = lmanager
        self.error = lmanager.error
        self.segment_time = segment_time
        # Datasets read directly from the log vs derived by analyzers
        self.raw_datasets = collections.OrderedDict()
        self.gen_datasets = collections.OrderedDict()
        # name -> list of samples (filled by generate_datasets())
        self.datasets = {}
        self.dataset_times = []
        self.duration = 5.
    def set_duration(self, duration):
        # Number of seconds of data to generate
        self.duration = duration
    def get_segment_time(self):
        return self.segment_time
    def get_datasets(self):
        return self.datasets
    def get_dataset_times(self):
        return self.dataset_times
    def get_initial_status(self):
        return self.lmanager.get_initial_status()
    def setup_dataset(self, name):
        # Register a dataset by name (eg, "trapq(toolhead,x)"), creating
        # either a raw log handler or an analyzer handler; idempotent.
        name = name.strip()
        if name in self.raw_datasets:
            return self.raw_datasets[name]
        if name in self.gen_datasets:
            return self.gen_datasets[name]
        name_parts = readlog.name_split(name)
        if name_parts[0] in self.lmanager.available_dataset_types():
            # Dataset is read directly from the log
            hdl = self.lmanager.setup_dataset(name)
            self.raw_datasets[name] = hdl
        else:
            # Dataset is derived by one of the analyzer classes
            cls = AHandlers.get(name_parts[0])
            if cls is None:
                raise self.error("Unknown dataset '%s'" % (name,))
            num_param = len(name_parts) - 1
            if num_param < cls.ParametersMin or num_param > cls.ParametersMax:
                raise self.error("Invalid parameters to dataset '%s'" % (name,))
            hdl = cls(self, name_parts)
            self.gen_datasets[name] = hdl
        self.datasets[name] = []
        return hdl
    def get_label(self, dataset):
        # Return the {'label':, 'units':} description for a dataset
        hdl = self.raw_datasets.get(dataset)
        if hdl is None:
            hdl = self.gen_datasets.get(dataset)
        if hdl is None:
            raise self.error("Unknown dataset '%s'" % (dataset,))
        return hdl.get_label()
    def generate_datasets(self):
        # Generate raw data
        list_hdls = [(self.datasets[name], hdl)
                     for name, hdl in self.raw_datasets.items()]
        initial_start_time = self.lmanager.get_initial_start_time()
        start_time = t = self.lmanager.get_start_time()
        end_time = start_time + self.duration
        # Sample every raw dataset at fixed segment_time intervals
        while t < end_time:
            t += self.segment_time
            self.dataset_times.append(t - initial_start_time)
            for dl, hdl in list_hdls:
                dl.append(hdl.pull_data(t))
        # Generate analyzer data (raw data must be complete first)
        for name, hdl in self.gen_datasets.items():
            self.datasets[name] = hdl.generate_data()

View File

@@ -0,0 +1,204 @@
#!/usr/bin/env python
# Tool to subscribe to motion data and log it to a disk file
#
# Copyright (C) 2020-2021 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, os, optparse, socket, select, json, errno, time, zlib
INDEX_UPDATE_TIME = 5.0
ClientInfo = {'program': 'motan_data_logger', 'version': 'v0.1'}
def webhook_socket_create(uds_filename):
    # Open a non-blocking unix domain socket to the Klipper API server,
    # retrying while the server is not yet accepting connections.
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.setblocking(0)
    sys.stderr.write("Waiting for connect to %s\n" % (uds_filename,))
    while 1:
        try:
            sock.connect(uds_filename)
        except socket.error as e:
            if e.errno != errno.ECONNREFUSED:
                # Unexpected error - report and give up
                sys.stderr.write("Unable to connect socket %s [%d,%s]\n"
                                 % (uds_filename, e.errno,
                                    errno.errorcode[e.errno]))
                sys.exit(-1)
            # Server not ready yet - wait briefly and retry
            time.sleep(0.1)
            continue
        break
    sys.stderr.write("Connection.\n")
    return sock
class LogWriter:
    # Append-only writer producing a gzip stream of 0x03-delimited records
    def __init__(self, filename):
        self.file = open(filename, "wb")
        # wbits=31 selects a gzip container around the deflate stream
        self.comp = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                                     zlib.DEFLATED, 31)
        # raw_pos counts uncompressed bytes; file_pos counts bytes on disk
        self.raw_pos = self.file_pos = 0
    def add_data(self, data):
        # Append one record (terminated by a 0x03 delimiter byte)
        compressed = self.comp.compress(data + b"\x03")
        self.file.write(compressed)
        self.file_pos += len(compressed)
        self.raw_pos += len(data) + 1
    def flush(self, flag=zlib.Z_FULL_FLUSH):
        # Flush compressor state to disk; returns the resulting file
        # position (usable later as a seek point when reading).
        if not self.raw_pos:
            return self.file_pos
        pending = self.comp.flush(flag)
        self.file.write(pending)
        self.file_pos += len(pending)
        return self.file_pos
    def close(self):
        # Finalize the gzip stream and release resources
        self.flush(zlib.Z_FINISH)
        self.file.close()
        self.file = None
        self.comp = None
class DataLogger:
    # Connect to the Klipper API server, subscribe to motion data, and
    # stream all received messages to a compressed on-disk log (plus a
    # periodic index used for seeking).
    def __init__(self, uds_filename, log_prefix):
        # IO
        self.webhook_socket = webhook_socket_create(uds_filename)
        self.poll = select.poll()
        self.poll.register(self.webhook_socket, select.POLLIN | select.POLLHUP)
        # Buffer for a partial message left over from the last recv
        self.socket_data = b""
        # Data log
        self.logger = LogWriter(log_prefix + ".json.gz")
        self.index = LogWriter(log_prefix + ".index.gz")
        # Handlers
        # msg_id -> one-shot callback for a pending query response
        self.query_handlers = {}
        # subscription id -> callback for asynchronous updates
        self.async_handlers = {}
        # get_status databasing
        self.db = {}
        self.next_index_time = 0.
        # Start login process
        self.send_query("info", "info", {"client_info": ClientInfo},
                        self.handle_info)
    def error(self, msg):
        sys.stderr.write(msg + "\n")
    def finish(self, msg):
        # Report msg, close both log files, and exit the process
        self.error(msg)
        self.logger.close()
        self.index.close()
        sys.exit(0)
    # Unix Domain Socket IO
    def send_query(self, msg_id, method, params, cb):
        # Send a webhooks request; cb is invoked on the matching response
        self.query_handlers[msg_id] = cb
        msg = {"id": msg_id, "method": method, "params": params}
        cm = json.dumps(msg, separators=(',', ':')).encode()
        # Messages on the socket are delimited by a 0x03 byte
        self.webhook_socket.send(cm + b"\x03")
    def process_socket(self):
        # Read and dispatch all complete messages available on the socket
        data = self.webhook_socket.recv(4096)
        if not data:
            self.finish("Socket closed")
        parts = data.split(b"\x03")
        parts[0] = self.socket_data + parts[0]
        # Final element is an incomplete message (possibly empty)
        self.socket_data = parts.pop()
        for part in parts:
            try:
                msg = json.loads(part)
            except:
                self.error("ERROR: Unable to parse line")
                continue
            # Every successfully parsed message is written to the log
            self.logger.add_data(part)
            msg_q = msg.get("q")
            if msg_q is not None:
                # Asynchronous subscription update
                hdl = self.async_handlers.get(msg_q)
                if hdl is not None:
                    hdl(msg, part)
                continue
            msg_id = msg.get("id")
            hdl = self.query_handlers.get(msg_id)
            if hdl is not None:
                # Response to a pending query; handlers are one-shot
                del self.query_handlers[msg_id]
                hdl(msg, part)
                if not self.query_handlers:
                    # All startup queries answered - write first index entry
                    self.flush_index()
                continue
            self.error("ERROR: Message with unknown id")
    def run(self):
        # Main loop - poll the socket until interrupted
        try:
            while 1:
                res = self.poll.poll(1000.)
                for fd, event in res:
                    if fd == self.webhook_socket.fileno():
                        self.process_socket()
        except KeyboardInterrupt as e:
            self.finish("Keyboard Interrupt")
    # Query response handlers
    def send_subscribe(self, msg_id, method, params, cb=None, async_cb=None):
        # Issue a subscription request; async updates arrive tagged with
        # {"q": msg_id} per the response_template below
        if cb is None:
            cb = self.handle_dump
        if async_cb is not None:
            self.async_handlers[msg_id] = async_cb
        params["response_template"] = {"q": msg_id}
        self.send_query(msg_id, method, params, cb)
    def handle_info(self, msg, raw_msg):
        if msg["result"]["state"] != "ready":
            self.finish("Klipper not in ready state")
        self.send_query("list", "objects/list", {}, self.handle_list)
    def handle_list(self, msg, raw_msg):
        # Subscribe to status updates for every printer object
        subreq = {o: None for o in msg["result"]["objects"]}
        self.send_subscribe("status", "objects/subscribe", {"objects": subreq},
                            self.handle_subscribe, self.handle_async_db)
    def handle_subscribe(self, msg, raw_msg):
        result = msg["result"]
        self.next_index_time = result["eventtime"] + INDEX_UPDATE_TIME
        self.db["status"] = status = result["status"]
        # Subscribe to trapq and stepper queue updates
        motion_report = status.get("motion_report", {})
        for trapq in motion_report.get("trapq", []):
            self.send_subscribe("trapq:" + trapq, "motion_report/dump_trapq",
                                {"name": trapq})
        for stepper in motion_report.get("steppers", []):
            self.send_subscribe("stepq:" + stepper,
                                "motion_report/dump_stepper", {"name": stepper})
        # Subscribe to additional sensor data
        config = status["configfile"]["settings"]
        for cfgname in config.keys():
            if cfgname == "adxl345" or cfgname.startswith("adxl345 "):
                aname = cfgname.split()[-1]
                self.send_subscribe("adxl345:" + aname, "adxl345/dump_adxl345",
                                    {"sensor": aname})
            if cfgname.startswith("angle "):
                aname = cfgname.split()[1]
                self.send_subscribe("angle:" + aname, "angle/dump_angle",
                                    {"sensor": aname})
    def handle_dump(self, msg, raw_msg):
        # Record the subscription's initial result in the index database
        msg_id = msg["id"]
        if "result" not in msg:
            self.error("Unable to subscribe to '%s': %s"
                       % (msg_id, msg.get("error", {}).get("message", "")))
            return
        self.db.setdefault("subscriptions", {})[msg_id] = msg["result"]
    def flush_index(self):
        # Write an index entry pointing at the current data log position
        self.db['file_position'] = self.logger.flush()
        self.index.add_data(json.dumps(self.db, separators=(',', ':')).encode())
        # Subsequent index entries only carry incremental status updates
        self.db = {"status": {}}
    def handle_async_db(self, msg, raw_msg):
        # Merge incremental get_status updates; periodically flush index
        params = msg["params"]
        db_status = self.db['status']
        for k, v in params.get("status", {}).items():
            db_status.setdefault(k, {}).update(v)
        eventtime = params['eventtime']
        if eventtime >= self.next_index_time:
            self.next_index_time = eventtime + INDEX_UPDATE_TIME
            self.flush_index()
def nice():
    """Lower this process's scheduling priority (best effort).

    Reduces the chance the logger steals cpu time from Klipper.  Failures
    are ignored, but only the expected ones: AttributeError when os.nice()
    does not exist on the platform (eg, Windows) and OSError when the
    priority change is not permitted.  The original bare "except" would
    also have swallowed KeyboardInterrupt/SystemExit.
    """
    try:
        # Try to re-nice writing process
        os.nice(10)
    except (AttributeError, OSError):
        pass
def main():
    # Parse the command line, lower process priority, then stream data
    # from the given Klipper socket into the named log files.
    usage = "%prog [options] <socket filename> <log name>"
    parser = optparse.OptionParser(usage)
    options, args = parser.parse_args()
    if len(args) != 2:
        parser.error("Incorrect number of arguments")
    nice()
    logger = DataLogger(args[0], args[1])
    logger.run()
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,149 @@
#!/usr/bin/env python
# Script to perform motion analysis and graphing
#
# Copyright (C) 2019-2021 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, optparse, ast
import matplotlib
import readlog, analyzers
try:
import urlparse
except:
import urllib.parse as urlparse
######################################################################
# Graphing
######################################################################
def plot_motion(amanager, graphs, log_prefix):
    # Build a matplotlib figure with one subplot row per graph; each graph
    # is a list of (dataset_name, plot_params) pairs.  Returns the figure.
    # Generate data
    for graph in graphs:
        for dataset, plot_params in graph:
            amanager.setup_dataset(dataset)
    amanager.generate_datasets()
    datasets = amanager.get_datasets()
    times = amanager.get_dataset_times()
    # Build plot
    fontP = matplotlib.font_manager.FontProperties()
    fontP.set_size('x-small')
    fig, rows = matplotlib.pyplot.subplots(nrows=len(graphs), sharex=True)
    if len(graphs) == 1:
        # subplots() returns a bare axes (not a list) when nrows == 1
        rows = [rows]
    rows[0].set_title("Motion Analysis (%s)" % (log_prefix,))
    for graph, graph_ax in zip(graphs, rows):
        # Datasets with a second distinct unit go on a twin y axis;
        # a third distinct unit falls back to a generic "Unknown" label
        graph_units = graph_twin_units = twin_ax = None
        for dataset, plot_params in graph:
            label = amanager.get_label(dataset)
            ax = graph_ax
            if graph_units is None:
                # First dataset establishes the primary y axis units
                graph_units = label['units']
                ax.set_ylabel(graph_units)
            elif label['units'] != graph_units:
                if graph_twin_units is None:
                    # Second distinct unit - create the twin y axis
                    ax = twin_ax = graph_ax.twinx()
                    graph_twin_units = label['units']
                    ax.set_ylabel(graph_twin_units)
                elif label['units'] == graph_twin_units:
                    ax = twin_ax
                else:
                    graph_units = "Unknown"
                    ax.set_ylabel(graph_units)
            pparams = {'label': label['label'], 'alpha': 0.8}
            pparams.update(plot_params)
            ax.plot(times, datasets[dataset], **pparams)
        if twin_ax is not None:
            # Merge legends from both y axes into a single legend box
            li1, la1 = graph_ax.get_legend_handles_labels()
            li2, la2 = twin_ax.get_legend_handles_labels()
            twin_ax.legend(li1 + li2, la1 + la2, loc='best', prop=fontP)
        else:
            graph_ax.legend(loc='best', prop=fontP)
        graph_ax.grid(True)
    rows[-1].set_xlabel('Time (s)')
    return fig
######################################################################
# Startup
######################################################################
def setup_matplotlib(output_to_file):
    # Import matplotlib submodules, selecting the non-interactive 'Agg'
    # backend for file output.  The backend must be chosen before
    # matplotlib.pyplot is imported, so statement order here matters.
    global matplotlib
    if output_to_file:
        matplotlib.use('Agg')
    import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
    import matplotlib.ticker
def parse_graph_description(desc):
    # Split "dataset?key=val&key2=val2" into (dataset, params dict).
    # Known float parameters (eg, alpha) are converted from strings.
    dataset, sep, query = desc.partition('?')
    if not sep:
        return (desc, {})
    params = dict(urlparse.parse_qsl(query))
    for fkey in ['alpha']:
        if fkey in params:
            params[fkey] = float(params[fkey])
    return (dataset, params)
def list_datasets():
    # Print every dataset description from the log readers and the
    # analyzers, then exit the program.
    available = readlog.list_datasets() + analyzers.list_datasets()
    lines = ["\nAvailable datasets:\n"]
    for dsname, desc in available:
        lines.append("%-24s: %s\n" % (dsname, desc))
    lines.append("\n")
    sys.stdout.write("".join(lines))
    sys.exit(0)
def main():
    # Command-line entry point: parse options, load the log, generate the
    # requested datasets, and render the graph to screen or file.
    # Parse command-line arguments
    usage = "%prog [options] <logname>"
    opts = optparse.OptionParser(usage)
    opts.add_option("-o", "--output", type="string", dest="output",
                    default=None, help="filename of output graph")
    opts.add_option("-s", "--skip", type="float", default=0.,
                    help="Set the start time to graph")
    opts.add_option("-d", "--duration", type="float", default=5.,
                    help="Number of seconds to graph")
    opts.add_option("--segment-time", type="float", default=0.000100,
                    help="Analysis segment time (default 0.000100 seconds)")
    opts.add_option("-g", "--graph", help="Graph to generate (python literal)")
    opts.add_option("-l", "--list-datasets", action="store_true",
                    help="List available datasets")
    options, args = opts.parse_args()
    if options.list_datasets:
        list_datasets()
    if len(args) != 1:
        opts.error("Incorrect number of arguments")
    log_prefix = args[0]
    # Open data files
    lmanager = readlog.LogManager(log_prefix)
    lmanager.setup_index()
    lmanager.seek_time(options.skip)
    amanager = analyzers.AnalyzerManager(lmanager, options.segment_time)
    amanager.set_duration(options.duration)
    # Default graphs to draw
    graph_descs = [
        ["trapq(toolhead,velocity)?color=green"],
        ["trapq(toolhead,accel)?color=green"],
        ["deviation(stepq(stepper_x),kin(stepper_x))?color=blue"],
    ]
    if options.graph is not None:
        # --graph takes a python literal (list of lists of descriptions)
        graph_descs = ast.literal_eval(options.graph)
    graphs = [[parse_graph_description(g) for g in graph_row]
              for graph_row in graph_descs]
    # Draw graph (backend must be configured before plotting)
    setup_matplotlib(options.output is not None)
    fig = plot_motion(amanager, graphs, log_prefix)
    # Show graph
    if options.output is None:
        matplotlib.pyplot.show()
    else:
        fig.set_size_inches(8, 6)
        fig.savefig(options.output)
if __name__ == '__main__':
main()

629
scripts/motan/readlog.py Normal file
View File

@@ -0,0 +1,629 @@
# Code for reading data logs produced by data_logger.py
#
# Copyright (C) 2021 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import json, logging, zlib
class error(Exception):
    """Raised on malformed log data or invalid dataset configuration."""
    pass
######################################################################
# Log data handlers
######################################################################
# Log data handlers: {name: class, ...}
LogHandlers = {}
# Extract status fields from log
class HandleStatusField:
    SubscriptionIdParts = 0
    ParametersMin = ParametersMax = 1
    DataSets = [
        ('status(<field>)', 'A get_status field name (separate by periods)'),
    ]
    def __init__(self, lmanager, name, name_parts):
        # Field name is period separated, eg "toolhead.print_time"
        self.status_tracker = lmanager.get_status_tracker()
        self.field_name = name_parts[1]
        self.field_parts = name_parts[1].split('.')
        self.next_update_time = 0.
        self.result = None
    def get_label(self):
        return {'label': '%s field' % (self.field_name,),
                'units': 'Unknown'}
    def pull_data(self, req_time):
        # Reuse the cached value until the status database may change
        if req_time >= self.next_update_time:
            db, self.next_update_time = self.status_tracker.pull_status(
                req_time)
            # Walk the nested dicts; missing fields resolve to 0.
            for part in self.field_parts[:-1]:
                db = db.get(part, {})
            self.result = db.get(self.field_parts[-1], 0.)
        return self.result
LogHandlers["status"] = HandleStatusField
# Extract requested position, velocity, and accel from a trapq log
class HandleTrapQ:
    SubscriptionIdParts = 2
    ParametersMin = ParametersMax = 2
    DataSets = [
        ('trapq(<name>,velocity)', 'Requested velocity for the given trapq'),
        ('trapq(<name>,accel)', 'Requested acceleration for the given trapq'),
        ('trapq(<name>,<axis>)', 'Requested axis (x, y, or z) position'),
        ('trapq(<name>,<axis>_velocity)', 'Requested axis velocity'),
        ('trapq(<name>,<axis>_accel)', 'Requested axis acceleration'),
    ]
    def __init__(self, lmanager, name, name_parts):
        self.name = name
        self.jdispatch = lmanager.get_jdispatch()
        # Moves: (print_time, move_t, start_v, accel, start_pos, axes_r)
        self.cur_data = [(0., 0., 0., 0., (0., 0., 0.), (0., 0., 0.))]
        self.data_pos = 0
        tq, trapq_name, datasel = name_parts
        # Map the data selection string to a label and extraction method
        ptypes = {}
        ptypes['velocity'] = {
            'label': '%s velocity' % (trapq_name,),
            'units': 'Velocity\n(mm/s)', 'func': self._pull_velocity
        }
        ptypes['accel'] = {
            'label': '%s acceleration' % (trapq_name,),
            'units': 'Acceleration\n(mm/s^2)', 'func': self._pull_accel
        }
        # Note: loop variable "name" intentionally reused after self.name
        # was stored above ("name" is the axis letter here)
        for axis, name in enumerate("xyz"):
            ptypes['%s' % (name,)] = {
                'label': '%s %s position' % (trapq_name, name), 'axis': axis,
                'units': 'Position\n(mm)', 'func': self._pull_axis_position
            }
            ptypes['%s_velocity' % (name,)] = {
                'label': '%s %s velocity' % (trapq_name, name), 'axis': axis,
                'units': 'Velocity\n(mm/s)', 'func': self._pull_axis_velocity
            }
            ptypes['%s_accel' % (name,)] = {
                'label': '%s %s acceleration' % (trapq_name, name),
                'axis': axis, 'units': 'Acceleration\n(mm/s^2)',
                'func': self._pull_axis_accel
            }
        pinfo = ptypes.get(datasel)
        if pinfo is None:
            raise error("Unknown trapq data selection '%s'" % (datasel,))
        self.label = {'label': pinfo['label'], 'units': pinfo['units']}
        self.axis = pinfo.get('axis')
        self.pull_data = pinfo['func']
    def get_label(self):
        return self.label
    def _find_move(self, req_time):
        # Advance through the move list (reading more log blocks as
        # needed) until a move containing req_time is found.  Returns
        # (move, in_range) - in_range is False when req_time falls in a
        # gap before/after the recorded moves.
        data_pos = self.data_pos
        while 1:
            move = self.cur_data[data_pos]
            print_time, move_t, start_v, accel, start_pos, axes_r = move
            if req_time <= print_time + move_t:
                return move, req_time >= print_time
            data_pos += 1
            if data_pos < len(self.cur_data):
                self.data_pos = data_pos
                continue
            jmsg = self.jdispatch.pull_msg(req_time, self.name)
            if jmsg is None:
                # End of log - report the last known move as out of range
                return move, False
            self.cur_data = jmsg['data']
            self.data_pos = data_pos = 0
    def _pull_axis_position(self, req_time):
        move, in_range = self._find_move(req_time)
        print_time, move_t, start_v, accel, start_pos, axes_r = move
        # Clamp time into the move so gaps hold the last position
        mtime = max(0., min(move_t, req_time - print_time))
        dist = (start_v + .5 * accel * mtime) * mtime;
        return start_pos[self.axis] + axes_r[self.axis] * dist
    def _pull_axis_velocity(self, req_time):
        move, in_range = self._find_move(req_time)
        if not in_range:
            return 0.
        print_time, move_t, start_v, accel, start_pos, axes_r = move
        return (start_v + accel * (req_time - print_time)) * axes_r[self.axis]
    def _pull_axis_accel(self, req_time):
        move, in_range = self._find_move(req_time)
        if not in_range:
            return 0.
        print_time, move_t, start_v, accel, start_pos, axes_r = move
        return accel * axes_r[self.axis]
    def _pull_velocity(self, req_time):
        move, in_range = self._find_move(req_time)
        if not in_range:
            return 0.
        print_time, move_t, start_v, accel, start_pos, axes_r = move
        return start_v + accel * (req_time - print_time)
    def _pull_accel(self, req_time):
        move, in_range = self._find_move(req_time)
        if not in_range:
            return 0.
        print_time, move_t, start_v, accel, start_pos, axes_r = move
        return accel
LogHandlers["trapq"] = HandleTrapQ
# Extract positions from queue_step log
class HandleStepQ:
    SubscriptionIdParts = 2
    ParametersMin = 1
    ParametersMax = 2
    DataSets = [
        ('stepq(<stepper>)', 'Commanded position of the given stepper'),
        ('stepq(<stepper>,<time>)', 'Commanded position with smooth time'),
    ]
    def __init__(self, lmanager, name, name_parts):
        self.name = name
        self.stepper_name = name_parts[1]
        self.jdispatch = lmanager.get_jdispatch()
        self.step_data = [(0., 0., 0.), (0., 0., 0.)] # [(time, half_pos, pos)]
        self.data_pos = 0
        # Default smoothing window; optional second parameter overrides it
        self.smooth_time = 0.010
        if len(name_parts) == 3:
            try:
                self.smooth_time = float(name_parts[2])
            except ValueError:
                raise error("Invalid stepq smooth time '%s'" % (name_parts[2],))
    def get_label(self):
        label = '%s position' % (self.stepper_name,)
        return {'label': label, 'units': 'Position\n(mm)'}
    def pull_data(self, req_time):
        # Return the (smoothed) commanded position at req_time
        smooth_time = self.smooth_time
        while 1:
            data_pos = self.data_pos
            step_data = self.step_data
            # Find steps before and after req_time
            next_time, next_halfpos, next_pos = step_data[data_pos + 1]
            if req_time >= next_time:
                if data_pos + 2 < len(step_data):
                    self.data_pos = data_pos + 1
                    continue
                self._pull_block(req_time)
                continue
            last_time, last_halfpos, last_pos = step_data[data_pos]
            # Perform step smoothing
            rtdiff = req_time - last_time
            stime = next_time - last_time
            if stime <= smooth_time:
                # Steps closer together than the window: interpolate
                # linearly between the half-step positions
                pdiff = next_halfpos - last_halfpos
                return last_halfpos + rtdiff * pdiff / stime
            # Steps far apart: ramp for half the window around each step
            stime = .5 * smooth_time
            if rtdiff < stime:
                pdiff = last_pos - last_halfpos
                return last_halfpos + rtdiff * pdiff / stime
            rtdiff = next_time - req_time
            if rtdiff < stime:
                pdiff = last_pos - next_halfpos
                return next_halfpos + rtdiff * pdiff / stime
            return last_pos
    def _pull_block(self, req_time):
        # Refill self.step_data with decoded steps covering req_time
        step_data = self.step_data
        del step_data[:-1]
        self.data_pos = 0
        # Read data block containing requested time frame
        while 1:
            jmsg = self.jdispatch.pull_msg(req_time, self.name)
            if jmsg is None:
                # End of log - hold the last position
                last_time, last_halfpos, last_pos = step_data[0]
                self.step_data.append((req_time + .1, last_pos, last_pos))
                return
            last_time = jmsg['last_step_time']
            if req_time <= last_time:
                break
        # Process block into (time, half_position, position) 3-tuples
        first_time = step_time = jmsg['first_step_time']
        first_clock = jmsg['first_clock']
        step_clock = first_clock - jmsg['data'][0][0]
        # Derive seconds-per-clock from the block's clock/time extents
        cdiff = jmsg['last_clock'] - first_clock
        tdiff = last_time - first_time
        inv_freq = 0.
        if cdiff:
            inv_freq = tdiff / cdiff
        step_dist = jmsg['step_distance']
        step_pos = jmsg['start_position']
        # Each entry is (interval, count, add); negative count means the
        # steps move in the reverse direction
        for interval, raw_count, add in jmsg['data']:
            qs_dist = step_dist
            count = raw_count
            if count < 0:
                qs_dist = -qs_dist
                count = -count
            for i in range(count):
                step_clock += interval
                interval += add
                step_time = first_time + (step_clock - first_clock) * inv_freq
                step_halfpos = step_pos + .5 * qs_dist
                step_pos += qs_dist
                step_data.append((step_time, step_halfpos, step_pos))
LogHandlers["stepq"] = HandleStepQ
# Extract stepper motor phase position
class HandleStepPhase:
    SubscriptionIdParts = 0
    ParametersMin = 1
    ParametersMax = 2
    DataSets = [
        ('step_phase(<driver>)', 'Stepper motor phase of the given stepper'),
        ('step_phase(<driver>,microstep)', 'Microstep position for stepper'),
    ]
    def __init__(self, lmanager, name, name_parts):
        self.name = name
        # Driver name is eg "tmc2209 stepper_x"; the stepper name is the
        # part after the driver type
        self.driver_name = name_parts[1]
        self.stepper_name = " ".join(self.driver_name.split()[1:])
        config = lmanager.get_initial_status()['configfile']['settings']
        if self.driver_name not in config or self.stepper_name not in config:
            raise error("Unable to find stepper driver '%s' config"
                        % (self.driver_name,))
        if len(name_parts) == 3 and name_parts[2] != "microstep":
            raise error("Unknown step_phase selection '%s'" % (name_parts[2],))
        self.report_microsteps = len(name_parts) == 3
        sconfig = config[self.stepper_name]
        # Full phase cycle is 4 full steps worth of microsteps
        self.phases = sconfig["microsteps"]
        if not self.report_microsteps:
            self.phases *= 4
        self.jdispatch = lmanager.get_jdispatch()
        self.jdispatch.add_handler(name, "stepq:" + self.stepper_name)
        # stepq tracking
        self.step_data = [(0., 0), (0., 0)] # [(time, mcu_pos)]
        self.data_pos = 0
        # driver phase tracking
        self.status_tracker = lmanager.get_status_tracker()
        self.next_status_time = 0.
        self.mcu_phase_offset = 0
    def get_label(self):
        if self.report_microsteps:
            return {'label': '%s microstep' % (self.stepper_name,),
                    'units': 'Microstep'}
        return {'label': '%s phase' % (self.stepper_name,), 'units': 'Phase'}
    def _pull_phase_offset(self, req_time):
        # Refresh the driver's reported mcu phase offset from get_status
        db, self.next_status_time = self.status_tracker.pull_status(req_time)
        mcu_phase_offset = db.get(self.driver_name, {}).get('mcu_phase_offset')
        if mcu_phase_offset is None:
            mcu_phase_offset = 0
        self.mcu_phase_offset = mcu_phase_offset
    def pull_data(self, req_time):
        # Return the motor phase (or microstep) index at req_time
        if req_time >= self.next_status_time:
            self._pull_phase_offset(req_time)
        while 1:
            data_pos = self.data_pos
            step_data = self.step_data
            # Find steps before and after req_time
            next_time, next_pos = step_data[data_pos + 1]
            if req_time >= next_time:
                if data_pos + 2 < len(step_data):
                    self.data_pos = data_pos + 1
                    continue
                self._pull_block(req_time)
                continue
            step_pos = step_data[data_pos][1]
            return (step_pos - self.mcu_phase_offset) % self.phases
    def _pull_block(self, req_time):
        # Refill self.step_data with decoded steps covering req_time
        step_data = self.step_data
        del step_data[:-1]
        self.data_pos = 0
        # Read data block containing requested time frame
        while 1:
            jmsg = self.jdispatch.pull_msg(req_time, self.name)
            if jmsg is None:
                # End of log - hold the last position
                last_time, last_pos = step_data[0]
                self.step_data.append((req_time + .1, last_pos))
                return
            last_time = jmsg['last_step_time']
            if req_time <= last_time:
                break
        # Process block into (time, position) 2-tuples
        first_time = step_time = jmsg['first_step_time']
        first_clock = jmsg['first_clock']
        step_clock = first_clock - jmsg['data'][0][0]
        # Derive seconds-per-clock from the block's clock/time extents
        cdiff = jmsg['last_clock'] - first_clock
        tdiff = last_time - first_time
        inv_freq = 0.
        if cdiff:
            inv_freq = tdiff / cdiff
        step_pos = jmsg['start_mcu_position']
        # Each entry is (interval, count, add); negative count means the
        # steps move in the reverse direction
        for interval, raw_count, add in jmsg['data']:
            qs_dist = 1
            count = raw_count
            if count < 0:
                qs_dist = -1
                count = -count
            for i in range(count):
                step_clock += interval
                interval += add
                step_time = first_time + (step_clock - first_clock) * inv_freq
                step_pos += qs_dist
                step_data.append((step_time, step_pos))
LogHandlers["step_phase"] = HandleStepPhase
# Extract accelerometer data
class HandleADXL345:
    SubscriptionIdParts = 2
    ParametersMin = ParametersMax = 2
    DataSets = [
        ('adxl345(<name>,<axis>)', 'Accelerometer for given axis (x, y, or z)'),
    ]
    def __init__(self, lmanager, name, name_parts):
        self.name = name
        self.adxl_name = name_parts[1]
        self.jdispatch = lmanager.get_jdispatch()
        # Bracketing samples used for linear interpolation
        self.next_accel_time = self.last_accel_time = 0.
        self.next_accel = self.last_accel = (0., 0., 0.)
        self.cur_data = []
        self.data_pos = 0
        if name_parts[2] not in 'xyz':
            raise error("Unknown adxl345 data selection '%s'" % (name,))
        self.axis = 'xyz'.index(name_parts[2])
    def get_label(self):
        label = '%s %s acceleration' % (self.adxl_name, 'xyz'[self.axis])
        return {'label': label, 'units': 'Acceleration\n(mm/s^2)'}
    def pull_data(self, req_time):
        # Linearly interpolate acceleration between adjacent samples
        axis = self.axis
        while 1:
            if req_time <= self.next_accel_time:
                adiff = self.next_accel[axis] - self.last_accel[axis]
                tdiff = self.next_accel_time - self.last_accel_time
                rtdiff = req_time - self.last_accel_time
                return self.last_accel[axis] + rtdiff * adiff / tdiff
            if self.data_pos >= len(self.cur_data):
                # Read next data block
                jmsg = self.jdispatch.pull_msg(req_time, self.name)
                if jmsg is None:
                    return 0.
                self.cur_data = jmsg['data']
                self.data_pos = 0
                continue
            # Advance the interpolation window by one sample
            self.last_accel = self.next_accel
            self.last_accel_time = self.next_accel_time
            self.next_accel_time, x, y, z = self.cur_data[self.data_pos]
            self.next_accel = (x, y, z)
            self.data_pos += 1
LogHandlers["adxl345"] = HandleADXL345
# Extract positions from magnetic angle sensor
class HandleAngle:
    SubscriptionIdParts = 2
    ParametersMin = ParametersMax = 1
    DataSets = [
        ('angle(<name>)', 'Angle sensor position'),
    ]
    def __init__(self, lmanager, name, name_parts):
        self.name = name
        self.angle_name = name_parts[1]
        self.jdispatch = lmanager.get_jdispatch()
        # Bracketing samples used for linear interpolation
        self.next_angle_time = self.last_angle_time = 0.
        self.next_angle = self.last_angle = 0.
        self.cur_data = []
        self.data_pos = 0
        self.position_offset = 0.
        self.angle_dist = 1.
        # Determine angle distance from associated stepper's rotation_distance
        config = lmanager.get_initial_status()['configfile']['settings']
        aname = 'angle %s' % (self.angle_name,)
        stepper_name = config.get(aname, {}).get('stepper')
        if stepper_name is not None:
            sconfig = config.get(stepper_name, {})
            rotation_distance = sconfig.get('rotation_distance', 1.)
            gear_ratio = sconfig.get('gear_ratio', ())
            # gear_ratio may be serialized as "n1:d1, n2:d2" - parse it
            if type(gear_ratio) == str: # XXX
                gear_ratio = [[float(v.strip()) for v in gr.split(':')]
                              for gr in gear_ratio.split(',')]
            for n, d in gear_ratio:
                rotation_distance *= d / n
            # Sensor reports 16-bit angle counts per rotation
            self.angle_dist = rotation_distance / 65536.
    def get_label(self):
        label = '%s position' % (self.angle_name,)
        return {'label': label, 'units': 'Position\n(mm)'}
    def pull_data(self, req_time):
        # Linearly interpolate the angle between adjacent samples and
        # convert to a distance via angle_dist/position_offset
        while 1:
            if req_time <= self.next_angle_time:
                pdiff = self.next_angle - self.last_angle
                tdiff = self.next_angle_time - self.last_angle_time
                rtdiff = req_time - self.last_angle_time
                po = rtdiff * pdiff / tdiff
                return ((self.last_angle + po) * self.angle_dist
                        + self.position_offset)
            if self.data_pos >= len(self.cur_data):
                # Read next data block
                jmsg = self.jdispatch.pull_msg(req_time, self.name)
                if jmsg is None:
                    # End of log - hold the last reported position
                    return (self.next_angle * self.angle_dist
                            + self.position_offset)
                self.cur_data = jmsg['data']
                position_offset = jmsg.get('position_offset')
                if position_offset is not None:
                    self.position_offset = position_offset
                self.data_pos = 0
                continue
            # Advance the interpolation window by one sample
            self.last_angle = self.next_angle
            self.last_angle_time = self.next_angle_time
            self.next_angle_time, self.next_angle = self.cur_data[self.data_pos]
            self.data_pos += 1
LogHandlers["angle"] = HandleAngle
######################################################################
# Log reading
######################################################################
# Read, uncompress, and parse messages in a log built by data_logger.py
class JsonLogReader:
    """Iterate json messages from a gzip log of 0x03-delimited records.

    Fixes: the original called logging.exception() without logging being
    imported anywhere in this file (NameError on any malformed record) and
    used a bare "except" around json.loads - narrowed to ValueError (the
    base of json.JSONDecodeError).
    """
    def __init__(self, filename):
        self.file = open(filename, "rb")
        # wbits=31 expects a gzip wrapper around the deflate stream
        self.comp = zlib.decompressobj(31)
        self.msgs = [b""]
    def seek(self, pos):
        # pos must be a position produced by LogWriter.flush(); the
        # stream resumes mid-file, so parse raw deflate data (wbits=-15)
        self.file.seek(pos)
        self.comp = zlib.decompressobj(-15)
    def pull_msg(self):
        # Return the next decoded message, or None at end of file.
        # Malformed records are logged and skipped.
        msgs = self.msgs
        while 1:
            if len(msgs) > 1:
                msg = msgs.pop(0)
                try:
                    json_msg = json.loads(msg)
                except ValueError:
                    logging.exception("Unable to parse line")
                    continue
                return json_msg
            raw_data = self.file.read(8192)
            if not raw_data:
                return None
            data = self.comp.decompress(raw_data)
            parts = data.split(b'\x03')
            # First fragment continues the trailing partial record
            parts[0] = msgs[0] + parts[0]
            self.msgs = msgs = parts
# Store messages in per-subscription queues until handlers are ready for them
class JsonDispatcher:
    """Route logged messages into per-dataset queues with 1s read-ahead.

    Fix: the 'status' time tracking read json_msg.get('toolhead') at the
    top level, but logged messages are {"q": ..., "params": ...} (see
    data_logger.py response_template and handle_async_db), so the print
    time was never found and last_read_time never advanced - defeating
    the read-ahead limit.  The print time lives under
    params -> status -> toolhead.
    """
    def __init__(self, log_prefix):
        # name -> queue; subscription id -> list of queues
        self.names = {}
        self.queues = {}
        self.last_read_time = 0.
        self.log_reader = JsonLogReader(log_prefix + ".json.gz")
        self.is_eof = False
    def check_end_of_data(self):
        return self.is_eof and not any(self.queues.values())
    def add_handler(self, name, subscription_id):
        # Register a dataset; multiple datasets may share a subscription
        self.names[name] = q = []
        self.queues.setdefault(subscription_id, []).append(q)
    def pull_msg(self, req_time, name):
        # Return the next queued message for the named dataset, reading
        # (and fanning out) log messages up to ~1s past req_time
        q = self.names[name]
        while 1:
            if q:
                return q.pop(0)
            if req_time + 1. < self.last_read_time:
                return None
            json_msg = self.log_reader.pull_msg()
            if json_msg is None:
                self.is_eof = True
                return None
            qid = json_msg.get('q')
            if qid == 'status':
                # Track log time from periodic toolhead status updates
                pt = json_msg['params'].get('status', {}).get(
                    'toolhead', {}).get('estimated_print_time')
                if pt is not None:
                    self.last_read_time = pt
            for mq in self.queues.get(qid, []):
                mq.append(json_msg['params'])
######################################################################
# Dataset and log tracking
######################################################################
# Tracking of get_status messages
class TrackStatus:
    def __init__(self, lmanager, name, start_status):
        # start_status: status database at the log's start position
        self.name = name
        self.jdispatch = lmanager.get_jdispatch()
        self.next_status_time = 0.
        self.status = dict(start_status)
        # Pending incremental update, applied on the next pull
        self.next_update = {}
    def pull_status(self, req_time):
        # Return (status_db, valid_until_time) for the given time
        status = self.status
        while 1:
            if req_time < self.next_status_time:
                return status, self.next_status_time
            # Apply the previously read incremental update
            for k, v in self.next_update.items():
                status.setdefault(k, {}).update(v)
            jmsg = self.jdispatch.pull_msg(req_time, self.name)
            if jmsg is None:
                # No more data yet - report status valid for a short time
                self.next_status_time = req_time + 0.100
                self.next_update = {}
                return status, self.next_status_time
            self.next_update = jmsg['status']
            # The update takes effect at its reported print time
            th = self.next_update.get('toolhead', {})
            self.next_status_time = th.get('estimated_print_time', 0.)
# Split a string by commas while keeping parenthesis intact
def param_split(line):
    # Only split on commas at parenthesis depth zero, so nested dataset
    # references like "b(c,d)" stay in one piece
    parts = []
    depth = 0
    start = 0
    for pos, ch in enumerate(line):
        if ch == ',' and not depth:
            parts.append(line[start:pos])
            start = pos + 1
        elif ch == '(':
            depth += 1
        elif ch == ')' and depth:
            depth -= 1
    parts.append(line[start:])
    return parts
# Split a dataset name (eg, "abc(def,ghi)") into parts
def name_split(name):
    """Break "handler(p1,p2,...)" into ["handler", "p1", "p2", ...].

    Raises error if the name lacks a '(' or a trailing ')'.
    """
    if not name.endswith(')') or '(' not in name:
        raise error("Malformed dataset name '%s'" % (name,))
    prefix, _, args = name.partition('(')
    # Strip the trailing ')' and split the parameters at top level
    return [prefix] + param_split(args[:-1])
# Return a description of possible datasets
def list_datasets():
    """Collect the DataSets descriptions from every registered log
    handler, ordered by handler name."""
    return [dataset
            for handler_name in sorted(LogHandlers)
            for dataset in LogHandlers[handler_name].DataSets]
# Main log access management
class LogManager:
    """Top-level access to a motan data capture.

    Opens "<prefix>.index.gz" (periodic status snapshots with file
    positions) and, via JsonDispatcher, "<prefix>.json.gz" (the main
    message log).  Supports seeking by relative time and creating the
    dataset handler objects that extract individual data series.
    """
    # Expose the module "error" exception so callers can catch LogManager.error
    error = error
    def __init__(self, log_prefix):
        # Reader for the periodic index snapshots (used by seek_time())
        self.index_reader = JsonLogReader(log_prefix + ".index.gz")
        # Dispatcher routing the main json log to per-dataset queues
        self.jdispatch = JsonDispatcher(log_prefix)
        self.initial_start_time = self.start_time = 0.
        # Map of dataset name -> handler object (see setup_dataset())
        self.datasets = {}
        # Printer status found at the very start of the capture
        self.initial_status = {}
        # Printer status as of the current seek position
        self.start_status = {}
        # Subscription ids available in this capture (from the index header)
        self.log_subscriptions = {}
        self.status_tracker = None
    def setup_index(self):
        # Read the first index entry to obtain the capture's starting
        # status, starting print time, and the list of subscriptions
        fmsg = self.index_reader.pull_msg()
        self.initial_status = status = fmsg['status']
        self.start_status = dict(status)
        start_time = status['toolhead']['estimated_print_time']
        self.initial_start_time = self.start_time = start_time
        self.log_subscriptions = fmsg.get('subscriptions', {})
    def get_initial_status(self):
        # Status dict found at the start of the capture
        return self.initial_status
    def available_dataset_types(self):
        # Names of all known dataset handlers (values unused)
        return {name: None for name in LogHandlers}
    def get_jdispatch(self):
        return self.jdispatch
    def seek_time(self, req_time):
        # Advance the log position to req_time (seconds relative to the
        # start of the capture), merging index snapshots into
        # start_status along the way
        self.start_time = req_start_time = self.initial_start_time + req_time
        start_status = self.start_status
        # Stop scanning one second before the requested time so the
        # message log can be replayed up to the exact position
        seek_time = max(self.initial_start_time, req_start_time - 1.)
        file_position = 0
        while 1:
            fmsg = self.index_reader.pull_msg()
            if fmsg is None:
                break
            th = fmsg['status']['toolhead']
            ptime = max(th['estimated_print_time'], th.get('print_time', 0.))
            if ptime > seek_time:
                break
            # Merge this snapshot's status updates into start_status
            for k, v in fmsg["status"].items():
                start_status.setdefault(k, {}).update(v)
            file_position = fmsg['file_position']
        if file_position:
            # Jump the main log reader to the snapshot's file offset
            self.jdispatch.log_reader.seek(file_position)
    def get_initial_start_time(self):
        return self.initial_start_time
    def get_start_time(self):
        return self.start_time
    def get_status_tracker(self):
        # Lazily create the shared "status" dataset tracker
        if self.status_tracker is None:
            self.status_tracker = TrackStatus(self, "status", self.start_status)
            self.jdispatch.add_handler("status", "status")
        return self.status_tracker
    def setup_dataset(self, name):
        # Create (or return the cached) handler for a dataset name such
        # as "abc(def,ghi)".  Raises error for an unknown handler, a
        # wrong parameter count, or a subscription not in the capture.
        if name in self.datasets:
            return self.datasets[name]
        name_parts = name_split(name)
        cls = LogHandlers.get(name_parts[0])
        if cls is None:
            raise error("Unknown dataset '%s'" % (name_parts[0],))
        len_pp = len(name_parts) - 1
        if len_pp < cls.ParametersMin or len_pp > cls.ParametersMax:
            raise error("Invalid number of parameters for '%s'" % (name,))
        if cls.SubscriptionIdParts:
            subscription_id = ":".join(name_parts[:cls.SubscriptionIdParts])
            if subscription_id not in self.log_subscriptions:
                raise error("Dataset '%s' not in capture" % (subscription_id,))
            self.jdispatch.add_handler(name, subscription_id)
        self.datasets[name] = hdl = cls(self, name, name_parts)
        return hdl