Klipper version for the plus4

Author: whb0514
Date: 2024-09-02 13:37:34 +08:00
Parent: 653d7a8f6e
Commit: b90736975b
1006 changed files with 1,195,894 additions and 11,114 deletions


@@ -20,8 +20,8 @@ SOURCE_FILES = [
'pyhelper.c', 'serialqueue.c', 'stepcompress.c', 'itersolve.c', 'trapq.c',
'pollreactor.c', 'msgblock.c', 'trdispatch.c',
'kin_cartesian.c', 'kin_corexy.c', 'kin_corexz.c', 'kin_delta.c',
'kin_polar.c', 'kin_rotary_delta.c', 'kin_winch.c', 'kin_extruder.c',
'kin_shaper.c',
'kin_deltesian.c', 'kin_polar.c', 'kin_rotary_delta.c', 'kin_winch.c',
'kin_extruder.c', 'kin_shaper.c', 'kin_idex.c',
]
DEST_LIB = "c_helper.so"
OTHER_FILES = [
@@ -49,6 +49,8 @@ defs_stepcompress = """
, uint64_t clock);
int stepcompress_queue_msg(struct stepcompress *sc
, uint32_t *data, int len);
int stepcompress_queue_mq_msg(struct stepcompress *sc, uint64_t req_clock
, uint32_t *data, int len);
int stepcompress_extract_old(struct stepcompress *sc
, struct pull_history_steps *p, int max
, uint64_t start_clock, uint64_t end_clock);
@@ -58,7 +60,8 @@ defs_stepcompress = """
void steppersync_free(struct steppersync *ss);
void steppersync_set_time(struct steppersync *ss
, double time_offset, double mcu_freq);
int steppersync_flush(struct steppersync *ss, uint64_t move_clock);
int steppersync_flush(struct steppersync *ss, uint64_t move_clock
, uint64_t clear_history_clock);
"""
defs_itersolve = """
@@ -85,14 +88,15 @@ defs_trapq = """
double x_r, y_r, z_r;
};
struct trapq *trapq_alloc(void);
void trapq_free(struct trapq *tq);
void trapq_append(struct trapq *tq, double print_time
, double accel_t, double cruise_t, double decel_t
, double start_pos_x, double start_pos_y, double start_pos_z
, double axes_r_x, double axes_r_y, double axes_r_z
, double start_v, double cruise_v, double accel);
struct trapq *trapq_alloc(void);
void trapq_free(struct trapq *tq);
void trapq_finalize_moves(struct trapq *tq, double print_time);
void trapq_finalize_moves(struct trapq *tq, double print_time
, double clear_history_time);
void trapq_set_position(struct trapq *tq, double print_time
, double pos_x, double pos_y, double pos_z);
int trapq_extract_old(struct trapq *tq, struct pull_move *p, int max
@@ -101,7 +105,6 @@ defs_trapq = """
defs_kin_cartesian = """
struct stepper_kinematics *cartesian_stepper_alloc(char axis);
struct stepper_kinematics *cartesian_reverse_stepper_alloc(char axis);
"""
defs_kin_corexy = """
@@ -117,6 +120,11 @@ defs_kin_delta = """
, double tower_x, double tower_y);
"""
defs_kin_deltesian = """
struct stepper_kinematics *deltesian_stepper_alloc(double arm2
, double arm_x);
"""
defs_kin_polar = """
struct stepper_kinematics *polar_stepper_alloc(char type);
"""
@@ -139,8 +147,8 @@ defs_kin_extruder = """
"""
defs_kin_shaper = """
double input_shaper_get_step_generation_window(int n, double a[]
, double t[]);
double input_shaper_get_step_generation_window(
struct stepper_kinematics *sk);
int input_shaper_set_shaper_params(struct stepper_kinematics *sk, char axis
, int n, double a[], double t[]);
int input_shaper_set_sk(struct stepper_kinematics *sk
@@ -148,6 +156,14 @@ defs_kin_shaper = """
struct stepper_kinematics * input_shaper_alloc(void);
"""
defs_kin_idex = """
void dual_carriage_set_sk(struct stepper_kinematics *sk
, struct stepper_kinematics *orig_sk);
int dual_carriage_set_transform(struct stepper_kinematics *sk
, char axis, double scale, double offs);
struct stepper_kinematics * dual_carriage_alloc(void);
"""
defs_serialqueue = """
#define MESSAGE_MAX 64
struct pull_queue_message {
@@ -168,8 +184,8 @@ defs_serialqueue = """
, uint64_t notify_id);
void serialqueue_pull(struct serialqueue *sq
, struct pull_queue_message *pqm);
void serialqueue_set_baud_adjust(struct serialqueue *sq
, double baud_adjust);
void serialqueue_set_wire_frequency(struct serialqueue *sq
, double frequency);
void serialqueue_set_receive_window(struct serialqueue *sq
, int receive_window);
void serialqueue_set_clock_est(struct serialqueue *sq, double est_freq
@@ -205,8 +221,8 @@ defs_all = [
defs_pyhelper, defs_serialqueue, defs_std, defs_stepcompress,
defs_itersolve, defs_trapq, defs_trdispatch,
defs_kin_cartesian, defs_kin_corexy, defs_kin_corexz, defs_kin_delta,
defs_kin_polar, defs_kin_rotary_delta, defs_kin_winch, defs_kin_extruder,
defs_kin_shaper,
defs_kin_deltesian, defs_kin_polar, defs_kin_rotary_delta, defs_kin_winch,
defs_kin_extruder, defs_kin_shaper, defs_kin_idex,
]
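# A hedged sketch, not part of this commit, of how these declaration strings
# are typically consumed through cffi (assumed usage for illustration):
import cffi
ffi = cffi.FFI()
for d in defs_all:
    ffi.cdef(d)                    # register each block of C declarations
lib = ffi.dlopen(DEST_LIB)         # open the compiled c_helper.so
sk = lib.dual_carriage_alloc()     # e.g. call one of the new idex helpers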
# Update filenames to an absolute path



@@ -49,42 +49,3 @@ cartesian_stepper_alloc(char axis)
}
return sk;
}
static double
cart_reverse_stepper_x_calc_position(struct stepper_kinematics *sk
, struct move *m, double move_time)
{
return -move_get_coord(m, move_time).x;
}
static double
cart_reverse_stepper_y_calc_position(struct stepper_kinematics *sk
, struct move *m, double move_time)
{
return -move_get_coord(m, move_time).y;
}
static double
cart_reverse_stepper_z_calc_position(struct stepper_kinematics *sk
, struct move *m, double move_time)
{
return -move_get_coord(m, move_time).z;
}
struct stepper_kinematics * __visible
cartesian_reverse_stepper_alloc(char axis)
{
struct stepper_kinematics *sk = malloc(sizeof(*sk));
memset(sk, 0, sizeof(*sk));
if (axis == 'x') {
sk->calc_position_cb = cart_reverse_stepper_x_calc_position;
sk->active_flags = AF_X;
} else if (axis == 'y') {
sk->calc_position_cb = cart_reverse_stepper_y_calc_position;
sk->active_flags = AF_Y;
} else if (axis == 'z') {
sk->calc_position_cb = cart_reverse_stepper_z_calc_position;
sk->active_flags = AF_Z;
}
return sk;
}


@@ -0,0 +1,41 @@
// Deltesian kinematics stepper pulse time generation
//
// Copyright (C) 2022 Fabrice Gallet <tircown@gmail.com>
//
// This file may be distributed under the terms of the GNU GPLv3 license.
#include <math.h> // sqrt
#include <stddef.h> // offsetof
#include <stdlib.h> // malloc
#include <string.h> // memset
#include "compiler.h" // __visible
#include "itersolve.h" // struct stepper_kinematics
#include "trapq.h" // move_get_coord
struct deltesian_stepper {
struct stepper_kinematics sk;
double arm2, arm_x;
};
static double
deltesian_stepper_calc_position(struct stepper_kinematics *sk, struct move *m
, double move_time)
{
struct deltesian_stepper *ds = container_of(
sk, struct deltesian_stepper, sk);
struct coord c = move_get_coord(m, move_time);
double dx = c.x - ds->arm_x;
return sqrt(ds->arm2 - dx*dx) + c.z;
}
struct stepper_kinematics * __visible
deltesian_stepper_alloc(double arm2, double arm_x)
{
struct deltesian_stepper *ds = malloc(sizeof(*ds));
memset(ds, 0, sizeof(*ds));
ds->arm2 = arm2;
ds->arm_x = arm_x;
ds->sk.calc_position_cb = deltesian_stepper_calc_position;
ds->sk.active_flags = AF_X | AF_Z;
return &ds->sk;
}
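The deltesian solver above maps a toolhead (x, z) coordinate to a carriage height: the carriage must sit sqrt(arm2 - dx*dx) above the nozzle, where dx is the horizontal distance to the tower. A minimal Python sketch of the same formula (arm2 is the squared arm length and arm_x the tower X offset, as in the struct above):

import math

def deltesian_carriage_height(x, z, arm2, arm_x):
    # Mirrors deltesian_stepper_calc_position(): height the carriage needs
    # so an arm of length sqrt(arm2) reaches the point (x, z)
    dx = x - arm_x
    return math.sqrt(arm2 - dx * dx) + z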

klippy/chelper/kin_idex.c (new file, 81 lines)

@@ -0,0 +1,81 @@
// Idex dual carriage kinematics
//
// Copyright (C) 2023 Dmitry Butyugin <dmbutyugin@google.com>
//
// This file may be distributed under the terms of the GNU GPLv3 license.
#include <stddef.h> // offsetof
#include <stdlib.h> // malloc
#include <string.h> // memset
#include "compiler.h" // __visible
#include "itersolve.h" // struct stepper_kinematics
#include "trapq.h" // struct move
#define DUMMY_T 500.0
struct dual_carriage_stepper {
struct stepper_kinematics sk;
struct stepper_kinematics *orig_sk;
struct move m;
double x_scale, x_offs, y_scale, y_offs;
};
double
dual_carriage_calc_position(struct stepper_kinematics *sk, struct move *m
, double move_time)
{
struct dual_carriage_stepper *dc = container_of(
sk, struct dual_carriage_stepper, sk);
struct coord pos = move_get_coord(m, move_time);
dc->m.start_pos.x = pos.x * dc->x_scale + dc->x_offs;
dc->m.start_pos.y = pos.y * dc->y_scale + dc->y_offs;
dc->m.start_pos.z = pos.z;
return dc->orig_sk->calc_position_cb(dc->orig_sk, &dc->m, DUMMY_T);
}
void __visible
dual_carriage_set_sk(struct stepper_kinematics *sk
, struct stepper_kinematics *orig_sk)
{
struct dual_carriage_stepper *dc = container_of(
sk, struct dual_carriage_stepper, sk);
dc->sk.calc_position_cb = dual_carriage_calc_position;
dc->sk.active_flags = orig_sk->active_flags;
dc->orig_sk = orig_sk;
}
int __visible
dual_carriage_set_transform(struct stepper_kinematics *sk, char axis
, double scale, double offs)
{
struct dual_carriage_stepper *dc = container_of(
sk, struct dual_carriage_stepper, sk);
if (axis == 'x') {
dc->x_scale = scale;
dc->x_offs = offs;
if (!scale)
dc->sk.active_flags &= ~AF_X;
else if (scale && dc->orig_sk->active_flags & AF_X)
dc->sk.active_flags |= AF_X;
return 0;
}
if (axis == 'y') {
dc->y_scale = scale;
dc->y_offs = offs;
if (!scale)
dc->sk.active_flags &= ~AF_Y;
else if (scale && dc->orig_sk->active_flags & AF_Y)
dc->sk.active_flags |= AF_Y;
return 0;
}
return -1;
}
struct stepper_kinematics * __visible
dual_carriage_alloc(void)
{
struct dual_carriage_stepper *dc = malloc(sizeof(*dc));
memset(dc, 0, sizeof(*dc));
dc->m.move_t = 2. * DUMMY_T;
return &dc->sk;
}
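dual_carriage_calc_position() above feeds the wrapped kinematics a scaled and offset copy of the commanded coordinate, which is what lets a single trapq drive a second carriage. A hedged Python sketch of the transform (the example scale/offset values are illustrative, not taken from this commit):

def dual_carriage_transform(pos, scale, offs):
    # start_pos handed to the wrapped kinematics is pos * scale + offs
    return pos * scale + offs

# A scale of 1.0 with a fixed offs duplicates the motion, a scale of -1.0
# mirrors it, and a scale of 0.0 parks the carriage, matching how
# dual_carriage_set_transform() clears the axis active flag when scale is 0.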


@@ -171,6 +171,9 @@ input_shaper_set_sk(struct stepper_kinematics *sk
return -1;
is->sk.active_flags = orig_sk->active_flags;
is->orig_sk = orig_sk;
is->sk.commanded_pos = orig_sk->commanded_pos;
is->sk.last_flush_time = orig_sk->last_flush_time;
is->sk.last_move_time = orig_sk->last_move_time;
return 0;
}
@@ -201,25 +204,20 @@ input_shaper_set_shaper_params(struct stepper_kinematics *sk, char axis
struct input_shaper *is = container_of(sk, struct input_shaper, sk);
struct shaper_pulses *sp = axis == 'x' ? &is->sx : &is->sy;
int status = 0;
if (is->orig_sk->active_flags & (axis == 'x' ? AF_X : AF_Y))
// Ignore input shaper update if the axis is not active
if (is->orig_sk->active_flags & (axis == 'x' ? AF_X : AF_Y)) {
status = init_shaper(n, a, t, sp);
else
sp->num_pulses = 0;
shaper_note_generation_time(is);
shaper_note_generation_time(is);
}
return status;
}
double __visible
input_shaper_get_step_generation_window(int n, double a[], double t[])
input_shaper_get_step_generation_window(struct stepper_kinematics *sk)
{
struct shaper_pulses sp;
init_shaper(n, a, t, &sp);
if (!sp.num_pulses)
return 0.;
double window = -sp.pulses[0].t;
if (sp.pulses[sp.num_pulses-1].t > window)
window = sp.pulses[sp.num_pulses-1].t;
return window;
struct input_shaper *is = container_of(sk, struct input_shaper, sk);
return is->sk.gen_steps_pre_active > is->sk.gen_steps_post_active
? is->sk.gen_steps_pre_active : is->sk.gen_steps_post_active;
}
struct stepper_kinematics * __visible


@@ -30,7 +30,7 @@
#include "serialqueue.h" // struct queue_message
struct command_queue {
struct list_head stalled_queue, ready_queue;
struct list_head upcoming_queue, ready_queue;
struct list_node node;
};
@@ -49,7 +49,7 @@ struct serialqueue {
int receive_waiting;
// Baud / clock tracking
int receive_window;
double baud_adjust, idle_time;
double bittime_adjust, idle_time;
struct clock_estimate ce;
double last_receive_sent_time;
// Retransmit support
@@ -59,9 +59,10 @@ struct serialqueue {
double srtt, rttvar, rto;
// Pending transmission message queues
struct list_head pending_queues;
int ready_bytes, stalled_bytes, need_ack_bytes, last_ack_bytes;
int ready_bytes, upcoming_bytes, need_ack_bytes, last_ack_bytes;
uint64_t need_kick_clock;
struct list_head notify_queue;
double last_write_fail_time;
// Received messages
struct list_head receive_queue;
// Fastreader support
@@ -136,6 +137,23 @@ kick_bg_thread(struct serialqueue *sq)
report_errno("pipe write", ret);
}
// Minimum number of bits in a canbus message
#define CANBUS_PACKET_BITS ((1 + 11 + 3 + 4) + (16 + 2 + 7 + 3))
#define CANBUS_IFS_BITS 4
// Determine minimum time needed to transmit a given number of bytes
static double
calculate_bittime(struct serialqueue *sq, uint32_t bytes)
{
if (sq->serial_fd_type == SQT_CAN) {
uint32_t pkts = DIV_ROUND_UP(bytes, 8);
uint32_t bits = bytes * 8 + pkts * CANBUS_PACKET_BITS - CANBUS_IFS_BITS;
return sq->bittime_adjust * bits;
} else {
return sq->bittime_adjust * bytes;
}
}
// Update internal state when the receive sequence increases
static void
update_receive_seq(struct serialqueue *sq, double eventtime, uint64_t rseq)
@@ -192,7 +210,7 @@ update_receive_seq(struct serialqueue *sq, double eventtime, uint64_t rseq)
} else {
struct queue_message *sent = list_first_entry(
&sq->sent_queue, struct queue_message, node);
double nr = eventtime + sq->rto + sent->len * sq->baud_adjust;
double nr = eventtime + sq->rto + calculate_bittime(sq, sent->len);
pollreactor_update_timer(sq->pr, SQPT_RETRANSMIT, nr);
}
}
@@ -204,12 +222,11 @@ handle_message(struct serialqueue *sq, double eventtime, int len)
pthread_mutex_lock(&sq->lock);
// Calculate receive sequence number
uint64_t rseq = ((sq->receive_seq & ~MESSAGE_SEQ_MASK)
| (sq->input_buf[MESSAGE_POS_SEQ] & MESSAGE_SEQ_MASK));
uint32_t rseq_delta = ((sq->input_buf[MESSAGE_POS_SEQ] - sq->receive_seq)
& MESSAGE_SEQ_MASK);
uint64_t rseq = sq->receive_seq + rseq_delta;
if (rseq != sq->receive_seq) {
// New sequence number
if (rseq < sq->receive_seq)
rseq += MESSAGE_SEQ_MASK+1;
if (rseq > sq->send_seq && sq->receive_seq != 1) {
// An ack for a message not sent? Out of order message?
sq->bytes_invalid += len;
@@ -251,7 +268,7 @@ handle_message(struct serialqueue *sq, double eventtime, int len)
qm->sent_time = (rseq > sq->retransmit_seq
? sq->last_receive_sent_time : 0.);
qm->receive_time = get_monotonic(); // must be time post read()
qm->receive_time -= sq->baud_adjust * len;
qm->receive_time -= calculate_bittime(sq, len);
list_add_tail(&qm->node, &sq->receive_queue);
must_wake = 1;
}
@@ -339,6 +356,7 @@ kick_event(struct serialqueue *sq, double eventtime)
pollreactor_update_timer(sq->pr, SQPT_COMMAND, PR_NOW);
}
// OS write of data to be sent to the mcu
static void
do_write(struct serialqueue *sq, void *buf, int buflen)
{
@@ -358,8 +376,16 @@ do_write(struct serialqueue *sq, void *buf, int buflen)
int ret = write(sq->serial_fd, &cf, sizeof(cf));
if (ret < 0) {
report_errno("can write", ret);
double curtime = get_monotonic();
if (!sq->last_write_fail_time) {
sq->last_write_fail_time = curtime;
} else if (curtime > sq->last_write_fail_time + 10.0) {
errorf("Halting reads due to CAN write errors.");
pollreactor_do_exit(sq->pr);
}
return;
}
sq->last_write_fail_time = 0.0;
buf += size;
buflen -= size;
}
@@ -407,8 +433,8 @@ retransmit_event(struct serialqueue *sq, double eventtime)
}
sq->retransmit_seq = sq->send_seq;
sq->rtt_sample_seq = 0;
sq->idle_time = eventtime + buflen * sq->baud_adjust;
double waketime = eventtime + first_buflen * sq->baud_adjust + sq->rto;
sq->idle_time = eventtime + calculate_bittime(sq, buflen);
double waketime = eventtime + sq->rto + calculate_bittime(sq, first_buflen);
pthread_mutex_unlock(&sq->lock);
return waketime;
@@ -416,7 +442,8 @@ retransmit_event(struct serialqueue *sq, double eventtime)
// Construct a block of data to be sent to the serial port
static int
build_and_send_command(struct serialqueue *sq, uint8_t *buf, double eventtime)
build_and_send_command(struct serialqueue *sq, uint8_t *buf, int pending
, double eventtime)
{
int len = MESSAGE_HEADER_SIZE;
while (sq->ready_bytes) {
@@ -439,7 +466,7 @@ build_and_send_command(struct serialqueue *sq, uint8_t *buf, double eventtime)
if (len + qm->len > MESSAGE_MAX - MESSAGE_TRAILER_SIZE)
break;
list_del(&qm->node);
if (list_empty(&cq->ready_queue) && list_empty(&cq->stalled_queue))
if (list_empty(&cq->ready_queue) && list_empty(&cq->upcoming_queue))
list_del(&cq->node);
memcpy(&buf[len], qm->msg, qm->len);
len += qm->len;
@@ -463,17 +490,15 @@ build_and_send_command(struct serialqueue *sq, uint8_t *buf, double eventtime)
buf[len - MESSAGE_TRAILER_SYNC] = MESSAGE_SYNC;
// Store message block
if (eventtime > sq->idle_time)
sq->idle_time = eventtime;
sq->idle_time += len * sq->baud_adjust;
double idletime = eventtime > sq->idle_time ? eventtime : sq->idle_time;
idletime += calculate_bittime(sq, pending + len);
struct queue_message *out = message_alloc();
memcpy(out->msg, buf, len);
out->len = len;
out->sent_time = eventtime;
out->receive_time = sq->idle_time;
out->receive_time = idletime;
if (list_empty(&sq->sent_queue))
pollreactor_update_timer(sq->pr, SQPT_RETRANSMIT
, sq->idle_time + sq->rto);
pollreactor_update_timer(sq->pr, SQPT_RETRANSMIT, idletime + sq->rto);
if (!sq->rtt_sample_seq)
sq->rtt_sample_seq = sq->send_seq;
sq->send_seq++;
@@ -484,7 +509,7 @@ build_and_send_command(struct serialqueue *sq, uint8_t *buf, double eventtime)
// Determine the time the next serial data should be sent
static double
check_send_command(struct serialqueue *sq, double eventtime)
check_send_command(struct serialqueue *sq, int pending, double eventtime)
{
if (sq->send_seq - sq->receive_seq >= MAX_PENDING_BLOCKS
&& sq->receive_seq != (uint64_t)-1)
@@ -501,15 +526,15 @@ check_send_command(struct serialqueue *sq, double eventtime)
// Check for stalled messages now ready
double idletime = eventtime > sq->idle_time ? eventtime : sq->idle_time;
idletime += MESSAGE_MIN * sq->baud_adjust;
idletime += calculate_bittime(sq, pending + MESSAGE_MIN);
uint64_t ack_clock = clock_from_time(&sq->ce, idletime);
uint64_t min_stalled_clock = MAX_CLOCK, min_ready_clock = MAX_CLOCK;
struct command_queue *cq;
list_for_each_entry(cq, &sq->pending_queues, node) {
// Move messages from the stalled_queue to the ready_queue
while (!list_empty(&cq->stalled_queue)) {
// Move messages from the upcoming_queue to the ready_queue
while (!list_empty(&cq->upcoming_queue)) {
struct queue_message *qm = list_first_entry(
&cq->stalled_queue, struct queue_message, node);
&cq->upcoming_queue, struct queue_message, node);
if (ack_clock < qm->min_clock) {
if (qm->min_clock < min_stalled_clock)
min_stalled_clock = qm->min_clock;
@@ -517,7 +542,7 @@ check_send_command(struct serialqueue *sq, double eventtime)
}
list_del(&qm->node);
list_add_tail(&qm->node, &cq->ready_queue);
sq->stalled_bytes -= qm->len;
sq->upcoming_bytes -= qm->len;
sq->ready_bytes += qm->len;
}
// Update min_ready_clock
@@ -525,9 +550,10 @@ check_send_command(struct serialqueue *sq, double eventtime)
struct queue_message *qm = list_first_entry(
&cq->ready_queue, struct queue_message, node);
uint64_t req_clock = qm->req_clock;
double bgtime = pending ? idletime : sq->idle_time;
double bgoffset = MIN_REQTIME_DELTA + MIN_BACKGROUND_DELTA;
if (req_clock == BACKGROUND_PRIORITY_CLOCK)
req_clock = clock_from_time(&sq->ce, sq->idle_time + bgoffset);
req_clock = clock_from_time(&sq->ce, bgtime + bgoffset);
if (req_clock < min_ready_clock)
min_ready_clock = req_clock;
}
@@ -561,18 +587,21 @@ command_event(struct serialqueue *sq, double eventtime)
int buflen = 0;
double waketime;
for (;;) {
waketime = check_send_command(sq, eventtime);
waketime = check_send_command(sq, buflen, eventtime);
if (waketime != PR_NOW || buflen + MESSAGE_MAX > sizeof(buf)) {
if (buflen) {
// Write message blocks
do_write(sq, buf, buflen);
sq->bytes_write += buflen;
double idletime = (eventtime > sq->idle_time
? eventtime : sq->idle_time);
sq->idle_time = idletime + calculate_bittime(sq, buflen);
buflen = 0;
}
if (waketime != PR_NOW)
break;
}
buflen += build_and_send_command(sq, &buf[buflen], eventtime);
buflen += build_and_send_command(sq, &buf[buflen], buflen, eventtime);
}
pthread_mutex_unlock(&sq->lock);
return waketime;
@@ -693,7 +722,7 @@ serialqueue_free(struct serialqueue *sq)
&sq->pending_queues, struct command_queue, node);
list_del(&cq->node);
message_queue_free(&cq->ready_queue);
message_queue_free(&cq->stalled_queue);
message_queue_free(&cq->upcoming_queue);
}
pthread_mutex_unlock(&sq->lock);
pollreactor_free(sq->pr);
@@ -707,7 +736,7 @@ serialqueue_alloc_commandqueue(void)
struct command_queue *cq = malloc(sizeof(*cq));
memset(cq, 0, sizeof(*cq));
list_init(&cq->ready_queue);
list_init(&cq->stalled_queue);
list_init(&cq->upcoming_queue);
return cq;
}
@@ -717,7 +746,7 @@ serialqueue_free_commandqueue(struct command_queue *cq)
{
if (!cq)
return;
if (!list_empty(&cq->ready_queue) || !list_empty(&cq->stalled_queue)) {
if (!list_empty(&cq->ready_queue) || !list_empty(&cq->upcoming_queue)) {
errorf("Memory leak! Can't free non-empty commandqueue");
return;
}
@@ -763,12 +792,12 @@ serialqueue_send_batch(struct serialqueue *sq, struct command_queue *cq
return;
qm = list_first_entry(msgs, struct queue_message, node);
// Add list to cq->stalled_queue
// Add list to cq->upcoming_queue
pthread_mutex_lock(&sq->lock);
if (list_empty(&cq->ready_queue) && list_empty(&cq->stalled_queue))
if (list_empty(&cq->ready_queue) && list_empty(&cq->upcoming_queue))
list_add_tail(&cq->node, &sq->pending_queues);
list_join_tail(msgs, &cq->stalled_queue);
sq->stalled_bytes += len;
list_join_tail(msgs, &cq->upcoming_queue);
sq->upcoming_bytes += len;
int mustwake = 0;
if (qm->min_clock < sq->need_kick_clock) {
sq->need_kick_clock = 0;
@@ -847,10 +876,15 @@ exit:
}
void __visible
serialqueue_set_baud_adjust(struct serialqueue *sq, double baud_adjust)
serialqueue_set_wire_frequency(struct serialqueue *sq, double frequency)
{
pthread_mutex_lock(&sq->lock);
sq->baud_adjust = baud_adjust;
if (sq->serial_fd_type == SQT_CAN) {
sq->bittime_adjust = 1. / frequency;
} else {
// An 8N1 serial line is 10 bits per byte (1 start, 8 data, 1 stop)
sq->bittime_adjust = 10. / frequency;
}
pthread_mutex_unlock(&sq->lock);
}
@@ -899,13 +933,13 @@ serialqueue_get_stats(struct serialqueue *sq, char *buf, int len)
" bytes_retransmit=%u bytes_invalid=%u"
" send_seq=%u receive_seq=%u retransmit_seq=%u"
" srtt=%.3f rttvar=%.3f rto=%.3f"
" ready_bytes=%u stalled_bytes=%u"
" ready_bytes=%u upcoming_bytes=%u"
, stats.bytes_write, stats.bytes_read
, stats.bytes_retransmit, stats.bytes_invalid
, (int)stats.send_seq, (int)stats.receive_seq
, (int)stats.retransmit_seq
, stats.srtt, stats.rttvar, stats.rto
, stats.ready_bytes, stats.stalled_bytes);
, stats.ready_bytes, stats.upcoming_bytes);
}
// Extract old messages stored in the debug queues
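The calculate_bittime() / serialqueue_set_wire_frequency() changes above replace the old per-byte baud_adjust with a per-bit wire time, so CAN framing overhead can be charged per packet. A Python sketch of the same arithmetic (constants copied from the hunk above):

CANBUS_PACKET_BITS = (1 + 11 + 3 + 4) + (16 + 2 + 7 + 3)   # per-frame overhead bits
CANBUS_IFS_BITS = 4

def calculate_bittime(num_bytes, frequency, is_can):
    if is_can:
        bittime = 1. / frequency
        pkts = (num_bytes + 7) // 8                 # DIV_ROUND_UP(bytes, 8)
        bits = num_bytes * 8 + pkts * CANBUS_PACKET_BITS - CANBUS_IFS_BITS
        return bittime * bits
    # An 8N1 serial line carries 10 bits per byte (1 start, 8 data, 1 stop)
    return (10. / frequency) * num_bytes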


@@ -42,7 +42,7 @@ void serialqueue_send(struct serialqueue *sq, struct command_queue *cq
, uint8_t *msg, int len, uint64_t min_clock
, uint64_t req_clock, uint64_t notify_id);
void serialqueue_pull(struct serialqueue *sq, struct pull_queue_message *pqm);
void serialqueue_set_baud_adjust(struct serialqueue *sq, double baud_adjust);
void serialqueue_set_wire_frequency(struct serialqueue *sq, double frequency);
void serialqueue_set_receive_window(struct serialqueue *sq, int receive_window);
void serialqueue_set_clock_est(struct serialqueue *sq, double est_freq
, double conv_time, uint64_t conv_clock


@@ -54,8 +54,6 @@ struct step_move {
int16_t add;
};
#define HISTORY_EXPIRE (30.0)
struct history_steps {
struct list_node node;
uint64_t first_clock, last_clock;
@@ -292,6 +290,13 @@ free_history(struct stepcompress *sc, uint64_t end_clock)
}
}
// Expire the stepcompress history older than the given clock
static void
stepcompress_history_expire(struct stepcompress *sc, uint64_t end_clock)
{
free_history(sc, end_clock);
}
// Free memory associated with a 'stepcompress' object
void __visible
stepcompress_free(struct stepcompress *sc)
@@ -322,9 +327,6 @@ calc_last_step_print_time(struct stepcompress *sc)
{
double lsc = sc->last_step_clock;
sc->last_step_print_time = sc->mcu_time_offset + (lsc - .5) / sc->mcu_freq;
if (lsc > sc->mcu_freq * HISTORY_EXPIRE)
free_history(sc, lsc - sc->mcu_freq * HISTORY_EXPIRE);
}
// Set the conversion rate of 'print_time' to mcu clock
@@ -623,6 +625,21 @@ stepcompress_queue_msg(struct stepcompress *sc, uint32_t *data, int len)
return 0;
}
// Queue an mcu command that will consume space in the mcu move queue
int __visible
stepcompress_queue_mq_msg(struct stepcompress *sc, uint64_t req_clock
, uint32_t *data, int len)
{
int ret = stepcompress_flush(sc, UINT64_MAX);
if (ret)
return ret;
struct queue_message *qm = message_alloc_and_encode(data, len);
qm->min_clock = qm->req_clock = req_clock;
list_add_tail(&qm->node, &sc->msg_queue);
return 0;
}
// Return history of queue_step commands
int __visible
stepcompress_extract_old(struct stepcompress *sc, struct pull_history_steps *p
@@ -716,6 +733,18 @@ steppersync_set_time(struct steppersync *ss, double time_offset
}
}
// Expire the stepcompress history before the given clock time
static void
steppersync_history_expire(struct steppersync *ss, uint64_t end_clock)
{
int i;
for (i = 0; i < ss->sc_num; i++)
{
struct stepcompress *sc = ss->sc_list[i];
stepcompress_history_expire(sc, end_clock);
}
}
// Implement a binary heap algorithm to track when the next available
// 'struct move' in the mcu will be available
static void
@@ -743,7 +772,8 @@ heap_replace(struct steppersync *ss, uint64_t req_clock)
// Find and transmit any scheduled steps prior to the given 'move_clock'
int __visible
steppersync_flush(struct steppersync *ss, uint64_t move_clock)
steppersync_flush(struct steppersync *ss, uint64_t move_clock
, uint64_t clear_history_clock)
{
// Flush each stepcompress to the specified move_clock
int i;
@@ -791,5 +821,7 @@ steppersync_flush(struct steppersync *ss, uint64_t move_clock)
// Transmit commands
if (!list_empty(&msgs))
serialqueue_send_batch(ss->sq, ss->cq, &msgs);
steppersync_history_expire(ss, clear_history_clock);
return 0;
}
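With the extra clear_history_clock argument, the caller now decides how much step history survives each flush, replacing the fixed 30 second HISTORY_EXPIRE window removed above. A hedged host-side sketch (the wrapper name and the 30 s retention value are illustrative assumptions, not part of this commit):

def flush_with_history(steppersync_flush, move_clock, mcu_freq, keep_history_s=30.0):
    # Retain roughly keep_history_s seconds of step history behind move_clock
    clear_history_clock = max(0, move_clock - int(keep_history_s * mcu_freq))
    return steppersync_flush(move_clock, clear_history_clock)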


@@ -29,6 +29,8 @@ int stepcompress_set_last_position(struct stepcompress *sc, uint64_t clock
int64_t stepcompress_find_past_position(struct stepcompress *sc
, uint64_t clock);
int stepcompress_queue_msg(struct stepcompress *sc, uint32_t *data, int len);
int stepcompress_queue_mq_msg(struct stepcompress *sc, uint64_t req_clock
, uint32_t *data, int len);
int stepcompress_extract_old(struct stepcompress *sc
, struct pull_history_steps *p, int max
, uint64_t start_clock, uint64_t end_clock);
@@ -40,6 +42,7 @@ struct steppersync *steppersync_alloc(
void steppersync_free(struct steppersync *ss);
void steppersync_set_time(struct steppersync *ss, double time_offset
, double mcu_freq);
int steppersync_flush(struct steppersync *ss, uint64_t move_clock);
int steppersync_flush(struct steppersync *ss, uint64_t move_clock
, uint64_t clear_history_clock);
#endif // stepcompress.h


@@ -20,54 +20,6 @@ move_alloc(void)
return m;
}
// Fill and add a move to the trapezoid velocity queue
void __visible
trapq_append(struct trapq *tq, double print_time
, double accel_t, double cruise_t, double decel_t
, double start_pos_x, double start_pos_y, double start_pos_z
, double axes_r_x, double axes_r_y, double axes_r_z
, double start_v, double cruise_v, double accel)
{
struct coord start_pos = { .x=start_pos_x, .y=start_pos_y, .z=start_pos_z };
struct coord axes_r = { .x=axes_r_x, .y=axes_r_y, .z=axes_r_z };
if (accel_t) {
struct move *m = move_alloc();
m->print_time = print_time;
m->move_t = accel_t;
m->start_v = start_v;
m->half_accel = .5 * accel;
m->start_pos = start_pos;
m->axes_r = axes_r;
trapq_add_move(tq, m);
print_time += accel_t;
start_pos = move_get_coord(m, accel_t);
}
if (cruise_t) {
struct move *m = move_alloc();
m->print_time = print_time;
m->move_t = cruise_t;
m->start_v = cruise_v;
m->half_accel = 0.;
m->start_pos = start_pos;
m->axes_r = axes_r;
trapq_add_move(tq, m);
print_time += cruise_t;
start_pos = move_get_coord(m, cruise_t);
}
if (decel_t) {
struct move *m = move_alloc();
m->print_time = print_time;
m->move_t = decel_t;
m->start_v = cruise_v;
m->half_accel = -.5 * accel;
m->start_pos = start_pos;
m->axes_r = axes_r;
trapq_add_move(tq, m);
}
}
// Return the distance moved given a time in a move
inline double
move_get_distance(struct move *m, double move_time)
@@ -163,11 +115,58 @@ trapq_add_move(struct trapq *tq, struct move *m)
tail_sentinel->print_time = 0.;
}
#define HISTORY_EXPIRE (30.0)
// Fill and add a move to the trapezoid velocity queue
void __visible
trapq_append(struct trapq *tq, double print_time
, double accel_t, double cruise_t, double decel_t
, double start_pos_x, double start_pos_y, double start_pos_z
, double axes_r_x, double axes_r_y, double axes_r_z
, double start_v, double cruise_v, double accel)
{
struct coord start_pos = { .x=start_pos_x, .y=start_pos_y, .z=start_pos_z };
struct coord axes_r = { .x=axes_r_x, .y=axes_r_y, .z=axes_r_z };
if (accel_t) {
struct move *m = move_alloc();
m->print_time = print_time;
m->move_t = accel_t;
m->start_v = start_v;
m->half_accel = .5 * accel;
m->start_pos = start_pos;
m->axes_r = axes_r;
trapq_add_move(tq, m);
print_time += accel_t;
start_pos = move_get_coord(m, accel_t);
}
if (cruise_t) {
struct move *m = move_alloc();
m->print_time = print_time;
m->move_t = cruise_t;
m->start_v = cruise_v;
m->half_accel = 0.;
m->start_pos = start_pos;
m->axes_r = axes_r;
trapq_add_move(tq, m);
print_time += cruise_t;
start_pos = move_get_coord(m, cruise_t);
}
if (decel_t) {
struct move *m = move_alloc();
m->print_time = print_time;
m->move_t = decel_t;
m->start_v = cruise_v;
m->half_accel = -.5 * accel;
m->start_pos = start_pos;
m->axes_r = axes_r;
trapq_add_move(tq, m);
}
}
// Expire any moves older than `print_time` from the trapezoid velocity queue
void __visible
trapq_finalize_moves(struct trapq *tq, double print_time)
trapq_finalize_moves(struct trapq *tq, double print_time
, double clear_history_time)
{
struct move *head_sentinel = list_first_entry(&tq->moves, struct move,node);
struct move *tail_sentinel = list_last_entry(&tq->moves, struct move, node);
@@ -190,10 +189,9 @@ trapq_finalize_moves(struct trapq *tq, double print_time)
if (list_empty(&tq->history))
return;
struct move *latest = list_first_entry(&tq->history, struct move, node);
double expire_time = latest->print_time + latest->move_t - HISTORY_EXPIRE;
for (;;) {
struct move *m = list_last_entry(&tq->history, struct move, node);
if (m == latest || m->print_time + m->move_t > expire_time)
if (m == latest || m->print_time + m->move_t > clear_history_time)
break;
list_del(&m->node);
free(m);
@@ -206,7 +204,7 @@ trapq_set_position(struct trapq *tq, double print_time
, double pos_x, double pos_y, double pos_z)
{
// Flush all moves from trapq
trapq_finalize_moves(tq, NEVER_TIME);
trapq_finalize_moves(tq, NEVER_TIME, 0);
// Prune any moves in the trapq history that were interrupted
while (!list_empty(&tq->history)) {


@@ -32,18 +32,19 @@ struct pull_move {
};
struct move *move_alloc(void);
void trapq_append(struct trapq *tq, double print_time
, double accel_t, double cruise_t, double decel_t
, double start_pos_x, double start_pos_y, double start_pos_z
, double axes_r_x, double axes_r_y, double axes_r_z
, double start_v, double cruise_v, double accel);
double move_get_distance(struct move *m, double move_time);
struct coord move_get_coord(struct move *m, double move_time);
struct trapq *trapq_alloc(void);
void trapq_free(struct trapq *tq);
void trapq_check_sentinels(struct trapq *tq);
void trapq_add_move(struct trapq *tq, struct move *m);
void trapq_finalize_moves(struct trapq *tq, double print_time);
void trapq_append(struct trapq *tq, double print_time
, double accel_t, double cruise_t, double decel_t
, double start_pos_x, double start_pos_y, double start_pos_z
, double axes_r_x, double axes_r_y, double axes_r_z
, double start_v, double cruise_v, double accel);
void trapq_finalize_moves(struct trapq *tq, double print_time
, double clear_history_time);
void trapq_set_position(struct trapq *tq, double print_time
, double pos_x, double pos_y, double pos_z);
int trapq_extract_old(struct trapq *tq, struct pull_move *p, int max


@@ -66,10 +66,8 @@ class ClockSync:
self.queries_pending = 0
# Extend clock to 64bit
last_clock = self.last_clock
clock = (last_clock & ~0xffffffff) | params['clock']
if clock < last_clock:
clock += 0x100000000
self.last_clock = clock
clock_delta = (params['clock'] - last_clock) & 0xffffffff
self.last_clock = clock = last_clock + clock_delta
# Check if this is the best round-trip-time seen so far
sent_time = params['#sent_time']
if not sent_time:
@@ -138,10 +136,9 @@ class ClockSync:
# misc commands
def clock32_to_clock64(self, clock32):
last_clock = self.last_clock
clock_diff = (last_clock - clock32) & 0xffffffff
if clock_diff & 0x80000000:
return last_clock + 0x100000000 - clock_diff
return last_clock - clock_diff
clock_diff = (clock32 - last_clock) & 0xffffffff
clock_diff -= (clock_diff & 0x80000000) << 1
return last_clock + clock_diff
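# The rewrite above sign-extends the 32-bit delta instead of branching on
# wrap-around. Standalone sketch of the same arithmetic (example values are
# illustrative only):
def _extend_clock32_sketch(last_clock, clock32):
    clock_diff = (clock32 - last_clock) & 0xffffffff
    clock_diff -= (clock_diff & 0x80000000) << 1   # interpret delta as signed
    return last_clock + clock_diff
# e.g. _extend_clock32_sketch(0x100000010, 0x0000000c) == 0x10000000c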
def is_active(self):
return self.queries_pending <= 4
def dump_debug(self):


@@ -80,11 +80,15 @@ class ConfigWrapper:
def getlists(self, option, default=sentinel, seps=(',',), count=None,
parser=str, note_valid=True):
def lparser(value, pos):
if len(value.strip()) == 0:
# Return an empty list instead of [''] for empty string
parts = []
else:
parts = [p.strip() for p in value.split(seps[pos])]
if pos:
# Nested list
parts = [p.strip() for p in value.split(seps[pos])]
return tuple([lparser(p, pos - 1) for p in parts if p])
res = [parser(p.strip()) for p in value.split(seps[pos])]
res = [parser(p) for p in parts]
if count is not None and len(res) != count:
raise error("Option '%s' in section '%s' must have %d elements"
% (option, self.section, count))
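# Sketch of the revised list parsing above: an empty value now yields []
# rather than [''], and each extra separator nests one level deeper
# (assumed standalone example, not from the commit):
def _parse_lists_sketch(value, seps=(',',), parser=str):
    def lparser(value, pos):
        if len(value.strip()) == 0:
            parts = []
        else:
            parts = [p.strip() for p in value.split(seps[pos])]
        if pos:
            return tuple([lparser(p, pos - 1) for p in parts if p])
        return [parser(p) for p in parts]
    return lparser(value, len(seps) - 1)
# _parse_lists_sketch("1,2;3,4", seps=(',', ';'), parser=int) -> ([1, 2], [3, 4])
# _parse_lists_sketch("", parser=int) -> []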
@@ -139,6 +143,8 @@ class PrinterConfig:
self.printer = printer
self.autosave = None
self.deprecated = {}
self.runtime_warnings = []
self.deprecate_warnings = []
self.status_raw_config = {}
self.status_save_pending = {}
self.status_settings = {}
@@ -147,6 +153,8 @@ class PrinterConfig:
gcode = self.printer.lookup_object('gcode')
gcode.register_command("SAVE_CONFIG", self.cmd_SAVE_CONFIG,
desc=self.cmd_SAVE_CONFIG_help)
gcode.register_command("SAVE_CONFIG_QD", self.cmd_SAVE_CONFIG_QD,
desc=self.cmd_SAVE_CONFIG_QD_help)
def get_printer(self):
return self.printer
def _read_config_file(self, filename):
@@ -168,16 +176,16 @@ class PrinterConfig:
autosave_data = data[pos + len(AUTOSAVE_HEADER):].strip()
# Check for errors and strip line prefixes
if "\n#*# " in regular_data:
logging.warn("Can't read autosave from config file"
" - autosave state corrupted")
logging.warning("Can't read autosave from config file"
" - autosave state corrupted")
return data, ""
out = [""]
for line in autosave_data.split('\n'):
if ((not line.startswith("#*#")
or (len(line) >= 4 and not line.startswith("#*# ")))
and autosave_data):
logging.warn("Can't read autosave from config file"
" - modifications after header")
logging.warning("Can't read autosave from config file"
" - modifications after header")
return data, ""
out.append(line[4:])
out.append("")
@@ -185,7 +193,6 @@ class PrinterConfig:
comment_r = re.compile('[#;].*$')
value_r = re.compile('[^A-Za-z0-9_].*$')
def _strip_duplicates(self, data, config):
fileconfig = config.fileconfig
# Comment out fields in 'data' that are defined in 'config'
lines = data.split('\n')
section = None
@@ -213,7 +220,10 @@ class PrinterConfig:
data = '\n'.join(buffer)
del buffer[:]
sbuffer = io.StringIO(data)
fileconfig.readfp(sbuffer, filename)
if sys.version_info.major >= 3:
fileconfig.read_file(sbuffer, filename)
else:
fileconfig.readfp(sbuffer, filename)
def _resolve_include(self, source_filename, include_spec, fileconfig,
visited):
dirname = os.path.dirname(source_filename)
@@ -307,6 +317,11 @@ class PrinterConfig:
"======================="]
self.printer.set_rollover_info("config", "\n".join(lines))
# Status reporting
def runtime_warning(self, msg):
logging.warning(msg)
res = {'type': 'runtime_warning', 'message': msg}
self.runtime_warnings.append(res)
self.status_warnings = self.runtime_warnings + self.deprecate_warnings
def deprecate(self, section, option, value=None, msg=None):
self.deprecated[(section, option, value)] = msg
def _build_status(self, config):
@@ -318,7 +333,7 @@ class PrinterConfig:
self.status_settings = {}
for (section, option), value in config.access_tracking.items():
self.status_settings.setdefault(section, {})[option] = value
self.status_warnings = []
self.deprecate_warnings = []
for (section, option, value), msg in self.deprecated.items():
if value is None:
res = {'type': 'deprecated_option'}
@@ -327,7 +342,8 @@ class PrinterConfig:
res['message'] = msg
res['section'] = section
res['option'] = option
self.status_warnings.append(res)
self.deprecate_warnings.append(res)
self.status_warnings = self.runtime_warnings + self.deprecate_warnings
def get_status(self, eventtime):
return {'config': self.status_raw_config,
'settings': self.status_settings,
@@ -408,12 +424,62 @@ class PrinterConfig:
try:
f = open(temp_name, 'w')
f.write(data)
f.flush()
f.close()
os.rename(cfgname, backup_name)
os.rename(temp_name, cfgname)
os.system("sync")
except:
msg = "Unable to write config file during SAVE_CONFIG"
logging.exception(msg)
raise gcode.error(msg)
# Request a restart
gcode.request_restart('restart')
gcode.request_restart('firmware_restart')
cmd_SAVE_CONFIG_QD_help = "Overwrite config file and restart"
def cmd_SAVE_CONFIG_QD(self, gcmd):
if not self.autosave.fileconfig.sections():
return
gcode = self.printer.lookup_object('gcode')
# Create string containing autosave data
autosave_data = self._build_config_string(self.autosave)
lines = [('#*# ' + l).strip()
for l in autosave_data.split('\n')]
lines.insert(0, "\n" + AUTOSAVE_HEADER.rstrip())
lines.append("")
autosave_data = '\n'.join(lines)
# Read in and validate current config file
cfgname = self.printer.get_start_args()['config_file']
try:
data = self._read_config_file(cfgname)
regular_data, old_autosave_data = self._find_autosave_data(data)
config = self._build_config_wrapper(regular_data, cfgname)
except error as e:
msg = "Unable to parse existing config on SAVE_CONFIG"
logging.exception(msg)
raise gcode.error(msg)
regular_data = self._strip_duplicates(regular_data, self.autosave)
self._disallow_include_conflicts(regular_data, cfgname, gcode)
data = regular_data.rstrip() + autosave_data
# Determine filenames
datestr = time.strftime("-%Y%m%d_%H%M%S")
backup_name = cfgname + datestr
temp_name = cfgname + "_autosave"
if cfgname.endswith(".cfg"):
backup_name = cfgname[:-4] + datestr + ".cfg"
temp_name = cfgname[:-4] + "_autosave.cfg"
# Create new config file with temporary name and swap with main config
logging.info("SAVE_CONFIG to '%s' (backup in '%s')",
cfgname, backup_name)
try:
f = open(temp_name, 'w')
f.write(data)
f.flush()
f.close()
os.rename(cfgname, backup_name)
os.rename(temp_name, cfgname)
os.system("sync")
except:
msg = "Unable to write config file during SAVE_CONFIG"
logging.exception(msg)
raise gcode.error(msg)


@@ -5,17 +5,18 @@
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, optparse, os, re, logging
import util, reactor, serialhdl, pins, msgproto, clocksync
import util, reactor, serialhdl, msgproto, clocksync
help_txt = """
This is a debugging console for the Klipper micro-controller.
In addition to mcu commands, the following artificial commands are
available:
PINS : Load pin name aliases (eg, "PINS arduino")
DELAY : Send a command at a clock time (eg, "DELAY 9999 get_uptime")
FLOOD : Send a command many times (eg, "FLOOD 22 .01 get_uptime")
SUPPRESS : Suppress a response message (eg, "SUPPRESS analog_in_state 4")
SET : Create a local variable (eg, "SET myvar 123.4")
DUMP : Dump memory (eg, "DUMP 0x12345678 100 32")
FILEDUMP : Dump to file (eg, "FILEDUMP data.bin 0x12345678 100 32")
STATS : Report serial statistics
LIST : List available mcu commands, local commands, and local variables
HELP : Show this text
@@ -42,12 +43,12 @@ class KeyboardReader:
self.fd = sys.stdin.fileno()
util.set_nonblock(self.fd)
self.mcu_freq = 0
self.pins = pins.PinResolver(validate_aliases=False)
self.data = ""
reactor.register_fd(self.fd, self.process_kbd)
reactor.register_callback(self.connect)
self.local_commands = {
"SET": self.command_SET,
"DUMP": self.command_DUMP, "FILEDUMP": self.command_FILEDUMP,
"DELAY": self.command_DELAY, "FLOOD": self.command_FLOOD,
"SUPPRESS": self.command_SUPPRESS, "STATS": self.command_STATS,
"LIST": self.command_LIST, "HELP": self.command_HELP,
@@ -98,6 +99,55 @@ class KeyboardReader:
except ValueError:
pass
self.eval_globals[parts[1]] = val
def command_DUMP(self, parts, filename=None):
# Extract command args
try:
addr = int(parts[1], 0)
count = int(parts[2], 0)
order = [2, 0, 1, 0][(addr | count) & 3]
if len(parts) > 3:
order = {'32': 2, '16': 1, '8': 0}[parts[3]]
except ValueError as e:
self.output("Error: %s" % (str(e),))
return
bsize = 1 << order
# Query data from mcu
vals = []
for i in range((count + bsize - 1) >> order):
caddr = addr + (i << order)
cmd = "debug_read order=%d addr=%d" % (order, caddr)
params = self.ser.send_with_response(cmd, "debug_result")
vals.append(params['val'])
# Report data
if filename is None and order == 2:
# Common 32bit hex dump
for i in range((len(vals) + 3) // 4):
p = i * 4
hexvals = " ".join(["%08x" % (v,) for v in vals[p:p+4]])
self.output("%08x %s" % (addr + p * 4, hexvals))
return
# Convert to byte format
data = bytearray()
for val in vals:
for b in range(bsize):
data.append((val >> (8 * b)) & 0xff)
data = data[:count]
if filename is not None:
f = open(filename, 'wb')
f.write(data)
f.close()
self.output("Wrote %d bytes to '%s'" % (len(data), filename))
return
for i in range((count + 15) // 16):
p = i * 16
paddr = addr + p
d = data[p:p+16]
hexbytes = " ".join(["%02x" % (v,) for v in d])
pb = "".join([chr(v) if v >= 0x20 and v < 0x7f else '.' for v in d])
o = "%08x %-47s |%s|" % (paddr, hexbytes, pb)
self.output("%s %s" % (o[:34], o[34:]))
def command_FILEDUMP(self, parts):
self.command_DUMP(parts[1:], filename=parts[1])
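# How DUMP picks its access width above: (addr | count) & 3 indexes the table
# [2, 0, 1, 0], choosing the widest naturally aligned access (order 2=32-bit,
# 1=16-bit, 0=8-bit). Worked examples (illustrative addresses):
#   addr=0x20000000, count=64 -> (addr|count) & 3 == 0 -> order 2 (32-bit reads)
#   addr=0x20000002, count=16 -> (addr|count) & 3 == 2 -> order 1 (16-bit reads)
#   addr=0x20000001, any count -> odd value           -> order 0 (8-bit reads)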
def command_DELAY(self, parts):
try:
val = int(parts[1])
@@ -172,11 +222,7 @@ class KeyboardReader:
return None
line = ''.join(evalparts)
self.output("Eval: %s" % (line,))
try:
line = self.pins.update_command(line).strip()
except:
self.output("Unable to map pin: %s" % (line,))
return None
line = line.strip()
if line:
parts = line.split()
if parts[0] in self.local_commands:


@@ -95,8 +95,8 @@ class LinearVoltage:
for temp, volt in params:
adc = (volt - voltage_offset) / adc_voltage
if adc < 0. or adc > 1.:
logging.warn("Ignoring adc sample %.3f/%.3f in heater %s",
temp, volt, config.get_name())
logging.warning("Ignoring adc sample %.3f/%.3f in heater %s",
temp, volt, config.get_name())
continue
samples.append((adc, temp))
try:


@@ -1,10 +1,10 @@
# Support for reading acceleration data from an adxl345 chip
#
# Copyright (C) 2020-2021 Kevin O'Connor <kevin@koconnor.net>
# Copyright (C) 2020-2023 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import logging, time, collections, threading, multiprocessing, os
from . import bus, motion_report
import logging, time, collections, multiprocessing, os
from . import bus, bulk_sensor
# ADXL345 registers
REG_DEVID = 0x00
@@ -24,33 +24,37 @@ ADXL345_DEV_ID = 0xe5
SET_FIFO_CTL = 0x90
FREEFALL_ACCEL = 9.80665 * 1000.
SCALE = 0.0039 * FREEFALL_ACCEL # 3.9mg/LSB * Earth gravity in mm/s**2
SCALE_XY = 0.003774 * FREEFALL_ACCEL # 1 / 265 (at 3.3V) mg/LSB
SCALE_Z = 0.003906 * FREEFALL_ACCEL # 1 / 256 (at 3.3V) mg/LSB
Accel_Measurement = collections.namedtuple(
'Accel_Measurement', ('time', 'accel_x', 'accel_y', 'accel_z'))
# Helper class to obtain measurements
class AccelQueryHelper:
def __init__(self, printer, cconn):
def __init__(self, printer):
self.printer = printer
self.cconn = cconn
self.is_finished = False
print_time = printer.lookup_object('toolhead').get_last_move_time()
self.request_start_time = self.request_end_time = print_time
self.samples = self.raw_samples = []
self.msgs = []
self.samples = []
def finish_measurements(self):
toolhead = self.printer.lookup_object('toolhead')
self.request_end_time = toolhead.get_last_move_time()
toolhead.wait_moves()
self.cconn.finalize()
def _get_raw_samples(self):
raw_samples = self.cconn.get_messages()
if raw_samples:
self.raw_samples = raw_samples
return self.raw_samples
self.is_finished = True
def handle_batch(self, msg):
if self.is_finished:
return False
if len(self.msgs) >= 10000:
# Avoid filling up memory with too many samples
return False
self.msgs.append(msg)
return True
def has_valid_samples(self):
raw_samples = self._get_raw_samples()
for msg in raw_samples:
data = msg['params']['data']
for msg in self.msgs:
data = msg['data']
first_sample_time = data[0][0]
last_sample_time = data[-1][0]
if (first_sample_time > self.request_end_time
@@ -59,21 +63,20 @@ class AccelQueryHelper:
# The time intervals [first_sample_time, last_sample_time]
# and [request_start_time, request_end_time] have non-zero
# intersection. It is still theoretically possible that none
# of the samples from raw_samples fall into the time interval
# of the samples from msgs fall into the time interval
# [request_start_time, request_end_time] if it is too narrow
# or on very heavy data losses. In practice, that interval
# is at least 1 second, so this possibility is negligible.
return True
return False
def get_samples(self):
raw_samples = self._get_raw_samples()
if not raw_samples:
if not self.msgs:
return self.samples
total = sum([len(m['params']['data']) for m in raw_samples])
total = sum([len(m['data']) for m in self.msgs])
count = 0
self.samples = samples = [None] * total
for msg in raw_samples:
for samp_time, x, y, z in msg['params']['data']:
for msg in self.msgs:
for samp_time, x, y, z in msg['data']:
if samp_time < self.request_start_time:
continue
if samp_time > self.request_end_time:
@@ -172,77 +175,31 @@ class AccelCommandHelper:
val = gcmd.get("VAL", minval=0, maxval=255, parser=lambda x: int(x, 0))
self.chip.set_reg(reg, val)
# Helper class for chip clock synchronization via linear regression
class ClockSyncRegression:
def __init__(self, mcu, chip_clock_smooth, decay = 1. / 20.):
self.mcu = mcu
self.chip_clock_smooth = chip_clock_smooth
self.decay = decay
self.last_chip_clock = self.last_exp_mcu_clock = 0.
self.mcu_clock_avg = self.mcu_clock_variance = 0.
self.chip_clock_avg = self.chip_clock_covariance = 0.
def reset(self, mcu_clock, chip_clock):
self.mcu_clock_avg = self.last_mcu_clock = mcu_clock
self.chip_clock_avg = chip_clock
self.mcu_clock_variance = self.chip_clock_covariance = 0.
self.last_chip_clock = self.last_exp_mcu_clock = 0.
def update(self, mcu_clock, chip_clock):
# Update linear regression
decay = self.decay
diff_mcu_clock = mcu_clock - self.mcu_clock_avg
self.mcu_clock_avg += decay * diff_mcu_clock
self.mcu_clock_variance = (1. - decay) * (
self.mcu_clock_variance + diff_mcu_clock**2 * decay)
diff_chip_clock = chip_clock - self.chip_clock_avg
self.chip_clock_avg += decay * diff_chip_clock
self.chip_clock_covariance = (1. - decay) * (
self.chip_clock_covariance + diff_mcu_clock*diff_chip_clock*decay)
def set_last_chip_clock(self, chip_clock):
base_mcu, base_chip, inv_cfreq = self.get_clock_translation()
self.last_chip_clock = chip_clock
self.last_exp_mcu_clock = base_mcu + (chip_clock-base_chip) * inv_cfreq
def get_clock_translation(self):
inv_chip_freq = self.mcu_clock_variance / self.chip_clock_covariance
if not self.last_chip_clock:
return self.mcu_clock_avg, self.chip_clock_avg, inv_chip_freq
# Find mcu clock associated with future chip_clock
s_chip_clock = self.last_chip_clock + self.chip_clock_smooth
scdiff = s_chip_clock - self.chip_clock_avg
s_mcu_clock = self.mcu_clock_avg + scdiff * inv_chip_freq
# Calculate frequency to converge at future point
mdiff = s_mcu_clock - self.last_exp_mcu_clock
s_inv_chip_freq = mdiff / self.chip_clock_smooth
return self.last_exp_mcu_clock, self.last_chip_clock, s_inv_chip_freq
def get_time_translation(self):
base_mcu, base_chip, inv_cfreq = self.get_clock_translation()
clock_to_print_time = self.mcu.clock_to_print_time
base_time = clock_to_print_time(base_mcu)
inv_freq = clock_to_print_time(base_mcu + inv_cfreq) - base_time
return base_time, base_chip, inv_freq
# Helper to read the axes_map parameter from the config
def read_axes_map(config):
am = {'x': (0, SCALE_XY), 'y': (1, SCALE_XY), 'z': (2, SCALE_Z),
'-x': (0, -SCALE_XY), '-y': (1, -SCALE_XY), '-z': (2, -SCALE_Z)}
axes_map = config.getlist('axes_map', ('x','y','z'), count=3)
if any([a not in am for a in axes_map]):
raise config.error("Invalid axes_map parameter")
return [am[a.strip()] for a in axes_map]
MIN_MSG_TIME = 0.100
BYTES_PER_SAMPLE = 5
SAMPLES_PER_BLOCK = 10
BATCH_UPDATES = 0.100
# Printer class that controls ADXL345 chip
class ADXL345:
def __init__(self, config):
self.printer = config.get_printer()
AccelCommandHelper(config, self)
self.query_rate = 0
am = {'x': (0, SCALE), 'y': (1, SCALE), 'z': (2, SCALE),
'-x': (0, -SCALE), '-y': (1, -SCALE), '-z': (2, -SCALE)}
axes_map = config.getlist('axes_map', ('x','y','z'), count=3)
if any([a not in am for a in axes_map]):
raise config.error("Invalid adxl345 axes_map parameter")
self.axes_map = [am[a.strip()] for a in axes_map]
self.axes_map = read_axes_map(config)
self.data_rate = config.getint('rate', 3200)
if self.data_rate not in QUERY_RATES:
raise config.error("Invalid rate parameter: %d" % (self.data_rate,))
# Measurement storage (accessed from background thread)
self.lock = threading.Lock()
self.raw_samples = []
# Setup mcu sensor_adxl345 bulk query code
self.spi = bus.MCU_SPI_from_config(config, 3, default_speed=5000000)
self.mcu = mcu = self.spi.get_mcu()
@@ -254,18 +211,21 @@ class ADXL345:
mcu.add_config_cmd("query_adxl345 oid=%d clock=0 rest_ticks=0"
% (oid,), on_restart=True)
mcu.register_config_callback(self._build_config)
mcu.register_response(self._handle_adxl345_data, "adxl345_data", oid)
self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "adxl345_data", oid)
# Clock tracking
self.last_sequence = self.max_query_duration = 0
self.last_limit_count = self.last_error_count = 0
self.clock_sync = ClockSyncRegression(self.mcu, 640)
# API server endpoints
self.api_dump = motion_report.APIDumpHelper(
self.printer, self._api_update, self._api_startstop, 0.100)
chip_smooth = self.data_rate * BATCH_UPDATES * 2
self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth)
self.clock_updater = bulk_sensor.ChipClockUpdater(self.clock_sync,
BYTES_PER_SAMPLE)
self.last_error_count = 0
# Process messages in batches
self.batch_bulk = bulk_sensor.BatchBulkHelper(
self.printer, self._process_batch,
self._start_measurements, self._finish_measurements, BATCH_UPDATES)
self.name = config.get_name().split()[-1]
wh = self.printer.lookup_object('webhooks')
wh.register_mux_endpoint("adxl345/dump_adxl345", "sensor", self.name,
self._handle_dump_adxl345)
hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration')
self.batch_bulk.add_mux_endpoint("adxl345/dump_adxl345", "sensor",
self.name, {'header': hdr})
def _build_config(self):
cmdqueue = self.spi.get_command_queue()
self.query_adxl345_cmd = self.mcu.lookup_command(
@@ -291,24 +251,23 @@ class ADXL345:
"This is generally indicative of connection problems "
"(e.g. faulty wiring) or a faulty adxl345 chip." % (
reg, val, stored_val))
# Measurement collection
def is_measuring(self):
return self.query_rate > 0
def _handle_adxl345_data(self, params):
with self.lock:
self.raw_samples.append(params)
def start_internal_client(self):
aqh = AccelQueryHelper(self.printer)
self.batch_bulk.add_client(aqh.handle_batch)
return aqh
# Measurement decoding
def _extract_samples(self, raw_samples):
# Load variables to optimize inner loop below
(x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map
last_sequence = self.last_sequence
last_sequence = self.clock_updater.get_last_sequence()
time_base, chip_base, inv_freq = self.clock_sync.get_time_translation()
# Process every message in raw_samples
count = seq = 0
samples = [None] * (len(raw_samples) * SAMPLES_PER_BLOCK)
for params in raw_samples:
seq_diff = (last_sequence - params['sequence']) & 0xffff
seq_diff = (params['sequence'] - last_sequence) & 0xffff
seq_diff -= (seq_diff & 0x8000) << 1
seq = last_sequence - seq_diff
seq = last_sequence + seq_diff
d = bytearray(params['data'])
msg_cdiff = seq * SAMPLES_PER_BLOCK - chip_base
for i in range(len(d) // BYTES_PER_SAMPLE):
@@ -341,33 +300,9 @@ class ADXL345:
break
else:
raise self.printer.command_error("Unable to query adxl345 fifo")
mcu_clock = self.mcu.clock32_to_clock64(params['clock'])
sequence = (self.last_sequence & ~0xffff) | params['next_sequence']
if sequence < self.last_sequence:
sequence += 0x10000
self.last_sequence = sequence
buffered = params['buffered']
limit_count = (self.last_limit_count & ~0xffff) | params['limit_count']
if limit_count < self.last_limit_count:
limit_count += 0x10000
self.last_limit_count = limit_count
duration = params['query_ticks']
if duration > self.max_query_duration:
# Skip measurement as a high query time could skew clock tracking
self.max_query_duration = max(2 * self.max_query_duration,
self.mcu.seconds_to_clock(.000005))
return
self.max_query_duration = 2 * duration
msg_count = (sequence * SAMPLES_PER_BLOCK
+ buffered // BYTES_PER_SAMPLE + fifo)
# The "chip clock" is the message counter plus .5 for average
# inaccuracy of query responses and plus .5 for assumed offset
# of adxl345 hw processing time.
chip_clock = msg_count + 1
self.clock_sync.update(mcu_clock + duration // 2, chip_clock)
self.clock_updater.update_clock(params)
# Start, stop, and process message batches
def _start_measurements(self):
if self.is_measuring():
return
# In case of miswiring, testing ADXL345 device ID prevents treating
# noise or wrong signal as a correctly initialized device
dev_id = self.read_reg(REG_DEVID)
@@ -383,59 +318,35 @@ class ADXL345:
self.set_reg(REG_FIFO_CTL, 0x00)
self.set_reg(REG_BW_RATE, QUERY_RATES[self.data_rate])
self.set_reg(REG_FIFO_CTL, SET_FIFO_CTL)
# Setup samples
with self.lock:
self.raw_samples = []
# Start bulk reading
self.bulk_queue.clear_samples()
systime = self.printer.get_reactor().monotonic()
print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME
reqclock = self.mcu.print_time_to_clock(print_time)
rest_ticks = self.mcu.seconds_to_clock(4. / self.data_rate)
self.query_rate = self.data_rate
self.query_adxl345_cmd.send([self.oid, reqclock, rest_ticks],
reqclock=reqclock)
logging.info("ADXL345 starting '%s' measurements", self.name)
# Initialize clock tracking
self.last_sequence = 0
self.last_limit_count = self.last_error_count = 0
self.clock_sync.reset(reqclock, 0)
self.max_query_duration = 1 << 31
self.clock_updater.note_start(reqclock)
self._update_clock(minclock=reqclock)
self.max_query_duration = 1 << 31
self.clock_updater.clear_duration_filter()
self.last_error_count = 0
def _finish_measurements(self):
if not self.is_measuring():
return
# Halt bulk reading
params = self.query_adxl345_end_cmd.send([self.oid, 0, 0])
self.query_rate = 0
with self.lock:
self.raw_samples = []
self.bulk_queue.clear_samples()
logging.info("ADXL345 finished '%s' measurements", self.name)
# API interface
def _api_update(self, eventtime):
def _process_batch(self, eventtime):
self._update_clock()
with self.lock:
raw_samples = self.raw_samples
self.raw_samples = []
raw_samples = self.bulk_queue.pull_samples()
if not raw_samples:
return {}
samples = self._extract_samples(raw_samples)
if not samples:
return {}
return {'data': samples, 'errors': self.last_error_count,
'overflows': self.last_limit_count}
def _api_startstop(self, is_start):
if is_start:
self._start_measurements()
else:
self._finish_measurements()
def _handle_dump_adxl345(self, web_request):
self.api_dump.add_client(web_request)
hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration')
web_request.send({'header': hdr})
def start_internal_client(self):
cconn = self.api_dump.add_internal_client()
return AccelQueryHelper(self.printer, cconn)
'overflows': self.clock_updater.get_last_limit_count()}
def load_config(config):
return ADXL345(config)


klippy/extras/aht10.py (new file, 162 lines)

@@ -0,0 +1,162 @@
# AHT10/AHT20/AHT21 I2c-based humiditure sensor support
#
# Copyright (C) 2023 Scott Mudge <mail@scottmudge.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import logging
from . import bus
######################################################################
# Compatible Sensors:
# AHT10 - Tested w/ BTT GTR 1.0 MCU on i2c3
# AHT20 - Untested but should work
# AHT21 - Tested w/ BTT GTR 1.0 MCU on i2c3
######################################################################
AHT10_I2C_ADDR= 0x38
AHT10_COMMANDS = {
'INIT' :[0xE1, 0x08, 0x00],
'MEASURE' :[0xAC, 0x33, 0x00],
'RESET' :[0xBA, 0x08, 0x00]
}
AHT10_MAX_BUSY_CYCLES= 5
class AHT10:
def __init__(self, config):
self.printer = config.get_printer()
self.name = config.get_name().split()[-1]
self.reactor = self.printer.get_reactor()
self.i2c = bus.MCU_I2C_from_config(
config, default_addr=AHT10_I2C_ADDR, default_speed=100000)
self.report_time = config.getint('aht10_report_time',30,minval=5)
self.temp = self.min_temp = self.max_temp = self.humidity = 0.
self.sample_timer = self.reactor.register_timer(self._sample_aht10)
self.printer.add_object("aht10 " + self.name, self)
self.printer.register_event_handler("klippy:connect",
self.handle_connect)
self.is_calibrated = False
self.init_sent = False
def handle_connect(self):
self._init_aht10()
self.reactor.update_timer(self.sample_timer, self.reactor.NOW)
def setup_minmax(self, min_temp, max_temp):
self.min_temp = min_temp
self.max_temp = max_temp
def setup_callback(self, cb):
self._callback = cb
def get_report_time_delta(self):
return self.report_time
def _make_measurement(self):
if not self.init_sent:
return False
data = None
is_busy = True
cycles = 0
try:
while is_busy:
# Check if we're constantly busy. If so, send soft-reset
# and issue warning.
if is_busy and cycles > AHT10_MAX_BUSY_CYCLES:
logging.warning("aht10: device reported busy after " +
"%d cycles, resetting device"% AHT10_MAX_BUSY_CYCLES)
self._reset_device()
data = None
break
cycles += 1
# Write command for updating temperature+status bit
self.i2c.i2c_write(AHT10_COMMANDS['MEASURE'])
# Wait 110ms after first read, 75ms minimum
self.reactor.pause(self.reactor.monotonic() + .110)
# Read data
read = self.i2c.i2c_read([], 6)
if read is None:
logging.warning("aht10: received data from" +
" i2c_read is None")
continue
data = bytearray(read['response'])
if len(data) < 6:
logging.warning("aht10: received bytes less than" +
" expected 6 [%d]"%len(data))
continue
self.is_calibrated = True if (data[0] & 0b00000100) else False
is_busy = True if (data[0] & 0b01000000) else False
if is_busy:
return False
except Exception as e:
logging.exception("aht10: exception encountered" +
" reading data: %s"%str(e))
return False
temp = ((data[3] & 0x0F) << 16) | (data[4] << 8) | data[5]
self.temp = ((temp*200) / 1048576) - 50
hum = ((data[1] << 16) | (data[2] << 8) | data[3]) >> 4
self.humidity = int(hum * 100 / 1048576)
# Clamp humidity
if (self.humidity > 100):
self.humidity = 100
elif (self.humidity < 0):
self.humidity = 0
return True
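# --- Illustrative example, not part of this commit: a quick sanity check of
# --- the 20-bit AHT10 conversions above, using assumed raw register values.
example_raw_temp = 0x80000                                # mid-scale 20-bit reading
example_temp = (example_raw_temp * 200) / 1048576 - 50    # -> 50.0 degC
example_raw_hum = 0x40000                                 # post-shift humidity reading
example_hum = int(example_raw_hum * 100 / 1048576)        # -> 25 %RH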
def _reset_device(self):
if not self.init_sent:
return
# Reset device
self.i2c.i2c_write(AHT10_COMMANDS['RESET'])
# Wait 100ms after reset
self.reactor.pause(self.reactor.monotonic() + .10)
def _init_aht10(self):
# Init device
self.i2c.i2c_write(AHT10_COMMANDS['INIT'])
# Wait 100ms after init
self.reactor.pause(self.reactor.monotonic() + .10)
self.init_sent = True
if self._make_measurement():
logging.info("aht10: successfully initialized, initial temp: " +
"%.3f, humidity: %.3f"%(self.temp, self.humidity))
def _sample_aht10(self, eventtime):
if not self._make_measurement():
self.temp = self.humidity = .0
return self.reactor.NEVER
if self.temp < self.min_temp or self.temp > self.max_temp:
self.printer.invoke_shutdown(
"AHT10 temperature %0.1f outside range of %0.1f:%.01f"
% (self.temp, self.min_temp, self.max_temp))
measured_time = self.reactor.monotonic()
print_time = self.i2c.get_mcu().estimated_print_time(measured_time)
self._callback(print_time, self.temp)
return measured_time + self.report_time
def get_status(self, eventtime):
return {
'temperature': round(self.temp, 2),
'humidity': self.humidity,
}
def load_config(config):
# Register sensor
pheater = config.get_printer().lookup_object("heaters")
pheater.add_sensor_factory("AHT10", AHT10)

View File

@@ -3,13 +3,14 @@
# Copyright (C) 2021,2022 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import logging, math, threading
from . import bus, motion_report
import logging, math
from . import bus, bulk_sensor
MIN_MSG_TIME = 0.100
TCODE_ERROR = 0xff
TRINAMIC_DRIVERS = ["tmc2130", "tmc2208", "tmc2209", "tmc2240", "tmc2660", "tmc5160"]
TRINAMIC_DRIVERS = ["tmc2130", "tmc2208", "tmc2209", "tmc2240", "tmc2660",
"tmc5160"]
CALIBRATION_BITS = 6 # 64 entries
ANGLE_BITS = 16 # angles range from 0..65535
@@ -84,9 +85,9 @@ class AngleCalibration:
cal2 = calibration[bucket + 1]
adj = (angle & interp_mask) * (cal2 - cal1)
adj = cal1 + ((adj + interp_round) >> interp_bits)
angle_diff = (angle - adj) & 0xffff
angle_diff = (adj - angle) & 0xffff
angle_diff -= (angle_diff & 0x8000) << 1
new_angle = angle - angle_diff
new_angle = angle + angle_diff
if calibration_reversed:
new_angle = -new_angle
samples[i] = (samp_time, new_angle)
@@ -156,8 +157,14 @@ class AngleCalibration:
def do_calibration_moves(self):
move = self.printer.lookup_object('force_move').manual_move
# Start data collection
angle_sensor = self.printer.lookup_object(self.name)
cconn = angle_sensor.start_internal_client()
msgs = []
is_finished = False
def handle_batch(msg):
if is_finished:
return False
msgs.append(msg)
return True
self.printer.lookup_object(self.name).add_client(handle_batch)
# Move stepper several turns (to allow internal sensor calibration)
microsteps, full_steps = self.get_microsteps()
mcu_stepper = self.mcu_stepper
@@ -189,13 +196,12 @@ class AngleCalibration:
move(mcu_stepper, .5*rotation_dist + align_dist, move_speed)
toolhead.wait_moves()
# Finish data collection
cconn.finalize()
msgs = cconn.get_messages()
is_finished = True
# Correlate query responses
cal = {}
step = 0
for msg in msgs:
for query_time, pos in msg['params']['data']:
for query_time, pos in msg['data']:
# Add to step tracking
while step < len(times) and query_time > times[step][1]:
step += 1
@@ -374,9 +380,9 @@ class HelperTLE5012B:
mcu_clock, chip_clock = self._query_clock()
mdiff = mcu_clock - self.last_chip_mcu_clock
chip_mclock = self.last_chip_clock + int(mdiff * self.chip_freq + .5)
cdiff = (chip_mclock - chip_clock) & 0xffff
cdiff = (chip_clock - chip_mclock) & 0xffff
cdiff -= (cdiff & 0x8000) << 1
new_chip_clock = chip_mclock - cdiff
new_chip_clock = chip_mclock + cdiff
self.chip_freq = float(new_chip_clock - self.last_chip_clock) / mdiff
self.last_chip_clock = new_chip_clock
self.last_chip_mcu_clock = mcu_clock
@@ -405,7 +411,11 @@ class HelperTLE5012B:
parser=lambda x: int(x, 0))
self._write_reg(reg, val)
BYTES_PER_SAMPLE = 3
SAMPLES_PER_BLOCK = 16
SAMPLE_PERIOD = 0.000400
BATCH_UPDATES = 0.100
class Angle:
def __init__(self, config):
@@ -416,9 +426,6 @@ class Angle:
# Measurement conversion
self.start_clock = self.time_shift = self.sample_ticks = 0
self.last_sequence = self.last_angle = 0
# Measurement storage (accessed from background thread)
self.lock = threading.Lock()
self.raw_samples = []
# Sensor type
sensors = { "a1333": HelperA1333, "as5047d": HelperAS5047D,
"tle5012b": HelperTLE5012B }
@@ -438,15 +445,15 @@ class Angle:
"query_spi_angle oid=%d clock=0 rest_ticks=0 time_shift=0"
% (oid,), on_restart=True)
mcu.register_config_callback(self._build_config)
mcu.register_response(self._handle_spi_angle_data,
"spi_angle_data", oid)
# API server endpoints
self.api_dump = motion_report.APIDumpHelper(
self.printer, self._api_update, self._api_startstop, 0.100)
self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "spi_angle_data", oid)
# Process messages in batches
self.batch_bulk = bulk_sensor.BatchBulkHelper(
self.printer, self._process_batch,
self._start_measurements, self._finish_measurements, BATCH_UPDATES)
self.name = config.get_name().split()[1]
wh = self.printer.lookup_object('webhooks')
wh.register_mux_endpoint("angle/dump_angle", "sensor", self.name,
self._handle_dump_angle)
api_resp = {'header': ('time', 'angle')}
self.batch_bulk.add_mux_endpoint("angle/dump_angle",
"sensor", self.name, api_resp)
def _build_config(self):
freq = self.mcu.seconds_to_clock(1.)
while float(TCODE_ERROR << self.time_shift) / freq < 0.002:
@@ -460,12 +467,9 @@ class Angle:
"spi_angle_end oid=%c sequence=%hu", oid=self.oid, cq=cmdqueue)
def get_status(self, eventtime=None):
return {'temperature': self.sensor_helper.last_temperature}
# Measurement collection
def is_measuring(self):
return self.start_clock != 0
def _handle_spi_angle_data(self, params):
with self.lock:
self.raw_samples.append(params)
def add_client(self, client_cb):
self.batch_bulk.add_client(client_cb)
# Measurement decoding
def _extract_samples(self, raw_samples):
# Load variables to optimize inner loop below
sample_ticks = self.sample_ticks
@@ -486,23 +490,23 @@ class Angle:
static_delay = self.sensor_helper.get_static_delay()
# Process every message in raw_samples
count = error_count = 0
samples = [None] * (len(raw_samples) * 16)
samples = [None] * (len(raw_samples) * SAMPLES_PER_BLOCK)
for params in raw_samples:
seq = (last_sequence & ~0xffff) | params['sequence']
if seq < last_sequence:
seq += 0x10000
last_sequence = seq
seq_diff = (params['sequence'] - last_sequence) & 0xffff
last_sequence += seq_diff
samp_count = last_sequence * SAMPLES_PER_BLOCK
msg_mclock = start_clock + samp_count*sample_ticks
d = bytearray(params['data'])
msg_mclock = start_clock + seq*16*sample_ticks
for i in range(len(d) // 3):
tcode = d[i*3]
for i in range(len(d) // BYTES_PER_SAMPLE):
d_ta = d[i*BYTES_PER_SAMPLE:(i+1)*BYTES_PER_SAMPLE]
tcode = d_ta[0]
if tcode == TCODE_ERROR:
error_count += 1
continue
raw_angle = d[i*3 + 1] | (d[i*3 + 2] << 8)
angle_diff = (last_angle - raw_angle) & 0xffff
raw_angle = d_ta[1] | (d_ta[2] << 8)
angle_diff = (raw_angle - last_angle) & 0xffff
angle_diff -= (angle_diff & 0x8000) << 1
last_angle -= angle_diff
last_angle += angle_diff
mclock = msg_mclock + i*sample_ticks
if is_tcode_absolute:
# tcode is tle5012b frame counter
@@ -521,29 +525,14 @@ class Angle:
self.last_angle = last_angle
del samples[count:]
return samples, error_count
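# --- Illustrative example, not part of this commit: the signed 16-bit
# --- wraparound used for angle_diff above (and for the chip clock delta).
example_last, example_raw = 65500, 20
d = (example_raw - example_last) & 0xffff    # 56: sensor wrapped past zero
d -= (d & 0x8000) << 1                       # still 56 (small forward step)
example_last += d                            # 65556: multi-turn tracking
# A step backwards (raw 65480 after 20) would give d = -76 rather than 65460.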
# API interface
def _api_update(self, eventtime):
if self.sensor_helper.is_tcode_absolute:
self.sensor_helper.update_clock()
with self.lock:
raw_samples = self.raw_samples
self.raw_samples = []
if not raw_samples:
return {}
samples, error_count = self._extract_samples(raw_samples)
if not samples:
return {}
offset = self.calibration.apply_calibration(samples)
return {'data': samples, 'errors': error_count,
'position_offset': offset}
# Start, stop, and process message batches
def _is_measuring(self):
return self.start_clock != 0
def _start_measurements(self):
if self.is_measuring():
return
logging.info("Starting angle '%s' measurements", self.name)
self.sensor_helper.start()
# Start bulk reading
with self.lock:
self.raw_samples = []
self.bulk_queue.clear_samples()
self.last_sequence = 0
systime = self.printer.get_reactor().monotonic()
print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME
@@ -553,26 +542,23 @@ class Angle:
self.query_spi_angle_cmd.send([self.oid, reqclock, rest_ticks,
self.time_shift], reqclock=reqclock)
def _finish_measurements(self):
if not self.is_measuring():
return
# Halt bulk reading
params = self.query_spi_angle_end_cmd.send([self.oid, 0, 0, 0])
self.start_clock = 0
with self.lock:
self.raw_samples = []
self.bulk_queue.clear_samples()
self.sensor_helper.last_temperature = None
logging.info("Stopped angle '%s' measurements", self.name)
def _api_startstop(self, is_start):
if is_start:
self._start_measurements()
else:
self._finish_measurements()
def _handle_dump_angle(self, web_request):
self.api_dump.add_client(web_request)
hdr = ('time', 'angle')
web_request.send({'header': hdr})
def start_internal_client(self):
return self.api_dump.add_internal_client()
def _process_batch(self, eventtime):
if self.sensor_helper.is_tcode_absolute:
self.sensor_helper.update_clock()
raw_samples = self.bulk_queue.pull_samples()
if not raw_samples:
return {}
samples, error_count = self._extract_samples(raw_samples)
if not samples:
return {}
offset = self.calibration.apply_calibration(samples)
return {'data': samples, 'errors': error_count,
'position_offset': offset}
def load_config_prefix(config):
return Angle(config)

View File

@@ -0,0 +1,258 @@
# Axis Twist Compensation
#
# Copyright (C) 2022 Jeremy Tan <jeremytkw98@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import math
from . import manual_probe as ManualProbe, bed_mesh as BedMesh
DEFAULT_SAMPLE_COUNT = 3
DEFAULT_SPEED = 50.
DEFAULT_HORIZONTAL_MOVE_Z = 5.
class AxisTwistCompensation:
def __init__(self, config):
# get printer
self.printer = config.get_printer()
self.gcode = self.printer.lookup_object('gcode')
# get values from [axis_twist_compensation] section in printer .cfg
self.horizontal_move_z = config.getfloat('horizontal_move_z',
DEFAULT_HORIZONTAL_MOVE_Z)
self.speed = config.getfloat('speed', DEFAULT_SPEED)
self.calibrate_start_x = config.getfloat('calibrate_start_x')
self.calibrate_end_x = config.getfloat('calibrate_end_x')
self.calibrate_y = config.getfloat('calibrate_y')
self.z_compensations = config.getlists('z_compensations',
default=[], parser=float)
self.compensation_start_x = config.getfloat('compensation_start_x',
default=None)
self.compensation_end_x = config.getfloat('compensation_end_x',
default=None)
self.m = None
self.b = None
# setup calibrater
self.calibrater = Calibrater(self, config)
def get_z_compensation_value(self, pos):
if not self.z_compensations:
return 0
x_coord = pos[0]
z_compensations = self.z_compensations
sample_count = len(z_compensations)
spacing = ((self.calibrate_end_x - self.calibrate_start_x)
/ (sample_count - 1))
interpolate_t = (x_coord - self.calibrate_start_x) / spacing
interpolate_i = int(math.floor(interpolate_t))
interpolate_i = BedMesh.constrain(interpolate_i, 0, sample_count - 2)
interpolate_t -= interpolate_i
interpolated_z_compensation = BedMesh.lerp(
interpolate_t, z_compensations[interpolate_i],
z_compensations[interpolate_i + 1])
return interpolated_z_compensation
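# --- Illustrative example, not part of this commit: the interpolation above
# --- for a hypothetical 3-sample calibration (calibrate_start_x=20,
# --- calibrate_end_x=200); _example_lerp mirrors the BedMesh helper.
def _example_lerp(t, a, b):
    return (1. - t) * a + t * b
_z_comp = [0.0, -0.02, -0.04]
_spacing = (200. - 20.) / (len(_z_comp) - 1)     # 90.0
_t = (65. - 20.) / _spacing                      # 0.5 -> between samples 0 and 1
_i = min(len(_z_comp) - 2, max(0, int(_t)))      # 0
_example = _example_lerp(_t - _i, _z_comp[_i], _z_comp[_i + 1])   # -0.01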
def clear_compensations(self):
self.z_compensations = []
self.m = None
self.b = None
class Calibrater:
def __init__(self, compensation, config):
# setup self attributes
self.compensation = compensation
self.printer = compensation.printer
self.gcode = self.printer.lookup_object('gcode')
self.probe = None
# probe settings are set to none, until they are available
self.lift_speed, self.probe_x_offset, self.probe_y_offset, _ = \
None, None, None, None
self.printer.register_event_handler("klippy:connect",
self._handle_connect)
self.speed = compensation.speed
self.horizontal_move_z = compensation.horizontal_move_z
self.start_point = (compensation.calibrate_start_x,
compensation.calibrate_y)
self.end_point = (compensation.calibrate_end_x,
compensation.calibrate_y)
self.results = None
self.current_point_index = None
self.gcmd = None
self.configname = config.get_name()
# register gcode handlers
self._register_gcode_handlers()
def _handle_connect(self):
self.probe = self.printer.lookup_object('probe', None)
if (self.probe is None):
config = self.printer.lookup_object('configfile')
raise config.error(
"AXIS_TWIST_COMPENSATION requires [probe] to be defined")
self.lift_speed = self.probe.get_lift_speed()
self.probe_x_offset, self.probe_y_offset, _ = \
self.probe.get_offsets()
def _register_gcode_handlers(self):
# register gcode handlers
self.gcode = self.printer.lookup_object('gcode')
self.gcode.register_command(
'AXIS_TWIST_COMPENSATION_CALIBRATE',
self.cmd_AXIS_TWIST_COMPENSATION_CALIBRATE,
desc=self.cmd_AXIS_TWIST_COMPENSATION_CALIBRATE_help)
cmd_AXIS_TWIST_COMPENSATION_CALIBRATE_help = """
Performs the x twist calibration wizard:
measures the z probe offset at n points along the x axis
and calculates the x twist compensation
"""
def cmd_AXIS_TWIST_COMPENSATION_CALIBRATE(self, gcmd):
self.gcmd = gcmd
sample_count = gcmd.get_int('SAMPLE_COUNT', DEFAULT_SAMPLE_COUNT)
# check for valid sample_count
if sample_count is None or sample_count < 2:
raise self.gcmd.error(
"SAMPLE_COUNT to probe must be at least 2")
# clear the current config
self.compensation.clear_compensations()
# calculate some values
x_range = self.end_point[0] - self.start_point[0]
interval_dist = x_range / (sample_count - 1)
nozzle_points = self._calculate_nozzle_points(sample_count,
interval_dist)
probe_points = self._calculate_probe_points(
nozzle_points, self.probe_x_offset, self.probe_y_offset)
# verify no other manual probe is in progress
ManualProbe.verify_no_manual_probe(self.printer)
# begin calibration
self.current_point_index = 0
self.results = []
self._calibration(probe_points, nozzle_points, interval_dist)
def _calculate_nozzle_points(self, sample_count, interval_dist):
# calculate the points to put the probe at, returned as a list of tuples
nozzle_points = []
for i in range(sample_count):
x = self.start_point[0] + i * interval_dist
y = self.start_point[1]
nozzle_points.append((x, y))
return nozzle_points
def _calculate_probe_points(self, nozzle_points,
probe_x_offset, probe_y_offset):
# calculate the points to put the nozzle at
# returned as a list of tuples
probe_points = []
for point in nozzle_points:
x = point[0] - probe_x_offset
y = point[1] - probe_y_offset
probe_points.append((x, y))
return probe_points
def _move_helper(self, target_coordinates, override_speed=None):
# pad target coordinates
target_coordinates = \
(target_coordinates[0], target_coordinates[1], None) \
if len(target_coordinates) == 2 else target_coordinates
toolhead = self.printer.lookup_object('toolhead')
speed = self.speed if target_coordinates[2] is None else self.lift_speed
speed = override_speed if override_speed is not None else speed
toolhead.manual_move(target_coordinates, speed)
def _calibration(self, probe_points, nozzle_points, interval):
# begin the calibration process
self.gcmd.respond_info("AXIS_TWIST_COMPENSATION_CALIBRATE: "
"Probing point %d of %d" % (
self.current_point_index + 1,
len(probe_points)))
# horizontal_move_z (to prevent probe trigger or hitting bed)
self._move_helper((None, None, self.horizontal_move_z))
# move to point to probe
self._move_helper((probe_points[self.current_point_index][0],
probe_points[self.current_point_index][1], None))
# probe the point
self.current_measured_z = self.probe.run_probe(self.gcmd)[2]
# horizontal_move_z (to prevent probe trigger or hitting bed)
self._move_helper((None, None, self.horizontal_move_z))
# move the nozzle over the probe point
self._move_helper((nozzle_points[self.current_point_index]))
# start the manual (nozzle) probe
ManualProbe.ManualProbeHelper(
self.printer, self.gcmd,
self._manual_probe_callback_factory(
probe_points, nozzle_points, interval))
def _manual_probe_callback_factory(self, probe_points,
nozzle_points, interval):
# returns a callback function for the manual probe
is_end = self.current_point_index == len(probe_points) - 1
def callback(kin_pos):
if kin_pos is None:
# probe was cancelled
self.gcmd.respond_info(
"AXIS_TWIST_COMPENSATION_CALIBRATE: Probe cancelled, "
"calibration aborted")
return
z_offset = self.current_measured_z - kin_pos[2]
self.results.append(z_offset)
if is_end:
# end of calibration
self._finalize_calibration()
else:
# move to next point
self.current_point_index += 1
self._calibration(probe_points, nozzle_points, interval)
return callback
def _finalize_calibration(self):
# finalize the calibration process
# calculate average of results
avg = sum(self.results) / len(self.results)
# subtract average from each result
# so that they are independent of z_offset
self.results = [avg - x for x in self.results]
# save the config
configfile = self.printer.lookup_object('configfile')
values_as_str = ', '.join(["{:.6f}".format(x)
for x in self.results])
configfile.set(self.configname, 'z_compensations', values_as_str)
configfile.set(self.configname, 'compensation_start_x',
self.start_point[0])
configfile.set(self.configname, 'compensation_end_x',
self.end_point[0])
self.compensation.z_compensations = self.results
self.compensation.compensation_start_x = self.start_point[0]
self.compensation.compensation_end_x = self.end_point[0]
self.gcode.respond_info(
"AXIS_TWIST_COMPENSATION state has been saved "
"for the current session. The SAVE_CONFIG command will "
"update the printer config file and restart the printer.")
# output result
self.gcmd.respond_info(
"AXIS_TWIST_COMPENSATION_CALIBRATE: Calibration complete, "
"offsets: %s, mean z_offset: %f"
% (self.results, avg))
# klipper's entry point using [axis_twist_compensation] section in printer.cfg
def load_config(config):
return AxisTwistCompensation(config)

View File

@@ -1,6 +1,5 @@
# Mesh Bed Leveling
#
# Copyright (C) 2018 Kevin O'Connor <kevin@koconnor.net>
# Copyright (C) 2018-2019 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
@@ -103,6 +102,7 @@ class BedMesh:
self.log_fade_complete = False
self.base_fade_target = config.getfloat('fade_target', None)
self.fade_target = 0.
self.tool_offset = 0.
self.gcode = self.printer.lookup_object('gcode')
self.splitter = MoveSplitter(config, self.gcode)
# setup persistent storage
@@ -129,12 +129,11 @@ class BedMesh:
def handle_connect(self):
self.toolhead = self.printer.lookup_object('toolhead')
self.bmc.print_generated_points(logging.info)
self.pmgr.initialize()
def set_mesh(self, mesh):
if mesh is not None and self.fade_end != self.FADE_DISABLE:
self.log_fade_complete = True
if self.base_fade_target is None:
self.fade_target = mesh.avg_z
self.fade_target = mesh.get_z_average()
else:
self.fade_target = self.base_fade_target
min_z, max_z = mesh.get_z_range()
@@ -159,6 +158,7 @@ class BedMesh:
"mesh max: %.4f" % (self.fade_dist, min_z, max_z))
else:
self.fade_target = 0.
self.tool_offset = 0.
self.z_mesh = mesh
self.splitter.initialize(mesh, self.fade_target)
# cache the current position before a transform takes place
@@ -166,6 +166,7 @@ class BedMesh:
gcode_move.reset_last_position()
self.update_status()
def get_z_factor(self, z_pos):
z_pos += self.tool_offset
if z_pos >= self.fade_end:
return 0.
elif z_pos >= self.fade_start:
@@ -184,14 +185,15 @@ class BedMesh:
max_adj = self.z_mesh.calc_z(x, y)
factor = 1.
z_adj = max_adj - self.fade_target
if min(z, (z - max_adj)) >= self.fade_end:
fade_z_pos = z + self.tool_offset
if min(fade_z_pos, (fade_z_pos - max_adj)) >= self.fade_end:
# Fade out is complete, no factor
factor = 0.
elif max(z, (z - max_adj)) >= self.fade_start:
elif max(fade_z_pos, (fade_z_pos - max_adj)) >= self.fade_start:
# Likely in the process of fading out adjustment.
# Because we don't yet know the gcode z position, use
# algebra to calculate the factor from the toolhead pos
factor = ((self.fade_end + self.fade_target - z) /
factor = ((self.fade_end + self.fade_target - fade_z_pos) /
(self.fade_dist - z_adj))
factor = constrain(factor, 0., 1.)
final_z_adj = factor * z_adj + self.fade_target
@@ -235,7 +237,7 @@ class BedMesh:
mesh_max = (params['max_x'], params['max_y'])
probed_matrix = self.z_mesh.get_probed_matrix()
mesh_matrix = self.z_mesh.get_mesh_matrix()
self.status['profile_name'] = self.pmgr.get_current_profile()
self.status['profile_name'] = self.z_mesh.get_profile_name()
self.status['mesh_min'] = mesh_min
self.status['mesh_max'] = mesh_max
self.status['probed_matrix'] = probed_matrix
@@ -273,12 +275,21 @@ class BedMesh:
for i, axis in enumerate(['X', 'Y']):
offsets[i] = gcmd.get_float(axis, None)
self.z_mesh.set_mesh_offsets(offsets)
tool_offset = gcmd.get_float("ZFADE", None)
if tool_offset is not None:
self.tool_offset = tool_offset
gcode_move = self.printer.lookup_object('gcode_move')
gcode_move.reset_last_position()
else:
gcmd.respond_info("No mesh loaded to offset")
class ZrefMode:
DISABLED = 0 # Zero reference disabled
IN_MESH = 1 # Zero reference position within mesh
PROBE = 2 # Zero reference position outside of mesh, probe needed
class BedMeshCalibrate:
ALGOS = ['lagrange', 'bicubic']
def __init__(self, config, bedmesh):
@@ -286,17 +297,18 @@ class BedMeshCalibrate:
self.orig_config = {'radius': None, 'origin': None}
self.radius = self.origin = None
self.mesh_min = self.mesh_max = (0., 0.)
self.relative_reference_index = config.getint(
'relative_reference_index', None)
self.adaptive_margin = config.getfloat('adaptive_margin', 0.0)
self.zero_ref_pos = config.getfloatlist(
"zero_reference_position", None, count=2
)
self.zero_reference_mode = ZrefMode.DISABLED
self.faulty_regions = []
self.substituted_indices = collections.OrderedDict()
self.orig_config['rri'] = self.relative_reference_index
self.bedmesh = bedmesh
self.mesh_config = collections.OrderedDict()
self._init_mesh_config(config)
self._generate_points(config.error)
self._profile_name = None
self.orig_points = self.points
self._profile_name = "default"
self.probe_helper = probe.ProbePointsHelper(
config, self.probe_finalize, self._get_adjusted_points())
self.probe_helper.minimum_points(3)
@@ -305,7 +317,13 @@ class BedMeshCalibrate:
self.gcode.register_command(
'BED_MESH_CALIBRATE', self.cmd_BED_MESH_CALIBRATE,
desc=self.cmd_BED_MESH_CALIBRATE_help)
def _generate_points(self, error):
# Save z offset temporarily and apply to bed mesh
self.probed_z_offset = 0
self.apply_to_bed_mesh = False
self.gcode.register_command(
'SAVE_Z_OFFSET_TO_BED_MESH',
self.cmd_SAVE_Z_OFFSET_TO_BED_MESH)
def _generate_points(self, error, probe_method="automatic"):
x_cnt = self.mesh_config['x_count']
y_cnt = self.mesh_config['y_count']
min_x, min_y = self.mesh_min
@@ -315,7 +333,7 @@ class BedMeshCalibrate:
# floor distances down to next hundredth
x_dist = math.floor(x_dist * 100) / 100
y_dist = math.floor(y_dist * 100) / 100
if x_dist <= 1. or y_dist <= 1.:
if x_dist < 1. or y_dist < 1.:
raise error("bed_mesh: min/max points too close together")
if self.radius is not None:
@@ -348,9 +366,32 @@ class BedMeshCalibrate:
(self.origin[0] + pos_x, self.origin[1] + pos_y))
pos_y += y_dist
self.points = points
if self.zero_ref_pos is None or probe_method == "manual":
# Zero Reference Disabled
self.zero_reference_mode = ZrefMode.DISABLED
elif within(self.zero_ref_pos, self.mesh_min, self.mesh_max):
# Zero Reference position within mesh
self.zero_reference_mode = ZrefMode.IN_MESH
else:
# Zero Reference position outside of mesh
self.zero_reference_mode = ZrefMode.PROBE
if not self.faulty_regions:
return
self.substituted_indices.clear()
if self.zero_reference_mode == ZrefMode.PROBE:
# Cannot probe a reference within a faulty region
for min_c, max_c in self.faulty_regions:
if within(self.zero_ref_pos, min_c, max_c):
opt = "zero_reference_position"
raise error(
"bed_mesh: Cannot probe zero reference position at "
"(%.2f, %.2f) as it is located within a faulty region."
" Check the value for option '%s'"
% (self.zero_ref_pos[0], self.zero_ref_pos[1], opt,)
)
# Check to see if any points fall within faulty regions
if probe_method == "manual":
return
last_y = self.points[0][1]
is_reversed = False
for i, coord in enumerate(self.points):
@@ -399,11 +440,11 @@ class BedMeshCalibrate:
mesh_pt = "(%.1f, %.1f)" % (x, y)
print_func(
" %-4d| %-16s| %s" % (i, adj_pt, mesh_pt))
if self.relative_reference_index is not None:
rri = self.relative_reference_index
if self.zero_ref_pos is not None:
print_func(
"bed_mesh: relative_reference_index %d is (%.2f, %.2f)"
% (rri, self.points[rri][0], self.points[rri][1]))
"bed_mesh: zero_reference_position is (%.2f, %.2f)"
% (self.zero_ref_pos[0], self.zero_ref_pos[1])
)
if self.substituted_indices:
print_func("bed_mesh: faulty region points")
for i, v in self.substituted_indices.items():
@@ -519,11 +560,117 @@ class BedMeshCalibrate:
"interpolation. Configured Probe Count: %d, %d" %
(self.mesh_config['x_count'], self.mesh_config['y_count']))
params['algo'] = 'lagrange'
def set_adaptive_mesh(self, gcmd):
if not gcmd.get_int('ADAPTIVE', 0):
return False
exclude_objects = self.printer.lookup_object("exclude_object", None)
if exclude_objects is None:
gcmd.respond_info("Exclude objects not enabled. Using full mesh...")
return False
objects = exclude_objects.get_status().get("objects", [])
if not objects:
return False
margin = gcmd.get_float('ADAPTIVE_MARGIN', self.adaptive_margin)
# List all exclude_object points by axis and iterate over
# all polygon points, and pick the min and max of each axis
list_of_xs = []
list_of_ys = []
gcmd.respond_info("Found %s objects" % (len(objects)))
for obj in objects:
for point in obj["polygon"]:
list_of_xs.append(point[0])
list_of_ys.append(point[1])
# Define bounds of adaptive mesh area
mesh_min = [min(list_of_xs), min(list_of_ys)]
mesh_max = [max(list_of_xs), max(list_of_ys)]
adjusted_mesh_min = [x - margin for x in mesh_min]
adjusted_mesh_max = [x + margin for x in mesh_max]
# Force margin to respect original mesh bounds
adjusted_mesh_min[0] = max(adjusted_mesh_min[0],
self.orig_config["mesh_min"][0])
adjusted_mesh_min[1] = max(adjusted_mesh_min[1],
self.orig_config["mesh_min"][1])
adjusted_mesh_max[0] = min(adjusted_mesh_max[0],
self.orig_config["mesh_max"][0])
adjusted_mesh_max[1] = min(adjusted_mesh_max[1],
self.orig_config["mesh_max"][1])
adjusted_mesh_size = (adjusted_mesh_max[0] - adjusted_mesh_min[0],
adjusted_mesh_max[1] - adjusted_mesh_min[1])
# Compute a ratio between the adapted and original sizes
ratio = (adjusted_mesh_size[0] /
(self.orig_config["mesh_max"][0] -
self.orig_config["mesh_min"][0]),
adjusted_mesh_size[1] /
(self.orig_config["mesh_max"][1] -
self.orig_config["mesh_min"][1]))
gcmd.respond_info("Original mesh bounds: (%s,%s)" %
(self.orig_config["mesh_min"],
self.orig_config["mesh_max"]))
gcmd.respond_info("Original probe count: (%s,%s)" %
(self.mesh_config["x_count"],
self.mesh_config["y_count"]))
gcmd.respond_info("Adapted mesh bounds: (%s,%s)" %
(adjusted_mesh_min, adjusted_mesh_max))
gcmd.respond_info("Ratio: (%s, %s)" % ratio)
new_x_probe_count = int(
math.ceil(self.mesh_config["x_count"] * ratio[0]))
new_y_probe_count = int(
math.ceil(self.mesh_config["y_count"] * ratio[1]))
# There is one case where we may have to adjust the probe counts:
# axis0 < 4 and axis1 > 6 (see _verify_algorithm).
min_num_of_probes = 3
if max(new_x_probe_count, new_y_probe_count) > 6 and \
min(new_x_probe_count, new_y_probe_count) < 4:
min_num_of_probes = 4
new_x_probe_count = max(min_num_of_probes, new_x_probe_count)
new_y_probe_count = max(min_num_of_probes, new_y_probe_count)
gcmd.respond_info("Adapted probe count: (%s,%s)" %
(new_x_probe_count, new_y_probe_count))
# If the adapted mesh size is too small, adjust it to something
# useful.
adjusted_mesh_size = (max(adjusted_mesh_size[0], new_x_probe_count),
max(adjusted_mesh_size[1], new_y_probe_count))
if self.radius is not None:
adapted_radius = math.sqrt((adjusted_mesh_size[0] ** 2) +
(adjusted_mesh_size[1] ** 2)) / 2
adapted_origin = (adjusted_mesh_min[0] +
(adjusted_mesh_size[0] / 2),
adjusted_mesh_min[1] +
(adjusted_mesh_size[1] / 2))
to_adapted_origin = math.sqrt(adapted_origin[0]**2 +
adapted_origin[1]**2)
# If the adapted mesh size is smaller than the default/full
# mesh, adjust the parameters. Otherwise, just do the full mesh.
if adapted_radius + to_adapted_origin < self.radius:
self.radius = adapted_radius
self.origin = adapted_origin
self.mesh_min = (-self.radius, -self.radius)
self.mesh_max = (self.radius, self.radius)
self.mesh_config["x_count"] = self.mesh_config["y_count"] = \
max(new_x_probe_count, new_y_probe_count)
else:
self.mesh_min = adjusted_mesh_min
self.mesh_max = adjusted_mesh_max
self.mesh_config["x_count"] = new_x_probe_count
self.mesh_config["y_count"] = new_y_probe_count
self._profile_name = None
return True
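# --- Illustrative example, not part of this commit: rough numbers for the
# --- scaling above.  Assume a 0..200 x 0..200 mesh probed 5x5 and a single
# --- excluded-object footprint of (50,50)-(120,150) with ADAPTIVE_MARGIN=5:
# --- the adapted bounds are (45,45)-(125,155), size (80,110), ratio
# --- (0.4,0.55), so the counts become ceil(5*.4)=2 and ceil(5*.55)=3, and
# --- both are then raised to the 3-probe minimum for a 3x3 adaptive mesh.
import math
_example_counts = [max(3, int(math.ceil(5 * r))) for r in (80./200., 110./200.)]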
def update_config(self, gcmd):
# reset default configuration
self.radius = self.orig_config['radius']
self.origin = self.orig_config['origin']
self.relative_reference_index = self.orig_config['rri']
self.mesh_min = self.orig_config['mesh_min']
self.mesh_max = self.orig_config['mesh_max']
for key in list(self.mesh_config.keys()):
@@ -531,12 +678,6 @@ class BedMeshCalibrate:
params = gcmd.get_command_parameters()
need_cfg_update = False
if 'RELATIVE_REFERENCE_INDEX' in params:
self.relative_reference_index = gcmd.get_int(
'RELATIVE_REFERENCE_INDEX')
if self.relative_reference_index < 0:
self.relative_reference_index = None
need_cfg_update = True
if self.radius is not None:
if "MESH_RADIUS" in params:
self.radius = gcmd.get_float("MESH_RADIUS")
@@ -569,45 +710,64 @@ class BedMeshCalibrate:
self.mesh_config['algo'] = gcmd.get('ALGORITHM').strip().lower()
need_cfg_update = True
need_cfg_update |= self.set_adaptive_mesh(gcmd)
probe_method = gcmd.get("METHOD", "automatic")
if need_cfg_update:
self._verify_algorithm(gcmd.error)
self._generate_points(gcmd.error)
self._generate_points(gcmd.error, probe_method)
gcmd.respond_info("Generating new points...")
self.print_generated_points(gcmd.respond_info)
pts = self._get_adjusted_points()
self.probe_helper.update_probe_points(pts, 3)
msg = "relative_reference_index: %s\n" % \
(self.relative_reference_index)
msg += "\n".join(["%s: %s" % (k, v) for k, v
in self.mesh_config.items()])
msg = "\n".join(["%s: %s" % (k, v)
for k, v in self.mesh_config.items()])
logging.info("Updated Mesh Configuration:\n" + msg)
else:
self.points = self.orig_points
self._generate_points(gcmd.error, probe_method)
pts = self._get_adjusted_points()
self.probe_helper.update_probe_points(pts, 3)
def _get_adjusted_points(self):
if not self.substituted_indices:
return self.points
adj_pts = []
last_index = 0
for i, pts in self.substituted_indices.items():
adj_pts.extend(self.points[last_index:i])
adj_pts.extend(pts)
# Add one to the last index to skip the point
# we are replacing
last_index = i + 1
adj_pts.extend(self.points[last_index:])
if self.substituted_indices:
last_index = 0
for i, pts in self.substituted_indices.items():
adj_pts.extend(self.points[last_index:i])
adj_pts.extend(pts)
# Add one to the last index to skip the point
# we are replacing
last_index = i + 1
adj_pts.extend(self.points[last_index:])
else:
adj_pts = list(self.points)
if self.zero_reference_mode == ZrefMode.PROBE:
adj_pts.append(self.zero_ref_pos)
return adj_pts
cmd_BED_MESH_CALIBRATE_help = "Perform Mesh Bed Leveling"
def cmd_BED_MESH_CALIBRATE(self, gcmd):
self._profile_name = gcmd.get('PROFILE', "default")
if not self._profile_name.strip():
raise gcmd.error("Value for parameter 'PROFILE' must be specified")
self.bedmesh.set_mesh(None)
self.update_config(gcmd)
self.probe_helper.start_probe(gcmd)
def cmd_SAVE_Z_OFFSET_TO_BED_MESH(self, gcmd):
self.probed_z_offset = self.printer.lookup_object('probe').last_z_result
self.apply_to_bed_mesh = gcmd.get('APPLY', True)
def probe_finalize(self, offsets, positions):
x_offset, y_offset, z_offset = offsets
positions = [[round(p[0], 2), round(p[1], 2), p[2]]
for p in positions]
if self.zero_reference_mode == ZrefMode.PROBE:
ref_pos = positions.pop()
logging.info(
"bed_mesh: z-offset replaced with probed z value at "
"position (%.2f, %.2f, %.6f)"
% (ref_pos[0], ref_pos[1], ref_pos[2])
)
z_offset = ref_pos[2]
params = dict(self.mesh_config)
params['min_x'] = min(positions, key=lambda p: p[0])[0] + x_offset
params['max_x'] = max(positions, key=lambda p: p[0])[0] + x_offset
@@ -658,11 +818,6 @@ class BedMeshCalibrate:
% (off_pt[0], off_pt[1], probed[0], probed[1]))
positions = corrected_pts
if self.relative_reference_index is not None:
# zero out probe z offset and
# set offset relative to reference index
z_offset = positions[self.relative_reference_index][2]
probed_matrix = []
row = []
prev_pos = positions[0]
@@ -714,14 +869,25 @@ class BedMeshCalibrate:
"Probed table length: %d Probed Table:\n%s") %
(len(probed_matrix), str(probed_matrix)))
z_mesh = ZMesh(params)
z_mesh = ZMesh(params, self._profile_name)
try:
if self.apply_to_bed_mesh:
for row in range(len(probed_matrix)):
for col in range(len(probed_matrix[row])):
probed_matrix[row][col] -= self.probed_z_offset
self.apply_to_bed_mesh = False
z_mesh.build_mesh(probed_matrix)
except BedMeshError as e:
raise self.gcode.error(str(e))
if self.zero_reference_mode == ZrefMode.IN_MESH:
# The reference can be anywhere in the mesh, therefore
# it is necessary to set the reference after the initial mesh
# is generated to look up the correct z value.
z_mesh.set_zero_reference(*self.zero_ref_pos)
self.bedmesh.set_mesh(z_mesh)
self.gcode.respond_info("Mesh Bed Leveling Complete")
self.bedmesh.save_profile(self._profile_name)
if self._profile_name is not None:
self.bedmesh.save_profile(self._profile_name)
def _dump_points(self, probed_pts, corrected_pts, offsets):
# logs generated points with offset applied, points received
# from the finalize callback, and the list of corrected points
@@ -807,10 +973,10 @@ class MoveSplitter:
class ZMesh:
def __init__(self, params):
def __init__(self, params, name):
self.profile_name = name or "adaptive-%X" % (id(self),)
self.probed_matrix = self.mesh_matrix = None
self.mesh_params = params
self.avg_z = 0.
self.mesh_offsets = [0., 0.]
logging.debug('bed_mesh: probe/mesh parameters:')
for key, value in self.mesh_params.items():
@@ -857,6 +1023,8 @@ class ZMesh:
return [[]]
def get_mesh_params(self):
return self.mesh_params
def get_profile_name(self):
return self.profile_name
def print_probed_matrix(self, print_func):
if self.probed_matrix is not None:
msg = "Mesh Leveling Probed Z positions:\n"
@@ -875,7 +1043,7 @@ class ZMesh:
msg += "Search Height: %d\n" % (move_z)
msg += "Mesh Offsets: X=%.4f, Y=%.4f\n" % (
self.mesh_offsets[0], self.mesh_offsets[1])
msg += "Mesh Average: %.2f\n" % (self.avg_z)
msg += "Mesh Average: %.2f\n" % (self.get_z_average())
rng = self.get_z_range()
msg += "Mesh Range: min=%.4f max=%.4f\n" % (rng[0], rng[1])
msg += "Interpolation Algorithm: %s\n" \
@@ -891,13 +1059,17 @@ class ZMesh:
def build_mesh(self, z_matrix):
self.probed_matrix = z_matrix
self._sample(z_matrix)
self.avg_z = (sum([sum(x) for x in self.mesh_matrix]) /
sum([len(x) for x in self.mesh_matrix]))
# Round average to the nearest 100th. This
# should produce an offset that is divisible by common
# z step distances
self.avg_z = round(self.avg_z, 2)
self.print_mesh(logging.debug)
def set_zero_reference(self, xpos, ypos):
offset = self.calc_z(xpos, ypos)
logging.info(
"bed_mesh: setting zero reference at (%.2f, %.2f, %.6f)"
% (xpos, ypos, offset)
)
for matrix in [self.probed_matrix, self.mesh_matrix]:
for yidx in range(len(matrix)):
for xidx in range(len(matrix[yidx])):
matrix[yidx][xidx] -= offset
def set_mesh_offsets(self, offsets):
for i, o in enumerate(offsets):
if o is not None:
@@ -924,6 +1096,16 @@ class ZMesh:
return mesh_min, mesh_max
else:
return 0., 0.
def get_z_average(self):
if self.mesh_matrix is not None:
avg_z = (sum([sum(x) for x in self.mesh_matrix]) /
sum([len(x) for x in self.mesh_matrix]))
# Round average to the nearest 100th. This
# should produce an offset that is divisible by common
# z step distances
return round(avg_z, 2)
else:
return 0.
def _get_linear_index(self, coord, axis):
if axis == 0:
# X-axis
@@ -1103,7 +1285,6 @@ class ProfileManager:
self.gcode = self.printer.lookup_object('gcode')
self.bedmesh = bedmesh
self.profiles = {}
self.current_profile = ""
self.incompatible_profiles = []
# Fetch stored profiles from Config
stored_profs = config.get_prefix_sections(self.name)
@@ -1135,14 +1316,8 @@ class ProfileManager:
self.gcode.register_command(
'BED_MESH_PROFILE', self.cmd_BED_MESH_PROFILE,
desc=self.cmd_BED_MESH_PROFILE_help)
def initialize(self):
self._check_incompatible_profiles()
if "default" in self.profiles:
self.load_profile("default")
def get_profiles(self):
return self.profiles
def get_current_profile(self):
return self.current_profile
def _check_incompatible_profiles(self):
if self.incompatible_profiles:
configfile = self.printer.lookup_object('configfile')
@@ -1183,7 +1358,6 @@ class ProfileManager:
profile['points'] = probed_matrix
profile['mesh_params'] = collections.OrderedDict(mesh_params)
self.profiles = profiles
self.current_profile = prof_name
self.bedmesh.update_status()
self.gcode.respond_info(
"Bed Mesh state has been saved to profile [%s]\n"
@@ -1197,12 +1371,11 @@ class ProfileManager:
"bed_mesh: Unknown profile [%s]" % prof_name)
probed_matrix = profile['points']
mesh_params = profile['mesh_params']
z_mesh = ZMesh(mesh_params)
z_mesh = ZMesh(mesh_params, prof_name)
try:
z_mesh.build_mesh(probed_matrix)
except BedMeshError as e:
raise self.gcode.error(str(e))
self.current_profile = prof_name
self.bedmesh.set_mesh(z_mesh)
def remove_profile(self, prof_name):
if prof_name in self.profiles:
@@ -1229,6 +1402,10 @@ class ProfileManager:
for key in options:
name = gcmd.get(key, None)
if name is not None:
if not name.strip():
raise gcmd.error(
"Value for parameter '%s' must be specified" % (key)
)
if name == "default" and key == 'SAVE':
gcmd.respond_info(
"Profile 'default' is reserved, please choose"

Binary file not shown.

View File

@@ -7,9 +7,7 @@
class BedScrews:
def __init__(self, config):
self.printer = config.get_printer()
self.state = None
self.current_screw = 0
self.accepted_screws = 0
self.reset()
self.number_of_screws = 0
# Read config
screws = []
@@ -39,8 +37,17 @@ class BedScrews:
self.gcode.register_command("BED_SCREWS_ADJUST",
self.cmd_BED_SCREWS_ADJUST,
desc=self.cmd_BED_SCREWS_ADJUST_help)
def reset(self):
self.state = None
self.current_screw = 0
self.accepted_screws = 0
def move(self, coord, speed):
self.printer.lookup_object('toolhead').manual_move(coord, speed)
try:
self.printer.lookup_object('toolhead').manual_move(coord, speed)
except self.printer.command_error as e:
self.unregister_commands()
self.reset()
raise
def move_to_screw(self, state, screw):
# Move up, over, and then down
self.move((None, None, self.horizontal_move_z), self.lift_speed)
@@ -64,6 +71,13 @@ class BedScrews:
self.gcode.register_command('ACCEPT', None)
self.gcode.register_command('ADJUSTED', None)
self.gcode.register_command('ABORT', None)
def get_status(self, eventtime):
return {
'is_active': self.state is not None,
'state': self.state,
'current_screw': self.current_screw,
'accepted_screws': self.accepted_screws
}
cmd_BED_SCREWS_ADJUST_help = "Tool to help adjust bed leveling screws"
def cmd_BED_SCREWS_ADJUST(self, gcmd):
if self.state is not None:
@@ -92,7 +106,7 @@ class BedScrews:
self.move_to_screw('fine', 0)
return
# Done
self.state = None
self.reset()
self.move((None, None, self.horizontal_move_z), self.lift_speed)
gcmd.respond_info("Bed screws tool completed successfully")
cmd_ADJUSTED_help = "Accept bed screw position after notable adjustment"
@@ -103,7 +117,7 @@ class BedScrews:
cmd_ABORT_help = "Abort bed screws tool"
def cmd_ABORT(self, gcmd):
self.unregister_commands()
self.state = None
self.reset()
def load_config(config):
return BedScrews(config)

Binary file not shown.

View File

@@ -8,6 +8,7 @@ from . import bus
REPORT_TIME = .8
BME280_CHIP_ADDR = 0x76
BME280_REGS = {
'RESET': 0xE0, 'CTRL_HUM': 0xF2,
'STATUS': 0xF3, 'CTRL_MEAS': 0xF4, 'CONFIG': 0xF5,
@@ -46,6 +47,16 @@ BME680_GAS_CONSTANTS = {
15: (1., 244.140625)
}
BMP180_REGS = {
'RESET': 0xE0,
'CAL_1': 0xAA,
'CTRL_MEAS': 0xF4,
'REG_MSB': 0xF6,
'REG_LSB': 0xF7,
'CRV_TEMP': 0x2E,
'CRV_PRES': 0x34
}
STATUS_MEASURING = 1 << 3
STATUS_IM_UPDATE = 1
MODE = 1
@@ -57,7 +68,7 @@ MEASURE_DONE = 1 << 5
RESET_CHIP_VALUE = 0xB6
BME_CHIPS = {
0x58: 'BMP280', 0x60: 'BME280', 0x61: 'BME680'
0x58: 'BMP280', 0x60: 'BME280', 0x61: 'BME680', 0x55: 'BMP180'
}
BME_CHIP_ID_REG = 0xD0
@@ -81,6 +92,14 @@ def get_signed_byte(bits):
return get_twos_complement(bits, 8)
def get_unsigned_short_msb(bits):
return bits[0] << 8 | bits[1]
def get_signed_short_msb(bits):
val = get_unsigned_short_msb(bits)
return get_twos_complement(val, 16)
class BME280:
def __init__(self, config):
self.printer = config.get_printer()
@@ -188,6 +207,23 @@ class BME280:
dig['G3'] = get_signed_byte(calib_data_2[13])
return dig
def read_calibration_data_bmp180(calib_data_1):
dig = {}
dig['AC1'] = get_signed_short_msb(calib_data_1[0:2])
dig['AC2'] = get_signed_short_msb(calib_data_1[2:4])
dig['AC3'] = get_signed_short_msb(calib_data_1[4:6])
dig['AC4'] = get_unsigned_short_msb(calib_data_1[6:8])
dig['AC5'] = get_unsigned_short_msb(calib_data_1[8:10])
dig['AC6'] = get_unsigned_short_msb(calib_data_1[10:12])
dig['B1'] = get_signed_short_msb(calib_data_1[12:14])
dig['B2'] = get_signed_short_msb(calib_data_1[14:16])
dig['MB'] = get_signed_short_msb(calib_data_1[16:18])
dig['MC'] = get_signed_short_msb(calib_data_1[18:20])
dig['MD'] = get_signed_short_msb(calib_data_1[20:22])
return dig
chip_id = self.read_id()
if chip_id not in BME_CHIPS.keys():
logging.info("bme280: Unknown Chip ID received %#x" % chip_id)
@@ -201,15 +237,21 @@ class BME280:
self.reactor.pause(self.reactor.monotonic() + .5)
# Make sure non-volatile memory has been copied to registers
status = self.read_register('STATUS', 1)[0]
while status & STATUS_IM_UPDATE:
self.reactor.pause(self.reactor.monotonic() + .01)
if self.chip_type != 'BMP180':
# BMP180 has no status register available
status = self.read_register('STATUS', 1)[0]
while status & STATUS_IM_UPDATE:
self.reactor.pause(self.reactor.monotonic() + .01)
status = self.read_register('STATUS', 1)[0]
if self.chip_type == 'BME680':
self.max_sample_time = 0.5
self.sample_timer = self.reactor.register_timer(self._sample_bme680)
self.chip_registers = BME680_REGS
elif self.chip_type == 'BMP180':
self.max_sample_time = (1.25 + ((2.3 * self.os_pres) + .575)) / 1000
self.sample_timer = self.reactor.register_timer(self._sample_bmp180)
self.chip_registers = BMP180_REGS
else:
self.max_sample_time = \
(1.25 + (2.3 * self.os_temp) + ((2.3 * self.os_pres) + .575)
@@ -221,14 +263,19 @@ class BME280:
self.write_register('CONFIG', (self.iir_filter & 0x07) << 2)
# Read out and calculate the trimming parameters
cal_1 = self.read_register('CAL_1', 26)
cal_2 = self.read_register('CAL_2', 16)
if self.chip_type == 'BMP180':
cal_1 = self.read_register('CAL_1', 22)
else:
cal_1 = self.read_register('CAL_1', 26)
cal_2 = self.read_register('CAL_2', 16)
if self.chip_type == 'BME280':
self.dig = read_calibration_data_bme280(cal_1, cal_2)
elif self.chip_type == 'BMP280':
self.dig = read_calibration_data_bmp280(cal_1)
elif self.chip_type == 'BME680':
self.dig = read_calibration_data_bme680(cal_1, cal_2)
elif self.chip_type == 'BMP180':
self.dig = read_calibration_data_bmp180(cal_1)
def _sample_bme280(self, eventtime):
# Enter forced mode
@@ -332,7 +379,44 @@ class BME280:
% (self.temp, self.min_temp, self.max_temp))
measured_time = self.reactor.monotonic()
self._callback(self.mcu.estimated_print_time(measured_time), self.temp)
return measured_time + REPORT_TIME * 4
return measured_time + REPORT_TIME
def _sample_bmp180(self, eventtime):
meas = self.chip_registers['CRV_TEMP']
self.write_register('CTRL_MEAS', meas)
try:
self.reactor.pause(self.reactor.monotonic() + .01)
data = self.read_register('REG_MSB', 2)
temp_raw = (data[0] << 8) | data[1]
except Exception:
logging.exception("BMP180: Error reading temperature")
self.temp = self.pressure = .0
return self.reactor.NEVER
meas = self.chip_registers['CRV_PRES'] | (self.os_pres << 6)
self.write_register('CTRL_MEAS', meas)
try:
self.reactor.pause(self.reactor.monotonic() + .01)
data = self.read_register('REG_MSB', 3)
pressure_raw = \
((data[0] << 16)|(data[1] << 8)|data[2]) >> (8 - self.os_pres)
except Exception:
logging.exception("BMP180: Error reading pressure")
self.temp = self.pressure = .0
return self.reactor.NEVER
self.temp = self._compensate_temp_bmp180(temp_raw)
self.pressure = self._compensate_pressure_bmp180(pressure_raw) / 100.
if self.temp < self.min_temp or self.temp > self.max_temp:
self.printer.invoke_shutdown(
"BMP180 temperature %0.1f outside range of %0.1f:%.01f"
% (self.temp, self.min_temp, self.max_temp))
measured_time = self.reactor.monotonic()
self._callback(self.mcu.estimated_print_time(measured_time), self.temp)
return measured_time + REPORT_TIME
def _compensate_temp(self, raw_temp):
dig = self.dig
@@ -443,6 +527,37 @@ class BME280:
return duration_reg
def _compensate_temp_bmp180(self, raw_temp):
dig = self.dig
x1 = (raw_temp - dig['AC6']) * dig['AC5'] / 32768.
x2 = dig['MC'] * 2048 / (x1 + dig['MD'])
b5 = x1 + x2
self.t_fine = b5
return (b5 + 8)/16./10.
def _compensate_pressure_bmp180(self, raw_pressure):
dig = self.dig
b5 = self.t_fine
b6 = b5 - 4000
x1 = (dig['B2'] * (b6 * b6 / 4096)) / 2048
x2 = dig['AC2'] * b6 / 2048
x3 = x1 + x2
b3 = ((int(dig['AC1'] * 4 + x3) << self.os_pres) + 2) / 4
x1 = dig['AC3'] * b6 / 8192
x2 = (dig['B1'] * (b6 * b6 / 4096)) / 65536
x3 = ((x1 + x2) + 2) / 4
b4 = dig['AC4'] * (x3 + 32768) / 32768
b7 = (raw_pressure - b3) * (50000 >> self.os_pres)
if (b7 < 0x80000000):
p = (b7 * 2) / b4
else:
p = (b7 / b4) * 2
x1 = (p / 256) * (p / 256)
x1 = (x1 * 3038) / 65536
x2 = (-7357 * p) / 65536
p = p + (x1 + x2 + 3791) / 16.
return p
def read_id(self):
# read chip id register
regs = [BME_CHIP_ID_REG]

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,247 @@
# Tools for reading bulk sensor data from the mcu
#
# Copyright (C) 2020-2023 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import logging, threading
# This "bulk sensor" module facilitates the processing of sensor chip
# measurements that do not require the host to respond with low
# latency. This module helps collect these measurements into batches
# that are then processed periodically by the host code (as specified
# by BatchBulkHelper.batch_interval). It supports the collection of
# thousands of sensor measurements per second.
#
# Processing measurements in batches reduces load on the mcu, reduces
# bandwidth to/from the mcu, and reduces load on the host. It also
# makes it easier to export the raw measurements via the webhooks
# system (aka API Server).
BATCH_INTERVAL = 0.500
# Helper to process accumulated messages in periodic batches
class BatchBulkHelper:
def __init__(self, printer, batch_cb, start_cb=None, stop_cb=None,
batch_interval=BATCH_INTERVAL):
self.printer = printer
self.batch_cb = batch_cb
if start_cb is None:
start_cb = (lambda: None)
self.start_cb = start_cb
if stop_cb is None:
stop_cb = (lambda: None)
self.stop_cb = stop_cb
self.is_started = False
self.batch_interval = batch_interval
self.batch_timer = None
self.client_cbs = []
self.webhooks_start_resp = {}
# Periodic batch processing
def _start(self):
if self.is_started:
return
self.is_started = True
try:
self.start_cb()
except self.printer.command_error as e:
logging.exception("BatchBulkHelper start callback error")
self.is_started = False
del self.client_cbs[:]
raise
reactor = self.printer.get_reactor()
systime = reactor.monotonic()
waketime = systime + self.batch_interval
self.batch_timer = reactor.register_timer(self._proc_batch, waketime)
def _stop(self):
del self.client_cbs[:]
self.printer.get_reactor().unregister_timer(self.batch_timer)
self.batch_timer = None
if not self.is_started:
return
try:
self.stop_cb()
except self.printer.command_error as e:
logging.exception("BatchBulkHelper stop callback error")
del self.client_cbs[:]
self.is_started = False
if self.client_cbs:
# New client started while in process of stopping
self._start()
def _proc_batch(self, eventtime):
try:
msg = self.batch_cb(eventtime)
except self.printer.command_error as e:
logging.exception("BatchBulkHelper batch callback error")
self._stop()
return self.printer.get_reactor().NEVER
if not msg:
return eventtime + self.batch_interval
for client_cb in list(self.client_cbs):
res = client_cb(msg)
if not res:
# This client no longer needs updates - unregister it
self.client_cbs.remove(client_cb)
if not self.client_cbs:
self._stop()
return self.printer.get_reactor().NEVER
return eventtime + self.batch_interval
# Client registration
def add_client(self, client_cb):
self.client_cbs.append(client_cb)
self._start()
# Webhooks registration
def _add_api_client(self, web_request):
whbatch = BatchWebhooksClient(web_request)
self.add_client(whbatch.handle_batch)
web_request.send(self.webhooks_start_resp)
def add_mux_endpoint(self, path, key, value, webhooks_start_resp):
self.webhooks_start_resp = webhooks_start_resp
wh = self.printer.lookup_object('webhooks')
wh.register_mux_endpoint(path, key, value, self._add_api_client)
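# --- Illustrative sketch, not part of this commit: how an extras module
# --- might wire up the BatchBulkHelper defined above (it would normally do
# --- "from . import bulk_sensor" and use bulk_sensor.BatchBulkHelper).
class _ExampleSensor:
    def __init__(self, config):
        self.printer = config.get_printer()
        self.batch_bulk = BatchBulkHelper(
            self.printer, self._process_batch,
            self._start_measurements, self._finish_measurements, 0.100)
        self.batch_bulk.add_mux_endpoint("example/dump", "sensor", "example",
                                         {'header': ('time', 'value')})
    def _start_measurements(self):
        pass   # ask the mcu to start streaming samples
    def _finish_measurements(self):
        pass   # ask the mcu to stop streaming samples
    def _process_batch(self, eventtime):
        # Return {} when there is nothing to report; any non-empty dict is
        # forwarded to every registered client callback.
        return {'data': [], 'errors': 0}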
# A webhooks wrapper for use by BatchBulkHelper
class BatchWebhooksClient:
def __init__(self, web_request):
self.cconn = web_request.get_client_connection()
self.template = web_request.get_dict('response_template', {})
def handle_batch(self, msg):
if self.cconn.is_closed():
return False
tmp = dict(self.template)
tmp['params'] = msg
self.cconn.send(tmp)
return True
# Helper class to store incoming messages in a queue
class BulkDataQueue:
def __init__(self, mcu, msg_name, oid):
# Measurement storage (accessed from background thread)
self.lock = threading.Lock()
self.raw_samples = []
# Register callback with mcu
mcu.register_response(self._handle_data, msg_name, oid)
def _handle_data(self, params):
with self.lock:
self.raw_samples.append(params)
def pull_samples(self):
with self.lock:
raw_samples = self.raw_samples
self.raw_samples = []
return raw_samples
def clear_samples(self):
self.pull_samples()
######################################################################
# Clock synchronization
######################################################################
# It is common for sensors to produce measurements at a fixed
# frequency. If the mcu can reliably obtain all of these
# measurements, then the code here can calculate a precision timestamp
# for them. That is, it can determine the actual sensor measurement
# frequency, the time of the first measurement, and thus a precise
# time for all measurements.
#
# This system works by having the mcu periodically report a precision
# timestamp along with the total number of measurements the sensor has
# taken as of that time. In brief, knowing the total number of
# measurements taken over an extended period provides an accurate
# estimate of measurement frequency, which can then also be utilized
# to determine the time of the first measurement.
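# --- Illustrative numbers, not part of this commit: if the mcu reports a
# --- total of 0 samples at print_time 10.0 and 32000 samples at 20.0, the
# --- sensor frequency is 3200 Hz and sample i lands at 10.0 + i / 3200.
_freq = (32000 - 0) / (20.0 - 10.0)          # 3200.0 Hz
_sample_time = lambda i: 10.0 + i / _freq    # the regression below refines this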
# Helper class for chip clock synchronization via linear regression
class ClockSyncRegression:
def __init__(self, mcu, chip_clock_smooth, decay = 1. / 20.):
self.mcu = mcu
self.chip_clock_smooth = chip_clock_smooth
self.decay = decay
self.last_chip_clock = self.last_exp_mcu_clock = 0.
self.mcu_clock_avg = self.mcu_clock_variance = 0.
self.chip_clock_avg = self.chip_clock_covariance = 0.
def reset(self, mcu_clock, chip_clock):
self.mcu_clock_avg = self.last_mcu_clock = mcu_clock
self.chip_clock_avg = chip_clock
self.mcu_clock_variance = self.chip_clock_covariance = 0.
self.last_chip_clock = self.last_exp_mcu_clock = 0.
def update(self, mcu_clock, chip_clock):
# Update linear regression
decay = self.decay
diff_mcu_clock = mcu_clock - self.mcu_clock_avg
self.mcu_clock_avg += decay * diff_mcu_clock
self.mcu_clock_variance = (1. - decay) * (
self.mcu_clock_variance + diff_mcu_clock**2 * decay)
diff_chip_clock = chip_clock - self.chip_clock_avg
self.chip_clock_avg += decay * diff_chip_clock
self.chip_clock_covariance = (1. - decay) * (
self.chip_clock_covariance + diff_mcu_clock*diff_chip_clock*decay)
def set_last_chip_clock(self, chip_clock):
base_mcu, base_chip, inv_cfreq = self.get_clock_translation()
self.last_chip_clock = chip_clock
self.last_exp_mcu_clock = base_mcu + (chip_clock-base_chip) * inv_cfreq
def get_clock_translation(self):
inv_chip_freq = self.mcu_clock_variance / self.chip_clock_covariance
if not self.last_chip_clock:
return self.mcu_clock_avg, self.chip_clock_avg, inv_chip_freq
# Find mcu clock associated with future chip_clock
s_chip_clock = self.last_chip_clock + self.chip_clock_smooth
scdiff = s_chip_clock - self.chip_clock_avg
s_mcu_clock = self.mcu_clock_avg + scdiff * inv_chip_freq
# Calculate frequency to converge at future point
mdiff = s_mcu_clock - self.last_exp_mcu_clock
s_inv_chip_freq = mdiff / self.chip_clock_smooth
return self.last_exp_mcu_clock, self.last_chip_clock, s_inv_chip_freq
def get_time_translation(self):
base_mcu, base_chip, inv_cfreq = self.get_clock_translation()
clock_to_print_time = self.mcu.clock_to_print_time
base_time = clock_to_print_time(base_mcu)
inv_freq = clock_to_print_time(base_mcu + inv_cfreq) - base_time
return base_time, base_chip, inv_freq
MAX_BULK_MSG_SIZE = 52
# Handle common periodic chip status query responses
class ChipClockUpdater:
def __init__(self, clock_sync, bytes_per_sample):
self.clock_sync = clock_sync
self.bytes_per_sample = bytes_per_sample
self.samples_per_block = MAX_BULK_MSG_SIZE // bytes_per_sample
self.mcu = clock_sync.mcu
self.last_sequence = self.max_query_duration = 0
self.last_limit_count = 0
def get_last_sequence(self):
return self.last_sequence
def get_last_limit_count(self):
return self.last_limit_count
def clear_duration_filter(self):
self.max_query_duration = 1 << 31
def note_start(self, reqclock):
self.last_sequence = 0
self.last_limit_count = 0
self.clock_sync.reset(reqclock, 0)
self.clear_duration_filter()
def update_clock(self, params):
# Handle a status response message of the form:
# adxl345_status oid=x clock=x query_ticks=x next_sequence=x
# buffered=x fifo=x limit_count=x
fifo = params['fifo']
mcu_clock = self.mcu.clock32_to_clock64(params['clock'])
seq_diff = (params['next_sequence'] - self.last_sequence) & 0xffff
self.last_sequence += seq_diff
buffered = params['buffered']
lc_diff = (params['limit_count'] - self.last_limit_count) & 0xffff
self.last_limit_count += lc_diff
duration = params['query_ticks']
if duration > self.max_query_duration:
# Skip measurement as a high query time could skew clock tracking
self.max_query_duration = max(2 * self.max_query_duration,
self.mcu.seconds_to_clock(.000005))
return
self.max_query_duration = 2 * duration
msg_count = (self.last_sequence * self.samples_per_block
+ buffered // self.bytes_per_sample + fifo)
# The "chip clock" is the message counter plus .5 for average
# inaccuracy of query responses and plus .5 for assumed offset
# of hardware processing time.
chip_clock = msg_count + 1
self.clock_sync.update(mcu_clock + duration // 2, chip_clock)
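The regression above keeps decay-weighted running estimates of the MCU clock and the chip's sample counter, and uses mcu_clock_variance / chip_clock_covariance as the "MCU ticks per chip sample" slope. A minimal standalone sketch of that update rule, outside Klipper (the class and method names below are illustrative only, not Klipper APIs):

class TinyClockRegression:
    # Decay-weighted averages, variance and covariance, mirroring update()
    def __init__(self, decay=1. / 20.):
        self.decay = decay
        self.mcu_avg = self.mcu_var = 0.
        self.chip_avg = self.covar = 0.
    def update(self, mcu_clock, chip_clock):
        d = self.decay
        dm = mcu_clock - self.mcu_avg
        self.mcu_avg += d * dm
        self.mcu_var = (1. - d) * (self.mcu_var + dm * dm * d)
        dc = chip_clock - self.chip_avg
        self.chip_avg += d * dc
        self.covar = (1. - d) * (self.covar + dm * dc * d)
    def mcu_ticks_per_sample(self):
        return self.mcu_var / self.covar

reg = TinyClockRegression()
for i in range(1, 200):
    # Pretend the chip emits one sample every ~1000 MCU ticks (with jitter)
    reg.update(i * 1000 + (i % 3), i)
print(reg.mcu_ticks_per_sample())   # converges near 1000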

View File

@@ -142,13 +142,22 @@ def MCU_SPI_from_config(config, mode, pin_option="cs_pin",
# Helper code for working with devices connected to an MCU via an I2C bus
class MCU_I2C:
def __init__(self, mcu, bus, addr, speed):
def __init__(self, mcu, bus, addr, speed, sw_pins=None):
self.mcu = mcu
self.bus = bus
self.i2c_address = addr
self.oid = self.mcu.create_oid()
self.config_fmt = "config_i2c oid=%d i2c_bus=%%s rate=%d address=%d" % (
self.oid, speed, addr)
mcu.add_config_cmd("config_i2c oid=%d" % (self.oid,))
# Generate I2C bus config message
if sw_pins is not None:
self.config_fmt = (
"i2c_set_software_bus oid=%d"
" scl_pin=%s sda_pin=%s rate=%d address=%d"
% (self.oid, sw_pins[0], sw_pins[1], speed, addr))
else:
self.config_fmt = (
"i2c_set_bus oid=%d i2c_bus=%%s rate=%d address=%d"
% (self.oid, speed, addr))
self.cmd_queue = self.mcu.alloc_command_queue()
self.mcu.register_config_callback(self.build_config)
self.i2c_write_cmd = self.i2c_read_cmd = self.i2c_modify_bits_cmd = None
@@ -161,8 +170,10 @@ class MCU_I2C:
def get_command_queue(self):
return self.cmd_queue
def build_config(self):
bus = resolve_bus_name(self.mcu, "i2c_bus", self.bus)
self.mcu.add_config_cmd(self.config_fmt % (bus,))
if '%' in self.config_fmt:
bus = resolve_bus_name(self.mcu, "i2c_bus", self.bus)
self.config_fmt = self.config_fmt % (bus,)
self.mcu.add_config_cmd(self.config_fmt)
self.i2c_write_cmd = self.mcu.lookup_command(
"i2c_write oid=%c data=%*s", cq=self.cmd_queue)
self.i2c_read_cmd = self.mcu.lookup_query_command(
@@ -202,13 +213,24 @@ def MCU_I2C_from_config(config, default_addr=None, default_speed=100000):
printer = config.get_printer()
i2c_mcu = mcu.get_printer_mcu(printer, config.get('i2c_mcu', 'mcu'))
speed = config.getint('i2c_speed', default_speed, minval=100000)
bus = config.get('i2c_bus', None)
if default_addr is None:
addr = config.getint('i2c_address', minval=0, maxval=127)
else:
addr = config.getint('i2c_address', default_addr, minval=0, maxval=127)
# Determine pin from config
ppins = config.get_printer().lookup_object("pins")
if config.get('i2c_software_scl_pin', None) is not None:
sw_pin_names = ['i2c_software_%s_pin' % (name,)
for name in ['scl', 'sda']]
sw_pin_params = [ppins.lookup_pin(config.get(name), share_type=name)
for name in sw_pin_names]
sw_pins = tuple([pin_params['pin'] for pin_params in sw_pin_params])
bus = None
else:
bus = config.get('i2c_bus', None)
sw_pins = None
# Create MCU_I2C object
return MCU_I2C(i2c_mcu, bus, addr, speed)
return MCU_I2C(i2c_mcu, bus, addr, speed, sw_pins)
######################################################################
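For reference, a standalone sketch of the bus-selection logic added above: when software SCL/SDA pins are configured, the MCU is told to bit-bang the bus with i2c_set_software_bus; otherwise the hardware bus name is resolved later and substituted into the i2c_set_bus template. The build_i2c_config() helper, the oid, the address and the pin names below are made up for the example and are not Klipper APIs.

def build_i2c_config(oid, speed, addr, bus=None, sw_pins=None):
    if sw_pins is not None:
        scl, sda = sw_pins
        return ("i2c_set_software_bus oid=%d scl_pin=%s sda_pin=%s"
                " rate=%d address=%d" % (oid, scl, sda, speed, addr))
    # Hardware bus: the bus name is resolved at config time, hence the %%s
    fmt = "i2c_set_bus oid=%d i2c_bus=%%s rate=%d address=%d" % (
        oid, speed, addr)
    return fmt % (bus,)

print(build_i2c_config(1, 100000, 0x40, bus="i2c1"))
print(build_i2c_config(2, 100000, 0x40, sw_pins=("PB8", "PB9")))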

Binary file not shown.

View File

@@ -1,6 +1,6 @@
# Support for button detection and callbacks
#
# Copyright (C) 2018 Kevin O'Connor <kevin@koconnor.net>
# Copyright (C) 2018-2023 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import logging
@@ -57,10 +57,9 @@ class MCU_buttons:
def handle_buttons_state(self, params):
# Expand the message ack_count from 8-bit
ack_count = self.ack_count
ack_diff = (ack_count - params['ack_count']) & 0xff
if ack_diff & 0x80:
ack_diff -= 0x100
msg_ack_count = ack_count - ack_diff
ack_diff = (params['ack_count'] - ack_count) & 0xff
ack_diff -= (ack_diff & 0x80) << 1
msg_ack_count = ack_count + ack_diff
# Determine new buttons
buttons = bytearray(params['state'])
new_count = msg_ack_count + len(buttons) - self.ack_count
@@ -70,17 +69,17 @@ class MCU_buttons:
# Send ack to MCU
self.ack_cmd.send([self.oid, new_count])
self.ack_count += new_count
# Call self.handle_button() with this event in main thread
for nb in new_buttons:
self.reactor.register_async_callback(
(lambda e, s=self, b=nb: s.handle_button(e, b)))
def handle_button(self, eventtime, button):
button ^= self.invert
changed = button ^ self.last_button
for mask, shift, callback in self.callbacks:
if changed & mask:
callback(eventtime, (button & mask) >> shift)
self.last_button = button
# Invoke callbacks with this event in main thread
btime = params['#receive_time']
for button in new_buttons:
button ^= self.invert
changed = button ^ self.last_button
self.last_button = button
for mask, shift, callback in self.callbacks:
if changed & mask:
state = (button & mask) >> shift
self.reactor.register_async_callback(
(lambda et, c=callback, bt=btime, s=state: c(bt, s)))
######################################################################
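The rewritten handler above expands the MCU's 8-bit ack_count into the host's full counter by sign-extending the modulo-256 difference. A small standalone sketch of that expansion (expand_ack() is illustrative only):

def expand_ack(local_count, msg_count8):
    diff = (msg_count8 - local_count) & 0xff
    diff -= (diff & 0x80) << 1        # sign-extend the 8-bit delta
    return local_count + diff

print(hex(expand_ack(0x1fe, 0x03)))   # 0x203 - message is 5 ahead of the host
print(hex(expand_ack(0x203, 0xfe)))   # 0x1fe - message is 5 behind the host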

Binary file not shown.

View File

@@ -4,6 +4,8 @@
#
# This file may be distributed under the terms of the GNU GPLv3 license.
NODEID_FIRST = 4
class PrinterCANBus:
def __init__(self, config):
self.printer = config.get_printer()
@@ -11,7 +13,7 @@ class PrinterCANBus:
def add_uuid(self, config, canbus_uuid, canbus_iface):
if canbus_uuid in self.ids:
raise config.error("Duplicate canbus_uuid")
new_id = len(self.ids)
new_id = len(self.ids) + NODEID_FIRST
self.ids[canbus_uuid] = new_id
return new_id
def get_nodeid(self, canbus_uuid):

View File

@@ -0,0 +1,59 @@
from . import fan
PIN_MIN_TIME = 0.100
class ChamberFan:
def __init__(self, config):
self.printer = config.get_printer()
self.printer.register_event_handler("klippy:ready", self.handle_ready)
self.printer.register_event_handler("klippy:connect",
self.handle_connect)
self.printer.load_object(config, 'heaters')
self.heaters = []
self.fan = fan.Fan(config)
self.fan_speed = config.getfloat('fan_speed', default=1.,
minval=0., maxval=1.)
self.idle_speed = config.getfloat(
'idle_speed', default=self.fan_speed, minval=0., maxval=1.)
self.idle_timeout = config.getint("idle_timeout", default=30, minval=0)
self.heater_names = config.getlist("heater", ())
self.fan_on = True
self.last_on = self.idle_timeout
self.last_speed = 0.
def handle_connect(self):
# Heater lookup
pheaters = self.printer.lookup_object('heaters')
self.heaters = [pheaters.lookup_heater(n) for n in self.heater_names]
def handle_ready(self):
reactor = self.printer.get_reactor()
reactor.register_timer(self.callback, reactor.monotonic()+PIN_MIN_TIME)
gcode = self.printer.lookup_object("gcode")
gcode.register_command("TOGGLE_CHAMBER_FAN", self.cmd_toggle_chamber_fan)
def cmd_toggle_chamber_fan(self, gcmd):
self.fan_on = not self.fan_on
def get_status(self, eventtime):
return self.fan.get_status(eventtime)
def callback(self, eventtime):
speed = 0.
active = False
for heater in self.heaters:
_, target_temp = heater.get_temp(eventtime)
if target_temp:
active = True
if active:
self.last_on = 0
speed = self.fan_speed
elif self.last_on < self.idle_timeout:
speed = self.idle_speed
self.last_on += 1
if not self.fan_on:
speed = 0.
if speed != self.last_speed:
self.last_speed = speed
curtime = self.printer.get_reactor().monotonic()
print_time = self.fan.get_mcu().estimated_print_time(curtime)
self.fan.set_speed(print_time + PIN_MIN_TIME, speed)
return eventtime + 1.
def load_config_prefix(config):
return ChamberFan(config)
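In summary, the callback above picks the chamber fan speed from three inputs: whether any monitored heater has a target, how long ago the last heater went inactive, and the TOGGLE_CHAMBER_FAN flag. A standalone sketch of that decision (chamber_fan_speed() and its default speeds are illustrative only):

def chamber_fan_speed(heater_targets, seconds_since_active, fan_on,
                      fan_speed=1.0, idle_speed=0.5, idle_timeout=30):
    if not fan_on:
        return 0.
    if any(t > 0. for t in heater_targets):
        return fan_speed
    if seconds_since_active < idle_timeout:
        return idle_speed
    return 0.

print(chamber_fan_speed([60., 0.], 0, True))    # 1.0 - a heater is active
print(chamber_fan_speed([0., 0.], 10, True))    # 0.5 - still in idle window
print(chamber_fan_speed([0., 0.], 45, True))    # 0.0 - idle timeout expired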

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -24,6 +24,9 @@
# + Home All
# + Home Z
# + Home X/Y
# + Z Tilt
# + Quad Gantry Lvl
# + Bed Mesh
# + Steppers off
# + Fan: OFF
# + Fan speed: 000%
@@ -233,6 +236,24 @@ enable: {not printer.idle_timeout.state == "Printing"}
name: Home X/Y
gcode: G28 X Y
[menu __main __control __z_tilt]
type: command
enable: {not printer.idle_timeout.state == "Printing" and ('z_tilt' in printer)}
name: Z Tilt
gcode: Z_TILT_ADJUST
[menu __main __control __quad_gantry_level]
type: command
enable: {not printer.idle_timeout.state == "Printing" and ('quad_gantry_level' in printer)}
name: Quad Gantry Lvl
gcode: QUAD_GANTRY_LEVEL
[menu __main __control __bed_mesh]
type: command
enable: {not printer.idle_timeout.state == "Printing" and ('bed_mesh' in printer)}
name: Bed Mesh
gcode: BED_MESH_CALIBRATE
[menu __main __control __disable]
type: command
name: Steppers off
@@ -683,7 +704,7 @@ name: Calibration
[menu __main __setup __calib __delta_calib_auto]
type: command
enable: {not printer.idle_timeout.state == "Printing"}
enable: {(not printer.idle_timeout.state == "Printing") and ('delta_calibrate' in printer)}
name: Delta cal. auto
gcode:
G28
@@ -691,12 +712,12 @@ gcode:
[menu __main __setup __calib __delta_calib_man]
type: list
enable: {not printer.idle_timeout.state == "Printing"}
enable: {(not printer.idle_timeout.state == "Printing") and ('delta_calibrate' in printer)}
name: Delta cal. man
[menu __main __setup __calib __bedprobe]
type: command
enable: {not printer.idle_timeout.state == "Printing"}
enable: {(not printer.idle_timeout.state == "Printing") and ('probe' in printer)}
name: Bed probe
gcode: PROBE

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -6,7 +6,8 @@
import math, logging
import stepper
TRINAMIC_DRIVERS = ["tmc2130", "tmc2208", "tmc2209", "tmc2240", "tmc2660", "tmc5160"]
TRINAMIC_DRIVERS = ["tmc2130", "tmc2208", "tmc2209", "tmc2240", "tmc2660",
"tmc5160"]
# Calculate the trigger phase of a stepper motor
class PhaseCalc:

View File

@@ -234,7 +234,7 @@ class ExcludeObject:
elif current:
if not self.current_object:
gcmd.respond_error('There is no current object to cancel')
raise self.gcode.error('There is no current object to cancel')
else:
self._exclude_object(self.current_object)

Binary file not shown.

View File

@@ -15,6 +15,8 @@ class PrinterExtruderStepper:
self.handle_connect)
def handle_connect(self):
self.extruder_stepper.sync_to_extruder(self.extruder_name)
def get_status(self, eventtime):
return self.extruder_stepper.get_status(eventtime)
def load_config_prefix(config):
return PrinterExtruderStepper(config)

View File

@@ -30,6 +30,12 @@ class Fan:
shutdown_power = max(0., min(self.max_power, shutdown_speed))
self.mcu_fan.setup_start_value(0., shutdown_power)
self.enable_pin = None
enable_pin = config.get('enable_pin', None)
if enable_pin is not None:
self.enable_pin = ppins.setup_pin('digital_out', enable_pin)
self.enable_pin.setup_max_duration(0.)
# Setup tachometer
self.tachometer = FanTachometer(config)
@@ -46,6 +52,11 @@ class Fan:
if value == self.last_fan_value:
return
print_time = max(self.last_fan_time + FAN_MIN_TIME, print_time)
if self.enable_pin:
if value > 0 and self.last_fan_value == 0:
self.enable_pin.set_digital(print_time, 1)
elif value == 0 and self.last_fan_value > 0:
self.enable_pin.set_digital(print_time, 0)
if (value and value < self.max_power and self.kick_start_time
and (not self.last_fan_value or value - self.last_fan_value > .5)):
# Run fan at full speed for specified kick_start_time

Binary file not shown.

Binary file not shown.

View File

@@ -43,12 +43,12 @@ class ForceMove:
gcode = self.printer.lookup_object('gcode')
gcode.register_command('STEPPER_BUZZ', self.cmd_STEPPER_BUZZ,
desc=self.cmd_STEPPER_BUZZ_help)
gcode.register_command('SET_KINEMATIC_POSITION',
self.cmd_SET_KINEMATIC_POSITION,
desc=self.cmd_SET_KINEMATIC_POSITION_help)
if config.getboolean("enable_force_move", False):
gcode.register_command('FORCE_MOVE', self.cmd_FORCE_MOVE,
desc=self.cmd_FORCE_MOVE_help)
gcode.register_command('SET_KINEMATIC_POSITION',
self.cmd_SET_KINEMATIC_POSITION,
desc=self.cmd_SET_KINEMATIC_POSITION_help)
def register_stepper(self, config, mcu_stepper):
self.steppers[mcu_stepper.get_name()] = mcu_stepper
def lookup_stepper(self, name):
@@ -86,7 +86,8 @@ class ForceMove:
0., 0., 0., axis_r, 0., 0., 0., cruise_v, accel)
print_time = print_time + accel_t + cruise_t + accel_t
stepper.generate_steps(print_time)
self.trapq_finalize_moves(self.trapq, print_time + 99999.9)
self.trapq_finalize_moves(self.trapq, print_time + 99999.9,
print_time + 99999.9)
stepper.set_trapq(prev_trapq)
stepper.set_stepper_kinematics(prev_sk)
toolhead.note_kinematic_activity(print_time)
@@ -131,7 +132,6 @@ class ForceMove:
z = gcmd.get_float('Z', curpos[2])
logging.info("SET_KINEMATIC_POSITION pos=%.3f,%.3f,%.3f", x, y, z)
toolhead.set_position([x, y, z, curpos[3]], homing_axes=(0, 1, 2))
def load_config(config):
return ForceMove(config)

Binary file not shown.

View File

@@ -10,9 +10,22 @@ import math
# Coordinates created by this are converted into G1 commands.
#
# note: only IJ version available
# supports XY, XZ & YZ planes with remaining axis as helical
# Enum
ARC_PLANE_X_Y = 0
ARC_PLANE_X_Z = 1
ARC_PLANE_Y_Z = 2
# Enum
X_AXIS = 0
Y_AXIS = 1
Z_AXIS = 2
E_AXIS = 3
class ArcSupport:
def __init__(self, config):
self.printer = config.get_printer()
self.mm_per_arc_segment = config.getfloat('resolution', 1., above=0.0)
@@ -22,12 +35,30 @@ class ArcSupport:
self.gcode.register_command("G2", self.cmd_G2)
self.gcode.register_command("G3", self.cmd_G3)
self.gcode.register_command("G17", self.cmd_G17)
self.gcode.register_command("G18", self.cmd_G18)
self.gcode.register_command("G19", self.cmd_G19)
self.Coord = self.gcode.Coord
# backwards compatibility, prior implementation only supported XY
self.plane = ARC_PLANE_X_Y
def cmd_G2(self, gcmd):
self._cmd_inner(gcmd, True)
def cmd_G3(self, gcmd):
self._cmd_inner(gcmd, False)
def cmd_G17(self, gcmd):
self.plane = ARC_PLANE_X_Y
def cmd_G18(self, gcmd):
self.plane = ARC_PLANE_X_Z
def cmd_G19(self, gcmd):
self.plane = ARC_PLANE_Y_Z
def _cmd_inner(self, gcmd, clockwise):
gcodestatus = self.gcode_move.get_status()
if not gcodestatus['absolute_coordinates']:
@@ -35,21 +66,33 @@ class ArcSupport:
currentPos = gcodestatus['gcode_position']
# Parse parameters
asX = gcmd.get_float("X", currentPos[0])
asY = gcmd.get_float("Y", currentPos[1])
asZ = gcmd.get_float("Z", currentPos[2])
asTarget = self.Coord(x=gcmd.get_float("X", currentPos[0]),
y=gcmd.get_float("Y", currentPos[1]),
z=gcmd.get_float("Z", currentPos[2]),
e=None)
if gcmd.get_float("R", None) is not None:
raise gcmd.error("G2/G3 does not support R moves")
asI = gcmd.get_float("I", 0.)
asJ = gcmd.get_float("J", 0.)
if not asI and not asJ:
raise gcmd.error("G2/G3 neither I nor J given")
# determine the plane coordinates and the helical axis
asPlanar = [ gcmd.get_float(a, 0.) for i,a in enumerate('IJ') ]
axes = (X_AXIS, Y_AXIS, Z_AXIS)
if self.plane == ARC_PLANE_X_Z:
asPlanar = [ gcmd.get_float(a, 0.) for i,a in enumerate('IK') ]
axes = (X_AXIS, Z_AXIS, Y_AXIS)
elif self.plane == ARC_PLANE_Y_Z:
asPlanar = [ gcmd.get_float(a, 0.) for i,a in enumerate('JK') ]
axes = (Y_AXIS, Z_AXIS, X_AXIS)
if not (asPlanar[0] or asPlanar[1]):
raise gcmd.error("G2/G3 requires IJ, IK or JK parameters")
asE = gcmd.get_float("E", None)
asF = gcmd.get_float("F", None)
# Build list of linear coordinates to move to
coords = self.planArc(currentPos, [asX, asY, asZ], [asI, asJ],
clockwise)
# Build list of linear coordinates to move
coords = self.planArc(currentPos, asTarget, asPlanar,
clockwise, *axes)
e_per_move = e_base = 0.
if asE is not None:
if gcodestatus['absolute_extrude']:
@@ -74,37 +117,37 @@ class ArcSupport:
# The arc is approximated by generating many small linear segments.
# The length of each segment is configured in MM_PER_ARC_SEGMENT
# Arcs smaller than this value will be a line only
def planArc(self, currentPos, targetPos, offset, clockwise):
#
# alpha and beta axes are the current plane, helical axis is linear travel
def planArc(self, currentPos, targetPos, offset, clockwise,
alpha_axis, beta_axis, helical_axis):
# todo: sometimes produces full circles
X_AXIS = 0
Y_AXIS = 1
Z_AXIS = 2
# Radius vector from center to current location
r_P = -offset[0]
r_Q = -offset[1]
# Determine angular travel
center_P = currentPos[X_AXIS] - r_P
center_Q = currentPos[Y_AXIS] - r_Q
rt_X = targetPos[X_AXIS] - center_P
rt_Y = targetPos[Y_AXIS] - center_Q
angular_travel = math.atan2(r_P * rt_Y - r_Q * rt_X,
r_P * rt_X + r_Q * rt_Y)
center_P = currentPos[alpha_axis] - r_P
center_Q = currentPos[beta_axis] - r_Q
rt_Alpha = targetPos[alpha_axis] - center_P
rt_Beta = targetPos[beta_axis] - center_Q
angular_travel = math.atan2(r_P * rt_Beta - r_Q * rt_Alpha,
r_P * rt_Alpha + r_Q * rt_Beta)
if angular_travel < 0.:
angular_travel += 2. * math.pi
if clockwise:
angular_travel -= 2. * math.pi
if (angular_travel == 0.
and currentPos[X_AXIS] == targetPos[X_AXIS]
and currentPos[Y_AXIS] == targetPos[Y_AXIS]):
and currentPos[alpha_axis] == targetPos[alpha_axis]
and currentPos[beta_axis] == targetPos[beta_axis]):
# Make a circle if the angular rotation is 0 and the
# target is current position
angular_travel = 2. * math.pi
# Determine number of segments
linear_travel = targetPos[Z_AXIS] - currentPos[Z_AXIS]
linear_travel = targetPos[helical_axis] - currentPos[helical_axis]
radius = math.hypot(r_P, r_Q)
flat_mm = radius * angular_travel
if linear_travel:
@@ -118,14 +161,18 @@ class ArcSupport:
linear_per_segment = linear_travel / segments
coords = []
for i in range(1, int(segments)):
dist_Z = i * linear_per_segment
dist_Helical = i * linear_per_segment
cos_Ti = math.cos(i * theta_per_segment)
sin_Ti = math.sin(i * theta_per_segment)
r_P = -offset[0] * cos_Ti + offset[1] * sin_Ti
r_Q = -offset[0] * sin_Ti - offset[1] * cos_Ti
c = [center_P + r_P, center_Q + r_Q, currentPos[Z_AXIS] + dist_Z]
coords.append(c)
# Coord doesn't support index assignment, create list
c = [None, None, None, None]
c[alpha_axis] = center_P + r_P
c[beta_axis] = center_Q + r_Q
c[helical_axis] = currentPos[helical_axis] + dist_Helical
coords.append(self.Coord(*c))
coords.append(targetPos)
return coords
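The angular travel computed in planArc() is the signed angle between the center-to-current and center-to-target vectors, folded into (0, 2π] for counter-clockwise moves (or (-2π, 0] for clockwise ones). A standalone sketch for a G3 quarter circle in the XY plane (angular_travel() is illustrative only):

import math

def angular_travel(cur, target, offset, clockwise):
    r_p, r_q = -offset[0], -offset[1]             # vector center -> current
    center = (cur[0] - r_p, cur[1] - r_q)
    rt_a, rt_b = target[0] - center[0], target[1] - center[1]
    travel = math.atan2(r_p * rt_b - r_q * rt_a, r_p * rt_a + r_q * rt_b)
    if travel < 0.:
        travel += 2. * math.pi
    if clockwise:
        travel -= 2. * math.pi
    return travel

# G3 X0 Y10 I-10 J0 starting from (10, 0): CCW quarter circle around origin
print(math.degrees(angular_travel((10., 0.), (0., 10.), (-10., 0.), False)))
# -> 90.0 (within floating point rounding)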

Binary file not shown.

View File

@@ -3,7 +3,7 @@
# Copyright (C) 2018-2021 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import traceback, logging, ast, copy
import traceback, logging, ast, copy, json
import jinja2
@@ -144,12 +144,13 @@ class GCodeMacro:
prefix = 'variable_'
for option in config.get_prefix_options(prefix):
try:
self.variables[option[len(prefix):]] = ast.literal_eval(
config.get(option))
except ValueError as e:
literal = ast.literal_eval(config.get(option))
json.dumps(literal, separators=(',', ':'))
self.variables[option[len(prefix):]] = literal
except (SyntaxError, TypeError, ValueError) as e:
raise config.error(
"Option '%s' in section '%s' is not a valid literal" % (
option, config.get_name()))
"Option '%s' in section '%s' is not a valid literal: %s" % (
option, config.get_name(), e))
def handle_connect(self):
prev_cmd = self.gcode.register_command(self.alias, None)
if prev_cmd is None:
@@ -169,8 +170,10 @@ class GCodeMacro:
raise gcmd.error("Unknown gcode_macro variable '%s'" % (variable,))
try:
literal = ast.literal_eval(value)
except ValueError as e:
raise gcmd.error("Unable to parse '%s' as a literal" % (value,))
json.dumps(literal, separators=(',', ':'))
except (SyntaxError, TypeError, ValueError) as e:
raise gcmd.error("Unable to parse '%s' as a literal: %s" %
(value, e))
v = dict(self.variables)
v[variable] = literal
self.variables = v
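The json.dumps() call added above is purely a validation step: ast.literal_eval() happily parses Python literals such as sets or complex numbers, which a JSON-based API cannot represent, so serializing the result rejects them early with a clear error. A standalone sketch of that check (parse_variable() is illustrative only):

import ast, json

def parse_variable(value):
    literal = ast.literal_eval(value)
    json.dumps(literal, separators=(',', ':'))  # raises TypeError if unsupported
    return literal

print(parse_variable("{'x': 1, 'y': [2, 3]}"))  # plain dict/list: accepted
try:
    parse_variable("{1, 2, 3}")                 # a set parses but is rejected
except TypeError as e:
    print("rejected:", e)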

Binary file not shown.

View File

@@ -0,0 +1,15 @@
class GCodeMacroBreaker:
def __init__(self, config):
# G-code macro interrupt
self.printer = config.get_printer()
webhooks = self.printer.lookup_object('webhooks')
webhooks.register_endpoint("breakmacro", self._handle_breakmacro)
webhooks.register_endpoint("resumemacro", self._handle_resumemacro)
self.gcode = self.printer.lookup_object('gcode')
def _handle_breakmacro(self, web_request):
self.gcode.break_flag = True
def _handle_resumemacro(self, web_request):
self.gcode.break_flag = False
def load_config(config):
return GCodeMacroBreaker(config)

View File

@@ -49,6 +49,9 @@ class GCodeMove:
self.saved_states = {}
self.move_transform = self.move_with_transform = None
self.position_with_transform = (lambda: [0., 0., 0., 0.])
# Save and load z offset
gcode.register_command('SAVE_ZOFFSET_TO_VARIABLE', self.cmd_SAVE_ZOFFSET_TO_VARIABLE)
gcode.register_command('LOAD_ZOFFSET_FROM_VARIABLE', self.cmd_LOAD_ZOFFSET_FROM_VARIABLE)
def _handle_ready(self):
self.is_printer_ready = True
if self.move_transform is None:
@@ -271,6 +274,16 @@ class GCodeMove:
"gcode homing: %s"
% (mcu_pos, stepper_pos, kin_pos, toolhead_pos,
gcode_pos, base_pos, homing_pos))
def cmd_SAVE_ZOFFSET_TO_VARIABLE(self, gcmd):
variables = self.printer.lookup_object("save_variables")
gcode_move = self.printer.lookup_object("gcode_move")
variables.save_variable('Variables', 'z_offset', gcode_move.homing_position[2])
def cmd_LOAD_ZOFFSET_FROM_VARIABLE(self, gcmd):
variables = self.printer.lookup_object("save_variables")
gcode_move = self.printer.lookup_object("gcode_move")
gcode_move.homing_position[2] = float(variables.load_variable('Variables', 'z_offset'))
def load_config(config):
return GCodeMove(config)

Binary file not shown.

View File

@@ -0,0 +1,87 @@
# Run a shell command via gcode
#
# Copyright (C) 2019 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import os
import shlex
import subprocess
import logging
class ShellCommand:
def __init__(self, config):
self.name = config.get_name().split()[-1]
self.printer = config.get_printer()
self.gcode = self.printer.lookup_object('gcode')
cmd = config.get('command')
cmd = os.path.expanduser(cmd)
self.command = shlex.split(cmd)
self.timeout = config.getfloat('timeout', 2., above=0.)
self.verbose = config.getboolean('verbose', True)
self.proc_fd = None
self.partial_output = ""
self.gcode.register_mux_command(
"RUN_SHELL_COMMAND", "CMD", self.name,
self.cmd_RUN_SHELL_COMMAND,
desc=self.cmd_RUN_SHELL_COMMAND_help)
def _process_output(self, eventtime):
if self.proc_fd is None:
return
try:
data = os.read(self.proc_fd, 4096)
except Exception:
# Nothing could be read; bail out rather than use undefined data
return
data = self.partial_output + data.decode()
if '\n' not in data:
self.partial_output = data
return
elif data[-1] != '\n':
split = data.rfind('\n') + 1
self.partial_output = data[split:]
data = data[:split]
else:
self.partial_output = ""
self.gcode.respond_info(data)
cmd_RUN_SHELL_COMMAND_help = "Run a linux shell command"
def cmd_RUN_SHELL_COMMAND(self, params):
gcode_params = params.get('PARAMS','')
gcode_params = shlex.split(gcode_params)
reactor = self.printer.get_reactor()
try:
proc = subprocess.Popen(
self.command + gcode_params, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except Exception:
logging.exception(
"shell_command: Command {%s} failed" % (self.name))
raise self.gcode.error("Error running command {%s}" % (self.name))
if self.verbose:
self.proc_fd = proc.stdout.fileno()
self.gcode.respond_info("Running Command {%s}...:" % (self.name))
hdl = reactor.register_fd(self.proc_fd, self._process_output)
eventtime = reactor.monotonic()
endtime = eventtime + self.timeout
complete = False
while eventtime < endtime:
eventtime = reactor.pause(eventtime + .05)
if proc.poll() is not None:
complete = True
break
if not complete:
proc.terminate()
if self.verbose:
if self.partial_output:
self.gcode.respond_info(self.partial_output)
self.partial_output = ""
if complete:
msg = "Command {%s} finished\n" % (self.name)
else:
msg = "Command {%s} timed out" % (self.name)
self.gcode.respond_info(msg)
reactor.unregister_fd(hdl)
self.proc_fd = None
def load_config_prefix(config):
return ShellCommand(config)

View File

@@ -124,6 +124,7 @@ class HallFilamentWidthSensor:
# Update filament array for lastFilamentWidthReading
self.update_filament_array(last_epos)
# Check runout
# self.gcode.respond_info("Check diameter: {}".format(self.diameter))
self.runout_helper.note_filament_present(
self.diameter > self.runout_dia)
# Does filament exist?
@@ -134,23 +135,23 @@ class HallFilamentWidthSensor:
if pending_position <= last_epos:
# Get first item in filament_array queue
item = self.filament_array.pop(0)
self.filament_width = item[1]
else:
if ((self.use_current_dia_while_delay)
and (self.firstExtruderUpdatePosition
== pending_position)):
self.filament_width = self.diameter
elif self.firstExtruderUpdatePosition == pending_position:
self.filament_width = self.nominal_filament_dia
if ((self.filament_width <= self.max_diameter)
and (self.filament_width >= self.min_diameter)):
percentage = round(self.nominal_filament_dia**2
/ self.filament_width**2 * 100)
self.gcode.run_script("M221 S" + str(percentage))
else:
self.gcode.run_script("M221 S100")
# self.filament_width = item[1]
# else:
# if ((self.use_current_dia_while_delay)
# and (self.firstExtruderUpdatePosition
# == pending_position)):
# self.filament_width = self.diameter
# elif self.firstExtruderUpdatePosition == pending_position:
# self.filament_width = self.nominal_filament_dia
# if ((self.filament_width <= self.max_diameter)
# and (self.filament_width >= self.min_diameter)):
# percentage = round(self.nominal_filament_dia**2
# / self.filament_width**2 * 100)
# self.gcode.run_script("M221 S" + str(percentage))
# else:
# self.gcode.run_script("M221 S100")
else:
self.gcode.run_script("M221 S100")
# self.gcode.run_script("M221 S100")
self.filament_array = []
if self.is_active:
@@ -171,7 +172,7 @@ class HallFilamentWidthSensor:
self.filament_array = []
gcmd.respond_info("Filament width measurements cleared!")
# Set extrude multiplier to 100%
self.gcode.run_script_from_command("M221 S100")
# self.gcode.run_script_from_command("M221 S100")
def cmd_M405(self, gcmd):
response = "Filament width sensor Turned On"
@@ -196,7 +197,7 @@ class HallFilamentWidthSensor:
# Clear filament array
self.filament_array = []
# Set extrude multiplier to 100%
self.gcode.run_script_from_command("M221 S100")
# self.gcode.run_script_from_command("M221 S100")
gcmd.respond_info(response)
def cmd_Get_Raw_Values(self, gcmd):

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -230,6 +230,7 @@ class PrinterHeaters:
self.gcode_id_to_sensor = {}
self.available_heaters = []
self.available_sensors = []
self.available_monitors = []
self.has_started = self.have_load_sensors = False
self.printer.register_event_handler("klippy:ready", self._handle_ready)
self.printer.register_event_handler("gcode:request_restart",
@@ -241,6 +242,16 @@ class PrinterHeaters:
gcode.register_command("M105", self.cmd_M105, when_not_ready=True)
gcode.register_command("TEMPERATURE_WAIT", self.cmd_TEMPERATURE_WAIT,
desc=self.cmd_TEMPERATURE_WAIT_help)
# Allow a webhooks client to interrupt a heater wait
webhooks = self.printer.lookup_object('webhooks')
webhooks.register_endpoint("breakheater", self._handle_breakheater)
self.break_flag = False
def _handle_breakheater(self, web_request):
reactor = self.printer.get_reactor()
for heater in self.heaters.values():
eventtime = reactor.monotonic()
if heater.check_busy(eventtime):
self.break_flag = True
def load_config(self, config):
self.have_load_sensors = True
# Load default temperature sensors
@@ -293,9 +304,12 @@ class PrinterHeaters:
raise self.printer.config_error(
"G-Code sensor id %s already registered" % (gcode_id,))
self.gcode_id_to_sensor[gcode_id] = psensor
def register_monitor(self, config):
self.available_monitors.append(config.get_name())
def get_status(self, eventtime):
return {'available_heaters': self.available_heaters,
'available_sensors': self.available_sensors}
'available_sensors': self.available_sensors,
'available_monitors': self.available_monitors}
def turn_off_all_heaters(self, print_time=0.):
for heater in self.heaters.values():
heater.set_temp(0.)
@@ -330,7 +344,11 @@ class PrinterHeaters:
gcode = self.printer.lookup_object("gcode")
reactor = self.printer.get_reactor()
eventtime = reactor.monotonic()
self.break_flag = False
while not self.printer.is_shutdown() and heater.check_busy(eventtime):
if self.break_flag:
self.break_flag = False
break
print_time = toolhead.get_last_move_time()
gcode.respond_raw(self._get_temp(eventtime))
eventtime = reactor.pause(eventtime + 1.)
@@ -359,7 +377,7 @@ class PrinterHeaters:
toolhead = self.printer.lookup_object("toolhead")
reactor = self.printer.get_reactor()
eventtime = reactor.monotonic()
while not self.printer.is_shutdown():
while not self.printer.is_shutdown() and not self.break_flag:
temp, target = sensor.get_temp(eventtime)
if temp >= min_temp and temp <= max_temp:
return
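The break_flag added above makes the blocking temperature waits interruptible: a webhooks client calls the breakheater endpoint and the wait loop drops out on its next iteration. A standalone sketch of that pattern outside Klipper (all names below are illustrative only):

import time

class WaitDemo:
    def __init__(self):
        self.break_flag = False
    def request_break(self):              # stands in for _handle_breakheater()
        self.break_flag = True
    def wait_until(self, is_ready, poll=0.1, timeout=2.0):
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if self.break_flag:
                self.break_flag = False
                return "interrupted"
            if is_ready():
                return "reached"
            time.sleep(poll)
        return "timeout"

w = WaitDemo()
w.request_break()
print(w.wait_until(lambda: False))        # -> interrupted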

Binary file not shown.

View File

@@ -250,8 +250,8 @@ class PrinterHoming:
"Probing failed due to printer shutdown")
raise
if hmove.check_no_movement() is not None:
raise self.printer.command_error(
"Probe triggered prior to movement")
gcode = self.printer.lookup_object('gcode')
gcode.respond_info('Probe triggered prior to movement')
return epos
def cmd_G28(self, gcmd):
# Move to origin

Binary file not shown.

Binary file not shown.

View File

@@ -126,7 +126,7 @@ class HTU21D:
rdevId |= response[1]
checksum = response[2]
if self._chekCRC8(rdevId) != checksum:
logging.warn("htu21d: Reading deviceId !Checksum error!")
logging.warning("htu21d: Reading deviceId !Checksum error!")
rdevId = rdevId >> 8
deviceId_list = list(
filter(
@@ -135,10 +135,10 @@ class HTU21D:
if len(deviceId_list) != 0:
logging.info("htu21d: Found Device Type %s" % deviceId_list[0])
else:
logging.warn("htu21d: Unknown Device ID %#x " % rdevId)
logging.warning("htu21d: Unknown Device ID %#x " % rdevId)
if(self.deviceId != deviceId_list[0]):
logging.warn(
if self.deviceId != deviceId_list[0]:
logging.warning(
"htu21d: Found device %s. Forcing to type %s as config.",
deviceId_list[0],self.deviceId)
@@ -169,7 +169,9 @@ class HTU21D:
rtemp = response[0] << 8
rtemp |= response[1]
if self._chekCRC8(rtemp) != response[2]:
logging.warn("htu21d: Checksum error on Temperature reading!")
logging.warning(
"htu21d: Checksum error on Temperature reading!"
)
else:
self.temp = (0.002681 * float(rtemp) - 46.85)
logging.debug("htu21d: Temperature %.2f " % self.temp)
@@ -190,7 +192,7 @@ class HTU21D:
rhumid = response[0] << 8
rhumid|= response[1]
if self._chekCRC8(rhumid) != response[2]:
logging.warn("htu21d: Checksum error on Humidity reading!")
logging.warning("htu21d: Checksum error on Humidity reading!")
else:
#clear status bits,
# humidity always returns xxxxxx10 in the LSB field

Binary file not shown.

Binary file not shown.

View File

@@ -61,7 +61,6 @@ class AxisInputShaper:
self.params.update(gcmd)
old_n, old_A, old_T = self.n, self.A, self.T
self.n, self.A, self.T = self.params.get_shaper()
return (old_n, old_A, old_T) != (self.n, self.A, self.T)
def set_shaper_kinematics(self, sk):
ffi_main, ffi_lib = chelper.get_ffi()
success = ffi_lib.input_shaper_set_shaper_params(
@@ -71,10 +70,6 @@ class AxisInputShaper:
ffi_lib.input_shaper_set_shaper_params(
sk, self.axis.encode(), self.n, self.A, self.T)
return success
def get_step_generation_window(self):
ffi_main, ffi_lib = chelper.get_ffi()
return ffi_lib.input_shaper_get_step_generation_window(self.n,
self.A, self.T)
def disable_shaping(self):
if self.saved is None and self.n:
self.saved = (self.n, self.A, self.T)
@@ -98,7 +93,7 @@ class InputShaper:
self.toolhead = None
self.shapers = [AxisInputShaper('x', config),
AxisInputShaper('y', config)]
self.stepper_kinematics = []
self.input_shaper_stepper_kinematics = []
self.orig_stepper_kinematics = []
# Register gcode commands
gcode = self.printer.lookup_object('gcode')
@@ -109,38 +104,51 @@ class InputShaper:
return self.shapers
def connect(self):
self.toolhead = self.printer.lookup_object("toolhead")
kin = self.toolhead.get_kinematics()
# Lookup stepper kinematics
ffi_main, ffi_lib = chelper.get_ffi()
steppers = kin.get_steppers()
for s in steppers:
sk = ffi_main.gc(ffi_lib.input_shaper_alloc(), ffi_lib.free)
orig_sk = s.set_stepper_kinematics(sk)
res = ffi_lib.input_shaper_set_sk(sk, orig_sk)
if res < 0:
s.set_stepper_kinematics(orig_sk)
continue
self.stepper_kinematics.append(sk)
self.orig_stepper_kinematics.append(orig_sk)
# Configure initial values
self.old_delay = 0.
self._update_input_shaping(error=self.printer.config_error)
def _get_input_shaper_stepper_kinematics(self, stepper):
# Lookup stepper kinematics
sk = stepper.get_stepper_kinematics()
if sk in self.orig_stepper_kinematics:
# Already processed this stepper kinematics unsuccessfully
return None
if sk in self.input_shaper_stepper_kinematics:
return sk
self.orig_stepper_kinematics.append(sk)
ffi_main, ffi_lib = chelper.get_ffi()
is_sk = ffi_main.gc(ffi_lib.input_shaper_alloc(), ffi_lib.free)
stepper.set_stepper_kinematics(is_sk)
res = ffi_lib.input_shaper_set_sk(is_sk, sk)
if res < 0:
stepper.set_stepper_kinematics(sk)
return None
self.input_shaper_stepper_kinematics.append(is_sk)
return is_sk
def _update_input_shaping(self, error=None):
self.toolhead.flush_step_generation()
new_delay = max([s.get_step_generation_window() for s in self.shapers])
self.toolhead.note_step_generation_scan_time(new_delay,
old_delay=self.old_delay)
failed = []
for sk in self.stepper_kinematics:
ffi_main, ffi_lib = chelper.get_ffi()
kin = self.toolhead.get_kinematics()
failed_shapers = []
for s in kin.get_steppers():
if s.get_trapq() is None:
continue
is_sk = self._get_input_shaper_stepper_kinematics(s)
if is_sk is None:
continue
old_delay = ffi_lib.input_shaper_get_step_generation_window(is_sk)
for shaper in self.shapers:
if shaper in failed:
if shaper in failed_shapers:
continue
if not shaper.set_shaper_kinematics(sk):
failed.append(shaper)
if failed:
if not shaper.set_shaper_kinematics(is_sk):
failed_shapers.append(shaper)
new_delay = ffi_lib.input_shaper_get_step_generation_window(is_sk)
if old_delay != new_delay:
self.toolhead.note_step_generation_scan_time(new_delay,
old_delay)
if failed_shapers:
error = error or self.printer.command_error
raise error("Failed to configure shaper(s) %s with given parameters"
% (', '.join([s.get_name() for s in failed])))
% (', '.join([s.get_name() for s in failed_shapers])))
def disable_shaping(self):
for shaper in self.shapers:
shaper.disable_shaping()
@@ -151,10 +159,9 @@ class InputShaper:
self._update_input_shaping()
cmd_SET_INPUT_SHAPER_help = "Set cartesian parameters for input shaper"
def cmd_SET_INPUT_SHAPER(self, gcmd):
updated = False
for shaper in self.shapers:
updated |= shaper.update(gcmd)
if updated:
if gcmd.get_command_parameters():
for shaper in self.shapers:
shaper.update(gcmd)
self._update_input_shaping()
for shaper in self.shapers:
shaper.report(gcmd)

Binary file not shown.

Binary file not shown.

klippy/extras/lis2dw.py (new file, 201 lines)
View File

@@ -0,0 +1,201 @@
# Support for reading acceleration data from an LIS2DW chip
#
# Copyright (C) 2023 Zhou.XianMing <zhouxm@biqu3d.com>
# Copyright (C) 2020-2021 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import logging
from . import bus, adxl345, bulk_sensor
# LIS2DW registers
REG_LIS2DW_WHO_AM_I_ADDR = 0x0F
REG_LIS2DW_CTRL_REG1_ADDR = 0x20
REG_LIS2DW_CTRL_REG2_ADDR = 0x21
REG_LIS2DW_CTRL_REG3_ADDR = 0x22
REG_LIS2DW_CTRL_REG6_ADDR = 0x25
REG_LIS2DW_STATUS_REG_ADDR = 0x27
REG_LIS2DW_OUT_XL_ADDR = 0x28
REG_LIS2DW_OUT_XH_ADDR = 0x29
REG_LIS2DW_OUT_YL_ADDR = 0x2A
REG_LIS2DW_OUT_YH_ADDR = 0x2B
REG_LIS2DW_OUT_ZL_ADDR = 0x2C
REG_LIS2DW_OUT_ZH_ADDR = 0x2D
REG_LIS2DW_FIFO_CTRL = 0x2E
REG_LIS2DW_FIFO_SAMPLES = 0x2F
REG_MOD_READ = 0x80
# REG_MOD_MULTI = 0x40
LIS2DW_DEV_ID = 0x44
FREEFALL_ACCEL = 9.80665
SCALE = FREEFALL_ACCEL * 1.952 / 4
MIN_MSG_TIME = 0.100
BYTES_PER_SAMPLE = 6
SAMPLES_PER_BLOCK = 8
BATCH_UPDATES = 0.100
# Printer class that controls LIS2DW chip
class LIS2DW:
def __init__(self, config):
self.printer = config.get_printer()
adxl345.AccelCommandHelper(config, self)
self.axes_map = adxl345.read_axes_map(config)
self.data_rate = 1600
# Setup mcu sensor_lis2dw bulk query code
self.spi = bus.MCU_SPI_from_config(config, 3, default_speed=5000000)
self.mcu = mcu = self.spi.get_mcu()
self.oid = oid = mcu.create_oid()
self.query_lis2dw_cmd = self.query_lis2dw_end_cmd = None
self.query_lis2dw_status_cmd = None
mcu.add_config_cmd("config_lis2dw oid=%d spi_oid=%d"
% (oid, self.spi.get_oid()))
mcu.add_config_cmd("query_lis2dw oid=%d clock=0 rest_ticks=0"
% (oid,), on_restart=True)
mcu.register_config_callback(self._build_config)
self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "lis2dw_data", oid)
# Clock tracking
chip_smooth = self.data_rate * BATCH_UPDATES * 2
self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth)
self.clock_updater = bulk_sensor.ChipClockUpdater(self.clock_sync,
BYTES_PER_SAMPLE)
self.last_error_count = 0
# Process messages in batches
self.batch_bulk = bulk_sensor.BatchBulkHelper(
self.printer, self._process_batch,
self._start_measurements, self._finish_measurements, BATCH_UPDATES)
self.name = config.get_name().split()[-1]
hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration')
self.batch_bulk.add_mux_endpoint("lis2dw/dump_lis2dw", "sensor",
self.name, {'header': hdr})
def _build_config(self):
cmdqueue = self.spi.get_command_queue()
self.query_lis2dw_cmd = self.mcu.lookup_command(
"query_lis2dw oid=%c clock=%u rest_ticks=%u", cq=cmdqueue)
self.query_lis2dw_end_cmd = self.mcu.lookup_query_command(
"query_lis2dw oid=%c clock=%u rest_ticks=%u",
"lis2dw_status oid=%c clock=%u query_ticks=%u next_sequence=%hu"
" buffered=%c fifo=%c limit_count=%hu", oid=self.oid, cq=cmdqueue)
self.query_lis2dw_status_cmd = self.mcu.lookup_query_command(
"query_lis2dw_status oid=%c",
"lis2dw_status oid=%c clock=%u query_ticks=%u next_sequence=%hu"
" buffered=%c fifo=%c limit_count=%hu", oid=self.oid, cq=cmdqueue)
def read_reg(self, reg):
params = self.spi.spi_transfer([reg | REG_MOD_READ, 0x00])
response = bytearray(params['response'])
return response[1]
def set_reg(self, reg, val, minclock=0):
self.spi.spi_send([reg, val & 0xFF], minclock=minclock)
stored_val = self.read_reg(reg)
if stored_val != val:
raise self.printer.command_error(
"Failed to set LIS2DW register [0x%x] to 0x%x: got 0x%x. "
"This is generally indicative of connection problems "
"(e.g. faulty wiring) or a faulty lis2dw chip." % (
reg, val, stored_val))
def start_internal_client(self):
aqh = adxl345.AccelQueryHelper(self.printer)
self.batch_bulk.add_client(aqh.handle_batch)
return aqh
# Measurement decoding
def _extract_samples(self, raw_samples):
# Load variables to optimize inner loop below
(x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map
last_sequence = self.clock_updater.get_last_sequence()
time_base, chip_base, inv_freq = self.clock_sync.get_time_translation()
# Process every message in raw_samples
count = seq = 0
samples = [None] * (len(raw_samples) * SAMPLES_PER_BLOCK)
for params in raw_samples:
seq_diff = (params['sequence'] - last_sequence) & 0xffff
seq_diff -= (seq_diff & 0x8000) << 1
seq = last_sequence + seq_diff
d = bytearray(params['data'])
msg_cdiff = seq * SAMPLES_PER_BLOCK - chip_base
for i in range(len(d) // BYTES_PER_SAMPLE):
d_xyz = d[i*BYTES_PER_SAMPLE:(i+1)*BYTES_PER_SAMPLE]
xlow, xhigh, ylow, yhigh, zlow, zhigh = d_xyz
# Merge and perform twos-complement
rx = (((xhigh << 8) | xlow)) - ((xhigh & 0x80) << 9)
ry = (((yhigh << 8) | ylow)) - ((yhigh & 0x80) << 9)
rz = (((zhigh << 8) | zlow)) - ((zhigh & 0x80) << 9)
raw_xyz = (rx, ry, rz)
x = round(raw_xyz[x_pos] * x_scale, 6)
y = round(raw_xyz[y_pos] * y_scale, 6)
z = round(raw_xyz[z_pos] * z_scale, 6)
ptime = round(time_base + (msg_cdiff + i) * inv_freq, 6)
samples[count] = (ptime, x, y, z)
count += 1
self.clock_sync.set_last_chip_clock(seq * SAMPLES_PER_BLOCK + i)
del samples[count:]
return samples
def _update_clock(self, minclock=0):
params = self.query_lis2dw_status_cmd.send([self.oid],
minclock=minclock)
self.clock_updater.update_clock(params)
# Start, stop, and process message batches
def _start_measurements(self):
# In case of miswiring, testing LIS2DW device ID prevents treating
# noise or wrong signal as a correctly initialized device
dev_id = self.read_reg(REG_LIS2DW_WHO_AM_I_ADDR)
logging.info("lis2dw_dev_id: %x", dev_id)
if dev_id != LIS2DW_DEV_ID:
raise self.printer.command_error(
"Invalid lis2dw id (got %x vs %x).\n"
"This is generally indicative of connection problems\n"
"(e.g. faulty wiring) or a faulty lis2dw chip."
% (dev_id, LIS2DW_DEV_ID))
# Setup chip in requested query rate
# ODR/2, +-16g, low-pass filter, low-noise enabled
self.set_reg(REG_LIS2DW_CTRL_REG6_ADDR, 0x34)
# Continuous mode: If the FIFO is full
# the new sample overwrites the older sample.
self.set_reg(REG_LIS2DW_FIFO_CTRL, 0xC0)
# High-Performance / Low-Power mode 1600/200 Hz
# High-Performance Mode (14-bit resolution)
self.set_reg(REG_LIS2DW_CTRL_REG1_ADDR, 0x94)
# Start bulk reading
self.bulk_queue.clear_samples()
systime = self.printer.get_reactor().monotonic()
print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME
reqclock = self.mcu.print_time_to_clock(print_time)
rest_ticks = self.mcu.seconds_to_clock(4. / self.data_rate)
self.query_lis2dw_cmd.send([self.oid, reqclock, rest_ticks],
reqclock=reqclock)
logging.info("LIS2DW starting '%s' measurements", self.name)
# Initialize clock tracking
self.clock_updater.note_start(reqclock)
self._update_clock(minclock=reqclock)
self.clock_updater.clear_duration_filter()
self.last_error_count = 0
def _finish_measurements(self):
# Halt bulk reading
params = self.query_lis2dw_end_cmd.send([self.oid, 0, 0])
self.bulk_queue.clear_samples()
logging.info("LIS2DW finished '%s' measurements", self.name)
self.set_reg(REG_LIS2DW_FIFO_CTRL, 0x00)
def _process_batch(self, eventtime):
self._update_clock()
raw_samples = self.bulk_queue.pull_samples()
if not raw_samples:
return {}
samples = self._extract_samples(raw_samples)
if not samples:
return {}
return {'data': samples, 'errors': self.last_error_count,
'overflows': self.clock_updater.get_last_limit_count()}
def load_config(config):
return LIS2DW(config)
def load_config_prefix(config):
return LIS2DW(config)
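Each LIS2DW sample arrives as three little-endian low/high byte pairs; _extract_samples() above folds every pair back into a signed value before applying the axes map and SCALE. A standalone sketch of that merge (merge_axis() is illustrative only):

def merge_axis(low, high):
    # Combine the bytes, then undo two's complement via the high bit
    return ((high << 8) | low) - ((high & 0x80) << 9)

print(merge_axis(0x10, 0x00))   #  16
print(merge_axis(0xf0, 0xff))   # -16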

Binary file not shown.

View File

@@ -13,9 +13,25 @@ class ManualProbe:
self.gcode_move = self.printer.load_object(config, "gcode_move")
self.gcode.register_command('MANUAL_PROBE', self.cmd_MANUAL_PROBE,
desc=self.cmd_MANUAL_PROBE_help)
# Endstop value for cartesian printers with separate Z axis
zconfig = config.getsection('stepper_z')
self.z_position_endstop = zconfig.getfloat('position_endstop', None,
note_valid=False)
# Endstop values for linear delta printers with vertical A,B,C towers
a_tower_config = config.getsection('stepper_a')
self.a_position_endstop = a_tower_config.getfloat('position_endstop',
None,
note_valid=False)
b_tower_config = config.getsection('stepper_b')
self.b_position_endstop = b_tower_config.getfloat('position_endstop',
None,
note_valid=False)
c_tower_config = config.getsection('stepper_c')
self.c_position_endstop = c_tower_config.getfloat('position_endstop',
None,
note_valid=False)
# Conditionally register appropriate commands depending on printer
# Cartestian printers with separate Z Axis
if self.z_position_endstop is not None:
self.gcode.register_command(
'Z_ENDSTOP_CALIBRATE', self.cmd_Z_ENDSTOP_CALIBRATE,
@@ -24,6 +40,12 @@ class ManualProbe:
'Z_OFFSET_APPLY_ENDSTOP',
self.cmd_Z_OFFSET_APPLY_ENDSTOP,
desc=self.cmd_Z_OFFSET_APPLY_ENDSTOP_help)
# Linear delta printers with A,B,C towers
if 'delta' == config.getsection('printer').get('kinematics'):
self.gcode.register_command(
'Z_OFFSET_APPLY_ENDSTOP',
self.cmd_Z_OFFSET_APPLY_DELTA_ENDSTOPS,
desc=self.cmd_Z_OFFSET_APPLY_ENDSTOP_help)
self.reset_status()
def manual_probe_finalize(self, kin_pos):
if kin_pos is not None:
@@ -66,6 +88,29 @@ class ManualProbe:
"with the above and restart the printer." % (new_calibrate))
configfile.set('stepper_z', 'position_endstop',
"%.3f" % (new_calibrate,))
def cmd_Z_OFFSET_APPLY_DELTA_ENDSTOPS(self, gcmd):
offset = self.gcode_move.get_status()['homing_origin'].z
configfile = self.printer.lookup_object('configfile')
if offset == 0:
self.gcode.respond_info("Nothing to do: Z Offset is 0")
else:
new_a_calibrate = self.a_position_endstop - offset
new_b_calibrate = self.b_position_endstop - offset
new_c_calibrate = self.c_position_endstop - offset
self.gcode.respond_info(
"stepper_a: position_endstop: %.3f\n"
"stepper_b: position_endstop: %.3f\n"
"stepper_c: position_endstop: %.3f\n"
"The SAVE_CONFIG command will update the printer config file\n"
"with the above and restart the printer." % (new_a_calibrate,
new_b_calibrate,
new_c_calibrate))
configfile.set('stepper_a', 'position_endstop',
"%.3f" % (new_a_calibrate,))
configfile.set('stepper_b', 'position_endstop',
"%.3f" % (new_b_calibrate,))
configfile.set('stepper_c', 'position_endstop',
"%.3f" % (new_c_calibrate,))
cmd_Z_OFFSET_APPLY_ENDSTOP_help = "Adjust the z endstop_position"
# Verify that a manual probe isn't already in progress
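Z_OFFSET_APPLY_DELTA_ENDSTOPS above simply folds the accumulated gcode Z offset into all three tower endstops before SAVE_CONFIG. A standalone sketch of the arithmetic (apply_delta_offset() is illustrative and the endstop values are made up):

def apply_delta_offset(endstops, offset):
    return {name: pos - offset for name, pos in endstops.items()}

print(apply_delta_offset(
    {'stepper_a': 220.0, 'stepper_b': 220.0, 'stepper_c': 220.0}, 0.15))
# -> each position_endstop becomes 219.85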

Binary file not shown.

View File

@@ -67,7 +67,8 @@ class ManualStepper:
0., cruise_v, accel)
self.next_cmd_time = self.next_cmd_time + accel_t + cruise_t + accel_t
self.rail.generate_steps(self.next_cmd_time)
self.trapq_finalize_moves(self.trapq, self.next_cmd_time + 99999.9)
self.trapq_finalize_moves(self.trapq, self.next_cmd_time + 99999.9,
self.next_cmd_time + 99999.9)
toolhead = self.printer.lookup_object('toolhead')
toolhead.note_kinematic_activity(self.next_cmd_time)
if sync:

View File

@@ -68,17 +68,30 @@ class SoftwareI2C:
class mcp4018:
def __init__(self, config):
self.printer = config.get_printer()
self.i2c = SoftwareI2C(config, 0x2f)
self.scale = config.getfloat('scale', 1., above=0.)
self.start_value = config.getfloat('wiper',
minval=0., maxval=self.scale)
config.get_printer().register_event_handler("klippy:connect",
self.handle_connect)
# Register commands
self.name = config.get_name().split()[1]
gcode = self.printer.lookup_object('gcode')
gcode.register_mux_command("SET_DIGIPOT", "DIGIPOT", self.name,
self.cmd_SET_DIGIPOT,
desc=self.cmd_SET_DIGIPOT_help)
def handle_connect(self):
self.set_dac(self.start_value)
def set_dac(self, value):
val = int(value * 127. / self.scale + .5)
self.i2c.i2c_write([val])
cmd_SET_DIGIPOT_help = "Set digipot value"
def cmd_SET_DIGIPOT(self, gcmd):
wiper = gcmd.get_float('WIPER', minval=0., maxval=self.scale)
if wiper is not None:
self.set_dac(wiper)
gcmd.respond_info("New value for DIGIPOT = %s, wiper = %.2f"
% (self.name, wiper))
def load_config_prefix(config):
return mcp4018(config)
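set_dac() above maps the requested wiper value (0..scale) onto the MCP4018's 128 taps with round-to-nearest. A standalone sketch of that conversion (wiper_to_code() is illustrative only):

def wiper_to_code(value, scale=1.0):
    return int(value * 127. / scale + .5)

print(wiper_to_code(0.0))    # 0
print(wiper_to_code(0.5))    # 64 (63.5 rounded up)
print(wiper_to_code(1.0))    # 127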

View File

@@ -5,110 +5,19 @@
# This file may be distributed under the terms of the GNU GPLv3 license.
import logging
import chelper
API_UPDATE_INTERVAL = 0.500
# Helper to periodically transmit data to a set of API clients
class APIDumpHelper:
def __init__(self, printer, data_cb, startstop_cb=None,
update_interval=API_UPDATE_INTERVAL):
self.printer = printer
self.data_cb = data_cb
if startstop_cb is None:
startstop_cb = (lambda is_start: None)
self.startstop_cb = startstop_cb
self.is_started = False
self.update_interval = update_interval
self.update_timer = None
self.clients = {}
def _stop(self):
self.clients.clear()
reactor = self.printer.get_reactor()
reactor.unregister_timer(self.update_timer)
self.update_timer = None
if not self.is_started:
return reactor.NEVER
try:
self.startstop_cb(False)
except self.printer.command_error as e:
logging.exception("API Dump Helper stop callback error")
self.clients.clear()
self.is_started = False
if self.clients:
# New client started while in process of stopping
self._start()
return reactor.NEVER
def _start(self):
if self.is_started:
return
self.is_started = True
try:
self.startstop_cb(True)
except self.printer.command_error as e:
logging.exception("API Dump Helper start callback error")
self.is_started = False
self.clients.clear()
raise
reactor = self.printer.get_reactor()
systime = reactor.monotonic()
waketime = systime + self.update_interval
self.update_timer = reactor.register_timer(self._update, waketime)
def add_client(self, web_request):
cconn = web_request.get_client_connection()
template = web_request.get_dict('response_template', {})
self.clients[cconn] = template
self._start()
def add_internal_client(self):
cconn = InternalDumpClient()
self.clients[cconn] = {}
self._start()
return cconn
def _update(self, eventtime):
try:
msg = self.data_cb(eventtime)
except self.printer.command_error as e:
logging.exception("API Dump Helper data callback error")
return self._stop()
if not msg:
return eventtime + self.update_interval
for cconn, template in list(self.clients.items()):
if cconn.is_closed():
del self.clients[cconn]
if not self.clients:
return self._stop()
continue
tmp = dict(template)
tmp['params'] = msg
cconn.send(tmp)
return eventtime + self.update_interval
# An "internal webhooks" wrapper for using APIDumpHelper internally
class InternalDumpClient:
def __init__(self):
self.msgs = []
self.is_done = False
def get_messages(self):
return self.msgs
def finalize(self):
self.is_done = True
def is_closed(self):
return self.is_done
def send(self, msg):
self.msgs.append(msg)
if len(self.msgs) >= 10000:
# Avoid filling up memory with too many samples
self.finalize()
from . import bulk_sensor
# Extract stepper queue_step messages
class DumpStepper:
def __init__(self, printer, mcu_stepper):
self.printer = printer
self.mcu_stepper = mcu_stepper
self.last_api_clock = 0
self.api_dump = APIDumpHelper(printer, self._api_update)
wh = self.printer.lookup_object('webhooks')
wh.register_mux_endpoint("motion_report/dump_stepper", "name",
mcu_stepper.get_name(), self._add_api_client)
self.last_batch_clock = 0
self.batch_bulk = bulk_sensor.BatchBulkHelper(printer,
self._process_batch)
api_resp = {'header': ('interval', 'count', 'add')}
self.batch_bulk.add_mux_endpoint("motion_report/dump_stepper", "name",
mcu_stepper.get_name(), api_resp)
def get_step_queue(self, start_clock, end_clock):
mcu_stepper = self.mcu_stepper
res = []
@@ -134,15 +43,15 @@ class DumpStepper:
% (i, s.first_clock, s.start_position, s.interval,
s.step_count, s.add))
logging.info('\n'.join(out))
def _api_update(self, eventtime):
data, cdata = self.get_step_queue(self.last_api_clock, 1<<63)
def _process_batch(self, eventtime):
data, cdata = self.get_step_queue(self.last_batch_clock, 1<<63)
if not data:
return {}
clock_to_print_time = self.mcu_stepper.get_mcu().clock_to_print_time
first = data[0]
first_clock = first.first_clock
first_time = clock_to_print_time(first_clock)
self.last_api_clock = last_clock = data[-1].last_clock
self.last_batch_clock = last_clock = data[-1].last_clock
last_time = clock_to_print_time(last_clock)
mcu_pos = first.start_position
start_position = self.mcu_stepper.mcu_to_commanded_position(mcu_pos)
@@ -154,10 +63,6 @@ class DumpStepper:
"start_mcu_position": mcu_pos, "step_distance": step_dist,
"first_clock": first_clock, "first_step_time": first_time,
"last_clock": last_clock, "last_step_time": last_time}
def _add_api_client(self, web_request):
self.api_dump.add_client(web_request)
hdr = ('interval', 'count', 'add')
web_request.send({'header': hdr})
NEVER_TIME = 9999999999999999.
@@ -167,11 +72,13 @@ class DumpTrapQ:
self.printer = printer
self.name = name
self.trapq = trapq
self.last_api_msg = (0., 0.)
self.api_dump = APIDumpHelper(printer, self._api_update)
wh = self.printer.lookup_object('webhooks')
wh.register_mux_endpoint("motion_report/dump_trapq", "name", name,
self._add_api_client)
self.last_batch_msg = (0., 0.)
self.batch_bulk = bulk_sensor.BatchBulkHelper(printer,
self._process_batch)
api_resp = {'header': ('time', 'duration', 'start_velocity',
'acceleration', 'start_position', 'direction')}
self.batch_bulk.add_mux_endpoint("motion_report/dump_trapq",
"name", name, api_resp)
def extract_trapq(self, start_time, end_time):
ffi_main, ffi_lib = chelper.get_ffi()
res = []
@@ -210,23 +117,18 @@ class DumpTrapQ:
move.start_z + move.z_r * dist)
velocity = move.start_v + move.accel * move_time
return pos, velocity
def _api_update(self, eventtime):
qtime = self.last_api_msg[0] + min(self.last_api_msg[1], 0.100)
def _process_batch(self, eventtime):
qtime = self.last_batch_msg[0] + min(self.last_batch_msg[1], 0.100)
data, cdata = self.extract_trapq(qtime, NEVER_TIME)
d = [(m.print_time, m.move_t, m.start_v, m.accel,
(m.start_x, m.start_y, m.start_z), (m.x_r, m.y_r, m.z_r))
for m in data]
if d and d[0] == self.last_api_msg:
if d and d[0] == self.last_batch_msg:
d.pop(0)
if not d:
return {}
self.last_api_msg = d[-1]
self.last_batch_msg = d[-1]
return {"data": d}
def _add_api_client(self, web_request):
self.api_dump.add_client(web_request)
hdr = ('time', 'duration', 'start_velocity', 'acceleration',
'start_position', 'direction')
web_request.send({'header': hdr})
STATUS_REFRESH_TIME = 0.250

Binary file not shown.

View File

@@ -4,13 +4,21 @@
# Copyright (C) 2020-2021 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import logging, time, collections, threading, multiprocessing, os
from . import bus, motion_report, adxl345
import logging
from . import bus, adxl345, bulk_sensor
MPU9250_ADDR = 0x68
MPU9250_DEV_ID = 0x73
MPU6050_DEV_ID = 0x68
MPU_DEV_IDS = {
0x74: "mpu-9515",
0x73: "mpu-9255",
0x71: "mpu-9250",
0x70: "mpu-6500",
0x68: "mpu-6050",
# all IDs above are normal MPU IDs
0x75: "mpu-unknown (DEFECTIVE! USE WITH CAUTION!)",
0x69: "mpu-unknown (DEFECTIVE! USE WITH CAUTION!)",
}
# MPU9250 registers
REG_DEVID = 0x75
@@ -39,32 +47,22 @@ SCALE = 0.000244140625 * FREEFALL_ACCEL
FIFO_SIZE = 512
Accel_Measurement = collections.namedtuple(
'Accel_Measurement', ('time', 'accel_x', 'accel_y', 'accel_z'))
MIN_MSG_TIME = 0.100
BYTES_PER_SAMPLE = 6
SAMPLES_PER_BLOCK = 8
BATCH_UPDATES = 0.100
# Printer class that controls MPU9250 chip
class MPU9250:
def __init__(self, config):
self.printer = config.get_printer()
adxl345.AccelCommandHelper(config, self)
self.query_rate = 0
am = {'x': (0, SCALE), 'y': (1, SCALE), 'z': (2, SCALE),
'-x': (0, -SCALE), '-y': (1, -SCALE), '-z': (2, -SCALE)}
axes_map = config.getlist('axes_map', ('x','y','z'), count=3)
if any([a not in am for a in axes_map]):
raise config.error("Invalid mpu9250 axes_map parameter")
self.axes_map = [am[a.strip()] for a in axes_map]
self.axes_map = adxl345.read_axes_map(config)
self.data_rate = config.getint('rate', 4000)
if self.data_rate not in SAMPLE_RATE_DIVS:
raise config.error("Invalid rate parameter: %d" % (self.data_rate,))
# Measurement storage (accessed from background thread)
self.lock = threading.Lock()
self.raw_samples = []
# Setup mcu sensor_mpu9250 bulk query code
self.i2c = bus.MCU_I2C_from_config(config,
default_addr=MPU9250_ADDR,
@@ -74,18 +72,21 @@ class MPU9250:
self.query_mpu9250_cmd = self.query_mpu9250_end_cmd = None
self.query_mpu9250_status_cmd = None
mcu.register_config_callback(self._build_config)
mcu.register_response(self._handle_mpu9250_data, "mpu9250_data", oid)
self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "mpu9250_data", oid)
# Clock tracking
self.last_sequence = self.max_query_duration = 0
self.last_limit_count = self.last_error_count = 0
self.clock_sync = adxl345.ClockSyncRegression(self.mcu, 640)
# API server endpoints
self.api_dump = motion_report.APIDumpHelper(
self.printer, self._api_update, self._api_startstop, 0.100)
chip_smooth = self.data_rate * BATCH_UPDATES * 2
self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth)
self.clock_updater = bulk_sensor.ChipClockUpdater(self.clock_sync,
BYTES_PER_SAMPLE)
self.last_error_count = 0
# Process messages in batches
self.batch_bulk = bulk_sensor.BatchBulkHelper(
self.printer, self._process_batch,
self._start_measurements, self._finish_measurements, BATCH_UPDATES)
self.name = config.get_name().split()[-1]
wh = self.printer.lookup_object('webhooks')
wh.register_mux_endpoint("mpu9250/dump_mpu9250", "sensor", self.name,
self._handle_dump_mpu9250)
hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration')
self.batch_bulk.add_mux_endpoint("mpu9250/dump_mpu9250", "sensor",
self.name, {'header': hdr})
def _build_config(self):
cmdqueue = self.i2c.get_command_queue()
self.mcu.add_config_cmd("config_mpu9250 oid=%d i2c_oid=%d"
@@ -105,28 +106,25 @@ class MPU9250:
def read_reg(self, reg):
params = self.i2c.i2c_read([reg], 1)
return bytearray(params['response'])[0]
def set_reg(self, reg, val, minclock=0):
self.i2c.i2c_write([reg, val & 0xFF], minclock=minclock)
# Measurement collection
def is_measuring(self):
return self.query_rate > 0
def _handle_mpu9250_data(self, params):
with self.lock:
self.raw_samples.append(params)
def start_internal_client(self):
aqh = adxl345.AccelQueryHelper(self.printer)
self.batch_bulk.add_client(aqh.handle_batch)
return aqh
# Measurement decoding
def _extract_samples(self, raw_samples):
# Load variables to optimize inner loop below
(x_pos, x_scale), (y_pos, y_scale), (z_pos, z_scale) = self.axes_map
last_sequence = self.last_sequence
last_sequence = self.clock_updater.get_last_sequence()
time_base, chip_base, inv_freq = self.clock_sync.get_time_translation()
# Process every message in raw_samples
count = seq = 0
samples = [None] * (len(raw_samples) * SAMPLES_PER_BLOCK)
for params in raw_samples:
seq_diff = (last_sequence - params['sequence']) & 0xffff
seq_diff = (params['sequence'] - last_sequence) & 0xffff
seq_diff -= (seq_diff & 0x8000) << 1
seq = last_sequence - seq_diff
seq = last_sequence + seq_diff
d = bytearray(params['data'])
msg_cdiff = seq * SAMPLES_PER_BLOCK - chip_base
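A minimal standalone sketch (not part of the commit) of the 16-bit sequence reconstruction shown above: the MCU reports only the low 16 bits of the message counter, so the host sign-extends the delta against the last known sequence, handling both normal progress and a counter wrap. Names and values below are illustrative.

def extend_sequence(last_sequence, reported_low16):
    # Delta in the low 16 bits, then sign-extend so a report just "behind"
    # the last known sequence becomes a small negative delta
    seq_diff = (reported_low16 - last_sequence) & 0xffff
    seq_diff -= (seq_diff & 0x8000) << 1
    return last_sequence + seq_diff

print(hex(extend_sequence(0x1fffe, 0x0002)))  # wrapped forward -> 0x20002
print(hex(extend_sequence(0x10001, 0xffff)))  # slightly stale  -> 0xffff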
@@ -150,116 +148,66 @@ class MPU9250:
return samples
def _update_clock(self, minclock=0):
# Query current state
for retry in range(5):
params = self.query_mpu9250_status_cmd.send([self.oid],
minclock=minclock)
fifo = params['fifo'] & 0x1fff
if fifo <= FIFO_SIZE:
break
else:
raise self.printer.command_error("Unable to query mpu9250 fifo")
mcu_clock = self.mcu.clock32_to_clock64(params['clock'])
sequence = (self.last_sequence & ~0xffff) | params['next_sequence']
if sequence < self.last_sequence:
sequence += 0x10000
self.last_sequence = sequence
buffered = params['buffered']
limit_count = (self.last_limit_count & ~0xffff) | params['limit_count']
if limit_count < self.last_limit_count:
limit_count += 0x10000
self.last_limit_count = limit_count
duration = params['query_ticks']
if duration > self.max_query_duration:
# Skip measurement as a high query time could skew clock tracking
self.max_query_duration = max(2 * self.max_query_duration,
self.mcu.seconds_to_clock(.000005))
return
self.max_query_duration = 2 * duration
msg_count = (sequence * SAMPLES_PER_BLOCK
+ buffered // BYTES_PER_SAMPLE + fifo)
# The "chip clock" is the message counter plus .5 for average
# inaccuracy of query responses and plus .5 for assumed offset
# of mpu9250 hw processing time.
chip_clock = msg_count + 1
self.clock_sync.update(mcu_clock + duration // 2, chip_clock)
params = self.query_mpu9250_status_cmd.send([self.oid],
minclock=minclock)
self.clock_updater.update_clock(params)
# Start, stop, and process message batches
def _start_measurements(self):
if self.is_measuring():
return
# In case of miswiring, testing MPU9250 device ID prevents treating
# noise or wrong signal as a correctly initialized device
dev_id = self.read_reg(REG_DEVID)
if dev_id != MPU9250_DEV_ID and dev_id != MPU6050_DEV_ID:
if dev_id not in MPU_DEV_IDS.keys():
raise self.printer.command_error(
"Invalid mpu9250/mpu6050 id (got %x).\n"
"Invalid mpu id (got %x).\n"
"This is generally indicative of connection problems\n"
"(e.g. faulty wiring) or a faulty chip."
% (dev_id))
else:
logging.info("Found %s with id %x"% (MPU_DEV_IDS[dev_id], dev_id))
# Setup chip in requested query rate
self.set_reg(REG_PWR_MGMT_1, SET_PWR_MGMT_1_WAKE)
self.set_reg(REG_PWR_MGMT_2, SET_PWR_MGMT_2_ACCEL_ON)
time.sleep(20. / 1000) # wait for accelerometer chip wake up
self.set_reg(REG_SMPLRT_DIV, SAMPLE_RATE_DIVS[self.data_rate])
# Add 20ms pause for accelerometer chip wake up
self.read_reg(REG_DEVID) # Dummy read to ensure queues flushed
systime = self.printer.get_reactor().monotonic()
next_time = self.mcu.estimated_print_time(systime) + 0.020
self.set_reg(REG_SMPLRT_DIV, SAMPLE_RATE_DIVS[self.data_rate],
minclock=self.mcu.print_time_to_clock(next_time))
self.set_reg(REG_CONFIG, SET_CONFIG)
self.set_reg(REG_ACCEL_CONFIG, SET_ACCEL_CONFIG)
self.set_reg(REG_ACCEL_CONFIG2, SET_ACCEL_CONFIG2)
# Setup samples
with self.lock:
self.raw_samples = []
# Start bulk reading
self.bulk_queue.clear_samples()
systime = self.printer.get_reactor().monotonic()
print_time = self.mcu.estimated_print_time(systime) + MIN_MSG_TIME
reqclock = self.mcu.print_time_to_clock(print_time)
rest_ticks = self.mcu.seconds_to_clock(1. / self.data_rate)
self.query_rate = self.data_rate
rest_ticks = self.mcu.seconds_to_clock(4. / self.data_rate)
self.query_mpu9250_cmd.send([self.oid, reqclock, rest_ticks],
reqclock=reqclock)
logging.info("MPU9250 starting '%s' measurements", self.name)
# Initialize clock tracking
self.last_sequence = 0
self.last_limit_count = self.last_error_count = 0
self.clock_sync.reset(reqclock, 0)
self.max_query_duration = 1 << 31
self.clock_updater.note_start(reqclock)
self._update_clock(minclock=reqclock)
self.max_query_duration = 1 << 31
self.clock_updater.clear_duration_filter()
self.last_error_count = 0
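The 20ms wake-up delay above is scheduled on the MCU clock (via minclock) rather than by sleeping on the host; a condensed, hedged sketch of that pattern, with a fake MCU object standing in for Klipper's real one so it runs standalone:

class _FakeMCU:
    # Hypothetical stand-in: a real Klipper MCU converts between system
    # time, print time and clock ticks
    def estimated_print_time(self, systime):
        return systime
    def print_time_to_clock(self, print_time):
        return int(print_time * 1000000)

def deferred_minclock(mcu, systime, delay=0.020):
    # Clock value to pass as minclock so the command runs `delay` seconds
    # after the current estimated print time
    return mcu.print_time_to_clock(mcu.estimated_print_time(systime) + delay)

print(deferred_minclock(_FakeMCU(), 12.5))  # -> 12520000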
def _finish_measurements(self):
if not self.is_measuring():
return
# Halt bulk reading
params = self.query_mpu9250_end_cmd.send([self.oid, 0, 0])
self.query_rate = 0
with self.lock:
self.raw_samples = []
self.bulk_queue.clear_samples()
logging.info("MPU9250 finished '%s' measurements", self.name)
self.set_reg(REG_PWR_MGMT_1, SET_PWR_MGMT_1_SLEEP)
self.set_reg(REG_PWR_MGMT_2, SET_PWR_MGMT_2_OFF)
# API interface
def _api_update(self, eventtime):
def _process_batch(self, eventtime):
self._update_clock()
with self.lock:
raw_samples = self.raw_samples
self.raw_samples = []
raw_samples = self.bulk_queue.pull_samples()
if not raw_samples:
return {}
samples = self._extract_samples(raw_samples)
if not samples:
return {}
return {'data': samples, 'errors': self.last_error_count,
'overflows': self.last_limit_count}
def _api_startstop(self, is_start):
if is_start:
self._start_measurements()
else:
self._finish_measurements()
def _handle_dump_mpu9250(self, web_request):
self.api_dump.add_client(web_request)
hdr = ('time', 'x_acceleration', 'y_acceleration', 'z_acceleration')
web_request.send({'header': hdr})
def start_internal_client(self):
cconn = self.api_dump.add_internal_client()
return adxl345.AccelQueryHelper(self.printer, cconn)
'overflows': self.clock_updater.get_last_limit_count()}
def load_config(config):
return MPU9250(config)
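For reference, a hedged printer.cfg stub exercising the options this module reads ('axes_map' via adxl345.read_axes_map and 'rate'); the mcu and bus names are illustrative:

[mpu9250]
i2c_mcu: rpi
i2c_bus: i2c.1
axes_map: x, y, z
rate: 4000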


View File

@@ -221,9 +221,9 @@ class Palette2:
def _wait_for_heartbeat(self):
startTs = self.reactor.monotonic()
currTs = startTs
while self.heartbeat is None and self.heartbeat < (
currTs - SETUP_TIMEOUT) and startTs > (
currTs - SETUP_TIMEOUT):
while self.heartbeat is None or (self.heartbeat < (
currTs - SETUP_TIMEOUT) and startTs > (
currTs - SETUP_TIMEOUT)):
currTs = self.reactor.pause(currTs + 1.)
if self.heartbeat < (currTs - SETUP_TIMEOUT):
@@ -401,7 +401,7 @@ class Palette2:
try:
fw = params[0][1:]
logging.info(
"Palette 2 firmware version %s detected" % os.fwalk)
"Palette 2 firmware version %s detected" % fw)
except (TypeError, IndexError):
logging.error("Unable to parse firmware version")
@@ -544,13 +544,15 @@ class Palette2:
self.cmd_Disconnect()
return self.reactor.NEVER
if len(raw_bytes):
text_buffer = self.read_buffer + str(raw_bytes.decode())
new_buffer = str(raw_bytes.decode(encoding='UTF-8',
errors='ignore'))
text_buffer = self.read_buffer + new_buffer
while True:
i = text_buffer.find("\n")
if i >= 0:
line = text_buffer[0:i+1]
line = text_buffer[0:i + 1]
self.read_queue.put(line.strip())
text_buffer = text_buffer[i+1:]
text_buffer = text_buffer[i + 1:]
else:
break
self.read_buffer = text_buffer
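A standalone sketch of the line-buffering loop above, which keeps partial lines in the buffer until a newline completes them (assumptions: a plain list stands in for the read queue, and the command strings are made up):

def feed(read_buffer, raw_bytes, read_queue):
    # Decode defensively and append to whatever was left from the last read
    text_buffer = read_buffer + raw_bytes.decode(encoding='UTF-8',
                                                 errors='ignore')
    while True:
        i = text_buffer.find("\n")
        if i < 0:
            break
        read_queue.append(text_buffer[0:i + 1].strip())
        text_buffer = text_buffer[i + 1:]
    return text_buffer

queue = []
buf = feed("", b"CMD A\nCM", queue)   # first line complete, "CM" kept back
buf = feed(buf, b"D B\n", queue)      # completes the second line
print(queue)  # ['CMD A', 'CMD B']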
@@ -566,7 +568,7 @@ class Palette2:
heartbeat_strings = [COMMAND_HEARTBEAT, "Connection Okay"]
if not any(x in text_line for x in heartbeat_strings):
logging.debug("%0.3f P2 -> : %s" %(eventtime, text_line))
logging.debug("%0.3f P2 -> : %s" % (eventtime, text_line))
# Received a heartbeat from the device
if text_line == COMMAND_HEARTBEAT:
@@ -581,7 +583,7 @@ class Palette2:
self.write_queue.put(COMMAND_HEARTBEAT)
eventtime = self.reactor.pause(eventtime + 5)
if self.heartbeat and self.heartbeat < (
eventtime - HEARTBEAT_TIMEOUT):
eventtime - HEARTBEAT_TIMEOUT):
logging.error(
"P2 has not responded to heartbeat")
if not self.is_printing or self.is_setup_complete:
@@ -610,6 +612,7 @@ class Palette2:
logging.error("Unable to communicate with the Palette 2")
self.signal_disconnect = True
return self.reactor.NEVER
return eventtime + SERIAL_TIMER
return eventtime + SERIAL_TIMER
def _run_Smart_Load(self, eventtime):
@@ -621,7 +624,7 @@ class Palette2:
idle_time = est_print_time - print_time
if not lookahead_empty or idle_time < 0.5:
return eventtime + \
max(0., min(1., print_time - est_print_time))
max(0., min(1., print_time - est_print_time))
extrude = abs(self.remaining_load_length)
extrude = min(50, extrude / 2)
@@ -646,5 +649,6 @@ class Palette2:
status["ping"] = self.omega_pings[-1]
return status
def load_config(config):
return Palette2(config)
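For reference, a hedged printer.cfg stub for this module; the serial device path is illustrative:

[palette2]
serial: /dev/ttyACM0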


Some files were not shown because too many files have changed in this diff.