mirror of https://github.com/QIDITECH/QIDISlicer.git
synced 2026-02-02 08:58:43 +03:00
QIDISlicer1.0.0
src/libslic3r/SLA/AGGRaster.hpp (Normal file, 225 lines added)
@@ -0,0 +1,225 @@

#ifndef AGGRASTER_HPP
#define AGGRASTER_HPP

#include <libslic3r/SLA/RasterBase.hpp>
#include "libslic3r/ExPolygon.hpp"

// For rasterizing
#include <agg/agg_basics.h>
#include <agg/agg_rendering_buffer.h>
#include <agg/agg_pixfmt_gray.h>
#include <agg/agg_pixfmt_rgb.h>
#include <agg/agg_renderer_base.h>
#include <agg/agg_renderer_scanline.h>

#include <agg/agg_scanline_p.h>
#include <agg/agg_rasterizer_scanline_aa.h>
#include <agg/agg_path_storage.h>

namespace Slic3r {

inline const Polygon& contour(const ExPolygon& p) { return p.contour; }
inline const Polygons& holes(const ExPolygon& p) { return p.holes; }

namespace sla {

template<class Color> struct Colors {
    static const Color White;
    static const Color Black;
};

template<class Color> const Color Colors<Color>::White = Color{255};
template<class Color> const Color Colors<Color>::Black = Color{0};

template<class PixelRenderer,
         template<class /*agg::renderer_base<PixelRenderer>*/> class Renderer,
         class Rasterizer = agg::rasterizer_scanline_aa<>,
         class Scanline   = agg::scanline_p8>
class AGGRaster: public RasterBase {
public:
    using TColor = typename PixelRenderer::color_type;
    using TValue = typename TColor::value_type;
    using TPixel = typename PixelRenderer::pixel_type;
    using TRawBuffer = agg::rendering_buffer;

protected:

    Resolution m_resolution;
    PixelDim m_pxdim_scaled;    // used for scaled coordinate polygons

    std::vector<TPixel> m_buf;
    agg::rendering_buffer m_rbuf;

    PixelRenderer m_pixrenderer;

    agg::renderer_base<PixelRenderer> m_raw_renderer;
    Renderer<agg::renderer_base<PixelRenderer>> m_renderer;

    Trafo m_trafo;
    Scanline m_scanlines;
    Rasterizer m_rasterizer;

    void flipy(agg::path_storage &path) const
    {
        path.flip_y(0, double(m_resolution.height_px));
    }

    void flipx(agg::path_storage &path) const
    {
        path.flip_x(0, double(m_resolution.width_px));
    }

    double getPx(const Point &p) { return p(0) * m_pxdim_scaled.w_mm; }
    double getPy(const Point &p) { return p(1) * m_pxdim_scaled.h_mm; }
    agg::path_storage to_path(const Polygon &poly) { return to_path(poly.points); }

    template<class PointVec> agg::path_storage _to_path(const PointVec& v)
    {
        agg::path_storage path;

        auto it = v.begin();
        path.move_to(getPx(*it), getPy(*it));
        while(++it != v.end()) path.line_to(getPx(*it), getPy(*it));
        path.line_to(getPx(v.front()), getPy(v.front()));

        return path;
    }

    template<class PointVec> agg::path_storage _to_path_flpxy(const PointVec& v)
    {
        agg::path_storage path;

        auto it = v.begin();
        path.move_to(getPy(*it), getPx(*it));
        while(++it != v.end()) path.line_to(getPy(*it), getPx(*it));
        path.line_to(getPy(v.front()), getPx(v.front()));

        return path;
    }

    template<class PointVec> agg::path_storage to_path(const PointVec &v)
    {
        auto path = m_trafo.flipXY ? _to_path_flpxy(v) : _to_path(v);

        path.translate_all_paths(m_trafo.center_x * m_pxdim_scaled.w_mm,
                                 m_trafo.center_y * m_pxdim_scaled.h_mm);

        if(m_trafo.mirror_x) flipx(path);
        if(m_trafo.mirror_y) flipy(path);

        return path;
    }

    template<class P> void _draw(const P &poly)
    {
        m_rasterizer.reset();

        m_rasterizer.add_path(to_path(contour(poly)));
        for(auto& h : holes(poly)) m_rasterizer.add_path(to_path(h));

        agg::render_scanlines(m_rasterizer, m_scanlines, m_renderer);
    }

public:
    template<class GammaFn>
    AGGRaster(const Resolution &res,
              const PixelDim &  pd,
              const Trafo &     trafo,
              const TColor &    foreground,
              const TColor &    background,
              GammaFn &&        gammafn)
        : m_resolution(res)
        , m_pxdim_scaled(SCALING_FACTOR, SCALING_FACTOR)
        , m_buf(res.pixels())
        , m_rbuf(reinterpret_cast<TValue *>(m_buf.data()),
                 unsigned(res.width_px),
                 unsigned(res.height_px),
                 int(res.width_px * PixelRenderer::num_components))
        , m_pixrenderer(m_rbuf)
        , m_raw_renderer(m_pixrenderer)
        , m_renderer(m_raw_renderer)
        , m_trafo(trafo)
    {
        // Visual Studio compiler gives warnings about possible division by zero.
        assert(pd.w_mm != 0 && pd.h_mm != 0);
        if (pd.w_mm != 0 && pd.h_mm != 0) {
            m_pxdim_scaled.w_mm /= pd.w_mm;
            m_pxdim_scaled.h_mm /= pd.h_mm;
        }
        m_renderer.color(foreground);
        clear(background);

        m_rasterizer.gamma(gammafn);
    }

    Trafo trafo() const override { return m_trafo; }
    Resolution resolution() const { return m_resolution; }
    PixelDim pixel_dimensions() const
    {
        return {SCALING_FACTOR / m_pxdim_scaled.w_mm,
                SCALING_FACTOR / m_pxdim_scaled.h_mm};
    }

    void draw(const ExPolygon &poly) override { _draw(poly); }

    EncodedRaster encode(RasterEncoder encoder) const override
    {
        return encoder(m_buf.data(), m_resolution.width_px, m_resolution.height_px, 1);
    }

    void clear(const TColor color) { m_raw_renderer.clear(color); }
};

/*
 * Captures an anti-aliased monochrome canvas where vectorial
 * polygons can be rasterized. Fill color is always white and the background is
 * black. Contours are anti-aliased.
 *
 * A gamma function can be specified at compile time to make it more flexible.
 */
using _RasterGrayscaleAA =
    AGGRaster<agg::pixfmt_gray8, agg::renderer_scanline_aa_solid>;

class RasterGrayscaleAA : public _RasterGrayscaleAA {
    using Base = _RasterGrayscaleAA;
    using typename Base::TColor;
    using typename Base::TValue;
public:
    template<class GammaFn>
    RasterGrayscaleAA(const Resolution &res,
                      const PixelDim &pd,
                      const RasterBase::Trafo &trafo,
                      GammaFn &&fn)
        : Base(res,
               pd,
               trafo,
               Colors<TColor>::White,
               Colors<TColor>::Black,
               std::forward<GammaFn>(fn))
    {}

    uint8_t read_pixel(size_t col, size_t row) const
    {
        static_assert(std::is_same<TValue, uint8_t>::value, "Not grayscale pix");

        uint8_t px;
        Base::m_buf[row * Base::resolution().width_px + col].get(px);
        return px;
    }

    void clear() { Base::clear(Colors<TColor>::Black); }
};

class RasterGrayscaleAAGammaPower: public RasterGrayscaleAA {
public:
    RasterGrayscaleAAGammaPower(const Resolution &res,
                                const PixelDim &pd,
                                const RasterBase::Trafo &trafo,
                                double gamma = 1.)
        : RasterGrayscaleAA(res, pd, trafo, agg::gamma_power(gamma))
    {}
};

}} // namespace Slic3r::sla

#endif // AGGRASTER_HPP
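A minimal usage sketch of the grayscale raster defined above. It assumes the Resolution, PixelDim and Trafo arguments can be brace-initialized in the order the constructor signatures suggest (their definitions live in RasterBase.hpp, not in this commit), and the concrete numbers are illustrative only:

```cpp
// Illustrative only: draw one ExPolygon into an anti-aliased 8-bit canvas and
// probe a pixel near the middle. 0 is background (black), 255 is full coverage.
#include <cstdint>
#include <libslic3r/SLA/AGGRaster.hpp>

using namespace Slic3r;

uint8_t probe(const ExPolygon &slice_island)
{
    sla::RasterGrayscaleAAGammaPower raster{
        {2560, 1440},   // Resolution: display pixels (made-up values)
        {0.05, 0.05},   // PixelDim: pixel size in mm (made-up values)
        {},             // RasterBase::Trafo: default orientation, no mirroring
        /*gamma =*/ 1.};

    raster.draw(slice_island); // fills the contour, subtracts the holes

    return raster.read_pixel(1280, 720);
}
```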
src/libslic3r/SLA/BranchingTreeSLA.cpp (Normal file, 427 lines added)
@@ -0,0 +1,427 @@

#include "BranchingTreeSLA.hpp"

#include "libslic3r/Execution/ExecutionTBB.hpp"

#include "libslic3r/KDTreeIndirect.hpp"

#include "SupportTreeUtils.hpp"
#include "BranchingTree/PointCloud.hpp"

#include "Pad.hpp"

#include <map>

namespace Slic3r { namespace sla {

inline constexpr const auto &beam_ex_policy = ex_tbb;

class BranchingTreeBuilder: public branchingtree::Builder {
    SupportTreeBuilder &m_builder;
    const SupportableMesh &m_sm;
    const branchingtree::PointCloud &m_cloud;

    std::vector<branchingtree::Node> m_pillars; // to put an index over them

    // cache successful ground connections
    mutable std::map<int, GroundConnection> m_gnd_connections;
    mutable execution::SpinningMutex<ExecutionTBB> m_gnd_connections_mtx;

    // Scaling of the input value 'widening_factor:<0, 1>' to produce reasonable
    // widening behaviour
    static constexpr double WIDENING_SCALE = 0.05;

    double get_radius(const branchingtree::Node &j) const
    {
        double w = WIDENING_SCALE * m_sm.cfg.pillar_widening_factor * j.weight;

        return double(j.Rmin) + w;
    }

    std::vector<size_t> m_unroutable_pinheads;

    void build_subtree(size_t root)
    {
        traverse(m_cloud, root, [this](const branchingtree::Node &node) {
            if (node.left >= 0 && node.right >= 0) {
                auto nparent = m_cloud.get(node.id);
                auto nleft = m_cloud.get(node.left);
                auto nright = m_cloud.get(node.right);
                Vec3d from1d = nleft.pos.cast<double>();
                Vec3d from2d = nright.pos.cast<double>();
                Vec3d tod = nparent.pos.cast<double>();
                double mergeR = get_radius(nparent);
                double leftR = get_radius(nleft);
                double rightR = get_radius(nright);

                m_builder.add_diffbridge(from1d, tod, leftR, mergeR);
                m_builder.add_diffbridge(from2d, tod, rightR, mergeR);
                m_builder.add_junction(tod, mergeR);
            } else if (int child = node.left + node.right + 1; child >= 0) {
                auto from = m_cloud.get(child);
                auto to = m_cloud.get(node.id);
                auto tod = to.pos.cast<double>();
                double toR = get_radius(to);
                m_builder.add_diffbridge(from.pos.cast<double>(),
                                         tod,
                                         get_radius(from),
                                         toR);
                m_builder.add_junction(tod, toR);
            }
        });
    }

    void discard_subtree(size_t root)
    {
        // Discard all the support points connecting to this branch.
        traverse(m_cloud, root, [this](const branchingtree::Node &node) {
            int suppid_parent = m_cloud.get_leaf_id(node.id);
            int suppid_left = m_cloud.get_leaf_id(node.left);
            int suppid_right = m_cloud.get_leaf_id(node.right);
            if (suppid_parent >= 0)
                m_unroutable_pinheads.emplace_back(suppid_parent);
            if (suppid_left >= 0)
                m_unroutable_pinheads.emplace_back(suppid_left);
            if (suppid_right >= 0)
                m_unroutable_pinheads.emplace_back(suppid_right);
        });
    }

    void discard_subtree_rescure(size_t root)
    {
        // Discard all the support points connecting to this branch.
        // As a last resort, try to route child nodes to ground and stop
        // traversing if any child branch succeeds.
        traverse(m_cloud, root, [this](const branchingtree::Node &node) {
            branchingtree::TraverseReturnT ret{true, true};

            int suppid_parent = m_cloud.get_leaf_id(node.id);
            int suppid_left = branchingtree::Node::ID_NONE;
            int suppid_right = branchingtree::Node::ID_NONE;

            double glvl = ground_level(m_sm);
            branchingtree::Node dst = node;
            dst.pos.z() = glvl;
            dst.weight += node.pos.z() - glvl;

            if (node.left >= 0 && add_ground_bridge(m_cloud.get(node.left), dst))
                ret.to_left = false;
            else
                suppid_left = m_cloud.get_leaf_id(node.left);

            if (node.right >= 0 && add_ground_bridge(m_cloud.get(node.right), dst))
                ret.to_right = false;
            else
                suppid_right = m_cloud.get_leaf_id(node.right);

            if (suppid_parent >= 0)
                m_unroutable_pinheads.emplace_back(suppid_parent);
            if (suppid_left >= 0)
                m_unroutable_pinheads.emplace_back(suppid_left);
            if (suppid_right >= 0)
                m_unroutable_pinheads.emplace_back(suppid_right);

            return ret;
        });
    }

public:
    BranchingTreeBuilder(SupportTreeBuilder &builder,
                         const SupportableMesh &sm,
                         const branchingtree::PointCloud &cloud)
        : m_builder{builder}, m_sm{sm}, m_cloud{cloud}
    {}

    bool add_bridge(const branchingtree::Node &from,
                    const branchingtree::Node &to) override;

    bool add_merger(const branchingtree::Node &node,
                    const branchingtree::Node &closest,
                    const branchingtree::Node &merge_node) override;

    bool add_ground_bridge(const branchingtree::Node &from,
                           const branchingtree::Node &/*to*/) override;

    bool add_mesh_bridge(const branchingtree::Node &from,
                         const branchingtree::Node &to) override;

    std::optional<Vec3f> suggest_avoidance(const branchingtree::Node &from,
                                           float max_bridge_len) const override;

    void report_unroutable(const branchingtree::Node &j) override
    {
        double glvl = ground_level(m_sm);
        branchingtree::Node dst = j;
        dst.pos.z() = glvl;
        dst.weight += j.pos.z() - glvl;
        if (add_ground_bridge(j, dst))
            return;

        BOOST_LOG_TRIVIAL(warning) << "Cannot route junction at " << j.pos.x()
                                   << " " << j.pos.y() << " " << j.pos.z();

        // Discard all the support points connecting to this branch.
        discard_subtree_rescure(j.id);
        // discard_subtree(j.id);
    }

    const std::vector<size_t>& unroutable_pinheads() const
    {
        return m_unroutable_pinheads;
    }

    bool is_valid() const override { return !m_builder.ctl().stopcondition(); }

    const std::vector<branchingtree::Node> & pillars() const { return m_pillars; }

    const GroundConnection *ground_conn(size_t pillar) const
    {
        const GroundConnection *ret = nullptr;

        auto it = m_gnd_connections.find(m_pillars[pillar].id);
        if (it != m_gnd_connections.end())
            ret = &it->second;

        return ret;
    }
};

bool BranchingTreeBuilder::add_bridge(const branchingtree::Node &from,
                                      const branchingtree::Node &to)
{
    Vec3d fromd = from.pos.cast<double>(), tod = to.pos.cast<double>();
    double fromR = get_radius(from), toR = get_radius(to);
    Beam beam{Ball{fromd, fromR}, Ball{tod, toR}};
    auto hit = beam_mesh_hit(beam_ex_policy, m_sm.emesh, beam,
                             m_sm.cfg.safety_distance_mm);

    bool ret = hit.distance() > (tod - fromd).norm();

    return ret;
}

bool BranchingTreeBuilder::add_merger(const branchingtree::Node &node,
                                      const branchingtree::Node &closest,
                                      const branchingtree::Node &merge_node)
{
    Vec3d from1d = node.pos.cast<double>(),
          from2d = closest.pos.cast<double>(),
          tod    = merge_node.pos.cast<double>();

    double mergeR   = get_radius(merge_node);
    double nodeR    = get_radius(node);
    double closestR = get_radius(closest);
    Beam beam1{Ball{from1d, nodeR}, Ball{tod, mergeR}};
    Beam beam2{Ball{from2d, closestR}, Ball{tod, mergeR}};

    auto sd = m_sm.cfg.safety_distance_mm;
    auto hit1 = beam_mesh_hit(beam_ex_policy, m_sm.emesh, beam1, sd);
    auto hit2 = beam_mesh_hit(beam_ex_policy, m_sm.emesh, beam2, sd);

    bool ret = hit1.distance() > (tod - from1d).norm() &&
               hit2.distance() > (tod - from2d).norm();

    return ret;
}

bool BranchingTreeBuilder::add_ground_bridge(const branchingtree::Node &from,
                                             const branchingtree::Node &to)
{
    bool ret = false;

    namespace bgi = boost::geometry::index;

    auto it = m_gnd_connections.find(from.id);
    const GroundConnection *connptr = nullptr;

    if (it == m_gnd_connections.end()) {
        sla::Junction j{from.pos.cast<double>(), get_radius(from)};
        Vec3d init_dir = (to.pos - from.pos).cast<double>().normalized();

        auto conn = deepsearch_ground_connection(beam_ex_policy, m_sm, j,
                                                 get_radius(to), init_dir);

        // Remember that this node was already tested for a ground route, don't
        // test it with any other destination ground point because
        // it is unlikely that search_ground_route would find a better solution
        connptr = &(m_gnd_connections[from.id] = conn);
    } else {
        connptr = &(it->second);
    }

    if (connptr && *connptr) {
        m_pillars.emplace_back(from);
        ret = true;
        build_subtree(from.id);
    }

    return ret;
}

bool BranchingTreeBuilder::add_mesh_bridge(const branchingtree::Node &from,
                                           const branchingtree::Node &to)
{
    if (from.weight > m_sm.cfg.max_weight_on_model_support)
        return false;

    sla::Junction fromj = {from.pos.cast<double>(), get_radius(from)};

    auto anchor = m_sm.cfg.ground_facing_only ?
                      std::optional<Anchor>{} : // If no mesh connections are allowed
                      calculate_anchor_placement(beam_ex_policy, m_sm, fromj,
                                                 to.pos.cast<double>());

    if (anchor) {
        sla::Junction toj = {anchor->junction_point(), anchor->r_back_mm};

        auto hit = beam_mesh_hit(beam_ex_policy, m_sm.emesh,
                                 Beam{{fromj.pos, fromj.r}, {toj.pos, toj.r}}, 0.);

        if (hit.distance() > distance(fromj.pos, toj.pos)) {
            m_builder.add_diffbridge(fromj.pos, toj.pos, fromj.r, toj.r);
            m_builder.add_anchor(*anchor);

            build_subtree(from.id);
        } else {
            anchor.reset();
        }
    }

    return bool(anchor);
}

static std::optional<Vec3f> get_avoidance(const GroundConnection &conn,
                                          float maxdist)
{
    std::optional<Vec3f> ret;

    if (conn) {
        if (conn.path.size() > 1) {
            ret = conn.path[1].pos.cast<float>();
        } else {
            Vec3f pbeg = conn.path[0].pos.cast<float>();
            Vec3f pend = conn.pillar_base->pos.cast<float>();
            pbeg.z() = std::max(pbeg.z() - maxdist, pend.z());
            ret = pbeg;
        }
    }

    return ret;
}

std::optional<Vec3f> BranchingTreeBuilder::suggest_avoidance(
    const branchingtree::Node &from, float max_bridge_len) const
{
    std::optional<Vec3f> ret;

    double glvl = ground_level(m_sm);
    branchingtree::Node dst = from;
    dst.pos.z() = glvl;
    dst.weight += from.pos.z() - glvl;
    sla::Junction j{from.pos.cast<double>(), get_radius(from)};

    auto found_it = m_gnd_connections.end();
    {
        std::lock_guard lk{m_gnd_connections_mtx};
        found_it = m_gnd_connections.find(from.id);
    }

    if (found_it != m_gnd_connections.end()) {
        ret = get_avoidance(found_it->second, max_bridge_len);
    } else {
        auto conn = deepsearch_ground_connection(
            beam_ex_policy, m_sm, j, get_radius(dst), sla::DOWN);

        {
            std::lock_guard lk{m_gnd_connections_mtx};
            m_gnd_connections[from.id] = conn;
        }

        ret = get_avoidance(conn, max_bridge_len);
    }

    return ret;
}

inline void build_pillars(SupportTreeBuilder &builder,
                          BranchingTreeBuilder &vbuilder,
                          const SupportableMesh &sm)
{
    for (size_t pill_id = 0; pill_id < vbuilder.pillars().size(); ++pill_id) {
        auto *conn = vbuilder.ground_conn(pill_id);
        if (conn)
            build_ground_connection(builder, sm, *conn);
    }
}

void create_branching_tree(SupportTreeBuilder &builder, const SupportableMesh &sm)
{
    auto coordfn = [&sm](size_t id, size_t dim) { return sm.pts[id].pos(dim); };
    KDTreeIndirect<3, float, decltype(coordfn)> tree{coordfn, sm.pts.size()};

    auto nondup_idx = non_duplicate_suppt_indices(tree, sm.pts, 0.1);
    std::vector<std::optional<Head>> heads(nondup_idx.size());
    auto leafs = reserve_vector<branchingtree::Node>(nondup_idx.size());

    execution::for_each(
        ex_tbb, size_t(0), nondup_idx.size(),
        [&sm, &heads, &nondup_idx, &builder](size_t i) {
            if (!builder.ctl().stopcondition())
                heads[i] = calculate_pinhead_placement(ex_seq, sm, nondup_idx[i]);
        },
        execution::max_concurrency(ex_tbb)
    );

    if (builder.ctl().stopcondition())
        return;

    for (auto &h : heads)
        if (h && h->is_valid()) {
            leafs.emplace_back(h->junction_point().cast<float>(), h->r_back_mm);
            h->id = long(leafs.size() - 1);
            builder.add_head(h->id, *h);
        }

    auto &its = *sm.emesh.get_triangle_mesh();
    ExPolygons bedpolys = {branchingtree::make_bed_poly(its)};

    auto props = branchingtree::Properties{}
                     .bed_shape(bedpolys)
                     .ground_level(sla::ground_level(sm))
                     .max_slope(sm.cfg.bridge_slope)
                     .max_branch_length(sm.cfg.max_bridge_length_mm);

    auto meshpts = sm.cfg.ground_facing_only ?
                       std::vector<branchingtree::Node>{} :
                       branchingtree::sample_mesh(its,
                                                  props.sampling_radius());

    auto bedpts = branchingtree::sample_bed(props.bed_shape(),
                                            float(props.ground_level()),
                                            props.sampling_radius());

    for (auto &bp : bedpts)
        bp.Rmin = sm.cfg.head_back_radius_mm;

    branchingtree::PointCloud nodes{std::move(meshpts), std::move(bedpts),
                                    std::move(leafs), props};

    BranchingTreeBuilder vbuilder{builder, sm, nodes};

    execution::for_each(ex_tbb,
                        size_t(0),
                        nodes.get_leafs().size(),
                        [&nodes, &vbuilder](size_t leaf_idx) {
                            vbuilder.suggest_avoidance(nodes.get_leafs()[leaf_idx],
                                                       nodes.properties().max_branch_length());
                        });

    branchingtree::build_tree(nodes, vbuilder);

    build_pillars(builder, vbuilder, sm);

    for (size_t id : vbuilder.unroutable_pinheads())
        builder.head(id).invalidate();

}

}} // namespace Slic3r::sla
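The `node.left + node.right + 1` expression in build_subtree() recovers the index of the only present child without a second lookup. A standalone sketch of why that works, assuming an absent child is stored as branchingtree::Node::ID_NONE with the value -1 (the constant is defined outside this commit, so treat that value as an assumption):

```cpp
// Sketch of the single-child recovery used in build_subtree(). The branch is
// only reached when at most one of (left, right) is a valid index; the other
// is assumed to be -1, so left + right + 1 equals the valid index, and when
// both are -1 the result is negative and rejected by the `child >= 0` guard.
constexpr int ID_NONE = -1; // assumed value, stands in for Node::ID_NONE

constexpr int single_child(int left, int right) { return left + right + 1; }

static_assert(single_child(7, ID_NONE) == 7);
static_assert(single_child(ID_NONE, 3) == 3);
static_assert(single_child(ID_NONE, ID_NONE) < 0); // no child at all
```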
src/libslic3r/SLA/BranchingTreeSLA.hpp (Normal file, 15 lines added)
@@ -0,0 +1,15 @@

#ifndef BRANCHINGTREESLA_HPP
#define BRANCHINGTREESLA_HPP

#include "libslic3r/BranchingTree/BranchingTree.hpp"
#include "SupportTreeBuilder.hpp"

#include <boost/log/trivial.hpp>

namespace Slic3r { namespace sla {

void create_branching_tree(SupportTreeBuilder& builder, const SupportableMesh &sm);

}} // namespace Slic3r::sla

#endif // BRANCHINGTREESLA_HPP
src/libslic3r/SLA/Clustering.cpp (Normal file, 152 lines added)
@@ -0,0 +1,152 @@

#include "Clustering.hpp"
#include "boost/geometry/index/rtree.hpp"

#include <libslic3r/SLA/SpatIndex.hpp>
#include <libslic3r/BoostAdapter.hpp>

namespace Slic3r { namespace sla {

namespace bgi = boost::geometry::index;
using Index3D = bgi::rtree< PointIndexEl, bgi::rstar<16, 4> /* ? */ >;

namespace {

bool cmp_ptidx_elements(const PointIndexEl& e1, const PointIndexEl& e2)
{
    return e1.second < e2.second;
};

ClusteredPoints cluster(Index3D &sindex,
                        unsigned max_points,
                        std::function<std::vector<PointIndexEl>(
                            const Index3D &, const PointIndexEl &)> qfn)
{
    using Elems = std::vector<PointIndexEl>;

    // Recursive function for visiting all the points in a given distance to
    // each other
    std::function<void(Elems&, Elems&)> group =
        [&sindex, &group, max_points, qfn](Elems& pts, Elems& cluster)
    {
        for(auto& p : pts) {
            std::vector<PointIndexEl> tmp = qfn(sindex, p);

            std::sort(tmp.begin(), tmp.end(), cmp_ptidx_elements);

            Elems newpts;
            std::set_difference(tmp.begin(), tmp.end(),
                                cluster.begin(), cluster.end(),
                                std::back_inserter(newpts), cmp_ptidx_elements);

            int c = max_points && newpts.size() + cluster.size() > max_points?
                        int(max_points - cluster.size()) : int(newpts.size());

            cluster.insert(cluster.end(), newpts.begin(), newpts.begin() + c);
            std::sort(cluster.begin(), cluster.end(), cmp_ptidx_elements);

            if(!newpts.empty() && (!max_points || cluster.size() < max_points))
                group(newpts, cluster);
        }
    };

    std::vector<Elems> clusters;
    for(auto it = sindex.begin(); it != sindex.end();) {
        Elems cluster = {};
        Elems pts = {*it};
        group(pts, cluster);

        for(auto& c : cluster) sindex.remove(c);
        it = sindex.begin();

        clusters.emplace_back(cluster);
    }

    ClusteredPoints result;
    for(auto& cluster : clusters) {
        result.emplace_back();
        for(auto c : cluster) result.back().emplace_back(c.second);
    }

    return result;
}

std::vector<PointIndexEl> distance_queryfn(const Index3D& sindex,
                                           const PointIndexEl& p,
                                           double dist,
                                           unsigned max_points)
{
    std::vector<PointIndexEl> tmp; tmp.reserve(max_points);
    sindex.query(
        bgi::nearest(p.first, max_points),
        std::back_inserter(tmp)
    );

    for(auto it = tmp.begin(); it < tmp.end(); ++it)
        if((p.first - it->first).norm() > dist) it = tmp.erase(it);

    return tmp;
}

} // namespace

// Clustering a set of points by the given criteria
ClusteredPoints cluster(
    const std::vector<unsigned>& indices,
    std::function<Vec3d(unsigned)> pointfn,
    double dist,
    unsigned max_points)
{
    // A spatial index for querying the nearest points
    Index3D sindex;

    // Build the index
    for(auto idx : indices) sindex.insert( std::make_pair(pointfn(idx), idx));

    return cluster(sindex, max_points,
                   [dist, max_points](const Index3D& sidx, const PointIndexEl& p)
    {
        return distance_queryfn(sidx, p, dist, max_points);
    });
}

// Clustering a set of points by the given criteria
ClusteredPoints cluster(
    const std::vector<unsigned>& indices,
    std::function<Vec3d(unsigned)> pointfn,
    std::function<bool(const PointIndexEl&, const PointIndexEl&)> predicate,
    unsigned max_points)
{
    // A spatial index for querying the nearest points
    Index3D sindex;

    // Build the index
    for(auto idx : indices) sindex.insert( std::make_pair(pointfn(idx), idx));

    return cluster(sindex, max_points,
                   [max_points, predicate](const Index3D& sidx, const PointIndexEl& p)
    {
        std::vector<PointIndexEl> tmp; tmp.reserve(max_points);
        sidx.query(bgi::satisfies([p, predicate](const PointIndexEl& e){
            return predicate(p, e);
        }), std::back_inserter(tmp));
        return tmp;
    });
}

ClusteredPoints cluster(const Eigen::MatrixXd& pts, double dist, unsigned max_points)
{
    // A spatial index for querying the nearest points
    Index3D sindex;

    // Build the index
    for(Eigen::Index i = 0; i < pts.rows(); i++)
        sindex.insert(std::make_pair(Vec3d(pts.row(i)), unsigned(i)));

    return cluster(sindex, max_points,
                   [dist, max_points](const Index3D& sidx, const PointIndexEl& p)
    {
        return distance_queryfn(sidx, p, dist, max_points);
    });
}

}} // namespace Slic3r::sla
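A short usage sketch for the distance-based overload above. The helper name, the point data and the 0.5 mm / 5-point limits are illustrative, not taken from this commit:

```cpp
// Illustrative only: group support points that lie within 0.5 mm of each
// other, allowing at most 5 points per cluster. Each resulting ClusterEl
// holds indices into 'pts'.
#include <numeric>
#include <vector>
#include <libslic3r/SLA/Clustering.hpp>

using namespace Slic3r;

sla::ClusteredPoints group_nearby(const std::vector<Vec3d> &pts)
{
    std::vector<unsigned> ids(pts.size());
    std::iota(ids.begin(), ids.end(), 0u);

    return sla::cluster(ids,
                        [&pts](unsigned i) { return pts[i]; },
                        /*dist =*/ 0.5,
                        /*max_points =*/ 5);
}
```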
src/libslic3r/SLA/Clustering.hpp (Normal file, 82 lines added)
@@ -0,0 +1,82 @@

#ifndef SLA_CLUSTERING_HPP
#define SLA_CLUSTERING_HPP

#include <vector>

#include <libslic3r/Point.hpp>
#include <libslic3r/SLA/SpatIndex.hpp>

namespace Slic3r { namespace sla {

using ClusterEl = std::vector<unsigned>;
using ClusteredPoints = std::vector<ClusterEl>;

// Clustering a set of points by the given distance.
ClusteredPoints cluster(const std::vector<unsigned>& indices,
                        std::function<Vec3d(unsigned)> pointfn,
                        double dist,
                        unsigned max_points);

ClusteredPoints cluster(const Eigen::MatrixXd& points,
                        double dist,
                        unsigned max_points);

ClusteredPoints cluster(
    const std::vector<unsigned>& indices,
    std::function<Vec3d(unsigned)> pointfn,
    std::function<bool(const PointIndexEl&, const PointIndexEl&)> predicate,
    unsigned max_points);

// This function returns the position of the centroid in the input 'clust'
// vector of point indices.
template<class DistFn, class PointFn>
long cluster_centroid(const ClusterEl &clust, PointFn pointfn, DistFn df)
{
    switch(clust.size()) {
    case 0: /* empty cluster */ return -1;
    case 1: /* only one element */ return 0;
    case 2: /* if two elements, there is no center */ return 0;
    default: ;
    }

    // The function works by calculating for each point the average distance
    // from all the other points in the cluster. We create a selector bitmask of
    // the same size as the cluster. The bitmask will have two true bits and
    // false bits for the rest of the items and we will loop through all the
    // permutations of the bitmask (combinations of two points). Get the
    // distance for the two points and add the distance to the averages.
    // The point with the smallest average then wins.

    // The complexity should be O(n^2) but we will mostly apply this function
    // for small clusters only (about 3 elements)

    std::vector<bool> sel(clust.size(), false);   // create full zero bitmask
    std::fill(sel.end() - 2, sel.end(), true);    // insert the two ones
    std::vector<double> avgs(clust.size(), 0.0);  // store the average distances

    do {
        std::array<size_t, 2> idx;
        for(size_t i = 0, j = 0; i < clust.size(); i++)
            if(sel[i]) idx[j++] = i;

        double d = df(pointfn(clust[idx[0]]),
                      pointfn(clust[idx[1]]));

        // add the distance to the sums for both associated points
        for(auto i : idx) avgs[i] += d;

        // now continue with the next permutation of the bitmask with two 1s
    } while(std::next_permutation(sel.begin(), sel.end()));

    // Divide by point size in the cluster to get the average (may be redundant)
    for(auto& a : avgs) a /= clust.size();

    // get the lowest average distance and return the index
    auto minit = std::min_element(avgs.begin(), avgs.end());
    return long(minit - avgs.begin());
}

}} // namespace Slic3r::sla

#endif // CLUSTERING_HPP
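The bitmask trick in cluster_centroid() deserves a standalone illustration: a vector<bool> holding exactly two true bits, advanced with std::next_permutation, visits every unordered pair of indices exactly once. A self-contained sketch, independent of the Slic3r types:

```cpp
// Sketch of the pair enumeration used by cluster_centroid(). With the two
// 'true' bits initially placed at the end (the lexicographically smallest
// arrangement), std::next_permutation walks through every distinct placement
// of the two bits, i.e. every unordered pair {i, j}, exactly once:
// n*(n-1)/2 iterations for n elements.
#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
    const size_t n = 4;
    std::vector<bool> sel(n, false);
    std::fill(sel.end() - 2, sel.end(), true);

    do {
        size_t idx[2], j = 0;
        for (size_t i = 0; i < n; ++i)
            if (sel[i]) idx[j++] = i;
        std::printf("pair {%zu, %zu}\n", idx[0], idx[1]); // 6 pairs for n == 4
    } while (std::next_permutation(sel.begin(), sel.end()));
}
```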
src/libslic3r/SLA/ConcaveHull.cpp (Normal file, 146 lines added)
@@ -0,0 +1,146 @@

#include <libslic3r/SLA/ConcaveHull.hpp>
#include <libslic3r/SLA/SpatIndex.hpp>

#include <libslic3r/MTUtils.hpp>
#include <libslic3r/ClipperUtils.hpp>

#include <boost/log/trivial.hpp>

namespace Slic3r {
namespace sla {

inline Vec3d to_vec3(const Vec2crd &v2) { return {double(v2(X)), double(v2(Y)), 0.}; }
inline Vec3d to_vec3(const Vec2d &v2) { return {v2(X), v2(Y), 0.}; }
inline Vec2crd to_vec2(const Vec3d &v3) { return {coord_t(v3(X)), coord_t(v3(Y))}; }

Point ConcaveHull::centroid(const Points &pp)
{
    Point c;
    switch(pp.size()) {
    case 0: break;
    case 1: c = pp.front(); break;
    case 2: c = (pp[0] + pp[1]) / 2; break;
    default: {
        auto MAX = std::numeric_limits<Point::coord_type>::max();
        auto MIN = std::numeric_limits<Point::coord_type>::min();
        Point min = {MAX, MAX}, max = {MIN, MIN};

        for(auto& p : pp) {
            if(p(0) < min(0)) min(0) = p(0);
            if(p(1) < min(1)) min(1) = p(1);
            if(p(0) > max(0)) max(0) = p(0);
            if(p(1) > max(1)) max(1) = p(1);
        }
        c(0) = min(0) + (max(0) - min(0)) / 2;
        c(1) = min(1) + (max(1) - min(1)) / 2;
        break;
    }
    }

    return c;
}

Points ConcaveHull::calculate_centroids() const
{
    // We get the centroids of all the islands in the 2D slice
    Points centroids;
    centroids.reserve(m_polys.size());
    std::transform(m_polys.begin(), m_polys.end(),
                   std::back_inserter(centroids),
                   [](const Polygon &poly) { return centroid(poly); });

    return centroids;
}

void ConcaveHull::merge_polygons() { m_polys = get_contours(union_ex(m_polys)); }

void ConcaveHull::add_connector_rectangles(const Points &centroids,
                                           coord_t max_dist,
                                           ThrowOnCancel thr)
{
    // Centroid of the centroids of islands. This is where the additional
    // connector sticks are routed.
    Point cc = centroid(centroids);

    PointIndex ctrindex;
    unsigned idx = 0;
    for(const Point &ct : centroids) ctrindex.insert(to_vec3(ct), idx++);

    m_polys.reserve(m_polys.size() + centroids.size());

    idx = 0;
    for (const Point &c : centroids) {
        thr();

        double dx = c.x() - cc.x(), dy = c.y() - cc.y();
        double l = std::sqrt(dx * dx + dy * dy);
        double nx = dx / l, ny = dy / l;

        const Point &ct = centroids[idx];

        std::vector<PointIndexEl> result = ctrindex.nearest(to_vec3(ct), 2);

        double dist = max_dist;
        for (const PointIndexEl &el : result)
            if (el.second != idx) {
                dist = Line(to_vec2(el.first), ct).length();
                break;
            }

        idx++;

        if (dist >= max_dist) return;

        Polygon r;
        r.points.reserve(3);
        r.points.emplace_back(cc);

        Point n(scaled(nx), scaled(ny));
        r.points.emplace_back(c + Point(n.y(), -n.x()));
        r.points.emplace_back(c + Point(-n.y(), n.x()));
        offset(r, scaled<float>(1.));

        m_polys.emplace_back(r);
    }
}

ConcaveHull::ConcaveHull(const Polygons &polys, double mergedist, ThrowOnCancel thr)
{
    if(polys.empty()) return;

    m_polys = polys;
    merge_polygons();

    if(m_polys.size() == 1) return;

    Points centroids = calculate_centroids();

    add_connector_rectangles(centroids, scaled(mergedist), thr);

    merge_polygons();
}

ExPolygons ConcaveHull::to_expolygons() const
{
    auto ret = reserve_vector<ExPolygon>(m_polys.size());
    for (const Polygon &p : m_polys) ret.emplace_back(ExPolygon(p));
    return ret;
}

ExPolygons offset_waffle_style_ex(const ConcaveHull &hull, coord_t delta)
{
    return to_expolygons(offset_waffle_style(hull, delta));
}

Polygons offset_waffle_style(const ConcaveHull &hull, coord_t delta)
{
    auto arc_tolerance = scaled<double>(0.01);
    Polygons res = closing(hull.polygons(), 2 * delta, delta, ClipperLib::jtRound, arc_tolerance);

    auto it = std::remove_if(res.begin(), res.end(), [](Polygon &p) { return p.is_clockwise(); });
    res.erase(it, res.end());

    return res;
}

}} // namespace Slic3r::sla
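A usage sketch for the hull: merge the islands of a slice and grow a waffle-style outline from them. The helper name, the 1 mm merge distance and the 3 mm offset are illustrative values, and the cancellation callback is a no-op:

```cpp
// Illustrative only: build the star-connected hull of a slice's islands and
// produce an enlarged outline with the waffle-style closing offset.
#include <libslic3r/SLA/ConcaveHull.hpp>

using namespace Slic3r;

ExPolygons pad_outline(const ExPolygons &slice_islands)
{
    auto no_cancel = []() {}; // ThrowOnCancel stub, never throws

    sla::ConcaveHull hull{slice_islands, /*merge_dist =*/ 1.0, no_cancel};

    // 'delta' is in scaled (integer) coordinates, like the Polygon points.
    coord_t delta = scaled(3.0); // roughly a 3 mm rim around the hull
    return sla::offset_waffle_style_ex(hull, delta);
}
```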
src/libslic3r/SLA/ConcaveHull.hpp (Normal file, 53 lines added)
@@ -0,0 +1,53 @@

#ifndef SLA_CONCAVEHULL_HPP
#define SLA_CONCAVEHULL_HPP

#include <libslic3r/ExPolygon.hpp>

namespace Slic3r {
namespace sla {

inline Polygons get_contours(const ExPolygons &poly)
{
    Polygons ret; ret.reserve(poly.size());
    for (const ExPolygon &p : poly) ret.emplace_back(p.contour);

    return ret;
}

using ThrowOnCancel = std::function<void()>;

/// A fake concave hull that is constructed by connecting separate shapes
/// with explicit bridges. Bridges are generated from each shape's centroid
/// to the center of the "scene" which is the centroid calculated from the shape
/// centroids (a star is created...)
class ConcaveHull {
    Polygons m_polys;

    static Point centroid(const Points& pp);

    static inline Point centroid(const Polygon &poly) { return poly.centroid(); }

    Points calculate_centroids() const;

    void merge_polygons();

    void add_connector_rectangles(const Points &centroids,
                                  coord_t max_dist,
                                  ThrowOnCancel thr);
public:

    ConcaveHull(const ExPolygons& polys, double merge_dist, ThrowOnCancel thr)
        : ConcaveHull{to_polygons(polys), merge_dist, thr} {}

    ConcaveHull(const Polygons& polys, double mergedist, ThrowOnCancel thr);

    const Polygons & polygons() const { return m_polys; }

    ExPolygons to_expolygons() const;
};

ExPolygons offset_waffle_style_ex(const ConcaveHull &ccvhull, coord_t delta);
Polygons offset_waffle_style(const ConcaveHull &polys, coord_t delta);

}} // namespace Slic3r::sla
#endif // CONCAVEHULL_HPP
src/libslic3r/SLA/DefaultSupportTree.cpp (Normal file, 983 lines added)
@@ -0,0 +1,983 @@

#include "DefaultSupportTree.hpp"

#include <libslic3r/Optimize/NLoptOptimizer.hpp>
#include <libslic3r/SLA/Clustering.hpp>
#include <libslic3r/MeshNormals.hpp>
#include <libslic3r/Execution/ExecutionTBB.hpp>

namespace Slic3r { namespace sla {

using Slic3r::opt::initvals;
using Slic3r::opt::bounds;
using Slic3r::opt::StopCriteria;
using Slic3r::opt::Optimizer;
using Slic3r::opt::AlgNLoptSubplex;
using Slic3r::opt::AlgNLoptGenetic;

DefaultSupportTree::DefaultSupportTree(SupportTreeBuilder & builder,
                                       const SupportableMesh &sm)
    : m_sm(sm)
    , m_support_nmls(sm.pts.size(), 3)
    , m_builder(builder)
    , m_points(sm.pts.size(), 3)
    , m_thr(builder.ctl().cancelfn)
{
    // Prepare the support points in Eigen/IGL format as well, we will use
    // it mostly in this form.

    long i = 0;
    for (const SupportPoint &sp : m_sm.pts) {
        m_points.row(i).x() = double(sp.pos.x());
        m_points.row(i).y() = double(sp.pos.y());
        m_points.row(i).z() = double(sp.pos.z());
        ++i;
    }
}

bool DefaultSupportTree::execute(SupportTreeBuilder &builder,
                                 const SupportableMesh &sm)
{
    if(sm.pts.empty()) return false;

    DefaultSupportTree alg(builder, sm);

    // Let's define the individual steps of the processing. We can experiment
    // later with the ordering and the dependencies between them.
    enum Steps {
        BEGIN,
        PINHEADS,
        CLASSIFY,
        ROUTING_GROUND,
        ROUTING_NONGROUND,
        CASCADE_PILLARS,
        MERGE_RESULT,
        DONE,
        ABORT,
        NUM_STEPS
        //...
    };

    // Collect the algorithm steps into a nice sequence
    std::array<std::function<void()>, NUM_STEPS> program = {
        [] () {
            // Begin...
            // Potentially clear up the shared data (not needed for now)
        },

        std::bind(&DefaultSupportTree::add_pinheads, &alg),

        std::bind(&DefaultSupportTree::classify, &alg),

        std::bind(&DefaultSupportTree::routing_to_ground, &alg),

        std::bind(&DefaultSupportTree::routing_to_model, &alg),

        std::bind(&DefaultSupportTree::interconnect_pillars, &alg),

        std::bind(&DefaultSupportTree::merge_result, &alg),

        [] () {
            // Done
        },

        [] () {
            // Abort
        }
    };

    Steps pc = BEGIN;

    if(sm.cfg.ground_facing_only) {
        program[ROUTING_NONGROUND] = []() {
            BOOST_LOG_TRIVIAL(info)
                << "Skipping model-facing supports as requested.";
        };
    }

    // Let's define a simple automaton that will run our program.
    auto progress = [&builder, &pc] () {
        static const std::array<std::string, NUM_STEPS> stepstr {
            "Starting",
            "Generate pinheads",
            "Classification",
            "Routing to ground",
            "Routing supports to model surface",
            "Interconnecting pillars",
            "Merging support mesh",
            "Done",
            "Abort"
        };

        static const std::array<unsigned, NUM_STEPS> stepstate {
            0,
            30,
            50,
            60,
            70,
            80,
            99,
            100,
            0
        };

        if(builder.ctl().stopcondition()) pc = ABORT;

        switch(pc) {
        case BEGIN: pc = PINHEADS; break;
        case PINHEADS: pc = CLASSIFY; break;
        case CLASSIFY: pc = ROUTING_GROUND; break;
        case ROUTING_GROUND: pc = ROUTING_NONGROUND; break;
        case ROUTING_NONGROUND: pc = CASCADE_PILLARS; break;
        case CASCADE_PILLARS: pc = MERGE_RESULT; break;
        case MERGE_RESULT: pc = DONE; break;
        case DONE:
        case ABORT: break;
        default: ;
        }

        builder.ctl().statuscb(stepstate[pc], stepstr[pc]);
    };

    // Just here we run the computation...
    while(pc < DONE) {
        progress();
        program[pc]();
    }

    return pc == ABORT;
}
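execute() above drives the whole build through a small table-driven automaton: an enum of steps, an array of callables indexed by that enum, and a progress() lambda that advances the program counter and reports status before each step runs. A stripped-down sketch of the same pattern, outside the Slic3r types:

```cpp
// Sketch of the step-automaton pattern used by DefaultSupportTree::execute():
// each enum value indexes one std::function, and the driver loop alternates
// between advancing/reporting progress and running the current step.
#include <array>
#include <cstdio>
#include <functional>

enum Steps { BEGIN, WORK_A, WORK_B, DONE, ABORT, NUM_STEPS };

int main()
{
    std::array<std::function<void()>, NUM_STEPS> program{
        [] { /* begin */ },
        [] { std::puts("doing A"); },
        [] { std::puts("doing B"); },
        [] { /* done */ },
        [] { /* abort */ }
    };

    Steps pc = BEGIN;
    auto progress = [&pc] {
        switch (pc) {                // linear schedule, as in execute()
        case BEGIN:  pc = WORK_A; break;
        case WORK_A: pc = WORK_B; break;
        case WORK_B: pc = DONE;   break;
        default: break;
        }
        std::printf("now at step %d\n", int(pc));
    };

    while (pc < DONE) { progress(); program[pc](); }
}
```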
AABBMesh::hit_result DefaultSupportTree::pinhead_mesh_intersect(
    const Vec3d &s,
    const Vec3d &dir,
    double r_pin,
    double r_back,
    double width,
    double sd)
{
    return sla::pinhead_mesh_hit(suptree_ex_policy, m_sm.emesh, s, dir, r_pin, r_back, width, sd);
}

AABBMesh::hit_result DefaultSupportTree::bridge_mesh_intersect(
    const Vec3d &src, const Vec3d &dir, double r, double sd)
{
    return sla::beam_mesh_hit(suptree_ex_policy, m_sm.emesh, {src, dir, r}, sd);
}

bool DefaultSupportTree::interconnect(const Pillar &pillar,
                                      const Pillar &nextpillar)
{
    // We need to get the starting point of the zig-zag pattern. We have to
    // be aware that the two head junctions are at different heights. We
    // may start from the lowest junction and call it a day but this
    // strategy would leave unconnected a lot of pillar duos where the
    // shorter pillar is too short to start a new bridge but the taller
    // pillar could still be bridged with the shorter one.
    bool was_connected = false;

    Vec3d supper = pillar.startpoint();
    Vec3d slower = nextpillar.startpoint();
    Vec3d eupper = pillar.endpoint();
    Vec3d elower = nextpillar.endpoint();

    double zmin = ground_level(m_sm) + m_sm.cfg.base_height_mm;
    eupper.z() = std::max(eupper.z(), zmin);
    elower.z() = std::max(elower.z(), zmin);

    // The usable length of both pillars should be positive
    if(slower.z() - elower.z() < 0) return false;
    if(supper.z() - eupper.z() < 0) return false;

    double pillar_dist = distance(Vec2d{slower.x(), slower.y()},
                                  Vec2d{supper.x(), supper.y()});
    double bridge_distance = pillar_dist / std::cos(-m_sm.cfg.bridge_slope);
    double zstep = pillar_dist * std::tan(-m_sm.cfg.bridge_slope);

    if(pillar_dist < 2 * m_sm.cfg.head_back_radius_mm ||
       pillar_dist > m_sm.cfg.max_pillar_link_distance_mm) return false;

    if(supper.z() < slower.z()) supper.swap(slower);
    if(eupper.z() < elower.z()) eupper.swap(elower);

    double startz = 0, endz = 0;

    startz = slower.z() - zstep < supper.z() ? slower.z() - zstep : slower.z();
    endz = eupper.z() + zstep > elower.z() ? eupper.z() + zstep : eupper.z();

    if(slower.z() - eupper.z() < std::abs(zstep)) {
        // no space for even one cross

        // Get max available space
        startz = std::min(supper.z(), slower.z() - zstep);
        endz = std::max(eupper.z() + zstep, elower.z());

        // Align to center
        double available_dist = (startz - endz);
        double rounds = std::floor(available_dist / std::abs(zstep));
        startz -= 0.5 * (available_dist - rounds * std::abs(zstep));
    }

    auto pcm = m_sm.cfg.pillar_connection_mode;
    bool docrosses =
        pcm == PillarConnectionMode::cross ||
        (pcm == PillarConnectionMode::dynamic &&
         pillar_dist > 2*m_sm.cfg.base_radius_mm);

    // 'sj' means starting junction, 'ej' is the end junction of a bridge.
    // They will be swapped in every iteration thus the zig-zag pattern.
    // According to a config parameter, a second bridge may be added which
    // results in a cross connection between the pillars.
    Vec3d sj = supper, ej = slower; sj.z() = startz; ej.z() = sj.z() + zstep;

    // TODO: This is a workaround to not have a faulty last bridge
    while(ej.z() >= eupper.z() /*endz*/) {
        if(bridge_mesh_distance(sj, dirv(sj, ej), pillar.r_start) >= bridge_distance)
        {
            m_builder.add_crossbridge(sj, ej, pillar.r_start);
            was_connected = true;
        }

        // double bridging: (crosses)
        if(docrosses) {
            Vec3d sjback(ej.x(), ej.y(), sj.z());
            Vec3d ejback(sj.x(), sj.y(), ej.z());
            if (sjback.z() <= slower.z() && ejback.z() >= eupper.z() &&
                bridge_mesh_distance(sjback, dirv(sjback, ejback),
                                     pillar.r_start) >= bridge_distance) {
                // need to check collision for the cross stick
                m_builder.add_crossbridge(sjback, ejback, pillar.r_start);
                was_connected = true;
            }
        }

        sj.swap(ej);
        ej.z() = sj.z() + zstep;
    }

    return was_connected;
}

bool DefaultSupportTree::connect_to_nearpillar(const Head &head,
                                               long nearpillar_id)
{
    auto nearpillar = [this, nearpillar_id]() -> const Pillar& {
        return m_builder.pillar(nearpillar_id);
    };

    if (m_builder.bridgecount(nearpillar()) > m_sm.cfg.max_bridges_on_pillar)
        return false;

    Vec3d headjp = head.junction_point();
    Vec3d nearjp_u = nearpillar().startpoint();
    Vec3d nearjp_l = nearpillar().endpoint();

    double r = head.r_back_mm;
    double d2d = distance(to_2d(headjp), to_2d(nearjp_u));
    double d3d = distance(headjp, nearjp_u);

    double hdiff = nearjp_u.z() - headjp.z();
    double slope = std::atan2(hdiff, d2d);

    Vec3d bridgestart = headjp;
    Vec3d bridgeend = nearjp_u;
    double max_len = r * m_sm.cfg.max_bridge_length_mm / m_sm.cfg.head_back_radius_mm;
    double max_slope = m_sm.cfg.bridge_slope;
    double zdiff = 0.0;

    // check the default situation if feasible for a bridge
    if(d3d > max_len || slope > -max_slope) {
        // not feasible to connect the two head junctions. We have to search
        // for a suitable touch point.

        double Zdown = headjp.z() + d2d * std::tan(-max_slope);
        Vec3d touchjp = bridgeend; touchjp.z() = Zdown;
        double D = distance(headjp, touchjp);
        zdiff = Zdown - nearjp_u.z();

        if(zdiff > 0) {
            Zdown -= zdiff;
            bridgestart.z() -= zdiff;
            touchjp.z() = Zdown;

            double t = bridge_mesh_distance(headjp, DOWN, r);

            // We can't insert a pillar under the source head to connect
            // with the nearby pillar's starting junction
            if(t < zdiff) return false;
        }

        if(Zdown <= nearjp_u.z() && Zdown >= nearjp_l.z() && D < max_len)
            bridgeend.z() = Zdown;
        else
            return false;
    }

    // There will be a minimum distance from the ground where the
    // bridge is allowed to connect. This is an empirical value.
    double minz = ground_level(m_sm) + 4 * head.r_back_mm;
    if(bridgeend.z() < minz) return false;

    double t = bridge_mesh_distance(bridgestart, dirv(bridgestart, bridgeend), r);

    // Cannot insert the bridge. (further search might not be worth the hassle)
    if(t < distance(bridgestart, bridgeend)) return false;

    std::lock_guard lk(m_bridge_mutex);

    if (m_builder.bridgecount(nearpillar()) < m_sm.cfg.max_bridges_on_pillar) {
        // A partial pillar is needed under the starting head.
        if(zdiff > 0) {
            m_builder.add_pillar(head.id, headjp.z() - bridgestart.z());
            m_builder.add_junction(bridgestart, r);
            m_builder.add_bridge(bridgestart, bridgeend, r);
        } else {
            m_builder.add_bridge(head.id, bridgeend);
        }

        m_builder.increment_bridges(nearpillar());
    } else return false;

    return true;
}

bool DefaultSupportTree::create_ground_pillar(const Junction &hjp,
                                              const Vec3d &sourcedir,
                                              long head_id)
{
    auto [ret, pillar_id] = sla::create_ground_pillar(suptree_ex_policy,
                                                      m_builder,
                                                      m_sm,
                                                      hjp.pos,
                                                      sourcedir,
                                                      hjp.r,
                                                      hjp.r,
                                                      head_id);

    if (pillar_id >= 0) // Save the pillar endpoint in the spatial index
        m_pillar_index.guarded_insert(m_builder.pillar(pillar_id).endpt,
                                      unsigned(pillar_id));

    return ret;
}

void DefaultSupportTree::add_pinheads()
{
    // The minimum distance for two support points to remain valid.
    const double /*constexpr*/ D_SP = 0.1;

    // Get the points that are too close to each other and keep only the
    // first one
    auto aliases = cluster(m_points, D_SP, 2);

    PtIndices filtered_indices;
    filtered_indices.reserve(aliases.size());
    m_iheads.reserve(aliases.size());
    for(auto& a : aliases) {
        // Here we keep only the front point of the cluster.
        filtered_indices.emplace_back(a.front());
    }

    // calculate the normals to the triangles for filtered points
    auto nmls = normals(suptree_ex_policy, m_points, m_sm.emesh,
                        m_sm.cfg.head_front_radius_mm, m_thr,
                        filtered_indices);

    // Not all of the support points have to be a valid position for
    // support creation. The angle may be inappropriate or there may
    // not be enough space for the pinhead. Filtering is applied for
    // these reasons.

    auto heads = reserve_vector<Head>(m_sm.pts.size());
    for (const SupportPoint &sp : m_sm.pts) {
        m_thr();
        heads.emplace_back(
            NaNd,
            sp.head_front_radius,
            0.,
            m_sm.cfg.head_penetration_mm,
            Vec3d::Zero(),         // dir
            sp.pos.cast<double>()  // displacement
        );
    }

    std::function<void(unsigned, size_t, double)> filterfn;
    filterfn = [this, &nmls, &heads, &filterfn](unsigned fidx, size_t i, double back_r) {
        m_thr();

        Vec3d n = nmls.row(Eigen::Index(i));

        // for all normals we generate the spherical coordinates and
        // saturate the polar angle to 45 degrees from the bottom then
        // convert back to standard coordinates to get the new normal.
        // Then we just create a quaternion from the two normals
        // (Quaternion::FromTwoVectors) and apply the rotation to the
        // arrow head.

        auto [polar, azimuth] = dir_to_spheric(n);

        // skip if the tilt is not sane
        if (polar < PI - m_sm.cfg.normal_cutoff_angle) return;

        // We saturate the polar angle to 3pi/4
        polar = std::max(polar, PI - m_sm.cfg.bridge_slope);

        // save the head (pinpoint) position
        Vec3d hp = m_points.row(fidx);

        double lmin = m_sm.cfg.head_width_mm, lmax = lmin;

        if (back_r < m_sm.cfg.head_back_radius_mm) {
            lmin = 0., lmax = m_sm.cfg.head_penetration_mm;
        }

        // The distance needed for a pinhead to not collide with model.
        double w = lmin + 2 * back_r + 2 * m_sm.cfg.head_front_radius_mm -
                   m_sm.cfg.head_penetration_mm;

        double pin_r = double(m_sm.pts[fidx].head_front_radius);

        // Reassemble the now corrected normal
        auto nn = spheric_to_dir(polar, azimuth).normalized();

        // check available distance
        AABBMesh::hit_result t = pinhead_mesh_intersect(hp, nn, pin_r, back_r, w);

        if (t.distance() < w) {
            // Let's try to optimize this angle, there might be a
            // viable normal that doesn't collide with the model
            // geometry and it's very close to the default.

            Optimizer<AlgNLoptGenetic> solver(get_criteria(m_sm.cfg));
            solver.seed(0); // we want deterministic behavior

            auto oresult = solver.to_max().optimize(
                [this, pin_r, back_r, hp](const opt::Input<3> &input)
                {
                    auto &[plr, azm, l] = input;

                    auto dir = spheric_to_dir(plr, azm).normalized();

                    return pinhead_mesh_intersect(
                        hp, dir, pin_r, back_r, l).distance();
                },
                initvals({polar, azimuth, (lmin + lmax) / 2.}), // start with what we have
                bounds({
                    {PI - m_sm.cfg.bridge_slope, PI}, // Must not exceed the slope limit
                    {-PI, PI}, // azimuth can be a full search
                    {lmin, lmax}
                }));

            if(oresult.score > w) {
                polar = std::get<0>(oresult.optimum);
                azimuth = std::get<1>(oresult.optimum);
                nn = spheric_to_dir(polar, azimuth).normalized();
                lmin = std::get<2>(oresult.optimum);
                t = AABBMesh::hit_result(oresult.score);
            }
        }

        if (t.distance() > w && hp.z() + w * nn.z() >= ground_level(m_sm)) {
            Head &h = heads[fidx];
            h.id = fidx;
            h.dir = nn;
            h.width_mm = lmin;
            h.r_back_mm = back_r;
        } else if (back_r > m_sm.cfg.head_fallback_radius_mm) {
            filterfn(fidx, i, m_sm.cfg.head_fallback_radius_mm);
        }
    };

    execution::for_each(
        suptree_ex_policy, size_t(0), filtered_indices.size(),
        [this, &filterfn, &filtered_indices](size_t i) {
            filterfn(filtered_indices[i], i, m_sm.cfg.head_back_radius_mm);
        },
        execution::max_concurrency(suptree_ex_policy));

    for (size_t i = 0; i < heads.size(); ++i)
        if (heads[i].is_valid()) {
            m_builder.add_head(i, heads[i]);
            m_iheads.emplace_back(i);
        }

    m_thr();
}

void DefaultSupportTree::classify()
{
    // We should first get the heads that reach the ground directly
    PtIndices ground_head_indices;
    ground_head_indices.reserve(m_iheads.size());
    m_iheads_onmodel.reserve(m_iheads.size());

    // First we decide which heads reach the ground and can be full
    // pillars and which shall be connected to the model surface (or
    // search a suitable path around the surface that leads to the
    // ground -- TODO)
    for(unsigned i : m_iheads) {
        m_thr();

        Head &head = m_builder.head(i);
        double r = head.r_back_mm;
        Vec3d headjp = head.junction_point();

        // collision check
        auto hit = bridge_mesh_intersect(headjp, DOWN, r);

        if(std::isinf(hit.distance())) ground_head_indices.emplace_back(i);
        else if(m_sm.cfg.ground_facing_only) head.invalidate();
        else m_iheads_onmodel.emplace_back(i);

        m_head_to_ground_scans[i] = hit;
    }

    // We want to search for clusters of points that are far enough
    // from each other in the XY plane to not cross their pillar bases
    // These clusters of support points will join in one pillar,
    // possibly in their centroid support point.

    auto pointfn = [this](unsigned i) {
        return m_builder.head(i).junction_point();
    };

    auto predicate = [this](const PointIndexEl &e1,
                            const PointIndexEl &e2) {
        double d2d = distance(to_2d(e1.first), to_2d(e2.first));
        double d3d = distance(e1.first, e2.first);
        return d2d < 2 * m_sm.cfg.base_radius_mm
               && d3d < m_sm.cfg.max_bridge_length_mm;
    };

    m_pillar_clusters = cluster(ground_head_indices, pointfn, predicate,
                                m_sm.cfg.max_bridges_on_pillar);
}

void DefaultSupportTree::routing_to_ground()
{
    ClusterEl cl_centroids;
    cl_centroids.reserve(m_pillar_clusters.size());

    for (auto &cl : m_pillar_clusters) {
        m_thr();

        // place all the centroid head positions into the index. We
        // will query for alternative pillar positions. If a sidehead
        // cannot connect to the cluster centroid, we have to search
        // for another head with a full pillar. Also when there are two
        // elements in the cluster, the centroid is arbitrary and the
        // sidehead is allowed to connect to a nearby pillar to
        // increase structural stability.

        if (cl.empty()) continue;

        // get the current cluster centroid
        auto &      thr    = m_thr;
        const auto &points = m_points;

        long lcid = cluster_centroid(
            cl, [&points](size_t idx) { return points.row(long(idx)); },
            [thr](const Vec3d &p1, const Vec3d &p2) {
                thr();
                return distance(Vec2d(p1.x(), p1.y()), Vec2d(p2.x(), p2.y()));
            });

        assert(lcid >= 0);
        unsigned hid = cl[size_t(lcid)]; // Head ID

        cl_centroids.emplace_back(hid);

        Head &h = m_builder.head(hid);

        if (!create_ground_pillar(h.junction(), h.dir, h.id)) {
            BOOST_LOG_TRIVIAL(warning)
                << "Pillar cannot be created for support point id: " << hid;
            m_iheads_onmodel.emplace_back(h.id);
            continue;
        }
    }

    // now we will go through the clusters once again and connect the
    // sidepoints with the cluster centroid (which is a ground pillar)
    // or a nearby pillar if the centroid is unreachable.
    size_t ci = 0;
    for (const std::vector<unsigned> &cl : m_pillar_clusters) {
        m_thr();

        auto cidx = cl_centroids[ci++];

        auto q = m_pillar_index.query(m_builder.head(cidx).junction_point(), 1);
        if (!q.empty()) {
            long centerpillarID = q.front().second;
            for (auto c : cl) {
                m_thr();
                if (c == cidx) continue;

                auto &sidehead = m_builder.head(c);

                if (!connect_to_nearpillar(sidehead, centerpillarID) &&
                    !search_pillar_and_connect(sidehead)) {
                    // Vec3d pend = Vec3d{pstart.x(), pstart.y(), gndlvl};
                    // Could not find a pillar, create one
                    create_ground_pillar(sidehead.junction(), sidehead.dir, sidehead.id);
                }
            }
        }
    }
}

bool DefaultSupportTree::connect_to_ground(Head &head)
{
    auto [ret, pillar_id] = sla::search_ground_route(suptree_ex_policy,
                                                     m_builder, m_sm,
                                                     {head.junction_point(),
                                                      head.r_back_mm},
                                                     head.r_back_mm,
                                                     head.dir);

    if (pillar_id >= 0) {
        // Save the pillar endpoint in the spatial index
        m_pillar_index.guarded_insert(m_builder.pillar(pillar_id).endpt,
                                      unsigned(pillar_id));

        head.pillar_id = pillar_id;
    }

    return ret;
}
|
||||
|
||||
bool DefaultSupportTree::connect_to_model_body(Head &head)
|
||||
{
|
||||
if (head.id <= SupportTreeNode::ID_UNSET) return false;
|
||||
|
||||
auto it = m_head_to_ground_scans.find(unsigned(head.id));
|
||||
if (it == m_head_to_ground_scans.end()) return false;
|
||||
|
||||
auto &hit = it->second;
|
||||
|
||||
if (!hit.is_hit()) {
|
||||
// TODO scan for potential anchor points on model surface
|
||||
return false;
|
||||
}
|
||||
|
||||
Vec3d hjp = head.junction_point();
|
||||
double zangle = std::asin(hit.direction().z());
|
||||
zangle = std::max(zangle, PI/4);
|
||||
double h = std::sin(zangle) * head.fullwidth();
|
||||
|
||||
// The width of the tail head that we would like to have...
|
||||
h = std::min(hit.distance() - head.r_back_mm, h);
|
||||
|
||||
// If this is a mini pillar dont bother with the tail width, can be 0.
|
||||
if (head.r_back_mm < m_sm.cfg.head_back_radius_mm) h = std::max(h, 0.);
|
||||
else if (h <= 0.) return false;
|
||||
|
||||
Vec3d endp{hjp.x(), hjp.y(), hjp.z() - hit.distance() + h};
|
||||
auto center_hit = m_sm.emesh.query_ray_hit(hjp, DOWN);
|
||||
|
||||
double hitdiff = center_hit.distance() - hit.distance();
|
||||
Vec3d hitp = std::abs(hitdiff) < 2*head.r_back_mm?
|
||||
center_hit.position() : hit.position();
|
||||
|
||||
long pillar_id = m_builder.add_pillar(head.id, hjp.z() - endp.z());
|
||||
Pillar &pill = m_builder.pillar(pillar_id);
|
||||
|
||||
Vec3d taildir = endp - hitp;
|
||||
double dist = (hitp - endp).norm() + m_sm.cfg.head_penetration_mm;
|
||||
double w = dist - 2 * head.r_pin_mm - head.r_back_mm;
|
||||
|
||||
if (w < 0.) {
|
||||
BOOST_LOG_TRIVIAL(warning) << "Pinhead width is negative!";
|
||||
w = 0.;
|
||||
}
|
||||
|
||||
m_builder.add_anchor(head.r_back_mm, head.r_pin_mm, w,
|
||||
m_sm.cfg.head_penetration_mm, taildir, hitp);
|
||||
|
||||
m_pillar_index.guarded_insert(pill.endpoint(), pill.id);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DefaultSupportTree::search_pillar_and_connect(const Head &source)
|
||||
{
|
||||
// Hope that a local copy takes less time than the whole search loop.
|
||||
// We also need to remove elements progressively from the copied index.
|
||||
PointIndex spindex = m_pillar_index.guarded_clone();
|
||||
|
||||
long nearest_id = SupportTreeNode::ID_UNSET;
|
||||
|
||||
Vec3d querypt = source.junction_point();
|
||||
|
||||
while(nearest_id < 0 && !spindex.empty()) { m_thr();
|
||||
// loop until a suitable head is not found
|
||||
// if there is a pillar closer than the cluster center
|
||||
// (this may happen as the clustering is not perfect)
|
||||
// than we will bridge to this closer pillar
|
||||
|
||||
Vec3d qp(querypt.x(), querypt.y(), ground_level(m_sm));
|
||||
auto qres = spindex.nearest(qp, 1);
|
||||
if(qres.empty()) break;
|
||||
|
||||
auto ne = qres.front();
|
||||
nearest_id = ne.second;
|
||||
|
||||
if(nearest_id >= 0) {
|
||||
if (size_t(nearest_id) < m_builder.pillarcount()) {
|
||||
if(!connect_to_nearpillar(source, nearest_id) ||
|
||||
m_builder.pillar(nearest_id).r_start < source.r_back_mm) {
|
||||
nearest_id = SupportTreeNode::ID_UNSET; // continue searching
|
||||
spindex.remove(ne); // without the current pillar
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nearest_id >= 0;
|
||||
}
|
||||
|
||||
void DefaultSupportTree::routing_to_model()
|
||||
{
|
||||
// We need to check if there is an easy way out to the bed surface.
|
||||
// If it can be routed there with a bridge shorter than
|
||||
// min_bridge_distance.
|
||||
|
||||
execution::for_each(
|
||||
suptree_ex_policy, m_iheads_onmodel.begin(), m_iheads_onmodel.end(),
|
||||
[this](const unsigned idx) {
|
||||
m_thr();
|
||||
|
||||
auto &head = m_builder.head(idx);
|
||||
|
||||
// Search nearby pillar
|
||||
if (search_pillar_and_connect(head)) { return; }
|
||||
|
||||
// Cannot connect to nearby pillar. We will try to search for
|
||||
// a route to the ground.
|
||||
if (connect_to_ground(head)) { return; }
|
||||
|
||||
// No route to the ground, so connect to the model body as a last resort
|
||||
if (connect_to_model_body(head)) { return; }
|
||||
|
||||
// We have failed to route this head.
|
||||
BOOST_LOG_TRIVIAL(warning)
|
||||
<< "Failed to route model facing support point. ID: " << idx;
|
||||
|
||||
head.invalidate();
|
||||
},
|
||||
execution::max_concurrency(suptree_ex_policy));
|
||||
}
|
||||
|
||||
void DefaultSupportTree::interconnect_pillars()
|
||||
{
|
||||
// Now comes the algorithm that connects pillars with each other.
|
||||
// Ideally every pillar should be connected with at least one of its
|
||||
// neighbors if that neighbor is within max_pillar_link_distance
|
||||
|
||||
// Pillars with height exceeding H1 will require at least one neighbor
|
||||
// to connect with. Height exceeding H2 require two neighbors.
|
||||
double H1 = m_sm.cfg.max_solo_pillar_height_mm;
|
||||
double H2 = m_sm.cfg.max_dual_pillar_height_mm;
|
||||
double d = m_sm.cfg.max_pillar_link_distance_mm;
|
||||
|
||||
// A connection between two pillars only counts if the height ratio is
|
||||
// bigger than 50%
|
||||
double min_height_ratio = 0.5;
|
||||
|
||||
std::set<unsigned long> pairs;
|
||||
|
||||
// A function to connect one pillar with its neighbors. THe number of
|
||||
// neighbors is given in the configuration. This function if called
|
||||
// for every pillar in the pillar index. A pair of pillar will not
|
||||
// be connected multiple times this is ensured by the 'pairs' set which
|
||||
// remembers the processed pillar pairs
|
||||
auto cascadefn =
|
||||
[this, d, &pairs, min_height_ratio, H1] (const PointIndexEl& el)
|
||||
{
|
||||
Vec3d qp = el.first; // endpoint of the pillar
|
||||
|
||||
const Pillar& pillar = m_builder.pillar(el.second); // actual pillar
|
||||
|
||||
// Get the max number of neighbors a pillar should connect to
|
||||
unsigned neighbors = m_sm.cfg.pillar_cascade_neighbors;
|
||||
|
||||
// connections are already enough for the pillar
|
||||
if(pillar.links >= neighbors) return;
|
||||
|
||||
double max_d = d * pillar.r_start / m_sm.cfg.head_back_radius_mm;
|
||||
// Query all remaining points within reach
|
||||
auto qres = m_pillar_index.query([qp, max_d](const PointIndexEl& e){
|
||||
return distance(e.first, qp) < max_d;
|
||||
});
|
||||
|
||||
// sort the result by distance (have to check if this is needed)
|
||||
std::sort(qres.begin(), qres.end(),
|
||||
[qp](const PointIndexEl& e1, const PointIndexEl& e2){
|
||||
return distance(e1.first, qp) < distance(e2.first, qp);
|
||||
});
|
||||
|
||||
for(auto& re : qres) { // process the queried neighbors
|
||||
|
||||
if(re.second == el.second) continue; // Skip self
|
||||
|
||||
auto a = el.second, b = re.second;
|
||||
|
||||
// Get unique hash for the given pair (order doesn't matter)
|
||||
auto hashval = pairhash(a, b);
|
||||
|
||||
// Search for the pair amongst the remembered pairs
|
||||
if(pairs.find(hashval) != pairs.end()) continue;
|
||||
|
||||
const Pillar& neighborpillar = m_builder.pillar(re.second);
|
||||
|
||||
// this neighbor is occupied, skip
|
||||
if (neighborpillar.links >= neighbors) continue;
|
||||
if (neighborpillar.r_start < pillar.r_start) continue;
|
||||
|
||||
if(interconnect(pillar, neighborpillar)) {
|
||||
pairs.insert(hashval);
|
||||
|
||||
// If the interconnection length between the two pillars is
|
||||
// less than 50% of the longer pillar's height, don't count
|
||||
if(pillar.height < H1 ||
|
||||
neighborpillar.height / pillar.height > min_height_ratio)
|
||||
m_builder.increment_links(pillar);
|
||||
|
||||
if(neighborpillar.height < H1 ||
|
||||
pillar.height / neighborpillar.height > min_height_ratio)
|
||||
m_builder.increment_links(neighborpillar);
|
||||
|
||||
}
|
||||
|
||||
// connections are enough for one pillar
|
||||
if(pillar.links >= neighbors) break;
|
||||
}
|
||||
};
|
||||
|
||||
// Run the cascade for the pillars in the index
|
||||
m_pillar_index.foreach(cascadefn);
|
||||
|
||||
// We would be done here if we could allow some pillars to not be
|
||||
// connected with any neighbors. But this might leave the support tree
|
||||
// unprintable.
|
||||
//
|
||||
// The current solution is to insert additional pillars next to these
|
||||
// lonely pillars. One or even two additional pillar might get inserted
|
||||
// depending on the length of the lonely pillar.
|
||||
|
||||
size_t pillarcount = m_builder.pillarcount();
|
||||
|
||||
// Again, go through all pillars, this time in the whole support tree
|
||||
// not just the index.
|
||||
for(size_t pid = 0; pid < pillarcount; pid++) {
|
||||
auto pillar = [this, pid]() { return m_builder.pillar(pid); };
|
||||
|
||||
// Decide how many additional pillars will be needed:
|
||||
|
||||
unsigned needpillars = 0;
|
||||
if (pillar().bridges > m_sm.cfg.max_bridges_on_pillar)
|
||||
needpillars = 3;
|
||||
else if (pillar().links < 2 && pillar().height > H2) {
|
||||
// Not enough neighbors to support this pillar
|
||||
needpillars = 2;
|
||||
} else if (pillar().links < 1 && pillar().height > H1) {
|
||||
// No neighbors could be found and the pillar is too long.
|
||||
needpillars = 1;
|
||||
}
|
||||
|
||||
needpillars = std::max(pillar().links, needpillars) - pillar().links;
|
||||
if (needpillars == 0) continue;
|
||||
|
||||
// Search for new pillar locations:
|
||||
|
||||
bool found = false;
|
||||
double alpha = 0; // goes to 2Pi
|
||||
double r = 2 * m_sm.cfg.base_radius_mm;
|
||||
Vec3d pillarsp = pillar().startpoint();
|
||||
|
||||
// temp value for starting point detection
|
||||
Vec3d sp(pillarsp.x(), pillarsp.y(), pillarsp.z() - r);
|
||||
|
||||
// A vector of bool for placement feasbility
|
||||
std::vector<bool> canplace(needpillars, false);
|
||||
std::vector<Vec3d> spts(needpillars); // vector of starting points
|
||||
|
||||
double gnd = ground_level(m_sm);
|
||||
double min_dist = m_sm.cfg.pillar_base_safety_distance_mm +
|
||||
m_sm.cfg.base_radius_mm + EPSILON;
|
||||
|
||||
while(!found && alpha < 2*PI) {
|
||||
for (unsigned n = 0;
|
||||
n < needpillars && (!n || canplace[n - 1]);
|
||||
n++)
|
||||
{
|
||||
double a = alpha + n * PI / 3;
|
||||
Vec3d s = sp;
|
||||
s.x() += std::cos(a) * r;
|
||||
s.y() += std::sin(a) * r;
|
||||
spts[n] = s;
|
||||
|
||||
// Check the path vertically down
|
||||
Vec3d check_from = s + Vec3d{0., 0., pillar().r_start};
|
||||
auto hr = bridge_mesh_intersect(check_from, DOWN, pillar().r_start);
|
||||
Vec3d gndsp{s.x(), s.y(), gnd};
|
||||
|
||||
// If the path is clear, check for pillar base collisions
|
||||
canplace[n] = std::isinf(hr.distance()) &&
|
||||
std::sqrt(m_sm.emesh.squared_distance(gndsp)) >
|
||||
min_dist;
|
||||
}
|
||||
|
||||
found = std::all_of(canplace.begin(), canplace.end(),
|
||||
[](bool v) { return v; });
|
||||
|
||||
// 20 angles will be tried...
|
||||
alpha += 0.1 * PI;
|
||||
}
|
||||
|
||||
std::vector<long> newpills;
|
||||
newpills.reserve(needpillars);
|
||||
|
||||
if (found)
|
||||
for (unsigned n = 0; n < needpillars; n++) {
|
||||
Vec3d s = spts[n];
|
||||
Pillar p(Vec3d{s.x(), s.y(), gnd}, s.z() - gnd, pillar().r_start);
|
||||
|
||||
if (interconnect(pillar(), p)) {
|
||||
Pillar &pp = m_builder.pillar(m_builder.add_pillar(p));
|
||||
|
||||
add_pillar_base(pp.id);
|
||||
|
||||
m_pillar_index.insert(pp.endpoint(), unsigned(pp.id));
|
||||
|
||||
m_builder.add_junction(s, pillar().r_start);
|
||||
double t = bridge_mesh_distance(pillarsp, dirv(pillarsp, s),
|
||||
pillar().r_start);
|
||||
if (distance(pillarsp, s) < t)
|
||||
m_builder.add_bridge(pillarsp, s, pillar().r_start);
|
||||
|
||||
if (pillar().endpoint().z() > ground_level(m_sm) + pillar().r_start)
|
||||
m_builder.add_junction(pillar().endpoint(), pillar().r_start);
|
||||
|
||||
newpills.emplace_back(pp.id);
|
||||
m_builder.increment_links(pillar());
|
||||
m_builder.increment_links(pp);
|
||||
}
|
||||
}
|
||||
|
||||
if(!newpills.empty()) {
|
||||
for(auto it = newpills.begin(), nx = std::next(it);
|
||||
nx != newpills.end(); ++it, ++nx) {
|
||||
const Pillar& itpll = m_builder.pillar(*it);
|
||||
const Pillar& nxpll = m_builder.pillar(*nx);
|
||||
if(interconnect(itpll, nxpll)) {
|
||||
m_builder.increment_links(itpll);
|
||||
m_builder.increment_links(nxpll);
|
||||
}
|
||||
}
|
||||
|
||||
m_pillar_index.foreach(cascadefn);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
245
src/libslic3r/SLA/DefaultSupportTree.hpp
Normal file
245
src/libslic3r/SLA/DefaultSupportTree.hpp
Normal file
@@ -0,0 +1,245 @@
|
||||
#ifndef LEGACYSUPPORTTREE_HPP
|
||||
#define LEGACYSUPPORTTREE_HPP
|
||||
|
||||
#include "SupportTreeUtilsLegacy.hpp"
|
||||
|
||||
#include <libslic3r/SLA/SpatIndex.hpp>
|
||||
#include <libslic3r/Execution/ExecutionTBB.hpp>
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
inline constexpr const auto &suptree_ex_policy = ex_tbb;
|
||||
|
||||
class PillarIndex {
|
||||
PointIndex m_index;
|
||||
using Mutex = execution::BlockingMutex<decltype(suptree_ex_policy)>;
|
||||
mutable Mutex m_mutex;
|
||||
|
||||
public:
|
||||
|
||||
template<class...Args> inline void guarded_insert(Args&&...args)
|
||||
{
|
||||
std::lock_guard<Mutex> lck(m_mutex);
|
||||
m_index.insert(std::forward<Args>(args)...);
|
||||
}
|
||||
|
||||
template<class...Args>
|
||||
inline std::vector<PointIndexEl> guarded_query(Args&&...args) const
|
||||
{
|
||||
std::lock_guard<Mutex> lck(m_mutex);
|
||||
return m_index.query(std::forward<Args>(args)...);
|
||||
}
|
||||
|
||||
template<class...Args> inline void insert(Args&&...args)
|
||||
{
|
||||
m_index.insert(std::forward<Args>(args)...);
|
||||
}
|
||||
|
||||
template<class...Args>
|
||||
inline std::vector<PointIndexEl> query(Args&&...args) const
|
||||
{
|
||||
return m_index.query(std::forward<Args>(args)...);
|
||||
}
|
||||
|
||||
template<class Fn> inline void foreach(Fn fn) { m_index.foreach(fn); }
|
||||
template<class Fn> inline void guarded_foreach(Fn fn)
|
||||
{
|
||||
std::lock_guard<Mutex> lck(m_mutex);
|
||||
m_index.foreach(fn);
|
||||
}
|
||||
|
||||
PointIndex guarded_clone()
|
||||
{
|
||||
std::lock_guard<Mutex> lck(m_mutex);
|
||||
return m_index;
|
||||
}
|
||||
};
|
||||
|
||||
class DefaultSupportTree {
|
||||
const SupportableMesh &m_sm;
|
||||
|
||||
using PtIndices = std::vector<unsigned>;
|
||||
|
||||
PtIndices m_iheads; // support points with pinhead
|
||||
PtIndices m_iheads_onmodel;
|
||||
|
||||
std::map<unsigned, AABBMesh::hit_result> m_head_to_ground_scans;
|
||||
|
||||
// normals for support points from model faces.
|
||||
Eigen::MatrixXd m_support_nmls;
|
||||
|
||||
// Clusters of points which can reach the ground directly and can be
|
||||
// bridged to one central pillar
|
||||
std::vector<PtIndices> m_pillar_clusters;
|
||||
|
||||
// This algorithm uses the SupportTreeBuilder class to fill gradually
|
||||
// the support elements (heads, pillars, bridges, ...)
|
||||
SupportTreeBuilder& m_builder;
|
||||
|
||||
// support points in Eigen/IGL format
|
||||
Eigen::MatrixXd m_points;
|
||||
|
||||
// throw if canceled: It will be called many times so a shorthand will
|
||||
// come in handy.
|
||||
ThrowOnCancel m_thr;
|
||||
|
||||
// A spatial index to easily find strong pillars to connect to.
|
||||
PillarIndex m_pillar_index;
|
||||
|
||||
// When bridging heads to pillars... TODO: find a cleaner solution
|
||||
execution::BlockingMutex<ExecutionTBB> m_bridge_mutex;
|
||||
|
||||
inline AABBMesh::hit_result ray_mesh_intersect(const Vec3d& s,
|
||||
const Vec3d& dir)
|
||||
{
|
||||
return m_sm.emesh.query_ray_hit(s, dir);
|
||||
}
|
||||
|
||||
// This function will test if a future pinhead would not collide with the
|
||||
// model geometry. It does not take a 'Head' object because those are
|
||||
// created after this test. Parameters: s: The touching point on the model
|
||||
// surface. dir: This is the direction of the head from the pin to the back
|
||||
// r_pin, r_back: the radiuses of the pin and the back sphere width: This
|
||||
// is the full width from the pin center to the back center m: The object
|
||||
// mesh.
|
||||
// The return value is the hit result from the ray casting. If the starting
|
||||
// point was inside the model, an "invalid" hit_result will be returned
|
||||
// with a zero distance value instead of a NAN. This way the result can
|
||||
// be used safely for comparison with other distances.
|
||||
AABBMesh::hit_result pinhead_mesh_intersect(
|
||||
const Vec3d& s,
|
||||
const Vec3d& dir,
|
||||
double r_pin,
|
||||
double r_back,
|
||||
double width,
|
||||
double safety_d);
|
||||
|
||||
AABBMesh::hit_result pinhead_mesh_intersect(const Vec3d &s,
|
||||
const Vec3d &dir,
|
||||
double r_pin,
|
||||
double r_back,
|
||||
double width)
|
||||
{
|
||||
return pinhead_mesh_intersect(s, dir, r_pin, r_back, width,
|
||||
r_back * m_sm.cfg.safety_distance_mm /
|
||||
m_sm.cfg.head_back_radius_mm);
|
||||
}
|
||||
|
||||
// Checking bridge (pillar and stick as well) intersection with the model.
|
||||
// If the function is used for headless sticks, the ins_check parameter
|
||||
// have to be true as the beginning of the stick might be inside the model
|
||||
// geometry.
|
||||
// The return value is the hit result from the ray casting. If the starting
|
||||
// point was inside the model, an "invalid" hit_result will be returned
|
||||
// with a zero distance value instead of a NAN. This way the result can
|
||||
// be used safely for comparison with other distances.
|
||||
AABBMesh::hit_result bridge_mesh_intersect(
|
||||
const Vec3d& s,
|
||||
const Vec3d& dir,
|
||||
double r,
|
||||
double safety_d);
|
||||
|
||||
AABBMesh::hit_result bridge_mesh_intersect(
|
||||
const Vec3d& s,
|
||||
const Vec3d& dir,
|
||||
double r)
|
||||
{
|
||||
return bridge_mesh_intersect(s, dir, r,
|
||||
r * m_sm.cfg.safety_distance_mm /
|
||||
m_sm.cfg.head_back_radius_mm);
|
||||
}
|
||||
|
||||
template<class...Args>
|
||||
inline double bridge_mesh_distance(Args&&...args) {
|
||||
return bridge_mesh_intersect(std::forward<Args>(args)...).distance();
|
||||
}
|
||||
|
||||
// Helper function for interconnecting two pillars with zig-zag bridges.
|
||||
bool interconnect(const Pillar& pillar, const Pillar& nextpillar);
|
||||
|
||||
// For connecting a head to a nearby pillar.
|
||||
bool connect_to_nearpillar(const Head& head, long nearpillar_id);
|
||||
|
||||
// Find route for a head to the ground. Inserts additional bridge from the
|
||||
// head to the pillar if cannot create pillar directly.
|
||||
// The optional dir parameter is the direction of the bridge which is the
|
||||
// direction of the pinhead if omitted.
|
||||
inline bool connect_to_ground(Head& head);
|
||||
|
||||
bool connect_to_model_body(Head &head);
|
||||
|
||||
bool search_pillar_and_connect(const Head& source);
|
||||
|
||||
// This is a proxy function for pillar creation which will mind the gap
|
||||
// between the pad and the model bottom in zero elevation mode.
|
||||
// jp is the starting junction point which needs to be routed down.
|
||||
// sourcedir is the allowed direction of an optional bridge between the
|
||||
// jp junction and the final pillar.
|
||||
bool create_ground_pillar(const Junction &jp,
|
||||
const Vec3d &sourcedir,
|
||||
long head_id = SupportTreeNode::ID_UNSET);
|
||||
|
||||
void add_pillar_base(long pid)
|
||||
{
|
||||
m_builder.add_pillar_base(pid, m_sm.cfg.base_height_mm, m_sm.cfg.base_radius_mm);
|
||||
}
|
||||
|
||||
std::optional<DiffBridge> search_widening_path(const Vec3d &jp,
|
||||
const Vec3d &dir,
|
||||
double radius,
|
||||
double new_radius)
|
||||
{
|
||||
return sla::search_widening_path(suptree_ex_policy, m_sm, jp, dir, radius, new_radius);
|
||||
}
|
||||
|
||||
public:
|
||||
DefaultSupportTree(SupportTreeBuilder & builder, const SupportableMesh &sm);
|
||||
|
||||
// Now let's define the individual steps of the support generation algorithm
|
||||
|
||||
// Filtering step: here we will discard inappropriate support points
|
||||
// and decide the future of the appropriate ones. We will check if a
|
||||
// pinhead is applicable and adjust its angle at each support point. We
|
||||
// will also merge the support points that are just too close and can
|
||||
// be considered as one.
|
||||
void add_pinheads();
|
||||
|
||||
// Further classification of the support points with pinheads. If the
|
||||
// ground is directly reachable through a vertical line parallel to the
|
||||
// Z axis we consider a support point as pillar candidate. If touches
|
||||
// the model geometry, it will be marked as non-ground facing and
|
||||
// further steps will process it. Also, the pillars will be grouped
|
||||
// into clusters that can be interconnected with bridges. Elements of
|
||||
// these groups may or may not be interconnected. Here we only run the
|
||||
// clustering algorithm.
|
||||
void classify();
|
||||
|
||||
// Step: Routing the ground connected pinheads, and interconnecting
|
||||
// them with additional (angled) bridges. Not all of these pinheads
|
||||
// will be a full pillar (ground connected). Some will connect to a
|
||||
// nearby pillar using a bridge. The max number of such side-heads for
|
||||
// a central pillar is limited to avoid bad weight distribution.
|
||||
void routing_to_ground();
|
||||
|
||||
// Step: routing the pinheads that would connect to the model surface
|
||||
// along the Z axis downwards. For now these will actually be connected with
|
||||
// the model surface with a flipped pinhead. In the future here we could use
|
||||
// some smart algorithms to search for a safe path to the ground or to a
|
||||
// nearby pillar that can hold the supported weight.
|
||||
void routing_to_model();
|
||||
|
||||
void interconnect_pillars();
|
||||
|
||||
inline void merge_result() { m_builder.merged_mesh(); }
|
||||
|
||||
static bool execute(SupportTreeBuilder & builder, const SupportableMesh &sm);
|
||||
};
|
||||
|
||||
inline void create_default_tree(SupportTreeBuilder &builder, const SupportableMesh &sm)
|
||||
{
|
||||
DefaultSupportTree::execute(builder, sm);
|
||||
}
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
|
||||
#endif // LEGACYSUPPORTTREE_HPP
|
||||
896
src/libslic3r/SLA/Hollowing.cpp
Normal file
896
src/libslic3r/SLA/Hollowing.cpp
Normal file
@@ -0,0 +1,896 @@
|
||||
#include <functional>
|
||||
#include <optional>
|
||||
#include <numeric>
|
||||
#include <unordered_set>
|
||||
#include <random>
|
||||
|
||||
#include <libslic3r/OpenVDBUtils.hpp>
|
||||
#include <libslic3r/TriangleMesh.hpp>
|
||||
#include <libslic3r/TriangleMeshSlicer.hpp>
|
||||
#include <libslic3r/SLA/Hollowing.hpp>
|
||||
#include <libslic3r/AABBTreeIndirect.hpp>
|
||||
#include <libslic3r/AABBMesh.hpp>
|
||||
#include <libslic3r/ClipperUtils.hpp>
|
||||
#include <libslic3r/QuadricEdgeCollapse.hpp>
|
||||
#include <libslic3r/SLA/SupportTreeMesher.hpp>
|
||||
#include <libslic3r/Execution/ExecutionSeq.hpp>
|
||||
#include <libslic3r/Model.hpp>
|
||||
|
||||
#include <libslic3r/MeshBoolean.hpp>
|
||||
|
||||
#include <boost/log/trivial.hpp>
|
||||
|
||||
#include <libslic3r/MTUtils.hpp>
|
||||
#include <libslic3r/I18N.hpp>
|
||||
|
||||
namespace Slic3r {
|
||||
namespace sla {
|
||||
|
||||
struct Interior {
|
||||
indexed_triangle_set mesh;
|
||||
VoxelGridPtr gridptr;
|
||||
|
||||
double iso_surface = 0.;
|
||||
double thickness = 0.;
|
||||
double full_narrowb = 2.;
|
||||
|
||||
void reset_accessor() const // This resets the accessor and its cache
|
||||
// Not a thread safe call!
|
||||
{
|
||||
if (gridptr)
|
||||
Slic3r::reset_accessor(*gridptr);
|
||||
}
|
||||
};
|
||||
|
||||
void InteriorDeleter::operator()(Interior *p)
|
||||
{
|
||||
delete p;
|
||||
}
|
||||
|
||||
indexed_triangle_set &get_mesh(Interior &interior)
|
||||
{
|
||||
return interior.mesh;
|
||||
}
|
||||
|
||||
const indexed_triangle_set &get_mesh(const Interior &interior)
|
||||
{
|
||||
return interior.mesh;
|
||||
}
|
||||
|
||||
const VoxelGrid &get_grid(const Interior &interior)
|
||||
{
|
||||
return *interior.gridptr;
|
||||
}
|
||||
|
||||
VoxelGrid &get_grid(Interior &interior)
|
||||
{
|
||||
return *interior.gridptr;
|
||||
}
|
||||
|
||||
InteriorPtr generate_interior(const VoxelGrid &vgrid,
|
||||
const HollowingConfig &hc,
|
||||
const JobController &ctl)
|
||||
{
|
||||
double voxsc = get_voxel_scale(vgrid);
|
||||
double offset = hc.min_thickness; // world units
|
||||
double D = hc.closing_distance; // world units
|
||||
float in_range = 1.1f * float(offset + D); // world units
|
||||
float out_range = 1.f / voxsc; // world units
|
||||
auto narrowb = 1.f; // voxel units (voxel count)
|
||||
|
||||
if (ctl.stopcondition()) return {};
|
||||
else ctl.statuscb(0, _u8L("Hollowing"));
|
||||
|
||||
auto gridptr = dilate_grid(vgrid, out_range, in_range);
|
||||
|
||||
if (ctl.stopcondition()) return {};
|
||||
else ctl.statuscb(30, _u8L("Hollowing"));
|
||||
|
||||
double iso_surface = D;
|
||||
if (D > EPSILON) {
|
||||
gridptr = redistance_grid(*gridptr, -(offset + D), narrowb, narrowb);
|
||||
|
||||
gridptr = dilate_grid(*gridptr, 1.1 * std::ceil(iso_surface), 0.f);
|
||||
|
||||
out_range = iso_surface;
|
||||
in_range = narrowb / voxsc;
|
||||
} else {
|
||||
iso_surface = -offset;
|
||||
}
|
||||
|
||||
if (ctl.stopcondition()) return {};
|
||||
else ctl.statuscb(70, _u8L("Hollowing"));
|
||||
|
||||
double adaptivity = 0.;
|
||||
InteriorPtr interior = InteriorPtr{new Interior{}};
|
||||
|
||||
interior->mesh = grid_to_mesh(*gridptr, iso_surface, adaptivity);
|
||||
interior->gridptr = std::move(gridptr);
|
||||
|
||||
if (ctl.stopcondition()) return {};
|
||||
else ctl.statuscb(100, _u8L("Hollowing"));
|
||||
|
||||
interior->iso_surface = iso_surface;
|
||||
interior->thickness = offset;
|
||||
interior->full_narrowb = (out_range + in_range) / 2.;
|
||||
|
||||
return interior;
|
||||
}
|
||||
|
||||
indexed_triangle_set DrainHole::to_mesh() const
|
||||
{
|
||||
auto r = double(radius);
|
||||
auto h = double(height);
|
||||
indexed_triangle_set hole = its_make_cylinder(r, h); //sla::cylinder(r, h, steps);
|
||||
Eigen::Quaternionf q;
|
||||
q.setFromTwoVectors(Vec3f::UnitZ(), normal);
|
||||
for(auto& p : hole.vertices) p = q * p + pos;
|
||||
|
||||
return hole;
|
||||
}
|
||||
|
||||
bool DrainHole::operator==(const DrainHole &sp) const
|
||||
{
|
||||
return (pos == sp.pos) && (normal == sp.normal) &&
|
||||
is_approx(radius, sp.radius) &&
|
||||
is_approx(height, sp.height);
|
||||
}
|
||||
|
||||
bool DrainHole::is_inside(const Vec3f& pt) const
|
||||
{
|
||||
Eigen::Hyperplane<float, 3> plane(normal, pos);
|
||||
float dist = plane.signedDistance(pt);
|
||||
if (dist < float(EPSILON) || dist > height)
|
||||
return false;
|
||||
|
||||
Eigen::ParametrizedLine<float, 3> axis(pos, normal);
|
||||
if ( axis.squaredDistance(pt) < pow(radius, 2.f))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// Given a line s+dir*t, find parameter t of intersections with the hole
|
||||
// and the normal (points inside the hole). Outputs through out reference,
|
||||
// returns true if two intersections were found.
|
||||
bool DrainHole::get_intersections(const Vec3f& s, const Vec3f& dir,
|
||||
std::array<std::pair<float, Vec3d>, 2>& out)
|
||||
const
|
||||
{
|
||||
assert(is_approx(normal.norm(), 1.f));
|
||||
const Eigen::ParametrizedLine<float, 3> ray(s, dir.normalized());
|
||||
|
||||
for (size_t i=0; i<2; ++i)
|
||||
out[i] = std::make_pair(AABBMesh::hit_result::infty(), Vec3d::Zero());
|
||||
|
||||
const float sqr_radius = pow(radius, 2.f);
|
||||
|
||||
// first check a bounding sphere of the hole:
|
||||
Vec3f center = pos+normal*height/2.f;
|
||||
float sqr_dist_limit = pow(height/2.f, 2.f) + sqr_radius ;
|
||||
if (ray.squaredDistance(center) > sqr_dist_limit)
|
||||
return false;
|
||||
|
||||
// The line intersects the bounding sphere, look for intersections with
|
||||
// bases of the cylinder.
|
||||
|
||||
size_t found = 0; // counts how many intersections were found
|
||||
Eigen::Hyperplane<float, 3> base;
|
||||
if (! is_approx(ray.direction().dot(normal), 0.f)) {
|
||||
for (size_t i=1; i<=1; --i) {
|
||||
Vec3f cylinder_center = pos+i*height*normal;
|
||||
if (i == 0) {
|
||||
// The hole base can be identical to mesh surface if it is flat
|
||||
// let's better move the base outward a bit
|
||||
cylinder_center -= EPSILON*normal;
|
||||
}
|
||||
base = Eigen::Hyperplane<float, 3>(normal, cylinder_center);
|
||||
Vec3f intersection = ray.intersectionPoint(base);
|
||||
// Only accept the point if it is inside the cylinder base.
|
||||
if ((cylinder_center-intersection).squaredNorm() < sqr_radius) {
|
||||
out[found].first = ray.intersectionParameter(base);
|
||||
out[found].second = (i==0 ? 1. : -1.) * normal.cast<double>();
|
||||
++found;
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// In case the line was perpendicular to the cylinder axis, previous
|
||||
// block was skipped, but base will later be assumed to be valid.
|
||||
base = Eigen::Hyperplane<float, 3>(normal, pos-EPSILON*normal);
|
||||
}
|
||||
|
||||
// In case there is still an intersection to be found, check the wall
|
||||
if (found != 2 && ! is_approx(std::abs(ray.direction().dot(normal)), 1.f)) {
|
||||
// Project the ray onto the base plane
|
||||
Vec3f proj_origin = base.projection(ray.origin());
|
||||
Vec3f proj_dir = base.projection(ray.origin()+ray.direction())-proj_origin;
|
||||
// save how the parameter scales and normalize the projected direction
|
||||
float par_scale = proj_dir.norm();
|
||||
proj_dir = proj_dir/par_scale;
|
||||
Eigen::ParametrizedLine<float, 3> projected_ray(proj_origin, proj_dir);
|
||||
// Calculate point on the secant that's closest to the center
|
||||
// and its distance to the circle along the projected line
|
||||
Vec3f closest = projected_ray.projection(pos);
|
||||
float dist = sqrt((sqr_radius - (closest-pos).squaredNorm()));
|
||||
// Unproject both intersections on the original line and check
|
||||
// they are on the cylinder and not past it:
|
||||
for (int i=-1; i<=1 && found !=2; i+=2) {
|
||||
Vec3f isect = closest + i*dist * projected_ray.direction();
|
||||
Vec3f to_isect = isect-proj_origin;
|
||||
float par = to_isect.norm() / par_scale;
|
||||
if (to_isect.normalized().dot(proj_dir.normalized()) < 0.f)
|
||||
par *= -1.f;
|
||||
Vec3d hit_normal = (pos-isect).normalized().cast<double>();
|
||||
isect = ray.pointAt(par);
|
||||
// check that the intersection is between the base planes:
|
||||
float vert_dist = base.signedDistance(isect);
|
||||
if (vert_dist > 0.f && vert_dist < height) {
|
||||
out[found].first = par;
|
||||
out[found].second = hit_normal;
|
||||
++found;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If only one intersection was found, it is some corner case,
|
||||
// no intersection will be returned:
|
||||
if (found != 2)
|
||||
return false;
|
||||
|
||||
// Sort the intersections:
|
||||
if (out[0].first > out[1].first)
|
||||
std::swap(out[0], out[1]);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void cut_drainholes(std::vector<ExPolygons> & obj_slices,
|
||||
const std::vector<float> &slicegrid,
|
||||
float closing_radius,
|
||||
const sla::DrainHoles & holes,
|
||||
std::function<void(void)> thr)
|
||||
{
|
||||
TriangleMesh mesh;
|
||||
for (const sla::DrainHole &holept : holes)
|
||||
mesh.merge(TriangleMesh{holept.to_mesh()});
|
||||
|
||||
if (mesh.empty()) return;
|
||||
|
||||
std::vector<ExPolygons> hole_slices = slice_mesh_ex(mesh.its, slicegrid, closing_radius, thr);
|
||||
|
||||
if (obj_slices.size() != hole_slices.size())
|
||||
BOOST_LOG_TRIVIAL(warning)
|
||||
<< "Sliced object and drain-holes layer count does not match!";
|
||||
|
||||
size_t until = std::min(obj_slices.size(), hole_slices.size());
|
||||
|
||||
for (size_t i = 0; i < until; ++i)
|
||||
obj_slices[i] = diff_ex(obj_slices[i], hole_slices[i]);
|
||||
}
|
||||
|
||||
void hollow_mesh(TriangleMesh &mesh, const HollowingConfig &cfg, int flags)
|
||||
{
|
||||
InteriorPtr interior = generate_interior(mesh.its, cfg, JobController{});
|
||||
if (!interior) return;
|
||||
|
||||
hollow_mesh(mesh, *interior, flags);
|
||||
}
|
||||
|
||||
void hollow_mesh(TriangleMesh &mesh, const Interior &interior, int flags)
|
||||
{
|
||||
if (mesh.empty() || interior.mesh.empty()) return;
|
||||
|
||||
if (flags & hfRemoveInsideTriangles && interior.gridptr)
|
||||
remove_inside_triangles(mesh, interior);
|
||||
|
||||
indexed_triangle_set interi = interior.mesh;
|
||||
sla::swap_normals(interi);
|
||||
TriangleMesh inter{std::move(interi)};
|
||||
|
||||
mesh.merge(inter);
|
||||
}
|
||||
|
||||
void hollow_mesh(indexed_triangle_set &mesh, const Interior &interior, int flags)
|
||||
{
|
||||
if (mesh.empty() || interior.mesh.empty()) return;
|
||||
|
||||
if (flags & hfRemoveInsideTriangles && interior.gridptr)
|
||||
remove_inside_triangles(mesh, interior);
|
||||
|
||||
indexed_triangle_set interi = interior.mesh;
|
||||
sla::swap_normals(interi);
|
||||
|
||||
its_merge(mesh, interi);
|
||||
}
|
||||
|
||||
// Get the distance of p to the interior's zero iso_surface. Interior should
|
||||
// have its zero isosurface positioned at offset + closing_distance inwards form
|
||||
// the model surface.
|
||||
static double get_distance_raw(const Vec3f &p, const Interior &interior)
|
||||
{
|
||||
assert(interior.gridptr);
|
||||
|
||||
return Slic3r::get_distance_raw(p, *interior.gridptr);
|
||||
}
|
||||
|
||||
struct TriangleBubble { Vec3f center; double R; };
|
||||
|
||||
// Return the distance of bubble center to the interior boundary or NaN if the
|
||||
// triangle is too big to be measured.
|
||||
static double get_distance(const TriangleBubble &b, const Interior &interior)
|
||||
{
|
||||
double R = b.R;
|
||||
double D = 2. * R;
|
||||
double Dst = get_distance_raw(b.center, interior);
|
||||
|
||||
return D > interior.full_narrowb ||
|
||||
((Dst - R) < 0. && 2 * R > interior.thickness) ?
|
||||
std::nan("") :
|
||||
Dst - interior.iso_surface;
|
||||
}
|
||||
|
||||
inline double get_distance(const Vec3f &p, const Interior &interior)
|
||||
{
|
||||
double d = get_distance_raw(p, interior) - interior.iso_surface;
|
||||
return d;
|
||||
}
|
||||
|
||||
template<class T>
|
||||
FloatingOnly<T> get_distance(const Vec<3, T> &p, const Interior &interior)
|
||||
{
|
||||
return get_distance(Vec3f(p.template cast<float>()), interior);
|
||||
}
|
||||
|
||||
// A face that can be divided. Stores the indices into the original mesh if its
|
||||
// part of that mesh and the vertices it consists of.
|
||||
enum { NEW_FACE = -1};
|
||||
struct DivFace {
|
||||
Vec3i indx;
|
||||
std::array<Vec3f, 3> verts;
|
||||
long faceid = NEW_FACE;
|
||||
long parent = NEW_FACE;
|
||||
};
|
||||
|
||||
// Divide a face recursively and call visitor on all the sub-faces.
|
||||
template<class Fn>
|
||||
void divide_triangle(const DivFace &face, Fn &&visitor)
|
||||
{
|
||||
std::array<Vec3f, 3> edges = {(face.verts[0] - face.verts[1]),
|
||||
(face.verts[1] - face.verts[2]),
|
||||
(face.verts[2] - face.verts[0])};
|
||||
|
||||
std::array<size_t, 3> edgeidx = {0, 1, 2};
|
||||
|
||||
std::sort(edgeidx.begin(), edgeidx.end(), [&edges](size_t e1, size_t e2) {
|
||||
return edges[e1].squaredNorm() > edges[e2].squaredNorm();
|
||||
});
|
||||
|
||||
DivFace child1, child2;
|
||||
|
||||
child1.parent = face.faceid == NEW_FACE ? face.parent : face.faceid;
|
||||
child1.indx(0) = -1;
|
||||
child1.indx(1) = face.indx(edgeidx[1]);
|
||||
child1.indx(2) = face.indx((edgeidx[1] + 1) % 3);
|
||||
child1.verts[0] = (face.verts[edgeidx[0]] + face.verts[(edgeidx[0] + 1) % 3]) / 2.;
|
||||
child1.verts[1] = face.verts[edgeidx[1]];
|
||||
child1.verts[2] = face.verts[(edgeidx[1] + 1) % 3];
|
||||
|
||||
if (visitor(child1))
|
||||
divide_triangle(child1, std::forward<Fn>(visitor));
|
||||
|
||||
child2.parent = face.faceid == NEW_FACE ? face.parent : face.faceid;
|
||||
child2.indx(0) = -1;
|
||||
child2.indx(1) = face.indx(edgeidx[2]);
|
||||
child2.indx(2) = face.indx((edgeidx[2] + 1) % 3);
|
||||
child2.verts[0] = child1.verts[0];
|
||||
child2.verts[1] = face.verts[edgeidx[2]];
|
||||
child2.verts[2] = face.verts[(edgeidx[2] + 1) % 3];
|
||||
|
||||
if (visitor(child2))
|
||||
divide_triangle(child2, std::forward<Fn>(visitor));
|
||||
}
|
||||
|
||||
void remove_inside_triangles(indexed_triangle_set &mesh, const Interior &interior,
|
||||
const std::vector<bool> &exclude_mask)
|
||||
{
|
||||
enum TrPos { posInside, posTouch, posOutside };
|
||||
|
||||
auto &faces = mesh.indices;
|
||||
auto &vertices = mesh.vertices;
|
||||
auto bb = bounding_box(mesh); //mesh.bounding_box();
|
||||
|
||||
bool use_exclude_mask = faces.size() == exclude_mask.size();
|
||||
auto is_excluded = [&exclude_mask, use_exclude_mask](size_t face_id) {
|
||||
return use_exclude_mask && exclude_mask[face_id];
|
||||
};
|
||||
|
||||
// TODO: Parallel mode not working yet
|
||||
constexpr auto &exec_policy = ex_seq;
|
||||
|
||||
// Info about the needed modifications on the input mesh.
|
||||
struct MeshMods {
|
||||
|
||||
// Just a thread safe wrapper for a vector of triangles.
|
||||
struct {
|
||||
std::vector<std::array<Vec3f, 3>> data;
|
||||
execution::SpinningMutex<decltype(exec_policy)> mutex;
|
||||
|
||||
void emplace_back(const std::array<Vec3f, 3> &pts)
|
||||
{
|
||||
std::lock_guard lk{mutex};
|
||||
data.emplace_back(pts);
|
||||
}
|
||||
|
||||
size_t size() const { return data.size(); }
|
||||
const std::array<Vec3f, 3>& operator[](size_t idx) const
|
||||
{
|
||||
return data[idx];
|
||||
}
|
||||
|
||||
} new_triangles;
|
||||
|
||||
// A vector of bool for all faces signaling if it needs to be removed
|
||||
// or not.
|
||||
std::vector<bool> to_remove;
|
||||
|
||||
MeshMods(const indexed_triangle_set &mesh):
|
||||
to_remove(mesh.indices.size(), false) {}
|
||||
|
||||
// Number of triangles that need to be removed.
|
||||
size_t to_remove_cnt() const
|
||||
{
|
||||
return std::accumulate(to_remove.begin(), to_remove.end(), size_t(0));
|
||||
}
|
||||
|
||||
} mesh_mods{mesh};
|
||||
|
||||
// Must return true if further division of the face is needed.
|
||||
auto divfn = [&interior, bb, &mesh_mods](const DivFace &f) {
|
||||
BoundingBoxf3 facebb { f.verts.begin(), f.verts.end() };
|
||||
|
||||
// Face is certainly outside the cavity
|
||||
if (! facebb.intersects(bb) && f.faceid != NEW_FACE) {
|
||||
return false;
|
||||
}
|
||||
|
||||
TriangleBubble bubble{facebb.center().cast<float>(), facebb.radius()};
|
||||
|
||||
double D = get_distance(bubble, interior);
|
||||
double R = bubble.R;
|
||||
|
||||
if (std::isnan(D)) // The distance cannot be measured, triangle too big
|
||||
return true;
|
||||
|
||||
// Distance of the bubble wall to the interior wall. Negative if the
|
||||
// bubble is overlapping with the interior
|
||||
double bubble_distance = D - R;
|
||||
|
||||
// The face is crossing the interior or inside, it must be removed and
|
||||
// parts of it re-added, that are outside the interior
|
||||
if (bubble_distance < 0.) {
|
||||
if (f.faceid != NEW_FACE)
|
||||
mesh_mods.to_remove[f.faceid] = true;
|
||||
|
||||
if (f.parent != NEW_FACE) // Top parent needs to be removed as well
|
||||
mesh_mods.to_remove[f.parent] = true;
|
||||
|
||||
// If the outside part is between the interior and the exterior
|
||||
// (inside the wall being invisible), no further division is needed.
|
||||
if ((R + D) < interior.thickness)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
} else if (f.faceid == NEW_FACE) {
|
||||
// New face completely outside needs to be re-added.
|
||||
mesh_mods.new_triangles.emplace_back(f.verts);
|
||||
}
|
||||
|
||||
return false;
|
||||
};
|
||||
|
||||
interior.reset_accessor();
|
||||
|
||||
execution::for_each(
|
||||
exec_policy, size_t(0), faces.size(),
|
||||
[&](size_t face_idx) {
|
||||
const Vec3i &face = faces[face_idx];
|
||||
|
||||
// If the triangle is excluded, we need to keep it.
|
||||
if (is_excluded(face_idx))
|
||||
return;
|
||||
|
||||
std::array<Vec3f, 3> pts = {vertices[face(0)], vertices[face(1)],
|
||||
vertices[face(2)]};
|
||||
|
||||
BoundingBoxf3 facebb{pts.begin(), pts.end()};
|
||||
|
||||
// Face is certainly outside the cavity
|
||||
if (!facebb.intersects(bb))
|
||||
return;
|
||||
|
||||
DivFace df{face, pts, long(face_idx)};
|
||||
|
||||
if (divfn(df)) divide_triangle(df, divfn);
|
||||
},
|
||||
execution::max_concurrency(exec_policy)
|
||||
);
|
||||
|
||||
auto new_faces = reserve_vector<Vec3i>(faces.size() +
|
||||
mesh_mods.new_triangles.size());
|
||||
|
||||
for (size_t face_idx = 0; face_idx < faces.size(); ++face_idx) {
|
||||
if (!mesh_mods.to_remove[face_idx])
|
||||
new_faces.emplace_back(faces[face_idx]);
|
||||
}
|
||||
|
||||
for(size_t i = 0; i < mesh_mods.new_triangles.size(); ++i) {
|
||||
size_t o = vertices.size();
|
||||
vertices.emplace_back(mesh_mods.new_triangles[i][0]);
|
||||
vertices.emplace_back(mesh_mods.new_triangles[i][1]);
|
||||
vertices.emplace_back(mesh_mods.new_triangles[i][2]);
|
||||
new_faces.emplace_back(int(o), int(o + 1), int(o + 2));
|
||||
}
|
||||
|
||||
BOOST_LOG_TRIVIAL(info)
|
||||
<< "Trimming: " << mesh_mods.to_remove_cnt() << " triangles removed";
|
||||
BOOST_LOG_TRIVIAL(info)
|
||||
<< "Trimming: " << mesh_mods.new_triangles.size() << " triangles added";
|
||||
|
||||
faces.swap(new_faces);
|
||||
new_faces = {};
|
||||
|
||||
// mesh = TriangleMesh{mesh.its};
|
||||
//FIXME do we want to repair the mesh? Are there duplicate vertices or flipped triangles?
|
||||
}
|
||||
|
||||
void remove_inside_triangles(TriangleMesh &mesh, const Interior &interior,
|
||||
const std::vector<bool> &exclude_mask)
|
||||
{
|
||||
remove_inside_triangles(mesh.its, interior, exclude_mask);
|
||||
}
|
||||
|
||||
struct FaceHash {
|
||||
|
||||
// A 64 bit number's max hex digits
|
||||
static constexpr size_t MAX_NUM_CHARS = 16;
|
||||
|
||||
// A hash is created for each triangle to be identifiable. The hash uses
|
||||
// only the triangle's geometric traits, not the index in a particular mesh.
|
||||
std::unordered_set<std::string> facehash;
|
||||
|
||||
// Returns the string in reverse, but that is ok for hashing
|
||||
static std::array<char, MAX_NUM_CHARS + 1> to_chars(int64_t val)
|
||||
{
|
||||
std::array<char, MAX_NUM_CHARS + 1> ret;
|
||||
|
||||
static const constexpr char * Conv = "0123456789abcdef";
|
||||
|
||||
auto ptr = ret.begin();
|
||||
auto uval = static_cast<uint64_t>(std::abs(val));
|
||||
while (uval) {
|
||||
*ptr = Conv[uval & 0xf];
|
||||
++ptr;
|
||||
uval = uval >> 4;
|
||||
}
|
||||
if (val < 0) { *ptr = '-'; ++ptr; }
|
||||
*ptr = '\0'; // C style string ending
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static std::string hash(const Vec<3, int64_t> &v)
|
||||
{
|
||||
std::string ret;
|
||||
ret.reserve(3 * MAX_NUM_CHARS);
|
||||
|
||||
for (auto val : v)
|
||||
ret += to_chars(val).data();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static std::string facekey(const Vec3i &face, const std::vector<Vec3f> &vertices)
|
||||
{
|
||||
// Scale to integer to avoid floating points
|
||||
std::array<Vec<3, int64_t>, 3> pts = {
|
||||
scaled<int64_t>(vertices[face(0)]),
|
||||
scaled<int64_t>(vertices[face(1)]),
|
||||
scaled<int64_t>(vertices[face(2)])
|
||||
};
|
||||
|
||||
// Get the first two sides of the triangle, do a cross product and move
|
||||
// that vector to the center of the triangle. This encodes all
|
||||
// information to identify an identical triangle at the same position.
|
||||
Vec<3, int64_t> a = pts[0] - pts[2], b = pts[1] - pts[2];
|
||||
Vec<3, int64_t> c = a.cross(b) + (pts[0] + pts[1] + pts[2]) / 3;
|
||||
|
||||
// Return a concatenated string representation of the coordinates
|
||||
return hash(c);
|
||||
}
|
||||
|
||||
FaceHash (const indexed_triangle_set &its): facehash(its.indices.size())
|
||||
{
|
||||
for (Vec3i face : its.indices) {
|
||||
std::swap(face(0), face(2));
|
||||
facehash.insert(facekey(face, its.vertices));
|
||||
}
|
||||
}
|
||||
|
||||
bool find(const std::string &key)
|
||||
{
|
||||
auto it = facehash.find(key);
|
||||
return it != facehash.end();
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
static void exclude_neighbors(const Vec3i &face,
|
||||
std::vector<bool> &mask,
|
||||
const indexed_triangle_set &its,
|
||||
const VertexFaceIndex &index,
|
||||
size_t recursions)
|
||||
{
|
||||
for (int i = 0; i < 3; ++i) {
|
||||
const auto &neighbors_range = index[face(i)];
|
||||
for (size_t fi_n : neighbors_range) {
|
||||
mask[fi_n] = true;
|
||||
if (recursions > 0)
|
||||
exclude_neighbors(its.indices[fi_n], mask, its, index, recursions - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<bool> create_exclude_mask(const indexed_triangle_set &its,
|
||||
const Interior &interior,
|
||||
const std::vector<DrainHole> &holes)
|
||||
{
|
||||
FaceHash interior_hash{sla::get_mesh(interior)};
|
||||
|
||||
std::vector<bool> exclude_mask(its.indices.size(), false);
|
||||
|
||||
VertexFaceIndex neighbor_index{its};
|
||||
|
||||
for (size_t fi = 0; fi < its.indices.size(); ++fi) {
|
||||
auto &face = its.indices[fi];
|
||||
|
||||
if (interior_hash.find(FaceHash::facekey(face, its.vertices))) {
|
||||
exclude_mask[fi] = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (exclude_mask[fi]) {
|
||||
exclude_neighbors(face, exclude_mask, its, neighbor_index, 1);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Lets deal with the holes. All the triangles of a hole and all the
|
||||
// neighbors of these triangles need to be kept. The neigbors were
|
||||
// created by CGAL mesh boolean operation that modified the original
|
||||
// interior inside the input mesh to contain the holes.
|
||||
Vec3d tr_center = (
|
||||
its.vertices[face(0)] +
|
||||
its.vertices[face(1)] +
|
||||
its.vertices[face(2)]
|
||||
).cast<double>() / 3.;
|
||||
|
||||
// If the center is more than half a mm inside the interior,
|
||||
// it cannot possibly be part of a hole wall.
|
||||
if (sla::get_distance(tr_center, interior) < -0.5)
|
||||
continue;
|
||||
|
||||
Vec3f U = its.vertices[face(1)] - its.vertices[face(0)];
|
||||
Vec3f V = its.vertices[face(2)] - its.vertices[face(0)];
|
||||
Vec3f C = U.cross(V);
|
||||
Vec3f face_normal = C.normalized();
|
||||
|
||||
for (const sla::DrainHole &dh : holes) {
|
||||
if (dh.failed) continue;
|
||||
|
||||
Vec3d dhpos = dh.pos.cast<double>();
|
||||
Vec3d dhend = dhpos + dh.normal.cast<double>() * dh.height;
|
||||
|
||||
Linef3 holeaxis{dhpos, dhend};
|
||||
|
||||
double D_hole_center = line_alg::distance_to(holeaxis, tr_center);
|
||||
double D_hole = std::abs(D_hole_center - dh.radius);
|
||||
float dot = dh.normal.dot(face_normal);
|
||||
|
||||
// Empiric tolerances for center distance and normals angle.
|
||||
// For triangles that are part of a hole wall the angle of
|
||||
// triangle normal and the hole axis is around 90 degrees,
|
||||
// so the dot product is around zero.
|
||||
double D_tol = dh.radius / sla::DrainHole::steps;
|
||||
float normal_angle_tol = 1.f / sla::DrainHole::steps;
|
||||
|
||||
if (D_hole < D_tol && std::abs(dot) < normal_angle_tol) {
|
||||
exclude_mask[fi] = true;
|
||||
exclude_neighbors(face, exclude_mask, its, neighbor_index, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return exclude_mask;
|
||||
}
|
||||
|
||||
DrainHoles transformed_drainhole_points(const ModelObject &mo,
|
||||
const Transform3d &trafo)
|
||||
{
|
||||
auto pts = mo.sla_drain_holes;
|
||||
// const Transform3d& vol_trafo = mo.volumes.front()->get_transformation().get_matrix();
|
||||
const Geometry::Transformation trans(trafo /** vol_trafo*/);
|
||||
const Transform3d& tr = trans.get_matrix();
|
||||
for (sla::DrainHole &hl : pts) {
|
||||
Vec3d pos = hl.pos.cast<double>();
|
||||
Vec3d nrm = hl.normal.cast<double>();
|
||||
|
||||
pos = tr * pos;
|
||||
nrm = tr * nrm - tr.translation();
|
||||
|
||||
// Now shift the hole a bit above the object and make it deeper to
|
||||
// compensate for it. This is to avoid problems when the hole is placed
|
||||
// on (nearly) flat surface.
|
||||
pos -= nrm.normalized() * sla::HoleStickOutLength;
|
||||
|
||||
hl.pos = pos.cast<float>();
|
||||
hl.normal = nrm.cast<float>();
|
||||
hl.height += sla::HoleStickOutLength;
|
||||
}
|
||||
|
||||
return pts;
|
||||
}
|
||||
|
||||
double get_voxel_scale(double mesh_volume, const HollowingConfig &hc)
|
||||
{
|
||||
static constexpr double MIN_SAMPLES_IN_WALL = 3.5;
|
||||
static constexpr double MAX_OVERSAMPL = 8.;
|
||||
static constexpr double UNIT_VOLUME = 500000; // empiric
|
||||
|
||||
// I can't figure out how to increase the grid resolution through openvdb
|
||||
// API so the model will be scaled up before conversion and the result
|
||||
// scaled down. Voxels have a unit size. If I set voxelSize smaller, it
|
||||
// scales the whole geometry down, and doesn't increase the number of
|
||||
// voxels.
|
||||
//
|
||||
// First an allowed range for voxel scale is determined from an initial
|
||||
// range of <MIN_SAMPLES_IN_WALL, MAX_OVERSAMPL>. The final voxel scale is
|
||||
// then chosen from this range using the 'quality:<0, 1>' parameter.
|
||||
// The minimum can be lowered if the wall thickness is great enough and
|
||||
// the maximum is lowered if the model volume very big.
|
||||
|
||||
double sc_divider = std::max(1.0, (mesh_volume / UNIT_VOLUME));
|
||||
double min_oversampl = std::max(MIN_SAMPLES_IN_WALL / hc.min_thickness, 1.);
|
||||
double max_oversampl_scaled = std::max(min_oversampl, MAX_OVERSAMPL / sc_divider);
|
||||
auto voxel_scale = min_oversampl + (max_oversampl_scaled - min_oversampl) * hc.quality;
|
||||
|
||||
BOOST_LOG_TRIVIAL(debug) << "Hollowing: max oversampl will be: " << max_oversampl_scaled;
|
||||
BOOST_LOG_TRIVIAL(debug) << "Hollowing: voxel scale will be: " << voxel_scale;
|
||||
BOOST_LOG_TRIVIAL(debug) << "Hollowing: mesh volume is: " << mesh_volume;
|
||||
|
||||
return voxel_scale;
|
||||
}
|
||||
|
||||
// The same as its_compactify_vertices, but returns a new mesh, doesn't touch
|
||||
// the original
|
||||
static indexed_triangle_set
|
||||
remove_unconnected_vertices(const indexed_triangle_set &its)
|
||||
{
|
||||
if (its.indices.empty()) {};
|
||||
|
||||
indexed_triangle_set M;
|
||||
|
||||
std::vector<int> vtransl(its.vertices.size(), -1);
|
||||
int vcnt = 0;
|
||||
for (auto &f : its.indices) {
|
||||
|
||||
for (int i = 0; i < 3; ++i)
|
||||
if (vtransl[size_t(f(i))] < 0) {
|
||||
|
||||
M.vertices.emplace_back(its.vertices[size_t(f(i))]);
|
||||
vtransl[size_t(f(i))] = vcnt++;
|
||||
}
|
||||
|
||||
std::array<int, 3> new_f = {
|
||||
vtransl[size_t(f(0))],
|
||||
vtransl[size_t(f(1))],
|
||||
vtransl[size_t(f(2))]
|
||||
};
|
||||
|
||||
M.indices.emplace_back(new_f[0], new_f[1], new_f[2]);
|
||||
}
|
||||
|
||||
return M;
|
||||
}
|
||||
|
||||
int hollow_mesh_and_drill(indexed_triangle_set &hollowed_mesh,
|
||||
const Interior &interior,
|
||||
const DrainHoles &drainholes,
|
||||
std::function<void(size_t)> on_hole_fail)
|
||||
{
|
||||
auto tree = AABBTreeIndirect::build_aabb_tree_over_indexed_triangle_set(
|
||||
hollowed_mesh.vertices,
|
||||
hollowed_mesh.indices
|
||||
);
|
||||
|
||||
std::uniform_real_distribution<float> dist(0., float(EPSILON));
|
||||
auto holes_mesh_cgal = MeshBoolean::cgal::triangle_mesh_to_cgal({}, {});
|
||||
indexed_triangle_set part_to_drill = hollowed_mesh;
|
||||
|
||||
std::mt19937 m_rng{std::random_device{}()};
|
||||
|
||||
for (size_t i = 0; i < drainholes.size(); ++i) {
|
||||
sla::DrainHole holept = drainholes[i];
|
||||
|
||||
holept.normal += Vec3f{dist(m_rng), dist(m_rng), dist(m_rng)};
|
||||
holept.normal.normalize();
|
||||
holept.pos += Vec3f{dist(m_rng), dist(m_rng), dist(m_rng)};
|
||||
indexed_triangle_set m = holept.to_mesh();
|
||||
|
||||
part_to_drill.indices.clear();
|
||||
auto bb = bounding_box(m);
|
||||
Eigen::AlignedBox<float, 3> ebb{bb.min.cast<float>(),
|
||||
bb.max.cast<float>()};
|
||||
|
||||
AABBTreeIndirect::traverse(
|
||||
tree,
|
||||
AABBTreeIndirect::intersecting(ebb),
|
||||
[&part_to_drill, &hollowed_mesh](const auto& node)
|
||||
{
|
||||
part_to_drill.indices.emplace_back(hollowed_mesh.indices[node.idx]);
|
||||
// continue traversal
|
||||
return true;
|
||||
});
|
||||
|
||||
auto cgal_meshpart = MeshBoolean::cgal::triangle_mesh_to_cgal(
|
||||
remove_unconnected_vertices(part_to_drill));
|
||||
|
||||
if (MeshBoolean::cgal::does_self_intersect(*cgal_meshpart)) {
|
||||
on_hole_fail(i);
|
||||
continue;
|
||||
}
|
||||
|
||||
auto cgal_hole = MeshBoolean::cgal::triangle_mesh_to_cgal(m);
|
||||
MeshBoolean::cgal::plus(*holes_mesh_cgal, *cgal_hole);
|
||||
}
|
||||
|
||||
auto ret = static_cast<int>(HollowMeshResult::Ok);
|
||||
|
||||
if (MeshBoolean::cgal::does_self_intersect(*holes_mesh_cgal)) {
|
||||
ret |= static_cast<int>(HollowMeshResult::DrillingFailed);
|
||||
}
|
||||
|
||||
auto hollowed_mesh_cgal = MeshBoolean::cgal::triangle_mesh_to_cgal(hollowed_mesh);
|
||||
|
||||
if (!MeshBoolean::cgal::does_bound_a_volume(*hollowed_mesh_cgal)) {
|
||||
ret |= static_cast<int>(HollowMeshResult::FaultyMesh);
|
||||
}
|
||||
|
||||
if (!MeshBoolean::cgal::empty(*holes_mesh_cgal)
|
||||
&& !MeshBoolean::cgal::does_bound_a_volume(*holes_mesh_cgal)) {
|
||||
ret |= static_cast<int>(HollowMeshResult::FaultyHoles);
|
||||
}
|
||||
|
||||
// Don't even bother
|
||||
if (ret & static_cast<int>(HollowMeshResult::DrillingFailed))
|
||||
return ret;
|
||||
|
||||
try {
|
||||
if (!MeshBoolean::cgal::empty(*holes_mesh_cgal))
|
||||
MeshBoolean::cgal::minus(*hollowed_mesh_cgal, *holes_mesh_cgal);
|
||||
|
||||
hollowed_mesh =
|
||||
MeshBoolean::cgal::cgal_to_indexed_triangle_set(*hollowed_mesh_cgal);
|
||||
|
||||
std::vector<bool> exclude_mask =
|
||||
create_exclude_mask(hollowed_mesh, interior, drainholes);
|
||||
|
||||
sla::remove_inside_triangles(hollowed_mesh, interior, exclude_mask);
|
||||
} catch (const Slic3r::RuntimeError &) {
|
||||
ret |= static_cast<int>(HollowMeshResult::DrillingFailed);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
216
src/libslic3r/SLA/Hollowing.hpp
Normal file
@@ -0,0 +1,216 @@
|
||||
#ifndef SLA_HOLLOWING_HPP
|
||||
#define SLA_HOLLOWING_HPP
|
||||
|
||||
#include <memory>
|
||||
#include <libslic3r/TriangleMesh.hpp>
|
||||
#include <libslic3r/OpenVDBUtils.hpp>
|
||||
#include <libslic3r/SLA/JobController.hpp>
|
||||
#include <libslic3r/CSGMesh/VoxelizeCSGMesh.hpp>
|
||||
|
||||
namespace Slic3r {
|
||||
|
||||
class ModelObject;
|
||||
|
||||
namespace sla {
|
||||
|
||||
struct HollowingConfig
|
||||
{
|
||||
double min_thickness = 2.;
|
||||
double quality = 0.5;
|
||||
double closing_distance = 0.5;
|
||||
bool enabled = true;
|
||||
};
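// Illustrative sketch only (variable names are placeholders, not part of the
// API): hollowing a mesh with a 2 mm shell using the default quality:
//
//     HollowingConfig hcfg;
//     hcfg.min_thickness = 2.;
//     InteriorPtr interior = generate_interior(its, hcfg); // its: an indexed_triangle_set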
|
||||
|
||||
enum HollowingFlags { hfRemoveInsideTriangles = 0x1 };
|
||||
|
||||
// All data related to a generated mesh interior. Includes the 3D grid and mesh
|
||||
// and various metadata. No need to manipulate from outside.
|
||||
struct Interior;
|
||||
struct InteriorDeleter { void operator()(Interior *p); };
|
||||
using InteriorPtr = std::unique_ptr<Interior, InteriorDeleter>;
|
||||
|
||||
indexed_triangle_set & get_mesh(Interior &interior);
|
||||
const indexed_triangle_set &get_mesh(const Interior &interior);
|
||||
|
||||
const VoxelGrid & get_grid(const Interior &interior);
|
||||
VoxelGrid &get_grid(Interior &interior);
|
||||
|
||||
struct DrainHole
|
||||
{
|
||||
Vec3f pos;
|
||||
Vec3f normal;
|
||||
float radius;
|
||||
float height;
|
||||
bool failed = false;
|
||||
|
||||
DrainHole()
|
||||
: pos(Vec3f::Zero()), normal(Vec3f::UnitZ()), radius(5.f), height(10.f)
|
||||
{}
|
||||
|
||||
DrainHole(Vec3f p, Vec3f n, float r, float h, bool fl = false)
|
||||
: pos(p), normal(n), radius(r), height(h), failed(fl)
|
||||
{}
|
||||
|
||||
DrainHole(const DrainHole& rhs) :
|
||||
DrainHole(rhs.pos, rhs.normal, rhs.radius, rhs.height, rhs.failed) {}
|
||||
|
||||
bool operator==(const DrainHole &sp) const;
|
||||
|
||||
bool operator!=(const DrainHole &sp) const { return !(sp == (*this)); }
|
||||
|
||||
bool is_inside(const Vec3f& pt) const;
|
||||
|
||||
bool get_intersections(const Vec3f& s, const Vec3f& dir,
|
||||
std::array<std::pair<float, Vec3d>, 2>& out) const;
|
||||
|
||||
indexed_triangle_set to_mesh() const;
|
||||
|
||||
template<class Archive> inline void serialize(Archive &ar)
|
||||
{
|
||||
ar(pos, normal, radius, height, failed);
|
||||
}
|
||||
|
||||
static constexpr size_t steps = 32;
|
||||
};
|
||||
|
||||
using DrainHoles = std::vector<DrainHole>;
|
||||
|
||||
constexpr float HoleStickOutLength = 1.f;
|
||||
|
||||
double get_voxel_scale(double mesh_volume, const HollowingConfig &hc);
|
||||
|
||||
InteriorPtr generate_interior(const VoxelGrid &mesh,
|
||||
const HollowingConfig & = {},
|
||||
const JobController &ctl = {});
|
||||
|
||||
inline InteriorPtr generate_interior(const indexed_triangle_set &mesh,
|
||||
const HollowingConfig &hc = {},
|
||||
const JobController &ctl = {})
|
||||
{
|
||||
auto voxel_scale = get_voxel_scale(its_volume(mesh), hc);
|
||||
auto statusfn = [&ctl](int){ return ctl.stopcondition && ctl.stopcondition(); };
|
||||
auto grid = mesh_to_grid(mesh, MeshToGridParams{}
|
||||
.voxel_scale(voxel_scale)
|
||||
.exterior_bandwidth(3.f)
|
||||
.interior_bandwidth(3.f)
|
||||
.statusfn(statusfn));
|
||||
|
||||
if (!grid || (ctl.stopcondition && ctl.stopcondition()))
|
||||
return {};
|
||||
|
||||
// if (its_is_splittable(mesh))
|
||||
grid = redistance_grid(*grid, 0.0f, 3.f, 3.f);
|
||||
|
||||
return grid ? generate_interior(*grid, hc, ctl) : InteriorPtr{};
|
||||
}
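// Find the volume of the largest positively contributing (union) mesh in a
// CSG sequence. Meshes inside subtracted or intersected subexpressions are
// skipped, since they do not add material; the result is later used to pick
// a voxel scale for hollowing.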
|
||||
|
||||
template<class Cont> double csgmesh_positive_maxvolume(const Cont &csg)
|
||||
{
|
||||
double mesh_vol = 0;
|
||||
|
||||
bool skip = false;
|
||||
for (const auto &m : csg) {
|
||||
auto op = csg::get_operation(m);
|
||||
auto stackop = csg::get_stack_operation(m);
|
||||
if (stackop == csg::CSGStackOp::Push && op != csg::CSGType::Union)
|
||||
skip = true;
|
||||
|
||||
if (!skip && csg::get_mesh(m) && op == csg::CSGType::Union)
|
||||
mesh_vol = std::max(mesh_vol,
|
||||
double(its_volume(*(csg::get_mesh(m)))));
|
||||
|
||||
if (stackop == csg::CSGStackOp::Pop)
|
||||
skip = false;
|
||||
}
|
||||
|
||||
return mesh_vol;
|
||||
}
|
||||
|
||||
template<class It>
|
||||
InteriorPtr generate_interior(const Range<It> &csgparts,
|
||||
const HollowingConfig &hc = {},
|
||||
const JobController &ctl = {})
|
||||
{
|
||||
double mesh_vol = csgmesh_positive_maxvolume(csgparts);
|
||||
double voxsc = get_voxel_scale(mesh_vol, hc);
|
||||
|
||||
auto params = csg::VoxelizeParams{}
|
||||
.voxel_scale(voxsc)
|
||||
.exterior_bandwidth(3.f)
|
||||
.interior_bandwidth(3.f)
|
||||
.statusfn([&ctl](int){ return ctl.stopcondition && ctl.stopcondition(); });
|
||||
|
||||
auto ptr = csg::voxelize_csgmesh(csgparts, params);
|
||||
|
||||
if (!ptr || (ctl.stopcondition && ctl.stopcondition()))
|
||||
return {};
|
||||
|
||||
// TODO: figure out issues without the redistance
|
||||
// if (csgparts.size() > 1 || its_is_splittable(*csg::get_mesh(*csgparts.begin())))
|
||||
|
||||
ptr = redistance_grid(*ptr, 0.0f, 3.f, 3.f);
|
||||
|
||||
return ptr ? generate_interior(*ptr, hc, ctl) : InteriorPtr{};
|
||||
}
|
||||
|
||||
// Will do the hollowing
|
||||
void hollow_mesh(TriangleMesh &mesh, const HollowingConfig &cfg, int flags = 0);
|
||||
|
||||
// Hollowing prepared in "interior", merge with original mesh
|
||||
void hollow_mesh(TriangleMesh &mesh, const Interior &interior, int flags = 0);
|
||||
|
||||
// Will do the hollowing
|
||||
void hollow_mesh(indexed_triangle_set &mesh, const HollowingConfig &cfg, int flags = 0);
|
||||
|
||||
// Hollowing prepared in "interior", merge with original mesh
|
||||
void hollow_mesh(indexed_triangle_set &mesh, const Interior &interior, int flags = 0);
|
||||
|
||||
enum class HollowMeshResult {
|
||||
Ok = 0,
|
||||
FaultyMesh = 1,
|
||||
FaultyHoles = 2,
|
||||
DrillingFailed = 4
|
||||
};
|
||||
|
||||
// Return HollowMeshResult codes OR-ed.
|
||||
int hollow_mesh_and_drill(
|
||||
indexed_triangle_set &mesh,
|
||||
const Interior& interior,
|
||||
const DrainHoles &holes,
|
||||
std::function<void(size_t)> on_hole_fail = [](size_t){});
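// Sketch of how a caller might interpret the OR-ed result (illustrative only):
//
//     int r = hollow_mesh_and_drill(its, interior, holes);
//     if (r & int(HollowMeshResult::DrillingFailed)) { /* some holes could not be drilled */ }
//     if (r & int(HollowMeshResult::FaultyMesh))     { /* input mesh does not bound a volume */ }
//     if (r & int(HollowMeshResult::FaultyHoles))    { /* merged hole mesh is not watertight */ }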
|
||||
|
||||
void remove_inside_triangles(TriangleMesh &mesh, const Interior &interior,
|
||||
const std::vector<bool> &exclude_mask = {});
|
||||
|
||||
void remove_inside_triangles(indexed_triangle_set &mesh, const Interior &interior,
|
||||
const std::vector<bool> &exclude_mask = {});
|
||||
|
||||
sla::DrainHoles transformed_drainhole_points(const ModelObject &mo,
|
||||
const Transform3d &trafo);
|
||||
|
||||
void cut_drainholes(std::vector<ExPolygons> & obj_slices,
|
||||
const std::vector<float> &slicegrid,
|
||||
float closing_radius,
|
||||
const sla::DrainHoles & holes,
|
||||
std::function<void(void)> thr);
|
||||
|
||||
inline void swap_normals(indexed_triangle_set &its)
|
||||
{
|
||||
for (auto &face : its.indices)
|
||||
std::swap(face(0), face(2));
|
||||
}
|
||||
|
||||
// Create exclude mask for triangle removal inside hollowed interiors.
|
||||
// This is necessary when the interior is already part of the mesh which was
|
||||
// drilled using CGAL mesh boolean operation. Excluded will be the triangles
|
||||
// originally part of the interior mesh and triangles that make up the drilled
|
||||
// hole walls.
|
||||
std::vector<bool> create_exclude_mask(
|
||||
const indexed_triangle_set &its,
|
||||
const sla::Interior &interior,
|
||||
const std::vector<sla::DrainHole> &holes);
|
||||
|
||||
} // namespace sla
|
||||
} // namespace Slic3r
|
||||
|
||||
#endif // SLA_HOLLOWING_HPP
|
||||
32
src/libslic3r/SLA/JobController.hpp
Normal file
@@ -0,0 +1,32 @@
|
||||
#ifndef SLA_JOBCONTROLLER_HPP
|
||||
#define SLA_JOBCONTROLLER_HPP
|
||||
|
||||
#include <functional>
|
||||
#include <string>
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
/// A Control structure for the support calculation. Consists of the status
|
||||
/// indicator callback and the stop condition predicate.
|
||||
struct JobController
|
||||
{
|
||||
using StatusFn = std::function<void(unsigned, const std::string&)>;
|
||||
using StopCond = std::function<bool(void)>;
|
||||
using CancelFn = std::function<void(void)>;
|
||||
|
||||
// This will signal the status of the calculation to the front-end
|
||||
StatusFn statuscb = [](unsigned, const std::string&){};
|
||||
|
||||
// Returns true if the calculation should be aborted.
|
||||
StopCond stopcondition = [](){ return false; };
|
||||
|
||||
// Similar to the cancel callback. This one should check the stop condition
// and, if it holds, throw an appropriate exception (TriangleMeshSlicer needs
// this). Consider it a hard abort, while stopcondition permits the algorithm
// to terminate itself.
|
||||
CancelFn cancelfn = [](){};
|
||||
};
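// Usage sketch (illustrative): wiring the controller to a cancel flag and a
// logger. The names below are placeholders, not part of the API:
//
//     bool canceled = false;
//     JobController ctl;
//     ctl.stopcondition = [&canceled] { return canceled; };
//     ctl.statuscb = [](unsigned st, const std::string &msg) {
//         BOOST_LOG_TRIVIAL(debug) << st << "% " << msg;
//     };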
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
|
||||
#endif // SLA_JOBCONTROLLER_HPP
|
||||
535
src/libslic3r/SLA/Pad.cpp
Normal file
@@ -0,0 +1,535 @@
|
||||
#include <libslic3r/SLA/Pad.hpp>
|
||||
#include <libslic3r/SLA/SpatIndex.hpp>
|
||||
#include <libslic3r/BoostAdapter.hpp>
|
||||
//#include <libslic3r/SLA/Contour3D.hpp>
|
||||
#include <libslic3r/TriangleMeshSlicer.hpp>
|
||||
|
||||
#include "ConcaveHull.hpp"
|
||||
|
||||
#include "boost/log/trivial.hpp"
|
||||
#include "ClipperUtils.hpp"
|
||||
#include "Tesselate.hpp"
|
||||
#include "MTUtils.hpp"
|
||||
|
||||
#include "TriangulateWall.hpp"
|
||||
|
||||
// For debugging:
|
||||
// #include <fstream>
|
||||
// #include <libnest2d/tools/benchmark.h>
|
||||
#include "SVG.hpp"
|
||||
|
||||
#include "I18N.hpp"
|
||||
#include <boost/log/trivial.hpp>
|
||||
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
namespace {
|
||||
|
||||
indexed_triangle_set walls(
|
||||
const Polygon &lower,
|
||||
const Polygon &upper,
|
||||
double lower_z_mm,
|
||||
double upper_z_mm)
|
||||
{
|
||||
indexed_triangle_set w;
|
||||
triangulate_wall(w.vertices, w.indices, lower, upper, lower_z_mm,
|
||||
upper_z_mm);
|
||||
|
||||
return w;
|
||||
}
|
||||
|
||||
// Same as walls() but with identical higher and lower polygons.
|
||||
inline indexed_triangle_set straight_walls(const Polygon &plate,
|
||||
double lo_z,
|
||||
double hi_z)
|
||||
{
|
||||
return wall_strip(plate, hi_z, lo_z); //walls(plate, plate, lo_z, hi_z);
|
||||
}
|
||||
|
||||
// Function to cut tiny connector cavities for a given polygon. The input poly
// will be offset by "padding" and small rectangle shaped cavities will be
// inserted along the perimeter at every "stride" distance. The stick rectangles
// will have a width of about "stick_width". The input dimensions are in world
// measure, not the scaled clipper units.
|
||||
void breakstick_holes(Points& pts,
|
||||
double padding,
|
||||
double stride,
|
||||
double stick_width,
|
||||
double penetration)
|
||||
{
|
||||
if(stride <= EPSILON || stick_width <= EPSILON || padding <= EPSILON)
|
||||
return;
|
||||
|
||||
// SVG svg("bridgestick_plate.svg");
|
||||
// svg.draw(poly);
|
||||
|
||||
// The connector stick will be a small rectangle with dimensions
|
||||
// stick_width x (penetration + padding) to have some penetration
|
||||
// into the input polygon.
|
||||
|
||||
Points out;
|
||||
out.reserve(2 * pts.size()); // output polygon points
|
||||
|
||||
// stick bottom and right edge dimensions
|
||||
double sbottom = scaled(stick_width);
|
||||
double sright = scaled(penetration + padding);
|
||||
|
||||
// scaled stride distance
|
||||
double sstride = scaled(stride);
|
||||
double t = 0;
|
||||
|
||||
// process pairs of vertices as an edge, start with the last and
|
||||
// first point
|
||||
for (size_t i = pts.size() - 1, j = 0; j < pts.size(); i = j, ++j) {
|
||||
// Get vertices and the direction vectors
|
||||
const Point &a = pts[i], &b = pts[j];
|
||||
Vec2d dir = b.cast<double>() - a.cast<double>();
|
||||
double nrm = dir.norm();
|
||||
dir /= nrm;
|
||||
Vec2d dirp(-dir(Y), dir(X));
|
||||
|
||||
// Insert start point
|
||||
out.emplace_back(a);
|
||||
|
||||
// dodge the start point, do not make sticks on the joins
|
||||
while (t < sbottom) t += sbottom;
|
||||
double tend = nrm - sbottom;
|
||||
|
||||
while (t < tend) { // insert the stick on the polygon perimeter
|
||||
|
||||
// calculate the stick rectangle vertices and insert them
|
||||
// into the output.
|
||||
Point p1 = a + (t * dir).cast<coord_t>();
|
||||
Point p2 = p1 + (sright * dirp).cast<coord_t>();
|
||||
Point p3 = p2 + (sbottom * dir).cast<coord_t>();
|
||||
Point p4 = p3 + (sright * -dirp).cast<coord_t>();
|
||||
out.insert(out.end(), {p1, p2, p3, p4});
|
||||
|
||||
// continue along the perimeter
|
||||
t += sstride;
|
||||
}
|
||||
|
||||
t = t - nrm;
|
||||
|
||||
// Insert edge endpoint
|
||||
out.emplace_back(b);
|
||||
}
|
||||
|
||||
// swap the new points into the input vector
|
||||
out.shrink_to_fit();
|
||||
pts.swap(out);
|
||||
}
|
||||
|
||||
template<class...Args>
|
||||
ExPolygons breakstick_holes(const ExPolygons &input, Args...args)
|
||||
{
|
||||
ExPolygons ret = input;
|
||||
for (ExPolygon &p : ret) {
|
||||
breakstick_holes(p.contour.points, args...);
|
||||
for (auto &h : p.holes) breakstick_holes(h.points, args...);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline coord_t get_waffle_offset(const PadConfig &c)
|
||||
{
|
||||
return scaled(c.brim_size_mm + c.wing_distance());
|
||||
}
|
||||
|
||||
static inline double get_merge_distance(const PadConfig &c)
|
||||
{
|
||||
return 2. * (1.8 * c.wall_thickness_mm) + c.max_merge_dist_mm;
|
||||
}
|
||||
|
||||
// Part of the pad configuration that is used for 3D geometry generation
|
||||
struct PadConfig3D {
|
||||
double thickness, height, wing_height, slope;
|
||||
|
||||
explicit PadConfig3D(const PadConfig &cfg2d)
|
||||
: thickness{cfg2d.wall_thickness_mm}
|
||||
, height{cfg2d.full_height()}
|
||||
, wing_height{cfg2d.wall_height_mm}
|
||||
, slope{cfg2d.wall_slope}
|
||||
{}
|
||||
|
||||
inline double bottom_offset() const
|
||||
{
|
||||
return (thickness + wing_height) / std::tan(slope);
|
||||
}
|
||||
};
|
||||
|
||||
// Outer part of the skeleton is used to generate the waffled edges of the pad.
|
||||
// Inner parts will not be waffled or offset. Inner parts are only used if
|
||||
// pad is generated around the object and correspond to holes and inner polygons
|
||||
// in the model blueprint.
|
||||
struct PadSkeleton { ExPolygons inner, outer; };
|
||||
|
||||
PadSkeleton divide_blueprint(const ExPolygons &bp)
|
||||
{
|
||||
ClipperLib::PolyTree ptree = union_pt(bp);
|
||||
|
||||
PadSkeleton ret;
|
||||
ret.inner.reserve(size_t(ptree.Total()));
|
||||
ret.outer.reserve(size_t(ptree.Total()));
|
||||
|
||||
for (ClipperLib::PolyTree::PolyNode *node : ptree.Childs) {
|
||||
ExPolygon poly;
|
||||
poly.contour.points = std::move(node->Contour);
|
||||
for (ClipperLib::PolyTree::PolyNode *child : node->Childs) {
|
||||
poly.holes.emplace_back(std::move(child->Contour));
|
||||
|
||||
traverse_pt(child->Childs, &ret.inner);
|
||||
}
|
||||
|
||||
ret.outer.emplace_back(poly);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// A helper class for storing polygons and maintaining a spatial index of their
|
||||
// bounding boxes.
|
||||
class Intersector {
|
||||
BoxIndex m_index;
|
||||
ExPolygons m_polys;
|
||||
|
||||
public:
|
||||
|
||||
// Add a new polygon to the index
|
||||
void add(const ExPolygon &ep)
|
||||
{
|
||||
m_polys.emplace_back(ep);
|
||||
m_index.insert(get_extents(ep), unsigned(m_index.size()));
|
||||
}
|
||||
|
||||
// Check an arbitrary polygon for intersection with the indexed polygons
|
||||
bool intersects(const ExPolygon &poly)
|
||||
{
|
||||
// Create a suitable query bounding box.
|
||||
auto bb = poly.contour.bounding_box();
|
||||
|
||||
std::vector<BoxIndexEl> qres = m_index.query(bb, BoxIndex::qtIntersects);
|
||||
|
||||
// Now check intersections on the actual polygons (not just the boxes)
|
||||
bool is_overlap = false;
|
||||
auto qit = qres.begin();
|
||||
while (!is_overlap && qit != qres.end())
|
||||
is_overlap = is_overlap || poly.overlaps(m_polys[(qit++)->second]);
|
||||
|
||||
return is_overlap;
|
||||
}
|
||||
};
|
||||
|
||||
// This dummy intersector to implement the "force pad everywhere" feature
|
||||
struct DummyIntersector
|
||||
{
|
||||
inline void add(const ExPolygon &) {}
|
||||
inline bool intersects(const ExPolygon &) { return true; }
|
||||
};
|
||||
|
||||
template<class _Intersector>
|
||||
class _AroundPadSkeleton : public PadSkeleton
|
||||
{
|
||||
// A spatial index used to be able to efficiently find intersections of
|
||||
// support polygons with the model polygons.
|
||||
_Intersector m_intersector;
|
||||
|
||||
public:
|
||||
_AroundPadSkeleton(const ExPolygons &support_blueprint,
|
||||
const ExPolygons &model_blueprint,
|
||||
const PadConfig & cfg,
|
||||
ThrowOnCancel thr)
|
||||
{
|
||||
// We need to merge the support and the model contours in a special
|
||||
// way in which the model contours have to be subtracted from the
|
||||
// support contours. The pad has to have a hole in which the model can
|
||||
// fit perfectly (thus the subtraction -- diff_ex). Also, the pad has
|
||||
// to be eliminated from areas where there is no need for a pad, due
|
||||
// to missing supports.
|
||||
|
||||
add_supports_to_index(support_blueprint);
|
||||
|
||||
auto model_bp_offs =
|
||||
offset_ex(model_blueprint,
|
||||
scaled<float>(cfg.embed_object.object_gap_mm),
|
||||
ClipperLib::jtMiter, 1);
|
||||
|
||||
ExPolygons fullcvh =
|
||||
wafflized_concave_hull(support_blueprint, model_bp_offs, cfg, thr);
|
||||
|
||||
auto model_bp_sticks =
|
||||
breakstick_holes(model_bp_offs, cfg.embed_object.object_gap_mm,
|
||||
cfg.embed_object.stick_stride_mm,
|
||||
cfg.embed_object.stick_width_mm,
|
||||
cfg.embed_object.stick_penetration_mm);
|
||||
|
||||
ExPolygons fullpad = diff_ex(fullcvh, model_bp_sticks);
|
||||
|
||||
PadSkeleton divided = divide_blueprint(fullpad);
|
||||
|
||||
remove_redundant_parts(divided.outer);
|
||||
remove_redundant_parts(divided.inner);
|
||||
|
||||
outer = std::move(divided.outer);
|
||||
inner = std::move(divided.inner);
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
// Add the support blueprint to the search index to be queried later
|
||||
void add_supports_to_index(const ExPolygons &supp_bp)
|
||||
{
|
||||
for (auto &ep : supp_bp) m_intersector.add(ep);
|
||||
}
|
||||
|
||||
// Create the wafflized pad around all objects in the scene. This pad doesn't
|
||||
// have any holes yet.
|
||||
ExPolygons wafflized_concave_hull(const ExPolygons &supp_bp,
|
||||
const ExPolygons &model_bp,
|
||||
const PadConfig &cfg,
|
||||
ThrowOnCancel thr)
|
||||
{
|
||||
auto allin = reserve_vector<ExPolygon>(supp_bp.size() + model_bp.size());
|
||||
|
||||
for (auto &ep : supp_bp) allin.emplace_back(ep.contour);
|
||||
for (auto &ep : model_bp) allin.emplace_back(ep.contour);
|
||||
|
||||
ConcaveHull cchull{allin, get_merge_distance(cfg), thr};
|
||||
return offset_waffle_style_ex(cchull, get_waffle_offset(cfg));
|
||||
}
|
||||
|
||||
// To remove parts of the pad skeleton which do not host any supports
|
||||
void remove_redundant_parts(ExPolygons &parts)
|
||||
{
|
||||
auto endit = std::remove_if(parts.begin(), parts.end(),
|
||||
[this](const ExPolygon &p) {
|
||||
return !m_intersector.intersects(p);
|
||||
});
|
||||
|
||||
parts.erase(endit, parts.end());
|
||||
}
|
||||
};
|
||||
|
||||
using AroundPadSkeleton = _AroundPadSkeleton<Intersector>;
|
||||
using BrimPadSkeleton = _AroundPadSkeleton<DummyIntersector>;
|
||||
|
||||
class BelowPadSkeleton : public PadSkeleton
|
||||
{
|
||||
public:
|
||||
BelowPadSkeleton(const ExPolygons &support_blueprint,
|
||||
const ExPolygons &model_blueprint,
|
||||
const PadConfig & cfg,
|
||||
ThrowOnCancel thr)
|
||||
{
|
||||
outer.reserve(support_blueprint.size() + model_blueprint.size());
|
||||
|
||||
for (auto &ep : support_blueprint) outer.emplace_back(ep.contour);
|
||||
for (auto &ep : model_blueprint) outer.emplace_back(ep.contour);
|
||||
|
||||
ConcaveHull ochull{outer, get_merge_distance(cfg), thr};
|
||||
|
||||
outer = offset_waffle_style_ex(ochull, get_waffle_offset(cfg));
|
||||
}
|
||||
};
|
||||
|
||||
// Offset the contour only, leave the holes untouched
|
||||
template<class...Args>
|
||||
ExPolygon offset_contour_only(const ExPolygon &poly, coord_t delta, Args...args)
|
||||
{
|
||||
Polygons tmp = offset(poly.contour, float(delta), args...);
|
||||
|
||||
if (tmp.empty()) return {};
|
||||
|
||||
Polygons holes = poly.holes;
|
||||
for (auto &h : holes) h.reverse();
|
||||
|
||||
ExPolygons tmp2 = diff_ex(tmp, holes);
|
||||
|
||||
if (tmp2.empty()) return {};
|
||||
|
||||
return std::move(tmp2.front());
|
||||
}
|
||||
|
||||
bool add_cavity(indexed_triangle_set &pad,
|
||||
ExPolygon & top_poly,
|
||||
const PadConfig3D & cfg,
|
||||
ThrowOnCancel thr)
|
||||
{
|
||||
auto logerr = []{BOOST_LOG_TRIVIAL(error)<<"Could not create pad cavity";};
|
||||
|
||||
double wing_distance = cfg.wing_height / std::tan(cfg.slope);
|
||||
coord_t delta_inner = -scaled(cfg.thickness + wing_distance);
|
||||
coord_t delta_middle = -scaled(cfg.thickness);
|
||||
ExPolygon inner_base = offset_contour_only(top_poly, delta_inner);
|
||||
ExPolygon middle_base = offset_contour_only(top_poly, delta_middle);
|
||||
|
||||
if (inner_base.empty() || middle_base.empty()) { logerr(); return false; }
|
||||
|
||||
ExPolygons pdiff = diff_ex(top_poly, middle_base.contour);
|
||||
|
||||
if (pdiff.size() != 1) { logerr(); return false; }
|
||||
|
||||
top_poly = pdiff.front();
|
||||
|
||||
double z_min = -cfg.wing_height, z_max = 0;
|
||||
its_merge(pad, walls(inner_base.contour, middle_base.contour, z_min, z_max));
|
||||
thr();
|
||||
its_merge(pad, triangulate_expolygon_3d(inner_base, z_min, NORMALS_UP));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
indexed_triangle_set create_outer_pad_geometry(const ExPolygons & skeleton,
|
||||
const PadConfig3D &cfg,
|
||||
ThrowOnCancel thr)
|
||||
{
|
||||
indexed_triangle_set ret;
|
||||
|
||||
for (const ExPolygon &pad_part : skeleton) {
|
||||
ExPolygon top_poly{pad_part};
|
||||
ExPolygon bottom_poly =
|
||||
offset_contour_only(pad_part, -scaled(cfg.bottom_offset()));
|
||||
|
||||
if (bottom_poly.empty()) continue;
|
||||
thr();
|
||||
|
||||
double z_min = -cfg.height, z_max = 0;
|
||||
its_merge(ret, walls(top_poly.contour, bottom_poly.contour, z_max, z_min));
|
||||
|
||||
if (cfg.wing_height > 0. && add_cavity(ret, top_poly, cfg, thr))
|
||||
z_max = -cfg.wing_height;
|
||||
|
||||
for (auto &h : bottom_poly.holes)
|
||||
its_merge(ret, straight_walls(h, z_max, z_min));
|
||||
|
||||
its_merge(ret, triangulate_expolygon_3d(bottom_poly, z_min, NORMALS_DOWN));
|
||||
its_merge(ret, triangulate_expolygon_3d(top_poly, NORMALS_UP));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
indexed_triangle_set create_inner_pad_geometry(const ExPolygons & skeleton,
|
||||
const PadConfig3D &cfg,
|
||||
ThrowOnCancel thr)
|
||||
{
|
||||
indexed_triangle_set ret;
|
||||
|
||||
double z_max = 0., z_min = -cfg.height;
|
||||
for (const ExPolygon &pad_part : skeleton) {
|
||||
thr();
|
||||
its_merge(ret, straight_walls(pad_part.contour, z_max, z_min));
|
||||
|
||||
for (auto &h : pad_part.holes)
|
||||
its_merge(ret, straight_walls(h, z_max, z_min));
|
||||
|
||||
its_merge(ret, triangulate_expolygon_3d(pad_part, z_min, NORMALS_DOWN));
|
||||
its_merge(ret, triangulate_expolygon_3d(pad_part, z_max, NORMALS_UP));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
indexed_triangle_set create_pad_geometry(const PadSkeleton &skelet,
|
||||
const PadConfig & cfg,
|
||||
ThrowOnCancel thr)
|
||||
{
|
||||
#ifndef NDEBUG
|
||||
SVG svg("pad_skeleton.svg");
|
||||
svg.draw(skelet.outer, "green");
|
||||
svg.draw(skelet.inner, "blue");
|
||||
svg.Close();
|
||||
#endif
|
||||
|
||||
PadConfig3D cfg3d(cfg);
|
||||
auto pg = create_outer_pad_geometry(skelet.outer, cfg3d, thr);
|
||||
its_merge(pg, create_inner_pad_geometry(skelet.inner, cfg3d, thr));
|
||||
|
||||
return pg;
|
||||
}
|
||||
|
||||
indexed_triangle_set create_pad_geometry(const ExPolygons &supp_bp,
|
||||
const ExPolygons &model_bp,
|
||||
const PadConfig & cfg,
|
||||
ThrowOnCancel thr)
|
||||
{
|
||||
PadSkeleton skelet;
|
||||
|
||||
if (cfg.embed_object.enabled) {
|
||||
if (cfg.embed_object.everywhere)
|
||||
skelet = BrimPadSkeleton(supp_bp, model_bp, cfg, thr);
|
||||
else
|
||||
skelet = AroundPadSkeleton(supp_bp, model_bp, cfg, thr);
|
||||
} else
|
||||
skelet = BelowPadSkeleton(supp_bp, model_bp, cfg, thr);
|
||||
|
||||
return create_pad_geometry(skelet, cfg, thr);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
void pad_blueprint(const indexed_triangle_set &mesh,
|
||||
ExPolygons & output,
|
||||
const std::vector<float> & heights,
|
||||
ThrowOnCancel thrfn)
|
||||
{
|
||||
if (mesh.empty()) return;
|
||||
|
||||
std::vector<ExPolygons> out = slice_mesh_ex(mesh, heights, thrfn);
|
||||
|
||||
size_t count = 0;
|
||||
for(auto& o : out) count += o.size();
|
||||
|
||||
// Unification is expensive; simplifying first also speeds up the pad generation
|
||||
auto tmp = reserve_vector<ExPolygon>(count);
|
||||
for(ExPolygons& o : out)
|
||||
for(ExPolygon& e : o) {
|
||||
auto&& exss = e.simplify(scaled<double>(0.1));
|
||||
for(ExPolygon& ep : exss) tmp.emplace_back(std::move(ep));
|
||||
}
|
||||
|
||||
ExPolygons utmp = union_ex(tmp);
|
||||
|
||||
for(auto& o : utmp) {
|
||||
auto&& smp = o.simplify(scaled<double>(0.1));
|
||||
output.insert(output.end(), smp.begin(), smp.end());
|
||||
}
|
||||
}
|
||||
|
||||
void pad_blueprint(const indexed_triangle_set &mesh,
|
||||
ExPolygons & output,
|
||||
float h,
|
||||
float layerh,
|
||||
ThrowOnCancel thrfn)
|
||||
{
|
||||
float gnd = float(bounding_box(mesh).min(Z));
|
||||
|
||||
std::vector<float> slicegrid = grid(gnd, gnd + h, layerh);
|
||||
pad_blueprint(mesh, output, slicegrid, thrfn);
|
||||
}
|
||||
|
||||
void create_pad(const ExPolygons & sup_blueprint,
|
||||
const ExPolygons & model_blueprint,
|
||||
indexed_triangle_set &out,
|
||||
const PadConfig & cfg,
|
||||
ThrowOnCancel thr)
|
||||
{
|
||||
auto t = create_pad_geometry(sup_blueprint, model_blueprint, cfg, thr);
|
||||
its_merge(out, t);
|
||||
}
|
||||
|
||||
std::string PadConfig::validate() const
|
||||
{
|
||||
static const double constexpr MIN_BRIM_SIZE_MM = .1;
|
||||
|
||||
if (brim_size_mm < MIN_BRIM_SIZE_MM ||
|
||||
bottom_offset() > brim_size_mm + wing_distance() ||
|
||||
get_waffle_offset(*this) <= MIN_BRIM_SIZE_MM)
|
||||
return _u8L("Pad brim size is too small for the current configuration.");
|
||||
|
||||
return "";
|
||||
}
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
97
src/libslic3r/SLA/Pad.hpp
Normal file
@@ -0,0 +1,97 @@
|
||||
#ifndef SLA_PAD_HPP
|
||||
#define SLA_PAD_HPP
|
||||
|
||||
#include <vector>
|
||||
#include <functional>
|
||||
#include <cmath>
|
||||
#include <string>
|
||||
|
||||
#include <libslic3r/Point.hpp>
|
||||
|
||||
struct indexed_triangle_set;
|
||||
|
||||
namespace Slic3r {
|
||||
|
||||
class ExPolygon;
|
||||
class Polygon;
|
||||
using ExPolygons = std::vector<ExPolygon>;
|
||||
using Polygons = std::vector<Polygon, PointsAllocator<Polygon>>;
|
||||
|
||||
namespace sla {
|
||||
|
||||
using ThrowOnCancel = std::function<void(void)>;
|
||||
|
||||
/// Calculate the polygon representing the silhouette.
|
||||
void pad_blueprint(
|
||||
const indexed_triangle_set &mesh, // input mesh
|
||||
ExPolygons & output, // Output will be merged with
|
||||
const std::vector<float> &, // Exact Z levels to sample
|
||||
ThrowOnCancel thrfn = [] {}); // Function that throws if cancel was requested
|
||||
|
||||
void pad_blueprint(
|
||||
const indexed_triangle_set &mesh,
|
||||
ExPolygons & output,
|
||||
float samplingheight = 0.1f, // The height range to sample
|
||||
float layerheight = 0.05f, // The sampling height
|
||||
ThrowOnCancel thrfn = [] {});
|
||||
|
||||
struct PadConfig {
|
||||
double wall_thickness_mm = 1.;
|
||||
double wall_height_mm = 1.;
|
||||
double max_merge_dist_mm = 50;
|
||||
double wall_slope = std::atan(1.0); // Pi/4, i.e. 45 degrees
|
||||
double brim_size_mm = 1.6;
|
||||
|
||||
struct EmbedObject {
|
||||
double object_gap_mm = 1.;
|
||||
double stick_stride_mm = 10.;
|
||||
double stick_width_mm = 0.5;
|
||||
double stick_penetration_mm = 0.1;
|
||||
bool enabled = false;
|
||||
bool everywhere = false;
|
||||
operator bool() const { return enabled; }
|
||||
} embed_object;
|
||||
|
||||
inline PadConfig() = default;
|
||||
inline PadConfig(double thickness,
|
||||
double height,
|
||||
double mergedist,
|
||||
double slope)
|
||||
: wall_thickness_mm(thickness)
|
||||
, wall_height_mm(height)
|
||||
, max_merge_dist_mm(mergedist)
|
||||
, wall_slope(slope)
|
||||
{}
|
||||
|
||||
inline double bottom_offset() const
|
||||
{
|
||||
return (wall_thickness_mm + wall_height_mm) / std::tan(wall_slope);
|
||||
}
|
||||
|
||||
inline double wing_distance() const
|
||||
{
|
||||
return wall_height_mm / std::tan(wall_slope);
|
||||
}
|
||||
|
||||
inline double full_height() const
|
||||
{
|
||||
return wall_height_mm + wall_thickness_mm;
|
||||
}
|
||||
|
||||
/// Returns the elevation needed for compensating the pad.
|
||||
inline double required_elevation() const { return wall_thickness_mm; }
|
||||
|
||||
std::string validate() const;
|
||||
};
|
||||
|
||||
void create_pad(
|
||||
const ExPolygons & support_contours,
|
||||
const ExPolygons & model_contours,
|
||||
indexed_triangle_set &output_mesh,
|
||||
const PadConfig & = PadConfig(),
|
||||
ThrowOnCancel throw_on_cancel = [] {});
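// Usage sketch (illustrative, names are placeholders): slice the support mesh
// into a 2D blueprint, then generate the pad geometry from it:
//
//     ExPolygons blueprint;
//     pad_blueprint(support_mesh, blueprint);
//
//     indexed_triangle_set pad;
//     create_pad(blueprint, {}, pad, PadConfig{});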
|
||||
|
||||
} // namespace sla
|
||||
} // namespace Slic3r
|
||||
|
||||
#endif // SLA_PAD_HPP
|
||||
86
src/libslic3r/SLA/RasterBase.cpp
Normal file
@@ -0,0 +1,86 @@
|
||||
#ifndef SLARASTER_CPP
|
||||
#define SLARASTER_CPP
|
||||
|
||||
#include <functional>
|
||||
|
||||
#include <libslic3r/SLA/RasterBase.hpp>
|
||||
#include <libslic3r/SLA/AGGRaster.hpp>
|
||||
|
||||
// minz image write:
|
||||
#include <miniz.h>
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
EncodedRaster PNGRasterEncoder::operator()(const void *ptr, size_t w, size_t h,
|
||||
size_t num_components)
|
||||
{
|
||||
std::vector<uint8_t> buf;
|
||||
size_t s = 0;
|
||||
|
||||
void *rawdata = tdefl_write_image_to_png_file_in_memory(
|
||||
ptr, int(w), int(h), int(num_components), &s);
|
||||
|
||||
// On error, data() will return an empty vector. No other info can be
|
||||
// retrieved from miniz anyway...
|
||||
if (rawdata == nullptr) return EncodedRaster({}, "png");
|
||||
|
||||
auto pptr = static_cast<std::uint8_t*>(rawdata);
|
||||
|
||||
buf.reserve(s);
|
||||
std::copy(pptr, pptr + s, std::back_inserter(buf));
|
||||
|
||||
MZ_FREE(rawdata);
|
||||
return EncodedRaster(std::move(buf), "png");
|
||||
}
|
||||
|
||||
std::ostream &operator<<(std::ostream &stream, const EncodedRaster &bytes)
|
||||
{
|
||||
stream.write(reinterpret_cast<const char *>(bytes.data()),
|
||||
std::streamsize(bytes.size()));
|
||||
|
||||
return stream;
|
||||
}
|
||||
|
||||
EncodedRaster PPMRasterEncoder::operator()(const void *ptr, size_t w, size_t h,
|
||||
size_t num_components)
|
||||
{
|
||||
std::vector<uint8_t> buf;
|
||||
|
||||
auto header = std::string("P5 ") +
|
||||
std::to_string(w) + " " +
|
||||
std::to_string(h) + " " + "255 ";
|
||||
|
||||
auto sz = w * h * num_components;
|
||||
size_t s = sz + header.size();
|
||||
|
||||
buf.reserve(s);
|
||||
|
||||
auto buff = reinterpret_cast<const std::uint8_t*>(ptr);
|
||||
std::copy(header.begin(), header.end(), std::back_inserter(buf));
|
||||
std::copy(buff, buff+sz, std::back_inserter(buf));
|
||||
|
||||
return EncodedRaster(std::move(buf), "ppm");
|
||||
}
|
||||
|
||||
std::unique_ptr<RasterBase> create_raster_grayscale_aa(
|
||||
const Resolution &res,
|
||||
const PixelDim &pxdim,
|
||||
double gamma,
|
||||
const RasterBase::Trafo &tr)
|
||||
{
|
||||
std::unique_ptr<RasterBase> rst;
|
||||
|
||||
if (gamma > 0)
|
||||
rst = std::make_unique<RasterGrayscaleAAGammaPower>(res, pxdim, tr, gamma);
|
||||
else if (std::abs(gamma - 1.) < 1e-6)
|
||||
rst = std::make_unique<RasterGrayscaleAA>(res, pxdim, tr, agg::gamma_none());
|
||||
else
|
||||
rst = std::make_unique<RasterGrayscaleAA>(res, pxdim, tr, agg::gamma_threshold(.5));
|
||||
|
||||
return rst;
|
||||
}
|
||||
|
||||
} // namespace sla
|
||||
} // namespace Slic3r
|
||||
|
||||
#endif // SLARASTER_CPP
|
||||
118
src/libslic3r/SLA/RasterBase.hpp
Normal file
@@ -0,0 +1,118 @@
|
||||
#ifndef SLA_RASTERBASE_HPP
|
||||
#define SLA_RASTERBASE_HPP
|
||||
|
||||
#include <ostream>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include <array>
|
||||
#include <utility>
|
||||
#include <cstdint>
|
||||
|
||||
#include <libslic3r/ExPolygon.hpp>
|
||||
|
||||
namespace Slic3r {
|
||||
|
||||
namespace sla {
|
||||
|
||||
// Raw byte buffer paired with its size. Suitable for compressed image data.
|
||||
class EncodedRaster {
|
||||
protected:
|
||||
std::vector<uint8_t> m_buffer;
|
||||
std::string m_ext;
|
||||
public:
|
||||
EncodedRaster() = default;
|
||||
explicit EncodedRaster(std::vector<uint8_t> &&buf, std::string ext)
|
||||
: m_buffer(std::move(buf)), m_ext(std::move(ext))
|
||||
{}
|
||||
|
||||
size_t size() const { return m_buffer.size(); }
|
||||
const void * data() const { return m_buffer.data(); }
|
||||
const char * extension() const { return m_ext.c_str(); }
|
||||
};
|
||||
|
||||
/// Type that represents a resolution in pixels.
|
||||
struct Resolution {
|
||||
size_t width_px = 0;
|
||||
size_t height_px = 0;
|
||||
|
||||
Resolution() = default;
|
||||
Resolution(size_t w, size_t h) : width_px(w), height_px(h) {}
|
||||
size_t pixels() const { return width_px * height_px; }
|
||||
};
|
||||
|
||||
/// Types that represents the dimension of a pixel in millimeters.
|
||||
struct PixelDim {
|
||||
double w_mm = 1.;
|
||||
double h_mm = 1.;
|
||||
|
||||
PixelDim() = default;
|
||||
PixelDim(double px_width_mm, double px_height_mm)
|
||||
: w_mm(px_width_mm), h_mm(px_height_mm)
|
||||
{}
|
||||
};
|
||||
|
||||
using RasterEncoder =
|
||||
std::function<EncodedRaster(const void *ptr, size_t w, size_t h, size_t num_components)>;
|
||||
|
||||
class RasterBase {
|
||||
public:
|
||||
|
||||
enum Orientation { roLandscape, roPortrait };
|
||||
|
||||
using TMirroring = std::array<bool, 2>;
|
||||
static const constexpr TMirroring NoMirror = {false, false};
|
||||
static const constexpr TMirroring MirrorX = {true, false};
|
||||
static const constexpr TMirroring MirrorY = {false, true};
|
||||
static const constexpr TMirroring MirrorXY = {true, true};
|
||||
|
||||
struct Trafo {
|
||||
bool mirror_x = false, mirror_y = false, flipXY = false;
|
||||
coord_t center_x = 0, center_y = 0;
|
||||
|
||||
// Portrait orientation will make sure the drawn polygons are rotated
|
||||
// by 90 degrees.
|
||||
Trafo(Orientation o = roLandscape, const TMirroring &mirror = NoMirror)
|
||||
// XY flipping implicitly does an X mirror
|
||||
: mirror_x(o == roPortrait ? !mirror[0] : mirror[0])
|
||||
, mirror_y(!mirror[1]) // Makes the raster origin the top left corner
|
||||
, flipXY(o == roPortrait)
|
||||
{}
|
||||
|
||||
TMirroring get_mirror() const { return { (flipXY ? !mirror_x : mirror_x), mirror_y}; }
|
||||
Orientation get_orientation() const { return flipXY ? roPortrait : roLandscape; }
|
||||
Point get_center() const { return {center_x, center_y}; }
|
||||
};
|
||||
|
||||
virtual ~RasterBase() = default;
|
||||
|
||||
/// Draw a polygon with holes.
|
||||
virtual void draw(const ExPolygon& poly) = 0;
|
||||
|
||||
/// Get the resolution of the raster.
|
||||
// virtual Resolution resolution() const = 0;
|
||||
// virtual PixelDim pixel_dimensions() const = 0;
|
||||
virtual Trafo trafo() const = 0;
|
||||
|
||||
virtual EncodedRaster encode(RasterEncoder encoder) const = 0;
|
||||
};
|
||||
|
||||
struct PNGRasterEncoder {
|
||||
EncodedRaster operator()(const void *ptr, size_t w, size_t h, size_t num_components);
|
||||
};
|
||||
|
||||
struct PPMRasterEncoder {
|
||||
EncodedRaster operator()(const void *ptr, size_t w, size_t h, size_t num_components);
|
||||
};
|
||||
|
||||
std::ostream& operator<<(std::ostream &stream, const EncodedRaster &bytes);
|
||||
|
||||
// If gamma is zero, thresholding will be performed which disables AA.
|
||||
std::unique_ptr<RasterBase> create_raster_grayscale_aa(
|
||||
const Resolution &res,
|
||||
const PixelDim &pxdim,
|
||||
double gamma = 1.0,
|
||||
const RasterBase::Trafo &tr = {});
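// Usage sketch (illustrative): rasterize one ExPolygon into an anti-aliased
// grayscale image and encode it as PNG ("expoly" is a placeholder):
//
//     auto raster = create_raster_grayscale_aa(Resolution{1920, 1080},
//                                              PixelDim{0.05, 0.05});
//     raster->draw(expoly);
//     EncodedRaster png = raster->encode(PNGRasterEncoder{});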
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
|
||||
#endif // SLA_RASTERBASE_HPP
|
||||
91
src/libslic3r/SLA/RasterToPolygons.cpp
Normal file
@@ -0,0 +1,91 @@
|
||||
#include "RasterToPolygons.hpp"
|
||||
|
||||
#include "AGGRaster.hpp"
|
||||
#include "libslic3r/MarchingSquares.hpp"
|
||||
#include "MTUtils.hpp"
|
||||
#include "ClipperUtils.hpp"
|
||||
|
||||
namespace marchsq {
|
||||
|
||||
// Specialize this struct to register a raster type for the Marching squares algorithm
|
||||
template<> struct _RasterTraits<Slic3r::sla::RasterGrayscaleAA> {
|
||||
using Rst = Slic3r::sla::RasterGrayscaleAA;
|
||||
|
||||
// The type of pixel cell in the raster
|
||||
using ValueType = uint8_t;
|
||||
|
||||
// Value at a given position
|
||||
static uint8_t get(const Rst &rst, size_t row, size_t col) { return rst.read_pixel(col, row); }
|
||||
|
||||
// Number of rows and cols of the raster
|
||||
static size_t rows(const Rst &rst) { return rst.resolution().height_px; }
|
||||
static size_t cols(const Rst &rst) { return rst.resolution().width_px; }
|
||||
};
|
||||
|
||||
} // namespace marchsq
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
template<class Fn> void foreach_vertex(ExPolygon &poly, Fn &&fn)
|
||||
{
|
||||
for (auto &p : poly.contour.points) fn(p);
|
||||
for (auto &h : poly.holes)
|
||||
for (auto &p : h.points) fn(p);
|
||||
}
|
||||
|
||||
ExPolygons raster_to_polygons(const RasterGrayscaleAA &rst, Vec2i windowsize)
|
||||
{
|
||||
size_t rows = rst.resolution().height_px, cols = rst.resolution().width_px;
|
||||
|
||||
if (rows < 2 || cols < 2) return {};
|
||||
|
||||
Polygons polys;
|
||||
long w_rows = std::max(2l, long(windowsize.y()));
|
||||
long w_cols = std::max(2l, long(windowsize.x()));
|
||||
|
||||
std::vector<marchsq::Ring> rings =
|
||||
marchsq::execute(rst, 128, {w_rows, w_cols});
|
||||
|
||||
polys.reserve(rings.size());
|
||||
|
||||
auto pxd = rst.pixel_dimensions();
|
||||
pxd.w_mm = (rst.resolution().width_px * pxd.w_mm) / (rst.resolution().width_px - 1);
|
||||
pxd.h_mm = (rst.resolution().height_px * pxd.h_mm) / (rst.resolution().height_px - 1);
|
||||
|
||||
for (const marchsq::Ring &ring : rings) {
|
||||
Polygon poly; Points &pts = poly.points;
|
||||
pts.reserve(ring.size());
|
||||
|
||||
for (const marchsq::Coord &crd : ring)
|
||||
pts.emplace_back(scaled(crd.c * pxd.w_mm), scaled(crd.r * pxd.h_mm));
|
||||
|
||||
polys.emplace_back(poly);
|
||||
}
|
||||
|
||||
// reverse the raster transformations
|
||||
ExPolygons unioned = union_ex(polys);
|
||||
coord_t width = scaled(cols * pxd.h_mm), height = scaled(rows * pxd.w_mm);
|
||||
|
||||
auto tr = rst.trafo();
|
||||
for (ExPolygon &expoly : unioned) {
|
||||
if (tr.mirror_y)
|
||||
foreach_vertex(expoly, [height](Point &p) {p.y() = height - p.y(); });
|
||||
|
||||
if (tr.mirror_x)
|
||||
foreach_vertex(expoly, [width](Point &p) {p.x() = width - p.x(); });
|
||||
|
||||
expoly.translate(-tr.center_x, -tr.center_y);
|
||||
|
||||
if (tr.flipXY)
|
||||
foreach_vertex(expoly, [](Point &p) { std::swap(p.x(), p.y()); });
|
||||
|
||||
if ((tr.mirror_x + tr.mirror_y + tr.flipXY) % 2) {
|
||||
expoly.contour.reverse();
|
||||
for (auto &h : expoly.holes) h.reverse();
|
||||
}
|
||||
}
|
||||
|
||||
return unioned;
|
||||
}
|
||||
|
||||
}} // namespace Slic3r
|
||||
15
src/libslic3r/SLA/RasterToPolygons.hpp
Normal file
@@ -0,0 +1,15 @@
#ifndef RASTERTOPOLYGONS_HPP
#define RASTERTOPOLYGONS_HPP

#include "libslic3r/ExPolygon.hpp"

namespace Slic3r {
namespace sla {

class RasterGrayscaleAA;

ExPolygons raster_to_polygons(const RasterGrayscaleAA &rst, Vec2i windowsize = {2, 2});

}} // namespace Slic3r::sla

#endif // RASTERTOPOLYGONS_HPP
46
src/libslic3r/SLA/ReprojectPointsOnMesh.hpp
Normal file
@@ -0,0 +1,46 @@
|
||||
#ifndef REPROJECTPOINTSONMESH_HPP
|
||||
#define REPROJECTPOINTSONMESH_HPP
|
||||
|
||||
#include "libslic3r/Point.hpp"
|
||||
#include "libslic3r/AABBMesh.hpp"
|
||||
#include "SupportPoint.hpp"
|
||||
#include "Hollowing.hpp"
|
||||
#include "libslic3r/Model.hpp"
|
||||
|
||||
#include <tbb/parallel_for.h>
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
template<class Pt> Vec3d pos(const Pt &p) { return p.pos.template cast<double>(); }
|
||||
template<class Pt> void pos(Pt &p, const Vec3d &pp) { p.pos = pp.cast<float>(); }
|
||||
|
||||
template<class PointType>
|
||||
void reproject_support_points(const AABBMesh &mesh, std::vector<PointType> &pts)
|
||||
{
|
||||
tbb::parallel_for(size_t(0), pts.size(), [&mesh, &pts](size_t idx) {
|
||||
int junk;
|
||||
Vec3d new_pos;
|
||||
mesh.squared_distance(pos(pts[idx]), junk, new_pos);
|
||||
pos(pts[idx], new_pos);
|
||||
});
|
||||
}
|
||||
|
||||
inline void reproject_points_and_holes(ModelObject *object)
|
||||
{
|
||||
if (!object) return;

bool has_sppoints = !object->sla_support_points.empty();
bool has_holes = !object->sla_drain_holes.empty();

if (!has_holes && !has_sppoints) return;
|
||||
|
||||
TriangleMesh rmsh = object->raw_mesh();
|
||||
AABBMesh emesh{rmsh};
|
||||
|
||||
if (has_sppoints)
|
||||
reproject_support_points(emesh, object->sla_support_points);
|
||||
|
||||
if (has_holes)
|
||||
reproject_support_points(emesh, object->sla_drain_holes);
|
||||
}
|
||||
|
||||
}}
|
||||
#endif // REPROJECTPOINTSONMESH_HPP
|
||||
478
src/libslic3r/SLA/Rotfinder.cpp
Normal file
@@ -0,0 +1,478 @@
|
||||
#include <limits>
|
||||
|
||||
#include <libslic3r/SLA/Rotfinder.hpp>
|
||||
|
||||
#include <libslic3r/Execution/ExecutionTBB.hpp>
|
||||
#include <libslic3r/Execution/ExecutionSeq.hpp>
|
||||
|
||||
#include <libslic3r/Optimize/BruteforceOptimizer.hpp>
|
||||
#include <libslic3r/Optimize/NLoptOptimizer.hpp>
|
||||
|
||||
#include "libslic3r/SLAPrint.hpp"
|
||||
#include "libslic3r/PrintConfig.hpp"
|
||||
|
||||
#include <libslic3r/Geometry.hpp>
|
||||
|
||||
#include <thread>
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
namespace {
|
||||
|
||||
inline const Vec3f DOWN = {0.f, 0.f, -1.f};
|
||||
constexpr double POINTS_PER_UNIT_AREA = 1.f;
|
||||
|
||||
// Get the vertices of a triangle directly in an array of 3 points
|
||||
std::array<Vec3f, 3> get_triangle_vertices(const TriangleMesh &mesh,
|
||||
size_t faceidx)
|
||||
{
|
||||
const auto &face = mesh.its.indices[faceidx];
|
||||
return {mesh.its.vertices[face(0)],
|
||||
mesh.its.vertices[face(1)],
|
||||
mesh.its.vertices[face(2)]};
|
||||
}
|
||||
|
||||
std::array<Vec3f, 3> get_transformed_triangle(const TriangleMesh &mesh,
|
||||
const Transform3f & tr,
|
||||
size_t faceidx)
|
||||
{
|
||||
const auto &tri = get_triangle_vertices(mesh, faceidx);
|
||||
return {tr * tri[0], tr * tri[1], tr * tri[2]};
|
||||
}
|
||||
|
||||
template<class T> Vec<3, T> normal(const std::array<Vec<3, T>, 3> &tri)
|
||||
{
|
||||
Vec<3, T> U = tri[1] - tri[0];
|
||||
Vec<3, T> V = tri[2] - tri[0];
|
||||
return U.cross(V).normalized();
|
||||
}
|
||||
|
||||
template<class T, class AccessFn>
|
||||
T sum_score(AccessFn &&accessfn, size_t facecount, size_t Nthreads)
|
||||
{
|
||||
T initv = 0.;
|
||||
auto mergefn = [](T a, T b) { return a + b; };
|
||||
size_t grainsize = facecount / Nthreads;
|
||||
size_t from = 0, to = facecount;
|
||||
|
||||
return execution::reduce(ex_tbb, from, to, initv, mergefn, accessfn, grainsize);
|
||||
}
|
||||
|
||||
// Get area and normal of a triangle
|
||||
struct Facestats {
|
||||
Vec3f normal;
|
||||
double area;
|
||||
|
||||
explicit Facestats(const std::array<Vec3f, 3> &triangle)
|
||||
{
|
||||
Vec3f U = triangle[1] - triangle[0];
|
||||
Vec3f V = triangle[2] - triangle[0];
|
||||
Vec3f C = U.cross(V);
|
||||
normal = C.normalized();
|
||||
area = 0.5 * C.norm();
|
||||
}
|
||||
};
|
||||
|
||||
// Try to guess the number of support points needed to support a mesh
|
||||
double get_misalginment_score(const TriangleMesh &mesh, const Transform3f &tr)
|
||||
{
|
||||
if (mesh.its.vertices.empty()) return NaNd;
|
||||
|
||||
auto accessfn = [&mesh, &tr](size_t fi) {
|
||||
Facestats fc{get_transformed_triangle(mesh, tr, fi)};
|
||||
|
||||
float score = fc.area
|
||||
* (std::abs(fc.normal.dot(Vec3f::UnitX()))
|
||||
+ std::abs(fc.normal.dot(Vec3f::UnitY()))
|
||||
+ std::abs(fc.normal.dot(Vec3f::UnitZ())));
|
||||
|
||||
// We should score against the alignment with the reference planes
|
||||
return scaled<int_fast64_t>(score);
|
||||
};
|
||||
|
||||
size_t facecount = mesh.its.indices.size();
|
||||
size_t Nthreads = std::thread::hardware_concurrency();
|
||||
double S = unscaled(sum_score<int_fast64_t>(accessfn, facecount, Nthreads));
|
||||
|
||||
return S / facecount;
|
||||
}
|
||||
|
||||
// The score function for a particular face
|
||||
inline double get_supportedness_score(const Facestats &fc)
|
||||
{
|
||||
// Simply get the angle (acos of dot product) between the face normal and
|
||||
// the DOWN vector.
|
||||
float cosphi = fc.normal.dot(DOWN);
|
||||
float phi = 1.f - std::acos(cosphi) / float(PI);
|
||||
|
||||
// Make the huge slopes more significant than the smaller slopes
|
||||
phi = phi * phi * phi;
|
||||
|
||||
// Multiply with the square root of face area of the current face,
|
||||
// the area is less important as it grows.
|
||||
// This makes many smaller overhangs a bigger impact.
|
||||
return std::sqrt(fc.area) * POINTS_PER_UNIT_AREA * phi;
|
||||
}
|
||||
|
||||
// Try to guess the number of support points needed to support a mesh
|
||||
double get_supportedness_score(const TriangleMesh &mesh, const Transform3f &tr)
|
||||
{
|
||||
if (mesh.its.vertices.empty()) return NaNd;
|
||||
|
||||
auto accessfn = [&mesh, &tr](size_t fi) {
|
||||
Facestats fc{get_transformed_triangle(mesh, tr, fi)};
|
||||
return scaled<int_fast64_t>(get_supportedness_score(fc));
|
||||
};
|
||||
|
||||
size_t facecount = mesh.its.indices.size();
|
||||
size_t Nthreads = std::thread::hardware_concurrency();
|
||||
double S = unscaled(sum_score<int_fast64_t>(accessfn, facecount, Nthreads));
|
||||
|
||||
return S / facecount;
|
||||
}
|
||||
|
||||
// Find transformed mesh ground level without copy and with parallel reduce.
|
||||
float find_ground_level(const TriangleMesh &mesh,
|
||||
const Transform3f & tr,
|
||||
size_t threads)
|
||||
{
|
||||
size_t vsize = mesh.its.vertices.size();
|
||||
|
||||
auto minfn = [](float a, float b) { return std::min(a, b); };
|
||||
|
||||
auto accessfn = [&mesh, &tr] (size_t vi) {
|
||||
return (tr * mesh.its.vertices[vi]).z();
|
||||
};
|
||||
|
||||
auto zmin = std::numeric_limits<float>::max();
|
||||
size_t granularity = vsize / threads;
|
||||
return execution::reduce(ex_tbb, size_t(0), vsize, zmin, minfn, accessfn, granularity);
|
||||
}
|
||||
|
||||
double get_supportedness_onfloor_score(const TriangleMesh &mesh,
|
||||
const Transform3f &tr)
|
||||
{
|
||||
if (mesh.its.vertices.empty()) return NaNd;
|
||||
|
||||
size_t Nthreads = std::thread::hardware_concurrency();
|
||||
|
||||
float zmin = find_ground_level(mesh, tr, Nthreads);
|
||||
float zlvl = zmin + 0.1f; // Set up a slight tolerance from z level
|
||||
|
||||
auto accessfn = [&mesh, &tr, zlvl](size_t fi) {
|
||||
std::array<Vec3f, 3> tri = get_transformed_triangle(mesh, tr, fi);
|
||||
Facestats fc{tri};
|
||||
|
||||
if (tri[0].z() <= zlvl && tri[1].z() <= zlvl && tri[2].z() <= zlvl)
|
||||
return -2 * fc.area * POINTS_PER_UNIT_AREA;
|
||||
|
||||
return get_supportedness_score(fc);
|
||||
};
|
||||
|
||||
size_t facecount = mesh.its.indices.size();
|
||||
double S = unscaled(sum_score<int_fast64_t>(accessfn, facecount, Nthreads));
|
||||
|
||||
return S / facecount;
|
||||
}
|
||||
|
||||
using XYRotation = std::array<double, 2>;
|
||||
|
||||
// prepare the rotation transformation
|
||||
Transform3f to_transform3f(const XYRotation &rot)
|
||||
{
|
||||
Transform3f rt = Transform3f::Identity();
|
||||
rt.rotate(Eigen::AngleAxisf(float(rot[1]), Vec3f::UnitY()));
|
||||
rt.rotate(Eigen::AngleAxisf(float(rot[0]), Vec3f::UnitX()));
|
||||
|
||||
return rt;
|
||||
}
|
||||
|
||||
XYRotation from_transform3f(const Transform3f &tr)
|
||||
{
|
||||
Vec3d rot3 = Geometry::Transformation{tr.cast<double>()}.get_rotation();
|
||||
return {rot3.x(), rot3.y()};
|
||||
}
|
||||
|
||||
inline bool is_on_floor(const SLAPrintObjectConfig &cfg)
|
||||
{
|
||||
auto opt_elevation = cfg.support_object_elevation.getFloat();
|
||||
auto opt_padaround = cfg.pad_around_object.getBool();
|
||||
|
||||
return opt_elevation < EPSILON || opt_padaround;
|
||||
}
|
||||
|
||||
// collect the rotations for each face of the convex hull
|
||||
std::vector<XYRotation> get_chull_rotations(const TriangleMesh &mesh, size_t max_count)
|
||||
{
|
||||
TriangleMesh chull = mesh.convex_hull_3d();
|
||||
double chull2d_area = chull.convex_hull().area();
|
||||
double area_threshold = chull2d_area / (scaled<double>(1e3) * scaled(1.));
|
||||
|
||||
size_t facecount = chull.its.indices.size();
|
||||
|
||||
struct RotArea { XYRotation rot; double area; };
|
||||
|
||||
auto inputs = reserve_vector<RotArea>(facecount);
|
||||
|
||||
auto rotcmp = [](const RotArea &r1, const RotArea &r2) {
|
||||
double xdiff = r1.rot[X] - r2.rot[X], ydiff = r1.rot[Y] - r2.rot[Y];
|
||||
return std::abs(xdiff) < EPSILON ? ydiff < 0. : xdiff < 0.;
|
||||
};
|
||||
|
||||
auto eqcmp = [](const XYRotation &r1, const XYRotation &r2) {
|
||||
double xdiff = r1[X] - r2[X], ydiff = r1[Y] - r2[Y];
|
||||
return std::abs(xdiff) < EPSILON && std::abs(ydiff) < EPSILON;
|
||||
};
|
||||
|
||||
for (size_t fi = 0; fi < facecount; ++fi) {
|
||||
Facestats fc{get_triangle_vertices(chull, fi)};
|
||||
|
||||
if (fc.area > area_threshold) {
|
||||
auto q = Eigen::Quaternionf{}.FromTwoVectors(fc.normal, DOWN);
|
||||
XYRotation rot = from_transform3f(Transform3f::Identity() * q);
|
||||
RotArea ra = {rot, fc.area};
|
||||
|
||||
auto it = std::lower_bound(inputs.begin(), inputs.end(), ra, rotcmp);
|
||||
|
||||
if (it == inputs.end() || !eqcmp(it->rot, rot))
|
||||
inputs.insert(it, ra);
|
||||
}
|
||||
}
|
||||
|
||||
inputs.shrink_to_fit();
|
||||
if (!max_count) max_count = inputs.size();
|
||||
std::sort(inputs.begin(), inputs.end(),
|
||||
[](const RotArea &ra, const RotArea &rb) {
|
||||
return ra.area > rb.area;
|
||||
});
|
||||
|
||||
auto ret = reserve_vector<XYRotation>(std::min(max_count, inputs.size()));
|
||||
for (const RotArea &ra : inputs) ret.emplace_back(ra.rot);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Find the best score from a set of function inputs. Evaluate for every point.
|
||||
template<size_t N, class Fn, class It, class StopCond>
|
||||
std::array<double, N> find_min_score(Fn &&fn, It from, It to, StopCond &&stopfn)
|
||||
{
|
||||
std::array<double, N> ret = {};
|
||||
|
||||
double score = std::numeric_limits<double>::max();
|
||||
|
||||
size_t Nthreads = std::thread::hardware_concurrency();
|
||||
size_t dist = std::distance(from, to);
|
||||
std::vector<double> scores(dist, score);
|
||||
|
||||
execution::for_each(
|
||||
ex_tbb, size_t(0), dist, [&stopfn, &scores, &fn, &from](size_t i) {
|
||||
if (stopfn()) return;
|
||||
|
||||
scores[i] = fn(*(from + i));
|
||||
},
|
||||
dist / Nthreads);
|
||||
|
||||
auto it = std::min_element(scores.begin(), scores.end());
|
||||
|
||||
if (it != scores.end())
|
||||
ret = *(from + std::distance(scores.begin(), it));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
|
||||
|
||||
template<unsigned MAX_ITER>
|
||||
struct RotfinderBoilerplate {
|
||||
static constexpr unsigned MAX_TRIES = MAX_ITER;
|
||||
|
||||
int status = 0, prev_status = 0;
|
||||
TriangleMesh mesh;
|
||||
unsigned max_tries;
|
||||
const RotOptimizeParams ¶ms;
|
||||
|
||||
// Assemble the mesh with the correct transformation to be used in rotation
|
||||
// optimization.
|
||||
static TriangleMesh get_mesh_to_rotate(const ModelObject &mo)
|
||||
{
|
||||
TriangleMesh mesh = mo.raw_mesh();
|
||||
|
||||
ModelInstance *mi = mo.instances[0];
|
||||
const Geometry::Transformation trafo = mi->get_transformation();
|
||||
Transform3d trafo_instance = trafo.get_scaling_factor_matrix() * trafo.get_mirror_matrix();
|
||||
mesh.transform(trafo_instance);
|
||||
|
||||
return mesh;
|
||||
}
|
||||
|
||||
RotfinderBoilerplate(const ModelObject &mo, const RotOptimizeParams &p)
|
||||
: mesh{get_mesh_to_rotate(mo)}
|
||||
, max_tries(p.accuracy() * MAX_TRIES)
|
||||
, params{p}
|
||||
{}
|
||||
|
||||
void statusfn() {
|
||||
int s = status * 100 / max_tries;
|
||||
if (s != prev_status) {
|
||||
params.statuscb()(s);
|
||||
prev_status = s;
|
||||
}
|
||||
|
||||
++status;
|
||||
}
|
||||
|
||||
bool stopcond() { return ! params.statuscb()(-1); }
|
||||
};
|
||||
|
||||
Vec2d find_best_misalignment_rotation(const ModelObject & mo,
|
||||
const RotOptimizeParams ¶ms)
|
||||
{
|
||||
RotfinderBoilerplate<1000> bp{mo, params};
|
||||
|
||||
// Preparing the optimizer.
|
||||
size_t gridsize = std::sqrt(bp.max_tries);
|
||||
opt::Optimizer<opt::AlgBruteForce> solver(
|
||||
opt::StopCriteria{}.max_iterations(bp.max_tries)
|
||||
.stop_condition([&bp] { return bp.stopcond(); }),
|
||||
gridsize
|
||||
);
|
||||
|
||||
// We are searching rotations around only two axes x, y. Thus the
|
||||
// problem becomes a 2 dimensional optimization task.
|
||||
// We can specify the bounds for a dimension in the following way:
|
||||
auto bounds = opt::bounds({ {-PI, PI}, {-PI, PI} });
|
||||
|
||||
auto result = solver.to_max().optimize(
|
||||
[&bp] (const XYRotation &rot)
|
||||
{
|
||||
bp.statusfn();
|
||||
return get_misalginment_score(bp.mesh, to_transform3f(rot));
|
||||
}, opt::initvals({0., 0.}), bounds);
|
||||
|
||||
return {result.optimum[0], result.optimum[1]};
|
||||
}
|
||||
|
||||
Vec2d find_least_supports_rotation(const ModelObject & mo,
|
||||
const RotOptimizeParams ¶ms)
|
||||
{
|
||||
RotfinderBoilerplate<1000> bp{mo, params};
|
||||
|
||||
SLAPrintObjectConfig pocfg;
|
||||
if (params.print_config())
|
||||
pocfg.apply(*params.print_config(), true);
|
||||
|
||||
pocfg.apply(mo.config.get());
|
||||
|
||||
XYRotation rot;
|
||||
|
||||
// Different search methods have to be used depending on the model elevation
|
||||
if (is_on_floor(pocfg)) {
|
||||
|
||||
std::vector<XYRotation> inputs = get_chull_rotations(bp.mesh, bp.max_tries);
|
||||
bp.max_tries = inputs.size();
|
||||
|
||||
// If the model can be placed on the bed directly, we only need to
|
||||
// check the 3D convex hull face rotations.
|
||||
|
||||
auto objfn = [&bp](const XYRotation &rot) {
|
||||
bp.statusfn();
|
||||
Transform3f tr = to_transform3f(rot);
|
||||
return get_supportedness_onfloor_score(bp.mesh, tr);
|
||||
};
|
||||
|
||||
rot = find_min_score<2>(objfn, inputs.begin(), inputs.end(), [&bp] {
|
||||
return bp.stopcond();
|
||||
});
|
||||
|
||||
} else {
|
||||
// Preparing the optimizer.
|
||||
size_t gridsize = std::sqrt(bp.max_tries); // 2D grid has gridsize^2 calls
|
||||
opt::Optimizer<opt::AlgBruteForce> solver(
|
||||
opt::StopCriteria{}.max_iterations(bp.max_tries)
|
||||
.stop_condition([&bp] { return bp.stopcond(); }),
|
||||
gridsize
|
||||
);
|
||||
|
||||
// We are searching rotations around only two axes x, y. Thus the
|
||||
// problem becomes a 2 dimensional optimization task.
|
||||
// We can specify the bounds for a dimension in the following way:
|
||||
auto bounds = opt::bounds({ {-PI, PI}, {-PI, PI} });
|
||||
|
||||
auto result = solver.to_min().optimize(
|
||||
[&bp] (const XYRotation &rot)
|
||||
{
|
||||
bp.statusfn();
|
||||
return get_supportedness_score(bp.mesh, to_transform3f(rot));
|
||||
}, opt::initvals({0., 0.}), bounds);
|
||||
|
||||
// Save the result
|
||||
rot = result.optimum;
|
||||
}
|
||||
|
||||
return {rot[0], rot[1]};
|
||||
}
|
||||
|
||||
inline BoundingBoxf3 bounding_box_with_tr(const indexed_triangle_set &its,
|
||||
const Transform3f &tr)
|
||||
{
|
||||
if (its.vertices.empty())
|
||||
return {};
|
||||
|
||||
Vec3f bmin = tr * its.vertices.front(), bmax = tr * its.vertices.front();
|
||||
|
||||
for (const Vec3f &p : its.vertices) {
|
||||
Vec3f pp = tr * p;
|
||||
bmin = pp.cwiseMin(bmin);
|
||||
bmax = pp.cwiseMax(bmax);
|
||||
}
|
||||
|
||||
return {bmin.cast<double>(), bmax.cast<double>()};
|
||||
}
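// Sketch: this mirrors the objective lambda defined below -- the Z extent of
// the transformed convex hull is the quantity being minimized when searching
// for the minimal print height.
static double example_z_height_under_rotation(const indexed_triangle_set &its,
                                              const XYRotation           &rot)
{
    return bounding_box_with_tr(its, to_transform3f(rot)).size().z();
}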
|
||||
|
||||
Vec2d find_min_z_height_rotation(const ModelObject &mo,
|
||||
const RotOptimizeParams ¶ms)
|
||||
{
|
||||
RotfinderBoilerplate<1000> bp{mo, params};
|
||||
|
||||
TriangleMesh chull = bp.mesh.convex_hull_3d();
|
||||
auto inputs = reserve_vector<XYRotation>(chull.its.indices.size());
|
||||
auto rotcmp = [](const XYRotation &r1, const XYRotation &r2) {
|
||||
double xdiff = r1[X] - r2[X], ydiff = r1[Y] - r2[Y];
|
||||
return std::abs(xdiff) < EPSILON ? ydiff < 0. : xdiff < 0.;
|
||||
};
|
||||
auto eqcmp = [](const XYRotation &r1, const XYRotation &r2) {
|
||||
double xdiff = r1[X] - r2[X], ydiff = r1[Y] - r2[Y];
|
||||
return std::abs(xdiff) < EPSILON && std::abs(ydiff) < EPSILON;
|
||||
};
|
||||
|
||||
for (size_t fi = 0; fi < chull.its.indices.size(); ++fi) {
|
||||
Facestats fc{get_triangle_vertices(chull, fi)};
|
||||
|
||||
auto q = Eigen::Quaternionf{}.FromTwoVectors(fc.normal, DOWN);
|
||||
XYRotation rot = from_transform3f(Transform3f::Identity() * q);
|
||||
|
||||
auto it = std::lower_bound(inputs.begin(), inputs.end(), rot, rotcmp);
|
||||
|
||||
if (it == inputs.end() || !eqcmp(*it, rot))
|
||||
inputs.insert(it, rot);
|
||||
}
|
||||
|
||||
inputs.shrink_to_fit();
|
||||
bp.max_tries = inputs.size();
|
||||
|
||||
auto objfn = [&bp, &chull](const XYRotation &rot) {
|
||||
bp.statusfn();
|
||||
Transform3f tr = to_transform3f(rot);
|
||||
return bounding_box_with_tr(chull.its, tr).size().z();
|
||||
};
|
||||
|
||||
XYRotation rot = find_min_score<2>(objfn, inputs.begin(), inputs.end(), [&bp] {
|
||||
return bp.stopcond();
|
||||
});
|
||||
|
||||
return {rot[0], rot[1]};
|
||||
}
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
72
src/libslic3r/SLA/Rotfinder.hpp
Normal file
@@ -0,0 +1,72 @@
|
||||
#ifndef SLA_ROTFINDER_HPP
|
||||
#define SLA_ROTFINDER_HPP
|
||||
|
||||
#include <functional>
|
||||
#include <array>
|
||||
|
||||
#include <libslic3r/Point.hpp>
|
||||
|
||||
namespace Slic3r {
|
||||
|
||||
class ModelObject;
|
||||
class SLAPrintObject;
|
||||
class TriangleMesh;
|
||||
class DynamicPrintConfig;
|
||||
|
||||
namespace sla {
|
||||
|
||||
using RotOptimizeStatusCB = std::function<bool(int)>;
|
||||
|
||||
class RotOptimizeParams {
|
||||
float m_accuracy = 1.;
|
||||
const DynamicPrintConfig *m_print_config = nullptr;
|
||||
RotOptimizeStatusCB m_statuscb = [](int) { return true; };
|
||||
|
||||
public:
|
||||
|
||||
RotOptimizeParams &accuracy(float a) { m_accuracy = a; return *this; }
|
||||
RotOptimizeParams &print_config(const DynamicPrintConfig *c)
|
||||
{
|
||||
m_print_config = c;
|
||||
return *this;
|
||||
}
|
||||
RotOptimizeParams &statucb(RotOptimizeStatusCB cb)
|
||||
{
|
||||
m_statuscb = std::move(cb);
|
||||
return *this;
|
||||
}
|
||||
|
||||
float accuracy() const { return m_accuracy; }
|
||||
const DynamicPrintConfig * print_config() const { return m_print_config; }
|
||||
const RotOptimizeStatusCB &statuscb() const { return m_statuscb; }
|
||||
};
|
||||
|
||||
/**
 * The function should find the best rotation for SLA upside down printing.
 *
 * @param modelobj The model object representing the 3d mesh.
 * @param params Optimization parameters, see RotOptimizeParams above:
 * accuracy spans from 0.0f to 1.0f and scales the iteration budget of the
 * optimizer (currently a brute force grid search with roughly
 * accuracy * 1000 tries -- this can change in the future); statuscb is a
 * status indicator callback called with an int argument spanning from 0 to
 * 100. It may not reach 100 if the optimization finds an optimum before the
 * maximum number of iterations is reached. It should return a boolean
 * signaling whether the operation may continue (true) or not (false). A
 * status value lower than 0 shall not update the status but still has to
 * return a valid continuation indicator.
 *
 * @return The rotations around the x and y axes (in radians).
 */
|
||||
Vec2d find_best_misalignment_rotation(const ModelObject &modelobj,
|
||||
const RotOptimizeParams & = {});
|
||||
|
||||
Vec2d find_least_supports_rotation(const ModelObject &modelobj,
|
||||
const RotOptimizeParams & = {});
|
||||
|
||||
Vec2d find_min_z_height_rotation(const ModelObject &mo,
|
||||
const RotOptimizeParams ¶ms = {});
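// Example (illustrative): a status callback honoring the contract documented
// above -- values in [0, 100] report progress, a negative value only asks
// whether the optimization may continue. The 'canceled' flag stands in for
// whatever cancellation mechanism the caller uses.
//
//     bool canceled = false;
//     int  progress = 0;
//     RotOptimizeStatusCB cb = [&](int st) {
//         if (st >= 0) progress = st;   // update a progress indicator
//         return !canceled;             // returning false stops the search
//     };
//     auto params = RotOptimizeParams{}.statucb(std::move(cb)).accuracy(0.5f);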
|
||||
|
||||
} // namespace sla
|
||||
} // namespace Slic3r
|
||||
|
||||
#endif // SLA_ROTFINDER_HPP
|
||||
161
src/libslic3r/SLA/SpatIndex.cpp
Normal file
@@ -0,0 +1,161 @@
|
||||
#include "SpatIndex.hpp"
|
||||
|
||||
// for concave hull merging decisions
|
||||
#include <libslic3r/BoostAdapter.hpp>
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning(push)
|
||||
#pragma warning(disable: 4244)
|
||||
#pragma warning(disable: 4267)
|
||||
#endif
|
||||
|
||||
#include "boost/geometry/index/rtree.hpp"
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning(pop)
|
||||
#endif
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
/* **************************************************************************
|
||||
* PointIndex implementation
|
||||
* ************************************************************************** */
|
||||
|
||||
class PointIndex::Impl {
|
||||
public:
|
||||
using BoostIndex = boost::geometry::index::rtree< PointIndexEl,
|
||||
boost::geometry::index::rstar<16, 4> /* ? */ >;
|
||||
|
||||
BoostIndex m_store;
|
||||
};
|
||||
|
||||
PointIndex::PointIndex(): m_impl(new Impl()) {}
|
||||
PointIndex::~PointIndex() {}
|
||||
|
||||
PointIndex::PointIndex(const PointIndex &cpy): m_impl(new Impl(*cpy.m_impl)) {}
|
||||
PointIndex::PointIndex(PointIndex&& cpy): m_impl(std::move(cpy.m_impl)) {}
|
||||
|
||||
PointIndex& PointIndex::operator=(const PointIndex &cpy)
|
||||
{
|
||||
m_impl.reset(new Impl(*cpy.m_impl));
|
||||
return *this;
|
||||
}
|
||||
|
||||
PointIndex& PointIndex::operator=(PointIndex &&cpy)
|
||||
{
|
||||
m_impl.swap(cpy.m_impl);
|
||||
return *this;
|
||||
}
|
||||
|
||||
void PointIndex::insert(const PointIndexEl &el)
|
||||
{
|
||||
m_impl->m_store.insert(el);
|
||||
}
|
||||
|
||||
bool PointIndex::remove(const PointIndexEl& el)
|
||||
{
|
||||
return m_impl->m_store.remove(el) == 1;
|
||||
}
|
||||
|
||||
std::vector<PointIndexEl>
|
||||
PointIndex::query(std::function<bool(const PointIndexEl &)> fn) const
|
||||
{
|
||||
namespace bgi = boost::geometry::index;
|
||||
|
||||
std::vector<PointIndexEl> ret;
|
||||
m_impl->m_store.query(bgi::satisfies(fn), std::back_inserter(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::vector<PointIndexEl> PointIndex::nearest(const Vec3d &el, unsigned k = 1) const
|
||||
{
|
||||
namespace bgi = boost::geometry::index;
|
||||
std::vector<PointIndexEl> ret; ret.reserve(k);
|
||||
m_impl->m_store.query(bgi::nearest(el, k), std::back_inserter(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
size_t PointIndex::size() const
|
||||
{
|
||||
return m_impl->m_store.size();
|
||||
}
|
||||
|
||||
void PointIndex::foreach(std::function<void (const PointIndexEl &)> fn)
|
||||
{
|
||||
for(auto& el : m_impl->m_store) fn(el);
|
||||
}
|
||||
|
||||
void PointIndex::foreach(std::function<void (const PointIndexEl &)> fn) const
|
||||
{
|
||||
for(const auto &el : m_impl->m_store) fn(el);
|
||||
}
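// Usage sketch (illustrative only): indexing a few points and querying the
// nearest neighbors of an arbitrary location.
static void example_point_index_usage()
{
    PointIndex index;
    index.insert(Vec3d(0., 0., 0.), 0);
    index.insert(Vec3d(1., 0., 0.), 1);
    index.insert(Vec3d(0., 2., 0.), 2);

    // The two stored points closest to (0.9, 0.1, 0) -- ids 1 and 0.
    std::vector<PointIndexEl> closest = index.nearest(Vec3d(0.9, 0.1, 0.), 2);
    (void)closest;
}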
|
||||
|
||||
/* **************************************************************************
|
||||
* BoxIndex implementation
|
||||
* ************************************************************************** */
|
||||
|
||||
class BoxIndex::Impl {
|
||||
public:
|
||||
using BoostIndex = boost::geometry::index::
|
||||
rtree<BoxIndexEl, boost::geometry::index::rstar<16, 4> /* ? */>;
|
||||
|
||||
BoostIndex m_store;
|
||||
};
|
||||
|
||||
BoxIndex::BoxIndex(): m_impl(new Impl()) {}
|
||||
BoxIndex::~BoxIndex() {}
|
||||
|
||||
BoxIndex::BoxIndex(const BoxIndex &cpy): m_impl(new Impl(*cpy.m_impl)) {}
|
||||
BoxIndex::BoxIndex(BoxIndex&& cpy): m_impl(std::move(cpy.m_impl)) {}
|
||||
|
||||
BoxIndex& BoxIndex::operator=(const BoxIndex &cpy)
|
||||
{
|
||||
m_impl.reset(new Impl(*cpy.m_impl));
|
||||
return *this;
|
||||
}
|
||||
|
||||
BoxIndex& BoxIndex::operator=(BoxIndex &&cpy)
|
||||
{
|
||||
m_impl.swap(cpy.m_impl);
|
||||
return *this;
|
||||
}
|
||||
|
||||
void BoxIndex::insert(const BoxIndexEl &el)
|
||||
{
|
||||
m_impl->m_store.insert(el);
|
||||
}
|
||||
|
||||
bool BoxIndex::remove(const BoxIndexEl& el)
|
||||
{
|
||||
return m_impl->m_store.remove(el) == 1;
|
||||
}
|
||||
|
||||
std::vector<BoxIndexEl> BoxIndex::query(const BoundingBox &qrbb,
|
||||
BoxIndex::QueryType qt)
|
||||
{
|
||||
namespace bgi = boost::geometry::index;
|
||||
|
||||
std::vector<BoxIndexEl> ret; ret.reserve(m_impl->m_store.size());
|
||||
|
||||
switch (qt) {
|
||||
case qtIntersects:
|
||||
m_impl->m_store.query(bgi::intersects(qrbb), std::back_inserter(ret));
|
||||
break;
|
||||
case qtWithin:
|
||||
m_impl->m_store.query(bgi::within(qrbb), std::back_inserter(ret));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
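// Usage sketch (illustrative only): finding which of the indexed boxes
// intersect a query box. Coordinates are in scaled units, as everywhere else
// in libslic3r; the values are arbitrary.
static std::vector<BoxIndexEl> example_box_index_query()
{
    BoxIndex index;
    index.insert(BoundingBox(Point(0, 0), Point(1000000, 1000000)), 0);
    index.insert(BoundingBox(Point(2000000, 2000000), Point(3000000, 3000000)), 1);

    // Expected to return only the element with id 0.
    BoundingBox qr(Point(0, 0), Point(1500000, 1500000));
    return index.query(qr, BoxIndex::qtIntersects);
}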
|
||||
|
||||
size_t BoxIndex::size() const
|
||||
{
|
||||
return m_impl->m_store.size();
|
||||
}
|
||||
|
||||
void BoxIndex::foreach(std::function<void (const BoxIndexEl &)> fn)
|
||||
{
|
||||
for(auto& el : m_impl->m_store) fn(el);
|
||||
}
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
97
src/libslic3r/SLA/SpatIndex.hpp
Normal file
@@ -0,0 +1,97 @@
|
||||
#ifndef SLA_SPATINDEX_HPP
|
||||
#define SLA_SPATINDEX_HPP
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
#include <functional>
|
||||
|
||||
#include <Eigen/Geometry>
|
||||
|
||||
#include <libslic3r/BoundingBox.hpp>
|
||||
|
||||
namespace Slic3r {
|
||||
namespace sla {
|
||||
|
||||
typedef Eigen::Matrix<double, 3, 1, Eigen::DontAlign> Vec3d;
|
||||
using PointIndexEl = std::pair<Vec3d, unsigned>;
|
||||
|
||||
class PointIndex {
|
||||
class Impl;
|
||||
|
||||
// We use Pimpl because it takes a long time to compile boost headers which
|
||||
// is the engine of this class. We include it only in the cpp file.
|
||||
std::unique_ptr<Impl> m_impl;
|
||||
public:
|
||||
|
||||
PointIndex();
|
||||
~PointIndex();
|
||||
|
||||
PointIndex(const PointIndex&);
|
||||
PointIndex(PointIndex&&);
|
||||
PointIndex& operator=(const PointIndex&);
|
||||
PointIndex& operator=(PointIndex&&);
|
||||
|
||||
void insert(const PointIndexEl&);
|
||||
bool remove(const PointIndexEl&);
|
||||
|
||||
inline void insert(const Vec3d& v, unsigned idx)
|
||||
{
|
||||
insert(std::make_pair(v, unsigned(idx)));
|
||||
}
|
||||
|
||||
std::vector<PointIndexEl> query(std::function<bool(const PointIndexEl&)>) const;
|
||||
std::vector<PointIndexEl> nearest(const Vec3d&, unsigned k) const;
|
||||
std::vector<PointIndexEl> query(const Vec3d &v, unsigned k) const // wrapper
|
||||
{
|
||||
return nearest(v, k);
|
||||
}
|
||||
|
||||
// For testing
|
||||
size_t size() const;
|
||||
bool empty() const { return size() == 0; }
|
||||
|
||||
void foreach(std::function<void(const PointIndexEl& el)> fn);
|
||||
void foreach(std::function<void(const PointIndexEl& el)> fn) const;
|
||||
};
|
||||
|
||||
using BoxIndexEl = std::pair<Slic3r::BoundingBox, unsigned>;
|
||||
|
||||
class BoxIndex {
|
||||
class Impl;
|
||||
|
||||
// We use Pimpl because it takes a long time to compile boost headers which
|
||||
// is the engine of this class. We include it only in the cpp file.
|
||||
std::unique_ptr<Impl> m_impl;
|
||||
public:
|
||||
|
||||
BoxIndex();
|
||||
~BoxIndex();
|
||||
|
||||
BoxIndex(const BoxIndex&);
|
||||
BoxIndex(BoxIndex&&);
|
||||
BoxIndex& operator=(const BoxIndex&);
|
||||
BoxIndex& operator=(BoxIndex&&);
|
||||
|
||||
void insert(const BoxIndexEl&);
|
||||
void insert(const BoundingBox& bb, unsigned idx)
|
||||
{
|
||||
insert(std::make_pair(bb, unsigned(idx)));
|
||||
}
|
||||
|
||||
bool remove(const BoxIndexEl&);
|
||||
|
||||
enum QueryType { qtIntersects, qtWithin };
|
||||
|
||||
std::vector<BoxIndexEl> query(const BoundingBox&, QueryType qt);
|
||||
|
||||
// For testing
|
||||
size_t size() const;
|
||||
bool empty() const { return size() == 0; }
|
||||
|
||||
void foreach(std::function<void(const BoxIndexEl& el)> fn);
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#endif // SLA_SPATINDEX_HPP
|
||||
74
src/libslic3r/SLA/SupportPoint.hpp
Normal file
@@ -0,0 +1,74 @@
|
||||
#ifndef SLA_SUPPORTPOINT_HPP
|
||||
#define SLA_SUPPORTPOINT_HPP
|
||||
|
||||
#include <libslic3r/Point.hpp>
|
||||
|
||||
namespace Slic3r {
|
||||
|
||||
class ModelObject;
|
||||
|
||||
namespace sla {
|
||||
|
||||
// An enum to keep track of where the current points on the ModelObject came from.
|
||||
enum class PointsStatus {
|
||||
NoPoints, // No points were generated so far.
|
||||
Generating, // The autogeneration algorithm triggered, but not yet finished.
|
||||
AutoGenerated, // Points were autogenerated (i.e. copied from the backend).
|
||||
UserModified // User has done some edits.
|
||||
};
|
||||
|
||||
struct SupportPoint
|
||||
{
|
||||
Vec3f pos;
|
||||
float head_front_radius;
|
||||
bool is_new_island;
|
||||
|
||||
SupportPoint()
|
||||
: pos(Vec3f::Zero()), head_front_radius(0.f), is_new_island(false)
|
||||
{}
|
||||
|
||||
SupportPoint(float pos_x,
|
||||
float pos_y,
|
||||
float pos_z,
|
||||
float head_radius,
|
||||
bool new_island = false)
|
||||
: pos(pos_x, pos_y, pos_z)
|
||||
, head_front_radius(head_radius)
|
||||
, is_new_island(new_island)
|
||||
{}
|
||||
|
||||
SupportPoint(Vec3f position, float head_radius, bool new_island = false)
|
||||
: pos(position)
|
||||
, head_front_radius(head_radius)
|
||||
, is_new_island(new_island)
|
||||
{}
|
||||
|
||||
SupportPoint(Eigen::Matrix<float, 5, 1, Eigen::DontAlign> data)
|
||||
: pos(data(0), data(1), data(2))
|
||||
, head_front_radius(data(3))
|
||||
, is_new_island(data(4) != 0.f)
|
||||
{}
|
||||
|
||||
bool operator==(const SupportPoint &sp) const
|
||||
{
|
||||
float rdiff = std::abs(head_front_radius - sp.head_front_radius);
|
||||
return (pos == sp.pos) && rdiff < float(EPSILON) &&
|
||||
is_new_island == sp.is_new_island;
|
||||
}
|
||||
|
||||
bool operator!=(const SupportPoint &sp) const { return !(sp == (*this)); }
|
||||
|
||||
template<class Archive> void serialize(Archive &ar)
|
||||
{
|
||||
ar(pos, head_front_radius, is_new_island);
|
||||
}
|
||||
};
|
||||
|
||||
using SupportPoints = std::vector<SupportPoint>;
|
||||
|
||||
SupportPoints transformed_support_points(const ModelObject &mo,
|
||||
const Transform3d &trafo);
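// Sketch: the 5-component constructor above packs (x, y, z, head_radius,
// is_new_island). The values here are purely illustrative.
inline SupportPoint example_support_point()
{
    Eigen::Matrix<float, 5, 1, Eigen::DontAlign> data;
    data << 10.f, 5.f, 2.5f, 0.2f, 1.f;   // last component != 0 => new island
    return SupportPoint(data);
}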
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
|
||||
#endif // SLA_SUPPORTPOINT_HPP
|
||||
677
src/libslic3r/SLA/SupportPointGenerator.cpp
Normal file
@@ -0,0 +1,677 @@
|
||||
#include <tbb/parallel_for.h>
|
||||
|
||||
#include "SupportPointGenerator.hpp"
|
||||
#include "Execution/ExecutionTBB.hpp"
|
||||
#include "Geometry/ConvexHull.hpp"
|
||||
#include "Model.hpp"
|
||||
#include "ExPolygon.hpp"
|
||||
#include "SVG.hpp"
|
||||
#include "Point.hpp"
|
||||
#include "ClipperUtils.hpp"
|
||||
#include "Tesselate.hpp"
|
||||
#include "MinAreaBoundingBox.hpp"
|
||||
#include "libslic3r.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
|
||||
namespace Slic3r {
|
||||
namespace sla {
|
||||
|
||||
/*float SupportPointGenerator::approximate_geodesic_distance(const Vec3d& p1, const Vec3d& p2, Vec3d& n1, Vec3d& n2)
|
||||
{
|
||||
n1.normalize();
|
||||
n2.normalize();
|
||||
|
||||
Vec3d v = (p2-p1);
|
||||
v.normalize();
|
||||
|
||||
float c1 = n1.dot(v);
|
||||
float c2 = n2.dot(v);
|
||||
float result = pow(p1(0)-p2(0), 2) + pow(p1(1)-p2(1), 2) + pow(p1(2)-p2(2), 2);
|
||||
// Check for division by zero:
|
||||
if(fabs(c1 - c2) > 0.0001)
|
||||
result *= (asin(c1) - asin(c2)) / (c1 - c2);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
float SupportPointGenerator::get_required_density(float angle) const
|
||||
{
|
||||
// calculation would be density_0 * cos(angle). To provide one more degree of freedom, we will scale the angle
|
||||
// to get the user-set density for 45 deg. So it ends up as density_0 * cos(K * angle).
|
||||
float K = 4.f * float(acos(m_config.density_at_45/m_config.density_at_horizontal) / M_PI);
|
||||
return std::max(0.f, float(m_config.density_at_horizontal * cos(K*angle)));
|
||||
}
|
||||
|
||||
float SupportPointGenerator::distance_limit(float angle) const
|
||||
{
|
||||
return 1./(2.4*get_required_density(angle));
|
||||
}*/
|
||||
|
||||
SupportPointGenerator::SupportPointGenerator(
|
||||
const AABBMesh &emesh,
|
||||
const std::vector<ExPolygons> &slices,
|
||||
const std::vector<float> & heights,
|
||||
const Config & config,
|
||||
std::function<void(void)> throw_on_cancel,
|
||||
std::function<void(int)> statusfn)
|
||||
: SupportPointGenerator(emesh, config, throw_on_cancel, statusfn)
|
||||
{
|
||||
std::random_device rd;
|
||||
m_rng.seed(rd());
|
||||
execute(slices, heights);
|
||||
}
|
||||
|
||||
SupportPointGenerator::SupportPointGenerator(
|
||||
const AABBMesh &emesh,
|
||||
const SupportPointGenerator::Config &config,
|
||||
std::function<void ()> throw_on_cancel,
|
||||
std::function<void (int)> statusfn)
|
||||
: m_config(config)
|
||||
, m_emesh(emesh)
|
||||
, m_throw_on_cancel(throw_on_cancel)
|
||||
, m_statusfn(statusfn)
|
||||
{
|
||||
}
|
||||
|
||||
void SupportPointGenerator::execute(const std::vector<ExPolygons> &slices,
|
||||
const std::vector<float> & heights)
|
||||
{
|
||||
process(slices, heights);
|
||||
project_onto_mesh(m_output);
|
||||
}
|
||||
|
||||
void SupportPointGenerator::project_onto_mesh(std::vector<sla::SupportPoint>& points) const
|
||||
{
|
||||
// The function makes sure that all the points are really exactly placed on the mesh.
|
||||
|
||||
// Use a reasonable granularity to account for the worker thread synchronization cost.
|
||||
static constexpr size_t gransize = 64;
|
||||
|
||||
execution::for_each(ex_tbb, size_t(0), points.size(), [this, &points](size_t idx)
|
||||
{
|
||||
if ((idx % 16) == 0)
|
||||
// Don't call the following function too often as it flushes CPU write caches due to synchronization primitives.
|
||||
m_throw_on_cancel();
|
||||
|
||||
Vec3f& p = points[idx].pos;
|
||||
// Project the point upward and downward and choose the closer intersection with the mesh.
|
||||
AABBMesh::hit_result hit_up = m_emesh.query_ray_hit(p.cast<double>(), Vec3d(0., 0., 1.));
|
||||
AABBMesh::hit_result hit_down = m_emesh.query_ray_hit(p.cast<double>(), Vec3d(0., 0., -1.));
|
||||
|
||||
bool up = hit_up.is_hit();
|
||||
bool down = hit_down.is_hit();
|
||||
|
||||
if (!up && !down)
|
||||
return;
|
||||
|
||||
AABBMesh::hit_result& hit = (!down || (hit_up.distance() < hit_down.distance())) ? hit_up : hit_down;
|
||||
p = p + (hit.distance() * hit.direction()).cast<float>();
|
||||
}, gransize);
|
||||
}
|
||||
|
||||
static std::vector<SupportPointGenerator::MyLayer> make_layers(
|
||||
const std::vector<ExPolygons>& slices, const std::vector<float>& heights,
|
||||
std::function<void(void)> throw_on_cancel)
|
||||
{
|
||||
assert(slices.size() == heights.size());
|
||||
|
||||
// Allocate empty layers.
|
||||
std::vector<SupportPointGenerator::MyLayer> layers;
|
||||
layers.reserve(slices.size());
|
||||
for (size_t i = 0; i < slices.size(); ++ i)
|
||||
layers.emplace_back(i, heights[i]);
|
||||
|
||||
// FIXME: calculate actual pixel area from printer config:
|
||||
//const float pixel_area = pow(wxGetApp().preset_bundle->project_config.option<ConfigOptionFloat>("display_width") / wxGetApp().preset_bundle->project_config.option<ConfigOptionInt>("display_pixels_x"), 2.f); //
|
||||
const float pixel_area = pow(0.047f, 2.f);
|
||||
|
||||
execution::for_each(ex_tbb, size_t(0), layers.size(),
|
||||
[&layers, &slices, &heights, pixel_area, throw_on_cancel](size_t layer_id)
|
||||
{
|
||||
if ((layer_id % 8) == 0)
|
||||
// Don't call the following function too often as it flushes
|
||||
// CPU write caches due to synchronization primitives.
|
||||
throw_on_cancel();
|
||||
|
||||
SupportPointGenerator::MyLayer &layer = layers[layer_id];
|
||||
const ExPolygons & islands = slices[layer_id];
|
||||
// FIXME WTF?
|
||||
const float height = (layer_id > 2 ?
|
||||
heights[layer_id - 3] :
|
||||
heights[0] - (heights[1] - heights[0]));
|
||||
layer.islands.reserve(islands.size());
|
||||
for (const ExPolygon &island : islands) {
|
||||
float area = float(island.area() * SCALING_FACTOR * SCALING_FACTOR);
|
||||
if (area >= pixel_area)
|
||||
// FIXME this is not a correct centroid of a polygon with holes.
|
||||
layer.islands.emplace_back(layer, island, get_extents(island.contour),
|
||||
unscaled<float>(island.contour.centroid()), area, height);
|
||||
}
|
||||
}, 32 /*gransize*/);
|
||||
|
||||
// Calculate overlap of successive layers. Link overlapping islands.
|
||||
execution::for_each(ex_tbb, size_t(1), layers.size(),
|
||||
[&layers, &heights, throw_on_cancel] (size_t layer_id)
|
||||
{
|
||||
if ((layer_id % 2) == 0)
|
||||
// Don't call the following function too often as it flushes CPU write caches due to synchronization primitives.
|
||||
throw_on_cancel();
|
||||
SupportPointGenerator::MyLayer &layer_above = layers[layer_id];
|
||||
SupportPointGenerator::MyLayer &layer_below = layers[layer_id - 1];
|
||||
//FIXME WTF?
|
||||
const float layer_height = (layer_id!=0 ? heights[layer_id]-heights[layer_id-1] : heights[0]);
|
||||
const float safe_angle = 35.f * (float(M_PI)/180.f); // smaller number - less supports
|
||||
const float between_layers_offset = scaled<float>(layer_height * std::tan(safe_angle));
|
||||
const float slope_angle = 75.f * (float(M_PI)/180.f); // smaller number - less supports
|
||||
const float slope_offset = scaled<float>(layer_height * std::tan(slope_angle));
|
||||
//FIXME This has a quadratic time complexity, it will be excessively slow for many tiny islands.
|
||||
for (SupportPointGenerator::Structure &top : layer_above.islands) {
|
||||
for (SupportPointGenerator::Structure &bottom : layer_below.islands) {
|
||||
float overlap_area = top.overlap_area(bottom);
|
||||
if (overlap_area > 0) {
|
||||
top.islands_below.emplace_back(&bottom, overlap_area);
|
||||
bottom.islands_above.emplace_back(&top, overlap_area);
|
||||
}
|
||||
}
|
||||
if (! top.islands_below.empty()) {
|
||||
Polygons bottom_polygons = top.polygons_below();
|
||||
top.overhangs = diff_ex(*top.polygon, bottom_polygons);
|
||||
if (! top.overhangs.empty()) {
|
||||
|
||||
// Produce 2 bands around the island, a safe band for dangling overhangs
|
||||
// and an unsafe band for sloped overhangs.
|
||||
// These masks include the original island
|
||||
auto dangl_mask = expand(bottom_polygons, between_layers_offset, ClipperLib::jtSquare);
|
||||
auto overh_mask = expand(bottom_polygons, slope_offset, ClipperLib::jtSquare);
|
||||
|
||||
// Absolutely hopeless overhangs are those outside the unsafe band
|
||||
top.overhangs = diff_ex(*top.polygon, overh_mask);
|
||||
|
||||
// Now cut out the supported core from the safe band
|
||||
// and cut the safe band from the unsafe band to get distinct
|
||||
// zones.
|
||||
overh_mask = diff(overh_mask, dangl_mask);
|
||||
dangl_mask = diff(dangl_mask, bottom_polygons);
|
||||
|
||||
top.dangling_areas = intersection_ex(*top.polygon, dangl_mask);
|
||||
top.overhangs_slopes = intersection_ex(*top.polygon, overh_mask);
|
||||
|
||||
top.overhangs_area = 0.f;
|
||||
std::vector<std::pair<ExPolygon*, float>> expolys_with_areas;
|
||||
for (ExPolygon &ex : top.overhangs) {
|
||||
float area = float(ex.area());
|
||||
expolys_with_areas.emplace_back(&ex, area);
|
||||
top.overhangs_area += area;
|
||||
}
|
||||
std::sort(expolys_with_areas.begin(), expolys_with_areas.end(),
|
||||
[](const std::pair<ExPolygon*, float> &p1, const std::pair<ExPolygon*, float> &p2)
|
||||
{ return p1.second > p2.second; });
|
||||
ExPolygons overhangs_sorted;
|
||||
for (auto &p : expolys_with_areas)
|
||||
overhangs_sorted.emplace_back(std::move(*p.first));
|
||||
top.overhangs = std::move(overhangs_sorted);
|
||||
top.overhangs_area *= float(SCALING_FACTOR * SCALING_FACTOR);
|
||||
}
|
||||
}
|
||||
}
|
||||
}, 8 /* gransize */);
|
||||
|
||||
return layers;
|
||||
}
|
||||
|
||||
void SupportPointGenerator::process(const std::vector<ExPolygons>& slices, const std::vector<float>& heights)
|
||||
{
|
||||
#ifdef SLA_SUPPORTPOINTGEN_DEBUG
|
||||
std::vector<std::pair<ExPolygon, coord_t>> islands;
|
||||
#endif /* SLA_SUPPORTPOINTGEN_DEBUG */
|
||||
|
||||
std::vector<SupportPointGenerator::MyLayer> layers = make_layers(slices, heights, m_throw_on_cancel);
|
||||
|
||||
PointGrid3D point_grid;
|
||||
point_grid.cell_size = Vec3f(10.f, 10.f, 10.f);
|
||||
|
||||
double increment = 100.0 / layers.size();
|
||||
double status = 0;
|
||||
|
||||
for (unsigned int layer_id = 0; layer_id < layers.size(); ++ layer_id) {
|
||||
SupportPointGenerator::MyLayer *layer_top = &layers[layer_id];
|
||||
SupportPointGenerator::MyLayer *layer_bottom = (layer_id > 0) ? &layers[layer_id - 1] : nullptr;
|
||||
std::vector<float> support_force_bottom;
|
||||
if (layer_bottom != nullptr) {
|
||||
support_force_bottom.assign(layer_bottom->islands.size(), 0.f);
|
||||
for (size_t i = 0; i < layer_bottom->islands.size(); ++ i)
|
||||
support_force_bottom[i] = layer_bottom->islands[i].supports_force_total();
|
||||
}
|
||||
for (Structure &top : layer_top->islands)
|
||||
for (Structure::Link &bottom_link : top.islands_below) {
|
||||
Structure &bottom = *bottom_link.island;
|
||||
//float centroids_dist = (bottom.centroid - top.centroid).norm();
|
||||
// Penalization resulting from centroid offset:
|
||||
// bottom.supports_force *= std::min(1.f, 1.f - std::min(1.f, (1600.f * layer_height) * centroids_dist * centroids_dist / bottom.area));
|
||||
float &support_force = support_force_bottom[&bottom - layer_bottom->islands.data()];
|
||||
//FIXME this condition does not reflect a bifurcation into a one large island and one tiny island well, it incorrectly resets the support force to zero.
|
||||
// One should rather work with the overlap area vs overhang area.
|
||||
// support_force *= std::min(1.f, 1.f - std::min(1.f, 0.1f * centroids_dist * centroids_dist / bottom.area));
|
||||
// Penalization resulting from increasing polygon area:
|
||||
support_force *= std::min(1.f, 20.f * bottom.area / top.area);
|
||||
}
|
||||
// Let's assign proper support force to each of them:
|
||||
if (layer_id > 0) {
|
||||
for (Structure &below : layer_bottom->islands) {
|
||||
float below_support_force = support_force_bottom[&below - layer_bottom->islands.data()];
|
||||
float above_overlap_area = 0.f;
|
||||
for (Structure::Link &above_link : below.islands_above)
|
||||
above_overlap_area += above_link.overlap_area;
|
||||
for (Structure::Link &above_link : below.islands_above)
|
||||
above_link.island->supports_force_inherited += below_support_force * above_link.overlap_area / above_overlap_area;
|
||||
}
|
||||
}
|
||||
// Now iterate over all polygons and append new points if needed.
|
||||
for (Structure &s : layer_top->islands) {
|
||||
// Penalization resulting from large diff from the last layer:
|
||||
s.supports_force_inherited /= std::max(1.f, 0.17f * (s.overhangs_area) / s.area);
|
||||
|
||||
add_support_points(s, point_grid);
|
||||
}
|
||||
|
||||
m_throw_on_cancel();
|
||||
|
||||
status += increment;
|
||||
m_statusfn(int(std::round(status)));
|
||||
|
||||
#ifdef SLA_SUPPORTPOINTGEN_DEBUG
|
||||
/*std::string layer_num_str = std::string((i<10 ? "0" : "")) + std::string((i<100 ? "0" : "")) + std::to_string(i);
|
||||
output_expolygons(expolys_top, "top" + layer_num_str + ".svg");
|
||||
output_expolygons(diff, "diff" + layer_num_str + ".svg");
|
||||
if (!islands.empty())
|
||||
output_expolygons(islands, "islands" + layer_num_str + ".svg");*/
|
||||
#endif /* SLA_SUPPORTPOINTGEN_DEBUG */
|
||||
}
|
||||
}
|
||||
|
||||
void SupportPointGenerator::add_support_points(SupportPointGenerator::Structure &s, SupportPointGenerator::PointGrid3D &grid3d)
|
||||
{
|
||||
// Select each type of surface (overhang, dangling, slope), derive the support
// force deficit for it and call uniformly_cover() with the right params.
|
||||
|
||||
float tp = m_config.tear_pressure();
|
||||
float current = s.supports_force_total();
|
||||
|
||||
if (s.islands_below.empty()) {
|
||||
// completely new island - needs support no doubt
|
||||
// deficit is full, there is nothing below that would hold this island
|
||||
uniformly_cover({ *s.polygon }, s, s.area * tp, grid3d, IslandCoverageFlags(icfIsNew | icfWithBoundary) );
|
||||
return;
|
||||
}
|
||||
|
||||
if (! s.overhangs.empty()) {
|
||||
uniformly_cover(s.overhangs, s, s.overhangs_area * tp, grid3d);
|
||||
}
|
||||
|
||||
auto areafn = [](double sum, auto &p) { return sum + p.area() * SCALING_FACTOR * SCALING_FACTOR; };
|
||||
|
||||
current = s.supports_force_total();
|
||||
if (! s.dangling_areas.empty()) {
|
||||
// Let's see if there's anything that overlaps enough to need supports:
|
||||
// What we now have in polygons needs support, regardless of what the forces are, so we can add them.
|
||||
|
||||
double a = std::accumulate(s.dangling_areas.begin(), s.dangling_areas.end(), 0., areafn);
|
||||
uniformly_cover(s.dangling_areas, s, a * tp - a * current * s.area, grid3d, icfWithBoundary);
|
||||
}
|
||||
|
||||
current = s.supports_force_total();
|
||||
if (! s.overhangs_slopes.empty()) {
|
||||
double a = std::accumulate(s.overhangs_slopes.begin(), s.overhangs_slopes.end(), 0., areafn);
|
||||
uniformly_cover(s.overhangs_slopes, s, a * tp - a * current / s.area, grid3d, icfWithBoundary);
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<Vec2f> sample_expolygon(const ExPolygon &expoly, float samples_per_mm2, std::mt19937 &rng)
|
||||
{
|
||||
// Triangulate the polygon with holes into triplets of 3D points.
|
||||
std::vector<Vec2f> triangles = Slic3r::triangulate_expolygon_2f(expoly);
|
||||
|
||||
std::vector<Vec2f> out;
|
||||
if (! triangles.empty())
|
||||
{
|
||||
// Calculate area of each triangle.
|
||||
auto areas = reserve_vector<float>(triangles.size() / 3);
|
||||
double aback = 0.;
|
||||
for (size_t i = 0; i < triangles.size(); ) {
|
||||
const Vec2f &a = triangles[i ++];
|
||||
const Vec2f v1 = triangles[i ++] - a;
|
||||
const Vec2f v2 = triangles[i ++] - a;
|
||||
|
||||
// Prefix sum of the areas.
|
||||
areas.emplace_back(aback + 0.5f * std::abs(cross2(v1, v2)));
|
||||
aback = areas.back();
|
||||
}
|
||||
|
||||
size_t num_samples = size_t(ceil(areas.back() * samples_per_mm2));
|
||||
std::uniform_real_distribution<> random_triangle(0., double(areas.back()));
|
||||
std::uniform_real_distribution<> random_float(0., 1.);
|
||||
for (size_t i = 0; i < num_samples; ++ i) {
|
||||
double r = random_triangle(rng);
|
||||
size_t idx_triangle = std::min<size_t>(std::upper_bound(areas.begin(), areas.end(), (float)r) - areas.begin(), areas.size() - 1) * 3;
|
||||
// Select a random point on the triangle.
|
||||
const Vec2f &a = triangles[idx_triangle ++];
|
||||
const Vec2f &b = triangles[idx_triangle++];
|
||||
const Vec2f &c = triangles[idx_triangle];
|
||||
#if 1
|
||||
// https://www.cs.princeton.edu/~funk/tog02.pdf
|
||||
// page 814, formula 1.
|
||||
double u = float(std::sqrt(random_float(rng)));
|
||||
double v = float(random_float(rng));
|
||||
out.emplace_back(a * (1.f - u) + b * (u * (1.f - v)) + c * (v * u));
|
||||
#else
|
||||
// Greg Turk, Graphics Gems
|
||||
// https://devsplorer.wordpress.com/2019/08/07/find-a-random-point-on-a-plane-using-barycentric-coordinates-in-unity/
|
||||
double u = float(random_float(rng));
|
||||
double v = float(random_float(rng));
|
||||
if (u + v >= 1.f) {
|
||||
u = 1.f - u;
|
||||
v = 1.f - v;
|
||||
}
|
||||
out.emplace_back(a + u * (b - a) + v * (c - a));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
return out;
|
||||
}
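// Sketch: the uniform triangle sampling used above, isolated into a standalone
// helper. With u = sqrt(r1) and v = r2 (both uniform in [0, 1)), the point
// P = (1 - u) * A + u * (1 - v) * B + u * v * C is uniformly distributed over
// the triangle ABC (Osada et al., "Shape Distributions", formula 1).
static Vec2f example_sample_triangle(const Vec2f &a, const Vec2f &b, const Vec2f &c,
                                     std::mt19937 &rng)
{
    std::uniform_real_distribution<float> unit(0.f, 1.f);
    const float u = std::sqrt(unit(rng));
    const float v = unit(rng);
    return a * (1.f - u) + b * (u * (1.f - v)) + c * (u * v);
}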
|
||||
|
||||
|
||||
std::vector<Vec2f> sample_expolygon(const ExPolygons &expolys, float samples_per_mm2, std::mt19937 &rng)
|
||||
{
|
||||
std::vector<Vec2f> out;
|
||||
for (const ExPolygon &expoly : expolys)
|
||||
append(out, sample_expolygon(expoly, samples_per_mm2, rng));
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
void sample_expolygon_boundary(const ExPolygon & expoly,
|
||||
float samples_per_mm,
|
||||
std::vector<Vec2f> &out,
|
||||
std::mt19937 & /*rng*/)
|
||||
{
|
||||
double point_stepping_scaled = scale_(1.f) / samples_per_mm;
|
||||
for (size_t i_contour = 0; i_contour <= expoly.holes.size(); ++ i_contour) {
|
||||
const Polygon &contour = (i_contour == 0) ? expoly.contour :
|
||||
expoly.holes[i_contour - 1];
|
||||
|
||||
const Points pts = contour.equally_spaced_points(point_stepping_scaled);
|
||||
for (size_t i = 0; i < pts.size(); ++ i)
|
||||
out.emplace_back(unscale<float>(pts[i].x()),
|
||||
unscale<float>(pts[i].y()));
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<Vec2f> sample_expolygon_with_boundary(const ExPolygon &expoly, float samples_per_mm2, float samples_per_mm_boundary, std::mt19937 &rng)
|
||||
{
|
||||
std::vector<Vec2f> out = sample_expolygon(expoly, samples_per_mm2, rng);
|
||||
sample_expolygon_boundary(expoly, samples_per_mm_boundary, out, rng);
|
||||
return out;
|
||||
}
|
||||
|
||||
std::vector<Vec2f> sample_expolygon_with_boundary(const ExPolygons &expolys, float samples_per_mm2, float samples_per_mm_boundary, std::mt19937 &rng)
|
||||
{
|
||||
std::vector<Vec2f> out;
|
||||
for (const ExPolygon &expoly : expolys)
|
||||
append(out, sample_expolygon_with_boundary(expoly, samples_per_mm2, samples_per_mm_boundary, rng));
|
||||
return out;
|
||||
}
|
||||
|
||||
template<typename REFUSE_FUNCTION>
|
||||
static inline std::vector<Vec2f> poisson_disk_from_samples(const std::vector<Vec2f> &raw_samples, float radius, REFUSE_FUNCTION refuse_function)
|
||||
{
|
||||
Vec2f corner_min(std::numeric_limits<float>::max(), std::numeric_limits<float>::max());
|
||||
for (const Vec2f &pt : raw_samples) {
|
||||
corner_min.x() = std::min(corner_min.x(), pt.x());
|
||||
corner_min.y() = std::min(corner_min.y(), pt.y());
|
||||
}
|
||||
|
||||
// Assign the raw samples to grid cells, sort the grid cells lexicographically.
|
||||
struct RawSample
|
||||
{
|
||||
Vec2f coord;
|
||||
Vec2i cell_id;
|
||||
RawSample(const Vec2f &crd = {}, const Vec2i &id = {}): coord{crd}, cell_id{id} {}
|
||||
};
|
||||
|
||||
auto raw_samples_sorted = reserve_vector<RawSample>(raw_samples.size());
|
||||
for (const Vec2f &pt : raw_samples)
|
||||
raw_samples_sorted.emplace_back(pt, ((pt - corner_min) / radius).cast<int>());
|
||||
|
||||
std::sort(raw_samples_sorted.begin(), raw_samples_sorted.end(), [](const RawSample &lhs, const RawSample &rhs)
|
||||
{ return lhs.cell_id.x() < rhs.cell_id.x() || (lhs.cell_id.x() == rhs.cell_id.x() && lhs.cell_id.y() < rhs.cell_id.y()); });
|
||||
|
||||
struct PoissonDiskGridEntry {
|
||||
// Resulting output sample points for this cell:
|
||||
enum {
|
||||
max_positions = 4
|
||||
};
|
||||
Vec2f poisson_samples[max_positions];
|
||||
int num_poisson_samples = 0;
|
||||
|
||||
// Index into raw_samples:
|
||||
int first_sample_idx;
|
||||
int sample_cnt;
|
||||
};
|
||||
|
||||
struct CellIDHash {
|
||||
std::size_t operator()(const Vec2i &cell_id) const {
|
||||
return std::hash<int>()(cell_id.x()) ^ std::hash<int>()(cell_id.y() * 593);
|
||||
}
|
||||
};
|
||||
|
||||
// Map from cell IDs to hash_data. Each hash_data points to the range in raw_samples corresponding to that cell.
|
||||
// (We could just store the samples in hash_data. This implementation is an artifact of the reference paper, which
|
||||
// is optimizing for GPU acceleration that we haven't implemented currently.)
|
||||
typedef std::unordered_map<Vec2i, PoissonDiskGridEntry, CellIDHash> Cells;
|
||||
Cells cells;
|
||||
{
|
||||
typename Cells::iterator last_cell_id_it;
|
||||
Vec2i last_cell_id(-1, -1);
|
||||
for (size_t i = 0; i < raw_samples_sorted.size(); ++ i) {
|
||||
const RawSample &sample = raw_samples_sorted[i];
|
||||
if (sample.cell_id == last_cell_id) {
|
||||
// This sample is in the same cell as the previous, so just increase the count. Cells are
|
||||
// always contiguous, since we've sorted raw_samples_sorted by cell ID.
|
||||
++ last_cell_id_it->second.sample_cnt;
|
||||
} else {
|
||||
// This is a new cell.
|
||||
PoissonDiskGridEntry data;
|
||||
data.first_sample_idx = int(i);
|
||||
data.sample_cnt = 1;
|
||||
auto result = cells.insert({sample.cell_id, data});
|
||||
last_cell_id = sample.cell_id;
|
||||
last_cell_id_it = result.first;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const int max_trials = 5;
|
||||
const float radius_squared = radius * radius;
|
||||
for (int trial = 0; trial < max_trials; ++ trial) {
|
||||
// Create sample points for each entry in cells.
|
||||
for (auto &it : cells) {
|
||||
const Vec2i &cell_id = it.first;
|
||||
PoissonDiskGridEntry &cell_data = it.second;
|
||||
// This cell's raw sample points start at first_sample_idx. On trial 0, try the first one. On trial 1, try first_sample_idx + 1.
|
||||
int next_sample_idx = cell_data.first_sample_idx + trial;
|
||||
if (trial >= cell_data.sample_cnt)
|
||||
// There are no more points to try for this cell.
|
||||
continue;
|
||||
const RawSample &candidate = raw_samples_sorted[next_sample_idx];
|
||||
// See if this point conflicts with any other points in this cell, or with any points in
|
||||
// neighboring cells. Note that it's possible to have more than one point in the same cell.
|
||||
bool conflict = refuse_function(candidate.coord);
|
||||
for (int i = -1; i < 2 && ! conflict; ++ i) {
|
||||
for (int j = -1; j < 2; ++ j) {
|
||||
const auto &it_neighbor = cells.find(cell_id + Vec2i(i, j));
|
||||
if (it_neighbor != cells.end()) {
|
||||
const PoissonDiskGridEntry &neighbor = it_neighbor->second;
|
||||
for (int i_sample = 0; i_sample < neighbor.num_poisson_samples; ++ i_sample)
|
||||
if ((neighbor.poisson_samples[i_sample] - candidate.coord).squaredNorm() < radius_squared) {
|
||||
conflict = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (! conflict) {
|
||||
// Store the new sample.
|
||||
assert(cell_data.num_poisson_samples < cell_data.max_positions);
|
||||
if (cell_data.num_poisson_samples < cell_data.max_positions)
|
||||
cell_data.poisson_samples[cell_data.num_poisson_samples ++] = candidate.coord;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the results to the output.
|
||||
std::vector<Vec2f> out;
|
||||
for (const auto& it : cells)
|
||||
for (int i = 0; i < it.second.num_poisson_samples; ++ i)
|
||||
out.emplace_back(it.second.poisson_samples[i]);
|
||||
return out;
|
||||
}
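// Usage sketch (illustrative only): thinning an arbitrary point cloud to a
// poisson-disk subset with no additional rejection criteria.
static std::vector<Vec2f> example_poisson_filter(const std::vector<Vec2f> &raw,
                                                 float radius)
{
    return poisson_disk_from_samples(raw, radius,
                                     [](const Vec2f &) { return false; });
}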
|
||||
|
||||
|
||||
void SupportPointGenerator::uniformly_cover(const ExPolygons& islands, Structure& structure, float deficit, PointGrid3D &grid3d, IslandCoverageFlags flags)
|
||||
{
|
||||
//int num_of_points = std::max(1, (int)((island.area()*pow(SCALING_FACTOR, 2) * m_config.tear_pressure)/m_config.support_force));
|
||||
|
||||
float support_force_deficit = deficit;
|
||||
// auto bb = get_extents(islands);
|
||||
|
||||
if (flags & icfIsNew) {
|
||||
auto chull = Geometry::convex_hull(islands);
|
||||
auto rotbox = MinAreaBoundigBox{chull, MinAreaBoundigBox::pcConvex};
|
||||
Vec2d bbdim = {unscaled(rotbox.width()), unscaled(rotbox.height())};
|
||||
|
||||
if (bbdim.x() > bbdim.y()) std::swap(bbdim.x(), bbdim.y());
|
||||
double aspectr = bbdim.y() / bbdim.x();
|
||||
|
||||
support_force_deficit *= (1 + aspectr / 2.);
|
||||
}
|
||||
|
||||
if (support_force_deficit < 0)
|
||||
return;
|
||||
|
||||
// Number of newly added points.
|
||||
const size_t poisson_samples_target = size_t(ceil(support_force_deficit / m_config.support_force()));
|
||||
|
||||
const float density_horizontal = m_config.tear_pressure() / m_config.support_force();
|
||||
//FIXME why?
|
||||
float poisson_radius = std::max(m_config.minimal_distance, 1.f / (5.f * density_horizontal));
|
||||
// const float poisson_radius = 1.f / (15.f * density_horizontal);
|
||||
const float samples_per_mm2 = 30.f / (float(M_PI) * poisson_radius * poisson_radius);
|
||||
// Minimum distance between samples, in 3D space.
|
||||
// float min_spacing = poisson_radius / 3.f;
|
||||
float min_spacing = poisson_radius;
|
||||
|
||||
//FIXME share the random generator. The random generator may be not so cheap to initialize, also we don't want the random generator to be restarted for each polygon.
|
||||
|
||||
std::vector<Vec2f> raw_samples =
|
||||
flags & icfWithBoundary ?
|
||||
sample_expolygon_with_boundary(islands, samples_per_mm2,
|
||||
5.f / poisson_radius, m_rng) :
|
||||
sample_expolygon(islands, samples_per_mm2, m_rng);
|
||||
|
||||
std::vector<Vec2f> poisson_samples;
|
||||
for (size_t iter = 0; iter < 4; ++ iter) {
|
||||
poisson_samples = poisson_disk_from_samples(raw_samples, poisson_radius,
|
||||
[&structure, &grid3d, min_spacing](const Vec2f &pos) {
|
||||
return grid3d.collides_with(pos, structure.layer->print_z, min_spacing);
|
||||
});
|
||||
if (poisson_samples.size() >= poisson_samples_target || m_config.minimal_distance > poisson_radius-EPSILON)
|
||||
break;
|
||||
float coeff = 0.5f;
|
||||
if (poisson_samples.size() * 2 > poisson_samples_target)
|
||||
coeff = float(poisson_samples.size()) / float(poisson_samples_target);
|
||||
poisson_radius = std::max(m_config.minimal_distance, poisson_radius * coeff);
|
||||
min_spacing = std::max(m_config.minimal_distance, min_spacing * coeff);
|
||||
}
|
||||
|
||||
#ifdef SLA_SUPPORTPOINTGEN_DEBUG
|
||||
{
|
||||
static int irun = 0;
|
||||
Slic3r::SVG svg(debug_out_path("SLA_supports-uniformly_cover-%d.svg", irun ++), get_extents(islands));
|
||||
for (const ExPolygon &island : islands)
|
||||
svg.draw(island);
|
||||
for (const Vec2f &pt : raw_samples)
|
||||
svg.draw(Point(scale_(pt.x()), scale_(pt.y())), "red");
|
||||
for (const Vec2f &pt : poisson_samples)
|
||||
svg.draw(Point(scale_(pt.x()), scale_(pt.y())), "blue");
|
||||
}
|
||||
#endif /* SLA_SUPPORTPOINTGEN_DEBUG */
|
||||
|
||||
// assert(! poisson_samples.empty());
|
||||
if (poisson_samples_target < poisson_samples.size()) {
|
||||
std::shuffle(poisson_samples.begin(), poisson_samples.end(), m_rng);
|
||||
poisson_samples.erase(poisson_samples.begin() + poisson_samples_target, poisson_samples.end());
|
||||
}
|
||||
for (const Vec2f &pt : poisson_samples) {
|
||||
m_output.emplace_back(float(pt(0)), float(pt(1)), structure.zlevel, m_config.head_diameter/2.f, flags & icfIsNew);
|
||||
structure.supports_force_this_layer += m_config.support_force();
|
||||
grid3d.insert(pt, &structure);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void remove_bottom_points(std::vector<SupportPoint> &pts, float lvl)
|
||||
{
|
||||
// get iterator to the reorganized vector end
|
||||
auto endit = std::remove_if(pts.begin(), pts.end(), [lvl]
|
||||
(const sla::SupportPoint &sp) {
|
||||
return sp.pos.z() <= lvl;
|
||||
});
|
||||
|
||||
// erase all elements after the new end
|
||||
pts.erase(endit, pts.end());
|
||||
}
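// Usage sketch: dropping the generated support points that ended up at or
// below an assumed ground level before handing them to the support tree:
//
//     std::vector<SupportPoint> pts = generator.output();
//     remove_bottom_points(pts, float(ground_level + EPSILON));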
|
||||
|
||||
#ifdef SLA_SUPPORTPOINTGEN_DEBUG
|
||||
void SupportPointGenerator::output_structures(const std::vector<Structure>& structures)
|
||||
{
|
||||
for (unsigned int i=0 ; i<structures.size(); ++i) {
|
||||
std::stringstream ss;
|
||||
ss << structures[i].unique_id.count() << "_" << std::setw(10) << std::setfill('0') << 1000 + (int)structures[i].height/1000 << ".png";
|
||||
output_expolygons(std::vector<ExPolygon>{*structures[i].polygon}, ss.str());
|
||||
}
|
||||
}
|
||||
|
||||
void SupportPointGenerator::output_expolygons(const ExPolygons& expolys, const std::string &filename)
|
||||
{
|
||||
BoundingBox bb(Point(-30000000, -30000000), Point(30000000, 30000000));
|
||||
Slic3r::SVG svg_cummulative(filename, bb);
|
||||
for (size_t i = 0; i < expolys.size(); ++ i) {
|
||||
/*Slic3r::SVG svg("single"+std::to_string(i)+".svg", bb);
|
||||
svg.draw(expolys[i]);
|
||||
svg.draw_outline(expolys[i].contour, "black", scale_(0.05));
|
||||
svg.draw_outline(expolys[i].holes, "blue", scale_(0.05));
|
||||
svg.Close();*/
|
||||
|
||||
svg_cummulative.draw(expolys[i]);
|
||||
svg_cummulative.draw_outline(expolys[i].contour, "black", scale_(0.05));
|
||||
svg_cummulative.draw_outline(expolys[i].holes, "blue", scale_(0.05));
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
SupportPoints transformed_support_points(const ModelObject &mo,
|
||||
const Transform3d &trafo)
|
||||
{
|
||||
auto spts = mo.sla_support_points;
|
||||
Transform3f tr = trafo.cast<float>();
|
||||
for (sla::SupportPoint& suppt : spts) {
|
||||
suppt.pos = tr * suppt.pos;
|
||||
}
|
||||
|
||||
return spts;
|
||||
}
|
||||
|
||||
} // namespace sla
|
||||
} // namespace Slic3r
|
||||
233
src/libslic3r/SLA/SupportPointGenerator.hpp
Normal file
@@ -0,0 +1,233 @@
|
||||
#ifndef SLA_SUPPORTPOINTGENERATOR_HPP
|
||||
#define SLA_SUPPORTPOINTGENERATOR_HPP
|
||||
|
||||
#include <random>
|
||||
|
||||
#include <libslic3r/AABBMesh.hpp>
|
||||
|
||||
#include <libslic3r/SLA/SupportPoint.hpp>
|
||||
#include <libslic3r/BoundingBox.hpp>
|
||||
#include <libslic3r/ClipperUtils.hpp>
|
||||
#include <libslic3r/Point.hpp>
|
||||
|
||||
#include <boost/container/small_vector.hpp>
|
||||
|
||||
// #define SLA_SUPPORTPOINTGEN_DEBUG
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
class SupportPointGenerator {
|
||||
public:
|
||||
struct Config {
|
||||
float density_relative {1.f};
|
||||
float minimal_distance {1.f};
|
||||
float head_diameter {0.4f};
|
||||
|
||||
// Originally calibrated to 7.7f, reduced density by Tamas to 70% which is 11.1 (7.7 / 0.7) to adjust for new algorithm changes in tm_suppt_gen_improve
|
||||
inline float support_force() const { return 11.1f / density_relative; } // a force one point can support (arbitrary force unit)
|
||||
inline float tear_pressure() const { return 1.f; } // pressure that the display exerts (the force unit per mm2)
|
||||
};
|
||||
|
||||
SupportPointGenerator(const AABBMesh& emesh, const std::vector<ExPolygons>& slices,
|
||||
const std::vector<float>& heights, const Config& config, std::function<void(void)> throw_on_cancel, std::function<void(int)> statusfn);
|
||||
|
||||
SupportPointGenerator(const AABBMesh& emesh, const Config& config, std::function<void(void)> throw_on_cancel, std::function<void(int)> statusfn);
|
||||
|
||||
const std::vector<SupportPoint>& output() const { return m_output; }
|
||||
std::vector<SupportPoint>& output() { return m_output; }
|
||||
|
||||
struct MyLayer;
|
||||
|
||||
struct Structure {
|
||||
Structure(MyLayer &layer, const ExPolygon& poly, const BoundingBox &bbox, const Vec2f ¢roid, float area, float h) :
|
||||
layer(&layer), polygon(&poly), bbox(bbox), centroid(centroid), area(area), zlevel(h)
|
||||
#ifdef SLA_SUPPORTPOINTGEN_DEBUG
|
||||
, unique_id(std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()))
|
||||
#endif /* SLA_SUPPORTPOINTGEN_DEBUG */
|
||||
{}
|
||||
MyLayer *layer;
|
||||
const ExPolygon* polygon = nullptr;
|
||||
const BoundingBox bbox;
|
||||
const Vec2f centroid = Vec2f::Zero();
|
||||
const float area = 0.f;
|
||||
float zlevel = 0;
|
||||
// How well is this ExPolygon held to the print base?
|
||||
// Positive number, the higher the better.
|
||||
float supports_force_this_layer = 0.f;
|
||||
float supports_force_inherited = 0.f;
|
||||
float supports_force_total() const { return this->supports_force_this_layer + this->supports_force_inherited; }
|
||||
#ifdef SLA_SUPPORTPOINTGEN_DEBUG
|
||||
std::chrono::milliseconds unique_id;
|
||||
#endif /* SLA_SUPPORTPOINTGEN_DEBUG */
|
||||
|
||||
struct Link {
|
||||
Link(Structure *island, float overlap_area) : island(island), overlap_area(overlap_area) {}
|
||||
Structure *island;
|
||||
float overlap_area;
|
||||
};
|
||||
|
||||
#ifdef NDEBUG
|
||||
// In release mode, use the optimized container.
|
||||
boost::container::small_vector<Link, 4> islands_above;
|
||||
boost::container::small_vector<Link, 4> islands_below;
|
||||
#else
|
||||
// In debug mode, use the standard vector, which is well handled by debugger visualizer.
|
||||
std::vector<Link> islands_above;
|
||||
std::vector<Link> islands_below;
|
||||
#endif
|
||||
// Overhangs that are dangling considerably.
ExPolygons dangling_areas;
// Complete overhangs.
ExPolygons overhangs;
// Overhangs where the surface must slope.
ExPolygons overhangs_slopes;
|
||||
float overhangs_area = 0.f;
|
||||
|
||||
bool overlaps(const Structure &rhs) const {
|
||||
return this->bbox.overlap(rhs.bbox) && this->polygon->overlaps(*rhs.polygon);
|
||||
}
|
||||
float overlap_area(const Structure &rhs) const {
|
||||
double out = 0.;
|
||||
if (this->bbox.overlap(rhs.bbox)) {
|
||||
Polygons polys = intersection(*this->polygon, *rhs.polygon);
|
||||
for (const Polygon &poly : polys)
|
||||
out += poly.area();
|
||||
}
|
||||
return float(out);
|
||||
}
|
||||
float area_below() const {
|
||||
float area = 0.f;
|
||||
for (const Link &below : this->islands_below)
|
||||
area += below.island->area;
|
||||
return area;
|
||||
}
|
||||
Polygons polygons_below() const {
|
||||
size_t cnt = 0;
|
||||
for (const Link &below : this->islands_below)
|
||||
cnt += 1 + below.island->polygon->holes.size();
|
||||
Polygons out;
|
||||
out.reserve(cnt);
|
||||
for (const Link &below : this->islands_below) {
|
||||
out.emplace_back(below.island->polygon->contour);
|
||||
append(out, below.island->polygon->holes);
|
||||
}
|
||||
return out;
|
||||
}
|
||||
ExPolygons expolygons_below() const {
|
||||
ExPolygons out;
|
||||
out.reserve(this->islands_below.size());
|
||||
for (const Link &below : this->islands_below)
|
||||
out.emplace_back(*below.island->polygon);
|
||||
return out;
|
||||
}
|
||||
// Positive deficit of the supports. If negative, this area is well supported. If positive, more supports need to be added.
|
||||
float support_force_deficit(const float tear_pressure) const { return this->area * tear_pressure - this->supports_force_total(); }
|
||||
};
|
||||
|
||||
struct MyLayer {
|
||||
MyLayer(const size_t layer_id, coordf_t print_z) : layer_id(layer_id), print_z(print_z) {}
|
||||
size_t layer_id;
|
||||
coordf_t print_z;
|
||||
std::vector<Structure> islands;
|
||||
};
|
||||
|
||||
struct RichSupportPoint {
|
||||
Vec3f position;
|
||||
Structure *island;
|
||||
};
|
||||
|
||||
struct PointGrid3D {
|
||||
struct GridHash {
|
||||
std::size_t operator()(const Vec3i &cell_id) const {
|
||||
return std::hash<int>()(cell_id.x()) ^ std::hash<int>()(cell_id.y() * 593) ^ std::hash<int>()(cell_id.z() * 7919);
|
||||
}
|
||||
};
|
||||
typedef std::unordered_multimap<Vec3i, RichSupportPoint, GridHash> Grid;
|
||||
|
||||
Vec3f cell_size;
|
||||
Grid grid;
|
||||
|
||||
Vec3i cell_id(const Vec3f &pos) {
|
||||
return Vec3i(int(floor(pos.x() / cell_size.x())),
|
||||
int(floor(pos.y() / cell_size.y())),
|
||||
int(floor(pos.z() / cell_size.z())));
|
||||
}
|
||||
|
||||
void insert(const Vec2f &pos, Structure *island) {
|
||||
RichSupportPoint pt;
|
||||
pt.position = Vec3f(pos.x(), pos.y(), float(island->layer->print_z));
|
||||
pt.island = island;
|
||||
grid.emplace(cell_id(pt.position), pt);
|
||||
}
|
||||
|
||||
bool collides_with(const Vec2f &pos, float print_z, float radius) {
|
||||
Vec3f pos3d(pos.x(), pos.y(), print_z);
|
||||
Vec3i cell = cell_id(pos3d);
|
||||
std::pair<Grid::const_iterator, Grid::const_iterator> it_pair = grid.equal_range(cell);
|
||||
if (collides_with(pos3d, radius, it_pair.first, it_pair.second))
|
||||
return true;
|
||||
for (int i = -1; i < 2; ++ i)
|
||||
for (int j = -1; j < 2; ++ j)
|
||||
for (int k = -1; k < 1; ++ k) {
|
||||
if (i == 0 && j == 0 && k == 0)
|
||||
continue;
|
||||
it_pair = grid.equal_range(cell + Vec3i(i, j, k));
|
||||
if (collides_with(pos3d, radius, it_pair.first, it_pair.second))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private:
|
||||
bool collides_with(const Vec3f &pos, float radius, Grid::const_iterator it_begin, Grid::const_iterator it_end) {
|
||||
for (Grid::const_iterator it = it_begin; it != it_end; ++ it) {
|
||||
float dist2 = (it->second.position - pos).squaredNorm();
|
||||
if (dist2 < radius * radius)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
void execute(const std::vector<ExPolygons> &slices,
|
||||
const std::vector<float> & heights);
|
||||
|
||||
void seed(std::mt19937::result_type s) { m_rng.seed(s); }
|
||||
private:
|
||||
std::vector<SupportPoint> m_output;
|
||||
|
||||
SupportPointGenerator::Config m_config;
|
||||
|
||||
void process(const std::vector<ExPolygons>& slices, const std::vector<float>& heights);
|
||||
|
||||
public:
|
||||
enum IslandCoverageFlags : uint8_t { icfNone = 0x0, icfIsNew = 0x1, icfWithBoundary = 0x2 };
|
||||
|
||||
private:
|
||||
|
||||
void uniformly_cover(const ExPolygons& islands, Structure& structure, float deficit, PointGrid3D &grid3d, IslandCoverageFlags flags = icfNone);
|
||||
|
||||
void add_support_points(Structure& structure, PointGrid3D &grid3d);
|
||||
|
||||
void project_onto_mesh(std::vector<SupportPoint>& points) const;
|
||||
|
||||
#ifdef SLA_SUPPORTPOINTGEN_DEBUG
|
||||
static void output_expolygons(const ExPolygons& expolys, const std::string &filename);
|
||||
static void output_structures(const std::vector<Structure> &structures);
|
||||
#endif // SLA_SUPPORTPOINTGEN_DEBUG
|
||||
|
||||
const AABBMesh& m_emesh;
|
||||
std::function<void(void)> m_throw_on_cancel;
|
||||
std::function<void(int)> m_statusfn;
|
||||
|
||||
std::mt19937 m_rng;
|
||||
};
|
||||
|
||||
void remove_bottom_points(std::vector<SupportPoint> &pts, float lvl);
|
||||
|
||||
std::vector<Vec2f> sample_expolygon(const ExPolygon &expoly, float samples_per_mm2, std::mt19937 &rng);
|
||||
void sample_expolygon_boundary(const ExPolygon &expoly, float samples_per_mm, std::vector<Vec2f> &out, std::mt19937 &rng);
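// Usage sketch (assumed wiring): the mesh, slices and heights are prepared by
// the SLA print pipeline; the two callbacks are minimal placeholders.
//
//     SupportPointGenerator::Config cfg;   // defaults: density 1.0, head 0.4 mm
//     SupportPointGenerator gen{emesh, slices, heights, cfg,
//                               []() {},                // throw_on_cancel
//                               [](int /*status*/) {}}; // status reporting
//     const std::vector<SupportPoint> &pts = gen.output();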
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
|
||||
#endif // SLA_SUPPORTPOINTGENERATOR_HPP
|
||||
147
src/libslic3r/SLA/SupportTree.cpp
Normal file
@@ -0,0 +1,147 @@
|
||||
/**
|
||||
* In this file we will implement the automatic SLA support tree generation.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <numeric>
|
||||
#include <libslic3r/SLA/SupportTree.hpp>
|
||||
#include <libslic3r/SLA/SpatIndex.hpp>
|
||||
#include <libslic3r/SLA/SupportTreeBuilder.hpp>
|
||||
#include <libslic3r/SLA/DefaultSupportTree.hpp>
|
||||
#include <libslic3r/SLA/BranchingTreeSLA.hpp>
|
||||
|
||||
#include <libslic3r/MTUtils.hpp>
|
||||
#include <libslic3r/ClipperUtils.hpp>
|
||||
#include <libslic3r/Model.hpp>
|
||||
#include <libslic3r/TriangleMeshSlicer.hpp>
|
||||
|
||||
#include <boost/log/trivial.hpp>
|
||||
|
||||
#include <libnest2d/tools/benchmark.h>
|
||||
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
indexed_triangle_set create_support_tree(const SupportableMesh &sm,
|
||||
const JobController &ctl)
|
||||
{
|
||||
auto builder = make_unique<SupportTreeBuilder>(ctl);
|
||||
|
||||
if (sm.cfg.enabled) {
|
||||
Benchmark bench;
|
||||
bench.start();
|
||||
|
||||
switch (sm.cfg.tree_type) {
|
||||
case SupportTreeType::Default: {
|
||||
create_default_tree(*builder, sm);
|
||||
break;
|
||||
}
|
||||
case SupportTreeType::Branching: {
|
||||
create_branching_tree(*builder, sm);
|
||||
break;
|
||||
}
|
||||
case SupportTreeType::Organic: {
|
||||
// TODO
|
||||
}
|
||||
default:;
|
||||
}
|
||||
|
||||
bench.stop();
|
||||
|
||||
BOOST_LOG_TRIVIAL(info) << "Support tree creation took: "
|
||||
<< bench.getElapsedSec()
|
||||
<< " seconds";
|
||||
|
||||
builder->merge_and_cleanup(); // clean metadata, leave only the meshes.
|
||||
}
|
||||
|
||||
indexed_triangle_set out = builder->retrieve_mesh(MeshType::Support);
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
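// Build the pad under the given support mesh. When supports are disabled or
// the pad is embedded into the object (zero elevation), the bottom of the
// model is also sampled and its contours become part of the pad blueprint.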
indexed_triangle_set create_pad(const SupportableMesh &sm,
|
||||
const indexed_triangle_set &support_mesh,
|
||||
const JobController &ctl)
|
||||
{
|
||||
constexpr float PadSamplingLH = 0.1f;
|
||||
|
||||
ExPolygons model_contours; // This will store the base plate of the pad.
|
||||
double pad_h = sm.pad_cfg.full_height();
|
||||
auto gndlvl = float(ground_level(sm));
|
||||
float zstart = gndlvl - bool(sm.pad_cfg.embed_object) * sm.pad_cfg.wall_thickness_mm;
|
||||
float zend = zstart + float(pad_h + PadSamplingLH + EPSILON);
|
||||
auto heights = grid(zstart, zend, PadSamplingLH);
|
||||
|
||||
if (!sm.cfg.enabled || sm.pad_cfg.embed_object) {
|
||||
// No support (thus no elevation) or zero elevation mode
|
||||
// (sometimes called the "builtin pad") is enabled, so we will
|
||||
// get a sample from the bottom of the mesh and use it for pad
|
||||
// creation.
|
||||
sla::pad_blueprint(*sm.emesh.get_triangle_mesh(), model_contours,
|
||||
heights, ctl.cancelfn);
|
||||
}
|
||||
|
||||
ExPolygons sup_contours;
|
||||
pad_blueprint(support_mesh, sup_contours, heights, ctl.cancelfn);
|
||||
|
||||
indexed_triangle_set out;
|
||||
create_pad(sup_contours, model_contours, out, sm.pad_cfg);
|
||||
|
||||
Vec3f offs{.0f, .0f, gndlvl};
|
||||
for (auto &p : out.vertices) p += offs;
|
||||
|
||||
its_merge_vertices(out);
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
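// Slice the support and the pad mesh on the shared Z grid and merge the two
// sets of layers into a single vector of ExPolygons. The pad is only sliced
// up to the top of its own bounding box.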
std::vector<ExPolygons> slice(const indexed_triangle_set &sup_mesh,
|
||||
const indexed_triangle_set &pad_mesh,
|
||||
const std::vector<float> &grid,
|
||||
float cr,
|
||||
const JobController &ctl)
|
||||
{
|
||||
using Slices = std::vector<ExPolygons>;
|
||||
|
||||
auto slices = reserve_vector<Slices>(2);
|
||||
|
||||
if (!sup_mesh.empty()) {
|
||||
slices.emplace_back();
|
||||
slices.back() = slice_mesh_ex(sup_mesh, grid, cr, ctl.cancelfn);
|
||||
}
|
||||
|
||||
if (!pad_mesh.empty()) {
|
||||
slices.emplace_back();
|
||||
|
||||
auto bb = bounding_box(pad_mesh);
|
||||
auto maxzit = std::upper_bound(grid.begin(), grid.end(), bb.max.z());
|
||||
|
||||
auto cap = grid.end() - maxzit;
|
||||
auto padgrid = reserve_vector<float>(size_t(cap > 0 ? cap : 0));
|
||||
std::copy(grid.begin(), maxzit, std::back_inserter(padgrid));
|
||||
|
||||
slices.back() = slice_mesh_ex(pad_mesh, padgrid, cr, ctl.cancelfn);
|
||||
}
|
||||
|
||||
size_t len = grid.size();
|
||||
for (const Slices &slv : slices)
|
||||
len = std::min(len, slv.size());
|
||||
|
||||
// Either the support or the pad (or both) has to be non-empty
|
||||
if (slices.empty()) return {};
|
||||
|
||||
Slices &mrg = slices.front();
|
||||
|
||||
for (auto it = std::next(slices.begin()); it != slices.end(); ++it) {
|
||||
for (size_t i = 0; i < len; ++i) {
|
||||
Slices &slv = *it;
|
||||
std::copy(slv[i].begin(), slv[i].end(), std::back_inserter(mrg[i]));
|
||||
slv[i] = {}; // clear and delete
|
||||
}
|
||||
}
|
||||
|
||||
return mrg;
|
||||
}
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
159
src/libslic3r/SLA/SupportTree.hpp
Normal file
@@ -0,0 +1,159 @@
|
||||
#ifndef SLA_SUPPORTTREE_HPP
|
||||
#define SLA_SUPPORTTREE_HPP
|
||||
|
||||
#include <vector>
|
||||
#include <memory>
|
||||
|
||||
#include <libslic3r/Polygon.hpp>
|
||||
#include <libslic3r/ExPolygon.hpp>
|
||||
#include <libslic3r/AABBMesh.hpp>
|
||||
|
||||
#include <libslic3r/SLA/Pad.hpp>
|
||||
#include <libslic3r/SLA/SupportPoint.hpp>
|
||||
#include <libslic3r/SLA/JobController.hpp>
|
||||
#include <libslic3r/SLA/SupportTreeStrategies.hpp>
|
||||
|
||||
namespace Slic3r {
|
||||
|
||||
namespace sla {
|
||||
|
||||
struct SupportTreeConfig
|
||||
{
|
||||
bool enabled = true;
|
||||
|
||||
// Type of the support tree to generate.
|
||||
SupportTreeType tree_type = SupportTreeType::Default;
|
||||
|
||||
// Radius in mm of the pointing side of the head.
|
||||
double head_front_radius_mm = 0.2;
|
||||
|
||||
// How much the pinhead has to penetrate the model surface
|
||||
double head_penetration_mm = 0.5;
|
||||
|
||||
// Radius of the back side of the 3d arrow.
|
||||
double head_back_radius_mm = 0.5;
|
||||
|
||||
double head_fallback_radius_mm = 0.25;
|
||||
|
||||
// Width in mm from the back sphere center to the front sphere center.
|
||||
double head_width_mm = 1.0;
|
||||
|
||||
// How to connect pillars
|
||||
PillarConnectionMode pillar_connection_mode = PillarConnectionMode::dynamic;
|
||||
|
||||
// Only generate pillars that can be routed to ground
|
||||
bool ground_facing_only = false;
|
||||
|
||||
// TODO: unimplemented at the moment. This coefficient will have an impact
|
||||
// when bridges and pillars are merged. The resulting pillar should be a bit
|
||||
// thicker than the ones merging into it. How much thicker? I don't know
|
||||
// but it will be derived from this value.
|
||||
double pillar_widening_factor = .5;
|
||||
|
||||
// Radius in mm of the pillar base.
|
||||
double base_radius_mm = 2.0;
|
||||
|
||||
// The height of the pillar base cone in mm.
|
||||
double base_height_mm = 1.0;
|
||||
|
||||
// The default angle for connecting support sticks and junctions.
|
||||
double bridge_slope = M_PI/4;
|
||||
|
||||
// The max length of a bridge in mm
|
||||
double max_bridge_length_mm = 10.0;
|
||||
|
||||
// The max distance of a pillar to pillar link.
|
||||
double max_pillar_link_distance_mm = 10.0;
|
||||
|
||||
// The elevation in Z direction upwards. This is the space between the pad
|
||||
// and the model object's bounding box bottom.
|
||||
double object_elevation_mm = 10;
|
||||
|
||||
// The shortest distance between a pillar base perimeter from the model
|
||||
// body. This is only useful when elevation is set to zero.
|
||||
double pillar_base_safety_distance_mm = 0.5;
|
||||
|
||||
unsigned max_bridges_on_pillar = 3;
|
||||
|
||||
double max_weight_on_model_support = 10.f;
|
||||
|
||||
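// Overall length of a pinhead along its axis: pin sphere diameter plus
// connecting width plus back sphere diameter, reduced by the penetration
// into the model.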
double head_fullwidth() const {
|
||||
return 2 * head_front_radius_mm + head_width_mm +
|
||||
2 * head_back_radius_mm - head_penetration_mm;
|
||||
}
|
||||
|
||||
double safety_distance() const { return safety_distance_mm; }
|
||||
double safety_distance(double r) const
|
||||
{
|
||||
return std::min(safety_distance_mm, r * safety_distance_mm / head_back_radius_mm);
|
||||
}
|
||||
|
||||
// /////////////////////////////////////////////////////////////////////////
|
||||
// Compile time configuration values (candidates for runtime)
|
||||
// /////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// The max Z angle for a normal at which it will get completely ignored.
|
||||
static const double constexpr normal_cutoff_angle = 150.0 * M_PI / 180.0;
|
||||
|
||||
// The safety gap between a support structure and model body. For support
|
||||
// struts smaller than head_back_radius, the safety distance is scaled
|
||||
// down accordingly. see method safety_distance()
|
||||
static const double constexpr safety_distance_mm = 0.5;
|
||||
|
||||
static const double constexpr max_solo_pillar_height_mm = 15.0;
|
||||
static const double constexpr max_dual_pillar_height_mm = 35.0;
|
||||
static const double constexpr optimizer_rel_score_diff = 1e-10;
|
||||
static const unsigned constexpr optimizer_max_iterations = 2000;
|
||||
static const unsigned constexpr pillar_cascade_neighbors = 3;
|
||||
|
||||
};
|
||||
|
||||
enum class MeshType { Support, Pad };
|
||||
|
||||
struct SupportableMesh
|
||||
{
|
||||
AABBMesh emesh;
|
||||
SupportPoints pts;
|
||||
SupportTreeConfig cfg;
|
||||
PadConfig pad_cfg;
|
||||
double zoffset = 0.;
|
||||
|
||||
explicit SupportableMesh(const indexed_triangle_set &trmsh,
|
||||
const SupportPoints &sp,
|
||||
const SupportTreeConfig &c)
|
||||
: emesh{trmsh}, pts{sp}, cfg{c}
|
||||
{}
|
||||
|
||||
// explicit SupportableMesh(const AABBMesh &em,
|
||||
// const SupportPoints &sp,
|
||||
// const SupportTreeConfig &c)
|
||||
// : emesh{em}, pts{sp}, cfg{c}
|
||||
// {}
|
||||
};
|
||||
|
||||
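// Z level where the supports rest: the mesh Z offset lowered by the object
// elevation when supports are enabled and the pad is not embedded, or raised
// by the pad wall thickness when the pad is embedded into the object.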
inline double ground_level(const SupportableMesh &sm)
|
||||
{
|
||||
double lvl = sm.zoffset -
|
||||
!bool(sm.pad_cfg.embed_object) * sm.cfg.enabled * sm.cfg.object_elevation_mm +
|
||||
bool(sm.pad_cfg.embed_object) * sm.pad_cfg.wall_thickness_mm;
|
||||
|
||||
return lvl;
|
||||
}
|
||||
|
||||
indexed_triangle_set create_support_tree(const SupportableMesh &mesh,
|
||||
const JobController &ctl);
|
||||
|
||||
indexed_triangle_set create_pad(const SupportableMesh &model_mesh,
|
||||
const indexed_triangle_set &support_mesh,
|
||||
const JobController &ctl);
|
||||
|
||||
std::vector<ExPolygons> slice(const indexed_triangle_set &support_mesh,
|
||||
const indexed_triangle_set &pad_mesh,
|
||||
const std::vector<float> &grid,
|
||||
float closing_radius,
|
||||
const JobController &ctl);
|
||||
|
||||
} // namespace sla
|
||||
} // namespace Slic3r
|
||||
|
||||
#endif // SLA_SUPPORTTREE_HPP
|
||||
178
src/libslic3r/SLA/SupportTreeBuilder.cpp
Normal file
@@ -0,0 +1,178 @@
|
||||
#define NOMINMAX
|
||||
|
||||
#include <libslic3r/SLA/SupportTreeBuilder.hpp>
|
||||
#include <libslic3r/SLA/SupportTreeUtils.hpp>
|
||||
#include <libslic3r/SLA/SupportTreeMesher.hpp>
|
||||
//#include <libslic3r/SLA/Contour3D.hpp>
|
||||
|
||||
namespace Slic3r {
|
||||
namespace sla {
|
||||
|
||||
Head::Head(double r_big_mm,
|
||||
double r_small_mm,
|
||||
double length_mm,
|
||||
double penetration,
|
||||
const Vec3d &direction,
|
||||
const Vec3d &offset)
|
||||
: dir(direction)
|
||||
, pos(offset)
|
||||
, r_back_mm(r_big_mm)
|
||||
, r_pin_mm(r_small_mm)
|
||||
, width_mm(length_mm)
|
||||
, penetration_mm(penetration)
|
||||
{
|
||||
}
|
||||
|
||||
SupportTreeBuilder::SupportTreeBuilder(SupportTreeBuilder &&o)
|
||||
: m_heads(std::move(o.m_heads))
|
||||
, m_head_indices{std::move(o.m_head_indices)}
|
||||
, m_pillars{std::move(o.m_pillars)}
|
||||
, m_bridges{std::move(o.m_bridges)}
|
||||
, m_crossbridges{std::move(o.m_crossbridges)}
|
||||
, m_meshcache{std::move(o.m_meshcache)}
|
||||
, m_meshcache_valid{o.m_meshcache_valid}
|
||||
, m_model_height{o.m_model_height}
|
||||
{}
|
||||
|
||||
SupportTreeBuilder::SupportTreeBuilder(const SupportTreeBuilder &o)
|
||||
: m_heads(o.m_heads)
|
||||
, m_head_indices{o.m_head_indices}
|
||||
, m_pillars{o.m_pillars}
|
||||
, m_bridges{o.m_bridges}
|
||||
, m_crossbridges{o.m_crossbridges}
|
||||
, m_meshcache{o.m_meshcache}
|
||||
, m_meshcache_valid{o.m_meshcache_valid}
|
||||
, m_model_height{o.m_model_height}
|
||||
{}
|
||||
|
||||
SupportTreeBuilder &SupportTreeBuilder::operator=(SupportTreeBuilder &&o)
|
||||
{
|
||||
m_heads = std::move(o.m_heads);
|
||||
m_head_indices = std::move(o.m_head_indices);
|
||||
m_pillars = std::move(o.m_pillars);
|
||||
m_bridges = std::move(o.m_bridges);
|
||||
m_crossbridges = std::move(o.m_crossbridges);
|
||||
m_meshcache = std::move(o.m_meshcache);
|
||||
m_meshcache_valid = o.m_meshcache_valid;
|
||||
m_model_height = o.m_model_height;
|
||||
return *this;
|
||||
}
|
||||
|
||||
SupportTreeBuilder &SupportTreeBuilder::operator=(const SupportTreeBuilder &o)
|
||||
{
|
||||
m_heads = o.m_heads;
|
||||
m_head_indices = o.m_head_indices;
|
||||
m_pillars = o.m_pillars;
|
||||
m_bridges = o.m_bridges;
|
||||
m_crossbridges = o.m_crossbridges;
|
||||
m_meshcache = o.m_meshcache;
|
||||
m_meshcache_valid = o.m_meshcache_valid;
|
||||
m_model_height = o.m_model_height;
|
||||
return *this;
|
||||
}
|
||||
|
||||
void SupportTreeBuilder::add_pillar_base(long pid, double baseheight, double radius)
|
||||
{
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
assert(pid >= 0 && size_t(pid) < m_pillars.size());
|
||||
Pillar& pll = m_pillars[size_t(pid)];
|
||||
m_pedestals.emplace_back(pll.endpt, std::min(baseheight, pll.height),
|
||||
std::max(radius, pll.r_start), pll.r_start);
|
||||
|
||||
m_pedestals.back().id = m_pedestals.size() - 1;
|
||||
m_meshcache_valid = false;
|
||||
}
|
||||
|
||||
const indexed_triangle_set &SupportTreeBuilder::merged_mesh(size_t steps) const
|
||||
{
|
||||
if (m_meshcache_valid) return m_meshcache;
|
||||
|
||||
indexed_triangle_set merged;
|
||||
|
||||
for (auto &head : m_heads) {
|
||||
if (ctl().stopcondition()) break;
|
||||
if (head.is_valid()) its_merge(merged, get_mesh(head, steps));
|
||||
}
|
||||
|
||||
for (auto &pill : m_pillars) {
|
||||
if (ctl().stopcondition()) break;
|
||||
its_merge(merged, get_mesh(pill, steps));
|
||||
}
|
||||
|
||||
for (auto &pedest : m_pedestals) {
|
||||
if (ctl().stopcondition()) break;
|
||||
its_merge(merged, get_mesh(pedest, steps));
|
||||
}
|
||||
|
||||
for (auto &j : m_junctions) {
|
||||
if (ctl().stopcondition()) break;
|
||||
its_merge(merged, get_mesh(j, steps));
|
||||
}
|
||||
|
||||
for (auto &bs : m_bridges) {
|
||||
if (ctl().stopcondition()) break;
|
||||
its_merge(merged, get_mesh(bs, steps));
|
||||
}
|
||||
|
||||
for (auto &bs : m_crossbridges) {
|
||||
if (ctl().stopcondition()) break;
|
||||
its_merge(merged, get_mesh(bs, steps));
|
||||
}
|
||||
|
||||
for (auto &bs : m_diffbridges) {
|
||||
if (ctl().stopcondition()) break;
|
||||
its_merge(merged, get_mesh(bs, steps));
|
||||
}
|
||||
|
||||
for (auto &anch : m_anchors) {
|
||||
if (ctl().stopcondition()) break;
|
||||
its_merge(merged, get_mesh(anch, steps));
|
||||
}
|
||||
|
||||
if (ctl().stopcondition()) {
|
||||
// In case of failure we have to return an empty mesh
|
||||
m_meshcache = {};
|
||||
return m_meshcache;
|
||||
}
|
||||
|
||||
m_meshcache = std::move(merged);
|
||||
|
||||
// The mesh will be passed by const-pointer to TriangleMeshSlicer,
|
||||
// which will need this.
|
||||
its_merge_vertices(m_meshcache);
|
||||
|
||||
BoundingBoxf3 bb = bounding_box(m_meshcache);
|
||||
m_model_height = bb.max(Z) - bb.min(Z);
|
||||
|
||||
m_meshcache_valid = true;
|
||||
return m_meshcache;
|
||||
}
|
||||
|
||||
const indexed_triangle_set &SupportTreeBuilder::merge_and_cleanup()
|
||||
{
|
||||
// in case the mesh is not generated, it should be...
|
||||
auto &ret = merged_mesh();
|
||||
|
||||
// Doing clear() does not guarantee that the memory is released.
|
||||
clear_and_shrink(m_heads);
|
||||
clear_and_shrink(m_head_indices);
|
||||
clear_and_shrink(m_pillars);
|
||||
clear_and_shrink(m_junctions);
|
||||
clear_and_shrink(m_bridges);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
const indexed_triangle_set &SupportTreeBuilder::retrieve_mesh(MeshType meshtype) const
|
||||
{
|
||||
static const indexed_triangle_set EMPTY_MESH;
|
||||
|
||||
switch(meshtype) {
|
||||
case MeshType::Support: return merged_mesh();
|
||||
case MeshType::Pad: return EMPTY_MESH; //pad().tmesh;
|
||||
}
|
||||
|
||||
return m_meshcache;
|
||||
}
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
435
src/libslic3r/SLA/SupportTreeBuilder.hpp
Normal file
@@ -0,0 +1,435 @@
|
||||
#ifndef SLA_SUPPORTTREEBUILDER_HPP
|
||||
#define SLA_SUPPORTTREEBUILDER_HPP
|
||||
|
||||
#include <libslic3r/Execution/ExecutionTBB.hpp>
|
||||
#include <libslic3r/SLA/SupportTree.hpp>
|
||||
#include <libslic3r/TriangleMesh.hpp>
|
||||
#include <libslic3r/SLA/Pad.hpp>
|
||||
#include <libslic3r/MTUtils.hpp>
|
||||
|
||||
namespace Slic3r {
|
||||
namespace sla {
|
||||
|
||||
/**
|
||||
* Terminology:
|
||||
*
|
||||
* Support point:
|
||||
* The point on the model surface that needs support.
|
||||
*
|
||||
* Pillar:
|
||||
* A thick column that spans from a support point to the ground and has
|
||||
* a thick cone shaped base where it touches the ground.
|
||||
*
|
||||
* Ground facing support point:
|
||||
* A support point that can be directly connected with the ground with a pillar
|
||||
* that does not collide or cut through the model.
|
||||
*
|
||||
* Non ground facing support point:
|
||||
* A support point that cannot be directly connected with the ground (only with
|
||||
* the model surface).
|
||||
*
|
||||
* Head:
|
||||
* The pinhead that connects to the model surface with the sharp end and
|
||||
* to a pillar or bridge stick with the dull end.
|
||||
*
|
||||
* Headless support point:
|
||||
* A support point on the model surface for which there is not enough place for
|
||||
* the head. It is either in a hole or there is some barrier that would collide
|
||||
* with the head geometry. The headless support point can be ground facing and
|
||||
* non ground facing as well.
|
||||
*
|
||||
* Bridge:
|
||||
* A stick that connects two pillars or a head with a pillar.
|
||||
*
|
||||
* Junction:
|
||||
* A small ball in the intersection of two or more sticks (pillar, bridge, ...)
|
||||
*
|
||||
* CompactBridge:
|
||||
* A bridge that connects a headless support point with the model surface or a
|
||||
* nearby pillar.
|
||||
*/
|
||||
|
||||
template<class T, int I> T distance(const Vec<I, T>& p) {
|
||||
return p.norm();
|
||||
}
|
||||
|
||||
template<class T, int I>
|
||||
T distance(const Vec<I, T>& pp1, const Vec<I, T>& pp2) {
|
||||
return (pp1 - pp2).norm();
|
||||
}
|
||||
|
||||
const Vec3d DOWN = {0.0, 0.0, -1.0};
|
||||
|
||||
struct SupportTreeNode
|
||||
{
|
||||
static const constexpr long ID_UNSET = -1;
|
||||
|
||||
long id = ID_UNSET; // For identification within a tree.
|
||||
};
|
||||
|
||||
// A junction connecting bridges and pillars
|
||||
struct Junction: public SupportTreeNode {
|
||||
double r = 1;
|
||||
Vec3d pos;
|
||||
|
||||
Junction(const Vec3d &tr, double r_mm) : r(r_mm), pos(tr) {}
|
||||
};
|
||||
|
||||
// A pinhead originating from a support point
|
||||
struct Head: public SupportTreeNode {
|
||||
Vec3d dir = DOWN;
|
||||
Vec3d pos = {0, 0, 0};
|
||||
|
||||
double r_back_mm = 1;
|
||||
double r_pin_mm = 0.5;
|
||||
double width_mm = 2;
|
||||
double penetration_mm = 0.5;
|
||||
|
||||
// If there is a pillar connecting to this head, then the id will be set.
|
||||
long pillar_id = ID_UNSET;
|
||||
|
||||
long bridge_id = ID_UNSET;
|
||||
|
||||
inline void invalidate() { id = ID_UNSET; }
|
||||
inline bool is_valid() const { return id >= 0; }
|
||||
|
||||
Head(double r_big_mm,
|
||||
double r_small_mm,
|
||||
double length_mm,
|
||||
double penetration,
|
||||
const Vec3d &direction = DOWN, // direction (normal to the dull end)
|
||||
const Vec3d &offset = {0, 0, 0} // displacement
|
||||
);
|
||||
|
||||
inline double real_width() const
|
||||
{
|
||||
return 2 * r_pin_mm + width_mm + 2 * r_back_mm ;
|
||||
}
|
||||
|
||||
inline double fullwidth() const
|
||||
{
|
||||
return real_width() - penetration_mm;
|
||||
}
|
||||
|
||||
inline Junction junction() const
|
||||
{
|
||||
Junction j{pos + (fullwidth() - r_back_mm) * dir, r_back_mm};
|
||||
j.id = -this->id; // Remember that this junction is from a head
|
||||
|
||||
return j;
|
||||
}
|
||||
|
||||
inline Vec3d junction_point() const
|
||||
{
|
||||
return junction().pos;
|
||||
}
|
||||
};
|
||||
|
||||
// A straight pillar. Only has an endpoint and a height. No explicit starting
|
||||
// point is given, as it would allow the pillar to be angled.
|
||||
// Some connection info with other primitives can also be tracked.
|
||||
struct Pillar: public SupportTreeNode {
|
||||
double height, r_start, r_end;
|
||||
Vec3d endpt;
|
||||
|
||||
// If the pillar connects to a head, this is the id of that head
|
||||
bool starts_from_head = true; // Could start from a junction as well
|
||||
long start_junction_id = ID_UNSET;
|
||||
|
||||
// How many bridges are connected to this pillar
|
||||
unsigned bridges = 0;
|
||||
|
||||
// How many pillars are cascaded with this one
|
||||
unsigned links = 0;
|
||||
|
||||
Pillar(const Vec3d &endp, double h, double start_radius, double end_radius)
|
||||
: height{h}
|
||||
, r_start(start_radius)
|
||||
, r_end(end_radius)
|
||||
, endpt(endp)
|
||||
, starts_from_head(false)
|
||||
{}
|
||||
|
||||
Pillar(const Vec3d &endp, double h, double start_radius)
|
||||
: Pillar(endp, h, start_radius, start_radius)
|
||||
{}
|
||||
|
||||
Vec3d startpoint() const
|
||||
{
|
||||
return {endpt.x(), endpt.y(), endpt.z() + height};
|
||||
}
|
||||
|
||||
const Vec3d& endpoint() const { return endpt; }
|
||||
};
|
||||
|
||||
// A base for pillars or bridges that end on the ground
|
||||
struct Pedestal: public SupportTreeNode {
|
||||
Vec3d pos;
|
||||
double height, r_bottom, r_top;
|
||||
|
||||
Pedestal(const Vec3d &p, double h, double rbottom, double rtop)
|
||||
: pos{p}, height{h}, r_bottom{rbottom}, r_top{rtop}
|
||||
{}
|
||||
};
|
||||
|
||||
// This is the thing that anchors a pillar or bridge to the model body.
|
||||
// It is actually a reverse pinhead.
|
||||
struct Anchor: public Head { using Head::Head; };
|
||||
|
||||
// A Bridge between two pillars (with junction endpoints)
|
||||
struct Bridge: public SupportTreeNode {
|
||||
double r = 0.8;
|
||||
Vec3d startp = Vec3d::Zero(), endp = Vec3d::Zero();
|
||||
|
||||
Bridge(const Vec3d &j1,
|
||||
const Vec3d &j2,
|
||||
double r_mm = 0.8): r{r_mm}, startp{j1}, endp{j2}
|
||||
{}
|
||||
|
||||
double get_length() const { return (endp - startp).norm(); }
|
||||
Vec3d get_dir() const { return (endp - startp).normalized(); }
|
||||
};
|
||||
|
||||
struct DiffBridge: public Bridge {
|
||||
double end_r;
|
||||
|
||||
DiffBridge(const Vec3d &p_s, const Vec3d &p_e, double r_s, double r_e)
|
||||
: Bridge{p_s, p_e, r_s}, end_r{r_e}
|
||||
{}
|
||||
|
||||
DiffBridge(const Junction &j_s, const Junction &j_e)
|
||||
: Bridge{j_s.pos, j_e.pos, j_s.r}, end_r{j_e.r}
|
||||
{}
|
||||
};
|
||||
|
||||
// This class will hold the support tree parts (not meshes, but logical parts)
|
||||
// with some additional bookkeeping as well. Various parts of the support
|
||||
// geometry are stored separately and are merged when the caller queries the
|
||||
// merged mesh (for every part, there is a meshing routine, see
|
||||
// SupportTreeMesher.hpp). The merged result is cached for fast subsequent
|
||||
// delivery of the merged mesh which can be quite complex. The support tree
|
||||
// creation algorithm can use an instance of this class as a somewhat higher
|
||||
// level tool for crafting the 3D support mesh. Parts can be added with the
|
||||
// appropriate methods such as add_head or add_pillar which forwards the
|
||||
// constructor arguments and fills the IDs of these substructures. The IDs are
|
||||
// basically indices into the arrays of the appropriate type (heads, pillars,
|
||||
// etc...). One can later query e.g. a pillar for a specific head...
|
||||
class SupportTreeBuilder {
|
||||
// For heads it is beneficial to use the same IDs as for the support points.
|
||||
std::vector<Head> m_heads;
|
||||
std::vector<size_t> m_head_indices;
|
||||
std::vector<Pillar> m_pillars;
|
||||
std::vector<Junction> m_junctions;
|
||||
std::vector<Bridge> m_bridges;
|
||||
std::vector<Bridge> m_crossbridges;
|
||||
std::vector<DiffBridge> m_diffbridges;
|
||||
std::vector<Pedestal> m_pedestals;
|
||||
std::vector<Anchor> m_anchors;
|
||||
|
||||
JobController m_ctl;
|
||||
|
||||
using Mutex = tbb::spin_mutex;
|
||||
|
||||
mutable indexed_triangle_set m_meshcache;
|
||||
mutable Mutex m_mutex;
|
||||
mutable bool m_meshcache_valid = false;
|
||||
mutable double m_model_height = 0; // the full height of the model
|
||||
|
||||
template<class BridgeT, class...Args>
|
||||
const BridgeT& _add_bridge(std::vector<BridgeT> &br, Args&&... args)
|
||||
{
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
br.emplace_back(std::forward<Args>(args)...);
|
||||
br.back().id = long(br.size() - 1);
|
||||
m_meshcache_valid = false;
|
||||
return br.back();
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
explicit SupportTreeBuilder(const JobController &ctl = {}) : m_ctl{ctl} {}
|
||||
SupportTreeBuilder(SupportTreeBuilder &&o);
|
||||
SupportTreeBuilder(const SupportTreeBuilder &o);
|
||||
SupportTreeBuilder& operator=(SupportTreeBuilder &&o);
|
||||
SupportTreeBuilder& operator=(const SupportTreeBuilder &o);
|
||||
|
||||
const JobController &ctl() const { return m_ctl; }
|
||||
|
||||
template<class...Args> Head& add_head(unsigned id, Args&&... args)
|
||||
{
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
m_heads.emplace_back(std::forward<Args>(args)...);
|
||||
m_heads.back().id = id;
|
||||
|
||||
if (id >= m_head_indices.size()) m_head_indices.resize(id + 1);
|
||||
m_head_indices[id] = m_heads.size() - 1;
|
||||
|
||||
m_meshcache_valid = false;
|
||||
return m_heads.back();
|
||||
}
|
||||
|
||||
long add_pillar(long headid, double length)
|
||||
{
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
if (m_pillars.capacity() < m_heads.size())
|
||||
m_pillars.reserve(m_heads.size() * 10);
|
||||
|
||||
assert(headid >= 0 && size_t(headid) < m_head_indices.size());
|
||||
Head &head = m_heads[m_head_indices[size_t(headid)]];
|
||||
|
||||
Vec3d hjp = head.junction_point() - Vec3d{0, 0, length};
|
||||
m_pillars.emplace_back(hjp, length, head.r_back_mm);
|
||||
|
||||
Pillar& pillar = m_pillars.back();
|
||||
pillar.id = long(m_pillars.size() - 1);
|
||||
head.pillar_id = pillar.id;
|
||||
pillar.start_junction_id = head.id;
|
||||
pillar.starts_from_head = true;
|
||||
|
||||
m_meshcache_valid = false;
|
||||
return pillar.id;
|
||||
}
|
||||
|
||||
void add_pillar_base(long pid, double baseheight = 3, double radius = 2);
|
||||
|
||||
template<class...Args> const Anchor& add_anchor(Args&&...args)
|
||||
{
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
m_anchors.emplace_back(std::forward<Args>(args)...);
|
||||
m_anchors.back().id = long(m_junctions.size() - 1);
|
||||
m_meshcache_valid = false;
|
||||
return m_anchors.back();
|
||||
}
|
||||
|
||||
void increment_bridges(const Pillar& pillar)
|
||||
{
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
assert(pillar.id >= 0 && size_t(pillar.id) < m_pillars.size());
|
||||
|
||||
if(pillar.id >= 0 && size_t(pillar.id) < m_pillars.size())
|
||||
m_pillars[size_t(pillar.id)].bridges++;
|
||||
}
|
||||
|
||||
void increment_links(const Pillar& pillar)
|
||||
{
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
assert(pillar.id >= 0 && size_t(pillar.id) < m_pillars.size());
|
||||
|
||||
if(pillar.id >= 0 && size_t(pillar.id) < m_pillars.size())
|
||||
m_pillars[size_t(pillar.id)].links++;
|
||||
}
|
||||
|
||||
unsigned bridgecount(const Pillar &pillar) const {
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
assert(pillar.id >= 0 && size_t(pillar.id) < m_pillars.size());
|
||||
return pillar.bridges;
|
||||
}
|
||||
|
||||
template<class...Args> long add_pillar(Args&&...args)
|
||||
{
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
if (m_pillars.capacity() < m_heads.size())
|
||||
m_pillars.reserve(m_heads.size() * 10);
|
||||
|
||||
m_pillars.emplace_back(std::forward<Args>(args)...);
|
||||
Pillar& pillar = m_pillars.back();
|
||||
pillar.id = long(m_pillars.size() - 1);
|
||||
pillar.starts_from_head = false;
|
||||
m_meshcache_valid = false;
|
||||
return pillar.id;
|
||||
}
|
||||
|
||||
template<class...Args> const Junction& add_junction(Args&&... args)
|
||||
{
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
m_junctions.emplace_back(std::forward<Args>(args)...);
|
||||
m_junctions.back().id = long(m_junctions.size() - 1);
|
||||
m_meshcache_valid = false;
|
||||
return m_junctions.back();
|
||||
}
|
||||
|
||||
const Bridge& add_bridge(const Vec3d &s, const Vec3d &e, double r)
|
||||
{
|
||||
return _add_bridge(m_bridges, s, e, r);
|
||||
}
|
||||
|
||||
const Bridge& add_bridge(long headid, const Vec3d &endp)
|
||||
{
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
assert(headid >= 0 && size_t(headid) < m_head_indices.size());
|
||||
|
||||
Head &h = m_heads[m_head_indices[size_t(headid)]];
|
||||
m_bridges.emplace_back(h.junction_point(), endp, h.r_back_mm);
|
||||
m_bridges.back().id = long(m_bridges.size() - 1);
|
||||
|
||||
h.bridge_id = m_bridges.back().id;
|
||||
m_meshcache_valid = false;
|
||||
return m_bridges.back();
|
||||
}
|
||||
|
||||
template<class...Args> const Bridge& add_crossbridge(Args&&... args)
|
||||
{
|
||||
return _add_bridge(m_crossbridges, std::forward<Args>(args)...);
|
||||
}
|
||||
|
||||
template<class...Args> const DiffBridge& add_diffbridge(Args&&... args)
|
||||
{
|
||||
return _add_bridge(m_diffbridges, std::forward<Args>(args)...);
|
||||
}
|
||||
|
||||
Head &head(unsigned id)
|
||||
{
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
assert(id < m_head_indices.size());
|
||||
|
||||
m_meshcache_valid = false;
|
||||
return m_heads[m_head_indices[id]];
|
||||
}
|
||||
|
||||
inline size_t pillarcount() const {
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
return m_pillars.size();
|
||||
}
|
||||
|
||||
inline const std::vector<Pillar> &pillars() const { return m_pillars; }
|
||||
inline const std::vector<Head> &heads() const { return m_heads; }
|
||||
inline const std::vector<Bridge> &bridges() const { return m_bridges; }
|
||||
inline const std::vector<Bridge> &crossbridges() const { return m_crossbridges; }
|
||||
|
||||
template<class T> inline IntegerOnly<T, const Pillar&> pillar(T id) const
|
||||
{
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
assert(id >= 0 && size_t(id) < m_pillars.size() &&
|
||||
size_t(id) < std::numeric_limits<size_t>::max());
|
||||
|
||||
return m_pillars[size_t(id)];
|
||||
}
|
||||
|
||||
template<class T> inline IntegerOnly<T, Pillar&> pillar(T id)
|
||||
{
|
||||
std::lock_guard<Mutex> lk(m_mutex);
|
||||
assert(id >= 0 && size_t(id) < m_pillars.size() &&
|
||||
size_t(id) < std::numeric_limits<size_t>::max());
|
||||
|
||||
return m_pillars[size_t(id)];
|
||||
}
|
||||
|
||||
// WITHOUT THE PAD!!!
|
||||
const indexed_triangle_set &merged_mesh(size_t steps = 45) const;
|
||||
|
||||
// Intended to be called after the generation is fully complete
|
||||
const indexed_triangle_set & merge_and_cleanup();
|
||||
|
||||
const indexed_triangle_set &retrieve_mesh(
|
||||
MeshType meshtype = MeshType::Support) const;
|
||||
|
||||
void retrieve_full_mesh(indexed_triangle_set &outmesh) const
|
||||
{
|
||||
its_merge(outmesh, retrieve_mesh(MeshType::Support));
|
||||
its_merge(outmesh, retrieve_mesh(MeshType::Pad));
|
||||
}
|
||||
};
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
|
||||
#endif // SLA_SUPPORTTREEBUILDER_HPP
|
||||
271
src/libslic3r/SLA/SupportTreeMesher.cpp
Normal file
@@ -0,0 +1,271 @@
|
||||
#include "SupportTreeMesher.hpp"
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
indexed_triangle_set sphere(double rho, Portion portion, double fa) {
|
||||
|
||||
indexed_triangle_set ret;
|
||||
|
||||
// prohibit close to zero radius
|
||||
if(rho <= 1e-6 && rho >= -1e-6) return ret;
|
||||
|
||||
auto& vertices = ret.vertices;
|
||||
auto& facets = ret.indices;
|
||||
|
||||
// Algorithm:
|
||||
// Add points one-by-one to the sphere grid and form facets using relative
|
||||
// coordinates. Sphere is composed effectively of a mesh of stacked circles.
|
||||
|
||||
// adjust via rounding to get an even multiple for any provided angle.
|
||||
double angle = (2 * PI / floor(2*PI / fa) );
|
||||
|
||||
// Ring to be scaled to generate the steps of the sphere
|
||||
std::vector<double> ring;
|
||||
|
||||
for (double i = 0; i < 2*PI; i+=angle) ring.emplace_back(i);
|
||||
|
||||
const auto sbegin = size_t(2*std::get<0>(portion)/angle);
|
||||
const auto send = size_t(2*std::get<1>(portion)/angle);
|
||||
|
||||
const size_t steps = ring.size();
|
||||
const double increment = 1.0 / double(steps);
|
||||
|
||||
// special case: first ring connects to 0,0,0
|
||||
// insert and form facets.
|
||||
if (sbegin == 0)
|
||||
vertices.emplace_back(
|
||||
Vec3f(0.f, 0.f, float(-rho + increment * sbegin * 2. * rho)));
|
||||
|
||||
auto id = coord_t(vertices.size());
|
||||
for (size_t i = 0; i < ring.size(); i++) {
|
||||
// Fixed scaling
|
||||
const double z = -rho + increment*rho*2.0 * (sbegin + 1.0);
|
||||
// radius of the circle for this step.
|
||||
const double r = std::sqrt(std::abs(rho*rho - z*z));
|
||||
Vec2d b = Eigen::Rotation2Dd(ring[i]) * Eigen::Vector2d(0, r);
|
||||
vertices.emplace_back(Vec3d(b(0), b(1), z).cast<float>());
|
||||
|
||||
if (sbegin == 0)
|
||||
(i == 0) ? facets.emplace_back(coord_t(ring.size()), 0, 1) :
|
||||
facets.emplace_back(id - 1, 0, id);
|
||||
++id;
|
||||
}
|
||||
|
||||
// General case: insert and form facets for each step,
|
||||
// joining it to the ring below it.
|
||||
for (size_t s = sbegin + 2; s < send - 1; s++) {
|
||||
const double z = -rho + increment * double(s * 2. * rho);
|
||||
const double r = std::sqrt(std::abs(rho*rho - z*z));
|
||||
|
||||
for (size_t i = 0; i < ring.size(); i++) {
|
||||
Vec2d b = Eigen::Rotation2Dd(ring[i]) * Eigen::Vector2d(0, r);
|
||||
vertices.emplace_back(Vec3d(b(0), b(1), z).cast<float>());
|
||||
auto id_ringsize = coord_t(id - int(ring.size()));
|
||||
if (i == 0) {
|
||||
// wrap around
|
||||
facets.emplace_back(id - 1, id, id + coord_t(ring.size() - 1) );
|
||||
facets.emplace_back(id - 1, id_ringsize, id);
|
||||
} else {
|
||||
facets.emplace_back(id_ringsize - 1, id_ringsize, id);
|
||||
facets.emplace_back(id - 1, id_ringsize - 1, id);
|
||||
}
|
||||
id++;
|
||||
}
|
||||
}
|
||||
|
||||
// special case: last ring connects to 0,0,rho*2.0
|
||||
// only form facets.
|
||||
if(send >= size_t(2*PI / angle)) {
|
||||
vertices.emplace_back(0.f, 0.f, float(-rho + increment*send*2.0*rho));
|
||||
for (size_t i = 0; i < ring.size(); i++) {
|
||||
auto id_ringsize = coord_t(id - int(ring.size()));
|
||||
if (i == 0) {
|
||||
// third vertex is on the other side of the ring.
|
||||
facets.emplace_back(id - 1, id_ringsize, id);
|
||||
} else {
|
||||
auto ci = coord_t(id_ringsize + coord_t(i));
|
||||
facets.emplace_back(ci - 1, ci, id);
|
||||
}
|
||||
}
|
||||
}
|
||||
id++;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
indexed_triangle_set pinhead(double r_pin,
|
||||
double r_back,
|
||||
double length,
|
||||
size_t steps)
|
||||
{
|
||||
assert(steps > 0);
|
||||
assert(length >= 0.);
|
||||
assert(r_back > 0.);
|
||||
assert(r_pin > 0.);
|
||||
|
||||
indexed_triangle_set mesh;
|
||||
|
||||
// We create two spheres which will be connected with a robe that fits
|
||||
// both circles perfectly.
|
||||
|
||||
// Set up the model detail level
|
||||
const double detail = 2 * PI / steps;
|
||||
|
||||
// We don't generate whole circles. Instead, we generate only the
|
||||
// portions which are visible (not covered by the robe). To know the
|
||||
// exact portion of the bottom and top circles we need to use some
|
||||
// rules of tangent circles from which we can derive (using simple
|
||||
// triangles) the following relations:
|
||||
|
||||
// The height of the whole mesh
|
||||
const double h = r_back + r_pin + length;
|
||||
double phi = PI / 2. - std::acos((r_back - r_pin) / h);
|
||||
|
||||
if (std::isnan(phi))
|
||||
return mesh;
|
||||
|
||||
// To generate a whole circle we would pass a portion of (0, Pi)
|
||||
// To generate only a half horizontal circle we can pass (0, Pi/2)
|
||||
// The calculated phi is an offset to the half circles needed to smooth
|
||||
// the transition from the circle to the robe geometry
|
||||
|
||||
auto s1 = sphere(r_back, make_portion(0, PI / 2 + phi), detail);
|
||||
auto s2 = sphere(r_pin, make_portion(PI / 2 + phi, PI), detail);
|
||||
|
||||
for (auto &p : s2.vertices) p.z() += h;
|
||||
|
||||
its_merge(mesh, s1);
|
||||
its_merge(mesh, s2);
|
||||
|
||||
for (size_t idx1 = s1.vertices.size() - steps, idx2 = s1.vertices.size();
|
||||
idx1 < s1.vertices.size() - 1; idx1++, idx2++) {
|
||||
coord_t i1s1 = coord_t(idx1), i1s2 = coord_t(idx2);
|
||||
coord_t i2s1 = i1s1 + 1, i2s2 = i1s2 + 1;
|
||||
|
||||
mesh.indices.emplace_back(i1s1, i2s1, i2s2);
|
||||
mesh.indices.emplace_back(i1s1, i2s2, i1s2);
|
||||
}
|
||||
|
||||
auto i1s1 = coord_t(s1.vertices.size()) - coord_t(steps);
|
||||
auto i2s1 = coord_t(s1.vertices.size()) - 1;
|
||||
auto i1s2 = coord_t(s1.vertices.size());
|
||||
auto i2s2 = coord_t(s1.vertices.size()) + coord_t(steps) - 1;
|
||||
|
||||
mesh.indices.emplace_back(i2s2, i2s1, i1s1);
|
||||
mesh.indices.emplace_back(i1s2, i2s2, i1s1);
|
||||
|
||||
return mesh;
|
||||
}
|
||||
|
||||
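// Generate a closed truncated cone: a circle of radius r_bottom at 'pos' and
// a circle of radius r_top at pos.z() + baseheight, connected along the side
// and capped with triangle fans around two center vertices.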
indexed_triangle_set halfcone(double baseheight,
|
||||
double r_bottom,
|
||||
double r_top,
|
||||
const Vec3d &pos,
|
||||
size_t steps)
|
||||
{
|
||||
assert(steps > 0);
|
||||
|
||||
if (baseheight <= 0 || steps <= 0 || (r_bottom <= 0. && r_top <= 0.))
|
||||
return {};
|
||||
|
||||
indexed_triangle_set base;
|
||||
|
||||
double a = 2 * PI / steps;
|
||||
auto last = int(steps - 1);
|
||||
Vec3d ep{pos.x(), pos.y(), pos.z() + baseheight};
|
||||
for (size_t i = 0; i < steps; ++i) {
|
||||
double phi = i * a;
|
||||
auto x = float(pos.x() + r_top * std::cos(phi));
|
||||
auto y = float(pos.y() + r_top * std::sin(phi));
|
||||
base.vertices.emplace_back(x, y, float(ep.z()));
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < steps; ++i) {
|
||||
double phi = i * a;
|
||||
auto x = float(pos.x() + r_bottom * std::cos(phi));
|
||||
auto y = float(pos.y() + r_bottom * std::sin(phi));
|
||||
base.vertices.emplace_back(x, y, float(pos.z()));
|
||||
}
|
||||
|
||||
base.vertices.emplace_back(pos.cast<float>());
|
||||
base.vertices.emplace_back(ep.cast<float>());
|
||||
|
||||
auto &indices = base.indices;
|
||||
auto hcenter = int(base.vertices.size() - 1);
|
||||
auto lcenter = int(base.vertices.size() - 2);
|
||||
auto offs = int(steps);
|
||||
for (int i = 0; i < last; ++i) {
|
||||
indices.emplace_back(i, i + offs, offs + i + 1);
|
||||
indices.emplace_back(i, offs + i + 1, i + 1);
|
||||
indices.emplace_back(i, i + 1, hcenter);
|
||||
indices.emplace_back(lcenter, offs + i + 1, offs + i);
|
||||
}
|
||||
|
||||
indices.emplace_back(0, last, offs);
|
||||
indices.emplace_back(last, offs + last, offs);
|
||||
indices.emplace_back(hcenter, last, 0);
|
||||
indices.emplace_back(offs, offs + last, lcenter);
|
||||
|
||||
return base;
|
||||
}
|
||||
|
||||
indexed_triangle_set get_mesh(const Head &h, size_t steps)
|
||||
{
|
||||
indexed_triangle_set mesh = pinhead(h.r_pin_mm, h.r_back_mm, h.width_mm, steps);
|
||||
|
||||
for (auto& p : mesh.vertices) p.z() -= (h.fullwidth() - h.r_back_mm);
|
||||
|
||||
using Quaternion = Eigen::Quaternion<float>;
|
||||
|
||||
// We rotate the head to the specified direction. The head's pointing
|
||||
// side is facing upwards so this means that it would hold a support
|
||||
// point with a normal pointing straight down. This is the reason for
|
||||
// the -1 z coordinate
|
||||
auto quatern = Quaternion::FromTwoVectors(Vec3f{0.f, 0.f, -1.f},
|
||||
h.dir.cast<float>());
|
||||
|
||||
Vec3f pos = h.pos.cast<float>();
|
||||
for (auto& p : mesh.vertices) p = quatern * p + pos;
|
||||
|
||||
return mesh;
|
||||
}
|
||||
|
||||
indexed_triangle_set get_mesh(const Bridge &br, size_t steps)
|
||||
{
|
||||
using Quaternion = Eigen::Quaternion<float>;
|
||||
Vec3d v = (br.endp - br.startp);
|
||||
Vec3d dir = v.normalized();
|
||||
double d = v.norm();
|
||||
|
||||
indexed_triangle_set mesh = cylinder(br.r, d, steps);
|
||||
|
||||
auto quater = Quaternion::FromTwoVectors(Vec3f{0.f, 0.f, 1.f},
|
||||
dir.cast<float>());
|
||||
|
||||
Vec3f startp = br.startp.cast<float>();
|
||||
for(auto& p : mesh.vertices) p = quater * p + startp;
|
||||
|
||||
return mesh;
|
||||
}
|
||||
|
||||
indexed_triangle_set get_mesh(const DiffBridge &br, size_t steps)
|
||||
{
|
||||
double h = br.get_length();
|
||||
indexed_triangle_set mesh = halfcone(h, br.r, br.end_r, Vec3d::Zero(), steps);
|
||||
|
||||
using Quaternion = Eigen::Quaternion<float>;
|
||||
|
||||
// The cone is generated along the +Z axis; rotate it so that it points
// from the start junction towards the end junction of the bridge.
|
||||
auto quatern = Quaternion::FromTwoVectors(Vec3f{0.f, 0.f, 1.f},
|
||||
br.get_dir().cast<float>());
|
||||
|
||||
Vec3f startp = br.startp.cast<float>();
|
||||
for(auto& p : mesh.vertices) p = quatern * p + startp;
|
||||
|
||||
return mesh;
|
||||
}
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
79
src/libslic3r/SLA/SupportTreeMesher.hpp
Normal file
@@ -0,0 +1,79 @@
|
||||
#ifndef SUPPORTTREEMESHER_HPP
|
||||
#define SUPPORTTREEMESHER_HPP
|
||||
|
||||
#include "libslic3r/Point.hpp"
|
||||
|
||||
#include "libslic3r/SLA/SupportTreeBuilder.hpp"
|
||||
#include "libslic3r/TriangleMesh.hpp"
|
||||
//#include "libslic3r/SLA/Contour3D.hpp"
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
using Portion = std::tuple<double, double>;
|
||||
|
||||
inline Portion make_portion(double a, double b)
|
||||
{
|
||||
return std::make_tuple(a, b);
|
||||
}
|
||||
|
||||
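// Sphere mesh of radius rho. 'portion' selects the polar-angle interval
// (within [0, PI]) of the sphere to generate, 'fa' is the angular resolution
// of the tessellation.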
indexed_triangle_set sphere(double rho,
|
||||
Portion portion = make_portion(0., PI),
|
||||
double fa = (2. * PI / 360.));
|
||||
|
||||
// Down facing cylinder in Z direction with arguments:
|
||||
// r: radius
|
||||
// h: height
|
||||
// steps: how many edges will create the base circle
|
||||
inline indexed_triangle_set cylinder(double r,
|
||||
double h,
|
||||
size_t steps = 45)
|
||||
{
|
||||
return its_make_cylinder(r, h, 2 * PI / steps);
|
||||
}
|
||||
|
||||
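// Pinhead mesh: the back sphere (radius r_back) sits at the origin and the
// pin sphere (radius r_pin) above it, their centers r_pin + length + r_back
// apart along the +Z axis, joined by a cone-shaped sleeve.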
indexed_triangle_set pinhead(double r_pin,
|
||||
double r_back,
|
||||
double length,
|
||||
size_t steps = 45);
|
||||
|
||||
indexed_triangle_set halfcone(double baseheight,
|
||||
double r_bottom,
|
||||
double r_top,
|
||||
const Vec3d &pt = Vec3d::Zero(),
|
||||
size_t steps = 45);
|
||||
|
||||
indexed_triangle_set get_mesh(const Head &h, size_t steps);
|
||||
|
||||
inline indexed_triangle_set get_mesh(const Pillar &p, size_t steps)
|
||||
{
|
||||
if(p.height > EPSILON) { // Endpoint is below the starting point
|
||||
// We just create a bridge geometry with the pillar parameters and
|
||||
// move the data.
|
||||
//return cylinder(p.r_start, p.height, steps, p.endpoint());
|
||||
return halfcone(p.height, p.r_end, p.r_start, p.endpt, steps);
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
inline indexed_triangle_set get_mesh(const Pedestal &p, size_t steps)
|
||||
{
|
||||
return halfcone(p.height, p.r_bottom, p.r_top, p.pos, steps);
|
||||
}
|
||||
|
||||
inline indexed_triangle_set get_mesh(const Junction &j, size_t steps)
|
||||
{
|
||||
indexed_triangle_set mesh = sphere(j.r, make_portion(0, PI), 2 * PI / steps);
|
||||
Vec3f pos = j.pos.cast<float>();
|
||||
for(auto& p : mesh.vertices) p += pos;
|
||||
return mesh;
|
||||
}
|
||||
|
||||
indexed_triangle_set get_mesh(const Bridge &br, size_t steps);
|
||||
|
||||
indexed_triangle_set get_mesh(const DiffBridge &br, size_t steps);
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
|
||||
#endif // SUPPORTTREEMESHER_HPP
|
||||
13
src/libslic3r/SLA/SupportTreeStrategies.hpp
Normal file
@@ -0,0 +1,13 @@
|
||||
#ifndef SUPPORTTREESTRATEGIES_HPP
|
||||
#define SUPPORTTREESTRATEGIES_HPP
|
||||
|
||||
#include <memory>
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
enum class SupportTreeType { Default, Branching, Organic };
|
||||
enum class PillarConnectionMode { zigzag, cross, dynamic };
|
||||
|
||||
}} // namespace Slic3r::sla
|
||||
|
||||
#endif // SUPPORTTREESTRATEGIES_HPP
|
||||
945
src/libslic3r/SLA/SupportTreeUtils.hpp
Normal file
@@ -0,0 +1,945 @@
|
||||
#ifndef SLASUPPORTTREEUTILS_H
|
||||
#define SLASUPPORTTREEUTILS_H
|
||||
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
|
||||
#include <libslic3r/Execution/Execution.hpp>
|
||||
#include <libslic3r/Optimize/NLoptOptimizer.hpp>
|
||||
#include <libslic3r/Optimize/BruteforceOptimizer.hpp>
|
||||
#include <libslic3r/MeshNormals.hpp>
|
||||
#include <libslic3r/Geometry.hpp>
|
||||
#include <libslic3r/SLA/SupportTreeBuilder.hpp>
|
||||
|
||||
#include <boost/variant.hpp>
|
||||
#include <boost/container/small_vector.hpp>
|
||||
#include <boost/log/trivial.hpp>
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
using Slic3r::opt::initvals;
|
||||
using Slic3r::opt::bounds;
|
||||
using Slic3r::opt::StopCriteria;
|
||||
using Slic3r::opt::Optimizer;
|
||||
using Slic3r::opt::AlgNLoptSubplex;
|
||||
using Slic3r::opt::AlgNLoptGenetic;
|
||||
using Slic3r::Geometry::dir_to_spheric;
|
||||
using Slic3r::Geometry::spheric_to_dir;
|
||||
|
||||
// Give points on a 3D ring with given center, radius and orientation
|
||||
// method based on:
|
||||
// https://math.stackexchange.com/questions/73237/parametric-equation-of-a-circle-in-3d-space
|
||||
template<size_t N>
|
||||
class PointRing {
|
||||
std::array<double, N - 1> m_phis;
|
||||
|
||||
// Two vectors that will be perpendicular to each other and to the
|
||||
// axis. Values for a(X) and a(Y) are now arbitrary, a(Z) is just a
|
||||
// placeholder.
|
||||
// a and b vectors are perpendicular to the ring direction and to each other.
|
||||
// Together they define the plane where we have to iterate with the
|
||||
// given angles in the 'm_phis' vector
|
||||
Vec3d a = {0, 1, 0}, b;
|
||||
double m_radius = 0.;
|
||||
|
||||
static inline bool constexpr is_one(double val)
|
||||
{
|
||||
constexpr double eps = 1e-20;
|
||||
|
||||
return std::abs(std::abs(val) - 1) < eps;
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
PointRing(const Vec3d &n) : m_phis{linspace_array<N - 1>(0., 2 * PI)}
|
||||
{
|
||||
// We have to address the case when the direction vector v (same as
|
||||
// dir) is coincident with one of the world axes. In this case two of
|
||||
// its components will be completely zero and one is 1.0. Our method
|
||||
// becomes dangerous here due to division with zero. Instead, vector
|
||||
// 'a' can be an element-wise rotated version of 'v'
|
||||
if(is_one(n(X)) || is_one(n(Y)) || is_one(n(Z))) {
|
||||
a = {n(Z), n(X), n(Y)};
|
||||
b = {n(Y), n(Z), n(X)};
|
||||
}
|
||||
else {
|
||||
a(Z) = -(n(Y)*a(Y)) / n(Z); a.normalize();
|
||||
b = a.cross(n);
|
||||
}
|
||||
}
|
||||
|
||||
Vec3d get(size_t idx, const Vec3d &src, double r) const
|
||||
{
|
||||
if (idx == 0)
|
||||
return src;
|
||||
|
||||
double phi = m_phis[idx - 1];
|
||||
double sinphi = std::sin(phi);
|
||||
double cosphi = std::cos(phi);
|
||||
|
||||
double rpscos = r * cosphi;
|
||||
double rpssin = r * sinphi;
|
||||
|
||||
// Point on the sphere
|
||||
return {src(X) + rpscos * a(X) + rpssin * b(X),
|
||||
src(Y) + rpscos * a(Y) + rpssin * b(Y),
|
||||
src(Z) + rpscos * a(Z) + rpssin * b(Z)};
|
||||
}
|
||||
};
|
||||
|
||||
template<class T, int N>
|
||||
Vec<N, T> dirv(const Vec<N, T>& startp, const Vec<N, T>& endp) {
|
||||
return (endp - startp).normalized();
|
||||
}
|
||||
|
||||
using Hit = AABBMesh::hit_result;
|
||||
|
||||
template<class It> Hit min_hit(It from, It to)
|
||||
{
|
||||
auto mit = std::min_element(from, to, [](const Hit &h1, const Hit &h2) {
|
||||
return h1.distance() < h2.distance();
|
||||
});
|
||||
|
||||
return *mit;
|
||||
}
|
||||
|
||||
inline StopCriteria get_criteria(const SupportTreeConfig &cfg)
|
||||
{
|
||||
return StopCriteria{}
|
||||
.rel_score_diff(cfg.optimizer_rel_score_diff)
|
||||
.max_iterations(cfg.optimizer_max_iterations);
|
||||
}
|
||||
|
||||
// A simple sphere with a center and a radius
|
||||
struct Ball { Vec3d p; double R; };
|
||||
|
||||
template<size_t Samples = 8>
|
||||
struct Beam_ { // Defines a set of rays displaced along a cone's surface
|
||||
static constexpr size_t SAMPLES = Samples;
|
||||
|
||||
Vec3d src;
|
||||
Vec3d dir;
|
||||
double r1;
|
||||
double r2; // radius of the beam 1 unit further from src in dir direction
|
||||
|
||||
Beam_(const Vec3d &s, const Vec3d &d, double R1, double R2)
|
||||
: src{s}, dir{d}, r1{R1}, r2{R2} {};
|
||||
|
||||
Beam_(const Ball &src_ball, const Ball &dst_ball)
|
||||
: src{src_ball.p}, dir{dirv(src_ball.p, dst_ball.p)}, r1{src_ball.R}
|
||||
{
|
||||
r2 = src_ball.R;
|
||||
auto d = distance(src_ball.p, dst_ball.p);
|
||||
|
||||
if (d > EPSILON)
|
||||
r2 += (dst_ball.R - src_ball.R) / d;
|
||||
}
|
||||
|
||||
Beam_(const Vec3d &s, const Vec3d &d, double R)
|
||||
: src{s}, dir{d}, r1{R}, r2{R}
|
||||
{}
|
||||
};
|
||||
|
||||
using Beam = Beam_<>;
|
||||
|
||||
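// Cast a bundle of rays distributed on a ring around the beam axis (radius
// r1 at the source, r2 one unit further along dir) against the mesh and
// return the closest hit. 'sd' is an extra safety distance added to both radii.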
template<class Ex, size_t RayCount = Beam::SAMPLES>
|
||||
Hit beam_mesh_hit(Ex policy,
|
||||
const AABBMesh &mesh,
|
||||
const Beam_<RayCount> &beam,
|
||||
double sd)
|
||||
{
|
||||
Vec3d src = beam.src;
|
||||
Vec3d dst = src + beam.dir;
|
||||
double r_src = beam.r1;
|
||||
double r_dst = beam.r2;
|
||||
|
||||
Vec3d D = (dst - src);
|
||||
Vec3d dir = D.normalized();
|
||||
PointRing<RayCount> ring{dir};
|
||||
|
||||
using Hit = AABBMesh::hit_result;
|
||||
|
||||
// Hit results
|
||||
std::array<Hit, RayCount> hits;
|
||||
|
||||
execution::for_each(
|
||||
policy, size_t(0), hits.size(),
|
||||
[&mesh, r_src, r_dst, src, dst, &ring, dir, sd, &hits](size_t i) {
|
||||
Hit &hit = hits[i];
|
||||
|
||||
// Point on the circle on the pin sphere
|
||||
Vec3d p_src = ring.get(i, src, r_src + sd);
|
||||
Vec3d p_dst = ring.get(i, dst, r_dst + sd);
|
||||
Vec3d raydir = (p_dst - p_src).normalized();
|
||||
|
||||
auto hr = mesh.query_ray_hit(p_src + r_src * raydir, raydir);
|
||||
|
||||
if (hr.is_inside()) {
|
||||
if (hr.distance() > 2 * r_src + sd)
|
||||
hit = Hit(0.0);
|
||||
else {
|
||||
// re-cast the ray from the outside of the object
|
||||
auto q = p_src + (hr.distance() + EPSILON) * raydir;
|
||||
hit = mesh.query_ray_hit(q, raydir);
|
||||
}
|
||||
} else
|
||||
hit = hr;
|
||||
}, std::min(execution::max_concurrency(policy), RayCount));
|
||||
|
||||
return min_hit(hits.begin(), hits.end());
|
||||
}
|
||||
|
||||
template<class Ex>
|
||||
Hit pinhead_mesh_hit(Ex ex,
|
||||
const AABBMesh &mesh,
|
||||
const Vec3d &s,
|
||||
const Vec3d &dir,
|
||||
double r_pin,
|
||||
double r_back,
|
||||
double width,
|
||||
double sd)
|
||||
{
|
||||
// Support tree generation speed depends heavily on this value. 8 is almost
|
||||
// ok, but to prevent rare cases of collision, 16 is necessary, which makes
|
||||
// the algorithm run about 60% longer.
|
||||
static const size_t SAMPLES = 16;
|
||||
|
||||
// Move away slightly from the touching point to avoid raycasting on the
|
||||
// inner surface of the mesh.
|
||||
|
||||
auto &m = mesh;
|
||||
using HitResult = AABBMesh::hit_result;
|
||||
|
||||
// Hit results
|
||||
std::array<HitResult, SAMPLES> hits;
|
||||
|
||||
struct Rings
|
||||
{
|
||||
double rpin;
|
||||
double rback;
|
||||
Vec3d spin;
|
||||
Vec3d sback;
|
||||
PointRing<SAMPLES> ring;
|
||||
|
||||
Vec3d backring(size_t idx) { return ring.get(idx, sback, rback); }
|
||||
Vec3d pinring(size_t idx) { return ring.get(idx, spin, rpin); }
|
||||
} rings{r_pin + sd, r_back + sd, s, s + (r_pin + width + r_back) * dir, dir};
|
||||
|
||||
// We will shoot multiple rays from the head pinpoint in the direction
|
||||
// of the pinhead robe (side) surface. The result will be the smallest
|
||||
// hit distance.
|
||||
|
||||
execution::for_each(
|
||||
ex, size_t(0), hits.size(), [&m, &rings, sd, &hits](size_t i) {
|
||||
// Point on the circle on the pin sphere
|
||||
Vec3d ps = rings.pinring(i);
|
||||
// This is the point on the circle on the back sphere
|
||||
Vec3d p = rings.backring(i);
|
||||
|
||||
auto &hit = hits[i];
|
||||
|
||||
// Point ps is not on mesh but can be inside or
|
||||
// outside as well. This would cause many problems
|
||||
// with ray-casting. To detect the position we will
|
||||
// use the ray-casting result (which has an is_inside
|
||||
// predicate).
|
||||
|
||||
Vec3d n = (p - ps).normalized();
|
||||
auto q = m.query_ray_hit(ps + sd * n, n);
|
||||
|
||||
if (q.is_inside()) { // the hit is inside the model
|
||||
if (q.distance() > rings.rpin) {
|
||||
// If we are inside the model and the hit
|
||||
// distance is bigger than our pin circle
|
||||
// diameter, it probably indicates that the
|
||||
// support point was already inside the
|
||||
// model, or there is really no space
|
||||
// around the point. We will assign a zero
|
||||
// hit distance to these cases which will
|
||||
// enforce the function return value to be
|
||||
// an invalid ray with zero hit distance.
|
||||
// (see min_element at the end)
|
||||
hit = HitResult(0.0);
|
||||
} else {
|
||||
// re-cast the ray from the outside of the
|
||||
// object. The starting point has an offset
|
||||
// of 2*safety_distance because the
|
||||
// original ray has also had an offset
|
||||
auto q2 = m.query_ray_hit(ps + (q.distance() + 2 * sd) * n, n);
|
||||
hit = q2;
|
||||
}
|
||||
} else
|
||||
hit = q;
|
||||
}, std::min(execution::max_concurrency(ex), SAMPLES));
|
||||
|
||||
return min_hit(hits.begin(), hits.end());
|
||||
}
|
||||
|
||||
template<class Ex>
|
||||
Hit pinhead_mesh_hit(Ex ex,
|
||||
const AABBMesh &mesh,
|
||||
const Head &head,
|
||||
double safety_d)
|
||||
{
|
||||
return pinhead_mesh_hit(ex, mesh, head.pos, head.dir, head.r_pin_mm,
|
||||
head.r_back_mm, head.width_mm, safety_d);
|
||||
}
|
||||
|
||||
inline double distance(const SupportPoint &a, const SupportPoint &b)
|
||||
{
|
||||
return (a.pos - b.pos).norm();
|
||||
}
|
||||
|
||||
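// Collect the indices of support points that are not within 'eps' of another
// point that is kept; near-duplicates are filtered out using the spatial index.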
template<class PtIndex>
|
||||
std::vector<size_t> non_duplicate_suppt_indices(const PtIndex &index,
|
||||
const SupportPoints &suppts,
|
||||
double eps)
|
||||
{
|
||||
std::vector<bool> to_remove(suppts.size(), false);
|
||||
|
||||
for (size_t i = 0; i < suppts.size(); ++i) {
|
||||
size_t closest_idx =
|
||||
find_closest_point(index, suppts[i].pos,
|
||||
[&i, &to_remove](size_t i_closest) {
|
||||
return i_closest != i &&
|
||||
!to_remove[i_closest];
|
||||
});
|
||||
|
||||
if (closest_idx < suppts.size() &&
|
||||
(suppts[i].pos - suppts[closest_idx].pos).norm() < eps)
|
||||
to_remove[i] = true;
|
||||
}
|
||||
|
||||
auto ret = reserve_vector<size_t>(suppts.size());
|
||||
for (size_t i = 0; i < to_remove.size(); i++)
|
||||
if (!to_remove[i])
|
||||
ret.emplace_back(i);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
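// Search for a collision-free orientation and width for the given pinhead.
// On success the head's direction, width and back radius are updated and true
// is returned; if the default back radius cannot be placed, the smaller
// head_fallback_radius_mm is tried.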
template<class Ex>
bool optimize_pinhead_placement(Ex policy,
                                const SupportableMesh &m,
                                Head &head)
{
    Vec3d n = get_normal(m.emesh, head.pos);
    assert(std::abs(n.norm() - 1.0) < EPSILON);

    // The normal is converted to spherical coordinates, the polar angle is
    // saturated to the configured bridge slope (measured from the bottom),
    // then converted back to standard coordinates to get the new normal.
    // Then a simple quaternion is created from the two normals
    // (Quaternion::FromTwoVectors) and the rotation is applied to the
    // pinhead.

    auto [polar, azimuth] = dir_to_spheric(n);

    double back_r = head.r_back_mm;

    // skip if the tilt is not sane
    if (polar < PI - m.cfg.normal_cutoff_angle) return false;

    // Saturate the polar angle to the maximum tilt allowed by the bridge slope
    polar = std::max(polar, PI - m.cfg.bridge_slope);

    // save the head (pinpoint) position
    Vec3d hp = head.pos;

    double lmin = m.cfg.head_width_mm, lmax = lmin;

    if (back_r < m.cfg.head_back_radius_mm) {
        lmin = 0., lmax = m.cfg.head_penetration_mm;
    }

    // The distance needed for a pinhead to not collide with model.
    double w = lmin + 2 * back_r + 2 * m.cfg.head_front_radius_mm -
               m.cfg.head_penetration_mm;

    double pin_r = head.r_pin_mm;

    // Reassemble the now corrected normal
    auto nn = spheric_to_dir(polar, azimuth).normalized();

    double sd = m.cfg.safety_distance(back_r);

    // check available distance
    Hit t = pinhead_mesh_hit(policy, m.emesh, hp, nn, pin_r, back_r, w, sd);

    if (t.distance() < w) {
        // Let's try to optimize this angle: there might be a viable normal
        // that doesn't collide with the model geometry and is very close to
        // the default.

        Optimizer<opt::AlgNLoptMLSL_Subplx> solver(get_criteria(m.cfg).stop_score(w).max_iterations(100));
        solver.seed(0); // we want deterministic behavior

        auto oresult = solver.to_max().optimize(
            [&m, pin_r, back_r, hp, sd, policy](const opt::Input<3> &input) {
                auto &[plr, azm, l] = input;

                auto dir = spheric_to_dir(plr, azm).normalized();

                return pinhead_mesh_hit(policy, m.emesh, hp, dir, pin_r,
                                        back_r, l, sd).distance();
            },
            initvals({polar, azimuth,
                      (lmin + lmax) / 2.}), // start with what we have
            bounds({{PI - m.cfg.bridge_slope, PI}, // Must not exceed the slope limit
                    {-PI, PI}, // azimuth can be a full search
                    {lmin, lmax}}));

        if(oresult.score > w) {
            polar   = std::get<0>(oresult.optimum);
            azimuth = std::get<1>(oresult.optimum);
            nn      = spheric_to_dir(polar, azimuth).normalized();
            lmin    = std::get<2>(oresult.optimum);
            t       = AABBMesh::hit_result(oresult.score);
        }
    }

    bool ret = false;
    if (t.distance() > w && hp.z() + w * nn.z() >= ground_level(m)) {
        head.dir       = nn;
        head.width_mm  = lmin;
        head.r_back_mm = back_r;

        ret = true;
    } else if (back_r > m.cfg.head_fallback_radius_mm) {
        head.r_back_mm = m.cfg.head_fallback_radius_mm;
        ret = optimize_pinhead_placement(policy, m, head);
    }

    return ret;
}
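
// A minimal sketch of the normal-saturation step used above, assuming only
// dir_to_spheric() and spheric_to_dir() from this header; saturate_normal is
// a hypothetical helper name, not part of the support tree API.
//
//     Vec3d saturate_normal(const Vec3d &n, double bridge_slope)
//     {
//         auto [polar, azimuth] = dir_to_spheric(n);
//         polar = std::max(polar, PI - bridge_slope); // at most bridge_slope from straight down
//         return spheric_to_dir(polar, azimuth).normalized();
//     }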

template<class Ex>
std::optional<Head> calculate_pinhead_placement(Ex policy,
                                                const SupportableMesh &sm,
                                                size_t suppt_idx)
{
    if (suppt_idx >= sm.pts.size())
        return {};

    const SupportPoint &sp = sm.pts[suppt_idx];
    Head head{
        sm.cfg.head_back_radius_mm,
        sp.head_front_radius,
        0.,
        sm.cfg.head_penetration_mm,
        Vec3d::Zero(),         // dir
        sp.pos.cast<double>()  // displacement
    };

    if (optimize_pinhead_placement(policy, sm, head)) {
        head.id = long(suppt_idx);

        return head;
    }

    return {};
}
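
// A minimal usage sketch for the two helpers above, assuming a populated
// SupportableMesh, a point index type usable with find_closest_point(), and an
// execution policy; collect_heads and the eps value are hypothetical examples.
//
//     template<class Ex, class PtIndex>
//     std::vector<Head> collect_heads(Ex ex, const SupportableMesh &sm,
//                                     const PtIndex &ptidx, double eps)
//     {
//         std::vector<Head> heads;
//         for (size_t idx : non_duplicate_suppt_indices(ptidx, sm.pts, eps))
//             if (std::optional<Head> h = calculate_pinhead_placement(ex, sm, idx))
//                 heads.emplace_back(*h);
//         return heads;
//     }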

struct GroundConnection {
    // Currently, a ground connection will contain at most 2 additional junctions
    // which will not require any allocations. If I come up with an algo that
    // can produce a route to ground with more junctions, this design will be
    // able to handle that.
    static constexpr size_t MaxExpectedJunctions = 3;

    boost::container::small_vector<Junction, MaxExpectedJunctions> path;
    std::optional<Pedestal> pillar_base;

    operator bool() const { return pillar_base.has_value() && !path.empty(); }
};

inline long build_ground_connection(SupportTreeBuilder &builder,
                                    const SupportableMesh &sm,
                                    const GroundConnection &conn)
{
    long ret = SupportTreeNode::ID_UNSET;

    if (!conn)
        return ret;

    auto it   = conn.path.begin();
    auto itnx = std::next(it);

    while (itnx != conn.path.end()) {
        builder.add_diffbridge(*it, *itnx);
        builder.add_junction(*itnx);
        ++it; ++itnx;
    }

    auto gp = conn.path.back().pos;
    gp.z() = ground_level(sm);
    double h = conn.path.back().pos.z() - gp.z();

    if (conn.pillar_base->r_top < sm.cfg.head_back_radius_mm) {
        h += sm.pad_cfg.wall_thickness_mm;
        gp.z() -= sm.pad_cfg.wall_thickness_mm;
    }

    // TODO: does not work yet
    // if (conn.path.back().id < 0) {
    //     // this is a head
    //     long head_id = std::abs(conn.path.back().id);
    //     ret = builder.add_pillar(head_id, h);
    // } else

    ret = builder.add_pillar(gp, h, conn.path.back().r, conn.pillar_base->r_top);

    if (conn.pillar_base->r_top >= sm.cfg.head_back_radius_mm)
        builder.add_pillar_base(ret, conn.pillar_base->height, conn.pillar_base->r_bottom);

    return ret;
}

template<class Fn>
constexpr bool IsWideningFn = std::is_invocable_r_v</*retval*/ double,
                                                    Fn,
                                                    Ball   /*source*/,
                                                    Vec3d  /*dir*/,
                                                    double /*length*/>;

// A widening function can determine how many ray samples a beam should contain
// (see beam_mesh_hit).
template<class WFn> struct BeamSamples { static constexpr size_t Value = 8; };
template<class WFn> constexpr size_t BeamSamplesV = BeamSamples<remove_cvref_t<WFn>>::Value;

// To use with check_ground_route: Full checks both the bridge and the pillar,
// PillarOnly checks only the pillar for collisions.
enum class GroundRouteCheck { Full, PillarOnly };
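
// A minimal sketch of a custom widening strategy satisfying the trait above;
// ConstantWidening is a hypothetical example, not used elsewhere in this
// header. Its BeamSamples specialization shows how the ray-sample count can be
// tuned per strategy, just like DefaultWideningModel does further below.
//
//     struct ConstantWidening {
//         double r_mm; // fixed beam radius, ignores source ball, direction and length
//         double operator()(const Ball &, const Vec3d &, double) const { return r_mm; }
//     };
//     static_assert(IsWideningFn<ConstantWidening>, "must satisfy the widening trait");
//     template<> struct BeamSamples<ConstantWidening> { static constexpr size_t Value = 8; };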

// Returns the collision point with the mesh if there is a collision, or a
// ground point, given a source point, the direction of a potential avoidance
// bridge and a bridge length.
template<class Ex, class WideningFn,
         class = std::enable_if_t<IsWideningFn<WideningFn>> >
Vec3d check_ground_route(
    Ex policy,
    const SupportableMesh &sm,
    const Junction &source,  // source location
    const Vec3d &dir,        // direction of the bridge from the source
    double bridge_len,       // length of the avoidance bridge
    WideningFn &&wideningfn, // Widening strategy
    GroundRouteCheck type = GroundRouteCheck::Full
    )
{
    static const constexpr auto Samples = BeamSamplesV<WideningFn>;

    Vec3d ret;

    const auto sd     = sm.cfg.safety_distance(source.r);
    const auto gndlvl = ground_level(sm);

    // Intersection of the suggested bridge with ground plane. If the bridge
    // spans below ground, stop it at ground level.
    double t = (gndlvl - source.pos.z()) / dir.z();
    bridge_len = std::min(t, bridge_len);

    Vec3d bridge_end = source.pos + bridge_len * dir;

    double down_l     = bridge_end.z() - gndlvl;
    double bridge_r   = wideningfn(Ball{source.pos, source.r}, dir, bridge_len);
    double brhit_dist = 0.;

    if (bridge_len > EPSILON && type == GroundRouteCheck::Full) {
        // beam_mesh_hit with a zero length bridge is invalid

        Beam_<Samples> bridgebeam{Ball{source.pos, source.r},
                                  Ball{bridge_end, bridge_r}};

        auto brhit = beam_mesh_hit(policy, sm.emesh, bridgebeam, sd);
        brhit_dist = brhit.distance();
    } else {
        brhit_dist = bridge_len;
    }

    if (brhit_dist < bridge_len) {
        ret = (source.pos + brhit_dist * dir);
    } else if (down_l > 0.) {
        // check if pillar can be placed below
        auto gp = Vec3d{bridge_end.x(), bridge_end.y(), gndlvl};
        double end_radius = wideningfn(
            Ball{bridge_end, bridge_r}, DOWN, bridge_end.z() - gndlvl);

        Beam_<Samples> gndbeam {{bridge_end, bridge_r}, {gp, end_radius}};
        auto gndhit = beam_mesh_hit(policy, sm.emesh, gndbeam, sd);
        double gnd_hit_d = std::min(gndhit.distance(), down_l + EPSILON);

        if (source.r >= sm.cfg.head_back_radius_mm && gndhit.distance() > down_l && sm.cfg.object_elevation_mm < EPSILON) {
            // Dealing with zero elevation mode: do not route pillars
            // into the gap between the optional pad and the model.
            double gap     = std::sqrt(sm.emesh.squared_distance(gp));
            double base_r  = std::max(sm.cfg.base_radius_mm, end_radius);
            double min_gap = sm.cfg.pillar_base_safety_distance_mm + base_r;

            if (gap < min_gap) {
                gnd_hit_d = down_l - min_gap + gap;
            }
        }

        ret = Vec3d{bridge_end.x(), bridge_end.y(), bridge_end.z() - gnd_hit_d};
    } else {
        ret = bridge_end;
    }

    return ret;
}

// Searching a ground connection from an arbitrary source point.
// Currently, the result will contain one avoidance bridge (at most) and a
// pillar to the ground, if it's feasible.
template<class Ex, class WideningFn,
         class = std::enable_if_t<IsWideningFn<WideningFn>> >
GroundConnection deepsearch_ground_connection(
    Ex policy,
    const SupportableMesh &sm,
    const Junction &source,
    WideningFn &&wideningfn,
    const Vec3d &init_dir = DOWN)
{
    constexpr unsigned MaxIterationsGlobal = 5000;
    constexpr unsigned MaxIterationsLocal  = 100;
    constexpr double   RelScoreDiff        = 0.05;

    const auto gndlvl = ground_level(sm);

    // The used solver (AlgNLoptMLSL_Subplx search method) is composed of a global (MLSL)
    // and a local (Subplex) search method. Criteria can be set in a way that
    // local searches are quick and less accurate. The global method will only
    // consider the max iteration number and the stop score (Z level <= ground).

    auto criteria = get_criteria(sm.cfg); // get defaults from cfg
    criteria.max_iterations(MaxIterationsGlobal);
    criteria.abs_score_diff(NaNd);
    criteria.rel_score_diff(NaNd);
    criteria.stop_score(gndlvl);

    auto criteria_loc = criteria;
    criteria_loc.max_iterations(MaxIterationsLocal);
    criteria_loc.abs_score_diff(EPSILON);
    criteria_loc.rel_score_diff(RelScoreDiff);

    Optimizer<opt::AlgNLoptMLSL_Subplx> solver(criteria);
    solver.set_loc_criteria(criteria_loc);
    solver.seed(0); // require repeatability

    // The functor returns the Z height of the collision point, given polar and
    // azimuth angles as the bridge direction and a bridge length. The route is
    // traced from source, through this bridge and an attached pillar. If there
    // is a collision with the mesh, the Z height is returned. Otherwise the
    // Z level of the ground is returned.
    auto z_fn = [&](const opt::Input<3> &input) {
        // solver suggests polar, azimuth and bridge length values:
        auto &[plr, azm, bridge_len] = input;

        Vec3d n = spheric_to_dir(plr, azm);

        Vec3d hitpt = check_ground_route(policy, sm, source, n, bridge_len, wideningfn);

        return hitpt.z();
    };

    // Calculate the initial direction of the search by
    // saturating the polar angle to the max tilt defined in config
    auto [plr_init, azm_init] = dir_to_spheric(init_dir);
    plr_init = std::max(plr_init, PI - sm.cfg.bridge_slope);

    auto bound_constraints =
        bounds({
            {PI - sm.cfg.bridge_slope, PI},   // bounds for polar angle
            {-PI, PI},                        // bounds for azimuth
            {0., sm.cfg.max_bridge_length_mm} // bounds for bridge length
        });

    // The optimizer can navigate fairly well on the mesh surface, finding
    // lower and lower Z coordinates as collision points. MLSL is not a local
    // search method, so it should not be trapped in a local minimum. Eventually,
    // this search should arrive at a ground location.
    auto oresult = solver.to_min().optimize(
        z_fn,
        initvals({plr_init, azm_init, 0.}),
        bound_constraints
    );

    GroundConnection conn;

    // Extract and apply the result
    auto [plr, azm, bridge_l] = oresult.optimum;
    Vec3d n = spheric_to_dir(plr, azm);
    assert(std::abs(n.norm() - 1.) < EPSILON);

    double t = (gndlvl - source.pos.z()) / n.z();
    bridge_l = std::min(t, bridge_l);

    // Now the optimizer gave a possible route to ground with a bridge direction
    // and length. This length can be shortened further by brute-force queries
    // of a free route straight down for a possible pillar.
    // NOTE: This requirement could be incorporated into the optimization as a
    // constraint, but it would not find an accurate solution quickly enough,
    // and it would be very hard to define a stop score, which is very useful in
    // terminating the search as soon as the ground is found.
    double l = 0., l_max = bridge_l;
    double zlvl = std::numeric_limits<double>::infinity();
    while(zlvl > gndlvl && l <= l_max) {

        zlvl = check_ground_route(policy, sm, source, n, l, wideningfn,
                                  GroundRouteCheck::PillarOnly).z();

        if (zlvl <= gndlvl)
            bridge_l = l;

        l += source.r;
    }

    Vec3d bridge_end = source.pos + bridge_l * n;
    Vec3d gp{bridge_end.x(), bridge_end.y(), gndlvl};

    double bridge_r   = wideningfn(Ball{source.pos, source.r}, n, bridge_l);
    double down_l     = bridge_end.z() - gndlvl;
    double end_radius = wideningfn(Ball{bridge_end, bridge_r}, DOWN, down_l);
    double base_r     = std::max(sm.cfg.base_radius_mm, end_radius);

    // Even if the search was not successful, the result is populated by the
    // source and the last best result of the optimization.
    conn.path.emplace_back(source);
    if (bridge_l > EPSILON)
        conn.path.emplace_back(Junction{bridge_end, bridge_r});

    // The resulting ground connection is only valid if the pillar base is set.
    // At this point it will only be set if the search was successful.
    if (z_fn(opt::Input<3>({plr, azm, bridge_l})) <= gndlvl)
        conn.pillar_base =
            Pedestal{gp, sm.cfg.base_height_mm, base_r, end_radius};

    return conn;
}

// Ground route search with a predefined end radius
template<class Ex>
GroundConnection deepsearch_ground_connection(Ex policy,
                                              const SupportableMesh &sm,
                                              const Junction &source,
                                              double end_radius,
                                              const Vec3d &init_dir = DOWN)
{
    double gndlvl = ground_level(sm);
    auto wfn = [end_radius, gndlvl](const Ball &src, const Vec3d &dir, double len) {
        if (len < EPSILON)
            return src.R;

        Vec3d dst = src.p + len * dir;
        double widening = end_radius - src.R;
        double zlen     = dst.z() - gndlvl;
        double full_len = len + zlen;
        double r        = src.R + widening * len / full_len;

        return r;
    };

    static_assert(IsWideningFn<decltype(wfn)>, "Not a widening function");

    return deepsearch_ground_connection(policy, sm, source, wfn, init_dir);
}

struct DefaultWideningModel {
    static constexpr double WIDENING_SCALE = 0.02;
    const SupportableMesh &sm;

    double operator()(const Ball &src, const Vec3d & /*dir*/, double len) {
        static_assert(IsWideningFn<DefaultWideningModel>,
                      "DefaultWideningModel is not a widening function");

        double w = WIDENING_SCALE * sm.cfg.pillar_widening_factor * len;
        return std::max(src.R, sm.cfg.head_back_radius_mm) + w;
    };
};

template<> struct BeamSamples<DefaultWideningModel> {
    static constexpr size_t Value = 16;
};

template<class Ex>
GroundConnection deepsearch_ground_connection(Ex policy,
                                              const SupportableMesh &sm,
                                              const Junction &source,
                                              const Vec3d &init_dir = DOWN)
{
    return deepsearch_ground_connection(policy, sm, source,
                                        DefaultWideningModel{sm}, init_dir);
}
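
// A minimal usage sketch tying the search and the builder together, assuming a
// SupportTreeBuilder, a SupportableMesh and a Junction to be routed down;
// route_to_ground is a hypothetical wrapper, not part of this header.
//
//     template<class Ex>
//     long route_to_ground(Ex ex, SupportTreeBuilder &builder,
//                          const SupportableMesh &sm, const Junction &j)
//     {
//         GroundConnection conn = deepsearch_ground_connection(ex, sm, j);
//
//         // conn converts to false when no pillar base could be placed
//         return conn ? build_ground_connection(builder, sm, conn)
//                     : SupportTreeNode::ID_UNSET;
//     }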

template<class Ex>
bool optimize_anchor_placement(Ex policy,
                               const SupportableMesh &sm,
                               const Junction &from,
                               Anchor &anchor)
{
    Vec3d n = get_normal(sm.emesh, anchor.pos);

    auto [polar, azimuth] = dir_to_spheric(n);

    // Saturate the polar angle to the maximum tilt allowed by the bridge slope
    polar = std::min(polar, sm.cfg.bridge_slope);

    double lmin = 0;
    double lmax = std::min(sm.cfg.head_width_mm,
                           distance(from.pos, anchor.pos) - 2 * from.r);

    double sd = sm.cfg.safety_distance(anchor.r_back_mm);

    Optimizer<AlgNLoptGenetic> solver(get_criteria(sm.cfg)
                                          .stop_score(anchor.fullwidth())
                                          .max_iterations(100));

    solver.seed(0); // deterministic behavior

    auto oresult = solver.to_max().optimize(
        [&sm, &anchor, sd, policy](const opt::Input<3> &input) {
            auto &[plr, azm, l] = input;

            auto dir = spheric_to_dir(plr, azm).normalized();

            anchor.width_mm = l;
            anchor.dir      = dir;

            return pinhead_mesh_hit(policy, sm.emesh, anchor, sd)
                .distance();
        },
        initvals({polar, azimuth, (lmin + lmax) / 2.}),
        bounds({{0., sm.cfg.bridge_slope}, // Must not exceed the slope limit
                {-PI, PI},                 // azimuth can be a full search
                {lmin, lmax}}));

    polar   = std::get<0>(oresult.optimum);
    azimuth = std::get<1>(oresult.optimum);
    anchor.dir      = spheric_to_dir(polar, azimuth).normalized();
    anchor.width_mm = std::get<2>(oresult.optimum);

    if (oresult.score < anchor.fullwidth()) {
        // Unsuccessful search, the anchor does not fit into its intended space.
        return false;
    }

    return true;
}

template<class Ex>
std::optional<Anchor> calculate_anchor_placement(Ex policy,
                                                 const SupportableMesh &sm,
                                                 const Junction &from,
                                                 const Vec3d &to_hint)
{
    double back_r   = from.r;
    double pin_r    = sm.cfg.head_front_radius_mm;
    double penetr   = sm.cfg.head_penetration_mm;
    double hwidth   = sm.cfg.head_width_mm;
    Vec3d bridgedir = dirv(from.pos, to_hint);
    Vec3d anchordir = -bridgedir;

    std::optional<Anchor> ret;

    Anchor anchor(back_r, pin_r, hwidth, penetr, anchordir, to_hint);

    if (optimize_anchor_placement(policy, sm, from, anchor)) {
        ret = anchor;
    } else if (anchor.r_back_mm = sm.cfg.head_fallback_radius_mm;
               optimize_anchor_placement(policy, sm, from, anchor)) {
        // Retrying with the fallback strut radius as a last resort.
        ret = anchor;
    }

    return ret;
}

inline bool is_outside_support_cone(const Vec3f &supp,
                                    const Vec3f &pt,
                                    float angle)
{
    using namespace Slic3r;

    Vec3d D = (pt - supp).cast<double>();
    double dot_sq = -D.z() * std::abs(-D.z());

    return dot_sq <
           D.squaredNorm() * std::cos(angle) * std::abs(std::cos(angle));
}
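
// A small worked example of the cone test above, with illustrative values and
// a 45 degree cone: a point directly below the support point lies inside the
// cone, while a point at the same height off to the side lies outside.
//
//     bool below  = is_outside_support_cone({0.f, 0.f, 1.f}, {0.f, 0.f, 0.f}, float(PI / 4.)); // false
//     bool beside = is_outside_support_cone({0.f, 0.f, 1.f}, {1.f, 0.f, 1.f}, float(PI / 4.)); // true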

inline // TODO: should be in a cpp
std::optional<Vec3f> find_merge_pt(const Vec3f &A,
                                   const Vec3f &B,
                                   float critical_angle)
{
    // The idea is that A and B both have their own support cones. Searching
    // for the intersection of these support cones is difficult, but it's enough
    // to reduce this problem to 2D and search for the intersection of two
    // rays that merge somewhere between A and B. The 2D plane is a vertical
    // slice of the 3D scene where the 2D Y axis is equal to the 3D Z axis and
    // the 2D X axis is determined by the XY direction of the AB vector.
    //
    // Z^
    //  |    A *
    //  |     . .   B *
    //  |      . .   . .
    //  |       . .   . .
    //  |        . x .
    //  -------------------> XY

    // Determine the transformation matrix for the 2D projection:
    Vec3f diff = {B.x() - A.x(), B.y() - A.y(), 0.f};
    Vec3f dir  = diff.normalized(); // TODO: avoid normalization

    Eigen::Matrix<float, 2, 3> tr2D;
    tr2D.row(0) = Vec3f{dir.x(), dir.y(), dir.z()};
    tr2D.row(1) = Vec3f{0.f, 0.f, 1.f};

    // Transform the 2 vectors A and B into 2D vectors 'a' and 'b'. Here we can
    // omit 'a', pretend that it's the origin and use BA as the vector b.
    Vec2f b = tr2D * (B - A);

    // Get the squared sine of the ray emanating from 'a' towards 'b'. This ray
    // might exceed the allowed angle but that is corrected subsequently.
    // The sign of the original sine is also needed, hence b.y is multiplied by
    // abs(b.y).
    float b_sqn = b.squaredNorm();
    float sin2sig_a = b_sqn > EPSILON ? (b.y() * std::abs(b.y())) / b_sqn : 0.f;

    // sine2 from 'b' to 'a' is the opposite of sine2 from a to b
    float sin2sig_b = -sin2sig_a;

    // Derive the allowed angles from the given critical angle.
    // critical_angle is measured from the horizontal X axis.
    // The rays need to go downwards which corresponds to negative angles.

    float sincrit = std::sin(critical_angle);  // sine of the critical angle
    float sin2crit = -sincrit * sincrit;       // signed sine squared
    sin2sig_a = std::min(sin2sig_a, sin2crit); // Do the angle saturation of both rays
    sin2sig_b = std::min(sin2sig_b, sin2crit); //
    float sin2_a = std::abs(sin2sig_a);        // Get the sine and cosine squared values
    float sin2_b = std::abs(sin2sig_b);
    float cos2_a = 1.f - sin2_a;
    float cos2_b = 1.f - sin2_b;

    // Derive the new direction vectors by square rooting the sin2 and cos2
    // values and restoring the original signs.
    Vec2f Da = {std::copysign(std::sqrt(cos2_a), b.x()), std::copysign(std::sqrt(sin2_a), sin2sig_a)};
    Vec2f Db = {-std::copysign(std::sqrt(cos2_b), b.x()), std::copysign(std::sqrt(sin2_b), sin2sig_b)};

    // Determine where the two rays ([0, 0], Da), (b, Db) intersect.
    // Based on
    // https://stackoverflow.com/questions/27459080/given-two-points-and-two-direction-vectors-find-the-point-where-they-intersect
    // One ray is emanating from (0, 0), so the formula is simplified.
    double t1 = (Db.y() * b.x() - b.y() * Db.x()) /
                (Da.x() * Db.y() - Da.y() * Db.x());

    Vec2f mp = t1 * Da;
    Vec3f Mp = A + tr2D.transpose() * mp;

    return t1 >= 0.f ? Mp : Vec3f{};
}
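
// A minimal usage sketch, assuming two support points that should share a
// common trunk; the coordinates and the 45 degree critical angle are
// illustrative only.
//
//     Vec3f a{0.f, 0.f, 10.f}, b{4.f, 0.f, 10.f};
//     if (std::optional<Vec3f> mp = find_merge_pt(a, b, float(PI / 4.))) {
//         // *mp lies below and between 'a' and 'b' (here roughly {2, 0, 8}),
//         // reachable from both points within their support cones.
//     }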

}} // namespace Slic3r::sla

#endif // SLASUPPORTTREEUTILS_H
301
src/libslic3r/SLA/SupportTreeUtilsLegacy.hpp
Normal file
@@ -0,0 +1,301 @@
#ifndef SUPPORTTREEUTILSLEGACY_HPP
#define SUPPORTTREEUTILSLEGACY_HPP

#include "SupportTreeUtils.hpp"

// Older functions used by DefaultSupportTree are gathered here to preserve
// well-tested functionality.

namespace Slic3r { namespace sla {

// Helper function for pillar interconnection where pairs of already connected
// pillars should be checked so they are not processed again. This can be done
// in constant time with a set of hash values uniquely representing a pair of
// integers. The order of numbers within the pair should not matter, it has
// the same unique hash. The hash value has to have twice as many bits as the
// arguments need. If the same integral type is used for args and return val,
// make sure the arguments use only half of the type's bit depth.
template<class I, class DoubleI = IntegerOnly<I>>
IntegerOnly<DoubleI> pairhash(I a, I b)
{
    using std::ceil; using std::log2; using std::max; using std::min;
    static const auto constexpr Ibits = int(sizeof(I) * CHAR_BIT);
    static const auto constexpr DoubleIbits = int(sizeof(DoubleI) * CHAR_BIT);
    static const auto constexpr shift = DoubleIbits / 2 < Ibits ? Ibits / 2 : Ibits;

    I g = min(a, b), l = max(a, b);

    // Assume the hash will fit into the output variable
    assert((g ? (ceil(log2(g))) : 0) <= shift);
    assert((l ? (ceil(log2(l))) : 0) <= shift);

    return (DoubleI(g) << shift) + l;
}
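
// A small sketch of the symmetry property described above, with illustrative
// integral types and values: the pair (2, 5) hashes to the same value in
// either order, while a different pair yields a different hash.
//
//     auto h1 = pairhash<uint32_t, uint64_t>(2u, 5u);
//     auto h2 = pairhash<uint32_t, uint64_t>(5u, 2u);
//     assert(h1 == h2);
//     assert(h1 != pairhash<uint32_t, uint64_t>(2u, 6u));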

template<class Ex>
std::optional<DiffBridge> search_widening_path(Ex policy,
                                               const SupportableMesh &sm,
                                               const Vec3d &jp,
                                               const Vec3d &dir,
                                               double radius,
                                               double new_radius)
{
    double w = radius + 2 * sm.cfg.head_back_radius_mm;
    double stopval = w + jp.z() - ground_level(sm);
    Optimizer<AlgNLoptSubplex> solver(get_criteria(sm.cfg).stop_score(stopval));

    auto [polar, azimuth] = dir_to_spheric(dir);

    double fallback_ratio = radius / sm.cfg.head_back_radius_mm;

    auto oresult = solver.to_max().optimize(
        [&policy, &sm, jp, radius, new_radius](const opt::Input<3> &input) {
            auto &[plr, azm, t] = input;

            auto d = spheric_to_dir(plr, azm).normalized();

            auto sd = sm.cfg.safety_distance(new_radius);

            double ret = pinhead_mesh_hit(policy, sm.emesh, jp, d, radius,
                                          new_radius, t, sd)
                             .distance();

            Beam beam{jp + t * d, d, new_radius};
            double down = beam_mesh_hit(policy, sm.emesh, beam, sd).distance();

            if (ret > t && std::isinf(down))
                ret += jp.z() - ground_level(sm);

            return ret;
        },
        initvals({polar, azimuth, w}), // start with what we have
        bounds({
            {PI - sm.cfg.bridge_slope, PI}, // Must not exceed the slope limit
            {-PI, PI},                      // azimuth can be a full search
            {radius + sm.cfg.head_back_radius_mm,
             fallback_ratio * sm.cfg.max_bridge_length_mm}
        }));

    if (oresult.score >= stopval) {
        polar   = std::get<0>(oresult.optimum);
        azimuth = std::get<1>(oresult.optimum);
        double t = std::get<2>(oresult.optimum);
        Vec3d endp = jp + t * spheric_to_dir(polar, azimuth);

        return DiffBridge(jp, endp, radius, sm.cfg.head_back_radius_mm);
    }

    return {};
}

// This is a proxy function for pillar creation which will mind the gap
// between the pad and the model bottom in zero elevation mode.
// 'pinhead_junctionpt' is the starting junction point which needs to be
// routed down. sourcedir is the allowed direction of an optional bridge
// between the jp junction and the final pillar.
template<class Ex>
std::pair<bool, long> create_ground_pillar(
    Ex policy,
    SupportTreeBuilder &builder,
    const SupportableMesh &sm,
    const Vec3d &pinhead_junctionpt,
    const Vec3d &sourcedir,
    double radius,
    double end_radius,
    long head_id = SupportTreeNode::ID_UNSET)
{
    Vec3d jp = pinhead_junctionpt, endp = jp, dir = sourcedir;
    long pillar_id = SupportTreeNode::ID_UNSET;
    bool can_add_base = false, non_head = false;

    double gndlvl = 0.;   // The Z level where pedestals should be
    double jp_gnd = 0.;   // The lowest Z where a junction center can be
    double gap_dist = 0.; // The gap distance between the model and the pad

    double r2 = radius + (end_radius - radius) / (jp.z() - ground_level(sm));

    auto to_floor = [&gndlvl](const Vec3d &p) { return Vec3d{p.x(), p.y(), gndlvl}; };

    auto eval_limits = [&sm, &radius, &can_add_base, &gndlvl, &gap_dist, &jp_gnd]
        (bool base_en = true)
    {
        can_add_base  = base_en && radius >= sm.cfg.head_back_radius_mm;
        double base_r = can_add_base ? sm.cfg.base_radius_mm : 0.;
        gndlvl        = ground_level(sm);
        if (!can_add_base) gndlvl -= sm.pad_cfg.wall_thickness_mm;
        jp_gnd   = gndlvl + (can_add_base ? 0. : sm.cfg.head_back_radius_mm);
        gap_dist = sm.cfg.pillar_base_safety_distance_mm + base_r + EPSILON;
    };

    eval_limits();

    // We are dealing with a mini pillar that's potentially too long
    if (radius < sm.cfg.head_back_radius_mm && jp.z() - gndlvl > 20 * radius)
    {
        std::optional<DiffBridge> diffbr =
            search_widening_path(policy, sm, jp, dir, radius,
                                 sm.cfg.head_back_radius_mm);

        if (diffbr && diffbr->endp.z() > jp_gnd) {
            auto &br = builder.add_diffbridge(*diffbr);
            if (head_id >= 0) builder.head(head_id).bridge_id = br.id;
            endp = diffbr->endp;
            radius = diffbr->end_r;
            end_radius = diffbr->end_r;
            builder.add_junction(endp, radius);
            non_head = true;
            dir = diffbr->get_dir();
            eval_limits();
        } else return {false, pillar_id};
    }

    if (sm.cfg.object_elevation_mm < EPSILON)
    {
        // get a suitable direction for the corrector bridge. It is the
        // original sourcedir's azimuth but the polar angle is saturated to the
        // configured bridge slope.
        auto [polar, azimuth] = dir_to_spheric(dir);
        polar = PI - sm.cfg.bridge_slope;
        Vec3d d = spheric_to_dir(polar, azimuth).normalized();
        auto sd = radius * sm.cfg.safety_distance_mm / sm.cfg.head_back_radius_mm;
        double t = beam_mesh_hit(policy, sm.emesh, Beam{endp, d, radius, r2}, sd).distance();
        double tmax = std::min(sm.cfg.max_bridge_length_mm, t);
        t = 0.;

        double zd = endp.z() - jp_gnd;
        double tmax2 = zd / std::sqrt(1 - sm.cfg.bridge_slope * sm.cfg.bridge_slope);
        tmax = std::min(tmax, tmax2);

        Vec3d nexp = endp;
        double dlast = 0.;
        while (((dlast = std::sqrt(sm.emesh.squared_distance(to_floor(nexp)))) < gap_dist ||
                !std::isinf(beam_mesh_hit(policy, sm.emesh, Beam{nexp, DOWN, radius, r2}, sd).distance())) &&
               t < tmax)
        {
            t += radius;
            nexp = endp + t * d;
        }

        if (dlast < gap_dist && can_add_base) {
            nexp = endp;
            t = 0.;
            can_add_base = false;
            eval_limits(can_add_base);

            zd = endp.z() - jp_gnd;
            tmax2 = zd / std::sqrt(1 - sm.cfg.bridge_slope * sm.cfg.bridge_slope);
            tmax = std::min(tmax, tmax2);

            while (((dlast = std::sqrt(sm.emesh.squared_distance(to_floor(nexp)))) < gap_dist ||
                    !std::isinf(beam_mesh_hit(policy, sm.emesh, Beam{nexp, DOWN, radius}, sd).distance())) && t < tmax) {
                t += radius;
                nexp = endp + t * d;
            }
        }

        // Could not find a path to avoid the pad gap
        if (dlast < gap_dist) return {false, pillar_id};

        if (t > 0.) { // Need to make additional bridge
            const Bridge& br = builder.add_bridge(endp, nexp, radius);
            if (head_id >= 0) builder.head(head_id).bridge_id = br.id;

            builder.add_junction(nexp, radius);
            endp = nexp;
            non_head = true;
        }
    }

    Vec3d gp = to_floor(endp);
    double h = endp.z() - gp.z();

    pillar_id = head_id >= 0 && !non_head ? builder.add_pillar(head_id, h) :
                                            builder.add_pillar(gp, h, radius, end_radius);

    if (can_add_base)
        builder.add_pillar_base(pillar_id, sm.cfg.base_height_mm,
                                sm.cfg.base_radius_mm);

    return {true, pillar_id};
}

template<class Ex>
std::pair<bool, long> connect_to_ground(Ex policy,
                                        SupportTreeBuilder &builder,
                                        const SupportableMesh &sm,
                                        const Junction &j,
                                        const Vec3d &dir,
                                        double end_r)
{
    auto hjp = j.pos;
    double r = j.r;
    auto sd  = r * sm.cfg.safety_distance_mm / sm.cfg.head_back_radius_mm;
    double r2 = j.r + (end_r - j.r) / (j.pos.z() - ground_level(sm));

    double t = beam_mesh_hit(policy, sm.emesh, Beam{hjp, dir, r, r2}, sd).distance();
    double d = 0, tdown = 0;
    t = std::min(t, sm.cfg.max_bridge_length_mm * r / sm.cfg.head_back_radius_mm);

    while (d < t &&
           !std::isinf(tdown = beam_mesh_hit(policy, sm.emesh,
                                             Beam{hjp + d * dir, DOWN, r, r2}, sd)
                                   .distance())) {
        d += r;
    }

    if(!std::isinf(tdown))
        return {false, SupportTreeNode::ID_UNSET};

    Vec3d endp = hjp + d * dir;
    auto ret = create_ground_pillar(policy, builder, sm, endp, dir, r, end_r);

    if (ret.second >= 0) {
        builder.add_bridge(hjp, endp, r);
        builder.add_junction(endp, r);
    }

    return ret;
}

template<class Ex>
std::pair<bool, long> search_ground_route(Ex policy,
                                          SupportTreeBuilder &builder,
                                          const SupportableMesh &sm,
                                          const Junction &j,
                                          double end_radius,
                                          const Vec3d &init_dir = DOWN)
{
    double downdst = j.pos.z() - ground_level(sm);

    auto res = connect_to_ground(policy, builder, sm, j, init_dir, end_radius);
    if (res.first)
        return res;

    // Optimize bridge direction:
    // Straight path failed so we will try to search for a suitable
    // direction out of the cavity.
    auto [polar, azimuth] = dir_to_spheric(init_dir);

    Optimizer<AlgNLoptGenetic> solver(get_criteria(sm.cfg).stop_score(1e6));
    solver.seed(0); // we want deterministic behavior

    auto sd = j.r * sm.cfg.safety_distance_mm / sm.cfg.head_back_radius_mm;
    auto oresult = solver.to_max().optimize(
        [&j, sd, &policy, &sm, &downdst, &end_radius](const opt::Input<2> &input) {
            auto &[plr, azm] = input;
            Vec3d n = spheric_to_dir(plr, azm).normalized();
            Beam beam{Ball{j.pos, j.r}, Ball{j.pos + downdst * n, end_radius}};
            return beam_mesh_hit(policy, sm.emesh, beam, sd).distance();
        },
        initvals({polar, azimuth}), // let's start with what we have
        bounds({ {PI - sm.cfg.bridge_slope, PI}, {-PI, PI} })
    );

    Vec3d bridgedir = spheric_to_dir(oresult.optimum).normalized();

    return connect_to_ground(policy, builder, sm, j, bridgedir, end_radius);
}
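
// A minimal usage sketch, assuming a SupportTreeBuilder, a SupportableMesh and
// a junction that needs to reach the ground; the end radius value is
// illustrative.
//
//     auto [success, pillar_id] = search_ground_route(ex, builder, sm, j,
//                                                     sm.cfg.head_back_radius_mm);
//     if (!success) {
//         // neither the straight route nor an optimized bridge direction was collision-free
//     }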

}} // namespace Slic3r::sla

#endif // SUPPORTTREEUTILSLEGACY_HPP