Diffstat (limited to 'src/lib/netlist/solver')
-rw-r--r--   src/lib/netlist/solver/mat_cr.h                175
-rw-r--r--   src/lib/netlist/solver/nld_matrix_solver.cpp   937
-rw-r--r--   src/lib/netlist/solver/nld_matrix_solver.h     471
-rw-r--r--   src/lib/netlist/solver/nld_ms_direct.h         386
-rw-r--r--   src/lib/netlist/solver/nld_ms_direct1.h         50
-rw-r--r--   src/lib/netlist/solver/nld_ms_direct2.h         59
-rw-r--r--   src/lib/netlist/solver/nld_ms_direct_lu.h       26
-rw-r--r--   src/lib/netlist/solver/nld_ms_gcr.h            562
-rw-r--r--   src/lib/netlist/solver/nld_ms_gmres.h          412
-rw-r--r--   src/lib/netlist/solver/nld_ms_sm.h             416
-rw-r--r--   src/lib/netlist/solver/nld_ms_sor.h            112
-rw-r--r--   src/lib/netlist/solver/nld_ms_sor_mat.h        338
-rw-r--r--   src/lib/netlist/solver/nld_ms_w.h              165
-rw-r--r--   src/lib/netlist/solver/nld_solver.cpp          641
-rw-r--r--   src/lib/netlist/solver/nld_solver.h            178
-rw-r--r--   src/lib/netlist/solver/vector_base.h           144
16 files changed, 2231 insertions, 2841 deletions
diff --git a/src/lib/netlist/solver/mat_cr.h b/src/lib/netlist/solver/mat_cr.h
deleted file mode 100644
index 8693c49c3e1..00000000000
--- a/src/lib/netlist/solver/mat_cr.h
+++ /dev/null
@@ -1,175 +0,0 @@
-// license:GPL-2.0+
-// copyright-holders:Couriersud
-/*
- * mat_cr.h
- *
- * Compressed row format matrices
- *
- */
-
-#ifndef MAT_CR_H_
-#define MAT_CR_H_
-
-#include <algorithm>
-#include "../plib/pconfig.h"
-#include "../plib/palloc.h"
-
-template<std::size_t N, typename C = uint16_t, typename T = double>
-struct mat_cr_t
-{
- typedef C index_type;
- typedef T value_type;
-
- C diag[N]; // diagonal index pointer n
- C ia[N+1]; // row index pointer n + 1
- C ja[N*N]; // column index array nz_num, initially (n * n)
- T A[N*N]; // Matrix elements nz_num, initially (n * n)
-
- std::size_t size;
- std::size_t nz_num;
-
- explicit mat_cr_t(const std::size_t n)
- : size(n)
- , nz_num(0)
- {
-#if 0
-#if 0
- ia = plib::palloc_array<C>(n + 1);
- ja = plib::palloc_array<C>(n * n);
- diag = plib::palloc_array<C>(n);
-#else
- diag = plib::palloc_array<C>(n + (n + 1) + n * n);
- ia = diag + n;
- ja = ia + (n+1);
- A = plib::palloc_array<T>(n * n);
-#endif
-#endif
- }
-
- ~mat_cr_t()
- {
-#if 0
- plib::pfree_array(diag);
-#if 0
- plib::pfree_array(ia);
- plib::pfree_array(ja);
-#endif
- plib::pfree_array(A);
-#endif
- }
-
- void set_scalar(const T scalar)
- {
- for (std::size_t i=0, e=nz_num; i<e; i++)
- A[i] = scalar;
- }
-
- void mult_vec(const T * RESTRICT x, T * RESTRICT res)
- {
- /*
- * res = A * x
- */
-
- std::size_t i = 0;
- std::size_t k = 0;
- const std::size_t oe = nz_num;
-
- while (k < oe)
- {
- T tmp = 0.0;
- const std::size_t e = ia[i+1];
- for (; k < e; k++)
- tmp += A[k] * x[ja[k]];
- res[i++] = tmp;
- }
- }
-
- void incomplete_LU_factorization(T * RESTRICT LU)
- {
- /*
- * incomplete LU Factorization according to http://de.wikipedia.org/wiki/ILU-Zerlegung
- *
- * Result is stored in matrix LU
- *
- */
-
- const std::size_t lnz = nz_num;
-
- for (std::size_t k = 0; k < lnz; k++)
- LU[k] = A[k];
-
- for (std::size_t i = 1; ia[i] < lnz; i++) // row i
- {
- const std::size_t iai1 = ia[i + 1];
- const std::size_t pke = diag[i];
- for (std::size_t pk = ia[i]; pk < pke; pk++) // all columns left of diag in row i
- {
- // pk == (i, k)
- const std::size_t k = ja[pk];
- const std::size_t iak1 = ia[k + 1];
- const T LUpk = LU[pk] = LU[pk] / LU[diag[k]];
-
- std::size_t pt = ia[k];
-
- for (std::size_t pj = pk + 1; pj < iai1; pj++) // pj = (i, j)
- {
- // we can assume that within a row ja increases continuously */
- const std::size_t ej = ja[pj];
- while (ja[pt] < ej && pt < iak1)
- pt++;
- if (pt < iak1 && ja[pt] == ej)
- LU[pj] = LU[pj] - LUpk * LU[pt];
- }
- }
- }
- }
-
- void solveLUx (const T * RESTRICT LU, T * RESTRICT r)
- {
- /*
- * Solve a linear equation Ax = r
- * where
- * A = L*U
- *
- * L unit lower triangular
- * U upper triangular
- *
- * ==> LUx = r
- *
- * ==> Ux = L⁻¹ r = w
- *
- * ==> r = Lw
- *
- * This can be solved for w using backwards elimination in L.
- *
- * Now Ux = w
- *
- * This can be solved for x using backwards elimination in U.
- *
- */
-
- for (std::size_t i = 1; ia[i] < nz_num; ++i )
- {
- T tmp = 0.0;
- const std::size_t j1 = ia[i];
- const std::size_t j2 = diag[i];
-
- for (std::size_t j = j1; j < j2; ++j )
- tmp += LU[j] * r[ja[j]];
-
- r[i] -= tmp;
- }
- // i now is equal to n;
- for (std::size_t i = size; i-- > 0; )
- {
- T tmp = 0.0;
- const std::size_t di = diag[i];
- const std::size_t j2 = ia[i+1];
- for (std::size_t j = di + 1; j < j2; j++ )
- tmp += LU[j] * r[ja[j]];
- r[i] = (r[i] - tmp) / LU[di];
- }
- }
-};
-
-#endif /* MAT_CR_H_ */
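For reference, the removed mat_cr.h keeps matrices in compressed-row form: A holds the non-zero values row by row, ja the column of each value, ia the offset at which each row starts, and diag the position of each diagonal element. The following standalone sketch is illustrative only (the names are placeholders, not part of the patch); it shows how those arrays describe a small matrix and how mult_vec() walks them row by row.

// Compressed-row sketch (illustrative, not part of the patch).
//
//     | 4 0 1 |      A    = { 4, 1,  5, 2,  1, 3 }   // non-zero values, row by row
// M = | 0 5 2 |      ja   = { 0, 2,  1, 2,  0, 2 }   // column of each value
//     | 1 0 3 |      ia   = { 0, 2, 4, 6 }           // start of each row in A/ja
//                    diag = { 0, 2, 5 }              // index of each diagonal element
#include <cstddef>
#include <vector>

static void mult_vec_csr(const std::vector<double> &A,
		const std::vector<std::size_t> &ja,
		const std::vector<std::size_t> &ia,
		const double *x, double *res)
{
	// res = M * x: each row i owns the slice [ia[i], ia[i+1]) of A and ja
	for (std::size_t i = 0; i + 1 < ia.size(); i++)
	{
		double tmp = 0.0;
		for (std::size_t k = ia[i]; k < ia[i + 1]; k++)
			tmp += A[k] * x[ja[k]];
		res[i] = tmp;
	}
}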
diff --git a/src/lib/netlist/solver/nld_matrix_solver.cpp b/src/lib/netlist/solver/nld_matrix_solver.cpp
index 87c9b46ba21..1fe14b0f5e1 100644
--- a/src/lib/netlist/solver/nld_matrix_solver.cpp
+++ b/src/lib/netlist/solver/nld_matrix_solver.cpp
@@ -6,552 +6,623 @@
*/
#include "nld_matrix_solver.h"
-#include "../plib/putil.h"
+#include "plib/putil.h"
#include <cmath> // <<= needed by windows build
namespace netlist
{
- namespace devices
- {
-
-proxied_analog_output_t::~proxied_analog_output_t()
+namespace devices
{
-}
-terms_for_net_t::terms_for_net_t()
- : m_railstart(0)
- , m_last_V(0.0)
- , m_DD_n_m_1(0.0)
- , m_h_n_m_1(1e-9)
-{
-}
+ terms_for_net_t::terms_for_net_t()
+ : m_railstart(0)
+ , m_last_V(0.0)
+ , m_DD_n_m_1(0.0)
+ , m_h_n_m_1(1e-9)
+ {
+ }
-void terms_for_net_t::clear()
-{
- m_terms.clear();
- m_connected_net_idx.clear();
- m_gt.clear();
- m_go.clear();
- m_Idr.clear();
- m_connected_net_V.clear();
-}
-
-void terms_for_net_t::add(terminal_t *term, int net_other, bool sorted)
-{
- if (sorted)
- for (unsigned i=0; i < m_connected_net_idx.size(); i++)
- {
- if (m_connected_net_idx[i] > net_other)
+ void terms_for_net_t::add(terminal_t *term, int net_other, bool sorted)
+ {
+ if (sorted)
+ for (std::size_t i=0; i < m_connected_net_idx.size(); i++)
{
- plib::container::insert_at(m_terms, i, term);
- plib::container::insert_at(m_connected_net_idx, i, net_other);
- plib::container::insert_at(m_gt, i, 0.0);
- plib::container::insert_at(m_go, i, 0.0);
- plib::container::insert_at(m_Idr, i, 0.0);
- plib::container::insert_at(m_connected_net_V, i, nullptr);
- return;
+ if (m_connected_net_idx[i] > net_other)
+ {
+ plib::container::insert_at(m_terms, i, term);
+ plib::container::insert_at(m_connected_net_idx, i, net_other);
+ return;
+ }
}
- }
- m_terms.push_back(term);
- m_connected_net_idx.push_back(net_other);
- m_gt.push_back(0.0);
- m_go.push_back(0.0);
- m_Idr.push_back(0.0);
- m_connected_net_V.push_back(nullptr);
-}
-
-void terms_for_net_t::set_pointers()
-{
- for (unsigned i = 0; i < count(); i++)
+ m_terms.push_back(term);
+ m_connected_net_idx.push_back(net_other);
+ }
+
+ // ----------------------------------------------------------------------------------------
+ // matrix_solver
+ // ----------------------------------------------------------------------------------------
+
+ matrix_solver_t::matrix_solver_t(netlist_state_t &anetlist, const pstring &name,
+ const eSortType sort, const solver_parameters_t *params)
+ : device_t(anetlist, name)
+ , m_params(*params)
+ , m_stat_calculations(*this, "m_stat_calculations", 0)
+ , m_stat_newton_raphson(*this, "m_stat_newton_raphson", 0)
+ , m_stat_vsolver_calls(*this, "m_stat_vsolver_calls", 0)
+ , m_iterative_fail(*this, "m_iterative_fail", 0)
+ , m_iterative_total(*this, "m_iterative_total", 0)
+ , m_last_step(*this, "m_last_step", netlist_time::zero())
+ , m_fb_sync(*this, "FB_sync")
+ , m_Q_sync(*this, "Q_sync")
+ , m_ops(0)
+ , m_sort(sort)
{
- m_terms[i]->set_ptrs(&m_gt[i], &m_go[i], &m_Idr[i]);
- m_connected_net_V[i] = m_terms[i]->m_otherterm->net().Q_Analog_state_ptr();
+ connect_post_start(m_fb_sync, m_Q_sync);
}
-}
-
-// ----------------------------------------------------------------------------------------
-// matrix_solver
-// ----------------------------------------------------------------------------------------
-
-matrix_solver_t::matrix_solver_t(netlist_t &anetlist, const pstring &name,
- const eSortType sort, const solver_parameters_t *params)
- : device_t(anetlist, name)
- , m_params(*params)
- , m_stat_calculations(*this, "m_stat_calculations", 0)
- , m_stat_newton_raphson(*this, "m_stat_newton_raphson", 0)
- , m_stat_vsolver_calls(*this, "m_stat_vsolver_calls", 0)
- , m_iterative_fail(*this, "m_iterative_fail", 0)
- , m_iterative_total(*this, "m_iterative_total", 0)
- , m_last_step(*this, "m_last_step", netlist_time::zero())
- , m_fb_sync(*this, "FB_sync")
- , m_Q_sync(*this, "Q_sync")
- , m_ops(0)
- , m_sort(sort)
-{
- connect_post_start(m_fb_sync, m_Q_sync);
-}
-matrix_solver_t::~matrix_solver_t()
-{
-}
+ void matrix_solver_t::setup_base(analog_net_t::list_t &nets)
+ {
-void matrix_solver_t::setup_base(analog_net_t::list_t &nets)
-{
+ log().debug("New solver setup\n");
- log().debug("New solver setup\n");
+ m_nets.clear();
+ m_terms.clear();
- m_nets.clear();
- m_terms.clear();
+ for (auto & net : nets)
+ {
+ m_nets.push_back(net);
+ m_terms.push_back(plib::make_unique<terms_for_net_t>());
+ m_rails_temp.push_back(plib::make_unique<terms_for_net_t>());
+ }
- for (auto & net : nets)
- {
- m_nets.push_back(net);
- m_terms.push_back(plib::make_unique<terms_for_net_t>());
- m_rails_temp.push_back(plib::palloc<terms_for_net_t>());
- }
+ for (std::size_t k = 0; k < nets.size(); k++)
+ {
+ analog_net_t *net = nets[k];
- for (std::size_t k = 0; k < nets.size(); k++)
- {
- analog_net_t *net = nets[k];
+ log().debug("setting up net\n");
- log().debug("setting up net\n");
+ net->set_solver(this);
- net->set_solver(this);
+ for (auto &p : net->core_terms())
+ {
+ log().debug("{1} {2} {3}\n", p->name(), net->name(), net->isRailNet());
+ switch (p->type())
+ {
+ case detail::terminal_type::TERMINAL:
+ if (p->device().is_timestep())
+ if (!plib::container::contains(m_step_devices, &p->device()))
+ m_step_devices.push_back(&p->device());
+ if (p->device().is_dynamic())
+ if (!plib::container::contains(m_dynamic_devices, &p->device()))
+ m_dynamic_devices.push_back(&p->device());
+ {
+ auto *pterm = dynamic_cast<terminal_t *>(p);
+ add_term(k, pterm);
+ }
+ log().debug("Added terminal {1}\n", p->name());
+ break;
+ case detail::terminal_type::INPUT:
+ {
+ proxied_analog_output_t *net_proxy_output = nullptr;
+ for (auto & input : m_inps)
+ if (input->proxied_net() == &p->net())
+ {
+ net_proxy_output = input.get();
+ break;
+ }
+
+ if (net_proxy_output == nullptr)
+ {
+ pstring nname = this->name() + "." + pstring(plib::pfmt("m{1}")(m_inps.size()));
+ nl_assert(p->net().is_analog());
+ auto net_proxy_output_u = pool().make_poolptr<proxied_analog_output_t>(*this, nname, static_cast<analog_net_t *>(&p->net()));
+ net_proxy_output = net_proxy_output_u.get();
+ m_inps.push_back(std::move(net_proxy_output_u));
+ }
+ net_proxy_output->net().add_terminal(*p);
+ // FIXME: repeated calling - kind of brute force
+ net_proxy_output->net().rebuild_list();
+ log().debug("Added input\n");
+ }
+ break;
+ case detail::terminal_type::OUTPUT:
+ log().fatal(MF_1_UNHANDLED_ELEMENT_1_FOUND,
+ p->name());
+ break;
+ }
+ }
+ log().debug("added net with {1} populated connections\n", net->core_terms().size());
+ }
- for (auto &p : net->m_core_terms)
+ /* now setup the matrix */
+ setup_matrix();
+ }
+
+ void matrix_solver_t::sort_terms(eSortType sort)
+ {
+ /* Sort in descending order by number of connected matrix voltages.
+ * The idea is, that for Gauss-Seidel algo the first voltage computed
+ * depends on the greatest number of previous voltages thus taking into
+ * account the maximum amount of information.
+ *
+ * This actually improves performance on popeye slightly. Average
+ * GS computations reduce from 2.509 to 2.370
+ *
+ * Smallest to largest : 2.613
+ * Unsorted : 2.509
+ * Largest to smallest : 2.370
+ *
+ * Sorting as a general matrix pre-conditioning is mentioned in
+ * literature but I have found no articles about Gauss Seidel.
+ *
+ * For Gaussian Elimination however increasing order is better suited.
+ * NOTE: Even better would be to sort on elements right of the matrix diagonal.
+ *
+ */
+
+ const std::size_t iN = m_nets.size();
+
+ switch (sort)
{
- log().debug("{1} {2} {3}\n", p->name(), net->name(), net->isRailNet());
- switch (p->type())
- {
- case detail::terminal_type::TERMINAL:
- if (p->device().is_timestep())
- if (!plib::container::contains(m_step_devices, &p->device()))
- m_step_devices.push_back(&p->device());
- if (p->device().is_dynamic())
- if (!plib::container::contains(m_dynamic_devices, &p->device()))
- m_dynamic_devices.push_back(&p->device());
+ case PREFER_BAND_MATRIX:
+ {
+ for (std::size_t k = 0; k < iN - 1; k++)
{
- terminal_t *pterm = dynamic_cast<terminal_t *>(p);
- add_term(k, pterm);
+ auto pk = get_weight_around_diag(k,k);
+ for (std::size_t i = k+1; i < iN; i++)
+ {
+ auto pi = get_weight_around_diag(i,k);
+ if (pi < pk)
+ {
+ std::swap(m_terms[i], m_terms[k]);
+ std::swap(m_nets[i], m_nets[k]);
+ pk = get_weight_around_diag(k,k);
+ }
+ }
}
- log().debug("Added terminal {1}\n", p->name());
- break;
- case detail::terminal_type::INPUT:
+ }
+ break;
+ case PREFER_IDENTITY_TOP_LEFT:
+ {
+ for (std::size_t k = 0; k < iN - 1; k++)
{
- proxied_analog_output_t *net_proxy_output = nullptr;
- for (auto & input : m_inps)
- if (input->m_proxied_net == &p->net())
+ auto pk = get_left_right_of_diag(k,k);
+ for (std::size_t i = k+1; i < iN; i++)
+ {
+ auto pi = get_left_right_of_diag(i,k);
+ if (pi.first <= pk.first && pi.second >= pk.second)
{
- net_proxy_output = input.get();
- break;
+ std::swap(m_terms[i], m_terms[k]);
+ std::swap(m_nets[i], m_nets[k]);
+ pk = get_left_right_of_diag(k,k);
}
+ }
+ }
+ }
+ break;
+ case ASCENDING:
+ case DESCENDING:
+ {
+ int sort_order = (m_sort == DESCENDING ? 1 : -1);
- if (net_proxy_output == nullptr)
+ for (std::size_t k = 0; k < iN - 1; k++)
+ for (std::size_t i = k+1; i < iN; i++)
{
- pstring nname = this->name() + "." + pstring(plib::pfmt("m{1}")(m_inps.size()));
- auto net_proxy_output_u = plib::make_unique<proxied_analog_output_t>(*this, nname);
- net_proxy_output = net_proxy_output_u.get();
- m_inps.push_back(std::move(net_proxy_output_u));
- nl_assert(p->net().is_analog());
- net_proxy_output->m_proxied_net = static_cast<analog_net_t *>(&p->net());
+ if ((static_cast<int>(m_terms[k]->m_railstart) - static_cast<int>(m_terms[i]->m_railstart)) * sort_order < 0)
+ {
+ std::swap(m_terms[i], m_terms[k]);
+ std::swap(m_nets[i], m_nets[k]);
+ }
}
- net_proxy_output->net().add_terminal(*p);
- // FIXME: repeated calling - kind of brute force
- net_proxy_output->net().rebuild_list();
- log().debug("Added input\n");
- }
- break;
- case detail::terminal_type::OUTPUT:
- log().fatal(MF_1_UNHANDLED_ELEMENT_1_FOUND,
- p->name());
- break;
- }
+ }
+ break;
+ case NOSORT:
+ break;
+ }
+ /* rebuild */
+ for (auto &term : m_terms)
+ {
+ int *other = term->m_connected_net_idx.data();
+ for (std::size_t i = 0; i < term->count(); i++)
+ //FIXME: this is weird
+ if (other[i] != -1)
+ other[i] = get_net_idx(&term->terms()[i]->connected_terminal()->net());
}
- log().debug("added net with {1} populated connections\n", net->m_core_terms.size());
}
- /* now setup the matrix */
- setup_matrix();
-}
+ void matrix_solver_t::setup_matrix()
+ {
+ const std::size_t iN = m_nets.size();
-void matrix_solver_t::setup_matrix()
-{
- const std::size_t iN = m_nets.size();
+ for (std::size_t k = 0; k < iN; k++)
+ {
+ m_terms[k]->m_railstart = m_terms[k]->count();
+ for (std::size_t i = 0; i < m_rails_temp[k]->count(); i++)
+ this->m_terms[k]->add(m_rails_temp[k]->terms()[i], m_rails_temp[k]->m_connected_net_idx.data()[i], false);
+ }
- for (std::size_t k = 0; k < iN; k++)
- {
- m_terms[k]->m_railstart = m_terms[k]->count();
- for (std::size_t i = 0; i < m_rails_temp[k]->count(); i++)
- this->m_terms[k]->add(m_rails_temp[k]->terms()[i], m_rails_temp[k]->connected_net_idx()[i], false);
+ // free all - no longer needed
+ m_rails_temp.clear();
- m_terms[k]->set_pointers();
- }
+ sort_terms(m_sort);
- for (terms_for_net_t *rt : m_rails_temp)
- {
- rt->clear(); // no longer needed
- plib::pfree(rt); // no longer needed
- }
+ this->set_pointers();
- m_rails_temp.clear();
+ /* create a list of non zero elements. */
+ for (unsigned k = 0; k < iN; k++)
+ {
+ terms_for_net_t * t = m_terms[k].get();
+ /* pretty brutal */
+ int *other = t->m_connected_net_idx.data();
- /* Sort in descending order by number of connected matrix voltages.
- * The idea is, that for Gauss-Seidel algo the first voltage computed
- * depends on the greatest number of previous voltages thus taking into
- * account the maximum amout of information.
- *
- * This actually improves performance on popeye slightly. Average
- * GS computations reduce from 2.509 to 2.370
- *
- * Smallest to largest : 2.613
- * Unsorted : 2.509
- * Largest to smallest : 2.370
- *
- * Sorting as a general matrix pre-conditioning is mentioned in
- * literature but I have found no articles about Gauss Seidel.
- *
- * For Gaussian Elimination however increasing order is better suited.
- * NOTE: Even better would be to sort on elements right of the matrix diagonal.
- *
- */
+ t->m_nz.clear();
- if (m_sort != NOSORT)
- {
- int sort_order = (m_sort == DESCENDING ? 1 : -1);
+ for (std::size_t i = 0; i < t->m_railstart; i++)
+ if (!plib::container::contains(t->m_nz, static_cast<unsigned>(other[i])))
+ t->m_nz.push_back(static_cast<unsigned>(other[i]));
+
+ t->m_nz.push_back(k); // add diagonal
+
+ /* and sort */
+ std::sort(t->m_nz.begin(), t->m_nz.end());
+ }
+
+ /* create a list of non zero elements right of the diagonal
+ * These lists anticipate the population of array elements by
+ * Gaussian elimination.
+ */
+ for (std::size_t k = 0; k < iN; k++)
+ {
+ terms_for_net_t * t = m_terms[k].get();
+ /* pretty brutal */
+ int *other = t->m_connected_net_idx.data();
- for (unsigned k = 0; k < iN - 1; k++)
- for (unsigned i = k+1; i < iN; i++)
+ if (k==0)
+ t->m_nzrd.clear();
+ else
{
- if ((static_cast<int>(m_terms[k]->m_railstart) - static_cast<int>(m_terms[i]->m_railstart)) * sort_order < 0)
+ t->m_nzrd = m_terms[k-1]->m_nzrd;
+ for (auto j = t->m_nzrd.begin(); j != t->m_nzrd.end(); )
{
- std::swap(m_terms[i], m_terms[k]);
- std::swap(m_nets[i], m_nets[k]);
+ if (*j < k + 1)
+ j = t->m_nzrd.erase(j);
+ else
+ ++j;
}
}
- for (auto &term : m_terms)
- {
- int *other = term->connected_net_idx();
- for (unsigned i = 0; i < term->count(); i++)
- if (other[i] != -1)
- other[i] = get_net_idx(&term->terms()[i]->m_otherterm->net());
- }
- }
+ for (std::size_t i = 0; i < t->m_railstart; i++)
+ if (!plib::container::contains(t->m_nzrd, static_cast<unsigned>(other[i])) && other[i] >= static_cast<int>(k + 1))
+ t->m_nzrd.push_back(static_cast<unsigned>(other[i]));
- /* create a list of non zero elements. */
- for (unsigned k = 0; k < iN; k++)
- {
- terms_for_net_t * t = m_terms[k].get();
- /* pretty brutal */
- int *other = t->connected_net_idx();
-
- t->m_nz.clear();
-
- for (unsigned i = 0; i < t->m_railstart; i++)
- if (!plib::container::contains(t->m_nz, static_cast<unsigned>(other[i])))
- t->m_nz.push_back(static_cast<unsigned>(other[i]));
+ /* and sort */
+ std::sort(t->m_nzrd.begin(), t->m_nzrd.end());
+ }
- t->m_nz.push_back(k); // add diagonal
+ /* create a list of non zero elements below diagonal k
+ * This should reduce cache misses ...
+ */
- /* and sort */
- std::sort(t->m_nz.begin(), t->m_nz.end());
- }
+ std::vector<std::vector<bool>> touched(iN, std::vector<bool>(iN));
- /* create a list of non zero elements right of the diagonal
- * These list anticipate the population of array elements by
- * Gaussian elimination.
- */
- for (unsigned k = 0; k < iN; k++)
- {
- terms_for_net_t * t = m_terms[k].get();
- /* pretty brutal */
- int *other = t->connected_net_idx();
+ for (std::size_t k = 0; k < iN; k++)
+ {
+ for (std::size_t j = 0; j < iN; j++)
+ touched[k][j] = false;
+ for (std::size_t j = 0; j < m_terms[k]->m_nz.size(); j++)
+ touched[k][m_terms[k]->m_nz[j]] = true;
+ }
- if (k==0)
- t->m_nzrd.clear();
- else
+ m_ops = 0;
+ for (unsigned k = 0; k < iN; k++)
{
- t->m_nzrd = m_terms[k-1]->m_nzrd;
- for (auto j = t->m_nzrd.begin(); j != t->m_nzrd.end(); )
+ m_ops++; // 1/A(k,k)
+ for (unsigned row = k + 1; row < iN; row++)
{
- if (*j < k + 1)
- j = t->m_nzrd.erase(j);
- else
- ++j;
+ if (touched[row][k])
+ {
+ m_ops++;
+ if (!plib::container::contains(m_terms[k]->m_nzbd, row))
+ m_terms[k]->m_nzbd.push_back(row);
+ for (std::size_t col = k + 1; col < iN; col++)
+ if (touched[k][col])
+ {
+ touched[row][col] = true;
+ m_ops += 2;
+ }
+ }
}
}
+ log().verbose("Number of mults/adds for {1}: {2}", name(), m_ops);
- for (unsigned i = 0; i < t->m_railstart; i++)
- if (!plib::container::contains(t->m_nzrd, static_cast<unsigned>(other[i])) && other[i] >= static_cast<int>(k + 1))
- t->m_nzrd.push_back(static_cast<unsigned>(other[i]));
+ if ((false))
+ for (std::size_t k = 0; k < iN; k++)
+ {
+ pstring line = plib::pfmt("{1:3}")(k);
+ for (const auto & nzrd : m_terms[k]->m_nzrd)
+ line += plib::pfmt(" {1:3}")(nzrd);
+ log().verbose("{1}", line);
+ }
- /* and sort */
- std::sort(t->m_nzrd.begin(), t->m_nzrd.end());
- }
+ /*
+ * save states
+ */
+ for (std::size_t k = 0; k < iN; k++)
+ {
+ pstring num = plib::pfmt("{1}")(k);
- /* create a list of non zero elements below diagonal k
- * This should reduce cache misses ...
- */
+ state().save(*this, m_terms[k]->m_last_V, this->name(), "lastV." + num);
+ state().save(*this, m_terms[k]->m_DD_n_m_1, this->name(), "m_DD_n_m_1." + num);
+ state().save(*this, m_terms[k]->m_h_n_m_1, this->name(), "m_h_n_m_1." + num);
- bool **touched = plib::palloc_array<bool *>(iN);
- for (unsigned k=0; k<iN; k++)
- touched[k] = plib::palloc_array<bool>(iN);
+ // FIXME: This shouldn't be necessary, recalculate on each entry ...
+ state().save(*this, m_gonn[k],"GO" + num, this->name(), m_terms[k]->count());
+ state().save(*this, m_gtn[k],"GT" + num, this->name(), m_terms[k]->count());
+ state().save(*this, m_Idrn[k],"IDR" + num, this->name(), m_terms[k]->count());
+ }
+ }
- for (unsigned k = 0; k < iN; k++)
+ void matrix_solver_t::update_inputs()
{
- for (unsigned j = 0; j < iN; j++)
- touched[k][j] = false;
- for (unsigned j = 0; j < m_terms[k]->m_nz.size(); j++)
- touched[k][m_terms[k]->m_nz[j]] = true;
+ // avoid recursive calls. Inputs are updated outside this call
+ for (auto &inp : m_inps)
+ inp->push(inp->proxied_net()->Q_Analog());
}
- m_ops = 0;
- for (unsigned k = 0; k < iN; k++)
+ void matrix_solver_t::update_dynamic()
{
- m_ops++; // 1/A(k,k)
- for (unsigned row = k + 1; row < iN; row++)
- {
- if (touched[row][k])
- {
- m_ops++;
- if (!plib::container::contains(m_terms[k]->m_nzbd, row))
- m_terms[k]->m_nzbd.push_back(row);
- for (unsigned col = k + 1; col < iN; col++)
- if (touched[k][col])
- {
- touched[row][col] = true;
- m_ops += 2;
- }
- }
- }
+ /* update all non-linear devices */
+ for (auto &dyn : m_dynamic_devices)
+ dyn->update_terminals();
}
- log().verbose("Number of mults/adds for {1}: {2}", name(), m_ops);
- if ((0))
- for (unsigned k = 0; k < iN; k++)
+ void matrix_solver_t::reset()
+ {
+ m_last_step = netlist_time::zero();
+ }
+
+ void matrix_solver_t::update() NL_NOEXCEPT
+ {
+ const netlist_time new_timestep = solve(exec().time());
+ update_inputs();
+
+ if (m_params.m_dynamic_ts && has_timestep_devices() && new_timestep > netlist_time::zero())
{
- pstring line = plib::pfmt("{1:3}")(k);
- for (unsigned j = 0; j < m_terms[k]->m_nzrd.size(); j++)
- line += plib::pfmt(" {1:3}")(m_terms[k]->m_nzrd[j]);
- log().verbose("{1}", line);
+ m_Q_sync.net().toggle_and_push_to_queue(new_timestep);
}
+ }
- /*
- * save states
+ /* update_forced is called from within param_update
+ *
+ * this should only occur outside of execution and thus
+ * using time should be safe.
+ *
*/
- for (unsigned k = 0; k < iN; k++)
+ void matrix_solver_t::update_forced()
{
- pstring num = plib::pfmt("{1}")(k);
+ const netlist_time new_timestep = solve(exec().time());
+ plib::unused_var(new_timestep);
- netlist().save(*this, m_terms[k]->m_last_V, "lastV." + num);
- netlist().save(*this, m_terms[k]->m_DD_n_m_1, "m_DD_n_m_1." + num);
- netlist().save(*this, m_terms[k]->m_h_n_m_1, "m_h_n_m_1." + num);
+ update_inputs();
- netlist().save(*this, m_terms[k]->go(),"GO" + num, m_terms[k]->count());
- netlist().save(*this, m_terms[k]->gt(),"GT" + num, m_terms[k]->count());
- netlist().save(*this, m_terms[k]->Idr(),"IDR" + num , m_terms[k]->count());
+ if (m_params.m_dynamic_ts && has_timestep_devices())
+ {
+ m_Q_sync.net().toggle_and_push_to_queue(netlist_time::from_double(m_params.m_min_timestep));
+ }
}
- for (unsigned k=0; k<iN; k++)
- plib::pfree_array(touched[k]);
- plib::pfree_array(touched);
-}
+ void matrix_solver_t::step(const netlist_time &delta)
+ {
+ const nl_double dd = delta.as_double();
+ for (auto &d : m_step_devices)
+ d->timestep(dd);
+ }
-void matrix_solver_t::update_inputs()
-{
- // avoid recursive calls. Inputs are updated outside this call
- for (auto &inp : m_inps)
- inp->push(inp->m_proxied_net->Q_Analog());
-}
+ void matrix_solver_t::solve_base()
+ {
+ ++m_stat_vsolver_calls;
+ if (has_dynamic_devices())
+ {
+ std::size_t this_resched;
+ std::size_t newton_loops = 0;
+ do
+ {
+ update_dynamic();
+ // Gauss-Seidel will revert to Gaussian elimination if steps exceeded.
+ this_resched = this->vsolve_non_dynamic(true);
+ newton_loops++;
+ } while (this_resched > 1 && newton_loops < m_params.m_nr_loops);
+
+ m_stat_newton_raphson += newton_loops;
+ // reschedule ....
+ if (this_resched > 1 && !m_Q_sync.net().is_queued())
+ {
+ log().warning(MW_1_NEWTON_LOOPS_EXCEEDED_ON_NET_1, this->name());
+ m_Q_sync.net().toggle_and_push_to_queue(m_params.m_nr_recalc_delay);
+ }
+ }
+ else
+ {
+ this->vsolve_non_dynamic(false);
+ }
+ }
-void matrix_solver_t::update_dynamic()
-{
- /* update all non-linear devices */
- for (auto &dyn : m_dynamic_devices)
- dyn->update_terminals();
-}
+ const netlist_time matrix_solver_t::solve(netlist_time now)
+ {
+ const netlist_time delta = now - m_last_step;
-void matrix_solver_t::reset()
-{
- m_last_step = netlist_time::zero();
-}
+ // We are already up to date. Avoid oscillations.
+ // FIXME: Make this a parameter!
+ if (delta < netlist_time::quantum())
+ return netlist_time::zero();
-void matrix_solver_t::update() NL_NOEXCEPT
-{
- const netlist_time new_timestep = solve();
- update_inputs();
+ /* update all terminals for new time step */
+ m_last_step = now;
+ step(delta);
+ solve_base();
+ const netlist_time next_time_step = compute_next_timestep(delta.as_double());
- if (m_params.m_dynamic_ts && has_timestep_devices() && new_timestep > netlist_time::zero())
- {
- m_Q_sync.net().toggle_and_push_to_queue(new_timestep);
+ return next_time_step;
}
-}
-
-void matrix_solver_t::update_forced()
-{
- ATTR_UNUSED const netlist_time new_timestep = solve();
- update_inputs();
- if (m_params.m_dynamic_ts && has_timestep_devices())
+ int matrix_solver_t::get_net_idx(detail::net_t *net)
{
- m_Q_sync.net().toggle_and_push_to_queue(netlist_time::from_double(m_params.m_min_timestep));
+ for (std::size_t k = 0; k < m_nets.size(); k++)
+ if (m_nets[k] == net)
+ return static_cast<int>(k);
+ return -1;
}
-}
-
-void matrix_solver_t::step(const netlist_time &delta)
-{
- const nl_double dd = delta.as_double();
- for (std::size_t k=0; k < m_step_devices.size(); k++)
- m_step_devices[k]->timestep(dd);
-}
-void matrix_solver_t::solve_base()
-{
- ++m_stat_vsolver_calls;
- if (has_dynamic_devices())
+ std::pair<int, int> matrix_solver_t::get_left_right_of_diag(std::size_t irow, std::size_t idiag)
{
- unsigned this_resched;
- unsigned newton_loops = 0;
- do
- {
- update_dynamic();
- // Gauss-Seidel will revert to Gaussian elemination if steps exceeded.
- this_resched = this->vsolve_non_dynamic(true);
- newton_loops++;
- } while (this_resched > 1 && newton_loops < m_params.m_nr_loops);
-
- m_stat_newton_raphson += newton_loops;
- // reschedule ....
- if (this_resched > 1 && !m_Q_sync.net().is_queued())
- {
- log().warning(MW_1_NEWTON_LOOPS_EXCEEDED_ON_NET_1, this->name());
- m_Q_sync.net().toggle_and_push_to_queue(m_params.m_nr_recalc_delay);
- }
- }
- else
- {
- this->vsolve_non_dynamic(false);
- }
-}
+ /*
+ * return the maximum column left of the diagonal (-1 if no cols found)
+ * return the minimum column right of the diagonal (999999 if no cols found)
+ */
-const netlist_time matrix_solver_t::solve()
-{
- const netlist_time now = netlist().time();
- const netlist_time delta = now - m_last_step;
+ const auto row = static_cast<int>(irow);
+ const auto diag = static_cast<int>(idiag);
- // We are already up to date. Avoid oscillations.
- // FIXME: Make this a parameter!
- if (delta < netlist_time::quantum())
- return netlist_time::zero();
+ int colmax = -1;
+ int colmin = 999999;
- /* update all terminals for new time step */
- m_last_step = now;
- step(delta);
- solve_base();
- const netlist_time next_time_step = compute_next_timestep(delta.as_double());
+ auto &term = m_terms[irow];
- return next_time_step;
-}
+ for (std::size_t i = 0; i < term->count(); i++)
+ {
+ auto col = get_net_idx(&term->terms()[i]->connected_terminal()->net());
+ if (col != -1)
+ {
+ if (col==row) col = diag;
+ else if (col==diag) col = row;
-int matrix_solver_t::get_net_idx(detail::net_t *net)
-{
- for (std::size_t k = 0; k < m_nets.size(); k++)
- if (m_nets[k] == net)
- return static_cast<int>(k);
- return -1;
-}
+ if (col > diag && col < colmin)
+ colmin = col;
+ else if (col < diag && col > colmax)
+ colmax = col;
+ }
+ }
+ return {colmax, colmin};
+ }
-void matrix_solver_t::add_term(std::size_t k, terminal_t *term)
-{
- if (term->m_otherterm->net().isRailNet())
+ double matrix_solver_t::get_weight_around_diag(std::size_t row, std::size_t diag)
{
- m_rails_temp[k]->add(term, -1, false);
+ {
+ /*
+ * return average absolute distance
+ */
+
+ std::vector<bool> touched(1024, false); // FIXME!
+
+ double weight = 0.0;
+ auto &term = m_terms[row];
+ for (std::size_t i = 0; i < term->count(); i++)
+ {
+ auto col = get_net_idx(&term->terms()[i]->connected_terminal()->net());
+ if (col >= 0)
+ {
+ auto colu = static_cast<std::size_t>(col);
+ if (!touched[colu])
+ {
+ if (colu==row) colu = static_cast<unsigned>(diag);
+ else if (colu==diag) colu = static_cast<unsigned>(row);
+
+ weight = weight + std::abs(static_cast<double>(colu) - static_cast<double>(diag));
+ touched[colu] = true;
+ }
+ }
+ }
+ return weight; // / static_cast<double>(term->m_railstart);
+ }
}
- else
+
+ void matrix_solver_t::add_term(std::size_t k, terminal_t *term)
{
- int ot = get_net_idx(&term->m_otherterm->net());
- if (ot>=0)
+ if (term->connected_terminal()->net().isRailNet())
{
- m_terms[k]->add(term, ot, true);
+ m_rails_temp[k]->add(term, -1, false);
}
- /* Should this be allowed ? */
- else // if (ot<0)
+ else
{
- m_rails_temp[k]->add(term, ot, true);
- log().fatal(MF_1_FOUND_TERM_WITH_MISSING_OTHERNET, term->name());
+ int ot = get_net_idx(&term->connected_terminal()->net());
+ if (ot>=0)
+ {
+ m_terms[k]->add(term, ot, true);
+ }
+ /* Should this be allowed ? */
+ else // if (ot<0)
+ {
+ m_rails_temp[k]->add(term, ot, true);
+ log().fatal(MF_1_FOUND_TERM_WITH_MISSING_OTHERNET, term->name());
+ }
}
}
-}
-netlist_time matrix_solver_t::compute_next_timestep(const double cur_ts)
-{
- nl_double new_solver_timestep = m_params.m_max_timestep;
-
- if (m_params.m_dynamic_ts)
+ netlist_time matrix_solver_t::compute_next_timestep(const double cur_ts)
{
- for (std::size_t k = 0, iN=m_terms.size(); k < iN; k++)
+ nl_double new_solver_timestep = m_params.m_max_timestep;
+
+ if (m_params.m_dynamic_ts)
{
- analog_net_t *n = m_nets[k];
- terms_for_net_t *t = m_terms[k].get();
+ for (std::size_t k = 0, iN=m_terms.size(); k < iN; k++)
+ {
+ analog_net_t *n = m_nets[k];
+ terms_for_net_t *t = m_terms[k].get();
- const nl_double DD_n = (n->Q_Analog() - t->m_last_V);
- const nl_double hn = cur_ts;
+ const nl_double DD_n = (n->Q_Analog() - t->m_last_V);
+ const nl_double hn = cur_ts;
- //printf("%f %f %f %f\n", DD_n, t->m_DD_n_m_1, hn, t->m_h_n_m_1);
- nl_double DD2 = (DD_n / hn - t->m_DD_n_m_1 / t->m_h_n_m_1) / (hn + t->m_h_n_m_1);
- nl_double new_net_timestep;
+ nl_double DD2 = (DD_n / hn - t->m_DD_n_m_1 / t->m_h_n_m_1) / (hn + t->m_h_n_m_1);
+ nl_double new_net_timestep;
- t->m_h_n_m_1 = hn;
- t->m_DD_n_m_1 = DD_n;
- if (std::fabs(DD2) > NL_FCONST(1e-60)) // avoid div-by-zero
- new_net_timestep = std::sqrt(m_params.m_dynamic_lte / std::fabs(NL_FCONST(0.5)*DD2));
- else
- new_net_timestep = m_params.m_max_timestep;
+ t->m_h_n_m_1 = hn;
+ t->m_DD_n_m_1 = DD_n;
+ if (std::fabs(DD2) > plib::constants<nl_double>::cast(1e-60)) // avoid div-by-zero
+ new_net_timestep = std::sqrt(m_params.m_dynamic_lte / std::fabs(plib::constants<nl_double>::cast(0.5)*DD2));
+ else
+ new_net_timestep = m_params.m_max_timestep;
- if (new_net_timestep < new_solver_timestep)
- new_solver_timestep = new_net_timestep;
+ if (new_net_timestep < new_solver_timestep)
+ new_solver_timestep = new_net_timestep;
- t->m_last_V = n->Q_Analog();
- }
- if (new_solver_timestep < m_params.m_min_timestep)
- {
- //log().warning("Dynamic timestep below min timestep. Consider decreasing MIN_TIMESTEP: {1} us", new_solver_timestep*1.0e6);
- new_solver_timestep = m_params.m_min_timestep;
+ t->m_last_V = n->Q_Analog();
+ }
+ if (new_solver_timestep < m_params.m_min_timestep)
+ {
+ //log().warning("Dynamic timestep below min timestep. Consider decreasing MIN_TIMESTEP: {1} us", new_solver_timestep*1.0e6);
+ new_solver_timestep = m_params.m_min_timestep;
+ }
}
+ //if (new_solver_timestep > 10.0 * hn)
+ // new_solver_timestep = 10.0 * hn;
+ /*
+ * FIXME: Factor 2 below is important. Without it, we get timing issues. This must be a bug elsewhere.
+ */
+ return std::max(netlist_time::from_double(new_solver_timestep), netlist_time::quantum() * 2);
}
- //if (new_solver_timestep > 10.0 * hn)
- // new_solver_timestep = 10.0 * hn;
- /*
- * FIXME: Factor 2 below is important. Without, we get timing issues. This must be a bug elsewhere.
- */
- return std::max(netlist_time::from_double(new_solver_timestep), netlist_time::quantum() * 2);
-}
-
-
-void matrix_solver_t::log_stats()
-{
- if (this->m_stat_calculations != 0 && this->m_stat_vsolver_calls && this->m_params.m_log_stats)
+ void matrix_solver_t::log_stats()
{
- log().verbose("==============================================");
- log().verbose("Solver {1}", this->name());
- log().verbose(" ==> {1} nets", this->m_nets.size()); //, (*(*groups[i].first())->m_core_terms.first())->name());
- log().verbose(" has {1} elements", this->has_dynamic_devices() ? "dynamic" : "no dynamic");
- log().verbose(" has {1} elements", this->has_timestep_devices() ? "timestep" : "no timestep");
- log().verbose(" {1:6.3} average newton raphson loops",
- static_cast<double>(this->m_stat_newton_raphson) / static_cast<double>(this->m_stat_vsolver_calls));
- log().verbose(" {1:10} invocations ({2:6.0} Hz) {3:10} gs fails ({4:6.2} %) {5:6.3} average",
- this->m_stat_calculations,
- static_cast<double>(this->m_stat_calculations) / this->netlist().time().as_double(),
- this->m_iterative_fail,
- 100.0 * static_cast<double>(this->m_iterative_fail)
- / static_cast<double>(this->m_stat_calculations),
- static_cast<double>(this->m_iterative_total) / static_cast<double>(this->m_stat_calculations));
+ if (this->m_stat_calculations != 0 && this->m_stat_vsolver_calls && this->m_params.m_log_stats)
+ {
+ log().verbose("==============================================");
+ log().verbose("Solver {1}", this->name());
+ log().verbose(" ==> {1} nets", this->m_nets.size()); //, (*(*groups[i].first())->m_core_terms.first())->name());
+ log().verbose(" has {1} elements", this->has_dynamic_devices() ? "dynamic" : "no dynamic");
+ log().verbose(" has {1} elements", this->has_timestep_devices() ? "timestep" : "no timestep");
+ log().verbose(" {1:6.3} average newton raphson loops",
+ static_cast<double>(this->m_stat_newton_raphson) / static_cast<double>(this->m_stat_vsolver_calls));
+ log().verbose(" {1:10} invocations ({2:6.0} Hz) {3:10} gs fails ({4:6.2} %) {5:6.3} average",
+ this->m_stat_calculations,
+ static_cast<double>(this->m_stat_calculations) / this->exec().time().as_double(),
+ this->m_iterative_fail,
+ 100.0 * static_cast<double>(this->m_iterative_fail)
+ / static_cast<double>(this->m_stat_calculations),
+ static_cast<double>(this->m_iterative_total) / static_cast<double>(this->m_stat_calculations));
+ }
}
-}
-
- } //namespace devices
+} // namespace devices
} // namespace netlist
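The dynamic timestep logic in compute_next_timestep() above bounds the local truncation error per net: DD_n is the voltage change over the last step hn, DD2 approximates the second derivative from the last two divided differences, and the next step is sized so that 0.5*DD2*h^2 stays within m_dynamic_lte. A condensed sketch of that formula, with free-standing placeholder names, reads:

#include <algorithm>
#include <cmath>

// Illustrative sketch of the LTE-based step control above; parameter names are placeholders.
static double next_net_timestep(double DD_n, double hn,      // latest voltage change and step
		double DD_n_m_1, double h_n_m_1,                     // previous change and step
		double dynamic_lte, double max_timestep)
{
	// second divided difference ~ d^2 V / dt^2
	const double DD2 = (DD_n / hn - DD_n_m_1 / h_n_m_1) / (hn + h_n_m_1);
	if (std::fabs(DD2) > 1e-60)                               // avoid div-by-zero
		return std::sqrt(dynamic_lte / std::fabs(0.5 * DD2)); // keep 0.5*DD2*h^2 <= lte
	return max_timestep;
}

The solver then takes the minimum over all nets, never exceeding m_max_timestep and raising the result to m_min_timestep if it falls below it.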
diff --git a/src/lib/netlist/solver/nld_matrix_solver.h b/src/lib/netlist/solver/nld_matrix_solver.h
index da44370d0d9..f76660e1cb9 100644
--- a/src/lib/netlist/solver/nld_matrix_solver.h
+++ b/src/lib/netlist/solver/nld_matrix_solver.h
@@ -10,282 +10,367 @@
#include "netlist/nl_base.h"
#include "netlist/nl_errstr.h"
-#include "netlist/plib/putil.h"
+#include "plib/palloc.h"
+#include "plib/pmatrix2d.h"
+#include "plib/putil.h"
+#include "plib/vector_ops.h"
+
+#include <cmath>
namespace netlist
{
- namespace devices
- {
+namespace devices
+{
/* FIXME: these should become proper devices */
struct solver_parameters_t
{
- int m_pivot;
+ bool m_pivot;
nl_double m_accuracy;
nl_double m_dynamic_lte;
nl_double m_min_timestep;
nl_double m_max_timestep;
nl_double m_gs_sor;
bool m_dynamic_ts;
- unsigned m_gs_loops;
- unsigned m_nr_loops;
+ std::size_t m_gs_loops;
+ std::size_t m_nr_loops;
netlist_time m_nr_recalc_delay;
+ bool m_use_gabs;
+ bool m_use_linear_prediction;
bool m_log_stats;
};
-class terms_for_net_t : plib::nocopyassignmove
-{
-public:
- terms_for_net_t();
+ class terms_for_net_t : plib::nocopyassignmove
+ {
+ public:
+ terms_for_net_t();
- void clear();
+ void clear();
- void add(terminal_t *term, int net_other, bool sorted);
+ void add(terminal_t *term, int net_other, bool sorted);
- inline std::size_t count() const { return m_terms.size(); }
+ std::size_t count() const { return m_terms.size(); }
- inline terminal_t **terms() { return m_terms.data(); }
- inline int *connected_net_idx() { return m_connected_net_idx.data(); }
- inline nl_double *gt() { return m_gt.data(); }
- inline nl_double *go() { return m_go.data(); }
- inline nl_double *Idr() { return m_Idr.data(); }
- inline nl_double * const *connected_net_V() const { return m_connected_net_V.data(); }
+ terminal_t **terms() { return m_terms.data(); }
- void set_pointers();
+ std::size_t m_railstart;
- std::size_t m_railstart;
+ std::vector<unsigned> m_nz; /* all non zero for multiplication */
+ std::vector<unsigned> m_nzrd; /* non zero right of the diagonal for elimination, may include RHS element */
+ std::vector<unsigned> m_nzbd; /* non zero below of the diagonal for elimination */
- std::vector<unsigned> m_nz; /* all non zero for multiplication */
- std::vector<unsigned> m_nzrd; /* non zero right of the diagonal for elimination, may include RHS element */
- std::vector<unsigned> m_nzbd; /* non zero below of the diagonal for elimination */
+ /* state */
+ nl_double m_last_V;
+ nl_double m_DD_n_m_1;
+ nl_double m_h_n_m_1;
- /* state */
- nl_double m_last_V;
- nl_double m_DD_n_m_1;
- nl_double m_h_n_m_1;
+ std::vector<int> m_connected_net_idx;
+ private:
+ std::vector<terminal_t *> m_terms;
-private:
- std::vector<int> m_connected_net_idx;
- std::vector<nl_double> m_go;
- std::vector<nl_double> m_gt;
- std::vector<nl_double> m_Idr;
- std::vector<nl_double *> m_connected_net_V;
- std::vector<terminal_t *> m_terms;
+ };
-};
+ class proxied_analog_output_t : public analog_output_t
+ {
+ public:
-class proxied_analog_output_t : public analog_output_t
-{
-public:
+ proxied_analog_output_t(core_device_t &dev, const pstring &aname, analog_net_t *pnet)
+ : analog_output_t(dev, aname)
+ , m_proxied_net(pnet)
+ { }
- proxied_analog_output_t(core_device_t &dev, const pstring &aname)
- : analog_output_t(dev, aname)
- , m_proxied_net(nullptr)
- { }
- virtual ~proxied_analog_output_t();
+ analog_net_t *proxied_net() const { return m_proxied_net;}
+ private:
+ analog_net_t *m_proxied_net; // only for proxy nets in analog input logic
+ };
- analog_net_t *m_proxied_net; // only for proxy nets in analog input logic
-};
+ class matrix_solver_t : public device_t
+ {
+ public:
+ using list_t = std::vector<matrix_solver_t *>;
+ enum eSortType
+ {
+ NOSORT,
+ ASCENDING,
+ DESCENDING,
+ PREFER_IDENTITY_TOP_LEFT,
+ PREFER_BAND_MATRIX
+ };
+
+ void setup(analog_net_t::list_t &nets)
+ {
+ vsetup(nets);
+ }
-class matrix_solver_t : public device_t
-{
-public:
- using list_t = std::vector<matrix_solver_t *>;
+ void solve_base();
- enum eSortType
- {
- NOSORT,
- ASCENDING,
- DESCENDING
- };
+ /* after every call to solve, update inputs must be called.
+ * this can be done as well as a batch to ease parallel processing.
+ */
+ const netlist_time solve(netlist_time now);
+ void update_inputs();
- virtual ~matrix_solver_t() override;
+ bool has_dynamic_devices() const { return m_dynamic_devices.size() > 0; }
+ bool has_timestep_devices() const { return m_step_devices.size() > 0; }
- void setup(analog_net_t::list_t &nets)
- {
- vsetup(nets);
- }
+ void update_forced();
+ void update_after(const netlist_time after)
+ {
+ m_Q_sync.net().toggle_and_push_to_queue(after);
+ }
- void solve_base();
+ /* netdevice functions */
+ NETLIB_UPDATEI();
+ NETLIB_RESETI();
- /* after every call to solve, update inputs must be called.
- * this can be done as well as a batch to ease parallel processing.
- */
- const netlist_time solve();
- void update_inputs();
+ public:
+ int get_net_idx(detail::net_t *net);
+ std::pair<int, int> get_left_right_of_diag(std::size_t row, std::size_t diag);
+ double get_weight_around_diag(std::size_t row, std::size_t diag);
- inline bool has_dynamic_devices() const { return m_dynamic_devices.size() > 0; }
- inline bool has_timestep_devices() const { return m_step_devices.size() > 0; }
+ virtual void log_stats();
- void update_forced();
- void update_after(const netlist_time &after)
- {
- m_Q_sync.net().toggle_and_push_to_queue(after);
- }
+ virtual std::pair<pstring, pstring> create_solver_code()
+ {
+ return std::pair<pstring, pstring>("", plib::pfmt("/* solver doesn't support static compile */\n\n"));
+ }
- /* netdevice functions */
- NETLIB_UPDATEI();
- NETLIB_RESETI();
+ /* return number of floating point operations for solve */
+ std::size_t ops() { return m_ops; }
-public:
- int get_net_idx(detail::net_t *net);
+ protected:
- virtual void log_stats();
+ matrix_solver_t(netlist_state_t &anetlist, const pstring &name,
+ eSortType sort, const solver_parameters_t *params);
- virtual std::pair<pstring, pstring> create_solver_code()
- {
- return std::pair<pstring, pstring>("", plib::pfmt("/* solver doesn't support static compile */\n\n"));
- }
+ void sort_terms(eSortType sort);
- /* return number of floating point operations for solve */
- std::size_t ops() { return m_ops; }
+ void setup_base(analog_net_t::list_t &nets);
+ void update_dynamic();
-protected:
+ virtual void vsetup(analog_net_t::list_t &nets) = 0;
+ virtual unsigned vsolve_non_dynamic(const bool newton_raphson) = 0;
- matrix_solver_t(netlist_t &anetlist, const pstring &name,
- const eSortType sort, const solver_parameters_t *params);
+ netlist_time compute_next_timestep(const double cur_ts);
+ /* virtual */ void add_term(std::size_t net_idx, terminal_t *term);
- void setup_base(analog_net_t::list_t &nets);
- void update_dynamic();
+ template <typename T>
+ void store(const T & V);
- virtual void vsetup(analog_net_t::list_t &nets) = 0;
- virtual unsigned vsolve_non_dynamic(const bool newton_raphson) = 0;
+ template <typename T>
+ auto delta(const T & V) -> typename std::decay<decltype(V[0])>::type;
- netlist_time compute_next_timestep(const double cur_ts);
- /* virtual */ void add_term(std::size_t net_idx, terminal_t *term);
+ template <typename T>
+ void build_LE_A(T &child);
+ template <typename T>
+ void build_LE_RHS(T &child);
- template <typename T>
- void store(const T * RESTRICT V);
- template <typename T>
- T delta(const T * RESTRICT V);
+ void set_pointers()
+ {
+ const std::size_t iN = this->m_nets.size();
+
+ std::size_t max_count = 0;
+ std::size_t max_rail = 0;
+ for (std::size_t k = 0; k < iN; k++)
+ {
+ max_count = std::max(max_count, m_terms[k]->count());
+ max_rail = std::max(max_rail, m_terms[k]->m_railstart);
+ }
+
+ m_mat_ptr.resize(iN, max_rail+1);
+ m_gtn.resize(iN, max_count);
+ m_gonn.resize(iN, max_count);
+ m_Idrn.resize(iN, max_count);
+ m_connected_net_Vn.resize(iN, max_count);
+
+ for (std::size_t k = 0; k < iN; k++)
+ {
+ auto count = m_terms[k]->count();
+
+ for (std::size_t i = 0; i < count; i++)
+ {
+ m_terms[k]->terms()[i]->set_ptrs(&m_gtn[k][i], &m_gonn[k][i], &m_Idrn[k][i]);
+ m_connected_net_Vn[k][i] = m_terms[k]->terms()[i]->connected_terminal()->net().Q_Analog_state_ptr();
+ }
+ }
+ }
- template <typename T>
- void build_LE_A();
- template <typename T>
- void build_LE_RHS();
+ template <typename AP, typename FT>
+ void fill_matrix(std::size_t N, AP &tcr, FT &RHS)
+ {
+ for (std::size_t k = 0; k < N; k++)
+ {
+ auto *net = m_terms[k].get();
+ auto **tcr_r = &(tcr[k][0]);
+
+ const std::size_t term_count = net->count();
+ const std::size_t railstart = net->m_railstart;
+ const auto &go = m_gonn[k];
+ const auto &gt = m_gtn[k];
+ const auto &Idr = m_Idrn[k];
+ const auto &cnV = m_connected_net_Vn[k];
+
+ for (std::size_t i = 0; i < railstart; i++)
+ *tcr_r[i] += go[i];
+
+ typename FT::value_type gtot_t = 0.0;
+ typename FT::value_type RHS_t = 0.0;
+
+ for (std::size_t i = 0; i < term_count; i++)
+ {
+ gtot_t += gt[i];
+ RHS_t += Idr[i];
+ }
+ // FIXME: Code above is faster than vec_sum - Check this
+ #if 0
+ auto gtot_t = plib::vec_sum<FT>(term_count, m_gt);
+ auto RHS_t = plib::vec_sum<FT>(term_count, m_Idr);
+ #endif
+
+ for (std::size_t i = railstart; i < term_count; i++)
+ {
+ RHS_t += (/*m_Idr[i]*/ (- go[i]) * *cnV[i]);
+ }
+
+ RHS[k] = RHS_t;
+ // update diagonal element ...
+ *tcr_r[railstart] += gtot_t; //mat.A[mat.diag[k]] += gtot_t;
+ }
- std::vector<std::unique_ptr<terms_for_net_t>> m_terms;
- std::vector<analog_net_t *> m_nets;
- std::vector<std::unique_ptr<proxied_analog_output_t>> m_inps;
+ }
- std::vector<terms_for_net_t *> m_rails_temp;
+ template <typename T>
+ using aligned_alloc = plib::aligned_allocator<T, PALIGN_VECTOROPT>;
- const solver_parameters_t &m_params;
+ plib::pmatrix2d<nl_double, aligned_alloc<nl_double>> m_gonn;
+ plib::pmatrix2d<nl_double, aligned_alloc<nl_double>> m_gtn;
+ plib::pmatrix2d<nl_double, aligned_alloc<nl_double>> m_Idrn;
+ plib::pmatrix2d<nl_double *, aligned_alloc<nl_double *>> m_mat_ptr;
+ plib::pmatrix2d<nl_double *, aligned_alloc<nl_double *>> m_connected_net_Vn;
- state_var<int> m_stat_calculations;
- state_var<int> m_stat_newton_raphson;
- state_var<int> m_stat_vsolver_calls;
- state_var<int> m_iterative_fail;
- state_var<int> m_iterative_total;
+ plib::pmatrix2d<nl_double> m_test;
-private:
+ std::vector<plib::unique_ptr<terms_for_net_t>> m_terms;
+ std::vector<analog_net_t *> m_nets;
+ std::vector<pool_owned_ptr<proxied_analog_output_t>> m_inps;
- state_var<netlist_time> m_last_step;
- std::vector<core_device_t *> m_step_devices;
- std::vector<core_device_t *> m_dynamic_devices;
+ std::vector<plib::unique_ptr<terms_for_net_t>> m_rails_temp;
- logic_input_t m_fb_sync;
- logic_output_t m_Q_sync;
+ const solver_parameters_t &m_params;
- /* calculate matrix */
- void setup_matrix();
+ state_var<int> m_stat_calculations;
+ state_var<int> m_stat_newton_raphson;
+ state_var<int> m_stat_vsolver_calls;
+ state_var<int> m_iterative_fail;
+ state_var<int> m_iterative_total;
- void step(const netlist_time &delta);
+ private:
- std::size_t m_ops;
- const eSortType m_sort;
-};
+ state_var<netlist_time> m_last_step;
+ std::vector<core_device_t *> m_step_devices;
+ std::vector<core_device_t *> m_dynamic_devices;
-template <typename T>
-T matrix_solver_t::delta(const T * RESTRICT V)
-{
- /* NOTE: Ideally we should also include currents (RHS) here. This would
- * need a reevaluation of the right hand side after voltages have been updated
- * and thus belong into a different calculation. This applies to all solvers.
- */
-
- const std::size_t iN = this->m_terms.size();
- T cerr = 0;
- for (std::size_t i = 0; i < iN; i++)
- cerr = std::max(cerr, std::abs(V[i] - static_cast<T>(this->m_nets[i]->Q_Analog())));
- return cerr;
-}
-
-template <typename T>
-void matrix_solver_t::store(const T * RESTRICT V)
-{
- const std::size_t iN = this->m_terms.size();
- for (std::size_t i = 0; i < iN; i++)
- this->m_nets[i]->set_Q_Analog(V[i]);
-}
+ logic_input_t m_fb_sync;
+ logic_output_t m_Q_sync;
-template <typename T>
-void matrix_solver_t::build_LE_A()
-{
- static_assert(std::is_base_of<matrix_solver_t, T>::value, "T must derive from matrix_solver_t");
+ /* calculate matrix */
+ void setup_matrix();
- T &child = static_cast<T &>(*this);
+ void step(const netlist_time &delta);
- const std::size_t iN = child.N();
- for (std::size_t k = 0; k < iN; k++)
+ std::size_t m_ops;
+ const eSortType m_sort;
+ };
+
+ template <typename T>
+ auto matrix_solver_t::delta(const T & V) -> typename std::decay<decltype(V[0])>::type
{
- terms_for_net_t *terms = m_terms[k].get();
- nl_double * Ak = &child.A(k, 0);
+ /* NOTE: Ideally we should also include currents (RHS) here. This would
+ * need a reevaluation of the right hand side after voltages have been updated
+ * and thus belong into a different calculation. This applies to all solvers.
+ */
+
+ const std::size_t iN = this->m_terms.size();
+ typename std::decay<decltype(V[0])>::type cerr = 0;
+ for (std::size_t i = 0; i < iN; i++)
+ cerr = std::max(cerr, std::abs(V[i] - this->m_nets[i]->Q_Analog()));
+ return cerr;
+ }
- for (std::size_t i=0; i < iN; i++)
- Ak[i] = 0.0;
+ template <typename T>
+ void matrix_solver_t::store(const T & V)
+ {
+ const std::size_t iN = this->m_terms.size();
+ for (std::size_t i = 0; i < iN; i++)
+ this->m_nets[i]->set_Q_Analog(V[i]);
+ }
- const std::size_t terms_count = terms->count();
- const std::size_t railstart = terms->m_railstart;
- const nl_double * const RESTRICT gt = terms->gt();
+ template <typename T>
+ void matrix_solver_t::build_LE_A(T &child)
+ {
+ using float_type = typename T::float_type;
+ static_assert(std::is_base_of<matrix_solver_t, T>::value, "T must derive from matrix_solver_t");
+ const std::size_t iN = child.size();
+ for (std::size_t k = 0; k < iN; k++)
{
- nl_double akk = 0.0;
- for (std::size_t i = 0; i < terms_count; i++)
- akk += gt[i];
+ terms_for_net_t *terms = m_terms[k].get();
+ float_type * Ak = &child.A(k, 0ul);
- Ak[k] = akk;
- }
+ for (std::size_t i=0; i < iN; i++)
+ Ak[i] = 0.0;
- const nl_double * const RESTRICT go = terms->go();
- int * RESTRICT net_other = terms->connected_net_idx();
+ const std::size_t terms_count = terms->count();
+ const std::size_t railstart = terms->m_railstart;
+ const float_type * const gt = m_gtn[k];
- for (std::size_t i = 0; i < railstart; i++)
- Ak[net_other[i]] -= go[i];
- }
-}
+ {
+ float_type akk = 0.0;
+ for (std::size_t i = 0; i < terms_count; i++)
+ akk += gt[i];
-template <typename T>
-void matrix_solver_t::build_LE_RHS()
-{
- static_assert(std::is_base_of<matrix_solver_t, T>::value, "T must derive from matrix_solver_t");
- T &child = static_cast<T &>(*this);
+ Ak[k] = akk;
+ }
+
+ const float_type * const go = m_gonn[k];
+ int * net_other = terms->m_connected_net_idx.data();
+
+ for (std::size_t i = 0; i < railstart; i++)
+ Ak[net_other[i]] += go[i];
+ }
+ }
- const std::size_t iN = child.N();
- for (std::size_t k = 0; k < iN; k++)
+ template <typename T>
+ void matrix_solver_t::build_LE_RHS(T &child)
{
- nl_double rhsk_a = 0.0;
- nl_double rhsk_b = 0.0;
+ static_assert(std::is_base_of<matrix_solver_t, T>::value, "T must derive from matrix_solver_t");
+ using float_type = typename T::float_type;
- const std::size_t terms_count = m_terms[k]->count();
- const nl_double * const RESTRICT go = m_terms[k]->go();
- const nl_double * const RESTRICT Idr = m_terms[k]->Idr();
- const nl_double * const * RESTRICT other_cur_analog = m_terms[k]->connected_net_V();
+ const std::size_t iN = child.size();
+ for (std::size_t k = 0; k < iN; k++)
+ {
+ float_type rhsk_a = 0.0;
+ float_type rhsk_b = 0.0;
- for (std::size_t i = 0; i < terms_count; i++)
- rhsk_a = rhsk_a + Idr[i];
+ const std::size_t terms_count = m_terms[k]->count();
+ const float_type * const go = m_gonn[k];
+ const float_type * const Idr = m_Idrn[k];
+ const float_type * const * other_cur_analog = m_connected_net_Vn[k];
- for (std::size_t i = m_terms[k]->m_railstart; i < terms_count; i++)
- //rhsk = rhsk + go[i] * terms[i]->m_otherterm->net().as_analog().Q_Analog();
- rhsk_b = rhsk_b + go[i] * *other_cur_analog[i];
+ for (std::size_t i = 0; i < terms_count; i++)
+ rhsk_a = rhsk_a + Idr[i];
+
+ for (std::size_t i = m_terms[k]->m_railstart; i < terms_count; i++)
+ //rhsk = rhsk + go[i] * terms[i]->m_otherterm->net().as_analog().Q_Analog();
+ rhsk_b = rhsk_b - go[i] * *other_cur_analog[i];
- child.RHS(k) = rhsk_a + rhsk_b;
+ child.RHS(k) = rhsk_a + rhsk_b;
+ }
}
-}
- } //namespace devices
+} //namespace devices
} // namespace netlist
#endif /* NLD_MS_DIRECT_H_ */
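The build_LE_A()/build_LE_RHS() templates above stamp each net's terminals into one row of the linear system: the diagonal accumulates the sum of the gt conductances, each off-diagonal column accumulates the go conductance towards the connected net, and terminals connected to rail nets (whose voltage is known) contribute to the right-hand side instead. Below is a dense, simplified sketch using the sign convention of the new code; the struct and all names are placeholders, not part of the patch, and A/RHS are assumed to be zeroed beforehand.

#include <cstddef>
#include <vector>

// Simplified, dense illustration of the row stamping done by build_LE_A()/build_LE_RHS().
struct net_terms
{
	std::vector<double> gt;            // conductances summed onto the diagonal
	std::vector<double> go;            // conductances towards the connected nets
	std::vector<double> Idr;           // current sources
	std::vector<int> other;            // matrix column of the connected net (first railstart entries)
	std::vector<const double *> connV; // voltage of the connected net
	std::size_t railstart;             // entries [0, railstart) go into the matrix
};

static void stamp_row(std::size_t k, const net_terms &t,
		std::vector<std::vector<double>> &A, std::vector<double> &RHS)
{
	double gtot = 0.0, rhs = 0.0;
	for (std::size_t i = 0; i < t.gt.size(); i++)
	{
		gtot += t.gt[i];  // diagonal: sum of all gt of this net
		rhs  += t.Idr[i]; // RHS: sum of all current sources
	}
	A[k][k] += gtot;
	for (std::size_t i = 0; i < t.railstart; i++)
		A[k][static_cast<std::size_t>(t.other[i])] += t.go[i];
	for (std::size_t i = t.railstart; i < t.go.size(); i++)
		rhs -= t.go[i] * *t.connV[i]; // rail nets have a known voltage -> move to RHS
	RHS[k] = rhs;
}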
diff --git a/src/lib/netlist/solver/nld_ms_direct.h b/src/lib/netlist/solver/nld_ms_direct.h
index f03cb738028..2501742218d 100644
--- a/src/lib/netlist/solver/nld_ms_direct.h
+++ b/src/lib/netlist/solver/nld_ms_direct.h
@@ -8,286 +8,228 @@
#ifndef NLD_MS_DIRECT_H_
#define NLD_MS_DIRECT_H_
-#include <algorithm>
-
-#include "nld_solver.h"
#include "nld_matrix_solver.h"
-#include "vector_base.h"
+#include "nld_solver.h"
+#include "plib/mat_cr.h"
+#include "plib/vector_ops.h"
-/* Disabling dynamic allocation gives a ~10% boost in performance
- * This flag has been added to support continuous storage for arrays
- * going forward in case we implement cuda solvers in the future.
- */
-#define NL_USE_DYNAMIC_ALLOCATION (1)
+#include <algorithm>
+#include <cmath>
namespace netlist
{
- namespace devices
- {
-//#define nl_ext_double _float128 // slow, very slow
-//#define nl_ext_double long double // slightly slower
-#define nl_ext_double nl_double
-
-
-template <std::size_t m_N, std::size_t storage_N>
-class matrix_solver_direct_t: public matrix_solver_t
+namespace devices
{
- friend class matrix_solver_t;
-public:
- matrix_solver_direct_t(netlist_t &anetlist, const pstring &name, const solver_parameters_t *params, const std::size_t size);
- matrix_solver_direct_t(netlist_t &anetlist, const pstring &name, const eSortType sort, const solver_parameters_t *params, const std::size_t size);
+ template <typename FT, int SIZE>
+ class matrix_solver_direct_t: public matrix_solver_t
+ {
+ friend class matrix_solver_t;
+ public:
- virtual ~matrix_solver_direct_t() override;
+ using float_type = FT;
- virtual void vsetup(analog_net_t::list_t &nets) override;
- virtual void reset() override { matrix_solver_t::reset(); }
+ matrix_solver_direct_t(netlist_state_t &anetlist, const pstring &name, const solver_parameters_t *params, const std::size_t size);
+ matrix_solver_direct_t(netlist_state_t &anetlist, const pstring &name, const eSortType sort, const solver_parameters_t *params, const std::size_t size);
-protected:
- virtual unsigned vsolve_non_dynamic(const bool newton_raphson) override;
- unsigned solve_non_dynamic(const bool newton_raphson);
+ void vsetup(analog_net_t::list_t &nets) override;
+ void reset() override { matrix_solver_t::reset(); }
- constexpr std::size_t N() const { return (m_N == 0) ? m_dim : m_N; }
+ protected:
+ unsigned vsolve_non_dynamic(const bool newton_raphson) override;
+ unsigned solve_non_dynamic(const bool newton_raphson);
- void LE_solve();
+ constexpr std::size_t size() const { return (SIZE > 0) ? static_cast<std::size_t>(SIZE) : m_dim; }
- template <typename T>
- void LE_back_subst(T * RESTRICT x);
-
-#if (NL_USE_DYNAMIC_ALLOCATION)
- template <typename T1, typename T2>
- nl_ext_double &A(const T1 &r, const T2 &c) { return m_A[r * m_pitch + c]; }
- template <typename T1>
- nl_ext_double &RHS(const T1 &r) { return m_A[r * m_pitch + N()]; }
-#else
- template <typename T1, typename T2>
- nl_ext_double &A(const T1 &r, const T2 &c) { return m_A[r][c]; }
- template <typename T1>
- nl_ext_double &RHS(const T1 &r) { return m_A[r][N()]; }
-#endif
- nl_double m_last_RHS[storage_N]; // right hand side - contains currents
-
-private:
- //static const std::size_t m_pitch = (((storage_N + 1) + 0) / 1) * 1;
- static constexpr std::size_t m_pitch = (((storage_N + 1) + 7) / 8) * 8;
- //static const std::size_t m_pitch = (((storage_N + 1) + 15) / 16) * 16;
- //static const std::size_t m_pitch = (((storage_N + 1) + 31) / 32) * 32;
-#if (NL_USE_DYNAMIC_ALLOCATION)
- //nl_ext_double * RESTRICT m_A;
- std::vector<nl_ext_double> m_A;
-#else
- nl_ext_double m_A[storage_N][m_pitch];
-#endif
- //nl_ext_double m_RHSx[storage_N];
-
- const std::size_t m_dim;
-
-};
-
-// ----------------------------------------------------------------------------------------
-// matrix_solver_direct
-// ----------------------------------------------------------------------------------------
-
-template <std::size_t m_N, std::size_t storage_N>
-matrix_solver_direct_t<m_N, storage_N>::~matrix_solver_direct_t()
-{
-#if (NL_USE_DYNAMIC_ALLOCATION)
- //plib::pfree_array(m_A);
-#endif
-}
+ void LE_solve();
-template <std::size_t m_N, std::size_t storage_N>
-void matrix_solver_direct_t<m_N, storage_N>::vsetup(analog_net_t::list_t &nets)
-{
- matrix_solver_t::setup_base(nets);
+ template <typename T>
+ void LE_back_subst(T & x);
- /* add RHS element */
- for (std::size_t k = 0; k < N(); k++)
- {
- terms_for_net_t * t = m_terms[k].get();
+ FT &A(std::size_t r, std::size_t c) { return m_A[r * m_pitch + c]; }
+ FT &RHS(std::size_t r) { return m_A[r * m_pitch + size()]; }
+ plib::parray<FT, SIZE> m_new_V;
- if (!plib::container::contains(t->m_nzrd, static_cast<unsigned>(N())))
- t->m_nzrd.push_back(static_cast<unsigned>(N()));
- }
+ private:
+ static constexpr const std::size_t SIZEABS = plib::parray<FT, SIZE>::SIZEABS();
+ static constexpr const std::size_t m_pitch_ABS = (((SIZEABS + 1) + 7) / 8) * 8;
- netlist().save(*this, m_last_RHS, "m_last_RHS");
+ const std::size_t m_dim;
+ const std::size_t m_pitch;
+ plib::parray<FT, SIZE * int(m_pitch_ABS)> m_A;
- for (std::size_t k = 0; k < N(); k++)
- netlist().save(*this, RHS(k), plib::pfmt("RHS.{1}")(k));
-}
+ };
+ // ----------------------------------------------------------------------------------------
+ // matrix_solver_direct
+ // ----------------------------------------------------------------------------------------
-template <std::size_t m_N, std::size_t storage_N>
-void matrix_solver_direct_t<m_N, storage_N>::LE_solve()
-{
- const std::size_t kN = N();
- if (!m_params.m_pivot)
+ template <typename FT, int SIZE>
+ void matrix_solver_direct_t<FT, SIZE>::vsetup(analog_net_t::list_t &nets)
{
- for (std::size_t i = 0; i < kN; i++)
- {
+ matrix_solver_t::setup_base(nets);
- /* FIXME: Singular matrix? */
- nl_double *Ai = &A(i, 0);
- const nl_double f = 1.0 / A(i,i);
- const auto &nzrd = m_terms[i]->m_nzrd;
- const auto &nzbd = m_terms[i]->m_nzbd;
+ /* add RHS element */
+ for (std::size_t k = 0; k < size(); k++)
+ {
+ terms_for_net_t * t = m_terms[k].get();
- for (std::size_t j : nzbd)
- {
- nl_double *Aj = &A(j, 0);
- const nl_double f1 = -f * Aj[i];
- for (std::size_t k : nzrd)
- Aj[k] += Ai[k] * f1;
- //RHS(j) += RHS(i) * f1;
- }
+ if (!plib::container::contains(t->m_nzrd, static_cast<unsigned>(size())))
+ t->m_nzrd.push_back(static_cast<unsigned>(size()));
}
+
+ // FIXME: This shouldn't be necessary ...
+ for (std::size_t k = 0; k < size(); k++)
+ state().save(*this, RHS(k), this->name(), plib::pfmt("RHS.{1}")(k));
}
- else
+
+ template <typename FT, int SIZE>
+ void matrix_solver_direct_t<FT, SIZE>::LE_solve()
{
- for (std::size_t i = 0; i < kN; i++)
+ const std::size_t kN = size();
+ if (!m_params.m_pivot)
{
- /* Find the row with the largest first value */
- std::size_t maxrow = i;
- for (std::size_t j = i + 1; j < kN; j++)
+ for (std::size_t i = 0; i < kN; i++)
{
- //if (std::abs(m_A[j][i]) > std::abs(m_A[maxrow][i]))
- if (A(j,i) * A(j,i) > A(maxrow,i) * A(maxrow,i))
- maxrow = j;
- }
+ /* FIXME: Singular matrix? */
+ const FT f = 1.0 / A(i,i);
+ const auto &nzrd = m_terms[i]->m_nzrd;
+ const auto &nzbd = m_terms[i]->m_nzbd;
- if (maxrow != i)
- {
- /* Swap the maxrow and ith row */
- for (std::size_t k = 0; k < kN + 1; k++) {
- std::swap(A(i,k), A(maxrow,k));
+ for (std::size_t j : nzbd)
+ {
+ const FT f1 = -f * A(j, i);
+ for (std::size_t k : nzrd)
+ A(j, k) += A(i, k) * f1;
+ //RHS(j) += RHS(i) * f1;
}
- //std::swap(RHS(i), RHS(maxrow));
}
- /* FIXME: Singular matrix? */
- const nl_double f = 1.0 / A(i,i);
+ }
+ else
+ {
+ for (std::size_t i = 0; i < kN; i++)
+ {
+ /* Find the row with the largest first value */
+ std::size_t maxrow = i;
+ for (std::size_t j = i + 1; j < kN; j++)
+ {
+ //if (std::abs(m_A[j][i]) > std::abs(m_A[maxrow][i]))
+ if (A(j,i) * A(j,i) > A(maxrow,i) * A(maxrow,i))
+ maxrow = j;
+ }
- /* Eliminate column i from row j */
+ if (maxrow != i)
+ {
+ /* Swap the maxrow and ith row */
+ for (std::size_t k = 0; k < kN + 1; k++) {
+ std::swap(A(i,k), A(maxrow,k));
+ }
+ //std::swap(RHS(i), RHS(maxrow));
+ }
+ /* FIXME: Singular matrix? */
+ const FT f = 1.0 / A(i,i);
- for (std::size_t j = i + 1; j < kN; j++)
- {
- const nl_double f1 = - A(j,i) * f;
- if (f1 != NL_FCONST(0.0))
+ /* Eliminate column i from row j */
+
+ for (std::size_t j = i + 1; j < kN; j++)
{
- const nl_double * RESTRICT pi = &A(i,i+1);
- nl_double * RESTRICT pj = &A(j,i+1);
-#if 1
- vec_add_mult_scalar_p(kN-i,pi,f1,pj);
-#else
- vec_add_mult_scalar_p(kN-i-1,pj,f1,pi);
- //for (unsigned k = i+1; k < kN; k++)
- // pj[k] = pj[k] + pi[k] * f1;
- //for (unsigned k = i+1; k < kN; k++)
- //A(j,k) += A(i,k) * f1;
- RHS(j) += RHS(i) * f1;
-#endif
+ const FT f1 = - A(j,i) * f;
+ if (f1 != plib::constants<FT>::zero())
+ {
+ const FT * pi = &A(i,i+1);
+ FT * pj = &A(j,i+1);
+ #if 1
+ plib::vec_add_mult_scalar_p(kN-i,pj, pi,f1);
+ #else
+ vec_add_mult_scalar_p1(kN-i-1,pj,pi,f1);
+ //for (unsigned k = i+1; k < kN; k++)
+ // pj[k] = pj[k] + pi[k] * f1;
+ //for (unsigned k = i+1; k < kN; k++)
+ //A(j,k) += A(i,k) * f1;
+ RHS(j) += RHS(i) * f1;
+ #endif
+ }
}
}
}
}
-}
-template <std::size_t m_N, std::size_t storage_N>
-template <typename T>
-void matrix_solver_direct_t<m_N, storage_N>::LE_back_subst(
- T * RESTRICT x)
-{
- const std::size_t kN = N();
-
- /* back substitution */
- if (m_params.m_pivot)
+ template <typename FT, int SIZE>
+ template <typename T>
+ void matrix_solver_direct_t<FT, SIZE>::LE_back_subst(
+ T & x)
{
- for (std::size_t j = kN; j-- > 0; )
+ const std::size_t kN = size();
+
+ /* back substitution */
+ if (m_params.m_pivot)
{
- T tmp = 0;
- for (std::size_t k = j+1; k < kN; k++)
- tmp += A(j,k) * x[k];
- x[j] = (RHS(j) - tmp) / A(j,j);
+ for (std::size_t j = kN; j-- > 0; )
+ {
+ FT tmp = 0;
+ for (std::size_t k = j+1; k < kN; k++)
+ tmp += A(j,k) * x[k];
+ x[j] = (RHS(j) - tmp) / A(j,j);
+ }
}
- }
- else
- {
- for (std::size_t j = kN; j-- > 0; )
+ else
{
- T tmp = 0;
-
- const auto *p = m_terms[j]->m_nzrd.data();
- const auto e = m_terms[j]->m_nzrd.size() - 1; /* exclude RHS element */
- T * Aj = &A(j,0);
- for (std::size_t k = 0; k < e; k++)
+ for (std::size_t j = kN; j-- > 0; )
{
- const auto pk = p[k];
- tmp += Aj[pk] * x[pk];
+ FT tmp = 0;
+ const auto &nzrd = m_terms[j]->m_nzrd;
+ const auto e = nzrd.size() - 1; /* exclude RHS element */
+ for (std::size_t k = 0; k < e; k++)
+ tmp += A(j, nzrd[k]) * x[nzrd[k]];
+ x[j] = (RHS(j) - tmp) / A(j,j);
}
- x[j] = (RHS(j) - tmp) / A(j,j);
}
}
-}
+ template <typename FT, int SIZE>
+ unsigned matrix_solver_direct_t<FT, SIZE>::solve_non_dynamic(const bool newton_raphson)
+ {
+ this->LE_solve();
+ this->LE_back_subst(m_new_V);
-template <std::size_t m_N, std::size_t storage_N>
-unsigned matrix_solver_direct_t<m_N, storage_N>::solve_non_dynamic(const bool newton_raphson)
-{
- nl_double new_V[storage_N]; // = { 0.0 };
-
- this->LE_solve();
- this->LE_back_subst(new_V);
-
- const nl_double err = (newton_raphson ? delta(new_V) : 0.0);
- store(new_V);
- return (err > this->m_params.m_accuracy) ? 2 : 1;
-}
-
-template <std::size_t m_N, std::size_t storage_N>
-inline unsigned matrix_solver_direct_t<m_N, storage_N>::vsolve_non_dynamic(const bool newton_raphson)
-{
- build_LE_A<matrix_solver_direct_t>();
- build_LE_RHS<matrix_solver_direct_t>();
+ const FT err = (newton_raphson ? delta(m_new_V) : 0.0);
+ store(m_new_V);
+ return (err > this->m_params.m_accuracy) ? 2 : 1;
+ }
- for (std::size_t i=0, iN=N(); i < iN; i++)
- m_last_RHS[i] = RHS(i);
+ template <typename FT, int SIZE>
+ unsigned matrix_solver_direct_t<FT, SIZE>::vsolve_non_dynamic(const bool newton_raphson)
+ {
+ this->build_LE_A(*this);
+ this->build_LE_RHS(*this);
- this->m_stat_calculations++;
- return this->solve_non_dynamic(newton_raphson);
-}
+ this->m_stat_calculations++;
+ return this->solve_non_dynamic(newton_raphson);
+ }
-template <std::size_t m_N, std::size_t storage_N>
-matrix_solver_direct_t<m_N, storage_N>::matrix_solver_direct_t(netlist_t &anetlist, const pstring &name,
- const solver_parameters_t *params, const std::size_t size)
-: matrix_solver_t(anetlist, name, ASCENDING, params)
-, m_dim(size)
-{
-#if (NL_USE_DYNAMIC_ALLOCATION)
- m_A.resize(N() * m_pitch);
- //m_A = plib::palloc_array<nl_ext_double>(N() * m_pitch);
-#endif
- for (unsigned k = 0; k < N(); k++)
+ template <typename FT, int SIZE>
+ matrix_solver_direct_t<FT, SIZE>::matrix_solver_direct_t(netlist_state_t &anetlist, const pstring &name,
+ const solver_parameters_t *params, const std::size_t size)
+ : matrix_solver_t(anetlist, name, ASCENDING, params)
+ , m_new_V(size)
+ , m_dim(size)
+ , m_pitch(m_pitch_ABS ? m_pitch_ABS : (((m_dim + 1) + 7) / 8) * 8)
+ , m_A(size * m_pitch)
{
- m_last_RHS[k] = 0.0;
}
-}
-template <std::size_t m_N, std::size_t storage_N>
-matrix_solver_direct_t<m_N, storage_N>::matrix_solver_direct_t(netlist_t &anetlist, const pstring &name,
- const eSortType sort, const solver_parameters_t *params, const std::size_t size)
-: matrix_solver_t(anetlist, name, sort, params)
-, m_dim(size)
-{
-#if (NL_USE_DYNAMIC_ALLOCATION)
- m_A.resize(N() * m_pitch);
- //m_A = plib::palloc_array<nl_ext_double>(N() * m_pitch);
-#endif
- for (unsigned k = 0; k < N(); k++)
+ template <typename FT, int SIZE>
+ matrix_solver_direct_t<FT, SIZE>::matrix_solver_direct_t(netlist_state_t &anetlist, const pstring &name,
+ const eSortType sort, const solver_parameters_t *params, const std::size_t size)
+ : matrix_solver_t(anetlist, name, sort, params)
+ , m_new_V(size)
+ , m_dim(size)
+ , m_pitch(m_pitch_ABS ? m_pitch_ABS : (((m_dim + 1) + 7) / 8) * 8)
+ , m_A(size * m_pitch)
{
- m_last_RHS[k] = 0.0;
}
-}
- } //namespace devices
+} // namespace devices
} // namespace netlist
#endif /* NLD_MS_DIRECT_H_ */
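For reference, the pivoting branch of LE_solve() together with LE_back_subst() implements textbook Gaussian elimination. A minimal standalone sketch of the same scheme on a dense row-major matrix (plain std::vector storage and illustrative names, not the netlist/plib types):

// Sketch: Gaussian elimination with partial pivoting plus back substitution.
// Solves A * x = b for a dense n x n matrix stored row-major. Illustrative only.
#include <cmath>
#include <cstddef>
#include <utility>
#include <vector>

std::vector<double> solve_dense(std::vector<double> A, std::vector<double> b)
{
	const std::size_t n = b.size();
	for (std::size_t i = 0; i < n; i++)
	{
		// partial pivoting: pick the row below i with the largest |A(j,i)|
		std::size_t maxrow = i;
		for (std::size_t j = i + 1; j < n; j++)
			if (std::abs(A[j * n + i]) > std::abs(A[maxrow * n + i]))
				maxrow = j;
		if (maxrow != i)
		{
			for (std::size_t k = 0; k < n; k++)
				std::swap(A[i * n + k], A[maxrow * n + k]);
			std::swap(b[i], b[maxrow]);
		}
		// eliminate column i from all rows below (assumes a non-singular pivot)
		const double f = 1.0 / A[i * n + i];
		for (std::size_t j = i + 1; j < n; j++)
		{
			const double f1 = -A[j * n + i] * f;
			if (f1 != 0.0)
			{
				for (std::size_t k = i; k < n; k++)
					A[j * n + k] += A[i * n + k] * f1;
				b[j] += b[i] * f1;
			}
		}
	}
	// back substitution on the upper triangular system
	std::vector<double> x(n);
	for (std::size_t j = n; j-- > 0; )
	{
		double tmp = 0.0;
		for (std::size_t k = j + 1; k < n; k++)
			tmp += A[j * n + k] * x[k];
		x[j] = (b[j] - tmp) / A[j * n + j];
	}
	return x;
}

The solver above differs only in that it restricts the inner loops to the known non-zero positions (m_nzrd/m_nzbd) when pivoting is disabled.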
diff --git a/src/lib/netlist/solver/nld_ms_direct1.h b/src/lib/netlist/solver/nld_ms_direct1.h
index 6e1f99bad7d..fbbb8ecb098 100644
--- a/src/lib/netlist/solver/nld_ms_direct1.h
+++ b/src/lib/netlist/solver/nld_ms_direct1.h
@@ -13,37 +13,41 @@
namespace netlist
{
- namespace devices
- {
-class matrix_solver_direct1_t: public matrix_solver_direct_t<1,1>
+namespace devices
{
-public:
+ template <typename FT>
+ class matrix_solver_direct1_t: public matrix_solver_direct_t<FT, 1>
+ {
+ public:
- matrix_solver_direct1_t(netlist_t &anetlist, const pstring &name, const solver_parameters_t *params)
- : matrix_solver_direct_t<1, 1>(anetlist, name, params, 1)
- {}
- virtual unsigned vsolve_non_dynamic(const bool newton_raphson) override;
+ using float_type = FT;
+ using base_type = matrix_solver_direct_t<FT, 1>;
-};
+ matrix_solver_direct1_t(netlist_state_t &anetlist, const pstring &name, const solver_parameters_t *params)
+ : matrix_solver_direct_t<FT, 1>(anetlist, name, params, 1)
+ {}
-// ----------------------------------------------------------------------------------------
-// matrix_solver - Direct1
-// ----------------------------------------------------------------------------------------
+ // ----------------------------------------------------------------------------------------
+ // matrix_solver - Direct1
+ // ----------------------------------------------------------------------------------------
+ unsigned vsolve_non_dynamic(const bool newton_raphson) override
+ {
+ this->build_LE_A(*this);
+ this->build_LE_RHS(*this);
+ //NL_VERBOSE_OUT(("{1} {2}\n", new_val, m_RHS[0] / m_A[0][0]));
-inline unsigned matrix_solver_direct1_t::vsolve_non_dynamic(ATTR_UNUSED const bool newton_raphson)
-{
- build_LE_A<matrix_solver_direct1_t>();
- build_LE_RHS<matrix_solver_direct1_t>();
- //NL_VERBOSE_OUT(("{1} {2}\n", new_val, m_RHS[0] / m_A[0][0]);
+ std::array<FT, 1> new_V = { this->RHS(0) / this->A(0,0) };
+
+ const FT err = (newton_raphson ? this->delta(new_V) : 0.0);
+ this->store(new_V);
+ return (err > this->m_params.m_accuracy) ? 2 : 1;
+ }
+
+ };
- nl_double new_V[1] = { RHS(0) / A(0,0) };
- const nl_double err = (newton_raphson ? delta(new_V) : 0.0);
- store(new_V);
- return (err > this->m_params.m_accuracy) ? 2 : 1;
-}
- } //namespace devices
+} //namespace devices
} // namespace netlist
diff --git a/src/lib/netlist/solver/nld_ms_direct2.h b/src/lib/netlist/solver/nld_ms_direct2.h
index 4004bce9cc4..01f77c3bc3c 100644
--- a/src/lib/netlist/solver/nld_ms_direct2.h
+++ b/src/lib/netlist/solver/nld_ms_direct2.h
@@ -13,43 +13,46 @@
namespace netlist
{
- namespace devices
- {
-class matrix_solver_direct2_t: public matrix_solver_direct_t<2,2>
+namespace devices
{
-public:
- matrix_solver_direct2_t(netlist_t &anetlist, const pstring &name, const solver_parameters_t *params)
- : matrix_solver_direct_t<2, 2>(anetlist, name, params, 2)
- {}
- virtual unsigned vsolve_non_dynamic(const bool newton_raphson) override;
+ // ----------------------------------------------------------------------------------------
+ // matrix_solver - Direct2
+ // ----------------------------------------------------------------------------------------
+
+ template <typename FT>
+ class matrix_solver_direct2_t: public matrix_solver_direct_t<FT, 2>
+ {
+ public:
-};
+ using float_type = FT;
-// ----------------------------------------------------------------------------------------
-// matrix_solver - Direct2
-// ----------------------------------------------------------------------------------------
+ matrix_solver_direct2_t(netlist_state_t &anetlist, const pstring &name, const solver_parameters_t *params)
+ : matrix_solver_direct_t<FT, 2>(anetlist, name, params, 2)
+ {}
+ unsigned vsolve_non_dynamic(const bool newton_raphson) override
+ {
+ this->build_LE_A(*this);
+ this->build_LE_RHS(*this);
-inline unsigned matrix_solver_direct2_t::vsolve_non_dynamic(ATTR_UNUSED const bool newton_raphson)
-{
- build_LE_A<matrix_solver_direct2_t>();
- build_LE_RHS<matrix_solver_direct2_t>();
+ const float_type a = this->A(0,0);
+ const float_type b = this->A(0,1);
+ const float_type c = this->A(1,0);
+ const float_type d = this->A(1,1);
- const nl_double a = A(0,0);
- const nl_double b = A(0,1);
- const nl_double c = A(1,0);
- const nl_double d = A(1,1);
+ const float_type v1 = (a * this->RHS(1) - c * this->RHS(0)) / (a * d - b * c);
+ const float_type v0 = (this->RHS(0) - b * v1) / a;
+ std::array<float_type, 2> new_V = {v0, v1};
- nl_double new_V[2];
- new_V[1] = (a * RHS(1) - c * RHS(0)) / (a * d - b * c);
- new_V[0] = (RHS(0) - b * new_V[1]) / a;
+ this->m_stat_calculations++;
+ const float_type err = (newton_raphson ? this->delta(new_V) : 0.0);
+ this->store(new_V);
+ return (err > this->m_params.m_accuracy) ? 2 : 1;
+ }
- const nl_double err = (newton_raphson ? delta(new_V) : 0.0);
- store(new_V);
- return (err > this->m_params.m_accuracy) ? 2 : 1;
-}
+ };
- } //namespace devices
+} //namespace devices
} // namespace netlist
#endif /* NLD_MS_DIRECT2_H_ */
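The 2x2 specialization above is just elimination written out in closed form. A quick standalone check of that formula on a hypothetical system (values chosen for illustration only):

// Sketch: the closed-form 2x2 solve used above, checked on a concrete system.
//   3*x0 + 1*x1 = 9
//   1*x0 + 2*x1 = 8   ->  x0 = 2, x1 = 3
#include <cassert>
#include <cmath>

int main()
{
	const double a = 3.0, b = 1.0, c = 1.0, d = 2.0;
	const double r0 = 9.0, r1 = 8.0;

	const double x1 = (a * r1 - c * r0) / (a * d - b * c); // = 15 / 5 = 3
	const double x0 = (r0 - b * x1) / a;                   // = (9 - 3) / 3 = 2

	assert(std::abs(x0 - 2.0) < 1e-12 && std::abs(x1 - 3.0) < 1e-12);
	return 0;
}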
diff --git a/src/lib/netlist/solver/nld_ms_direct_lu.h b/src/lib/netlist/solver/nld_ms_direct_lu.h
index c379cf8d055..e7cedc1dd29 100644
--- a/src/lib/netlist/solver/nld_ms_direct_lu.h
+++ b/src/lib/netlist/solver/nld_ms_direct_lu.h
@@ -8,11 +8,11 @@
#ifndef NLD_MS_DIRECT_H_
#define NLD_MS_DIRECT_H_
-#include <algorithm>
-
#include "solver/nld_solver.h"
#include "solver/nld_matrix_solver.h"
+#include <algorithm>
+
//#define A(r, c) m_A[_r][_c]
namespace netlist
@@ -36,9 +36,9 @@ public:
virtual void vsetup(analog_net_t::list_t &nets) override;
virtual void reset() override { matrix_solver_t::reset(); }
- inline unsigned N() const { if (m_N == 0) return m_dim; else return m_N; }
+ unsigned N() const { if (m_N == 0) return m_dim; else return m_N; }
- inline int vsolve_non_dynamic(const bool newton_raphson);
+ int vsolve_non_dynamic(const bool newton_raphson);
protected:
virtual void add_term(int net_idx, terminal_t *term) override;
@@ -139,12 +139,10 @@ protected:
nl_double compute_next_timestep();
template <typename T1, typename T2>
- inline nl_ext_double &A(const T1 r, const T2 c) { return m_A[r][c]; }
+ nl_ext_double &A(const T1 r, const T2 c) { return m_A[r][c]; }
//nl_double m_A[storage_N][((storage_N + 7) / 8) * 8];
nl_double m_RHS[storage_N];
- nl_double m_last_RHS[storage_N]; // right hand side - contains currents
- nl_double m_last_V[storage_N];
terms_for_net_t *m_rails_temp;
@@ -355,7 +353,6 @@ void matrix_solver_direct_t<m_N, storage_N>::vsetup(analog_net_t::list_t &nets)
* save states
*/
save(NLNAME(m_RHS));
- save(NLNAME(m_last_RHS));
save(NLNAME(m_last_V));
for (unsigned k = 0; k < N(); k++)
@@ -590,13 +587,10 @@ unsigned matrix_solver_direct_t<m_N, storage_N>::solve_non_dynamic(const bool ne
}
template <unsigned m_N, unsigned storage_N>
-inline int matrix_solver_direct_t<m_N, storage_N>::vsolve_non_dynamic(const bool newton_raphson)
+int matrix_solver_direct_t<m_N, storage_N>::vsolve_non_dynamic(const bool newton_raphson)
{
this->build_LE_A();
- this->build_LE_RHS(m_last_RHS);
-
- for (unsigned i=0, iN=N(); i < iN; i++)
- m_RHS[i] = m_last_RHS[i];
+ this->build_LE_RHS(m_RHS);
return this->solve_non_dynamic(newton_raphson);
}
@@ -608,11 +602,6 @@ matrix_solver_direct_t<m_N, storage_N>::matrix_solver_direct_t(const solver_para
, m_lp_fact(0)
{
m_rails_temp = palloc_array(terms_for_net_t, N());
-
- for (unsigned k = 0; k < N(); k++)
- {
- m_last_RHS[k] = 0.0;
- }
}
template <unsigned m_N, unsigned storage_N>
@@ -626,7 +615,6 @@ matrix_solver_direct_t<m_N, storage_N>::matrix_solver_direct_t(const eSolverType
for (unsigned k = 0; k < N(); k++)
{
m_terms[k] = palloc(terms_for_net_t);
- m_last_RHS[k] = 0.0;
}
}
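nld_ms_direct_lu.h builds its direct solve around an LU factorization. As a reference point, a compact standalone sketch of the Doolittle scheme without pivoting (dense row-major storage, illustrative names, not the actual solver code):

// Sketch: in-place Doolittle LU factorization (no pivoting) and the matching
// forward/back substitution. L has an implicit unit diagonal. Illustrative only.
#include <cstddef>
#include <vector>

void lu_factor(std::vector<double> &A, std::size_t n)
{
	for (std::size_t i = 0; i < n; i++)
	{
		const double f = 1.0 / A[i * n + i];      // assumes a non-zero pivot
		for (std::size_t j = i + 1; j < n; j++)
		{
			const double l = A[j * n + i] * f;    // multiplier, stored in the L part
			A[j * n + i] = l;
			for (std::size_t k = i + 1; k < n; k++)
				A[j * n + k] -= l * A[i * n + k];
		}
	}
}

void lu_solve(const std::vector<double> &LU, std::size_t n, std::vector<double> &x)
{
	// forward substitution with L (unit diagonal), x starts out holding b
	for (std::size_t i = 1; i < n; i++)
		for (std::size_t j = 0; j < i; j++)
			x[i] -= LU[i * n + j] * x[j];
	// back substitution with U
	for (std::size_t i = n; i-- > 0; )
	{
		for (std::size_t j = i + 1; j < n; j++)
			x[i] -= LU[i * n + j] * x[j];
		x[i] /= LU[i * n + i];
	}
}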
diff --git a/src/lib/netlist/solver/nld_ms_gcr.h b/src/lib/netlist/solver/nld_ms_gcr.h
index a17ca1ec282..f3e56a342d6 100644
--- a/src/lib/netlist/solver/nld_ms_gcr.h
+++ b/src/lib/netlist/solver/nld_ms_gcr.h
@@ -10,390 +10,201 @@
#ifndef NLD_MS_GCR_H_
#define NLD_MS_GCR_H_
-#include <algorithm>
+#include "plib/mat_cr.h"
-#include "../plib/pdynlib.h"
-#include "mat_cr.h"
#include "nld_ms_direct.h"
#include "nld_solver.h"
-#include "vector_base.h"
-#include "../plib/pstream.h"
+#include "plib/pdynlib.h"
+#include "plib/pstream.h"
+#include "plib/vector_ops.h"
+
+#include <algorithm>
namespace netlist
{
- namespace devices
- {
-template <std::size_t m_N, std::size_t storage_N>
-class matrix_solver_GCR_t: public matrix_solver_t
+namespace devices
{
-public:
-
- matrix_solver_GCR_t(netlist_t &anetlist, const pstring &name,
- const solver_parameters_t *params, const std::size_t size)
- : matrix_solver_t(anetlist, name, matrix_solver_t::ASCENDING, params)
- , m_dim(size)
- , mat(size)
- , m_proc()
- {
- }
- virtual ~matrix_solver_GCR_t() override
+ template <typename FT, int SIZE>
+ class matrix_solver_GCR_t: public matrix_solver_t
{
- }
-
- constexpr std::size_t N() const { return (m_N == 0) ? m_dim : m_N; }
-
- virtual void vsetup(analog_net_t::list_t &nets) override;
- virtual unsigned vsolve_non_dynamic(const bool newton_raphson) override;
+ public:
+
+ using mat_type = plib::matrix_compressed_rows_t<FT, SIZE>;
+ // FIXME: dirty hack to make this compile
+ static constexpr const std::size_t storage_N = 100;
+
+ matrix_solver_GCR_t(netlist_state_t &anetlist, const pstring &name,
+ const solver_parameters_t *params, const std::size_t size)
+ : matrix_solver_t(anetlist, name, matrix_solver_t::PREFER_IDENTITY_TOP_LEFT, params)
+ , m_dim(size)
+ , RHS(size)
+ , new_V(size)
+ , mat(static_cast<typename mat_type::index_type>(size))
+ , m_proc()
+ {
+ }
- virtual std::pair<pstring, pstring> create_solver_code() override;
+ constexpr std::size_t N() const { return m_dim; }
-private:
+ void vsetup(analog_net_t::list_t &nets) override;
+ unsigned vsolve_non_dynamic(const bool newton_raphson) override;
- //typedef typename mat_cr_t<storage_N>::type mattype;
- typedef typename mat_cr_t<storage_N>::index_type mattype;
+ std::pair<pstring, pstring> create_solver_code() override;
- void csc_private(plib::putf8_fmt_writer &strm);
+ private:
- using extsolver = void (*)(double * RESTRICT m_A, double * RESTRICT RHS, double * RESTRICT V);
+ using mat_index_type = typename plib::matrix_compressed_rows_t<FT, SIZE>::index_type;
- pstring static_compile_name();
+ void csc_private(plib::putf8_fmt_writer &strm);
- const std::size_t m_dim;
- std::vector<unsigned> m_term_cr[storage_N];
- mat_cr_t<storage_N> mat;
+ using extsolver = void (*)(double * m_A, double * RHS, double * V);
- //extsolver m_proc;
- plib::dynproc<void, double * RESTRICT, double * RESTRICT, double * RESTRICT> m_proc;
+ pstring static_compile_name();
-};
+ const std::size_t m_dim;
+ plib::parray<FT, SIZE> RHS;
+ plib::parray<FT, SIZE> new_V;
-// ----------------------------------------------------------------------------------------
-// matrix_solver - GCR
-// ----------------------------------------------------------------------------------------
+ mat_type mat;
-template <std::size_t m_N, std::size_t storage_N>
-void matrix_solver_GCR_t<m_N, storage_N>::vsetup(analog_net_t::list_t &nets)
-{
- setup_base(nets);
+ //extsolver m_proc;
+ plib::dynproc<void, double * , double * , double * > m_proc;
- mattype nz = 0;
- const std::size_t iN = this->N();
+ };
- /* build the final matrix */
+ // ----------------------------------------------------------------------------------------
+ // matrix_solver - GCR
+ // ----------------------------------------------------------------------------------------
- bool touched[storage_N][storage_N] = { { false } };
- for (std::size_t k = 0; k < iN; k++)
+ // FIXME: namespace or static class member
+ template <typename V>
+ inline std::size_t get_level(const V &v, std::size_t k)
{
- for (auto &j : this->m_terms[k]->m_nz)
- touched[k][j] = true;
+ for (std::size_t i = 0; i < v.size(); i++)
+ if (plib::container::contains(v[i], k))
+ return i;
+ throw plib::pexception("Error in get_level");
}
- unsigned fc = 0;
+ template <typename FT, int SIZE>
+ void matrix_solver_GCR_t<FT, SIZE>::vsetup(analog_net_t::list_t &nets)
+ {
+ setup_base(nets);
- unsigned ops = 0;
+ const std::size_t iN = this->N();
- for (std::size_t k = 0; k < iN; k++)
- {
- ops++; // 1/A(k,k)
- for (std::size_t row = k + 1; row < iN; row++)
- {
- if (touched[row][k])
- {
- ops++;
- fc++;
- for (std::size_t col = k + 1; col < iN; col++)
- if (touched[k][col])
- {
- touched[row][col] = true;
- ops += 2;
- }
- }
- }
- }
+ /* build the final matrix */
+ std::vector<std::vector<unsigned>> fill(iN);
- for (mattype k=0; k<iN; k++)
- {
- mat.ia[k] = nz;
+ std::size_t raw_elements = 0;
- for (mattype j=0; j<iN; j++)
+ for (std::size_t k = 0; k < iN; k++)
{
- if (touched[k][j])
+ fill[k].resize(iN, decltype(mat)::FILL_INFINITY);
+ for (auto &j : this->m_terms[k]->m_nz)
{
- mat.ja[nz] = j;
- if (j == k)
- mat.diag[k] = nz;
- nz++;
+ fill[k][j] = 0;
+ raw_elements++;
}
- }
- m_term_cr[k].clear();
- /* build pointers into the compressed row format matrix for each terminal */
- for (std::size_t j=0; j< this->m_terms[k]->m_railstart;j++)
- {
- int other = this->m_terms[k]->connected_net_idx()[j];
- for (auto i = mat.ia[k]; i < nz; i++)
- if (other == static_cast<int>(mat.ja[i]))
- {
- m_term_cr[k].push_back(i);
- break;
- }
}
- nl_assert(m_term_cr[k].size() == this->m_terms[k]->m_railstart);
- }
- mat.ia[iN] = nz;
- mat.nz_num = nz;
+ auto gr = mat.gaussian_extend_fill_mat(fill);
- this->log().verbose("Ops: {1} Occupancy ratio: {2}\n", ops,
- static_cast<double>(nz) / static_cast<double>(iN * iN));
-
- // FIXME: Move me
-
- if (netlist().lib().isLoaded())
- {
- pstring symname = static_compile_name();
-#if 0
- m_proc = this->netlist().lib().template getsym<extsolver>(symname);
- if (m_proc != nullptr)
- this->log().verbose("External static solver {1} found ...", symname);
- else
- this->log().warning("External static solver {1} not found ...", symname);
-#else
- m_proc.load(this->netlist().lib(), symname);
- if (m_proc.resolved())
- this->log().warning("External static solver {1} found ...", symname);
- else
- this->log().warning("External static solver {1} not found ...", symname);
-#endif
- }
+ /* FIXME: move this to the cr matrix class and use computed
+ * parallel ordering once it makes sense.
+ */
-}
-#if 0
-template <std::size_t m_N, std::size_t storage_N>
-void matrix_solver_GCR_t<m_N, storage_N>::csc_private(plib::putf8_fmt_writer &strm)
-{
- const std::size_t iN = N();
- for (std::size_t i = 0; i < iN - 1; i++)
- {
- const auto &nzbd = this->m_terms[i]->m_nzbd;
+ std::vector<unsigned> levL(iN, 0);
+ std::vector<unsigned> levU(iN, 0);
- if (nzbd.size() > 0)
+ // parallel scheme for L x = y
+ for (std::size_t k = 0; k < iN; k++)
{
- std::size_t pi = mat.diag[i];
-
- //const nl_double f = 1.0 / m_A[pi++];
- strm("const double f{1} = 1.0 / m_A[{2}];\n", i, pi);
- pi++;
- const std::size_t piie = mat.ia[i+1];
-
- //for (auto & j : nzbd)
- for (std::size_t j : nzbd)
- {
- // proceed to column i
- std::size_t pj = mat.ia[j];
-
- while (mat.ja[pj] < i)
- pj++;
-
- //const nl_double f1 = - m_A[pj++] * f;
- strm("\tconst double f{1}_{2} = -f{3} * m_A[{4}];\n", i, j, i, pj);
- pj++;
-
- // subtract row i from j */
- for (std::size_t pii = pi; pii<piie; )
- {
- while (mat.ja[pj] < mat.ja[pii])
- pj++;
- //m_A[pj++] += m_A[pii++] * f1;
- strm("\tm_A[{1}] += m_A[{2}] * f{3}_{4};\n", pj, pii, i, j);
- pj++; pii++;
- }
- //RHS[j] += f1 * RHS[i];
- strm("\tRHS[{1}] += f{2}_{3} * RHS[{4}];\n", j, i, j, i);
- }
+ unsigned lm=0;
+ for (std::size_t j = 0; j<k; j++)
+ if (fill[k][j] < decltype(mat)::FILL_INFINITY)
+ lm = std::max(lm, levL[j]);
+ levL[k] = 1+lm;
}
- }
- //new_V[iN - 1] = RHS[iN - 1] / mat.A[mat.diag[iN - 1]];
- strm("\tV[{1}] = RHS[{2}] / m_A[{3}];\n", iN - 1, iN - 1, mat.diag[iN - 1]);
- for (std::size_t j = iN - 1; j-- > 0;)
- {
- strm("\tdouble tmp{1} = 0.0;\n", j);
- const std::size_t e = mat.ia[j+1];
- for (std::size_t pk = mat.diag[j] + 1; pk < e; pk++)
+ // parallel scheme for U x = y
+ for (std::size_t k = iN; k-- > 0; )
{
- strm("\ttmp{1} += m_A[{2}] * V[{3}];\n", j, pk, mat.ja[pk]);
+ unsigned lm=0;
+ for (std::size_t j = iN; --j > k; )
+ if (fill[k][j] < decltype(mat)::FILL_INFINITY)
+ lm = std::max(lm, levU[j]);
+ levU[k] = 1+lm;
}
- strm("\tV[{1}] = (RHS[{1}] - tmp{1}) / m_A[{4}];\n", j, j, j, mat.diag[j]);
- }
-}
-#else
-template <std::size_t m_N, std::size_t storage_N>
-void matrix_solver_GCR_t<m_N, storage_N>::csc_private(plib::putf8_fmt_writer &strm)
-{
- const std::size_t iN = N();
- for (std::size_t i = 0; i < mat.nz_num; i++)
- strm("double m_A{1} = m_A[{2}];\n", i, i);
- for (std::size_t i = 0; i < iN - 1; i++)
- {
- const auto &nzbd = this->m_terms[i]->m_nzbd;
-
- if (nzbd.size() > 0)
+ for (std::size_t k = 0; k < iN; k++)
{
- std::size_t pi = mat.diag[i];
-
- //const nl_double f = 1.0 / m_A[pi++];
- strm("const double f{1} = 1.0 / m_A{2};\n", i, pi);
- pi++;
- const std::size_t piie = mat.ia[i+1];
-
- //for (auto & j : nzbd)
- for (std::size_t j : nzbd)
+ unsigned fm = 0;
+ pstring ml = "";
+ for (std::size_t j = 0; j < iN; j++)
{
- // proceed to column i
- std::size_t pj = mat.ia[j];
-
- while (mat.ja[pj] < i)
- pj++;
-
- //const nl_double f1 = - m_A[pj++] * f;
- strm("\tconst double f{1}_{2} = -f{3} * m_A{4};\n", i, j, i, pj);
- pj++;
-
- // subtract row i from j */
- for (std::size_t pii = pi; pii<piie; )
- {
- while (mat.ja[pj] < mat.ja[pii])
- pj++;
- //m_A[pj++] += m_A[pii++] * f1;
- strm("\tm_A{1} += m_A{2} * f{3}_{4};\n", pj, pii, i, j);
- pj++; pii++;
- }
- //RHS[j] += f1 * RHS[i];
- strm("\tRHS[{1}] += f{2}_{3} * RHS[{4}];\n", j, i, j, i);
+ ml += fill[k][j] == 0 ? "X" : fill[k][j] < decltype(mat)::FILL_INFINITY ? "+" : ".";
+ if (fill[k][j] < decltype(mat)::FILL_INFINITY)
+ if (fill[k][j] > fm)
+ fm = fill[k][j];
}
+ this->log().verbose("{1:4} {2} {3:4} {4:4} {5:4} {6:4}", k, ml, levL[k], levU[k], get_level(mat.m_ge_par, k), fm);
}
- }
-
- //new_V[iN - 1] = RHS[iN - 1] / mat.A[mat.diag[iN - 1]];
- strm("\tV[{1}] = RHS[{2}] / m_A{3};\n", iN - 1, iN - 1, mat.diag[iN - 1]);
- for (std::size_t j = iN - 1; j-- > 0;)
- {
- strm("\tdouble tmp{1} = 0.0;\n", j);
- const std::size_t e = mat.ia[j+1];
- for (std::size_t pk = mat.diag[j] + 1; pk < e; pk++)
- {
- strm("\ttmp{1} += m_A{2} * V[{3}];\n", j, pk, mat.ja[pk]);
- }
- strm("\tV[{1}] = (RHS[{1}] - tmp{1}) / m_A{4};\n", j, j, j, mat.diag[j]);
- }
-}
-#endif
-
-template <std::size_t m_N, std::size_t storage_N>
-pstring matrix_solver_GCR_t<m_N, storage_N>::static_compile_name()
-{
- plib::postringstream t;
- plib::putf8_fmt_writer w(t);
- csc_private(w);
- std::hash<pstring> h;
-
- return plib::pfmt("nl_gcr_{1:x}_{2}")(h( t.str() ))(mat.nz_num);
-}
-
-template <std::size_t m_N, std::size_t storage_N>
-std::pair<pstring, pstring> matrix_solver_GCR_t<m_N, storage_N>::create_solver_code()
-{
- plib::postringstream t;
- plib::putf8_fmt_writer strm(t);
- pstring name = static_compile_name();
-
- strm.writeline(plib::pfmt("extern \"C\" void {1}(double * __restrict m_A, double * __restrict RHS, double * __restrict V)\n")(name));
- strm.writeline("{\n");
- csc_private(strm);
- strm.writeline("}\n");
- return std::pair<pstring, pstring>(name, t.str());
-}
-template <std::size_t m_N, std::size_t storage_N>
-unsigned matrix_solver_GCR_t<m_N, storage_N>::vsolve_non_dynamic(const bool newton_raphson)
-{
- const std::size_t iN = this->N();
-
- nl_double RHS[storage_N];
- nl_double new_V[storage_N];
+ mat.build_from_fill_mat(fill);
- mat.set_scalar(0.0);
-
- for (std::size_t k = 0; k < iN; k++)
- {
- terms_for_net_t *t = this->m_terms[k].get();
- nl_double gtot_t = 0.0;
- nl_double RHS_t = 0.0;
-
- const std::size_t term_count = t->count();
- const std::size_t railstart = t->m_railstart;
- const nl_double * const RESTRICT gt = t->gt();
- const nl_double * const RESTRICT go = t->go();
- const nl_double * const RESTRICT Idr = t->Idr();
- const nl_double * const * RESTRICT other_cur_analog = t->connected_net_V();
- const unsigned * const RESTRICT tcr = m_term_cr[k].data();
-
-#if 0
- for (std::size_t i = 0; i < term_count; i++)
+ for (mat_index_type k=0; k<iN; k++)
{
- gtot_t += gt[i];
- RHS_t += Idr[i];
+ std::size_t cnt(0);
+ /* build pointers into the compressed row format matrix for each terminal */
+ for (std::size_t j=0; j< this->m_terms[k]->m_railstart;j++)
+ {
+ int other = this->m_terms[k]->m_connected_net_idx[j];
+ for (auto i = mat.row_idx[k]; i < mat.row_idx[k+1]; i++)
+ if (other == static_cast<int>(mat.col_idx[i]))
+ {
+ m_mat_ptr[k][j] = &mat.A[i];
+ cnt++;
+ break;
+ }
+ }
+ nl_assert(cnt == this->m_terms[k]->m_railstart);
+ m_mat_ptr[k][this->m_terms[k]->m_railstart] = &mat.A[mat.diag[k]];
}
- for (std::size_t i = railstart; i < term_count; i++)
- RHS_t += go[i] * *other_cur_analog[i];
+ this->log().verbose("maximum fill: {1}", gr.first);
+ this->log().verbose("Post elimination occupancy ratio: {2} Ops: {1}", gr.second,
+ static_cast<double>(mat.nz_num) / static_cast<double>(iN * iN));
+ this->log().verbose(" Pre elimination occupancy ratio: {2}",
+ static_cast<double>(raw_elements) / static_cast<double>(iN * iN));
- RHS[k] = RHS_t;
+ // FIXME: Move me
- // add diagonal element
- mat.A[mat.diag[k]] = gtot_t;
-
- for (std::size_t i = 0; i < railstart; i++)
- mat.A[tcr[i]] -= go[i];
- }
-#else
- for (std::size_t i = 0; i < railstart; i++)
- mat.A[tcr[i]] -= go[i];
-
- for (std::size_t i = 0; i < railstart; i++)
+ if (state().lib().isLoaded())
{
- gtot_t += gt[i];
- RHS_t += Idr[i];
+ pstring symname = static_compile_name();
+ m_proc.load(this->state().lib(), symname);
+ if (m_proc.resolved())
+ this->log().warning("External static solver {1} found ...", symname);
+ else
+ this->log().warning("External static solver {1} not found ...", symname);
}
- for (std::size_t i = railstart; i < term_count; i++)
- {
- RHS_t += (Idr[i] + go[i] * *other_cur_analog[i]);
- gtot_t += gt[i];
- }
-
- RHS[k] = RHS_t;
- mat.A[mat.diag[k]] += gtot_t;
}
-#endif
- mat.ia[iN] = static_cast<mattype>(mat.nz_num);
-
- /* now solve it */
- //if (m_proc != nullptr)
- if (m_proc.resolved())
- {
- //static_solver(m_A, RHS);
- m_proc(&mat.A[0], &RHS[0], &new_V[0]);
- }
- else
+ template <typename FT, int SIZE>
+ void matrix_solver_GCR_t<FT, SIZE>::csc_private(plib::putf8_fmt_writer &strm)
{
+ const std::size_t iN = N();
+
+ for (std::size_t i = 0; i < mat.nz_num; i++)
+ strm("double m_A{1} = m_A[{2}];\n", i, i);
+
for (std::size_t i = 0; i < iN - 1; i++)
{
const auto &nzbd = this->m_terms[i]->m_nzbd;
@@ -401,61 +212,114 @@ unsigned matrix_solver_GCR_t<m_N, storage_N>::vsolve_non_dynamic(const bool newt
if (nzbd.size() > 0)
{
std::size_t pi = mat.diag[i];
- const nl_double f = 1.0 / mat.A[pi++];
- const std::size_t piie = mat.ia[i+1];
- for (std::size_t j : nzbd) // for (std::size_t j = i + 1; j < iN; j++)
+ //const FT f = 1.0 / m_A[pi++];
+ strm("const double f{1} = 1.0 / m_A{2};\n", i, pi);
+ pi++;
+ const std::size_t piie = mat.row_idx[i+1];
+
+ //for (auto & j : nzbd)
+ for (std::size_t j : nzbd)
{
// proceed to column i
- //__builtin_prefetch(&m_A[mat.diag[j+1]], 1);
- std::size_t pj = mat.ia[j];
+ std::size_t pj = mat.row_idx[j];
- while (mat.ja[pj] < i)
+ while (mat.col_idx[pj] < i)
pj++;
- const nl_double f1 = - mat.A[pj++] * f;
+ //const FT f1 = - m_A[pj++] * f;
+ strm("\tconst double f{1}_{2} = -f{3} * m_A{4};\n", i, j, i, pj);
+ pj++;
// subtract row i from j */
for (std::size_t pii = pi; pii<piie; )
{
- while (mat.ja[pj] < mat.ja[pii])
+ while (mat.col_idx[pj] < mat.col_idx[pii])
pj++;
- mat.A[pj++] += mat.A[pii++] * f1;
+ //m_A[pj++] += m_A[pii++] * f1;
+ strm("\tm_A{1} += m_A{2} * f{3}_{4};\n", pj, pii, i, j);
+ pj++; pii++;
}
- RHS[j] += f1 * RHS[i];
+ //RHS[j] += f1 * RHS[i];
+ strm("\tRHS[{1}] += f{2}_{3} * RHS[{4}];\n", j, i, j, i);
}
}
}
- /* backward substitution
- *
- */
-
- /* row n-1 */
- new_V[iN - 1] = RHS[iN - 1] / mat.A[mat.diag[iN - 1]];
+ //new_V[iN - 1] = RHS[iN - 1] / mat.A[mat.diag[iN - 1]];
+ strm("\tV[{1}] = RHS[{2}] / m_A{3};\n", iN - 1, iN - 1, mat.diag[iN - 1]);
for (std::size_t j = iN - 1; j-- > 0;)
{
- //__builtin_prefetch(&new_V[j-1], 1);
- //if (j>0)__builtin_prefetch(&m_A[mat.diag[j-1]], 0);
- double tmp = 0;
- auto jdiag = mat.diag[j];
- const std::size_t e = mat.ia[j+1];
- for (std::size_t pk = jdiag + 1; pk < e; pk++)
+ strm("\tdouble tmp{1} = 0.0;\n", j);
+ const std::size_t e = mat.row_idx[j+1];
+ for (std::size_t pk = mat.diag[j] + 1; pk < e; pk++)
{
- tmp += mat.A[pk] * new_V[mat.ja[pk]];
+ strm("\ttmp{1} += m_A{2} * V[{3}];\n", j, pk, mat.col_idx[pk]);
}
- new_V[j] = (RHS[j] - tmp) / mat.A[jdiag];
+ strm("\tV[{1}] = (RHS[{1}] - tmp{1}) / m_A{4};\n", j, j, j, mat.diag[j]);
}
}
- this->m_stat_calculations++;
+ template <typename FT, int SIZE>
+ pstring matrix_solver_GCR_t<FT, SIZE>::static_compile_name()
+ {
+ plib::postringstream t;
+ plib::putf8_fmt_writer w(&t);
+ csc_private(w);
+ std::hash<pstring> h;
+
+ return plib::pfmt("nl_gcr_{1:x}_{2}")(h( t.str() ))(mat.nz_num);
+ }
- const nl_double err = (newton_raphson ? delta(new_V) : 0.0);
- store(new_V);
- return (err > this->m_params.m_accuracy) ? 2 : 1;
-}
+ template <typename FT, int SIZE>
+ std::pair<pstring, pstring> matrix_solver_GCR_t<FT, SIZE>::create_solver_code()
+ {
+ plib::postringstream t;
+ plib::putf8_fmt_writer strm(&t);
+ pstring name = static_compile_name();
+
+ strm.writeline(plib::pfmt("extern \"C\" void {1}(double * __restrict m_A, double * __restrict RHS, double * __restrict V)\n")(name));
+ strm.writeline("{\n");
+ csc_private(strm);
+ strm.writeline("}\n");
+ return std::pair<pstring, pstring>(name, t.str());
+ }
+
+ template <typename FT, int SIZE>
+ unsigned matrix_solver_GCR_t<FT, SIZE>::vsolve_non_dynamic(const bool newton_raphson)
+ {
+ const std::size_t iN = this->N();
+
+ mat.set_scalar(0.0);
+
+ /* populate matrix */
+
+ this->fill_matrix(iN, m_mat_ptr, RHS);
+
+ /* now solve it */
+
+ //if (m_proc != nullptr)
+ if (m_proc.resolved())
+ {
+ //static_solver(m_A, RHS);
+ m_proc(&mat.A[0], &RHS[0], &new_V[0]);
+ }
+ else
+ {
+ // mat.gaussian_elimination_parallel(RHS);
+ mat.gaussian_elimination(RHS);
+ /* backward substitution */
+ mat.gaussian_back_substitution(new_V, RHS);
+ }
+
+ this->m_stat_calculations++;
+
+ const FT err = (newton_raphson ? delta(new_V) : 0.0);
+ store(new_V);
+ return (err > this->m_params.m_accuracy) ? 2 : 1;
+ }
- } //namespace devices
+} // namespace devices
} // namespace netlist
#endif /* NLD_MS_GCR_H_ */
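The GCR solver above keeps the system matrix in compressed-row (CSR) storage via plib::matrix_compressed_rows_t, and each terminal caches a pointer directly into the value array so the matrix can be refilled without searching col_idx on every call. A minimal sketch of the CSR layout and a matrix-vector product over it (illustrative struct, not the plib class):

// Sketch: compressed-row (CSR) storage and y = A * x over it.
// row_idx[r] .. row_idx[r+1]-1 index the non-zeros of row r; col_idx holds
// their column numbers and A their values. Illustrative only.
#include <cstddef>
#include <vector>

struct csr_matrix
{
	std::size_t              size;     // n
	std::vector<std::size_t> row_idx;  // n + 1 entries
	std::vector<std::size_t> col_idx;  // nz entries
	std::vector<double>      A;        // nz entries
};

void mult_vec(const csr_matrix &m, const std::vector<double> &x, std::vector<double> &y)
{
	for (std::size_t r = 0; r < m.size; r++)
	{
		double tmp = 0.0;
		for (std::size_t k = m.row_idx[r]; k < m.row_idx[r + 1]; k++)
			tmp += m.A[k] * x[m.col_idx[k]];
		y[r] = tmp;
	}
}

Caching &A[i] per terminal, as m_mat_ptr does in vsetup() above, turns the per-solve matrix population into plain pointer stores.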
diff --git a/src/lib/netlist/solver/nld_ms_gmres.h b/src/lib/netlist/solver/nld_ms_gmres.h
index 2e4e447d14f..2ff515ebda7 100644
--- a/src/lib/netlist/solver/nld_ms_gmres.h
+++ b/src/lib/netlist/solver/nld_ms_gmres.h
@@ -1,387 +1,145 @@
// license:GPL-2.0+
// copyright-holders:Couriersud
/*
- * nld_ms_sor.h
- *
- * Generic successive over relaxation solver.
- *
- * Fow w==1 we will do the classic Gauss-Seidel approach
+ * nld_ms_gmres.h
*
*/
#ifndef NLD_MS_GMRES_H_
#define NLD_MS_GMRES_H_
-#include <algorithm>
-
-#include "mat_cr.h"
#include "nld_ms_direct.h"
#include "nld_solver.h"
-#include "vector_base.h"
+#include "plib/gmres.h"
+#include "plib/mat_cr.h"
+#include "plib/parray.h"
+#include "plib/vector_ops.h"
+
+#include <algorithm>
+#include <cmath>
+
namespace netlist
{
- namespace devices
- {
-template <std::size_t m_N, std::size_t storage_N>
-class matrix_solver_GMRES_t: public matrix_solver_direct_t<m_N, storage_N>
+namespace devices
{
-public:
-
- matrix_solver_GMRES_t(netlist_t &anetlist, const pstring &name, const solver_parameters_t *params, const std::size_t size)
- : matrix_solver_direct_t<m_N, storage_N>(anetlist, name, matrix_solver_t::ASCENDING, params, size)
- , m_use_iLU_preconditioning(true)
- , m_use_more_precise_stop_condition(false)
- , m_accuracy_mult(1.0)
- , mat(size)
- {
- }
- virtual ~matrix_solver_GMRES_t() override
+ template <typename FT, int SIZE>
+ class matrix_solver_GMRES_t: public matrix_solver_direct_t<FT, SIZE>
{
- }
-
- virtual void vsetup(analog_net_t::list_t &nets) override;
- virtual unsigned vsolve_non_dynamic(const bool newton_raphson) override;
+ public:
-private:
+ using float_type = FT;
- //typedef typename mat_cr_t<storage_N>::type mattype;
- typedef typename mat_cr_t<storage_N>::index_type mattype;
-
- unsigned solve_ilu_gmres(nl_double (& RESTRICT x)[storage_N], const nl_double (& RESTRICT rhs)[storage_N], const unsigned restart_max, std::size_t mr, nl_double accuracy);
-
- std::vector<unsigned> m_term_cr[storage_N];
-
- bool m_use_iLU_preconditioning;
- bool m_use_more_precise_stop_condition;
- nl_double m_accuracy_mult; // FXIME: Save state
-
- mat_cr_t<storage_N> mat;
-
- nl_double m_LU[storage_N * storage_N];
+ /* Sort rows to minimize fill-in; this maximizes the efficiency
+ * of the incomplete LU factorization used as the preconditioner,
+ * so the sorting itself already acts as a form of preconditioning.
+ */
+ matrix_solver_GMRES_t(netlist_state_t &anetlist, const pstring &name, const solver_parameters_t *params, const std::size_t size)
+ : matrix_solver_direct_t<FT, SIZE>(anetlist, name, matrix_solver_t::PREFER_BAND_MATRIX, params, size)
+ //, m_ops(size, 2)
+ , m_ops(size, 4)
+ , m_gmres(size)
+ {
+ }
- nl_double m_c[storage_N + 1]; /* mr + 1 */
- nl_double m_g[storage_N + 1]; /* mr + 1 */
- nl_double m_ht[storage_N + 1][storage_N]; /* (mr + 1), mr */
- nl_double m_s[storage_N + 1]; /* mr + 1 */
- nl_double m_v[storage_N + 1][storage_N]; /*(mr + 1), n */
- nl_double m_y[storage_N + 1]; /* mr + 1 */
+ void vsetup(analog_net_t::list_t &nets) override;
+ unsigned vsolve_non_dynamic(const bool newton_raphson) override;
-};
+ private:
-// ----------------------------------------------------------------------------------------
-// matrix_solver - GMRES
-// ----------------------------------------------------------------------------------------
+ using mattype = typename plib::matrix_compressed_rows_t<FT, SIZE>::index_type;
-template <std::size_t m_N, std::size_t storage_N>
-void matrix_solver_GMRES_t<m_N, storage_N>::vsetup(analog_net_t::list_t &nets)
-{
- matrix_solver_direct_t<m_N, storage_N>::vsetup(nets);
+ plib::mat_precondition_ILU<FT, SIZE> m_ops;
+ plib::gmres_t<FT, SIZE> m_gmres;
+ };
- mattype nz = 0;
- const std::size_t iN = this->N();
+ // ----------------------------------------------------------------------------------------
+ // matrix_solver - GMRES
+ // ----------------------------------------------------------------------------------------
- for (std::size_t k=0; k<iN; k++)
+ template <typename FT, int SIZE>
+ void matrix_solver_GMRES_t<FT, SIZE>::vsetup(analog_net_t::list_t &nets)
{
- terms_for_net_t * RESTRICT row = this->m_terms[k].get();
- mat.ia[k] = nz;
+ matrix_solver_direct_t<FT, SIZE>::vsetup(nets);
- for (std::size_t j=0; j<row->m_nz.size(); j++)
- {
- mat.ja[nz] = static_cast<mattype>(row->m_nz[j]);
- if (row->m_nz[j] == k)
- mat.diag[k] = nz;
- nz++;
- }
+ const std::size_t iN = this->size();
- /* build pointers into the compressed row format matrix for each terminal */
+ std::vector<std::vector<unsigned>> fill(iN);
- for (unsigned j=0; j< this->m_terms[k]->m_railstart;j++)
+ for (std::size_t k=0; k<iN; k++)
{
- for (unsigned i = mat.ia[k]; i<nz; i++)
- if (this->m_terms[k]->connected_net_idx()[j] == static_cast<int>(mat.ja[i]))
- {
- m_term_cr[k].push_back(i);
- break;
- }
- nl_assert(m_term_cr[k].size() == this->m_terms[k]->m_railstart);
- }
- }
-
- mat.ia[iN] = nz;
- mat.nz_num = nz;
-}
-
-template <std::size_t m_N, std::size_t storage_N>
-unsigned matrix_solver_GMRES_t<m_N, storage_N>::vsolve_non_dynamic(const bool newton_raphson)
-{
- const std::size_t iN = this->N();
-
- /* ideally, we could get an estimate for the spectral radius of
- * Inv(D - L) * U
- *
- * and estimate using
- *
- * omega = 2.0 / (1.0 + std::sqrt(1-rho))
- */
-
- //nz_num = 0;
- nl_double RHS[storage_N];
- nl_double new_V[storage_N];
-
- mat.set_scalar(0.0);
-
- for (std::size_t k = 0; k < iN; k++)
- {
- nl_double gtot_t = 0.0;
- nl_double RHS_t = 0.0;
-
- const std::size_t term_count = this->m_terms[k]->count();
- const std::size_t railstart = this->m_terms[k]->m_railstart;
- const nl_double * const RESTRICT gt = this->m_terms[k]->gt();
- const nl_double * const RESTRICT go = this->m_terms[k]->go();
- const nl_double * const RESTRICT Idr = this->m_terms[k]->Idr();
- const nl_double * const * RESTRICT other_cur_analog = this->m_terms[k]->connected_net_V();
-
- for (std::size_t i = 0; i < term_count; i++)
- {
- gtot_t = gtot_t + gt[i];
- RHS_t = RHS_t + Idr[i];
+ fill[k].resize(iN, decltype(m_ops.m_mat)::FILL_INFINITY);
+ terms_for_net_t * row = this->m_terms[k].get();
+ for (const auto &nz_j : row->m_nz)
+ {
+ fill[k][static_cast<mattype>(nz_j)] = 0;
+ }
}
- for (std::size_t i = railstart; i < term_count; i++)
- RHS_t = RHS_t + go[i] * *other_cur_analog[i];
+ m_ops.build(fill);
- RHS[k] = RHS_t;
-
- // add diagonal element
- mat.A[mat.diag[k]] = gtot_t;
+ /* build pointers into the compressed row format matrix for each terminal */
- for (std::size_t i = 0; i < railstart; i++)
+ for (std::size_t k=0; k<iN; k++)
{
- const std::size_t pi = m_term_cr[k][i];
- mat.A[pi] -= go[i];
+ std::size_t cnt = 0;
+ for (std::size_t j=0; j< this->m_terms[k]->m_railstart;j++)
+ {
+ for (std::size_t i = m_ops.m_mat.row_idx[k]; i<m_ops.m_mat.row_idx[k+1]; i++)
+ if (this->m_terms[k]->m_connected_net_idx[j] == static_cast<int>(m_ops.m_mat.col_idx[i]))
+ {
+ this->m_mat_ptr[k][j] = &m_ops.m_mat.A[i];
+ cnt++;
+ break;
+ }
+ }
+ nl_assert(cnt == this->m_terms[k]->m_railstart);
+ this->m_mat_ptr[k][this->m_terms[k]->m_railstart] = &m_ops.m_mat.A[m_ops.m_mat.diag[k]];
}
-
- new_V[k] = this->m_nets[k]->Q_Analog();
-
}
- mat.ia[iN] = static_cast<mattype>(mat.nz_num);
-
- const nl_double accuracy = this->m_params.m_accuracy;
-
- unsigned mr = iN;
- if (iN > 3 )
- mr = static_cast<unsigned>(std::sqrt(iN) * 2.0);
- unsigned iter = std::max(1u, this->m_params.m_gs_loops);
- unsigned gsl = solve_ilu_gmres(new_V, RHS, iter, mr, accuracy);
- unsigned failed = mr * iter;
- this->m_iterative_total += gsl;
- this->m_stat_calculations++;
-
- if (gsl>=failed)
- {
- this->m_iterative_fail++;
- return matrix_solver_direct_t<m_N, storage_N>::vsolve_non_dynamic(newton_raphson);
- }
-
- const nl_double err = (newton_raphson ? this->delta(new_V) : 0.0);
- this->store(new_V);
- return (err > this->m_params.m_accuracy) ? 2 : 1;
-}
-
-template <typename T>
-inline static void givens_mult( const T c, const T s, T & g0, T & g1 )
-{
- const T tg0 = c * g0 - s * g1;
- const T tg1 = s * g0 + c * g1;
-
- g0 = tg0;
- g1 = tg1;
-}
-
-template <std::size_t m_N, std::size_t storage_N>
-unsigned matrix_solver_GMRES_t<m_N, storage_N>::solve_ilu_gmres (nl_double (& RESTRICT x)[storage_N], const nl_double (& RESTRICT rhs)[storage_N], const unsigned restart_max, std::size_t mr, nl_double accuracy)
-{
- /*-------------------------------------------------------------------------
- * The code below was inspired by code published by John Burkardt under
- * the LPGL here:
- *
- * http://people.sc.fsu.edu/~jburkardt/cpp_src/mgmres/mgmres.html
- *
- * The code below was completely written from scratch based on the pseudo code
- * found here:
- *
- * http://de.wikipedia.org/wiki/GMRES-Verfahren
- *
- * The Algorithm itself is described in
- *
- * Yousef Saad,
- * Iterative Methods for Sparse Linear Systems,
- * Second Edition,
- * SIAM, 20003,
- * ISBN: 0898715342,
- * LC: QA188.S17.
- *
- *------------------------------------------------------------------------*/
-
- unsigned itr_used = 0;
- double rho_delta = 0.0;
-
- const std::size_t n = this->N();
-
- if (mr > n) mr = n;
-
- if (m_use_iLU_preconditioning)
- mat.incomplete_LU_factorization(m_LU);
-
- if (m_use_more_precise_stop_condition)
+ template <typename FT, int SIZE>
+ unsigned matrix_solver_GMRES_t<FT, SIZE>::vsolve_non_dynamic(const bool newton_raphson)
{
- /* derive residual for a given delta x
- *
- * LU y = A dx
- *
- * ==> rho / accuracy = sqrt(y * y)
- *
- * This approach will approximate the iterative stop condition
- * based |xnew - xold| pretty precisely. But it is slow, or expressed
- * differently: The invest doesn't pay off.
- * Therefore we use the approach in the else part.
- */
- nl_double t[storage_N];
- nl_double Ax[storage_N];
- vec_set(n, accuracy, t);
- mat.mult_vec(t, Ax);
-
- mat.solveLUx(m_LU, Ax);
-
- const nl_double rho_to_accuracy = std::sqrt(vec_mult2(n, Ax)) / accuracy;
-
- rho_delta = accuracy * rho_to_accuracy;
- }
- else
- rho_delta = accuracy * std::sqrt(n) * m_accuracy_mult;
-
- for (unsigned itr = 0; itr < restart_max; itr++)
- {
- std::size_t last_k = mr;
- nl_double rho;
-
- nl_double Ax[storage_N];
- nl_double residual[storage_N];
-
- mat.mult_vec(x, Ax);
-
- vec_sub(n, rhs, Ax, residual);
-
- if (m_use_iLU_preconditioning)
- {
- mat.solveLUx(m_LU, residual);
- }
-
- rho = std::sqrt(vec_mult2(n, residual));
+ const std::size_t iN = this->size();
- if (rho < rho_delta)
- return itr_used + 1;
+ plib::parray<FT, SIZE> RHS(iN);
+ //float_type new_V[storage_N];
- vec_set(mr+1, NL_FCONST(0.0), m_g);
- m_g[0] = rho;
+ m_ops.m_mat.set_scalar(0.0);
- for (std::size_t i = 0; i < mr; i++)
- vec_set(mr + 1, NL_FCONST(0.0), m_ht[i]);
+ /* populate matrix and V for first estimate */
+ this->fill_matrix(iN, this->m_mat_ptr, RHS);
- vec_mult_scalar(n, residual, NL_FCONST(1.0) / rho, m_v[0]);
-
- for (std::size_t k = 0; k < mr; k++)
+ for (std::size_t k = 0; k < iN; k++)
{
- const std::size_t k1 = k + 1;
-
- mat.mult_vec(m_v[k], m_v[k1]);
-
- if (m_use_iLU_preconditioning)
- mat.solveLUx(m_LU, m_v[k1]);
-
- for (std::size_t j = 0; j <= k; j++)
- {
- m_ht[j][k] = vec_mult(n, m_v[k1], m_v[j]);
- vec_add_mult_scalar(n, m_v[j], -m_ht[j][k], m_v[k1]);
- }
- m_ht[k1][k] = std::sqrt(vec_mult2(n, m_v[k1]));
-
- if (m_ht[k1][k] != 0.0)
- vec_scale(n, m_v[k1], NL_FCONST(1.0) / m_ht[k1][k]);
-
- for (std::size_t j = 0; j < k; j++)
- givens_mult(m_c[j], m_s[j], m_ht[j][k], m_ht[j+1][k]);
-
- const nl_double mu = 1.0 / std::hypot(m_ht[k][k], m_ht[k1][k]);
-
- m_c[k] = m_ht[k][k] * mu;
- m_s[k] = -m_ht[k1][k] * mu;
- m_ht[k][k] = m_c[k] * m_ht[k][k] - m_s[k] * m_ht[k1][k];
- m_ht[k1][k] = 0.0;
-
- givens_mult(m_c[k], m_s[k], m_g[k], m_g[k1]);
-
- rho = std::abs(m_g[k1]);
-
- itr_used = itr_used + 1;
-
- if (rho <= rho_delta)
- {
- last_k = k;
- break;
- }
+ this->m_new_V[k] = this->m_nets[k]->Q_Analog();
}
- if (last_k >= mr)
- /* didn't converge within accuracy */
- last_k = mr - 1;
+ const float_type accuracy = this->m_params.m_accuracy;
- /* Solve the system H * y = g */
- /* x += m_v[j] * m_y[j] */
- for (std::size_t i = last_k + 1; i-- > 0;)
- {
- double tmp = m_g[i];
- for (std::size_t j = i + 1; j <= last_k; j++)
- {
- tmp -= m_ht[i][j] * m_y[j];
- }
- m_y[i] = tmp / m_ht[i][i];
- }
-
- for (std::size_t i = 0; i <= last_k; i++)
- vec_add_mult_scalar(n, m_v[i], m_y[i], x);
+ auto iter = std::max(plib::constants<std::size_t>::one(), this->m_params.m_gs_loops);
+ auto gsl = m_gmres.solve(m_ops, this->m_new_V, RHS, iter, accuracy);
-#if 1
- if (rho <= rho_delta)
- {
- break;
- }
-#else
- /* we try to approximate the x difference between to steps using m_v[last_k] */
+ this->m_iterative_total += gsl;
+ this->m_stat_calculations++;
- double xdelta = m_y[last_k] * vec_maxabs(n, m_v[last_k]);
- if (xdelta < accuracy)
+ if (gsl > iter)
{
- if (m_accuracy_mult < 16384.0)
- m_accuracy_mult = m_accuracy_mult * 2.0;
- break;
+ this->m_iterative_fail++;
+ return matrix_solver_direct_t<FT, SIZE>::vsolve_non_dynamic(newton_raphson);
}
- else
- m_accuracy_mult = m_accuracy_mult / 2.0;
-#endif
+ const float_type err = (newton_raphson ? this->delta(this->m_new_V) : 0.0);
+ this->store(this->m_new_V);
+ return (err > this->m_params.m_accuracy) ? 2 : 1;
}
- return itr_used;
-}
- } //namespace devices
+} // namespace devices
} // namespace netlist
#endif /* NLD_MS_GMRES_H_ */
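The hand-written GMRES kernel removed above (now delegated to plib::gmres_t) triangularizes the small Hessenberg least-squares problem with Givens rotations, exactly as the removed givens_mult() did. A standalone sketch of computing and applying one such rotation (illustrative names):

// Sketch: a Givens rotation that zeroes b in the pair (a, b), as used to keep
// the Hessenberg matrix triangular inside GMRES. Illustrative only.
#include <cmath>

struct givens { double c; double s; };

// choose c, s so that  [ c  -s ] [a]   [r]
//                      [ s   c ] [b] = [0]   with r = hypot(a, b)
inline givens make_givens(double a, double b)
{
	const double mu = 1.0 / std::hypot(a, b);
	return { a * mu, -b * mu };
}

// apply the rotation to a pair of entries, e.g. H(k,j), H(k+1,j) or g(k), g(k+1)
inline void apply_givens(const givens &g, double &g0, double &g1)
{
	const double t0 = g.c * g0 - g.s * g1;
	const double t1 = g.s * g0 + g.c * g1;
	g0 = t0;
	g1 = t1;
}

Applied to (a, b) itself this yields (hypot(a, b), 0), matching the update of m_ht and m_g in the removed code.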
diff --git a/src/lib/netlist/solver/nld_ms_sm.h b/src/lib/netlist/solver/nld_ms_sm.h
index ed95cd2dc03..d85ab0044f4 100644
--- a/src/lib/netlist/solver/nld_ms_sm.h
+++ b/src/lib/netlist/solver/nld_ms_sm.h
@@ -33,293 +33,275 @@
#ifndef NLD_MS_SM_H_
#define NLD_MS_SM_H_
-#include <algorithm>
-
-#include "nld_solver.h"
#include "nld_matrix_solver.h"
-#include "vector_base.h"
+#include "nld_solver.h"
+#include "plib/vector_ops.h"
+
+#include <algorithm>
namespace netlist
{
- namespace devices
- {
-//#define nl_ext_double _float128 // slow, very slow
-//#define nl_ext_double long double // slightly slower
-#define nl_ext_double nl_double
-
-template <std::size_t m_N, std::size_t storage_N>
-class matrix_solver_sm_t: public matrix_solver_t
+namespace devices
{
- friend class matrix_solver_t;
-
-public:
- matrix_solver_sm_t(netlist_t &anetlist, const pstring &name,
- const solver_parameters_t *params, const std::size_t size);
-
- virtual ~matrix_solver_sm_t() override;
-
- virtual void vsetup(analog_net_t::list_t &nets) override;
- virtual void reset() override { matrix_solver_t::reset(); }
-
-protected:
- virtual unsigned vsolve_non_dynamic(const bool newton_raphson) override;
- unsigned solve_non_dynamic(const bool newton_raphson);
-
- constexpr std::size_t N() const { return (m_N == 0) ? m_dim : m_N; }
-
- void LE_invert();
-
- template <typename T>
- void LE_compute_x(T * RESTRICT x);
+ template <typename FT, int SIZE>
+ class matrix_solver_sm_t: public matrix_solver_t
+ {
+ friend class matrix_solver_t;
+ public:
- template <typename T1, typename T2>
- nl_ext_double &A(const T1 &r, const T2 &c) { return m_A[r][c]; }
- template <typename T1, typename T2>
- nl_ext_double &W(const T1 &r, const T2 &c) { return m_W[r][c]; }
- template <typename T1, typename T2>
- nl_ext_double &Ainv(const T1 &r, const T2 &c) { return m_Ainv[r][c]; }
- template <typename T1>
- nl_ext_double &RHS(const T1 &r) { return m_RHS[r]; }
+ using float_ext_type = FT;
+ using float_type = FT;
+ // FIXME: dirty hack to make this compile
+ static constexpr const std::size_t storage_N = 100;
+ matrix_solver_sm_t(netlist_state_t &anetlist, const pstring &name,
+ const solver_parameters_t *params, const std::size_t size);
- template <typename T1, typename T2>
- nl_ext_double &lA(const T1 &r, const T2 &c) { return m_lA[r][c]; }
- template <typename T1, typename T2>
- nl_ext_double &lAinv(const T1 &r, const T2 &c) { return m_lAinv[r][c]; }
+ void vsetup(analog_net_t::list_t &nets) override;
+ void reset() override { matrix_solver_t::reset(); }
- nl_double m_last_RHS[storage_N]; // right hand side - contains currents
+ protected:
+ unsigned vsolve_non_dynamic(const bool newton_raphson) override;
+ unsigned solve_non_dynamic(const bool newton_raphson);
-private:
- static constexpr std::size_t m_pitch = ((( storage_N) + 7) / 8) * 8;
- nl_ext_double m_A[storage_N][m_pitch];
- nl_ext_double m_Ainv[storage_N][m_pitch];
- nl_ext_double m_W[storage_N][m_pitch];
- nl_ext_double m_RHS[storage_N]; // right hand side - contains currents
+ constexpr std::size_t size() const { return m_dim; }
- nl_ext_double m_lA[storage_N][m_pitch];
- nl_ext_double m_lAinv[storage_N][m_pitch];
+ void LE_invert();
- //nl_ext_double m_RHSx[storage_N];
+ template <typename T>
+ void LE_compute_x(T * x);
- const std::size_t m_dim;
- std::size_t m_cnt;
-};
+ template <typename T1, typename T2>
+ float_ext_type &A(const T1 &r, const T2 &c) { return m_A[r][c]; }
+ template <typename T1, typename T2>
+ float_ext_type &W(const T1 &r, const T2 &c) { return m_W[r][c]; }
+ template <typename T1, typename T2>
+ float_ext_type &Ainv(const T1 &r, const T2 &c) { return m_Ainv[r][c]; }
+ template <typename T1>
+ float_ext_type &RHS(const T1 &r) { return m_RHS[r]; }
-// ----------------------------------------------------------------------------------------
-// matrix_solver_direct
-// ----------------------------------------------------------------------------------------
-template <std::size_t m_N, std::size_t storage_N>
-matrix_solver_sm_t<m_N, storage_N>::~matrix_solver_sm_t()
-{
-}
+ template <typename T1, typename T2>
+ float_ext_type &lA(const T1 &r, const T2 &c) { return m_lA[r][c]; }
+ template <typename T1, typename T2>
+ float_ext_type &lAinv(const T1 &r, const T2 &c) { return m_lAinv[r][c]; }
-template <std::size_t m_N, std::size_t storage_N>
-void matrix_solver_sm_t<m_N, storage_N>::vsetup(analog_net_t::list_t &nets)
-{
- matrix_solver_t::setup_base(nets);
+ private:
+ static constexpr std::size_t m_pitch = ((( storage_N) + 7) / 8) * 8;
+ float_ext_type m_A[storage_N][m_pitch];
+ float_ext_type m_Ainv[storage_N][m_pitch];
+ float_ext_type m_W[storage_N][m_pitch];
+ float_ext_type m_RHS[storage_N]; // right hand side - contains currents
- netlist().save(*this, m_last_RHS, "m_last_RHS");
+ float_ext_type m_lA[storage_N][m_pitch];
+ float_ext_type m_lAinv[storage_N][m_pitch];
- for (unsigned k = 0; k < N(); k++)
- netlist().save(*this, RHS(k), plib::pfmt("RHS.{1}")(k));
-}
+ //float_ext_type m_RHSx[storage_N];
+ const std::size_t m_dim;
+ std::size_t m_cnt;
+ };
-template <std::size_t m_N, std::size_t storage_N>
-void matrix_solver_sm_t<m_N, storage_N>::LE_invert()
-{
- const std::size_t kN = N();
+ // ----------------------------------------------------------------------------------------
+ // matrix_solver_direct
+ // ----------------------------------------------------------------------------------------
- for (std::size_t i = 0; i < kN; i++)
+ template <typename FT, int SIZE>
+ void matrix_solver_sm_t<FT, SIZE>::vsetup(analog_net_t::list_t &nets)
{
- for (std::size_t j = 0; j < kN; j++)
- {
- W(i,j) = lA(i,j) = A(i,j);
- Ainv(i,j) = 0.0;
- }
- Ainv(i,i) = 1.0;
+ matrix_solver_t::setup_base(nets);
+
+ /* FIXME: Shouldn't be necessary */
+ for (std::size_t k = 0; k < size(); k++)
+ state().save(*this, RHS(k), this->name(), plib::pfmt("RHS.{1}")(k));
}
- /* down */
- for (std::size_t i = 0; i < kN; i++)
- {
- /* FIXME: Singular matrix? */
- const nl_double f = 1.0 / W(i,i);
- const auto * RESTRICT const p = m_terms[i]->m_nzrd.data();
- const std::size_t e = m_terms[i]->m_nzrd.size();
- /* Eliminate column i from row j */
+ template <typename FT, int SIZE>
+ void matrix_solver_sm_t<FT, SIZE>::LE_invert()
+ {
+ const std::size_t kN = size();
- const auto * RESTRICT const pb = m_terms[i]->m_nzbd.data();
- const std::size_t eb = m_terms[i]->m_nzbd.size();
- for (std::size_t jb = 0; jb < eb; jb++)
+ for (std::size_t i = 0; i < kN; i++)
{
- const unsigned j = pb[jb];
- const nl_double f1 = - W(j,i) * f;
- if (f1 != 0.0)
+ for (std::size_t j = 0; j < kN; j++)
{
- for (std::size_t k = 0; k < e; k++)
- W(j,p[k]) += W(i,p[k]) * f1;
- for (std::size_t k = 0; k <= i; k ++)
- Ainv(j,k) += Ainv(i,k) * f1;
+ W(i,j) = lA(i,j) = A(i,j);
+ Ainv(i,j) = 0.0;
}
+ Ainv(i,i) = 1.0;
}
- }
- /* up */
- for (std::size_t i = kN; i-- > 0; )
- {
- /* FIXME: Singular matrix? */
- const nl_double f = 1.0 / W(i,i);
- for (std::size_t j = i; j-- > 0; )
+ /* down */
+ for (std::size_t i = 0; i < kN; i++)
{
- const nl_double f1 = - W(j,i) * f;
- if (f1 != 0.0)
+ /* FIXME: Singular matrix? */
+ const float_type f = 1.0 / W(i,i);
+ const auto * const p = m_terms[i]->m_nzrd.data();
+ const std::size_t e = m_terms[i]->m_nzrd.size();
+
+ /* Eliminate column i from row j */
+
+ const auto * const pb = m_terms[i]->m_nzbd.data();
+ const std::size_t eb = m_terms[i]->m_nzbd.size();
+ for (std::size_t jb = 0; jb < eb; jb++)
{
- for (std::size_t k = i; k < kN; k++)
- W(j,k) += W(i,k) * f1;
- for (std::size_t k = 0; k < kN; k++)
- Ainv(j,k) += Ainv(i,k) * f1;
+ const unsigned j = pb[jb];
+ const float_type f1 = - W(j,i) * f;
+ if (f1 != 0.0)
+ {
+ for (std::size_t k = 0; k < e; k++)
+ W(j,p[k]) += W(i,p[k]) * f1;
+ for (std::size_t k = 0; k <= i; k ++)
+ Ainv(j,k) += Ainv(i,k) * f1;
+ }
}
}
- for (std::size_t k = 0; k < kN; k++)
+ /* up */
+ for (std::size_t i = kN; i-- > 0; )
{
- Ainv(i,k) *= f;
- lAinv(i,k) = Ainv(i,k);
+ /* FIXME: Singular matrix? */
+ const float_type f = 1.0 / W(i,i);
+ for (std::size_t j = i; j-- > 0; )
+ {
+ const float_type f1 = - W(j,i) * f;
+ if (f1 != 0.0)
+ {
+ for (std::size_t k = i; k < kN; k++)
+ W(j,k) += W(i,k) * f1;
+ for (std::size_t k = 0; k < kN; k++)
+ Ainv(j,k) += Ainv(i,k) * f1;
+ }
+ }
+ for (std::size_t k = 0; k < kN; k++)
+ {
+ Ainv(i,k) *= f;
+ lAinv(i,k) = Ainv(i,k);
+ }
}
}
-}
-
-template <std::size_t m_N, std::size_t storage_N>
-template <typename T>
-void matrix_solver_sm_t<m_N, storage_N>::LE_compute_x(
- T * RESTRICT x)
-{
- const std::size_t kN = N();
- for (std::size_t i=0; i<kN; i++)
- x[i] = 0.0;
-
- for (std::size_t k=0; k<kN; k++)
+ template <typename FT, int SIZE>
+ template <typename T>
+ void matrix_solver_sm_t<FT, SIZE>::LE_compute_x(
+ T * x)
{
- const nl_double f = RHS(k);
+ const std::size_t kN = size();
for (std::size_t i=0; i<kN; i++)
- x[i] += Ainv(i,k) * f;
- }
-}
-
-
-template <std::size_t m_N, std::size_t storage_N>
-unsigned matrix_solver_sm_t<m_N, storage_N>::solve_non_dynamic(const bool newton_raphson)
-{
- static constexpr const bool incremental = true;
- const std::size_t iN = N();
+ x[i] = 0.0;
- nl_double new_V[storage_N]; // = { 0.0 };
+ for (std::size_t k=0; k<kN; k++)
+ {
+ const float_type f = RHS(k);
- if ((m_cnt % 50) == 0)
- {
- /* complete calculation */
- this->LE_invert();
+ for (std::size_t i=0; i<kN; i++)
+ x[i] += Ainv(i,k) * f;
+ }
}
- else
+
+ template <typename FT, int SIZE>
+ unsigned matrix_solver_sm_t<FT, SIZE>::solve_non_dynamic(const bool newton_raphson)
{
- if (!incremental)
+ static constexpr const bool incremental = true;
+ const std::size_t iN = size();
+
+ float_type new_V[storage_N]; // = { 0.0 };
+
+ if ((m_cnt % 50) == 0)
{
- for (std::size_t row = 0; row < iN; row ++)
- for (std::size_t k = 0; k < iN; k++)
- Ainv(row,k) = lAinv(row, k);
+ /* complete calculation */
+ this->LE_invert();
}
- for (std::size_t row = 0; row < iN; row ++)
+ else
{
- nl_double v[m_pitch] = {0};
- std::size_t cols[m_pitch];
- std::size_t colcount = 0;
-
- auto &nz = m_terms[row]->m_nz;
- for (unsigned & col : nz)
+ if (!incremental)
{
- v[col] = A(row,col) - lA(row,col);
- if (incremental)
- lA(row,col) = A(row,col);
- if (v[col] != 0.0)
- cols[colcount++] = col;
+ for (std::size_t row = 0; row < iN; row ++)
+ for (std::size_t k = 0; k < iN; k++)
+ Ainv(row,k) = lAinv(row, k);
}
-
- if (colcount > 0)
+ for (std::size_t row = 0; row < iN; row ++)
{
- nl_double lamba = 0.0;
- nl_double w[m_pitch] = {0};
-
- nl_double z[m_pitch];
- /* compute w and lamba */
- for (std::size_t i = 0; i < iN; i++)
- z[i] = Ainv(i, row); /* u is row'th column */
+ float_type v[m_pitch] = {0};
+ std::size_t cols[m_pitch];
+ std::size_t colcount = 0;
- for (std::size_t j = 0; j < colcount; j++)
- lamba += v[cols[j]] * z[cols[j]];
-
- for (std::size_t j=0; j<colcount; j++)
+ auto &nz = m_terms[row]->m_nz;
+ for (unsigned & col : nz)
{
- std::size_t col = cols[j];
- nl_double f = v[col];
- for (std::size_t k = 0; k < iN; k++)
- w[k] += Ainv(col,k) * f; /* Transpose(Ainv) * v */
+ v[col] = A(row,col) - lA(row,col);
+ if (incremental)
+ lA(row,col) = A(row,col);
+ if (v[col] != 0.0)
+ cols[colcount++] = col;
}
- lamba = -1.0 / (1.0 + lamba);
- for (std::size_t i=0; i<iN; i++)
+ if (colcount > 0)
{
- const nl_double f = lamba * z[i];
- if (f != 0.0)
+ float_type lamba = 0.0;
+ float_type w[m_pitch] = {0};
+
+ float_type z[m_pitch];
+ /* compute w and lamba */
+ for (std::size_t i = 0; i < iN; i++)
+ z[i] = Ainv(i, row); /* u is row'th column */
+
+ for (std::size_t j = 0; j < colcount; j++)
+ lamba += v[cols[j]] * z[cols[j]];
+
+ for (std::size_t j=0; j<colcount; j++)
+ {
+ std::size_t col = cols[j];
+ float_type f = v[col];
for (std::size_t k = 0; k < iN; k++)
- Ainv(i,k) += f * w[k];
+ w[k] += Ainv(col,k) * f; /* Transpose(Ainv) * v */
+ }
+
+ lamba = -1.0 / (1.0 + lamba);
+ for (std::size_t i=0; i<iN; i++)
+ {
+ const float_type f = lamba * z[i];
+ if (f != 0.0)
+ for (std::size_t k = 0; k < iN; k++)
+ Ainv(i,k) += f * w[k];
+ }
}
- }
+ }
}
- }
- m_cnt++;
+ m_cnt++;
- this->LE_compute_x(new_V);
+ this->LE_compute_x(new_V);
- const nl_double err = (newton_raphson ? delta(new_V) : 0.0);
- store(new_V);
- return (err > this->m_params.m_accuracy) ? 2 : 1;
-}
+ const float_type err = (newton_raphson ? delta(new_V) : 0.0);
+ store(new_V);
+ return (err > this->m_params.m_accuracy) ? 2 : 1;
+ }
-template <std::size_t m_N, std::size_t storage_N>
-inline unsigned matrix_solver_sm_t<m_N, storage_N>::vsolve_non_dynamic(const bool newton_raphson)
-{
- build_LE_A<matrix_solver_sm_t>();
- build_LE_RHS<matrix_solver_sm_t>();
-
- for (std::size_t i=0, iN=N(); i < iN; i++)
- m_last_RHS[i] = RHS(i);
-
- this->m_stat_calculations++;
- return this->solve_non_dynamic(newton_raphson);
-}
-
-template <std::size_t m_N, std::size_t storage_N>
-matrix_solver_sm_t<m_N, storage_N>::matrix_solver_sm_t(netlist_t &anetlist, const pstring &name,
- const solver_parameters_t *params, const std::size_t size)
-: matrix_solver_t(anetlist, name, NOSORT, params)
-, m_dim(size)
-, m_cnt(0)
-{
- for (std::size_t k = 0; k < N(); k++)
+ template <typename FT, int SIZE>
+ unsigned matrix_solver_sm_t<FT, SIZE>::vsolve_non_dynamic(const bool newton_raphson)
+ {
+ this->build_LE_A(*this);
+ this->build_LE_RHS(*this);
+
+ this->m_stat_calculations++;
+ return this->solve_non_dynamic(newton_raphson);
+ }
+
+ template <typename FT, int SIZE>
+ matrix_solver_sm_t<FT, SIZE>::matrix_solver_sm_t(netlist_state_t &anetlist, const pstring &name,
+ const solver_parameters_t *params, const std::size_t size)
+ : matrix_solver_t(anetlist, name, NOSORT, params)
+ , m_dim(size)
+ , m_cnt(0)
{
- m_last_RHS[k] = 0.0;
}
-}
- } //namespace devices
+} // namespace devices
} // namespace netlist
#endif /* NLD_MS_DIRECT_H_ */
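The incremental branch of solve_non_dynamic() above is a direct application of the Sherman-Morrison identity: when only a few matrix entries change between solves, the stored inverse is corrected by one rank-one update per changed row instead of being rebuilt by LE_invert(). A minimal standalone sketch of that update follows; it is illustrative only (plain row-major std::vector storage, hypothetical names), not the solver's own data layout.

```
// Minimal sketch of the Sherman-Morrison rank-one inverse update performed by
// the incremental branch above. Plain row-major storage and illustrative names
// only - this is not the solver's own data layout.
#include <cstddef>
#include <vector>

// Apply A := A + e_row * v^T and keep Ainv consistent:
// Ainv := Ainv - (Ainv e_row)(v^T Ainv) / (1 + v^T Ainv e_row)
void sherman_morrison_row_update(std::vector<double> &Ainv, std::size_t n,
		std::size_t row, const std::vector<double> &v)
{
	std::vector<double> z(n), w(n, 0.0);

	for (std::size_t i = 0; i < n; i++)
		z[i] = Ainv[i * n + row];          // z = Ainv * e_row, i.e. column 'row' of Ainv

	double lambda = 0.0;
	for (std::size_t c = 0; c < n; c++)
		lambda += v[c] * z[c];             // v^T Ainv e_row

	for (std::size_t c = 0; c < n; c++)    // w = v^T Ainv (a row vector)
		for (std::size_t k = 0; k < n; k++)
			w[k] += v[c] * Ainv[c * n + k];

	const double f = -1.0 / (1.0 + lambda);
	for (std::size_t i = 0; i < n; i++)    // rank-one correction of the inverse
		for (std::size_t k = 0; k < n; k++)
			Ainv[i * n + k] += f * z[i] * w[k];
}
```

Restricting the v-products to the nonzero columns recorded in m_terms[row]->m_nz is what keeps the incremental path cheap, and the full LE_invert() every 50 calls bounds the accumulated rounding error.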
diff --git a/src/lib/netlist/solver/nld_ms_sor.h b/src/lib/netlist/solver/nld_ms_sor.h
index eea692d6c47..c31aaa6d46a 100644
--- a/src/lib/netlist/solver/nld_ms_sor.h
+++ b/src/lib/netlist/solver/nld_ms_sor.h
@@ -12,33 +12,42 @@
#ifndef NLD_MS_SOR_H_
#define NLD_MS_SOR_H_
-#include <algorithm>
-
#include "nld_ms_direct.h"
#include "nld_solver.h"
+#include <algorithm>
+
namespace netlist
{
namespace devices
- {
-template <std::size_t m_N, std::size_t storage_N>
-class matrix_solver_SOR_t: public matrix_solver_direct_t<m_N, storage_N>
+{
+
+template <typename FT, int SIZE>
+class matrix_solver_SOR_t: public matrix_solver_direct_t<FT, SIZE>
{
public:
- matrix_solver_SOR_t(netlist_t &anetlist, const pstring &name, const solver_parameters_t *params, const std::size_t size)
- : matrix_solver_direct_t<m_N, storage_N>(anetlist, name, matrix_solver_t::ASCENDING, params, size)
+ using float_type = FT;
+
+ matrix_solver_SOR_t(netlist_state_t &anetlist, const pstring &name, const solver_parameters_t *params, const std::size_t size)
+ : matrix_solver_direct_t<FT, SIZE>(anetlist, name, matrix_solver_t::ASCENDING, params, size)
, m_lp_fact(*this, "m_lp_fact", 0)
+ , w(size, 0.0)
+ , one_m_w(size, 0.0)
+ , RHS(size, 0.0)
+ //, new_V(size, 0.0)
{
}
- virtual ~matrix_solver_SOR_t() override {}
-
- virtual void vsetup(analog_net_t::list_t &nets) override;
- virtual unsigned vsolve_non_dynamic(const bool newton_raphson) override;
+ void vsetup(analog_net_t::list_t &nets) override;
+ unsigned vsolve_non_dynamic(const bool newton_raphson) override;
private:
- state_var<nl_double> m_lp_fact;
+ state_var<float_type> m_lp_fact;
+ std::vector<float_type> w;
+ std::vector<float_type> one_m_w;
+ std::vector<float_type> RHS;
+ //std::vector<float_type> new_V;
};
// ----------------------------------------------------------------------------------------
@@ -46,16 +55,16 @@ private:
// ----------------------------------------------------------------------------------------
-template <std::size_t m_N, std::size_t storage_N>
-void matrix_solver_SOR_t<m_N, storage_N>::vsetup(analog_net_t::list_t &nets)
+template <typename FT, int SIZE>
+void matrix_solver_SOR_t<FT, SIZE>::vsetup(analog_net_t::list_t &nets)
{
- matrix_solver_direct_t<m_N, storage_N>::vsetup(nets);
+ matrix_solver_direct_t<FT, SIZE>::vsetup(nets);
}
-template <std::size_t m_N, std::size_t storage_N>
-unsigned matrix_solver_SOR_t<m_N, storage_N>::vsolve_non_dynamic(const bool newton_raphson)
+template <typename FT, int SIZE>
+unsigned matrix_solver_SOR_t<FT, SIZE>::vsolve_non_dynamic(const bool newton_raphson)
{
- const std::size_t iN = this->N();
+ const std::size_t iN = this->size();
bool resched = false;
unsigned resched_cnt = 0;
@@ -67,26 +76,21 @@ unsigned matrix_solver_SOR_t<m_N, storage_N>::vsolve_non_dynamic(const bool newt
* omega = 2.0 / (1.0 + std::sqrt(1-rho))
*/
- const nl_double ws = this->m_params.m_gs_sor;
-
- nl_double w[storage_N];
- nl_double one_m_w[storage_N];
- nl_double RHS[storage_N];
- nl_double new_V[storage_N];
+ const float_type ws = this->m_params.m_gs_sor;
for (std::size_t k = 0; k < iN; k++)
{
- nl_double gtot_t = 0.0;
- nl_double gabs_t = 0.0;
- nl_double RHS_t = 0.0;
+ float_type gtot_t = 0.0;
+ float_type gabs_t = 0.0;
+ float_type RHS_t = 0.0;
const std::size_t term_count = this->m_terms[k]->count();
- const nl_double * const RESTRICT gt = this->m_terms[k]->gt();
- const nl_double * const RESTRICT go = this->m_terms[k]->go();
- const nl_double * const RESTRICT Idr = this->m_terms[k]->Idr();
- const nl_double * const *other_cur_analog = this->m_terms[k]->connected_net_V();
+ const float_type * const gt = this->m_gtn[k];
+ const float_type * const go = this->m_gonn[k];
+ const float_type * const Idr = this->m_Idrn[k];
+ auto other_cur_analog = this->m_connected_net_Vn[k];
- new_V[k] = this->m_nets[k]->Q_Analog();
+ this->m_new_V[k] = this->m_nets[k]->Q_Analog();
for (std::size_t i = 0; i < term_count; i++)
{
@@ -95,61 +99,60 @@ unsigned matrix_solver_SOR_t<m_N, storage_N>::vsolve_non_dynamic(const bool newt
}
for (std::size_t i = this->m_terms[k]->m_railstart; i < term_count; i++)
- RHS_t = RHS_t + go[i] * *other_cur_analog[i];
+ RHS_t = RHS_t - go[i] * *other_cur_analog[i];
RHS[k] = RHS_t;
- if (USE_GABS)
+ if (this->m_params.m_use_gabs)
{
for (std::size_t i = 0; i < term_count; i++)
gabs_t = gabs_t + std::abs(go[i]);
- gabs_t *= NL_FCONST(0.5); // derived by try and error
+					gabs_t *= plib::constants<nl_double>::cast(0.5); // derived by trial and error
if (gabs_t <= gtot_t)
{
w[k] = ws / gtot_t;
- one_m_w[k] = NL_FCONST(1.0) - ws;
+ one_m_w[k] = plib::constants<FT>::one() - ws;
}
else
{
- w[k] = NL_FCONST(1.0) / (gtot_t + gabs_t);
- one_m_w[k] = NL_FCONST(1.0) - NL_FCONST(1.0) * gtot_t / (gtot_t + gabs_t);
+ w[k] = plib::constants<FT>::one() / (gtot_t + gabs_t);
+ one_m_w[k] = plib::constants<FT>::one() - plib::constants<FT>::one() * gtot_t / (gtot_t + gabs_t);
}
}
else
{
w[k] = ws / gtot_t;
- one_m_w[k] = NL_FCONST(1.0) - ws;
+ one_m_w[k] = plib::constants<FT>::one() - ws;
}
}
- const nl_double accuracy = this->m_params.m_accuracy;
+ const float_type accuracy = this->m_params.m_accuracy;
do {
resched = false;
- nl_double err = 0;
+ float_type err = 0;
for (std::size_t k = 0; k < iN; k++)
{
- const int * RESTRICT net_other = this->m_terms[k]->connected_net_idx();
+ const int * net_other = this->m_terms[k]->m_connected_net_idx.data();
const std::size_t railstart = this->m_terms[k]->m_railstart;
- const nl_double * RESTRICT go = this->m_terms[k]->go();
+ const float_type * go = this->m_gonn[k];
- nl_double Idrive = 0.0;
+ float_type Idrive = 0.0;
for (std::size_t i = 0; i < railstart; i++)
- Idrive = Idrive + go[i] * new_V[net_other[i]];
+ Idrive = Idrive - go[i] * this->m_new_V[static_cast<std::size_t>(net_other[i])];
- const nl_double new_val = new_V[k] * one_m_w[k] + (Idrive + RHS[k]) * w[k];
+ const float_type new_val = this->m_new_V[k] * one_m_w[k] + (Idrive + RHS[k]) * w[k];
- err = std::max(std::abs(new_val - new_V[k]), err);
- new_V[k] = new_val;
+ err = std::max(std::abs(new_val - this->m_new_V[k]), err);
+ this->m_new_V[k] = new_val;
}
if (err > accuracy)
resched = true;
resched_cnt++;
- //} while (resched && (resched_cnt < this->m_params.m_gs_loops));
- } while (resched && ((resched_cnt < this->m_params.m_gs_loops)));
+ } while (resched && (resched_cnt < this->m_params.m_gs_loops));
this->m_iterative_total += resched_cnt;
this->m_stat_calculations++;
@@ -158,13 +161,12 @@ unsigned matrix_solver_SOR_t<m_N, storage_N>::vsolve_non_dynamic(const bool newt
{
// Fallback to direct solver ...
this->m_iterative_fail++;
- return matrix_solver_direct_t<m_N, storage_N>::vsolve_non_dynamic(newton_raphson);
+ return matrix_solver_direct_t<FT, SIZE>::vsolve_non_dynamic(newton_raphson);
}
- for (std::size_t k = 0; k < iN; k++)
- this->m_nets[k]->set_Q_Analog(new_V[k]);
-
- return resched_cnt;
+ const float_type err = (newton_raphson ? this->delta(this->m_new_V) : 0.0);
+ this->store(this->m_new_V);
+ return (err > this->m_params.m_accuracy) ? 2 : 1;
}
} //namespace devices
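For reference, the sweep that vsolve_non_dynamic() performs directly on the per-net conductance terms corresponds to the classic successive-over-relaxation pass below. This is a hedged sketch on an explicit dense system (illustrative names, not the netlist classes); the real solver never assembles A and falls back to the direct solver when the loop budget is exhausted.

```
// Sketch of one SOR sweep on an explicit dense n x n system A x = rhs.
// Illustrative only; the solver above iterates directly over per-net
// conductance terms and never assembles A.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

bool sor_solve(const std::vector<double> &A, const std::vector<double> &rhs,
		std::vector<double> &x, std::size_t n, double omega,
		double accuracy, unsigned max_loops)
{
	for (unsigned loop = 0; loop < max_loops; loop++)
	{
		double err = 0.0;
		for (std::size_t k = 0; k < n; k++)
		{
			double sum = 0.0;
			for (std::size_t j = 0; j < n; j++)
				sum += A[k * n + j] * x[j];    // x[j] for j < k is already updated (Gauss-Seidel)
			const double delta = omega * (rhs[k] - sum) / A[k * n + k];
			err = std::max(err, std::abs(delta));
			x[k] += delta;
		}
		if (err <= accuracy)
			return true;                       // converged
	}
	return false;                              // caller falls back to a direct solver
}
```

As the comment above notes, omega near 2/(1 + sqrt(1 - rho)) is optimal when the spectral radius rho of the Jacobi iteration is known; the solver instead derives a per-row factor from gtot and, when USE_GABS is set, from the summed off-diagonal conductances.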
diff --git a/src/lib/netlist/solver/nld_ms_sor_mat.h b/src/lib/netlist/solver/nld_ms_sor_mat.h
index 50bcac1a52d..83e4870cf28 100644
--- a/src/lib/netlist/solver/nld_ms_sor_mat.h
+++ b/src/lib/netlist/solver/nld_ms_sor_mat.h
@@ -12,212 +12,220 @@
#ifndef NLD_MS_SOR_MAT_H_
#define NLD_MS_SOR_MAT_H_
-#include <algorithm>
-
-#include "nld_ms_direct.h"
#include "nld_matrix_solver.h"
+#include "nld_ms_direct.h"
#include "nld_solver.h"
+#include <algorithm>
+
namespace netlist
{
- namespace devices
- {
-template <std::size_t m_N, std::size_t storage_N>
-class matrix_solver_SOR_mat_t: public matrix_solver_direct_t<m_N, storage_N>
+namespace devices
{
- friend class matrix_solver_t;
-public:
+ template <typename FT, int SIZE>
+ class matrix_solver_SOR_mat_t: public matrix_solver_direct_t<FT, SIZE>
+ {
+ friend class matrix_solver_t;
- matrix_solver_SOR_mat_t(netlist_t &anetlist, const pstring &name, const solver_parameters_t *params, std::size_t size)
- : matrix_solver_direct_t<m_N, storage_N>(anetlist, name, matrix_solver_t::DESCENDING, params, size)
- , m_Vdelta(*this, "m_Vdelta", 0.0)
- , m_omega(*this, "m_omega", params->m_gs_sor)
- , m_lp_fact(*this, "m_lp_fact", 0)
- , m_gs_fail(*this, "m_gs_fail", 0)
- , m_gs_total(*this, "m_gs_total", 0)
- {
- }
+ public:
- virtual ~matrix_solver_SOR_mat_t() override {}
+ using float_type = FT;
- virtual void vsetup(analog_net_t::list_t &nets) override;
+ matrix_solver_SOR_mat_t(netlist_state_t &anetlist, const pstring &name, const solver_parameters_t *params, std::size_t size)
+ : matrix_solver_direct_t<FT, SIZE>(anetlist, name, matrix_solver_t::ASCENDING, params, size)
+ , m_Vdelta(*this, "m_Vdelta", std::vector<float_type>(size))
+ , m_omega(*this, "m_omega", params->m_gs_sor)
+ , m_lp_fact(*this, "m_lp_fact", 0)
+ {
+ }
- virtual unsigned vsolve_non_dynamic(const bool newton_raphson) override;
+ void vsetup(analog_net_t::list_t &nets) override;
-private:
- state_var<nl_double[storage_N]> m_Vdelta;
+ unsigned vsolve_non_dynamic(const bool newton_raphson) override;
- state_var<nl_double> m_omega;
- state_var<nl_double> m_lp_fact;
- state_var<int> m_gs_fail;
- state_var<int> m_gs_total;
-};
+ private:
+ //state_var<float_type[storage_N]> m_Vdelta;
+ state_var<std::vector<float_type>> m_Vdelta;
-// ----------------------------------------------------------------------------------------
-// matrix_solver - Gauss - Seidel
-// ----------------------------------------------------------------------------------------
+ state_var<float_type> m_omega;
+ state_var<float_type> m_lp_fact;
-template <std::size_t m_N, std::size_t storage_N>
-void matrix_solver_SOR_mat_t<m_N, storage_N>::vsetup(analog_net_t::list_t &nets)
-{
- matrix_solver_direct_t<m_N, storage_N>::vsetup(nets);
-}
+ };
-#if 0
-//FIXME: move to solve_base
-template <unsigned m_N, unsigned storage_N>
-nl_double matrix_solver_SOR_mat_t<m_N, storage_N>::vsolve()
-{
- /*
- * enable linear prediction on first newton pass
- */
+ // ----------------------------------------------------------------------------------------
+ // matrix_solver - Gauss - Seidel
+ // ----------------------------------------------------------------------------------------
- if (USE_LINEAR_PREDICTION)
- for (unsigned k = 0; k < this->N(); k++)
- {
- this->m_last_V[k] = this->m_nets[k]->m_cur_Analog;
- this->m_nets[k]->m_cur_Analog = this->m_nets[k]->m_cur_Analog + this->m_Vdelta[k] * this->current_timestep() * m_lp_fact;
- }
- else
- for (unsigned k = 0; k < this->N(); k++)
- {
- this->m_last_V[k] = this->m_nets[k]->m_cur_Analog;
- }
-
- this->solve_base(this);
-
- if (USE_LINEAR_PREDICTION)
+ template <typename FT, int SIZE>
+ void matrix_solver_SOR_mat_t<FT, SIZE>::vsetup(analog_net_t::list_t &nets)
{
- nl_double sq = 0;
- nl_double sqo = 0;
- const nl_double rez_cts = 1.0 / this->current_timestep();
- for (unsigned k = 0; k < this->N(); k++)
- {
- const analog_net_t *n = this->m_nets[k];
- const nl_double nv = (n->Q_Analog() - this->m_last_V[k]) * rez_cts ;
- sq += nv * nv;
- sqo += this->m_Vdelta[k] * this->m_Vdelta[k];
- this->m_Vdelta[k] = nv;
- }
-
- // FIXME: used to be 1e90, but this would not be compatible with float
- if (sqo > NL_FCONST(1e-20))
- m_lp_fact = std::min(std::sqrt(sq/sqo), (nl_double) 2.0);
- else
- m_lp_fact = NL_FCONST(0.0);
+ matrix_solver_direct_t<FT, SIZE>::vsetup(nets);
}
+ #if 0
+ //FIXME: move to solve_base
+ template <unsigned m_N, unsigned storage_N>
+ float_type matrix_solver_SOR_mat_t<m_N, storage_N>::vsolve()
+ {
+ /*
+ * enable linear prediction on first newton pass
+ */
+
+ if (this->m_params->use_linear_prediction)
+ for (unsigned k = 0; k < this->size(); k++)
+ {
+ this->m_last_V[k] = this->m_nets[k]->m_cur_Analog;
+ this->m_nets[k]->m_cur_Analog = this->m_nets[k]->m_cur_Analog + this->m_Vdelta[k] * this->current_timestep() * m_lp_fact;
+ }
+ else
+ for (unsigned k = 0; k < this->size(); k++)
+ {
+ this->m_last_V[k] = this->m_nets[k]->m_cur_Analog;
+ }
- return this->compute_next_timestep();
-}
-#endif
-
-template <std::size_t m_N, std::size_t storage_N>
-unsigned matrix_solver_SOR_mat_t<m_N, storage_N>::vsolve_non_dynamic(const bool newton_raphson)
-{
- /* The matrix based code looks a lot nicer but actually is 30% slower than
- * the optimized code which works directly on the data structures.
- * Need something like that for gaussian elimination as well.
- */
-
-
- nl_double new_v[storage_N] = { 0.0 };
- const std::size_t iN = this->N();
-
- matrix_solver_t::build_LE_A<matrix_solver_SOR_mat_t>();
- matrix_solver_t::build_LE_RHS<matrix_solver_SOR_mat_t>();
+ this->solve_base(this);
- bool resched = false;
+ if (this->m_params->use_linear_prediction)
+ {
+ float_type sq = 0;
+ float_type sqo = 0;
+ const float_type rez_cts = 1.0 / this->current_timestep();
+ for (unsigned k = 0; k < this->size(); k++)
+ {
+ const analog_net_t *n = this->m_nets[k];
+ const float_type nv = (n->Q_Analog() - this->m_last_V[k]) * rez_cts ;
+ sq += nv * nv;
+ sqo += this->m_Vdelta[k] * this->m_Vdelta[k];
+ this->m_Vdelta[k] = nv;
+ }
+
+ // FIXME: used to be 1e90, but this would not be compatible with float
+ if (sqo > NL_FCONST(1e-20))
+ m_lp_fact = std::min(std::sqrt(sq/sqo), (float_type) 2.0);
+ else
+ m_lp_fact = NL_FCONST(0.0);
+ }
- unsigned resched_cnt = 0;
+ return this->compute_next_timestep();
+ }
+ #endif
-#if 0
- static int ws_cnt = 0;
- ws_cnt++;
- if (1 && ws_cnt % 200 == 0)
+ template <typename FT, int SIZE>
+ unsigned matrix_solver_SOR_mat_t<FT, SIZE>::vsolve_non_dynamic(const bool newton_raphson)
{
- // update omega
- nl_double lambdaN = 0;
- nl_double lambda1 = 1e9;
- for (int k = 0; k < iN; k++)
- {
- #if 0
- nl_double akk = std::abs(this->m_A[k][k]);
- if ( akk > lambdaN)
- lambdaN = akk;
- if (akk < lambda1)
- lambda1 = akk;
- #else
- nl_double akk = std::abs(this->m_A[k][k]);
- nl_double s = 0.0;
- for (int i=0; i<iN; i++)
- s = s + std::abs(this->m_A[k][i]);
- akk = s / akk - 1.0;
- if ( akk > lambdaN)
- lambdaN = akk;
- if (akk < lambda1)
- lambda1 = akk;
- #endif
- }
- //printf("lambda: %f %f\n", lambda, 2.0 / (1.0 + 2 * sqrt(lambda)) );
+ /* The matrix based code looks a lot nicer but actually is 30% slower than
+ * the optimized code which works directly on the data structures.
+ * Need something like that for gaussian elimination as well.
+ */
- //ws = 2.0 / (2.0 - lambdaN - lambda1);
- m_omega = 2.0 / (2.0 - lambda1);
- //printf("%f %f %f\n", m_omega, lambda1, lambdaN);
- }
-#endif
- for (std::size_t k = 0; k < iN; k++)
- new_v[k] = this->m_nets[k]->Q_Analog();
+ const std::size_t iN = this->size();
- do {
- resched = false;
- nl_double cerr = 0.0;
+ this->build_LE_A(*this);
+ this->build_LE_RHS(*this);
- for (std::size_t k = 0; k < iN; k++)
- {
- nl_double Idrive = 0;
+ bool resched = false;
- const auto *p = this->m_terms[k]->m_nz.data();
- const std::size_t e = this->m_terms[k]->m_nz.size();
+ unsigned resched_cnt = 0;
- for (std::size_t i = 0; i < e; i++)
- Idrive = Idrive + this->A(k,p[i]) * new_v[p[i]];
- const nl_double delta = m_omega * (this->RHS(k) - Idrive) / this->A(k,k);
- cerr = std::max(cerr, std::abs(delta));
- new_v[k] += delta;
+ #if 0
+ static int ws_cnt = 0;
+ ws_cnt++;
+ if (1 && ws_cnt % 200 == 0)
+ {
+ // update omega
+ float_type lambdaN = 0;
+ float_type lambda1 = 1e9;
+ for (int k = 0; k < iN; k++)
+ {
+ #if 0
+ float_type akk = std::abs(this->m_A[k][k]);
+ if ( akk > lambdaN)
+ lambdaN = akk;
+ if (akk < lambda1)
+ lambda1 = akk;
+ #else
+ float_type akk = std::abs(this->m_A[k][k]);
+ float_type s = 0.0;
+ for (int i=0; i<iN; i++)
+ s = s + std::abs(this->m_A[k][i]);
+ akk = s / akk - 1.0;
+ if ( akk > lambdaN)
+ lambdaN = akk;
+ if (akk < lambda1)
+ lambda1 = akk;
+ #endif
+ }
+
+ //ws = 2.0 / (2.0 - lambdaN - lambda1);
+ m_omega = 2.0 / (2.0 - lambda1);
}
+ #endif
- if (cerr > this->m_params.m_accuracy)
+ for (std::size_t k = 0; k < iN; k++)
+ this->m_new_V[k] = this->m_nets[k]->Q_Analog();
+
+ do {
+ resched = false;
+ float_type cerr = 0.0;
+
+ for (std::size_t k = 0; k < iN; k++)
+ {
+ float_type Idrive = 0;
+
+ const auto *p = this->m_terms[k]->m_nz.data();
+ const std::size_t e = this->m_terms[k]->m_nz.size();
+
+ for (std::size_t i = 0; i < e; i++)
+ Idrive = Idrive + this->A(k,p[i]) * this->m_new_V[p[i]];
+
+ FT w = m_omega / this->A(k,k);
+ if (this->m_params.m_use_gabs)
+ {
+ FT gabs_t = 0.0;
+ for (std::size_t i = 0; i < e; i++)
+ if (p[i] != k)
+ gabs_t = gabs_t + std::abs(this->A(k,p[i]));
+
+						gabs_t *= plib::constants<FT>::one(); // derived by trial and error
+ if (gabs_t > this->A(k,k))
+ {
+ w = plib::constants<FT>::one() / (this->A(k,k) + gabs_t);
+ }
+ }
+
+ const float_type delta = w * (this->RHS(k) - Idrive) ;
+ cerr = std::max(cerr, std::abs(delta));
+ this->m_new_V[k] += delta;
+ }
+
+ if (cerr > this->m_params.m_accuracy)
+ {
+ resched = true;
+ }
+ resched_cnt++;
+ } while (resched && (resched_cnt < this->m_params.m_gs_loops));
+
+ this->m_stat_calculations++;
+ this->m_iterative_total += resched_cnt;
+
+ if (resched)
{
- resched = true;
+ this->m_iterative_fail++;
+ //this->netlist().warning("Falling back to direct solver .. Consider increasing RESCHED_LOOPS");
+ return matrix_solver_direct_t<FT, SIZE>::solve_non_dynamic(newton_raphson);
}
- resched_cnt++;
- } while (resched && (resched_cnt < this->m_params.m_gs_loops));
-
- this->m_stat_calculations++;
- this->m_iterative_total += resched_cnt;
- this->m_gs_total += resched_cnt;
- if (resched)
- {
- this->m_iterative_fail++;
- //this->netlist().warning("Falling back to direct solver .. Consider increasing RESCHED_LOOPS");
- this->m_gs_fail++;
+ const float_type err = (newton_raphson ? this->delta(this->m_new_V) : 0.0);
+ this->store(this->m_new_V);
+ return (err > this->m_params.m_accuracy) ? 2 : 1;
- return matrix_solver_direct_t<m_N, storage_N>::solve_non_dynamic(newton_raphson);
- }
- else {
- this->store(new_v);
- return resched_cnt;
}
-}
-
- } //namespace devices
+} // namespace devices
} // namespace netlist
#endif /* NLD_MS_GAUSS_SEIDEL_H_ */
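Relative to the terminal-based SOR pass, the main behavioural addition in this file is the optional clamping of the per-row relaxation factor when a row is not diagonally dominant. A small sketch of just that decision, with illustrative names and a plain dense row:

```
// Sketch of the per-row relaxation factor chosen by the USE_GABS branch above.
// If the off-diagonal magnitudes outweigh the diagonal, drop to the
// unconditionally safe factor 1 / (A_kk + gabs). Names are illustrative only.
#include <cmath>
#include <cstddef>

double row_relaxation_factor(const double *A_row, std::size_t n, std::size_t k,
		double omega, bool use_gabs)
{
	double w = omega / A_row[k];
	if (use_gabs)
	{
		double gabs = 0.0;
		for (std::size_t j = 0; j < n; j++)
			if (j != k)
				gabs += std::abs(A_row[j]);    // off-diagonal row sum
		if (gabs > A_row[k])                   // row is not diagonally dominant
			w = 1.0 / (A_row[k] + gabs);
	}
	return w;
}
```

This trades convergence speed for robustness on badly conditioned rows; the fallback to matrix_solver_direct_t::solve_non_dynamic() above still covers cases where even the clamped iteration fails to converge.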
diff --git a/src/lib/netlist/solver/nld_ms_w.h b/src/lib/netlist/solver/nld_ms_w.h
index 0d4e7781eed..3372b50c7c5 100644
--- a/src/lib/netlist/solver/nld_ms_w.h
+++ b/src/lib/netlist/solver/nld_ms_w.h
@@ -40,80 +40,80 @@
#ifndef NLD_MS_W_H_
#define NLD_MS_W_H_
-#include <algorithm>
-
-#include "nld_solver.h"
#include "nld_matrix_solver.h"
-#include "vector_base.h"
+#include "nld_solver.h"
+#include "plib/vector_ops.h"
+
+#include <algorithm>
namespace netlist
{
namespace devices
{
-//#define nl_ext_double _float128 // slow, very slow
-//#define nl_ext_double long double // slightly slower
-#define nl_ext_double nl_double
-template <std::size_t m_N, std::size_t storage_N>
+template <typename FT, int SIZE>
class matrix_solver_w_t: public matrix_solver_t
{
friend class matrix_solver_t;
+
public:
+ using float_ext_type = FT;
+ using float_type = FT;
- matrix_solver_w_t(netlist_t &anetlist, const pstring &name, const solver_parameters_t *params, const std::size_t size);
+ // FIXME: dirty hack to make this compile
+ static constexpr const std::size_t storage_N = 100;
- virtual ~matrix_solver_w_t() override;
+ matrix_solver_w_t(netlist_state_t &anetlist, const pstring &name, const solver_parameters_t *params, const std::size_t size);
- virtual void vsetup(analog_net_t::list_t &nets) override;
- virtual void reset() override { matrix_solver_t::reset(); }
+ void vsetup(analog_net_t::list_t &nets) override;
+ void reset() override { matrix_solver_t::reset(); }
protected:
- virtual unsigned vsolve_non_dynamic(const bool newton_raphson) override;
+ unsigned vsolve_non_dynamic(const bool newton_raphson) override;
unsigned solve_non_dynamic(const bool newton_raphson);
- constexpr std::size_t N() const { return (m_N == 0) ? m_dim : m_N; }
+ constexpr std::size_t size() const { return m_dim; }
void LE_invert();
template <typename T>
- void LE_compute_x(T * RESTRICT x);
+ void LE_compute_x(T * x);
template <typename T1, typename T2>
- inline nl_ext_double &A(const T1 &r, const T2 &c) { return m_A[r][c]; }
+ float_ext_type &A(const T1 &r, const T2 &c) { return m_A[r][c]; }
template <typename T1, typename T2>
- inline nl_ext_double &W(const T1 &r, const T2 &c) { return m_W[r][c]; }
+ float_ext_type &W(const T1 &r, const T2 &c) { return m_W[r][c]; }
/* access to Ainv for fixed columns over row, there store transposed */
template <typename T1, typename T2>
- inline nl_ext_double &Ainv(const T1 &r, const T2 &c) { return m_Ainv[c][r]; }
+ float_ext_type &Ainv(const T1 &r, const T2 &c) { return m_Ainv[c][r]; }
template <typename T1>
- inline nl_ext_double &RHS(const T1 &r) { return m_RHS[r]; }
+ float_ext_type &RHS(const T1 &r) { return m_RHS[r]; }
template <typename T1, typename T2>
- inline nl_ext_double &lA(const T1 &r, const T2 &c) { return m_lA[r][c]; }
+ float_ext_type &lA(const T1 &r, const T2 &c) { return m_lA[r][c]; }
- nl_double m_last_RHS[storage_N]; // right hand side - contains currents
private:
static constexpr std::size_t m_pitch = ((( storage_N) + 7) / 8) * 8;
- nl_ext_double m_A[storage_N][m_pitch];
- nl_ext_double m_Ainv[storage_N][m_pitch];
- nl_ext_double m_W[storage_N][m_pitch];
- nl_ext_double m_RHS[storage_N]; // right hand side - contains currents
+ float_ext_type m_A[storage_N][m_pitch];
+ float_ext_type m_Ainv[storage_N][m_pitch];
+ float_ext_type m_W[storage_N][m_pitch];
+ float_ext_type m_RHS[storage_N]; // right hand side - contains currents
- nl_ext_double m_lA[storage_N][m_pitch];
+ float_ext_type m_lA[storage_N][m_pitch];
/* temporary */
- nl_double H[storage_N][m_pitch] ;
+ float_type H[storage_N][m_pitch] ;
unsigned rows[storage_N];
unsigned cols[storage_N][m_pitch];
unsigned colcount[storage_N];
unsigned m_cnt;
- //nl_ext_double m_RHSx[storage_N];
+ //float_ext_type m_RHSx[storage_N];
const std::size_t m_dim;
@@ -123,28 +123,22 @@ private:
// matrix_solver_direct
// ----------------------------------------------------------------------------------------
-template <std::size_t m_N, std::size_t storage_N>
-matrix_solver_w_t<m_N, storage_N>::~matrix_solver_w_t()
-{
-}
-
-template <std::size_t m_N, std::size_t storage_N>
-void matrix_solver_w_t<m_N, storage_N>::vsetup(analog_net_t::list_t &nets)
+template <typename FT, int SIZE>
+void matrix_solver_w_t<FT, SIZE>::vsetup(analog_net_t::list_t &nets)
{
matrix_solver_t::setup_base(nets);
- netlist().save(*this, m_last_RHS, "m_last_RHS");
-
- for (unsigned k = 0; k < N(); k++)
- netlist().save(*this, RHS(k), plib::pfmt("RHS.{1}")(k));
+ // FIXME: This shouldn't be necessary, recalculate on each entry ...
+ for (std::size_t k = 0; k < size(); k++)
+ state().save(*this, RHS(k), this->name(), plib::pfmt("RHS.{1}")(k));
}
-template <std::size_t m_N, std::size_t storage_N>
-void matrix_solver_w_t<m_N, storage_N>::LE_invert()
+template <typename FT, int SIZE>
+void matrix_solver_w_t<FT, SIZE>::LE_invert()
{
- const std::size_t kN = N();
+ const std::size_t kN = size();
for (std::size_t i = 0; i < kN; i++)
{
@@ -159,18 +153,18 @@ void matrix_solver_w_t<m_N, storage_N>::LE_invert()
for (std::size_t i = 0; i < kN; i++)
{
/* FIXME: Singular matrix? */
- const nl_double f = 1.0 / W(i,i);
- const auto * RESTRICT const p = m_terms[i]->m_nzrd.data();
+ const float_type f = 1.0 / W(i,i);
+ const auto * const p = m_terms[i]->m_nzrd.data();
const size_t e = m_terms[i]->m_nzrd.size();
/* Eliminate column i from row j */
- const auto * RESTRICT const pb = m_terms[i]->m_nzbd.data();
+ const auto * const pb = m_terms[i]->m_nzbd.data();
const size_t eb = m_terms[i]->m_nzbd.size();
for (std::size_t jb = 0; jb < eb; jb++)
{
const auto j = pb[jb];
- const nl_double f1 = - W(j,i) * f;
+ const float_type f1 = - W(j,i) * f;
if (f1 != 0.0)
{
for (std::size_t k = 0; k < e; k++)
@@ -184,10 +178,10 @@ void matrix_solver_w_t<m_N, storage_N>::LE_invert()
for (std::size_t i = kN; i-- > 0; )
{
/* FIXME: Singular matrix? */
- const nl_double f = 1.0 / W(i,i);
+ const float_type f = 1.0 / W(i,i);
for (std::size_t j = i; j-- > 0; )
{
- const nl_double f1 = - W(j,i) * f;
+ const float_type f1 = - W(j,i) * f;
if (f1 != 0.0)
{
for (std::size_t k = i; k < kN; k++)
@@ -203,19 +197,19 @@ void matrix_solver_w_t<m_N, storage_N>::LE_invert()
}
}
-template <std::size_t m_N, std::size_t storage_N>
+template <typename FT, int SIZE>
template <typename T>
-void matrix_solver_w_t<m_N, storage_N>::LE_compute_x(
- T * RESTRICT x)
+void matrix_solver_w_t<FT, SIZE>::LE_compute_x(
+ T * x)
{
- const std::size_t kN = N();
+ const std::size_t kN = size();
for (std::size_t i=0; i<kN; i++)
x[i] = 0.0;
for (std::size_t k=0; k<kN; k++)
{
- const nl_double f = RHS(k);
+ const float_type f = RHS(k);
for (std::size_t i=0; i<kN; i++)
x[i] += Ainv(i,k) * f;
@@ -223,14 +217,14 @@ void matrix_solver_w_t<m_N, storage_N>::LE_compute_x(
}
-template <std::size_t m_N, std::size_t storage_N>
-unsigned matrix_solver_w_t<m_N, storage_N>::solve_non_dynamic(const bool newton_raphson)
+template <typename FT, int SIZE>
+unsigned matrix_solver_w_t<FT, SIZE>::solve_non_dynamic(const bool newton_raphson)
{
- const auto iN = N();
+ const auto iN = size();
- nl_double new_V[storage_N]; // = { 0.0 };
+ float_type new_V[storage_N]; // = { 0.0 };
- if ((m_cnt % 100) == 0)
+ if ((m_cnt % 50) == 0)
{
/* complete calculation */
this->LE_invert();
@@ -266,7 +260,7 @@ unsigned matrix_solver_w_t<m_N, storage_N>::solve_non_dynamic(const bool newton_
/* construct w = transform(V) * y
* dim: rowcount x iN
* */
- nl_double w[storage_N];
+ float_type w[storage_N];
for (unsigned i = 0; i < rowcount; i++)
{
const unsigned r = rows[i];
@@ -287,7 +281,7 @@ unsigned matrix_solver_w_t<m_N, storage_N>::solve_non_dynamic(const bool newton_
for (unsigned k=0; k< colcount[i]; k++)
{
const unsigned col = cols[i][k];
- nl_double f = VT(rows[i],col);
+ float_type f = VT(rows[i],col);
if (f!=0.0)
for (unsigned j= 0; j < rowcount; j++)
H[i][j] += f * Ainv(col,rows[j]);
@@ -297,16 +291,16 @@ unsigned matrix_solver_w_t<m_N, storage_N>::solve_non_dynamic(const bool newton_
for (unsigned i = 0; i < rowcount; i++)
{
if (H[i][i] == 0.0)
- printf("%s H singular\n", this->name().c_str());
- const nl_double f = 1.0 / H[i][i];
+ plib::perrlogger("{} H singular\n", this->name());
+ const float_type f = 1.0 / H[i][i];
for (unsigned j = i+1; j < rowcount; j++)
{
- const nl_double f1 = - f * H[j][i];
+ const float_type f1 = - f * H[j][i];
if (f1!=0.0)
{
- nl_double *pj = &H[j][i+1];
- const nl_double *pi = &H[i][i+1];
+ float_type *pj = &H[j][i+1];
+ const float_type *pi = &H[i][i+1];
for (unsigned k = 0; k < rowcount-i-1; k++)
pj[k] += f1 * pi[k];
//H[j][k] += f1 * H[i][k];
@@ -316,12 +310,12 @@ unsigned matrix_solver_w_t<m_N, storage_N>::solve_non_dynamic(const bool newton_
}
/* Back substitution */
//inv(H) w = t w = H t
- nl_double t[storage_N]; // FIXME: convert to member
+ float_type t[storage_N]; // FIXME: convert to member
for (unsigned j = rowcount; j-- > 0; )
{
- nl_double tmp = 0;
- const nl_double *pj = &H[j][j+1];
- const nl_double *tj = &t[j+1];
+ float_type tmp = 0;
+ const float_type *pj = &H[j][j+1];
+ const float_type *tj = &t[j+1];
for (unsigned k = 0; k < rowcount-j-1; k++)
tmp += pj[k] * tj[k];
//tmp += H[j][k] * t[k];
@@ -331,7 +325,7 @@ unsigned matrix_solver_w_t<m_N, storage_N>::solve_non_dynamic(const bool newton_
/* x = y - Zt */
for (unsigned i=0; i<iN; i++)
{
- nl_double tmp = 0.0;
+ float_type tmp = 0.0;
for (unsigned j=0; j<rowcount;j++)
{
const unsigned row = rows[j];
@@ -343,47 +337,40 @@ unsigned matrix_solver_w_t<m_N, storage_N>::solve_non_dynamic(const bool newton_
}
m_cnt++;
- if (0)
+ if (false)
for (unsigned i=0; i<iN; i++)
{
- nl_double tmp = 0.0;
+ float_type tmp = 0.0;
for (unsigned j=0; j<iN; j++)
{
tmp += A(i,j) * new_V[j];
}
if (std::abs(tmp-RHS(i)) > 1e-6)
- printf("%s failed on row %d: %f RHS: %f\n", this->name().c_str(), i, std::abs(tmp-RHS(i)), RHS(i));
+ plib::perrlogger("{} failed on row {}: {} RHS: {}\n", this->name(), i, std::abs(tmp-RHS(i)), RHS(i));
}
- const nl_double err = (newton_raphson ? delta(new_V) : 0.0);
+ const float_type err = (newton_raphson ? delta(new_V) : 0.0);
store(new_V);
return (err > this->m_params.m_accuracy) ? 2 : 1;
}
-template <std::size_t m_N, std::size_t storage_N>
-inline unsigned matrix_solver_w_t<m_N, storage_N>::vsolve_non_dynamic(const bool newton_raphson)
+template <typename FT, int SIZE>
+unsigned matrix_solver_w_t<FT, SIZE>::vsolve_non_dynamic(const bool newton_raphson)
{
- build_LE_A<matrix_solver_w_t>();
- build_LE_RHS<matrix_solver_w_t>();
-
- for (std::size_t i=0, iN=N(); i < iN; i++)
- m_last_RHS[i] = RHS(i);
+ this->build_LE_A(*this);
+ this->build_LE_RHS(*this);
this->m_stat_calculations++;
return this->solve_non_dynamic(newton_raphson);
}
-template <std::size_t m_N, std::size_t storage_N>
-matrix_solver_w_t<m_N, storage_N>::matrix_solver_w_t(netlist_t &anetlist, const pstring &name,
+template <typename FT, int SIZE>
+matrix_solver_w_t<FT, SIZE>::matrix_solver_w_t(netlist_state_t &anetlist, const pstring &name,
const solver_parameters_t *params, const std::size_t size)
-: matrix_solver_t(anetlist, name, NOSORT, params)
- ,m_cnt(0)
+ : matrix_solver_t(anetlist, name, NOSORT, params)
+ , m_cnt(0)
, m_dim(size)
{
- for (std::size_t k = 0; k < N(); k++)
- {
- m_last_RHS[k] = 0.0;
- }
}
} //namespace devices
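The nld_ms_w.h solver keeps the same structure as the Sherman-Morrison variant but batches the changed rows into one rank-k correction via the Woodbury identity. In the usual notation, with the changes collected into a low-rank term U V^T (the small matrix H eliminated above playing the role of I_k + V^T A^-1 U, up to the row bookkeeping):

$$(A + U V^{\top})^{-1} = A^{-1} - A^{-1} U \left(I_k + V^{\top} A^{-1} U\right)^{-1} V^{\top} A^{-1}$$

so a solve against the updated matrix reduces to y = A^-1 RHS, the k-by-k elimination and back substitution for t, and the final correction x = y - Z t with Z = A^-1 U, which is the "x = y - Zt" step in the code.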
diff --git a/src/lib/netlist/solver/nld_solver.cpp b/src/lib/netlist/solver/nld_solver.cpp
index cae76caab1a..4734cc3624d 100644
--- a/src/lib/netlist/solver/nld_solver.cpp
+++ b/src/lib/netlist/solver/nld_solver.cpp
@@ -31,393 +31,398 @@
#pragma GCC optimize "ivopts"
#endif
-#include <algorithm>
-#include <cmath> // <<= needed by windows build
-
-#include "../nl_lists.h"
-
-#include "../plib/pomp.h"
-
-#include "../nl_factory.h"
-
-#include "nld_solver.h"
+#include "netlist/nl_lists.h"
+#include "netlist/nl_factory.h"
#include "nld_matrix_solver.h"
-
-#if 1
#include "nld_ms_direct.h"
-#include "nld_ms_gcr.h"
-#else
-#include "nld_ms_direct_lu.h"
-#endif
-#include "nld_ms_w.h"
-#include "nld_ms_sm.h"
#include "nld_ms_direct1.h"
#include "nld_ms_direct2.h"
+#include "nld_ms_gcr.h"
+#include "nld_ms_gmres.h"
+#include "nld_ms_sm.h"
#include "nld_ms_sor.h"
#include "nld_ms_sor_mat.h"
-#include "nld_ms_gmres.h"
+#include "nld_ms_w.h"
+#include "nld_solver.h"
+#include "plib/pomp.h"
+
+#include <algorithm>
+#include <cmath>
namespace netlist
{
- namespace devices
- {
-
-
+namespace devices
+{
-// ----------------------------------------------------------------------------------------
-// solver
-// ----------------------------------------------------------------------------------------
+ // ----------------------------------------------------------------------------------------
+ // solver
+ // ----------------------------------------------------------------------------------------
-NETLIB_RESET(solver)
-{
- for (std::size_t i = 0; i < m_mat_solvers.size(); i++)
- m_mat_solvers[i]->do_reset();
-}
+ NETLIB_RESET(solver)
+ {
+ for (auto &s : m_mat_solvers)
+ s->reset();
+ }
-void NETLIB_NAME(solver)::stop()
-{
- for (std::size_t i = 0; i < m_mat_solvers.size(); i++)
- m_mat_solvers[i]->log_stats();
-}
+ void NETLIB_NAME(solver)::stop()
+ {
+ for (auto &s : m_mat_solvers)
+ s->log_stats();
+ }
-NETLIB_NAME(solver)::~NETLIB_NAME(solver)()
-{
-}
+ NETLIB_UPDATE(solver)
+ {
+ if (m_params.m_dynamic_ts)
+ return;
-NETLIB_UPDATE(solver)
-{
- if (m_params.m_dynamic_ts)
- return;
+ netlist_time now(exec().time());
+ /* force solving during start up if there are no time-step devices */
+ /* FIXME: Needs a more elegant solution */
+ bool force_solve = (now < netlist_time::from_double(2 * m_params.m_max_timestep));
- /* force solving during start up if there are no time-step devices */
- /* FIXME: Needs a more elegant solution */
- bool force_solve = (netlist().time() < netlist_time::from_double(2 * m_params.m_max_timestep));
+ std::size_t nthreads = std::min(static_cast<std::size_t>(m_parallel()), plib::omp::get_max_threads());
- std::size_t nthreads = std::min(m_parallel(), plib::omp::get_max_threads());
- std::size_t t_cnt = 0;
- int solv[128];
- for (int i = 0; i < m_mat_solvers.size(); i++)
- if (m_mat_solvers[i]->has_timestep_devices() || force_solve)
- solv[t_cnt++] = i;
+ std::vector<matrix_solver_t *> &solvers = (force_solve ? m_mat_solvers_all : m_mat_solvers_timestepping);
- if (nthreads > 1 && t_cnt > 1)
- {
- plib::omp::set_num_threads(nthreads);
- plib::omp::for_static(0, t_cnt, [this, &solv](int i) { ATTR_UNUSED const netlist_time ts = this->m_mat_solvers[solv[i]]->solve(); });
- }
- else
- for (auto & solver : m_mat_solvers)
- if (solver->has_timestep_devices() || force_solve)
- ATTR_UNUSED const netlist_time ts = solver->solve();
+ if (nthreads > 1 && solvers.size() > 1)
+ {
+ plib::omp::set_num_threads(nthreads);
+ plib::omp::for_static(static_cast<std::size_t>(0), solvers.size(), [&solvers, now](std::size_t i)
+ {
+ const netlist_time ts = solvers[i]->solve(now);
+ plib::unused_var(ts);
+ });
+ }
+ else
+ for (auto & solver : solvers)
+ {
+ const netlist_time ts = solver->solve(now);
+ plib::unused_var(ts);
+ }
- for (auto & solver : m_mat_solvers)
- if (solver->has_timestep_devices() || force_solve)
+ for (auto & solver : solvers)
solver->update_inputs();
- /* step circuit */
- if (!m_Q_step.net().is_queued())
- {
- m_Q_step.net().toggle_and_push_to_queue(netlist_time::from_double(m_params.m_max_timestep));
+ /* step circuit */
+ if (!m_Q_step.net().is_queued())
+ {
+ m_Q_step.net().toggle_and_push_to_queue(netlist_time::from_double(m_params.m_max_timestep));
+ }
}
-}
-
-template <class C>
-std::unique_ptr<matrix_solver_t> create_it(netlist_t &nl, pstring name, solver_parameters_t &params, std::size_t size)
-{
- typedef C solver;
- return plib::make_unique<solver>(nl, name, &params, size);
-}
-template <std::size_t m_N, std::size_t storage_N>
-std::unique_ptr<matrix_solver_t> NETLIB_NAME(solver)::create_solver(std::size_t size, const pstring &solvername)
-{
- if (pstring("SOR_MAT").equals(m_method()))
+ template <class C>
+ pool_owned_ptr<matrix_solver_t> create_it(netlist_state_t &nl, pstring name, solver_parameters_t &params, std::size_t size)
{
- return create_it<matrix_solver_SOR_mat_t<m_N, storage_N>>(netlist(), solvername, m_params, size);
- //typedef matrix_solver_SOR_mat_t<m_N,storage_N> solver_sor_mat;
- //return plib::make_unique<solver_sor_mat>(netlist(), solvername, &m_params, size);
+ return pool().make_poolptr<C>(nl, name, &params, size);
}
- else if (pstring("MAT_CR").equals(m_method()))
+
+ template <typename FT, int SIZE>
+ pool_owned_ptr<matrix_solver_t> NETLIB_NAME(solver)::create_solver(std::size_t size, const pstring &solvername)
{
- if (size > 0) // GCR always outperforms MAT solver
+ if (m_method() == "SOR_MAT")
+ {
+ return create_it<matrix_solver_SOR_mat_t<FT, SIZE>>(state(), solvername, m_params, size);
+ //typedef matrix_solver_SOR_mat_t<m_N,storage_N> solver_sor_mat;
+ //return plib::make_unique<solver_sor_mat>(state(), solvername, &m_params, size);
+ }
+ else if (m_method() == "MAT_CR")
+ {
+ if (size > 0) // GCR always outperforms MAT solver
+ {
+ return create_it<matrix_solver_GCR_t<FT, SIZE>>(state(), solvername, m_params, size);
+ }
+ else
+ {
+ return create_it<matrix_solver_direct_t<FT, SIZE>>(state(), solvername, m_params, size);
+ }
+ }
+ else if (m_method() == "MAT")
+ {
+ return create_it<matrix_solver_direct_t<FT, SIZE>>(state(), solvername, m_params, size);
+ }
+ else if (m_method() == "SM")
+ {
+ /* Sherman-Morrison Formula */
+ return create_it<matrix_solver_sm_t<FT, SIZE>>(state(), solvername, m_params, size);
+ }
+ else if (m_method() == "W")
{
- typedef matrix_solver_GCR_t<m_N,storage_N> solver_mat;
- return plib::make_unique<solver_mat>(netlist(), solvername, &m_params, size);
+ /* Woodbury Formula */
+ return create_it<matrix_solver_w_t<FT, SIZE>>(state(), solvername, m_params, size);
+ }
+ else if (m_method() == "SOR")
+ {
+ return create_it<matrix_solver_SOR_t<FT, SIZE>>(state(), solvername, m_params, size);
+ }
+ else if (m_method() == "GMRES")
+ {
+ return create_it<matrix_solver_GMRES_t<FT, SIZE>>(state(), solvername, m_params, size);
}
else
{
- typedef matrix_solver_direct_t<m_N,storage_N> solver_mat;
- return plib::make_unique<solver_mat>(netlist(), solvername, &m_params, size);
+ log().fatal(MF_1_UNKNOWN_SOLVER_TYPE, m_method());
+ return pool_owned_ptr<matrix_solver_t>();
}
}
- else if (pstring("MAT").equals(m_method()))
- {
- typedef matrix_solver_direct_t<m_N,storage_N> solver_mat;
- return plib::make_unique<solver_mat>(netlist(), solvername, &m_params, size);
- }
- else if (pstring("SM").equals(m_method()))
- {
- /* Sherman-Morrison Formula */
- typedef matrix_solver_sm_t<m_N,storage_N> solver_mat;
- return plib::make_unique<solver_mat>(netlist(), solvername, &m_params, size);
- }
- else if (pstring("W").equals(m_method()))
- {
- /* Woodbury Formula */
- typedef matrix_solver_w_t<m_N,storage_N> solver_mat;
- return plib::make_unique<solver_mat>(netlist(), solvername, &m_params, size);
- }
- else if (pstring("SOR").equals(m_method()))
- {
- typedef matrix_solver_SOR_t<m_N,storage_N> solver_GS;
- return plib::make_unique<solver_GS>(netlist(), solvername, &m_params, size);
- }
- else if (pstring("GMRES").equals(m_method()))
- {
- typedef matrix_solver_GMRES_t<m_N,storage_N> solver_GMRES;
- return plib::make_unique<solver_GMRES>(netlist(), solvername, &m_params, size);
- }
- else
+
+ template <typename FT, int SIZE>
+ pool_owned_ptr<matrix_solver_t> NETLIB_NAME(solver)::create_solver_x(std::size_t size, const pstring &solvername)
{
- log().fatal(MF_1_UNKNOWN_SOLVER_TYPE, m_method());
- return nullptr;
+ if (SIZE > 0)
+ {
+ if (size == SIZE)
+ return create_solver<FT, SIZE>(size, solvername);
+ else
+ return this->create_solver_x<FT, SIZE-1>(size, solvername);
+ }
+ else
+ {
+ if (size * 2 > -SIZE )
+ return create_solver<FT, SIZE>(size, solvername);
+ else
+ return this->create_solver_x<FT, SIZE / 2>(size, solvername);
+ }
}
-}
-
-struct net_splitter
-{
- bool already_processed(analog_net_t *n)
+ struct net_splitter
{
- if (n->isRailNet())
- return true;
- for (auto & grp : groups)
- if (plib::container::contains(grp, n))
+
+ bool already_processed(const analog_net_t &n) const
+ {
+ /* no need to process rail nets - these are known variables */
+ if (n.isRailNet())
return true;
- return false;
- }
+ /* if it's already processed - no need to continue */
+ for (auto & grp : groups)
+ if (plib::container::contains(grp, &n))
+ return true;
+ return false;
+ }
- void process_net(analog_net_t *n)
- {
- if (n->num_cons() == 0)
- return;
- /* add the net */
- groups.back().push_back(n);
- for (auto &p : n->m_core_terms)
+ void process_net(analog_net_t &n)
{
- if (p->is_type(detail::terminal_type::TERMINAL))
+ /* ignore empty nets. FIXME: print a warning message */
+ if (n.num_cons() == 0)
+ return;
+ /* add the net */
+ groups.back().push_back(&n);
+ /* process all terminals connected to this net */
+ for (auto &term : n.core_terms())
{
- terminal_t *pt = static_cast<terminal_t *>(p);
- analog_net_t *other_net = &pt->m_otherterm->net();
- if (!already_processed(other_net))
- process_net(other_net);
+ /* only process analog terminals */
+ if (term->is_type(detail::terminal_type::TERMINAL))
+ {
+ auto *pt = static_cast<terminal_t *>(term);
+ /* check the connected terminal */
+ analog_net_t &connected_net = pt->connected_terminal()->net();
+ if (!already_processed(connected_net))
+ process_net(connected_net);
+ }
}
}
- }
- void run(netlist_t &netlist)
- {
- for (auto & net : netlist.m_nets)
+ void run(netlist_state_t &netlist)
{
- netlist.log().debug("processing {1}\n", net->name());
- if (!net->isRailNet() && net->num_cons() > 0)
+ for (auto & net : netlist.nets())
{
- netlist.log().debug(" ==> not a rail net\n");
- /* Must be an analog net */
- analog_net_t *n = static_cast<analog_net_t *>(net.get());
- if (!already_processed(n))
+ netlist.log().debug("processing {1}\n", net->name());
+ if (!net->isRailNet() && net->num_cons() > 0)
{
- groups.push_back(analog_net_t::list_t());
- process_net(n);
+ netlist.log().debug(" ==> not a rail net\n");
+ /* Must be an analog net */
+ auto &n = *static_cast<analog_net_t *>(net.get());
+ if (!already_processed(n))
+ {
+ groups.emplace_back(analog_net_t::list_t());
+ process_net(n);
+ }
}
}
}
- }
- std::vector<analog_net_t::list_t> groups;
-};
+ std::vector<analog_net_t::list_t> groups;
+ };
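net_splitter above is a plain connected-component walk: every non-rail analog net seeds a group and process_net() follows TERMINAL connections recursively until nothing unvisited is reachable, so each group becomes one independent matrix for post_start() to hand to a solver. A compact sketch of the same idea on a generic adjacency list (hypothetical types, not the netlist classes):

```
// Sketch: split an undirected graph into connected components, mirroring what
// net_splitter does for non-rail analog nets. Hypothetical types only.
#include <cstddef>
#include <vector>

using component = std::vector<std::size_t>;

void visit(std::size_t n, const std::vector<std::vector<std::size_t>> &adj,
		std::vector<bool> &done, component &grp)
{
	done[n] = true;
	grp.push_back(n);
	for (std::size_t m : adj[n])
		if (!done[m])
			visit(m, adj, done, grp);          // depth-first, like process_net()
}

std::vector<component> split(const std::vector<std::vector<std::size_t>> &adj)
{
	std::vector<bool> done(adj.size(), false); // rail nets would be pre-marked here
	std::vector<component> groups;
	for (std::size_t n = 0; n < adj.size(); n++)
		if (!done[n])
		{
			groups.emplace_back();
			visit(n, adj, done, groups.back());
		}
	return groups;
}
```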
-void NETLIB_NAME(solver)::post_start()
-{
- const bool use_specific = true;
-
- m_params.m_pivot = m_pivot();
- m_params.m_accuracy = m_accuracy();
- /* FIXME: Throw when negative */
- m_params.m_gs_loops = static_cast<unsigned>(m_gs_loops());
- m_params.m_nr_loops = static_cast<unsigned>(m_nr_loops());
- m_params.m_nr_recalc_delay = netlist_time::from_double(m_nr_recalc_delay());
- m_params.m_dynamic_lte = m_dynamic_lte();
- m_params.m_gs_sor = m_gs_sor();
-
- m_params.m_min_timestep = m_dynamic_min_ts();
- m_params.m_dynamic_ts = (m_dynamic_ts() == 1 ? true : false);
- m_params.m_max_timestep = netlist_time::from_double(1.0 / m_freq()).as_double();
-
- if (m_params.m_dynamic_ts)
+ void NETLIB_NAME(solver)::post_start()
{
- m_params.m_max_timestep *= 1;//NL_FCONST(1000.0);
- }
- else
- {
- m_params.m_min_timestep = m_params.m_max_timestep;
- }
+ m_params.m_pivot = m_pivot();
+ m_params.m_accuracy = m_accuracy();
+ /* FIXME: Throw when negative */
+ m_params.m_gs_loops = static_cast<unsigned>(m_gs_loops());
+ m_params.m_nr_loops = static_cast<unsigned>(m_nr_loops());
+ m_params.m_nr_recalc_delay = netlist_time::from_double(m_nr_recalc_delay());
+ m_params.m_dynamic_lte = m_dynamic_lte();
+ m_params.m_gs_sor = m_gs_sor();
+
+ m_params.m_min_timestep = m_dynamic_min_ts();
+ m_params.m_dynamic_ts = (m_dynamic_ts() == 1 ? true : false);
+ m_params.m_max_timestep = netlist_time::from_double(1.0 / m_freq()).as_double();
- //m_params.m_max_timestep = std::max(m_params.m_max_timestep, m_params.m_max_timestep::)
+ m_params.m_use_gabs = m_use_gabs();
+ m_params.m_use_linear_prediction = m_use_linear_prediction();
+
+
+ if (m_params.m_dynamic_ts)
+ {
+ m_params.m_max_timestep *= 1;//NL_FCONST(1000.0);
+ }
+ else
+ {
+ m_params.m_min_timestep = m_params.m_max_timestep;
+ }
- // Override log statistics
- pstring p = plib::util::environment("NL_STATS", "");
- if (p != "")
- m_params.m_log_stats = p.as_long();
- else
- m_params.m_log_stats = m_log_stats();
+ //m_params.m_max_timestep = std::max(m_params.m_max_timestep, m_params.m_max_timestep::)
- log().verbose("Scanning net groups ...");
- // determine net groups
+ // Override log statistics
+ pstring p = plib::util::environment("NL_STATS", "");
+ if (p != "")
+ m_params.m_log_stats = plib::pstonum<decltype(m_params.m_log_stats)>(p);
+ else
+ m_params.m_log_stats = m_log_stats();
- net_splitter splitter;
+ log().verbose("Scanning net groups ...");
+ // determine net groups
- splitter.run(netlist());
+ net_splitter splitter;
- // setup the solvers
- log().verbose("Found {1} net groups in {2} nets\n", splitter.groups.size(), netlist().m_nets.size());
- for (auto & grp : splitter.groups)
- {
- std::unique_ptr<matrix_solver_t> ms;
- std::size_t net_count = grp.size();
- pstring sname = plib::pfmt("Solver_{1}")(m_mat_solvers.size());
+ splitter.run(state());
- switch (net_count)
+ // setup the solvers
+ log().verbose("Found {1} net groups in {2} nets\n", splitter.groups.size(), state().nets().size());
+ for (auto & grp : splitter.groups)
{
-#if 1
- case 1:
- if (use_specific)
- ms = plib::make_unique<matrix_solver_direct1_t>(netlist(), sname, &m_params);
- else
- ms = create_solver<1,1>(1, sname);
- break;
- case 2:
- if (use_specific)
- ms = plib::make_unique<matrix_solver_direct2_t>(netlist(), sname, &m_params);
- else
- ms = create_solver<2,2>(2, sname);
- break;
-#if 0
- case 3:
- ms = create_solver<3,3>(3, sname);
- break;
- case 4:
- ms = create_solver<4,4>(4, sname);
- break;
- case 5:
- ms = create_solver<5,5>(5, sname);
- break;
- case 6:
- ms = create_solver<6,6>(6, sname);
- break;
- case 7:
- ms = create_solver<7,7>(7, sname);
- break;
- case 8:
- ms = create_solver<8,8>(8, sname);
- break;
- case 9:
- ms = create_solver<9,9>(9, sname);
- break;
- case 10:
- ms = create_solver<10,10>(10, sname);
- break;
- case 11:
- ms = create_solver<11,11>(11, sname);
- break;
- case 12:
- ms = create_solver<12,12>(12, sname);
- break;
- case 15:
- ms = create_solver<15,15>(15, sname);
- break;
- case 31:
- ms = create_solver<31,31>(31, sname);
- break;
- case 35:
- ms = create_solver<35,35>(35, sname);
- break;
- case 43:
- ms = create_solver<43,43>(43, sname);
- break;
- case 49:
- ms = create_solver<49,49>(49, sname);
- break;
-#endif
-#if 0
- case 87:
- ms = create_solver<87,87>(87, sname);
- break;
-#endif
-#endif
- default:
- log().warning(MW_1_NO_SPECIFIC_SOLVER, net_count);
- if (net_count <= 8)
- {
- ms = create_solver<0, 8>(net_count, sname);
- }
- else if (net_count <= 16)
- {
- ms = create_solver<0,16>(net_count, sname);
- }
- else if (net_count <= 32)
- {
- ms = create_solver<0,32>(net_count, sname);
- }
- else
- if (net_count <= 64)
- {
- ms = create_solver<0,64>(net_count, sname);
- }
- else
- if (net_count <= 128)
- {
- ms = create_solver<0,128>(net_count, sname);
- }
- else
- {
- log().fatal(MF_1_NETGROUP_SIZE_EXCEEDED_1, 128);
- ms = nullptr; /* tease compilers */
- }
+ pool_owned_ptr<matrix_solver_t> ms;
+ std::size_t net_count = grp.size();
+ pstring sname = plib::pfmt("Solver_{1}")(m_mat_solvers.size());
- break;
- }
+ switch (net_count)
+ {
+ #if 1
+ case 1:
+ ms = pool().make_poolptr<matrix_solver_direct1_t<double>>(state(), sname, &m_params);
+ break;
+ case 2:
+ ms = pool().make_poolptr<matrix_solver_direct2_t<double>>(state(), sname, &m_params);
+ break;
+ case 3:
+ ms = create_solver<double, 3>(3, sname);
+ break;
+ case 4:
+ ms = create_solver<double, 4>(4, sname);
+ break;
+ case 5:
+ ms = create_solver<double, 5>(5, sname);
+ break;
+ case 6:
+ ms = create_solver<double, 6>(6, sname);
+ break;
+ case 7:
+ ms = create_solver<double, 7>(7, sname);
+ break;
+ case 8:
+ ms = create_solver<double, 8>(8, sname);
+ break;
+ case 9:
+ ms = create_solver<double, 9>(9, sname);
+ break;
+ case 10:
+ ms = create_solver<double, 10>(10, sname);
+ break;
+ #if 0
+ case 11:
+ ms = create_solver<double, 11>(11, sname);
+ break;
+ case 12:
+ ms = create_solver<double, 12>(12, sname);
+ break;
+ case 15:
+ ms = create_solver<double, 15>(15, sname);
+ break;
+ case 31:
+ ms = create_solver<double, 31>(31, sname);
+ break;
+ case 35:
+ ms = create_solver<double, 35>(35, sname);
+ break;
+ case 43:
+ ms = create_solver<double, 43>(43, sname);
+ break;
+ case 49:
+ ms = create_solver<double, 49>(49, sname);
+ break;
+ #endif
+ #if 1
+ case 86:
+ ms = create_solver<double,86>(86, sname);
+ break;
+ #endif
+ #endif
+ default:
+ log().warning(MW_1_NO_SPECIFIC_SOLVER, net_count);
+ if (net_count <= 8)
+ {
+ ms = create_solver<double, -8>(net_count, sname);
+ }
+ else if (net_count <= 16)
+ {
+ ms = create_solver<double, -16>(net_count, sname);
+ }
+ else if (net_count <= 32)
+ {
+ ms = create_solver<double, -32>(net_count, sname);
+ }
+ else
+ if (net_count <= 64)
+ {
+ ms = create_solver<double, -64>(net_count, sname);
+ }
+ else
+ if (net_count <= 128)
+ {
+ ms = create_solver<double, -128>(net_count, sname);
+ }
+ else
+ {
+ log().fatal(MF_1_NETGROUP_SIZE_EXCEEDED_1, 128);
+ return; /* tease compilers */
+ }
+ break;
+ }
- // FIXME ...
- ms->setup(grp);
+ // FIXME ...
+ ms->setup(grp);
- log().verbose("Solver {1}", ms->name());
- log().verbose(" ==> {2} nets", grp.size());
- log().verbose(" has {1} elements", ms->has_dynamic_devices() ? "dynamic" : "no dynamic");
- log().verbose(" has {1} elements", ms->has_timestep_devices() ? "timestep" : "no timestep");
- for (auto &n : grp)
- {
- log().verbose("Net {1}", n->name());
- for (const auto &pcore : n->m_core_terms)
+ log().verbose("Solver {1}", ms->name());
+ log().verbose(" ==> {2} nets", grp.size());
+ log().verbose(" has {1} elements", ms->has_dynamic_devices() ? "dynamic" : "no dynamic");
+ log().verbose(" has {1} elements", ms->has_timestep_devices() ? "timestep" : "no timestep");
+ for (auto &n : grp)
{
- log().verbose(" {1}", pcore->name());
+ log().verbose("Net {1}", n->name());
+ for (const auto &pcore : n->core_terms())
+ {
+ log().verbose(" {1}", pcore->name());
+ }
}
- }
- m_mat_solvers.push_back(std::move(ms));
+ m_mat_solvers_all.push_back(ms.get());
+ if (ms->has_timestep_devices())
+ m_mat_solvers_timestepping.push_back(ms.get());
+
+ m_mat_solvers.emplace_back(std::move(ms));
+ }
}
-}
-void NETLIB_NAME(solver)::create_solver_code(std::map<pstring, pstring> &mp)
-{
- for (auto & s : m_mat_solvers)
+ void NETLIB_NAME(solver)::create_solver_code(std::map<pstring, pstring> &mp)
{
- auto r = s->create_solver_code();
- mp[r.first] = r.second; // automatically overwrites identical names
+ for (auto & s : m_mat_solvers)
+ {
+ auto r = s->create_solver_code();
+ mp[r.first] = r.second; // automatically overwrites identical names
+ }
}
-}
- NETLIB_DEVICE_IMPL(solver)
+ NETLIB_DEVICE_IMPL(solver, "SOLVER", "FREQ")
- } //namespace devices
+} // namespace devices
} // namespace netlist
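create_solver_x<FT, SIZE>() above resolves a runtime group size to a template instantiation: positive SIZE values step down one at a time toward an exact fixed-size match, negative SIZE values halve toward the smallest power-of-two bound that still covers the requested size, mirroring the -8/-16/.../-128 cases in post_start(). A reduced sketch of that dispatch pattern (illustrative names; C++17 if constexpr is used here for brevity where the original relies on ordinary branches):

```
// Sketch of the recursive compile-time dispatch in create_solver_x():
// positive SIZE requests an exact fixed dimension, negative SIZE a dynamically
// sized solver bounded by |SIZE|. Prints the chosen instantiation instead of
// constructing a solver; names are illustrative only.
#include <cstddef>
#include <cstdio>

template <int SIZE>
void make_solver(std::size_t size)
{
	std::printf("instantiating SIZE=%d for runtime size=%zu\n", SIZE, size);
}

template <int SIZE>
void dispatch(std::size_t size)
{
	if constexpr (SIZE > 0)
	{
		if (size == static_cast<std::size_t>(SIZE))
			make_solver<SIZE>(size);        // exact fixed-size match
		else
			dispatch<SIZE - 1>(size);       // try the next smaller exact size
	}
	else
	{
		if (size * 2 > static_cast<std::size_t>(-SIZE))
			make_solver<SIZE>(size);        // bound |SIZE| is the tightest that fits
		else
			dispatch<SIZE / 2>(size);       // halve the bound and retry
	}
}
```

For example, dispatch<-128>(40) lands on the -64 instantiation, the same bound post_start() picks by hand for groups of up to 64 nets.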
diff --git a/src/lib/netlist/solver/nld_solver.h b/src/lib/netlist/solver/nld_solver.h
index 986d14f401a..c9ec967a72a 100644
--- a/src/lib/netlist/solver/nld_solver.h
+++ b/src/lib/netlist/solver/nld_solver.h
@@ -8,11 +8,13 @@
#ifndef NLD_SOLVER_H_
#define NLD_SOLVER_H_
-#include <map>
-
-#include "../nl_base.h"
-#include "../plib/pstream.h"
+#include "netlist/nl_base.h"
#include "nld_matrix_solver.h"
+#include "plib/pstream.h"
+
+#include <map>
+#include <memory>
+#include <vector>
//#define ATTR_ALIGNED(N) __attribute__((aligned(N)))
#define ATTR_ALIGNED(N) ATTR_ALIGN
@@ -23,89 +25,97 @@
namespace netlist
{
- namespace devices
- {
-class NETLIB_NAME(solver);
-
-
-class matrix_solver_t;
-
-NETLIB_OBJECT(solver)
+namespace devices
{
- NETLIB_CONSTRUCTOR(solver)
- , m_fb_step(*this, "FB_step")
- , m_Q_step(*this, "Q_step")
- , m_freq(*this, "FREQ", 48000.0)
-
- /* iteration parameters */
- , m_gs_sor(*this, "SOR_FACTOR", 1.059)
- , m_method(*this, "METHOD", "MAT_CR")
- , m_accuracy(*this, "ACCURACY", 1e-7)
- , m_gs_loops(*this, "GS_LOOPS",9) // Gauss-Seidel loops
-
- /* general parameters */
- , m_gmin(*this, "GMIN", NETLIST_GMIN_DEFAULT)
- , m_pivot(*this, "PIVOT", 0) // use pivoting - on supported solvers
- , m_nr_loops(*this, "NR_LOOPS", 250) // Newton-Raphson loops
- , m_nr_recalc_delay(*this, "NR_RECALC_DELAY", NLTIME_FROM_NS(10).as_double()) // Delay to next solve attempt if nr loops exceeded
- , m_parallel(*this, "PARALLEL", 0)
-
- /* automatic time step */
- , m_dynamic_ts(*this, "DYNAMIC_TS", 0)
- , m_dynamic_lte(*this, "DYNAMIC_LTE", 1e-5) // diff/timestep
- , m_dynamic_min_ts(*this, "DYNAMIC_MIN_TIMESTEP", 1e-6) // nl_double timestep resolution
-
- , m_log_stats(*this, "LOG_STATS", 0) // log statistics on shutdown
- , m_params()
- {
-		// internal stuff
-
- connect(m_fb_step, m_Q_step);
- }
-
- virtual ~NETLIB_NAME(solver)() override;
+ class NETLIB_NAME(solver);
- void post_start();
- void stop();
+ class matrix_solver_t;
- inline nl_double gmin() { return m_gmin(); }
-
- void create_solver_code(std::map<pstring, pstring> &mp);
-
- NETLIB_UPDATEI();
- NETLIB_RESETI();
- // NETLIB_UPDATE_PARAMI();
-
-protected:
- logic_input_t m_fb_step;
- logic_output_t m_Q_step;
-
- param_double_t m_freq;
- param_double_t m_gs_sor;
- param_str_t m_method;
- param_double_t m_accuracy;
- param_int_t m_gs_loops;
- param_double_t m_gmin;
- param_logic_t m_pivot;
- param_int_t m_nr_loops;
- param_double_t m_nr_recalc_delay;
- param_int_t m_parallel;
- param_logic_t m_dynamic_ts;
- param_double_t m_dynamic_lte;
- param_double_t m_dynamic_min_ts;
-
- param_logic_t m_log_stats;
-
- std::vector<std::unique_ptr<matrix_solver_t>> m_mat_solvers;
-private:
-
- solver_parameters_t m_params;
-
- template <std::size_t m_N, std::size_t storage_N>
- std::unique_ptr<matrix_solver_t> create_solver(std::size_t size, const pstring &solvername);
-};
-
- } //namespace devices
+ NETLIB_OBJECT(solver)
+ {
+ NETLIB_CONSTRUCTOR(solver)
+ , m_fb_step(*this, "FB_step")
+ , m_Q_step(*this, "Q_step")
+ , m_freq(*this, "FREQ", 48000.0)
+
+ /* iteration parameters */
+ , m_gs_sor(*this, "SOR_FACTOR", 1.059)
+ , m_method(*this, "METHOD", "MAT_CR")
+ , m_accuracy(*this, "ACCURACY", 1e-7)
+ , m_gs_loops(*this, "GS_LOOPS", 9) // Gauss-Seidel loops
+
+ /* general parameters */
+ , m_gmin(*this, "GMIN", 1e-9)
+ , m_pivot(*this, "PIVOT", false) // use pivoting - on supported solvers
+ , m_nr_loops(*this, "NR_LOOPS", 250) // Newton-Raphson loops
+ , m_nr_recalc_delay(*this, "NR_RECALC_DELAY", NLTIME_FROM_NS(10).as_double()) // Delay to next solve attempt if nr loops exceeded
+ , m_parallel(*this, "PARALLEL", 0)
+
+ /* automatic time step */
+ , m_dynamic_ts(*this, "DYNAMIC_TS", false)
+ , m_dynamic_lte(*this, "DYNAMIC_LTE", 1e-5) // diff/timestep
+ , m_dynamic_min_ts(*this, "DYNAMIC_MIN_TIMESTEP", 1e-6) // nl_double timestep resolution
+
+ /* special */
+ , m_use_gabs(*this, "USE_GABS", true)
+		, m_use_linear_prediction(*this, "USE_LINEAR_PREDICTION", false) // savings are eaten up by effort
+
+ , m_log_stats(*this, "LOG_STATS", true) // log statistics on shutdown
+ , m_params()
+ {
+			// internal stuff
+
+ connect(m_fb_step, m_Q_step);
+ }
+
+ void post_start();
+ void stop();
+
+ nl_double gmin() const { return m_gmin(); }
+
+ void create_solver_code(std::map<pstring, pstring> &mp);
+
+ NETLIB_UPDATEI();
+ NETLIB_RESETI();
+ // NETLIB_UPDATE_PARAMI();
+
+ private:
+ logic_input_t m_fb_step;
+ logic_output_t m_Q_step;
+
+ param_double_t m_freq;
+ param_double_t m_gs_sor;
+ param_str_t m_method;
+ param_double_t m_accuracy;
+ param_int_t m_gs_loops;
+ param_double_t m_gmin;
+ param_logic_t m_pivot;
+ param_int_t m_nr_loops;
+ param_double_t m_nr_recalc_delay;
+ param_int_t m_parallel;
+ param_logic_t m_dynamic_ts;
+ param_double_t m_dynamic_lte;
+ param_double_t m_dynamic_min_ts;
+
+ param_logic_t m_use_gabs;
+ param_logic_t m_use_linear_prediction;
+
+ param_logic_t m_log_stats;
+
+ std::vector<pool_owned_ptr<matrix_solver_t>> m_mat_solvers;
+ std::vector<matrix_solver_t *> m_mat_solvers_all;
+ std::vector<matrix_solver_t *> m_mat_solvers_timestepping;
+
+ solver_parameters_t m_params;
+
+ template <typename FT, int SIZE>
+ pool_owned_ptr<matrix_solver_t> create_solver(std::size_t size, const pstring &solvername);
+
+ template <typename FT, int SIZE>
+ pool_owned_ptr<matrix_solver_t> create_solver_x(std::size_t size, const pstring &solvername);
+ };
+
+} //namespace devices
} // namespace netlist
#endif /* NLD_SOLVER_H_ */
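The revised header keeps one owning container (m_mat_solvers) plus two non-owning views (m_mat_solvers_all and m_mat_solvers_timestepping), so the per-step loop only visits solvers that actually have timestep devices. Below is a minimal sketch of that owning-list / filtered-view split, using std::unique_ptr as a stand-in for pool_owned_ptr and hypothetical solver / solver_host types.

// Sketch of the owning-list / filtered-view split (hypothetical types).
#include <memory>
#include <vector>

struct solver
{
	virtual ~solver() = default;
	virtual bool has_timestep_devices() const = 0;
	virtual void step(double dt) = 0;
};

class solver_host
{
public:
	void add(std::unique_ptr<solver> s)
	{
		m_all.push_back(s.get());
		if (s->has_timestep_devices())
			m_timestepping.push_back(s.get());
		m_owned.emplace_back(std::move(s));   // ownership transferred last
	}

	void step_all(double dt)
	{
		// Only solvers that actually have timestep devices are visited.
		for (auto *s : m_timestepping)
			s->step(dt);
	}

private:
	std::vector<std::unique_ptr<solver>> m_owned;   // owns all solvers
	std::vector<solver *> m_all;                    // non-owning view
	std::vector<solver *> m_timestepping;           // filtered view
};

In the sketch the raw-pointer views stay valid because the owning vector outlives them and is only appended to during setup.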
diff --git a/src/lib/netlist/solver/vector_base.h b/src/lib/netlist/solver/vector_base.h
deleted file mode 100644
index 28f5fa0dd44..00000000000
--- a/src/lib/netlist/solver/vector_base.h
+++ /dev/null
@@ -1,144 +0,0 @@
-// license:GPL-2.0+
-// copyright-holders:Couriersud
-/*
- * vector_base.h
- *
- * Base vector operations
- *
- */
-
-#ifndef VECTOR_BASE_H_
-#define VECTOR_BASE_H_
-
-#include <algorithm>
-#include "../plib/pconfig.h"
-
-#if 0
-template <unsigned storage_N>
-struct pvector
-{
- pvector(unsigned size)
- : m_N(size) { }
-
- unsigned size() {
- if (storage_N)
- }
-
- double m_V[storage_N];
-private:
- unsigned m_N;
-};
-#endif
-
-#if !defined(__clang__) && !defined(_MSC_VER) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 6))
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
-
-template<typename T, std::size_t N>
-inline static void vec_set (const std::size_t n, const T scalar, T (& RESTRICT v)[N])
-{
- if (n != N)
- for ( std::size_t i = 0; i < n; i++ )
- v[i] = scalar;
- else
- for ( std::size_t i = 0; i < N; i++ )
- v[i] = scalar;
-}
-
-template<typename T, std::size_t N>
-inline static T vec_mult (const std::size_t n, const T (& RESTRICT v1)[N], const T (& RESTRICT v2)[N] )
-{
- T value = 0.0;
- if (n != N)
- for ( std::size_t i = 0; i < n; i++ )
- value += v1[i] * v2[i];
- else
- for ( std::size_t i = 0; i < N; i++ )
- value += v1[i] * v2[i];
- return value;
-}
-
-template<typename T, std::size_t N>
-inline static T vec_mult2 (const std::size_t n, const T (& RESTRICT v)[N])
-{
- T value = 0.0;
- if (n != N)
- for ( std::size_t i = 0; i < n; i++ )
- value += v[i] * v[i];
- else
- for ( std::size_t i = 0; i < N; i++ )
- value += v[i] * v[i];
- return value;
-}
-
-template<typename T, std::size_t N>
-inline static void vec_mult_scalar (const std::size_t n, const T (& RESTRICT v)[N], const T & scalar, T (& RESTRICT result)[N])
-{
- if (n != N)
- for ( std::size_t i = 0; i < n; i++ )
- result[i] = scalar * v[i];
- else
- for ( std::size_t i = 0; i < N; i++ )
- result[i] = scalar * v[i];
-}
-
-template<typename T, std::size_t N>
-inline static void vec_add_mult_scalar (const std::size_t n, const T (& RESTRICT v)[N], const T scalar, T (& RESTRICT result)[N])
-{
- if (n != N)
- for ( std::size_t i = 0; i < n; i++ )
- result[i] = result[i] + scalar * v[i];
- else
- for ( std::size_t i = 0; i < N; i++ )
- result[i] = result[i] + scalar * v[i];
-}
-
-template<typename T>
-inline static void vec_add_mult_scalar_p(const std::size_t & n, const T * RESTRICT v, const T scalar, T * RESTRICT result)
-{
- for ( std::size_t i = 0; i < n; i++ )
- result[i] += scalar * v[i];
-}
-
-inline static void vec_add_ip(const std::size_t n, const double * RESTRICT v, double * RESTRICT result)
-{
- for ( std::size_t i = 0; i < n; i++ )
- result[i] += v[i];
-}
-
-template<typename T, std::size_t N>
-inline void vec_sub(const std::size_t n, const T (& RESTRICT v1)[N], const T (& RESTRICT v2)[N], T (& RESTRICT result)[N])
-{
- if (n != N)
- for ( std::size_t i = 0; i < n; i++ )
- result[i] = v1[i] - v2[i];
- else
- for ( std::size_t i = 0; i < N; i++ )
- result[i] = v1[i] - v2[i];
-}
-
-template<typename T, std::size_t N>
-inline void vec_scale(const std::size_t n, T (& RESTRICT v)[N], const T scalar)
-{
- if (n != N)
- for ( std::size_t i = 0; i < n; i++ )
- v[i] = scalar * v[i];
- else
- for ( std::size_t i = 0; i < N; i++ )
- v[i] = scalar * v[i];
-}
-
-inline double vec_maxabs(const std::size_t n, const double * RESTRICT v)
-{
- double ret = 0.0;
- for ( std::size_t i = 0; i < n; i++ )
- ret = std::max(ret, std::abs(v[i]));
-
- return ret;
-}
-#if !defined(__clang__) && !defined(_MSC_VER) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 6))
-#pragma GCC diagnostic pop
-#endif
-
-#endif /* MAT_CR_H_ */
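The deleted vector_base.h helpers all follow the same dual-loop pattern: one loop over the runtime count n and a second loop over the compile-time array extent N, presumably so the common fixed-size case gives the optimizer a known trip count to unroll or vectorize. A standalone sketch of that pattern follows; dot is a hypothetical free function, not part of the remaining netlist sources.

// Sketch of the dual-path loop pattern used by the deleted helpers.
#include <cstddef>

template <typename T, std::size_t N>
T dot(const std::size_t n, const T (&v1)[N], const T (&v2)[N])
{
	T value = 0.0;
	if (n != N)
	{
		for (std::size_t i = 0; i < n; i++)   // runtime trip count
			value += v1[i] * v2[i];
	}
	else
	{
		for (std::size_t i = 0; i < N; i++)   // compile-time trip count
			value += v1[i] * v2[i];
	}
	return value;
}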