author    | 2019-01-30 02:28:55 +0100
----------|--------------------------
committer | 2019-01-31 01:03:35 +0100
commit    | 1513c777b47d19c7df99ba43142d7a334392a0b5 (patch)
tree      | 1698a72b6f570e579d9fe409ecb87c18108d5f1a /src/lib/netlist/plib
parent    | 3d09dec7b80bfaaed75d04ace63fcdc7a289f68a (diff)
netlist: Refactoring continues ... plus some innovations (nw)
There is still some work ahead to separate the interface from the execution.
This is a preparation for easily switching to another sparse matrix format
which may be better suited for parallel processing.
On the linear algebra side there are some nice additions:
- Two additional sort modes: one tries to obtain an upper-left identity
matrix, the other prefers a diagonal band-matrix structure. Both deliver
slightly better performance than plain sorting.
- Parallel execution analysis for Gaussian elimination and LU solve.
This determines which operations can be performed independently
(a sketch of the idea follows below).
None of this is really useful right now. The matrix sizes are below
100 nets. I estimate we need at least four times that before the overhead
of CPU parallel processing pays off; for GPUs, add another order of
magnitude. Still, it is nice to have code which may scale.
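The parallel execution analysis mentioned above is essentially level scheduling on the fill pattern: rows whose elimination has no mutual dependency are grouped into one level and can be processed concurrently. The sketch below is written from scratch for illustration only; the committed build_parallel_gaussian_execution_scheme() in mat_cr.h additionally ensures that rows grouped into one level do not update a common target row.

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// fill[j][k] below the fill_infinity sentinel means row j has (or will get)
// an entry in column k, i.e. eliminating with pivot row k modifies row j.
std::vector<std::vector<std::size_t>> elimination_levels(
    const std::vector<std::vector<std::size_t>> &fill, std::size_t fill_infinity)
{
    const std::size_t n = fill.size();
    std::vector<std::size_t> level(n, 0);

    // Row j can only be used as a pivot after every pivot row k < j that
    // updates it has itself been fully eliminated.
    for (std::size_t k = 0; k < n; k++)
        for (std::size_t j = k + 1; j < n; j++)
            if (fill[j][k] < fill_infinity)
                level[j] = std::max(level[j], level[k] + 1);

    const std::size_t levels = n ? *std::max_element(level.begin(), level.end()) + 1 : 0;
    std::vector<std::vector<std::size_t>> groups(levels);
    for (std::size_t k = 0; k < n; k++)
        groups[level[k]].push_back(k);   // rows in one group have no direct
    return groups;                       // dependency on each other
}
```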
Diffstat (limited to 'src/lib/netlist/plib')
-rw-r--r-- | src/lib/netlist/plib/gmres.h      | 444
-rw-r--r-- | src/lib/netlist/plib/mat_cr.h     | 518
-rw-r--r-- | src/lib/netlist/plib/pomp.h       |  10
-rw-r--r-- | src/lib/netlist/plib/putil.h      |   4
-rw-r--r-- | src/lib/netlist/plib/vector_ops.h | 115
5 files changed, 1088 insertions, 3 deletions
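Before the diff itself, a hypothetical sketch of how the two largest new headers are meant to fit together: an ILU-preconditioned operator feeding the GMRES solver. The function name, the variable names and the tiny diagonal test system are made up, and the code assumes the netlist build environment (gmres.h relies on netlist-wide definitions such as NL_FCONST); the actual solvers in src/lib/netlist/solver drive these classes through their own matrix assembly.

```cpp
#include "plib/gmres.h"

#include <cstddef>
#include <vector>

void gmres_example()
{
    constexpr int SIZE = 4;                // compile-time system size
    const std::size_t n = 4;

    using mat_t = plib::matrix_compressed_rows_t<double, SIZE>;

    // fill[i][j] == 0 marks a structurally non-zero element,
    // FILL_INFINITY marks a structural zero.
    std::vector<std::vector<std::size_t>> fill(n,
        std::vector<std::size_t>(n, mat_t::FILL_INFINITY));
    for (std::size_t i = 0; i < n; i++)
        fill[i][i] = 0;                    // diagonal-only pattern, for illustration

    plib::mat_precondition_ILU<double, SIZE> ops(n);
    ops.build(fill);                       // sets up both the matrix and its ILU copy

    for (std::size_t i = 0; i < n; i++)
        ops.m_mat.set(i, i, 2.0);          // assemble A = 2 * I

    plib::parray<double, SIZE> x(n);       // initial guess, becomes the solution
    plib::parray<double, SIZE> rhs(n);
    for (std::size_t i = 0; i < n; i++)
    {
        x[i] = 0.0;
        rhs[i] = 1.0;
    }

    plib::gmres_t<double, SIZE> solver(n);
    const std::size_t iterations = solver.solve(ops, x, rhs, 100, 1e-6);
    (void)iterations;                      // x now approximates A^-1 * rhs, i.e. 0.5 per entry
}
```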
diff --git a/src/lib/netlist/plib/gmres.h b/src/lib/netlist/plib/gmres.h new file mode 100644 index 00000000000..7a24de8e03f --- /dev/null +++ b/src/lib/netlist/plib/gmres.h @@ -0,0 +1,444 @@ +// license:GPL-2.0+ +// copyright-holders:Couriersud +/* + * gmres.h + * + */ + +#ifndef PLIB_GMRES_H_ +#define PLIB_GMRES_H_ + +#include "pconfig.h" +#include "mat_cr.h" +#include "parray.h" +#include "vector_ops.h" + +#include <algorithm> +#include <cmath> + + +namespace plib +{ + + template <typename FT, int SIZE> + struct mat_precondition_ILU + { + typedef plib::matrix_compressed_rows_t<FT, SIZE> mat_type; + + mat_precondition_ILU(std::size_t size, int ilu_scale = 4 + , std::size_t bw = plib::matrix_compressed_rows_t<FT, SIZE>::FILL_INFINITY) + : m_mat(static_cast<typename mat_type::index_type>(size)) + , m_LU(static_cast<typename mat_type::index_type>(size)) + , m_use_iLU_preconditioning(ilu_scale >= 0) + , m_ILU_scale(static_cast<std::size_t>(ilu_scale)) + , m_band_width(bw) + { + } + + template <typename M> + void build(M &fill) + { + m_mat.build_from_fill_mat(fill, 0); + if (m_use_iLU_preconditioning) + { + m_LU.gaussian_extend_fill_mat(fill); + m_LU.build_from_fill_mat(fill, m_ILU_scale, m_band_width); // ILU(2) + //m_LU.build_from_fill_mat(fill, 9999, 20); // Band matrix width 20 + } + } + + + template<typename R, typename V> + void calc_rhs(R &rhs, const V &v) + { + m_mat.mult_vec(rhs, v); + } + + void precondition() + { + if (m_use_iLU_preconditioning) + { + if (m_ILU_scale < 1) + m_LU.raw_copy_from(m_mat); + else + m_LU.reduction_copy_from(m_mat); + m_LU.incomplete_LU_factorization(); + } + } + + template<typename V> + void solve_LU_inplace(V &v) + { + if (m_use_iLU_preconditioning) + { + m_LU.solveLUx(v); + } + } + + mat_type m_mat; + mat_type m_LU; + bool m_use_iLU_preconditioning; + std::size_t m_ILU_scale; + std::size_t m_band_width; + }; + + template <typename FT, int SIZE> + struct mat_precondition_diag + { + mat_precondition_diag(std::size_t size) + : m_mat(size) + , m_diag(size) + , m_use_iLU_preconditioning(true) + { + } + + template <typename M> + void build(M &fill) + { + m_mat.build_from_fill_mat(fill, 0); + } + + template<typename R, typename V> + void calc_rhs(R &rhs, const V &v) + { + m_mat.mult_vec(rhs, v); + } + + void precondition() + { + if (m_use_iLU_preconditioning) + { + for (std::size_t i = 0; i< m_diag.size(); i++) + { + m_diag[i] = 1.0 / m_mat.A[m_mat.diag[i]]; + } + } + } + + template<typename V> + void solve_LU_inplace(V &v) + { + if (m_use_iLU_preconditioning) + { + for (std::size_t i = 0; i< m_diag.size(); i++) + v[i] = v[i] * m_diag[i]; + } + } + + plib::matrix_compressed_rows_t<FT, SIZE> m_mat; + plib::parray<FT, SIZE> m_diag; + bool m_use_iLU_preconditioning; + }; + + /* FIXME: hardcoding RESTART to 20 becomes an issue on very large + * systems. + */ + template <typename FT, int SIZE, int RESTART = 20> + struct gmres_t + { + public: + + typedef FT float_type; + // FIXME: dirty hack to make this compile + static constexpr const std::size_t storage_N = plib::sizeabs<FT, SIZE>::ABS(); + + gmres_t(std::size_t size) + : m_use_more_precise_stop_condition(false) + , residual(size) + , Ax(size) + , m_size(size) + { + } + + void givens_mult( const FT c, const FT s, FT & g0, FT & g1 ) + { + const FT g0_last(g0); + + g0 = c * g0 - s * g1; + g1 = s * g0_last + c * g1; + } + + std::size_t size() const { return (SIZE<=0) ? 
m_size : static_cast<std::size_t>(SIZE); } + + template <typename OPS, typename VT, typename VRHS> + std::size_t solve(OPS &ops, VT &x, const VRHS & rhs, const std::size_t itr_max, float_type accuracy) + { + /*------------------------------------------------------------------------- + * The code below was inspired by code published by John Burkardt under + * the LPGL here: + * + * http://people.sc.fsu.edu/~jburkardt/cpp_src/mgmres/mgmres.html + * + * The code below was completely written from scratch based on the pseudo code + * found here: + * + * http://de.wikipedia.org/wiki/GMRES-Verfahren + * + * The Algorithm itself is described in + * + * Yousef Saad, + * Iterative Methods for Sparse Linear Systems, + * Second Edition, + * SIAM, 20003, + * ISBN: 0898715342, + * LC: QA188.S17. + * + *------------------------------------------------------------------------*/ + + std::size_t itr_used = 0; + double rho_delta = 0.0; + + const std::size_t n = size(); + + ops.precondition(); + + if (m_use_more_precise_stop_condition) + { + /* derive residual for a given delta x + * + * LU y = A dx + * + * ==> rho / accuracy = sqrt(y * y) + * + * This approach will approximate the iterative stop condition + * based |xnew - xold| pretty precisely. But it is slow, or expressed + * differently: The invest doesn't pay off. + */ + + vec_set_scalar(n, residual, accuracy); + ops.calc_rhs(Ax, residual); + + ops.solve_LU_inplace(Ax); + + const float_type rho_to_accuracy = std::sqrt(vec_mult2<FT>(n, Ax)) / accuracy; + + rho_delta = accuracy * rho_to_accuracy; + //printf("%e %e\n", rho_delta, accuracy * std::sqrt(static_cast<FT>(n))); + } + else + rho_delta = accuracy * std::sqrt(static_cast<FT>(n)); + + /* + * Using + * + * vec_set(n, x, rhs); + * ops.solve_LU_inplace(x); + * + * to get a starting point for x degrades convergence speed compared + * to using the last solution for x. 
+ * + * LU x = b; solve for x; + * + */ + + while (itr_used < itr_max) + { + std::size_t last_k = RESTART; + float_type rho; + + ops.calc_rhs(Ax, x); + + vec_sub(n, rhs, Ax, residual); + + ops.solve_LU_inplace(residual); + + rho = std::sqrt(vec_mult2<FT>(n, residual)); + + if (rho < rho_delta) + return itr_used + 1; + + vec_set_scalar(RESTART+1, m_g, NL_FCONST(0.0)); + m_g[0] = rho; + + //for (std::size_t i = 0; i < mr + 1; i++) + // vec_set_scalar(mr, m_ht[i], NL_FCONST(0.0)); + + vec_mult_scalar(n, residual, NL_FCONST(1.0) / rho, m_v[0]); + + for (std::size_t k = 0; k < RESTART; k++) + { + const std::size_t kp1 = k + 1; + + ops.calc_rhs(m_v[kp1], m_v[k]); + ops.solve_LU_inplace(m_v[kp1]); + + for (std::size_t j = 0; j <= k; j++) + { + m_ht[j][k] = vec_mult<float_type>(n, m_v[kp1], m_v[j]); + vec_add_mult_scalar(n, m_v[j], -m_ht[j][k], m_v[kp1]); + } + m_ht[kp1][k] = std::sqrt(vec_mult2<FT>(n, m_v[kp1])); + + if (m_ht[kp1][k] != 0.0) + vec_scale(n, m_v[kp1], NL_FCONST(1.0) / m_ht[kp1][k]); + + for (std::size_t j = 0; j < k; j++) + givens_mult(m_c[j], m_s[j], m_ht[j][k], m_ht[j+1][k]); + + const float_type mu = 1.0 / std::hypot(m_ht[k][k], m_ht[kp1][k]); + + m_c[k] = m_ht[k][k] * mu; + m_s[k] = -m_ht[kp1][k] * mu; + m_ht[k][k] = m_c[k] * m_ht[k][k] - m_s[k] * m_ht[kp1][k]; + m_ht[kp1][k] = 0.0; + + givens_mult(m_c[k], m_s[k], m_g[k], m_g[kp1]); + + rho = std::abs(m_g[kp1]); + + itr_used = itr_used + 1; + + if (rho <= rho_delta) + { + last_k = k; + break; + } + } + + if (last_k >= RESTART) + /* didn't converge within accuracy */ + last_k = RESTART - 1; + + /* Solve the system H * y = g */ + /* x += m_v[j] * m_y[j] */ + for (std::size_t i = last_k + 1; i-- > 0;) + { + double tmp = m_g[i]; + for (std::size_t j = i + 1; j <= last_k; j++) + tmp -= m_ht[i][j] * m_y[j]; + m_y[i] = tmp / m_ht[i][i]; + } + + for (std::size_t i = 0; i <= last_k; i++) + vec_add_mult_scalar(n, m_v[i], m_y[i], x); + + if (rho <= rho_delta) + break; + + } + return itr_used; + } + + private: + + bool m_use_more_precise_stop_condition; + + //typedef typename plib::mat_cr_t<FT, SIZE>::index_type mattype; + + plib::parray<float_type, SIZE> residual; + plib::parray<float_type, SIZE> Ax; + + float_type m_c[RESTART + 1]; /* mr + 1 */ + float_type m_g[RESTART + 1]; /* mr + 1 */ + float_type m_ht[RESTART + 1][RESTART]; /* (mr + 1), mr */ + float_type m_s[RESTART + 1]; /* mr + 1 */ + float_type m_y[RESTART + 1]; /* mr + 1 */ + + //plib::parray<float_type, SIZE> m_v[RESTART + 1]; /* mr + 1, n */ + float_type m_v[RESTART + 1][storage_N]; /* mr + 1, n */ + + std::size_t m_size; + + }; + + +#if 0 + /* Example of a Chebyshev iteration solver. This one doesn't work yet, + * it needs to be extended for non-symmetric matrix operation and + * depends on spectral radius estimates - which we don't have. + * + * Left here as another example. + */ + + template <typename FT, int SIZE> + struct ch_t + { + public: + + typedef FT float_type; + // FIXME: dirty hack to make this compile + static constexpr const std::size_t storage_N = plib::sizeabs<FT, SIZE>::ABS(); + + // Maximum iterations before a restart ... + static constexpr const std::size_t restart_N = (storage_N > 0 ? 20 : 0); + + ch_t(std::size_t size) + : residual(size) + , Ax(size) + , m_size(size) + { + } + + std::size_t size() const { return (SIZE<=0) ? 
m_size : static_cast<std::size_t>(SIZE); } + + template <typename OPS, typename VT, typename VRHS> + std::size_t solve(OPS &ops, VT &x0, const VRHS & rhs, const std::size_t iter_max, float_type accuracy) + { + /*------------------------------------------------------------------------- + * + * + *------------------------------------------------------------------------*/ + + ops.precondition(); + + const FT lmax = 20.0; + const FT lmin = 0.0001; + + const FT d = (lmax+lmin)/2.0; + const FT c = (lmax-lmin)/2.0; + FT alpha = 0; + FT beta = 0; + std::size_t itr_used = 0; + + plib::parray<FT, SIZE> x(size()); + plib::parray<FT, SIZE> p(size()); + + plib::vec_set(size(), x, x0); + + ops.calc_rhs(Ax, x); + vec_sub(size(), rhs, Ax, residual); + + FT rho_delta = accuracy * std::sqrt(static_cast<FT>(size())); + + rho_delta = 1e-9; + + for (int i = 0; i < iter_max; i++) + { + ops.solve_LU_inplace(residual); + if (i==0) + { + vec_set(size(), p, residual); + alpha = 2.0 / d; + } + else + { + beta = alpha * ( c / 2.0)*( c / 2.0); + alpha = 1.0 / (d - beta); + for (std::size_t k = 0; k < size(); k++) + p[k] = residual[k] + beta * p[k]; + } + plib::vec_add_mult_scalar(size(), p, alpha, x); + ops.calc_rhs(Ax, x); + plib::vec_sub(size(), rhs, Ax, residual); + FT rho = std::sqrt(plib::vec_mult2<FT>(size(), residual)); + if (rho < rho_delta) + break; + itr_used++; + } + return itr_used; + } + private: + + //typedef typename plib::mat_cr_t<FT, SIZE>::index_type mattype; + + plib::parray<float_type, SIZE> residual; + plib::parray<float_type, SIZE> Ax; + + std::size_t m_size; + + }; +#endif + +} // namespace plib + +#endif /* PLIB_GMRES_H_ */ diff --git a/src/lib/netlist/plib/mat_cr.h b/src/lib/netlist/plib/mat_cr.h new file mode 100644 index 00000000000..9490a07f367 --- /dev/null +++ b/src/lib/netlist/plib/mat_cr.h @@ -0,0 +1,518 @@ +// license:GPL-2.0+ +// copyright-holders:Couriersud +/* + * mat_cr.h + * + * Compressed row format matrices + * + */ + +#ifndef MAT_CR_H_ +#define MAT_CR_H_ + +#include <algorithm> +#include <type_traits> +#include <array> +#include <vector> +#include <cmath> +#include <cstdlib> + +#include "pconfig.h" +#include "palloc.h" +#include "pstate.h" +#include "parray.h" + +namespace plib +{ + + template<typename T, int N, typename C = uint16_t> + struct matrix_compressed_rows_t + { + typedef C index_type; + typedef T value_type; + + enum constants_e + { + FILL_INFINITY = 9999999 + }; + + parray<index_type, N> diag; // diagonal index pointer n + parray<index_type, (N == 0) ? 0 : (N < 0 ? N - 1 : N + 1)> row_idx; // row index pointer n + 1 + parray<index_type, N < 0 ? -N * N : N *N> col_idx; // column index array nz_num, initially (n * n) + parray<value_type, N < 0 ? -N * N : N *N> A; // Matrix elements nz_num, initially (n * n) + //parray<C, N < 0 ? 
-N * (N-1) / 2 : N * (N+1) / 2 > nzbd; // Support for gaussian elimination + parray<std::vector<index_type>, N > nzbd; // Support for gaussian elimination + // contains elimination rows below the diagonal + // FIXME: convert to pvector + std::vector<std::vector<index_type>> m_ge_par; + + index_type nz_num; + + explicit matrix_compressed_rows_t(const index_type n) + : diag(n) + , row_idx(n+1) + , col_idx(n*n) + , A(n*n) + //, nzbd(n * (n+1) / 2) + , nzbd(n) + , nz_num(0) + , m_size(n) + { + for (index_type i=0; i<n+1; i++) + A[i] = 0; + } + + ~matrix_compressed_rows_t() + { + } + + index_type size() const { return m_size; } + + void set_scalar(const T scalar) + { + for (index_type i=0, e=nz_num; i<e; i++) + A[i] = scalar; + } + + void set(C r, C c, T val) + { + C ri = row_idx[r]; + while (ri < row_idx[r+1] && col_idx[ri] < c) + ri++; + // we have the position now; + if (nz_num > 0 && col_idx[ri] == c) + A[ri] = val; + else + { + for (C i = nz_num; i>ri; i--) + { + A[i] = A[i-1]; + col_idx[i] = col_idx[i-1]; + } + A[ri] = val; + col_idx[ri] = c; + for (C i = row_idx[r]; i < size()+1;i++) + row_idx[i]++; + nz_num++; + if (c==r) + diag[r] = ri; + } + } + + template <typename M> + std::pair<std::size_t, std::size_t> gaussian_extend_fill_mat(M &fill) + { + std::size_t ops = 0; + std::size_t fill_max = 0; + + for (std::size_t k = 0; k < fill.size(); k++) + { + ops++; // 1/A(k,k) + for (std::size_t row = k + 1; row < fill.size(); row++) + { + if (fill[row][k] < FILL_INFINITY) + { + ops++; + for (std::size_t col = k + 1; col < fill[row].size(); col++) + //if (fill[k][col] < FILL_INFINITY) + { + auto f = std::min(fill[row][col], 1 + fill[row][k] + fill[k][col]); + if (f < FILL_INFINITY) + { + if (f > fill_max) + fill_max = f; + ops += 2; + } + fill[row][col] = f; + } + } + } + } + build_parallel_gaussian_execution_scheme(fill); + return { fill_max, ops }; + } + + template <typename M> + void build_from_fill_mat(const M &f, std::size_t max_fill = FILL_INFINITY - 1, + std::size_t band_width = FILL_INFINITY) + { + C nz = 0; + if (nz_num != 0) + throw pexception("build_from_mat only allowed on empty CR matrix"); + for (std::size_t k=0; k < size(); k++) + { + row_idx[k] = nz; + + for (std::size_t j=0; j < size(); j++) + if (f[k][j] <= max_fill && std::abs(static_cast<int>(k)-static_cast<int>(j)) <= static_cast<int>(band_width)) + { + col_idx[nz] = static_cast<C>(j); + if (j == k) + diag[k] = nz; + nz++; + } + } + + row_idx[size()] = nz; + nz_num = nz; + /* build nzbd */ + + for (std::size_t k=0; k < size(); k++) + { + for (std::size_t j=k + 1; j < size(); j++) + if (f[j][k] < FILL_INFINITY) + nzbd[k].push_back(static_cast<C>(j)); + nzbd[k].push_back(0); // end of sequence + } + } + + template <typename V> + void gaussian_elimination(V & RHS) + { + const std::size_t iN = size(); + + for (std::size_t i = 0; i < iN - 1; i++) + { + std::size_t nzbdp = 0; + std::size_t pi = diag[i]; + const value_type f = 1.0 / A[pi++]; + const std::size_t piie = row_idx[i+1]; + const auto &nz = nzbd[i]; + + while (auto j = nz[nzbdp++]) + { + // proceed to column i + + std::size_t pj = row_idx[j]; + + while (col_idx[pj] < i) + pj++; + + const value_type f1 = - A[pj++] * f; + + // subtract row i from j + // fill-in available assumed, i.e. 
matrix was prepared + + for (std::size_t pii = pi; pii<piie; pii++) + { + while (col_idx[pj] < col_idx[pii]) + pj++; + if (col_idx[pj] == col_idx[pii]) + A[pj++] += A[pii] * f1; + } + + RHS[j] += f1 * RHS[i]; + } + } + } + + template <typename V> + void gaussian_elimination_parallel(V & RHS) + { + // FIXME: move into solver creation ... + plib::omp::set_num_threads(4); + for (auto l = 0ul; l < m_ge_par.size(); l++) + plib::omp::for_static(0ul, m_ge_par[l].size(), [this, &RHS, &l] (unsigned ll) + { + auto &i = m_ge_par[l][ll]; + { + std::size_t nzbdp = 0; + std::size_t pi = diag[i]; + const value_type f = 1.0 / A[pi++]; + const std::size_t piie = row_idx[i+1]; + + while (auto j = nzbd[i][nzbdp++]) + { + // proceed to column i + + std::size_t pj = row_idx[j]; + + while (col_idx[pj] < i) + pj++; + + const value_type f1 = - A[pj++] * f; + + // subtract row i from j + // fill-in available assumed, i.e. matrix was prepared + for (std::size_t pii = pi; pii<piie; pii++) + { + while (col_idx[pj] < col_idx[pii]) + pj++; + if (col_idx[pj] == col_idx[pii]) + A[pj++] += A[pii] * f1; + } + RHS[j] += f1 * RHS[i]; + } + } + }); + } + + template <typename V1, typename V2> + void gaussian_back_substitution(V1 &V, const V2 &RHS) + { + const std::size_t iN = size(); + /* row n-1 */ + V[iN - 1] = RHS[iN - 1] / A[diag[iN - 1]]; + + for (std::size_t j = iN - 1; j-- > 0;) + { + value_type tmp = 0; + const auto jdiag = diag[j]; + const std::size_t e = row_idx[j+1]; + for (std::size_t pk = jdiag + 1; pk < e; pk++) + tmp += A[pk] * V[col_idx[pk]]; + V[j] = (RHS[j] - tmp) / A[jdiag]; + } + } + + template <typename V1> + void gaussian_back_substitution(V1 &V) + { + const std::size_t iN = size(); + /* row n-1 */ + V[iN - 1] = V[iN - 1] / A[diag[iN - 1]]; + + for (std::size_t j = iN - 1; j-- > 0;) + { + value_type tmp = 0; + const auto jdiag = diag[j]; + const std::size_t e = row_idx[j+1]; + for (std::size_t pk = jdiag + 1; pk < e; pk++) + tmp += A[pk] * V[col_idx[pk]]; + V[j] = (V[j] - tmp) / A[jdiag]; + } + } + + + template <typename VTV, typename VTR> + void mult_vec(VTR & RESTRICT res, const VTV & RESTRICT x) + { + /* + * res = A * x + */ + + std::size_t row = 0; + std::size_t k = 0; + const std::size_t oe = nz_num; + + while (k < oe) + { + T tmp = 0.0; + const std::size_t e = row_idx[row+1]; + for (; k < e; k++) + tmp += A[k] * x[col_idx[k]]; + res[row++] = tmp; + } + } + + /* throws error if P(source)>P(destination) */ + template <typename LUMAT> + void slim_copy_from(LUMAT & src) + { + for (std::size_t r=0; r<src.size(); r++) + { + C dp = row_idx[r]; + for (C sp = src.row_idx[r]; sp < src.row_idx[r+1]; sp++) + { + /* advance dp to source column and fill 0s if necessary */ + while (col_idx[dp] < src.col_idx[sp]) + A[dp++] = 0; + if (row_idx[r+1] <= dp || col_idx[dp] != src.col_idx[sp]) + throw plib::pexception("slim_copy_from error"); + A[dp++] = src.A[sp]; + } + /* fill remaining elements in row */ + while (dp < row_idx[r+1]) + A[dp++] = 0; + } + } + + /* only copies common elements */ + template <typename LUMAT> + void reduction_copy_from(LUMAT & src) + { + C sp = 0; + for (std::size_t r=0; r<src.size(); r++) + { + C dp = row_idx[r]; + while(sp < src.row_idx[r+1]) + { + /* advance dp to source column and fill 0s if necessary */ + if (col_idx[dp] < src.col_idx[sp]) + A[dp++] = 0; + else if (col_idx[dp] == src.col_idx[sp]) + A[dp++] = src.A[sp++]; + else + sp++; + } + /* fill remaining elements in row */ + while (dp < row_idx[r+1]) + A[dp++] = 0; + } + } + + /* checks at all - may crash */ + template <typename 
LUMAT> + void raw_copy_from(LUMAT & src) + { + for (std::size_t k = 0; k < nz_num; k++) + A[k] = src.A[k]; + } + + void incomplete_LU_factorization() + { + /* + * incomplete LU Factorization according to http://de.wikipedia.org/wiki/ILU-Zerlegung + * + * Result is stored in matrix LU + * + * For i = 1,...,N-1 + * For k = 0, ... , i - 1 + * If a[i,k] != 0 + * a[i,k] = a[i,k] / a[k,k] + * For j = k + 1, ... , N - 1 + * If a[i,j] != 0 + * a[i,j] = a[i,j] - a[i,k] * a[k,j] + * j=j+1 + * k=k+1 + * i=i+1 + * + */ + + for (std::size_t i = 1; i < m_size; i++) // row i + { + const std::size_t p_i_end = row_idx[i + 1]; + // loop over all columns k left of diag in row i + for (std::size_t i_k = row_idx[i]; i_k < diag[i]; i_k++) + { + const std::size_t k = col_idx[i_k]; + const std::size_t p_k_end = row_idx[k + 1]; + const T LUp_i_k = A[i_k] = A[i_k] / A[diag[k]]; + + std::size_t k_j = diag[k] + 1; + std::size_t i_j = i_k + 1; + + while (i_j < p_i_end && k_j < p_k_end ) // pj = (i, j) + { + // we can assume that within a row ja increases continuously */ + const auto c_i_j = col_idx[i_j]; // row i, column j + const auto c_k_j = col_idx[k_j]; // row i, column j + if (c_k_j < c_i_j) + k_j++; + else if (c_k_j == c_i_j) + A[i_j++] -= LUp_i_k * A[k_j++]; + else + i_j++; + } + } + } + } + + template <typename R> + void solveLUx (R &r) + { + /* + * Solve a linear equation Ax = r + * where + * A = L*U + * + * L unit lower triangular + * U upper triangular + * + * ==> LUx = r + * + * ==> Ux = L⁻¹ r = w + * + * ==> r = Lw + * + * This can be solved for w using backwards elimination in L. + * + * Now Ux = w + * + * This can be solved for x using backwards elimination in U. + * + */ + for (std::size_t i = 1; i < m_size; ++i ) + { + T tmp = 0.0; + const std::size_t j1 = row_idx[i]; + const std::size_t j2 = diag[i]; + + for (std::size_t j = j1; j < j2; ++j ) + tmp += A[j] * r[col_idx[j]]; + + r[i] -= tmp; + } + // i now is equal to n; + for (std::size_t i = m_size; i-- > 0; ) + { + T tmp = 0.0; + const std::size_t di = diag[i]; + const std::size_t j2 = row_idx[i+1]; + for (std::size_t j = di + 1; j < j2; j++ ) + tmp += A[j] * r[col_idx[j]]; + r[i] = (r[i] - tmp) / A[di]; + } + } + private: + template <typename M> + void build_parallel_gaussian_execution_scheme(const M &fill) + { + // calculate parallel scheme for gaussian elimination + std::vector<std::vector<index_type>> rt(size()); + for (index_type k = 0; k < size(); k++) + { + for (index_type j = k+1; j < size(); j++) + { + if (fill[j][k] < FILL_INFINITY) + { + rt[k].push_back(j); + } + } + } + + std::vector<index_type> levGE(size(), 0); + index_type cl = 0; + + for (index_type k = 0; k < size(); k++ ) + { + if (levGE[k] >= cl) + { + std::vector<index_type> t = rt[k]; + for (index_type j = k+1; j < size(); j++ ) + { + bool overlap = false; + // is there overlap + if (plib::container::contains(t, j)) + overlap = true; + for (auto &x : rt[j]) + if (plib::container::contains(t, x)) + { + overlap = true; + break; + } + if (overlap) + levGE[j] = cl + 1; + else + { + t.push_back(j); + for (auto &x : rt[j]) + t.push_back(x); + } + } + cl++; + } + } + + m_ge_par.clear(); + m_ge_par.resize(cl+1); + for (index_type k = 0; k < size(); k++) + m_ge_par[levGE[k]].push_back(k); + } + + index_type m_size; + }; + +} + +#endif /* MAT_CR_H_ */ diff --git a/src/lib/netlist/plib/pomp.h b/src/lib/netlist/plib/pomp.h index f13df2539ac..19b39466025 100644 --- a/src/lib/netlist/plib/pomp.h +++ b/src/lib/netlist/plib/pomp.h @@ -28,13 +28,21 @@ void for_static(const I start, const I end, 
const T &what) #endif { #if HAS_OPENMP && USE_OPENMP - #pragma omp for schedule(static) + #pragma omp for //schedule(static) #endif for (I i = start; i < end; i++) what(i); } } +template <typename I, class T> +void for_static_np(const I start, const I end, const T &what) +{ + for (I i = start; i < end; i++) + what(i); +} + + inline void set_num_threads(const std::size_t threads) { #if HAS_OPENMP && USE_OPENMP diff --git a/src/lib/netlist/plib/putil.h b/src/lib/netlist/plib/putil.h index ed27867af6b..4212677c81c 100644 --- a/src/lib/netlist/plib/putil.h +++ b/src/lib/netlist/plib/putil.h @@ -25,8 +25,8 @@ namespace plib namespace container { - template <class C> - bool contains(C &con, const typename C::value_type &elem) + template <class C, class T> + bool contains(C &con, const T &elem) { return std::find(con.begin(), con.end(), elem) != con.end(); } diff --git a/src/lib/netlist/plib/vector_ops.h b/src/lib/netlist/plib/vector_ops.h new file mode 100644 index 00000000000..9bcdb6ee8c1 --- /dev/null +++ b/src/lib/netlist/plib/vector_ops.h @@ -0,0 +1,115 @@ +// license:GPL-2.0+ +// copyright-holders:Couriersud +/* + * vector_ops.h + * + * Base vector operations + * + */ + +#ifndef PLIB_VECTOR_OPS_H_ +#define PLIB_VECTOR_OPS_H_ + +#include <algorithm> +#include <cmath> +#include <type_traits> + +#include "pconfig.h" + +#if !defined(__clang__) && !defined(_MSC_VER) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 6)) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + +namespace plib +{ + template<typename VT, typename T> + void vec_set_scalar (const std::size_t n, VT &v, const T & scalar) + { + for ( std::size_t i = 0; i < n; i++ ) + v[i] = scalar; + } + + template<typename VT, typename VS> + void vec_set (const std::size_t n, VT &v, const VS & source) + { + for ( std::size_t i = 0; i < n; i++ ) + v[i] = source [i]; + } + + template<typename T, typename V1, typename V2> + T vec_mult (const std::size_t n, const V1 & v1, const V2 & v2 ) + { + T value = 0.0; + for ( std::size_t i = 0; i < n; i++ ) + value += v1[i] * v2[i]; + return value; + } + + template<typename T, typename VT> + T vec_mult2 (const std::size_t n, const VT &v) + { + T value = 0.0; + for ( std::size_t i = 0; i < n; i++ ) + value += v[i] * v[i]; + return value; + } + + template<typename VV, typename T, typename VR> + void vec_mult_scalar (const std::size_t n, const VV & v, const T & scalar, VR & result) + { + for ( std::size_t i = 0; i < n; i++ ) + result[i] = scalar * v[i]; + } + + template<typename VV, typename T, typename VR> + void vec_add_mult_scalar (const std::size_t n, const VV & v, const T scalar, VR & result) + { + for ( std::size_t i = 0; i < n; i++ ) + result[i] = result[i] + scalar * v[i]; + } + + template<typename T> + void vec_add_mult_scalar_p(const std::size_t & n, const T * RESTRICT v, const T scalar, T * RESTRICT result) + { + for ( std::size_t i = 0; i < n; i++ ) + result[i] += scalar * v[i]; + } + + template<typename V, typename R> + void vec_add_ip(const std::size_t n, const V & v, R & result) + { + for ( std::size_t i = 0; i < n; i++ ) + result[i] += v[i]; + } + + template<typename V1, typename V2, typename VR> + void vec_sub(const std::size_t n, const V1 &v1, const V2 & v2, VR & result) + { + for ( std::size_t i = 0; i < n; i++ ) + result[i] = v1[i] - v2[i]; + } + + template<typename V, typename T> + void vec_scale(const std::size_t n, V & v, const T scalar) + { + for ( std::size_t i = 0; i < n; i++ ) + v[i] = scalar * v[i]; + } + + template<typename T, 
typename V> + T vec_maxabs(const std::size_t n, const V & v) + { + T ret = 0.0; + for ( std::size_t i = 0; i < n; i++ ) + ret = std::max(ret, std::abs(v[i])); + + return ret; + } +} + +#if !defined(__clang__) && !defined(_MSC_VER) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 6)) +#pragma GCC diagnostic pop +#endif + +#endif /* PLIB_VECTOR_OPS_H_ */ |
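For reference, a standalone illustration (not part of the commit) of the compressed-row layout used by matrix_compressed_rows_t in mat_cr.h and of the traversal performed by its mult_vec(); the 3x3 example values are made up.

```cpp
#include <cstddef>
#include <cstdio>

int main()
{
    // | 4 1 0 |
    // | 0 3 2 |
    // | 1 0 5 |
    const double      A[]       = { 4, 1,  3, 2,  1, 5 }; // non-zero values, row by row
    const std::size_t col_idx[] = { 0, 1,  1, 2,  0, 2 }; // column of each value
    const std::size_t row_idx[] = { 0, 2, 4, 6 };         // start of each row, plus total nz_num

    const double x[3] = { 1.0, 1.0, 1.0 };
    double res[3];

    // res = A * x, same traversal as mult_vec() above
    for (std::size_t row = 0; row < 3; row++)
    {
        double tmp = 0.0;
        for (std::size_t k = row_idx[row]; k < row_idx[row + 1]; k++)
            tmp += A[k] * x[col_idx[k]];
        res[row] = tmp;                                    // yields { 5, 5, 6 }
    }

    for (std::size_t i = 0; i < 3; i++)
        std::printf("res[%zu] = %g\n", i, res[i]);
    return 0;
}
```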