From 1513c777b47d19c7df99ba43142d7a334392a0b5 Mon Sep 17 00:00:00 2001
From: couriersud
Date: Wed, 30 Jan 2019 02:28:55 +0100
Subject: netlist: Refactoring continues ... plus some innovations (nw)

Still some work ahead to separate interface from execution. This is a
preparation to easily switch to another sparse matrix format which may
be better suited for parallel processing.

On the linear algebra side there are some nice additions:

- Two additional sort modes: one tries to obtain an upper left identity
  matrix, the other prefers a diagonal band matrix structure. Both
  deliver slightly better performance than plain sorting.

- Parallel execution analysis for Gaussian elimination and LU solve.
  This determines which operations may be done independently.

All of this is not really useful right now. The matrix sizes are below
100 nets. I estimate that we need at least four times that size before
the overhead of CPU parallel processing pays off. For GPUs, add another
order of magnitude. But it is nice to have code which may scale.
---
 src/lib/netlist/plib/mat_cr.h | 518 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 518 insertions(+)
 create mode 100644 src/lib/netlist/plib/mat_cr.h

diff --git a/src/lib/netlist/plib/mat_cr.h b/src/lib/netlist/plib/mat_cr.h
new file mode 100644
index 00000000000..9490a07f367
--- /dev/null
+++ b/src/lib/netlist/plib/mat_cr.h
@@ -0,0 +1,518 @@
// license:GPL-2.0+
// copyright-holders:Couriersud
/*
 * mat_cr.h
 *
 * Compressed row format matrices
 *
 */

#ifndef MAT_CR_H_
#define MAT_CR_H_

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <utility>
#include <vector>

#include "pconfig.h"
#include "palloc.h"
#include "pstate.h"
#include "parray.h"

namespace plib
{

	template <typename T, int N, typename C = uint16_t>
	struct matrix_compressed_rows_t
	{
		typedef C index_type;
		typedef T value_type;

		enum constants_e
		{
			FILL_INFINITY = 9999999
		};

		parray<C, N> diag;      // diagonal index pointer n
		parray<C, N> row_idx;   // row index pointer n + 1
		parray<C, N> col_idx;   // column index array nz_num, initially (n * n)
		parray<T, N> A;         // matrix elements nz_num, initially (n * n)
		//parray<C, N> nzbd;    // support for gaussian elimination
		parray<std::vector<C>, N > nzbd;    // support for gaussian elimination;
		                                    // contains elimination rows below the diagonal
		// FIXME: convert to pvector
		std::vector<std::vector<C>> m_ge_par;   // parallel execution levels for gaussian elimination

		index_type nz_num;
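		/*
		 * Storage scheme example: the 3x3 matrix
		 *
		 *     | 11 12  0 |
		 *     |  0 22 23 |
		 *     | 31  0 33 |
		 *
		 * is stored as
		 *
		 *     A       = { 11, 12, 22, 23, 31, 33 }   nz_num = 6
		 *     col_idx = {  0,  1,  1,  2,  0,  2 }
		 *     row_idx = {  0,  2,  4,  6 }   row k occupies A[row_idx[k]] .. A[row_idx[k+1] - 1]
		 *     diag    = {  0,  2,  5 }       position of A(k,k) within A
		 */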
		explicit matrix_compressed_rows_t(const index_type n)
		: diag(n)
		, row_idx(n+1)
		, col_idx(n*n)
		, A(n*n)
		//, nzbd(n * (n+1) / 2)
		, nzbd(n)
		, nz_num(0)
		, m_size(n)
		{
			for (index_type i = 0; i < n + 1; i++)
				row_idx[i] = 0;
		}

		index_type size() const { return m_size; }

		void set(C r, C c, T val)
		{
			// find the position of (r, c) within row r
			C ri = row_idx[r];
			while (ri < row_idx[r+1] && col_idx[ri] < c)
				ri++;
			if (nz_num > 0 && ri < row_idx[r+1] && col_idx[ri] == c)
				A[ri] = val;    // the element exists, update it
			else
			{
				// shift all elements from position ri onwards to the right
				for (C i = nz_num; i > ri; i--)
				{
					A[i] = A[i-1];
					col_idx[i] = col_idx[i-1];
				}
				A[ri] = val;
				col_idx[ri] = c;
				// all rows behind r now start one element later
				for (C i = r + 1; i < size() + 1; i++)
					row_idx[i]++;
				nz_num++;
				if (c==r)
					diag[r] = ri;
			}
		}

		template <typename M>
		std::pair<std::size_t, std::size_t> gaussian_extend_fill_mat(M &fill)
		{
			std::size_t ops = 0;
			std::size_t fill_max = 0;

			for (std::size_t k = 0; k < fill.size(); k++)
			{
				ops++; // 1/A(k,k)
				for (std::size_t row = k + 1; row < fill.size(); row++)
				{
					if (fill[row][k] < FILL_INFINITY)
					{
						ops++;
						for (std::size_t col = k + 1; col < fill[row].size(); col++)
							//if (fill[k][col] < FILL_INFINITY)
							{
								auto f = std::min(fill[row][col], 1 + fill[row][k] + fill[k][col]);
								if (f < FILL_INFINITY)
								{
									if (f > fill_max)
										fill_max = f;
									ops += 2;
								}
								fill[row][col] = f;
							}
					}
				}
			}
			build_parallel_gaussian_execution_scheme(fill);
			return { fill_max, ops };
		}
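		/*
		 * Fill-in example: for the occupancy pattern
		 * (0 = original nonzero, - = FILL_INFINITY)
		 *
		 *     | 0 - 0 |
		 *     | 0 0 - |
		 *     | - 0 0 |
		 *
		 * eliminating column 0 from row 1 creates one fill-in of level 1
		 * at position (1,2):
		 *
		 *     fill[1][2] = min(FILL_INFINITY, 1 + fill[1][0] + fill[0][2]) = 1
		 *
		 * All other entries keep their level, so fill_max = 1 is returned.
		 */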
		template <typename M>
		void build_from_fill_mat(const M &f, std::size_t max_fill = FILL_INFINITY - 1,
			std::size_t band_width = FILL_INFINITY)
		{
			C nz = 0;
			if (nz_num != 0)
				throw pexception("build_from_mat only allowed on empty CR matrix");
			for (std::size_t k=0; k < size(); k++)
			{
				row_idx[k] = nz;

				for (std::size_t j=0; j < size(); j++)
					if (f[k][j] <= max_fill
						&& std::abs(static_cast<std::ptrdiff_t>(k) - static_cast<std::ptrdiff_t>(j))
							<= static_cast<std::ptrdiff_t>(band_width))
					{
						col_idx[nz] = static_cast<C>(j);
						if (j == k)
							diag[k] = nz;
						nz++;
					}
			}

			row_idx[size()] = nz;
			nz_num = nz;

			/* build nzbd */

			for (std::size_t k=0; k < size(); k++)
			{
				for (std::size_t j=k + 1; j < size(); j++)
					if (f[j][k] < FILL_INFINITY)
						nzbd[k].push_back(static_cast<C>(j));
				nzbd[k].push_back(0); // end of sequence
			}
		}

		template <typename V>
		void gaussian_elimination(V & RHS)
		{
			const std::size_t iN = size();

			for (std::size_t i = 0; i < iN - 1; i++)
			{
				std::size_t nzbdp = 0;
				std::size_t pi = diag[i];
				const value_type f = 1.0 / A[pi++];
				const std::size_t piie = row_idx[i+1];
				const auto &nz = nzbd[i];

				// the 0 pushed in build_from_fill_mat terminates the sequence
				while (auto j = nz[nzbdp++])
				{
					// proceed to column i

					std::size_t pj = row_idx[j];

					while (col_idx[pj] < i)
						pj++;

					const value_type f1 = - A[pj++] * f;

					// subtract row i from j
					// fill-in available assumed, i.e. matrix was prepared

					for (std::size_t pii = pi; pii < piie; pii++)
					{
						while (col_idx[pj] < col_idx[pii])
							pj++;
						if (col_idx[pj] == col_idx[pii])
							A[pj++] += A[pii] * f1;
					}
					RHS[j] += f1 * RHS[i];
				}
			}
		}

		template <typename V>
		void gaussian_elimination_parallel(V & RHS)
		{
			// FIXME: move into solver creation ...
			plib::omp::set_num_threads(4);
			for (auto l = 0ul; l < m_ge_par.size(); l++)
				plib::omp::for_static(0ul, m_ge_par[l].size(), [this, &RHS, &l] (unsigned ll)
				{
					auto &i = m_ge_par[l][ll];
					{
						std::size_t nzbdp = 0;
						std::size_t pi = diag[i];
						const value_type f = 1.0 / A[pi++];
						const std::size_t piie = row_idx[i+1];

						while (auto j = nzbd[i][nzbdp++])
						{
							// proceed to column i

							std::size_t pj = row_idx[j];

							while (col_idx[pj] < i)
								pj++;

							const value_type f1 = - A[pj++] * f;

							// subtract row i from j
							// fill-in available assumed, i.e. matrix was prepared
							for (std::size_t pii = pi; pii < piie; pii++)
							{
								while (col_idx[pj] < col_idx[pii])
									pj++;
								if (col_idx[pj] == col_idx[pii])
									A[pj++] += A[pii] * f1;
							}
							RHS[j] += f1 * RHS[i];
						}
					}
				});
		}

		template <typename V1, typename V2>
		void gaussian_back_substitution(V1 &V, const V2 &RHS)
		{
			const std::size_t iN = size();
			/* row n-1 */
			V[iN - 1] = RHS[iN - 1] / A[diag[iN - 1]];

			for (std::size_t j = iN - 1; j-- > 0;)
			{
				value_type tmp = 0;
				const auto jdiag = diag[j];
				const std::size_t e = row_idx[j+1];
				for (std::size_t pk = jdiag + 1; pk < e; pk++)
					tmp += A[pk] * V[col_idx[pk]];
				V[j] = (RHS[j] - tmp) / A[jdiag];
			}
		}

		template <typename V1>
		void gaussian_back_substitution(V1 &V)
		{
			const std::size_t iN = size();
			/* row n-1 */
			V[iN - 1] = V[iN - 1] / A[diag[iN - 1]];

			for (std::size_t j = iN - 1; j-- > 0;)
			{
				value_type tmp = 0;
				const auto jdiag = diag[j];
				const std::size_t e = row_idx[j+1];
				for (std::size_t pk = jdiag + 1; pk < e; pk++)
					tmp += A[pk] * V[col_idx[pk]];
				V[j] = (V[j] - tmp) / A[jdiag];
			}
		}


		template <typename VTR, typename VTV>
		void mult_vec(VTR & RESTRICT res, const VTV & RESTRICT x)
		{
			/*
			 * res = A * x
			 */

			std::size_t row = 0;
			std::size_t k = 0;
			const std::size_t oe = nz_num;

			while (k < oe)
			{
				T tmp = 0.0;
				const std::size_t e = row_idx[row+1];
				for (; k < e; k++)
					tmp += A[k] * x[col_idx[k]];
				res[row++] = tmp;
			}
		}

		/* throws error if P(source) > P(destination) */
		template <typename LUMAT>
		void slim_copy_from(LUMAT & src)
		{
			for (std::size_t r=0; r < src.size(); r++)
			{
				C dp = row_idx[r];
				for (C sp = src.row_idx[r]; sp < src.row_idx[r+1]; sp++)
				{
					/* advance dp to the source column, filling in zeros */
					while (col_idx[dp] < src.col_idx[sp])
						A[dp++] = 0;
					if (row_idx[r+1] <= dp || col_idx[dp] != src.col_idx[sp])
						throw pexception("slim_copy_from error");
					A[dp++] = src.A[sp];
				}
				/* fill the remaining elements in the row */
				while (dp < row_idx[r+1])
					A[dp++] = 0;
			}
		}

		/* only copies the elements common to both patterns */
		template <typename LUMAT>
		void reduction_copy_from(LUMAT & src)
		{
			C sp = 0;
			for (std::size_t r=0; r < src.size(); r++)
			{
				C dp = row_idx[r];
				while (sp < src.row_idx[r+1])
				{
					if (col_idx[dp] == src.col_idx[sp])
						A[dp++] = src.A[sp++];
					else if (col_idx[dp] < src.col_idx[sp])
						A[dp++] = 0;
					else
						sp++;
				}
			}
		}

		/* no checks at all - may crash */
		template <typename LUMAT>
		void raw_copy_from(LUMAT & src)
		{
			for (std::size_t k = 0; k < nz_num; k++)
				A[k] = src.A[k];
		}

		void incomplete_LU_factorization()
		{
			/*
			 * incomplete LU Factorization according to
			 * http://de.wikipedia.org/wiki/ILU-Zerlegung
			 *
			 * Result is stored in matrix LU
			 *
			 * For i = 1,...,N-1
			 *   For k = 0, ... , i - 1
			 *     If a[i,k] != 0
			 *       a[i,k] = a[i,k] / a[k,k]
			 *       For j = k + 1, ... , N - 1
			 *         If a[i,j] != 0
			 *           a[i,j] = a[i,j] - a[i,k] * a[k,j]
			 *
			 */

			for (std::size_t i = 1; i < m_size; i++) // row i
			{
				const std::size_t p_i_end = row_idx[i + 1];
				// loop over all columns k left of the diagonal in row i
				for (std::size_t i_k = row_idx[i]; i_k < diag[i]; i_k++)
				{
					const std::size_t k = col_idx[i_k];
					const std::size_t p_k_end = row_idx[k + 1];
					const T LUp_i_k = A[i_k] = A[i_k] / A[diag[k]];

					std::size_t k_j = diag[k] + 1;
					std::size_t i_j = i_k + 1;

					while (i_j < p_i_end && k_j < p_k_end)  // pj = (i, j)
					{
						/* we can assume that within a row col_idx increases monotonically */
						const auto c_i_j = col_idx[i_j];    // row i, column j
						const auto c_k_j = col_idx[k_j];    // row k, column j
						if (c_k_j < c_i_j)
							k_j++;
						else if (c_k_j == c_i_j)
							A[i_j++] -= LUp_i_k * A[k_j++];
						else
							i_j++;
					}
				}
			}
		}
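		/*
		 * Typical use of the factorization (sketch, not a fixed API contract;
		 * assumes the pattern and the coefficients have already been loaded):
		 *
		 *     mat.incomplete_LU_factorization(); // A now holds L (unit diagonal) and U
		 *     mat.solveLUx(r);                   // overwrites r with x where LU x = r
		 *
		 * solveLUx() follows below.
		 */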
		template <typename R>
		void solveLUx (R &r)
		{
			/*
			 * Solve a linear equation Ax = r
			 *
			 * where
			 *    A = L*U
			 *
			 *    L unit lower triangular
			 *    U upper triangular
			 *
			 * ==> LUx = r
			 *
			 * ==> Ux = L⁻¹ r = w
			 *
			 * ==> r = Lw
			 *
			 * This can be solved for w using forward substitution in L.
			 *
			 * Now Ux = w
			 *
			 * This can be solved for x using back substitution in U.
			 *
			 */
			for (std::size_t i = 1; i < m_size; ++i )
			{
				T tmp = 0.0;
				const std::size_t j1 = row_idx[i];
				const std::size_t j2 = diag[i];

				for (std::size_t j = j1; j < j2; ++j )
					tmp += A[j] * r[col_idx[j]];

				r[i] -= tmp;
			}
			// i now is equal to n;
			for (std::size_t i = m_size; i-- > 0; )
			{
				T tmp = 0.0;
				const std::size_t di = diag[i];
				const std::size_t j2 = row_idx[i+1];
				for (std::size_t j = di + 1; j < j2; j++ )
					tmp += A[j] * r[col_idx[j]];
				r[i] = (r[i] - tmp) / A[di];
			}
		}

	private:
		template <typename M>
		void build_parallel_gaussian_execution_scheme(const M &fill)
		{
			// calculate parallel scheme for gaussian elimination
			std::vector<std::vector<index_type>> rt(size());    // rt[k]: rows below k into which row k eliminates
			for (index_type k = 0; k < size(); k++)
			{
				for (index_type j = k+1; j < size(); j++)
				{
					if (fill[j][k] < FILL_INFINITY)
					{
						rt[k].push_back(j);
					}
				}
			}

			std::vector<index_type> levGE(size(), 0);   // parallel level of each elimination step
			index_type cl = 0;

			for (index_type k = 0; k < size(); k++ )
			{
				if (levGE[k] >= cl)
				{
					std::vector<index_type> t = rt[k];
					for (index_type j = k+1; j < size(); j++ )
					{
						bool overlap = false;
						// is there overlap with rows already scheduled on this level?
						if (plib::container::contains(t, j))
							overlap = true;
						for (auto &x : rt[j])
							if (plib::container::contains(t, x))
							{
								overlap = true;
								break;
							}
						if (overlap)
							levGE[j] = cl + 1;
						else
						{
							t.push_back(j);
							for (auto &x : rt[j])
								t.push_back(x);
						}
					}
					cl++;
				}
			}

			// Example: with rt[0] = {2} and rt[1] = {3} only, rows 0 and 1 do
			// not overlap and share a level: m_ge_par = { {0,1}, {2,3}, {} }.
			m_ge_par.clear();
			m_ge_par.resize(cl+1);
			for (index_type k = 0; k < size(); k++)
				m_ge_par[levGE[k]].push_back(k);
		}

		index_type m_size;
	};

} // namespace plib

#endif /* MAT_CR_H_ */
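/*
 * Minimal usage sketch (illustrative only; assumes that a non-positive size
 * parameter N selects runtime-sized parray storage, and that `fill` is a
 * dense container such as std::vector<std::vector<std::size_t>> holding 0
 * for structural nonzeros and FILL_INFINITY elsewhere):
 *
 *     plib::matrix_compressed_rows_t<double, 0> mat(n);
 *
 *     mat.gaussian_extend_fill_mat(fill);     // symbolic phase: add fill-ins
 *     mat.build_from_fill_mat(fill);          // build row_idx/col_idx/diag/nzbd
 *     // ... store coefficients, e.g. via mat.set(r, c, val) ...
 *
 *     mat.gaussian_elimination(rhs);          // in-place forward elimination
 *     mat.gaussian_back_substitution(x, rhs); // solve for x
 */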