CoolProp 6.8.1dev
An open-source fluid property and humid air property database
superancillary.h
1/*
2# NIST Disclaimer of Copyright and Warranty
3
4This software was developed by employees of the National Institute of
5Standards and Technology (NIST), an agency of the Federal Government
6and is being made available as a public service. Pursuant to title 17
7United States Code Section 105, works of NIST employees are not
8subject to copyright protection in the United States. This software
9may be subject to foreign copyright. Permission in the United States
10and in foreign countries, to the extent that NIST may hold copyright,
11to use, copy, modify, create derivative works, and distribute this
12software and its documentation without fee is hereby granted on a
13non-exclusive basis, provided that this notice and disclaimer of
14warranty appears in all copies.
15
16THE SOFTWARE IS PROVIDED 'AS IS' WITHOUT ANY WARRANTY OF ANY KIND,
17EITHER EXPRESSED, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED
18TO, ANY WARRANTY THAT THE SOFTWARE WILL CONFORM TO SPECIFICATIONS,
19ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
20PURPOSE, AND FREEDOM FROM INFRINGEMENT, AND ANY WARRANTY THAT THE
21DOCUMENTATION WILL CONFORM TO THE SOFTWARE, OR ANY WARRANTY THAT THE
22SOFTWARE WILL BE ERROR FREE. IN NO EVENT SHALL NIST BE LIABLE FOR ANY
23DAMAGES, INCLUDING, BUT NOT LIMITED TO, DIRECT, INDIRECT, SPECIAL OR
24CONSEQUENTIAL DAMAGES, ARISING OUT OF, RESULTING FROM, OR IN ANY WAY
25CONNECTED WITH THIS SOFTWARE, WHETHER OR NOT BASED UPON WARRANTY,
26CONTRACT, TORT, OR OTHERWISE, WHETHER OR NOT INJURY WAS SUSTAINED BY
27PERSONS OR PROPERTY OR OTHERWISE, AND WHETHER OR NOT LOSS WAS
28SUSTAINED FROM, OR AROSE OUT OF THE RESULTS OF, OR USE OF, THE
29SOFTWARE OR SERVICES PROVIDED HEREUNDER.
30
31Subsequent edits by Ian Bell
32*/
33
34#pragma once
35
36#include <iostream>
37#include <utility>
38#include <optional>
#include <functional>
#include <algorithm>
#include <set>
#include <string>
#include <sstream>
#include <iomanip>
#include <tuple>
#include <limits>
#include <cmath>
#include <vector>
39#include <Eigen/Dense>
40
41#include "boost/math/tools/toms748_solve.hpp"
42#include "nlohmann/json.hpp"
43
44namespace CoolProp{
45namespace superancillary{
46
47namespace detail{
48
49// From https://arxiv.org/pdf/1401.5766.pdf (Algorithm #3)
50template<typename Matrix>
51inline void balance_matrix(const Matrix &A, Matrix &Aprime, Matrix &D) {
52 const int p = 2;
53 double beta = 2; // Radix base (2?)
54 int iter = 0;
55 Aprime = A;
56 D = Matrix::Identity(A.rows(), A.cols());
57 bool converged = false;
58 do {
59 converged = true;
60 for (Eigen::Index i = 0; i < A.rows(); ++i) {
61 double c = Aprime.col(i).template lpNorm<p>();
62 double r = Aprime.row(i).template lpNorm<p>();
63 double s = pow(c, p) + pow(r, p);
64 double f = 1;
65 //if (!ValidNumber(c)){
66 // std::cout << A << std::endl;
67 // throw std::range_error("c is not a valid number in balance_matrix"); }
68 //if (!ValidNumber(r)) { throw std::range_error("r is not a valid number in balance_matrix"); }
69 while (c < r/beta) {
70 c *= beta;
71 r /= beta;
72 f *= beta;
73 }
74 while (c >= r*beta) {
75 c /= beta;
76 r *= beta;
77 f /= beta;
78 }
79 if (pow(c, p) + pow(r, p) < 0.95*s) {
80 converged = false;
81 D(i, i) *= f;
82 Aprime.col(i) *= f;
83 Aprime.row(i) /= f;
84 }
85 }
86 iter++;
87 if (iter > 50) {
88 break;
89 }
90 } while (!converged);
91}
92
93inline void companion_matrix_transposed(const Eigen::ArrayXd &coeffs, Eigen::MatrixXd &A) {
94 auto N = coeffs.size() - 1; // degree
95 if (A.rows() != N){ throw std::invalid_argument("A.rows() != N"); }
96 A.setZero();
97 // First col
98 A(1, 0) = 1;
99 // Last col
100 A.col(N-1) = -coeffs.head(N)/(2.0*coeffs(N));
101 A(N - 2, N - 1) += 0.5;
102 // All the other cols
103 for (int j = 1; j < N - 1; ++j) {
104 A(j - 1, j) = 0.5;
105 A(j + 1, j) = 0.5;
106 }
107}
108inline void companion_matrix_transposed(const std::vector<double> &coeffs, Eigen::MatrixXd &A) {
109 Eigen::ArrayXd coeffs_ = Eigen::Map<const Eigen::ArrayXd>(&coeffs[0], coeffs.size());
110 return companion_matrix_transposed(coeffs_, A);
111}
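// Illustrative sketch (not part of the header): the real roots of a Chebyshev series
// sum_k c_k T_k(x) on [-1, 1] are eigenvalues of this companion ("colleague") matrix,
// so rootfinding reduces to an eigenvalue problem. For p(x) = T_2(x) = 2*x^2 - 1, whose
// exact roots are +/- 1/sqrt(2):
//
//   Eigen::ArrayXd c(3); c << 0.0, 0.0, 1.0;    // coefficients c_0, c_1, c_2
//   Eigen::MatrixXd A(2, 2), Aprime, D;
//   detail::companion_matrix_transposed(c, A);  // A = [[0, 0.5], [1, 0]]
//   detail::balance_matrix(A, Aprime, D);       // similarity scaling, spectrum unchanged
//   // Aprime.eigenvalues() contains +/- 0.7071..., the roots of p(x)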
112
113
122inline auto get_LU_matrices(std::size_t N){
123 Eigen::MatrixXd L(N + 1, N + 1);
124 Eigen::MatrixXd U(N + 1, N + 1);
125 for (std::size_t j = 0; j <= N; ++j) {
126 for (std::size_t k = j; k <= N; ++k) {
127 double p_j = (j == 0 || j == N) ? 2 : 1;
128 double p_k = (k == 0 || k == N) ? 2 : 1;
129 double cosjPikN = cos((j*EIGEN_PI*k) / N);
130 L(j, k) = 2.0/(p_j*p_k*N)*cosjPikN;
131 // Exploit symmetry to fill in the symmetric elements in the matrix
132 L(k, j) = L(j, k);
133
134 U(j, k) = cosjPikN;
135 // Exploit symmetry to fill in the symmetric elements in the matrix
136 U(k, j) = U(j, k);
137 }
138 }
139 return std::make_tuple(L, U);
140}
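// Illustrative note (an interpretation based on how these matrices are used below, not a
// normative statement): U evaluates a Chebyshev series at the N+1 Chebyshev-Lobatto nodes
// (values = U*coeffs) and L maps nodal values back to coefficients (coeffs = L*values),
// i.e. the pair is a discrete cosine transform and its inverse. A minimal sketch, assuming
// coeffs is an Eigen::ArrayXd of length N+1:
//
//   auto [Lmat, Umat] = detail::get_LU_matrices(N);
//   Eigen::ArrayXd vals = Umat*coeffs.matrix();    // function values at the nodes
//   Eigen::ArrayXd coeffs2 = Lmat*vals.matrix();   // recovers coeffs (up to roundoff)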
141
142inline double M_element_norm(const std::vector<double>& x, Eigen::Index M){
143 Eigen::Map<const Eigen::ArrayXd> X(&x[0], x.size());
144 return X.tail(M).matrix().norm() / X.head(M).matrix().norm();
145}
146
147inline double M_element_norm(const Eigen::ArrayXd& x, Eigen::Index M){
148 return x.tail(M).matrix().norm() / x.head(M).matrix().norm();
149}
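// Illustrative note: this ratio (norm of the trailing M coefficients over the norm of the
// leading M coefficients) is the convergence metric used by dyadic_splitting below; a
// well-resolved Chebyshev expansion has rapidly decaying coefficients, so the ratio is
// tiny. For example, for coefficients {1.0, 0.5, 1e-14, 1e-15} and M = 2 the value is
// about 9e-15, which passes a tolerance of 1e-12.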
150
155template<typename Function, typename Container>
156inline auto dyadic_splitting(const std::size_t N, const Function& func, const double xmin, const double xmax,
157 const int M=3, const double tol=1e-12, const int max_refine_passes = 8,
158 const std::optional<std::function<void(int, const Container&)>>& callback = std::nullopt) -> Container
159{
160 using CE_t = std::decay_t<decltype(Container().front())>;
161 using ArrayType = std::decay_t<decltype(CE_t().coeff())>;
162
163 Eigen::MatrixXd Lmat, Umat;
164 std::tie(Lmat, Umat) = detail::get_LU_matrices(N);
165
166 auto builder = [&](double xmin, double xmax) -> CE_t{
167
168 auto get_nodes_realworld = [N, xmin, xmax]() -> Eigen::ArrayXd{
169 Eigen::ArrayXd nodes = (Eigen::ArrayXd::LinSpaced(N + 1, 0, static_cast<double>(N)).array()*EIGEN_PI / N).cos();
170 return ((xmax - xmin)*nodes + (xmax + xmin))*0.5;
171 };
172
173 Eigen::ArrayXd x = get_nodes_realworld();
174
175 // Apply the function to do the transformation
176 // of the functional values at the nodes
177 for (auto j = 0L; j < x.size(); ++j){
178 x(j) = func(j, static_cast<long>(x.size()), x(j));
179 }
180
181 // And now rebuild the expansion by left-multiplying by the L matrix
182 Eigen::ArrayXd c = Lmat*x.matrix();
183// std::cout << "c: " << c << std::endl;
184 // Check if any coefficients are invalid, stop if so
185 if (!c.allFinite() ) {
186 throw std::invalid_argument("At least one coefficient is non-finite");
187 }
188 if constexpr (std::is_same_v<ArrayType, std::vector<double>>){
189 return {xmin, xmax, std::vector<double>(&c[0], &c[0] + c.size())};
190 }
191 else{
192 // Probably an Eigen::ArrayXd, just pass that into constructor
193 return {xmin, xmax, c};
194 }
195 };
196
197 // Start off with the full domain from xmin to xmax
198 Container expansions;
199 expansions.emplace_back(builder(xmin, xmax));
200
201 // Now enter into refinement passes
202 for (int refine_pass = 0; refine_pass < max_refine_passes; ++refine_pass) {
203 bool all_converged = true;
204 // Start at the right and move left because insertions will make the length increase
205 for (int iexpansion = static_cast<int>(expansions.size())-1; iexpansion >= 0; --iexpansion) {
206 auto& expan = expansions[iexpansion];
207 auto err = M_element_norm(expan.coeff(), M);
208// std::cout << "err: " << err << std::endl;
209 if (err > tol) {
210 // Splitting is required, do a dyadic split
211 auto xmid = (expan.xmin() + expan.xmax()) / 2;
212 CE_t newleft{builder(expan.xmin(), xmid)};
213 CE_t newright{builder(xmid, expan.xmax())};
214
215 expansions.at(iexpansion) = std::move(newleft);
216 expansions.insert(expansions.begin() + iexpansion+1, newright);
217 all_converged = false;
218 }
219// std::cout << expansions.size() << std::endl;
220 }
221 if (callback) {
222 callback.value()(refine_pass, expansions);
223 }
224 if (all_converged) { break; }
225 }
226 return expansions;
227}
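// Hypothetical usage sketch (assumed calling code, not part of the header): build a
// piecewise degree-12 Chebyshev approximation of f(x) = exp(x) on [0, 5], splitting
// intervals in half until the coefficient-decay criterion above is met:
//
//   using CE_t = ChebyshevExpansion<std::vector<double>>;
//   auto f = [](long /*i*/, long /*Npts*/, double x) -> double { return std::exp(x); };
//   auto pieces = detail::dyadic_splitting<decltype(f), std::vector<CE_t>>(12, f, 0.0, 5.0);
//   // pieces covers [0, 5]; pieces[k].eval(x) evaluates the k-th piece on its subinterval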
228
229
230}
231
242template<typename ArrayType>
243class ChebyshevExpansion{
244private:
245 double m_xmin,
246 m_xmax;
247 ArrayType m_coeff;
248public:
250
252 ChebyshevExpansion(double xmin, double xmax, const ArrayType& coeff) : m_xmin(xmin), m_xmax(xmax), m_coeff(coeff){};
253
258
260 const auto xmin() const { return m_xmin; }
261
263 const auto xmax() const { return m_xmax; }
264
266 const auto& coeff() const { return m_coeff; }
267
271 template<typename T>
272 auto eval(const T& x) const{
273 // Scale to (-1, 1)
274 T xscaled = (2.0*x - (m_xmax + m_xmin)) / (m_xmax - m_xmin);
275 int Norder = static_cast<int>(m_coeff.size()) - 1;
276 T u_k = 0, u_kp1 = m_coeff[Norder], u_kp2 = 0;
277 for (int k = Norder-1; k > 0; k--){ // k must be signed!
278 // Do the recurrent calculation
279 u_k = 2.0*xscaled*u_kp1 - u_kp2 + m_coeff[k];
280 // Update the values
281 u_kp2 = u_kp1; u_kp1 = u_k;
282 }
283 T retval = m_coeff[0] + xscaled*u_kp1 - u_kp2; // This seems inefficient but required to ensure that Eigen::Array are evaluated and an expression is not returned
284 return retval;
285 }
286
288 template<typename T>
289 auto eval_many(const T& x, T& y) const{
290 if (x.size() != y.size()){ throw std::invalid_argument("x and y are not the same size"); }
291 for (auto i = 0; i < x.size(); ++i){ y(i) = eval(x(i)); }
292 }
293
295 template<typename T>
296 auto eval_manyC(const T x[], T y[], std::size_t N) const{
297 for (std::size_t i = 0; i < N; ++i){ y[i] = eval(x[i]); }
298 }
299
301 template<typename T>
302 auto eval_Eigen(const T& x, T& y) const{
303 if (x.size() != y.size()){ throw std::invalid_argument("x and y are not the same size"); }
304 y = eval(x);
305 }
306
308 Eigen::ArrayXd get_nodes_realworld() const {
309 Eigen::Index N = m_coeff.size()-1;
310 Eigen::ArrayXd nodes = (Eigen::ArrayXd::LinSpaced(N + 1, 0, N).array()*EIGEN_PI / N).cos();
311 return ((m_xmax - m_xmin)*nodes + (m_xmax + m_xmin))*0.5;
312 }
313
327 auto solve_for_x_count(double y, double a, double b, unsigned int bits, std::size_t max_iter, double boundsytol) const{
328 using namespace boost::math::tools;
329 std::size_t counter = 0;
330 auto f = [&](double x){ counter++; return eval(x) - y; };
331 double fa = f(a);
332 if (std::abs(fa) < boundsytol){ return std::make_tuple(a, std::size_t{1}); }
333 double fb = f(b);
334 if (std::abs(fb) < boundsytol){ return std::make_tuple(b, std::size_t{2}); }
335 boost::math::uintmax_t max_iter_ = static_cast<boost::math::uintmax_t>(max_iter);
336 auto [l, r] = toms748_solve(f, a, b, fa, fb, eps_tolerance<double>(bits), max_iter_);
337 return std::make_tuple((r+l)/2.0, counter);
338 }
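// Illustrative note: bits sets the TOMS748 stopping tolerance via
// boost::math::tools::eps_tolerance<double>(bits), max_iter caps the iteration count, and
// boundsytol short-circuits the solve when a bracket endpoint already satisfies
// |eval(x) - y| < boundsytol. A minimal sketch, assuming ce is a ChebyshevExpansion that
// is monotonic on [a, b] and crosses the value y inside it:
//
//   auto [x, fevals] = ce.solve_for_x_count(y, a, b, 64, 100, 1e-14);
//   // x satisfies ce.eval(x) ~= y; fevals counts the calls to eval() made by the solver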
339
342 auto solve_for_x(double y, double a, double b, unsigned int bits, std::size_t max_iter, double boundsytol) const{
343 return std::get<0>(solve_for_x_count(y, a, b, bits, max_iter, boundsytol));
344 }
345
347 template<typename T>
348 auto solve_for_x_many(const T& y, double a, double b, unsigned int bits, std::size_t max_iter, double boundsytol, T& x, T& counts) const{
349 if (x.size() != y.size()){ throw std::invalid_argument("x and y are not the same size"); }
350 for (auto i = 0; i < x.size(); ++i){ std::tie(x(i), counts(i)) = solve_for_x_count(y(i), a, b, bits, max_iter, boundsytol); }
351 }
352
354 template<typename T>
355 auto solve_for_x_manyC(const T y[], std::size_t N, double a, double b, unsigned int bits, std::size_t max_iter, double boundsytol, T x[], T counts[]) const{
356 for (std::size_t i = 0; i < N; ++i){
357 std::tie(x[i], counts[i]) = solve_for_x_count(y[i], a, b, bits, max_iter, boundsytol);
358 }
359 }
360
361 ArrayType do_derivs(std::size_t Nderiv) const{
362 // See Mason and Handscomb, p. 34, Eq. 2.52
 363 // and example in https://github.com/numpy/numpy/blob/master/numpy/polynomial/chebyshev.py#L868-L964
364 ArrayType c = m_coeff;
365 for (std::size_t deriv_counter = 0; deriv_counter < Nderiv; ++deriv_counter) {
366 std::size_t N = c.size() - 1,
367 Nd = N - 1;
368 ArrayType cd(N);
369 for (std::size_t r = 0; r <= Nd; ++r) {
370 cd[r] = 0;
371 for (std::size_t k = r + 1; k <= N; ++k) {
372 // Terms where k-r is odd have values, otherwise, they are zero
373 if ((k - r) % 2 == 1) {
374 cd[r] += 2*k*c[k];
375 }
376 }
377 // The first term with r = 0 is divided by 2 (the single prime in Mason and Handscomb, p. 34, Eq. 2.52)
378 if (r == 0) {
379 cd[r] /= 2;
380 }
381 // Rescale the values if the range is not [-1,1]. Arrives from the derivative of d(xreal)/d(x_{-1,1})
382 cd[r] /= (m_xmax-m_xmin)/2.0;
383 }
384 if (Nderiv == 1) {
385 return cd;
386 }
387 else{
388 c = cd;
389 }
390 }
391 return c;
392 }
393};
394
395static_assert(std::is_move_assignable_v<ChebyshevExpansion<std::vector<double>>>);
396//static_assert(std::is_copy_assignable_v<ChebyshevExpansion<std::vector<double>>>);
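// Hypothetical usage sketch (assumed calling code, not part of the header): with
// ArrayType = std::vector<double>, coefficients {c_0, c_1, c_2} = {0, 0, 1} represent
// T_2(x) = 2*x^2 - 1 mapped onto the interval [0, 2]:
//
//   ChebyshevExpansion<std::vector<double>> ce(0.0, 2.0, {0.0, 0.0, 1.0});
//   double y = ce.eval(1.5);                                   // xscaled = 0.5, so y = -0.5
//   double x = ce.solve_for_x(-0.5, 1.0, 2.0, 64, 100, 1e-14); // recovers x ~= 1.5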
397
399struct MonotonicExpansionMatch{
400 std::size_t idx;
401 double ymin,
402 ymax,
403 xmin,
404 xmax;
406 bool contains_y (double y) const { return y >= ymin && y <= ymax; }
407};
408
410struct IntervalMatch{
411 std::vector<MonotonicExpansionMatch> expansioninfo;
412 double xmin,
413 xmax,
414 ymin,
415 ymax;
417 bool contains_y (double y) const { return y >= ymin && y <= ymax; }
418};
419
429template<typename ArrayType=Eigen::ArrayXd>
430class ChebyshevApproximation1D{
431private:
432 const double thresh_imag = 1e-15;
433 std::vector<ChebyshevExpansion<ArrayType>> m_expansions;
434 std::vector<double> m_x_at_extrema;
435 std::vector<IntervalMatch> m_monotonic_intervals;
436
437 Eigen::ArrayXd head(const Eigen::ArrayXd& c, Eigen::Index N) const{
438 return c.head(N);
439 }
440 std::vector<double> head(const std::vector<double>& c, Eigen::Index N) const{
441 return std::vector<double>(c.begin(), c.begin()+N);
442 }
443
448 std::vector<double> determine_extrema(const std::decay_t<decltype(m_expansions)>& expansions, double thresh_im) const{
449 std::vector<double> x_at_extrema;
450 Eigen::MatrixXd companion_matrix, cprime, D;
451 for (auto& expan : expansions){
452 // Get the coefficients of the derivative's expansion (for x in [-1, 1])
453 auto cd = expan.do_derivs(1);
454 // First, right-trim the coefficients that are equal to zero
455 int ilastnonzero = static_cast<int>(cd.size())-1;
456 for (int i = static_cast<int>(cd.size())-1; i >= 0; --i){
457 if (std::abs(cd[i]) != 0){
458 ilastnonzero = i; break;
459 }
460 }
461 if (ilastnonzero != static_cast<int>(cd.size()-1)){
 462 cd = head(cd, ilastnonzero+1); // keep the coefficients up to and including the last nonzero one
463 }
464 // Then do eigenvalue rootfinding after balancing
465 // Define working buffers here to avoid allocations all over the place
466 if (companion_matrix.rows() != static_cast<int>(cd.size()-1)){
467 companion_matrix.resize(cd.size()-1, cd.size()-1); companion_matrix.setZero();
468 D.resizeLike(companion_matrix); D.setZero();
469 cprime.resizeLike(companion_matrix); cprime.setZero();
470 }
471 detail::companion_matrix_transposed(cd, companion_matrix);
472 cprime = companion_matrix;
473
474 detail::balance_matrix(companion_matrix, cprime, D);
 475 for (auto &root : cprime.eigenvalues()){ // use the balanced matrix (same spectrum, better conditioning)
476 // re_n11 is in [-1,1], need to rescale back to real units
477 auto re_n11 = root.real(), im = root.imag();
478 if (std::abs(im) < thresh_im){
479 if (re_n11 >= -1 && re_n11 <= 1){
480 x_at_extrema.push_back(((expan.xmax() - expan.xmin())*re_n11 + (expan.xmax() + expan.xmin())) / 2.0);
481 }
482 }
483 }
484 }
485 std::sort(x_at_extrema.begin(), x_at_extrema.end());
486 return x_at_extrema;
487 }
488
493 std::vector<IntervalMatch> build_monotonic_intervals(const std::vector<double>& x_at_extrema) const{
494 std::vector<IntervalMatch> intervals;
495
496 auto sort = [](double& x, double &y){ if (x > y){ std::swap(x, y);} };
497
498 if (false){//x_at_extrema.empty()){
499 auto xmin = m_expansions.front().xmin();
500 auto xmax = m_expansions.back().xmax();
501 auto ymin = eval(xmin), ymax = eval(xmax);
502 sort(ymin, ymax);
 503 MonotonicExpansionMatch mem;
 504 mem.idx = 0;
505 mem.xmin = xmin;
506 mem.xmax = xmax;
507 mem.ymin = ymin;
508 mem.ymax = ymax;
509 IntervalMatch im;
510 im.expansioninfo = {mem};
511 im.xmin = mem.xmin;
512 im.xmax = mem.xmax;
513 im.ymin = mem.ymin;
514 im.ymax = mem.ymax;
515 intervals.push_back(im);
516 }
517 else{
518 auto newx = x_at_extrema;
519 newx.insert(newx.begin(), m_expansions.front().xmin());
520 newx.insert(newx.end(), m_expansions.back().xmax());
521
523 auto interval_intersection = [](const auto&t1, const auto& t2){
524 auto a = std::max(t1.xmin(), t2.xmin);
525 auto b = std::min(t1.xmax(), t2.xmax);
526 return std::make_tuple(a, b);
527 };
528
529 for (auto j = 0; j < static_cast<int>(newx.size()-1); ++j){
530 double xmin = newx[j], xmax = newx[j+1];
531 IntervalMatch im;
532 // Loop over the expansions that contain one of the values of x
533 // that intersect the interval defined by [xmin, xmax]
534 for (auto i = 0UL; i < m_expansions.size(); ++i){
535 struct A {double xmin, xmax; };
536 auto [a,b] = interval_intersection(m_expansions[i], A{xmin, xmax} );
537 if (a < b){
538 const auto&e = m_expansions[i];
 539 MonotonicExpansionMatch mem;
 540 mem.idx = i;
541 mem.xmin = a;
542 mem.xmax = b;
543 double ymin = e.eval(a), ymax = e.eval(b); sort(ymin, ymax);
544 mem.ymin = ymin;
545 mem.ymax = ymax;
546 im.expansioninfo.push_back(mem);
547 }
548 }
549 // These are the limits for the entire interval
550 im.xmin = xmin;
551 im.xmax = xmax;
552 double yminoverall = eval(xmin), ymaxoverall = eval(xmax); sort(yminoverall, ymaxoverall);
553 im.ymin = yminoverall;
554 im.ymax = ymaxoverall;
555 intervals.push_back(im);
556 }
557 }
558 return intervals;
559 }
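// Illustrative note: if, for example, y(x) has a single interior extremum at x*, then
// x_at_extrema = {x*} and two monotonic intervals [xmin, x*] and [x*, xmax] are built;
// each interval records which expansions overlap it and the y-range on each overlapping
// piece, so that get_x_for_y() below can bracket every solution of y(x) = y0 separately.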
560 double m_xmin,
561 m_xmax;
562
563public:
564
565 // Move constructor given a vector of expansions
 566 ChebyshevApproximation1D(std::vector<ChebyshevExpansion<ArrayType>>&& expansions) :
 567 m_expansions(std::move(expansions)),
568 m_x_at_extrema(determine_extrema(m_expansions, thresh_imag)),
569 m_monotonic_intervals(build_monotonic_intervals(m_x_at_extrema)),
570 m_xmin(get_expansions().front().xmin()),
571 m_xmax(get_expansions().back().xmax())
572 {}
573
 574 ChebyshevApproximation1D(const ChebyshevApproximation1D& other) :
 575 m_expansions(other.m_expansions),
576 m_x_at_extrema(other.m_x_at_extrema),
577 m_monotonic_intervals(other.m_monotonic_intervals),
578 m_xmin(other.xmin()),
579 m_xmax(other.xmax())
580 {}
581
583// ChebyshevApproximation1D& operator=(const ChebyshevApproximation1D& other) = default;
 584 ChebyshevApproximation1D& operator=(ChebyshevApproximation1D other) {
 585 std::swap(m_expansions, other.m_expansions);
586 std::swap(m_x_at_extrema, other.m_x_at_extrema);
587 std::swap(m_monotonic_intervals, other.m_monotonic_intervals);
588 std::swap(m_xmin, other.m_xmin);
589 std::swap(m_xmax, other.m_xmax);
590 return *this;
591 }
592
594 const auto& get_expansions() const { return m_expansions; }
595
597 const auto& get_x_at_extrema() const { return m_x_at_extrema; }
598
600 const auto& get_monotonic_intervals() const { return m_monotonic_intervals; }
601
603 const auto xmin() const { return m_xmin; }
604
606 const auto xmax() const { return m_xmax; }
607
610 bool is_monotonic() const {
611 return m_monotonic_intervals.size() == 1;
612 }
613
618 auto get_index(double x) const {
619
620 // https://proquest.safaribooksonline.com/9780321637413
621 // https://web.stanford.edu/class/archive/cs/cs107/cs107.1202/lab1/
622 auto midpoint_Knuth = [](int x, int y) {
623 return (x & y) + ((x ^ y) >> 1);
624 };
625
626 int iL = 0U, iR = static_cast<int>(m_expansions.size()) - 1, iM;
627 while (iR - iL > 1) {
628 iM = midpoint_Knuth(iL, iR);
629 if (x >= m_expansions[iM].xmin()) {
630 iL = iM;
631 }
632 else {
633 iR = iM;
634 }
635 }
636 return (x < m_expansions[iL].xmax()) ? iL : iR;
637 };
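// Illustrative note: the bit-twiddled midpoint (x & y) + ((x ^ y) >> 1) equals
// floor((x + y)/2) for the non-negative indices used here, without the possible overflow
// of (x + y)/2; e.g. for x = 6, y = 11 it gives (6 & 11) + ((6 ^ 11) >> 1) = 2 + 6 = 8.
// The bisection above thus locates the expansion whose interval contains x in O(log n).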
638
643 double eval(double x) const{
644 return m_expansions[get_index(x)].eval(x);
645 }
646
648 template<typename Container>
649 const auto eval_many(const Container& x, Container& y) const {
650 if (x.size() != y.size()){ throw std::invalid_argument("x and y are not the same size"); }
651 for (auto i = 0U; i < x.size(); ++i){
652 y(i) = eval(x(i));
653 }
654 }
655
657 template<typename Container>
658 const auto eval_manyC(const Container x[], Container y[], std::size_t N) const {
659 for (auto i = 0U; i < N; ++i){
660 y[i] = eval(x[i]);
661 }
662 }
663
665 const std::vector<IntervalMatch> get_intervals_containing_y(double y) const{
666 std::vector<IntervalMatch> matches;
667 for (auto & interval : m_monotonic_intervals){
668 if (y >= interval.ymin && y <= interval.ymax){
669 matches.push_back(interval);
670 }
671 }
672 return matches;
673 }
674
677 const auto get_x_for_y(double y, unsigned int bits, std::size_t max_iter, double boundsftol) const {
678 std::vector<std::pair<double, int>> solns;
679 for (const auto& interval: m_monotonic_intervals){
680 // Each interval is required to be monotonic internally, so if the value of
681 // y is within the y values at the endpoints it is a candidate
682 // for rootfinding, otherwise continue
683 if (interval.contains_y(y)){
684 // Now look at the expansions that intersect the interval
685 for (const auto& ei: interval.expansioninfo){
686 // Again, since the portions of the expansions are required
687 // to be monotonic, if it is contained then a solution must exist
688 if (ei.contains_y(y)){
689 const ChebyshevExpansion<ArrayType>& e = m_expansions[ei.idx];
690 auto [xvalue, num_steps] = e.solve_for_x_count(y, ei.xmin, ei.xmax, bits, max_iter, boundsftol);
691 solns.emplace_back(xvalue, static_cast<int>(num_steps));
692 }
693 }
694 }
695 }
696 return solns;
697 }
698
700 template<typename Container>
701 const auto count_x_for_y_many(const Container& y, unsigned int bits, std::size_t max_iter, double boundsftol, Container& x) const {
702 if (x.size() != y.size()){ throw std::invalid_argument("x and y are not the same size"); }
703 for (auto i = 0U; i < x.size(); ++i){
704 x(i) = get_x_for_y(y(i), bits, max_iter, boundsftol).size();
705 }
706 }
707
709 template<typename Container>
710 const auto count_x_for_y_manyC(const Container y[], size_t N, unsigned int bits, std::size_t max_iter, double boundsftol, Container x[]) const {
711 for (auto i = 0U; i < N; ++i){
712 x[i] = get_x_for_y(y[i], bits, max_iter, boundsftol).size();
713 }
714 }
715};
716
717static_assert(std::is_copy_constructible_v<ChebyshevApproximation1D<std::vector<double>>>);
718static_assert(std::is_copy_assignable_v<ChebyshevApproximation1D<std::vector<double>>>);
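// Hypothetical usage sketch (assumed calling code, not part of the header): a
// ChebyshevApproximation1D owns the pieces produced by dyadic_splitting and adds the
// interval bookkeeping needed to invert y(x) for x:
//
//   using CE_t = ChebyshevExpansion<std::vector<double>>;
//   auto f = [](long, long, double x) -> double { return std::exp(x); };
//   ChebyshevApproximation1D<std::vector<double>> approx(
//       detail::dyadic_splitting<decltype(f), std::vector<CE_t>>(12, f, 0.0, 5.0));
//   double y = approx.eval(2.5);                          // ~= exp(2.5)
//   auto solns = approx.get_x_for_y(y, 64, 100, 1e-12);   // one (x, iteration count) pair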
719
720struct SuperAncillaryTwoPhaseSolution{
721 double T,
722 q;
723 std::size_t counter;
724};
725
734template<typename ArrayType=Eigen::ArrayXd>
735class SuperAncillary{
736private:
737 // These must always be present
738 ChebyshevApproximation1D<ArrayType> m_rhoL,
739 m_rhoV,
740 m_p;
741
742 // These ones *may* be present
743 std::optional<ChebyshevApproximation1D<ArrayType>> m_hL,
744 m_hV,
745 m_sL,
746 m_sV,
747 m_uL,
748 m_uV,
749 m_invlnp;
750
751 double m_Tmin;
752 double m_Tcrit_num;
753 double m_rhocrit_num;
754 double m_pmin;
755 double m_pmax;
756
757
758
763 auto loader(const nlohmann::json &j, const std::string& key){
764 std::vector<ChebyshevExpansion<ArrayType>> buf;
765 // If you want to use Eigen...
766 // auto toeig = [](const nlohmann::json &j) -> Eigen::ArrayXd{
767 // auto vec = j.get<std::vector<double>>();
768 // return Eigen::Map<const Eigen::ArrayXd>(&vec[0], vec.size());
769 // };
770 // for (auto& block : j[key]){
771 // buf.emplace_back(block.at("xmin"), block.at("xmax"), toeig(block.at("coef")));
772 // }
773 for (auto& block : j[key]){
774 buf.emplace_back(block.at("xmin"), block.at("xmax"), block.at("coef"));
775 }
776 return buf;
777 }
778
782 auto make_invlnp(Eigen::Index Ndegree){
783
784 auto pmin = m_p.eval(m_p.xmin());
785 auto pmax = m_p.eval(m_p.xmax());
786 // auto N = m_p.get_expansions().front().coeff().size()-1;
787 const double EPSILON = std::numeric_limits<double>::epsilon();
788
789 auto func = [&](long i, long Npts, double lnp) -> double{
790 double p = exp(lnp);
791 auto solns = m_p.get_x_for_y(p, 64, 100U, 1e-8);
792
793 if (solns.size() != 1){
794 if ((i == 0 || i == Npts-1) && ( p > pmin*(1-EPSILON*1000) && p < pmin*(1+EPSILON*1000))){
795 return m_p.get_monotonic_intervals()[0].xmin;
796 }
797 if ((i == 0 || i == Npts-1) && ( p > pmax*(1-EPSILON*1000) && p < pmax*(1+EPSILON*1000))){
798 return m_p.get_monotonic_intervals()[0].xmax;
799 }
800 std::stringstream ss;
 801 ss << std::setprecision(20) << "Must have one solution for T(p), got " << solns.size() << " for " << p << " Pa; limits are [" << pmin << " Pa, " << pmax << " Pa]; i is " << i;
802 throw std::invalid_argument(ss.str());
803 }
804 auto [T, iters] = solns[0];
805 return T;
806 };
807
808 using CE_t = std::vector<ChebyshevExpansion<ArrayType>>;
809 return detail::dyadic_splitting<decltype(func), CE_t>(Ndegree, func, log(pmin), log(pmax), 3, 1e-12, 26);
810 }
811
812 // using PropertyPairs = properties::PropertyPairs; ///< A convenience alias to save some typing
813
814public:
817 SuperAncillary(const nlohmann::json &j) :
818 m_rhoL(std::move(loader(j, "jexpansions_rhoL"))),
819 m_rhoV(std::move(loader(j, "jexpansions_rhoV"))),
820 m_p(std::move(loader(j, "jexpansions_p"))),
821 m_Tmin(m_p.xmin()),
822 m_Tcrit_num(j.at("meta").at("Tcrittrue / K")),
823 m_rhocrit_num(j.at("meta").at("rhocrittrue / mol/m^3")),
824 m_pmin(m_p.eval(m_p.xmin())),
825 m_pmax(m_p.eval(m_p.xmax()))
826 {};
827
831 SuperAncillary(const std::string& s) : SuperAncillary(nlohmann::json::parse(s)) {};
832
838 const auto& get_approx1d(char k, short Q) const {
839 auto get_or_throw = [&](const auto& v) -> const auto& {
840 if (v){
841 return v.value();
842 }
843 else{
844 throw std::invalid_argument("unable to get the variable "+std::string(1, k)+", make sure it has been added to superancillary");
845 }
846 };
847 switch(k){
848 case 'P': return m_p;
849 case 'D': return (Q == 0) ? m_rhoL : m_rhoV;
850 case 'H': return (Q == 0) ? get_or_throw(m_hL) : get_or_throw(m_hV);
851 case 'S': return (Q == 0) ? get_or_throw(m_sL) : get_or_throw(m_sV);
852 case 'U': return (Q == 0) ? get_or_throw(m_uL) : get_or_throw(m_uV);
853 default: throw std::invalid_argument("Bad key of '" + std::string(1, k) + "'");
854 }
855 }
857 const auto& get_invlnp(){
858 // Lazily construct on the first access
859 if (!m_invlnp){
 860 // Degree of expansion is the same as that of the pressure expansion
861 auto Ndegree = m_p.get_expansions()[0].coeff().size()-1;
862 m_invlnp = make_invlnp(Ndegree);
863 }
864 return m_invlnp;
865 }
867 const double get_pmin() const{ return m_pmin; }
869 const double get_pmax() const{ return m_pmax; }
871 const double get_Tmin() const{ return m_Tmin; }
873 const double get_Tcrit_num() const{ return m_Tcrit_num; }
875 const double get_rhocrit_num() const{ return m_rhocrit_num; }
876
882 void add_variable(char var, const std::function<double(double, double)> & caller){
883 Eigen::MatrixXd Lmat, Umat;
884 std::tie(Lmat, Umat) = detail::get_LU_matrices(12);
885
886 auto builder = [&](char var, auto& variantL, auto& variantV){
887 std::vector<ChebyshevExpansion<ArrayType>> newexpL, newexpV;
888 const auto& expsL = get_approx1d('D', 0);
889 const auto& expsV = get_approx1d('D', 1);
890 if (expsL.get_expansions().size() != expsV.get_expansions().size()){
891 throw std::invalid_argument("L&V are not the same size");
892 }
893 for (auto i = 0U; i < expsL.get_expansions().size(); ++i){
894 const auto& expL = expsL.get_expansions()[i];
895 const auto& expV = expsV.get_expansions()[i];
896 const auto& T = expL.get_nodes_realworld();
897 // Get the functional values at the Chebyshev nodes
898 Eigen::ArrayXd funcL = Umat*expL.coeff().matrix();
899 Eigen::ArrayXd funcV = Umat*expV.coeff().matrix();
900 // Apply the function inplace to do the transformation
901 // of the functional values at the nodes
902 for (auto j = 0; j < funcL.size(); ++j){
903 funcL(j) = caller(T(j), funcL(j));
904 funcV(j) = caller(T(j), funcV(j));
905 }
906 // And now rebuild the expansions by left-multiplying by the L matrix
907 newexpL.emplace_back(expL.xmin(), expL.xmax(), (Lmat*funcL.matrix()).eval());
908 newexpV.emplace_back(expV.xmin(), expV.xmax(), (Lmat*funcV.matrix()).eval());
909 }
910
911 variantL.emplace(std::move(newexpL));
912 variantV.emplace(std::move(newexpV));
913 };
914
915 switch(var){
916 case 'H': builder(var, m_hL, m_hV); break;
917 case 'S': builder(var, m_sL, m_sV); break;
918 case 'U': builder(var, m_uL, m_uV); break;
919 default: throw std::invalid_argument("nope");
920 }
921 }
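// Hypothetical usage sketch (assumed calling code; h_of_T_rho is a placeholder for the
// caller's own single-phase enthalpy model, not something defined in this header):
// add_variable builds saturated-liquid and saturated-vapor expansions for 'H', 'S' or 'U'
// by evaluating the callback at the Chebyshev nodes of the existing density expansions:
//
//   SuperAncillary<std::vector<double>> anc(json_string);
//   anc.add_variable('H', [](double T, double rho){ return h_of_T_rho(T, rho); });
//   double hL = anc.eval_sat(300.0, 'H', 0);  // saturated-liquid enthalpy at 300 K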
922
928 double eval_sat(double T, char k, short Q) const {
929 if (Q == 1 || Q == 0){
930 return get_approx1d(k, Q).eval(T);
931 }
932 else{
933 throw std::invalid_argument("invalid_value for Q");
934 }
935 }
936
940 template <typename Container>
941 void eval_sat_many(const Container& T, char k, short Q, Container& y) const {
942 if (T.size() != y.size()){ throw std::invalid_argument("T and y are not the same size"); }
943 const auto& approx = get_approx1d(k, Q);
944 for (auto i = 0; i < T.size(); ++i){
945 y(i) = approx.eval(T(i));
946 }
947 }
948
952 template <typename Container>
953 void eval_sat_manyC(const Container T[], std::size_t N, char k, short Q, Container y[]) const {
954 // if (T.size() != y.size()){ throw std::invalid_argument("T and y are not the same size"); }
955 const auto& approx = get_approx1d(k, Q);
956 for (std::size_t i = 0; i < N; ++i){
957 y[i] = approx.eval(T[i]);
958 }
959 }
960
970 auto solve_for_T(double propval, char k, bool Q, unsigned int bits=64, unsigned int max_iter=100, double boundsftol=1e-13) const{
971 return get_approx1d(k, Q).get_x_for_y(propval, bits, max_iter, boundsftol);
972 }
973
980 auto get_vaporquality(double T, double propval, char k) const {
981 if (k == 'D'){
 982 // Need to special-case here because v = q*v_V + (1-q)*v_L, but it is NOT the case that rho = q*rho_V + (1-q)*rho_L
983 double v_L = 1/get_approx1d('D', 0).eval(T);
984 double v_V = 1/get_approx1d('D', 1).eval(T);
985 return (1/propval-v_L)/(v_V-v_L);
986 }
987 else{
988 double L = get_approx1d(k, 0).eval(T);
989 double V = get_approx1d(k, 1).eval(T);
990 return (propval-L)/(V-L);
991 }
992 }
993
999 auto get_T_from_p(double p) {
1000 return get_invlnp().value().eval(log(p));
1001 }
1002
1010 auto get_yval(double T, double q, char k) const{
1011
1012 if (k == 'D'){
 1013 // Need to special-case here because v = q*v_V + (1-q)*v_L, but it is NOT the case that rho = q*rho_V + (1-q)*rho_L
1014 double v_L = 1/get_approx1d('D', 0).eval(T);
1015 double v_V = 1/get_approx1d('D', 1).eval(T);
1016 double v = q*v_V + (1-q)*v_L;
1017 return 1/v; // rho = 1/v
1018 }
1019 else{
1020 double L = get_approx1d(k, 0).eval(T);
1021 double V = get_approx1d(k, 1).eval(T);
1022 return q*V + (1-q)*L;
1023 }
1024 }
1025
1027 template <typename Container>
1028 void get_yval_many(const Container& T, char k, const Container& q, Container& y) const{
1029 if (T.size() != y.size() || T.size() != q.size()){ throw std::invalid_argument("T, q, and y are not all the same size"); }
1030
1031 const auto& L = get_approx1d(k, 0);
1032 const auto& V = get_approx1d(k, 1);
1033
1034 if (k == 'D'){
1035 for (auto i = 0; i < T.size(); ++i){
 1036 // Need to special-case here because v = q*v_V + (1-q)*v_L, but it is NOT the case that rho = q*rho_V + (1-q)*rho_L
1037 double v_L = 1.0/L.eval(T(i));
1038 double v_V = 1.0/V.eval(T(i));
1039 double v = q(i)*v_V + (1-q(i))*v_L;
1040 y(i) = 1/v;
1041 }
1042 }
1043 else{
1044 for (auto i = 0; i < T.size(); ++i){
1045 y(i) = q(i)*V.eval(T(i)) + (1-q(i))*L.eval(T(i));
1046 }
1047 }
1048 }
1056 auto get_all_intersections(const char k, const double val, unsigned int bits, std::size_t max_iter, double boundsftol) const{
1057 const auto& L = get_approx1d(k, 0);
1058 const auto& V = get_approx1d(k, 1);
1059 auto TsatL = L.get_x_for_y(val, bits, max_iter, boundsftol);
1060 const auto TsatV = V.get_x_for_y(val, bits, max_iter, boundsftol);
1061 for (auto &&el : TsatV){
1062 TsatL.push_back(el);
1063 }
1064// TsatL.insert(TsatL.end(),
1065// std::make_move_iterator(TsatV.begin() + TsatV.size()),
1066// std::make_move_iterator(TsatV.end()));
1067 return TsatL;
1068 }
1069
1084 std::optional<SuperAncillaryTwoPhaseSolution> iterate_for_Tq_XY(double Tmin, double Tmax, char ch1, double val1, char ch2, double val2, unsigned int bits, std::size_t max_iter, double boundsftol) const {
1085
1086 std::size_t counter = 0;
1087 auto f = [&](double T_){
1088 counter++;
1089 double q_fromv1 = get_vaporquality(T_, val1, ch1);
1090 double resid = get_yval(T_, q_fromv1, ch2) - val2;
1091 return resid;
1092 };
1093 using namespace boost::math::tools;
1094 double fTmin = f(Tmin), fTmax = f(Tmax);
1095
1096 // First we check if we are converged enough (TOMS748 does not stop based on function value)
1097 double T;
1098
1099 // A little lambda to make it easier to return
1100 // in different logical branches
1101 auto returner = [&](){
 1102 SuperAncillaryTwoPhaseSolution soln;
 1103 soln.T = T;
1104 soln.q = get_vaporquality(T, val1, ch1);
1105 soln.counter = counter;
1106 return soln;
1107 };
1108
1109 if (std::abs(fTmin) < boundsftol){
1110 T = Tmin; return returner();
1111 }
1112 if (std::abs(fTmax) < boundsftol){
1113 T = Tmax; return returner();
1114 }
1115 if (fTmin*fTmax > 0){
1116 // No sign change, this means that the inputs are not within the two-phase region
1117 // and thus no iteration is needed
1118 return std::nullopt;
1119 }
1120 // Neither bound meets the convergence criterion, we need to iterate on temperature
1121 try{
1122 boost::math::uintmax_t max_iter_ = static_cast<boost::math::uintmax_t>(max_iter);
1123 auto [l, r] = toms748_solve(f, Tmin, Tmax, fTmin, fTmax, eps_tolerance<double>(bits), max_iter_);
1124 T = (r+l)/2.0;
1125 return returner();
1126 }
1127 catch(...){
1128 std::cout << "fTmin,fTmax: " << fTmin << "," << fTmax << std::endl;
1129 throw;
1130 }
1131 }
1132
1142 std::optional<SuperAncillaryTwoPhaseSolution> solve_for_Tq_DX(const double rho, const double propval, const char k, unsigned int bits, std::size_t max_iter, double boundsftol) const {
1143
1144 const auto& Lrho = get_approx1d('D', 0);
1145 const auto& Vrho = get_approx1d('D', 1);
1146 auto Tsat = get_all_intersections(k, propval, bits, max_iter, boundsftol);
1147 std::size_t rhosat_soln_count = Tsat.size();
1148
1149 std::tuple<double, double> Tsearchrange;
1150 if (rhosat_soln_count == 1){
1151 // Search the complete range from Tmin to the intersection point where rhosat(T) = rho
1152 // obtained just above
 1153 Tsearchrange = std::make_tuple(Lrho.xmin()*0.999, std::get<0>(Tsat[0]));
1154 }else if (rhosat_soln_count == 2){
1155 double y1 = std::get<0>(Tsat[0]), y2 = std::get<0>(Tsat[1]);
 1156 if (y2 < y1){ std::swap(y1, y2); } // Required to be sorted in increasing value
1157 Tsearchrange = std::make_tuple(y1, y2);
1158 }
1159 else{
1160 throw std::invalid_argument("cannot have number of solutions other than 1 or 2; got "+std::to_string(rhosat_soln_count)+" solutions");
1161 }
1162 auto [a, b] = Tsearchrange;
1163 return iterate_for_Tq_XY(a, b, 'D', rho, k, propval, bits, max_iter, boundsftol);
1164 }
1165
1167 template <typename Container>
1168 void solve_for_Tq_DX_many(const Container& rho, const Container& propval, const char k, unsigned int bits, std::size_t max_iter, double boundsftol, Container& T, Container& q, Container& count){
1169 if (std::set<std::size_t>({rho.size(), propval.size(), T.size(), q.size(), count.size()}).size() != 1){
1170 throw std::invalid_argument("rho, propval, T, q, count are not all the same size");
1171 }
1172 for (auto i = 0U; i < T.size(); ++i){
1173 auto osoln = solve_for_Tq_DX(rho(i), propval(i), k, bits, max_iter, boundsftol);
1174 if (osoln){
1175 const auto& soln = osoln.value();
1176 T(i) = soln.T;
1177 q(i) = soln.q;
1178 count(i) = soln.counter;
1179 }
1180 else{
1181 T(i) = -1;
1182 q(i) = -1;
1183 count(i) = -1;
1184 }
1185 }
1186 }
1187
1188// /**
 1189// The high-level function used to carry out a solution. It handles all the different permutations of variables and delegates to lower-level functions to actually do the calculations
1190
1191// \param pair The enumerated pair of thermodynamic variables being provided
1192// \param val1 The first value
1193// \param val2 The second value
1194// */
1195// auto flash(PropertyPairs pair, double val1, double val2) const -> std::optional<SuperAncillaryTwoPhaseSolution>{
1196// double T, q;
1197// std::size_t counter = 0;
1198// double boundsftol = 1e-12;
1199
1200// auto returner = [&](){
1201// SuperAncillaryTwoPhaseSolution soln;
1202// soln.T = T;
1203// soln.q = q;
1204// soln.counter = counter;
1205// return soln;
1206// };
1207
1208// auto get_T_other = [&](){
1209// switch (pair){
1210// case PropertyPairs::DT: return std::make_tuple(val2, val1, 'D');
1211// case PropertyPairs::HT: return std::make_tuple(val2, val1, 'H');
1212// case PropertyPairs::ST: return std::make_tuple(val2, val1, 'S');
1213// case PropertyPairs::TU: return std::make_tuple(val1, val2, 'U');
1214// default:
1215// throw std::invalid_argument("not valid");
1216// };
1217// };
1218// auto get_p_other = [&](){
1219// switch (pair){
1220// case PropertyPairs::DP: return std::make_tuple(val2, val1, 'D');
1221// case PropertyPairs::HP: return std::make_tuple(val2, val1, 'H');
1222// case PropertyPairs::PS: return std::make_tuple(val1, val2, 'S');
1223// case PropertyPairs::PU: return std::make_tuple(val1, val2, 'U');
1224// default:
1225// throw std::invalid_argument("not valid");
1226// };
1227// };
1228// auto get_rho_other = [&](){
1229// switch (pair){
1230// case PropertyPairs::DH: return std::make_tuple(val1, val2, 'H');
1231// case PropertyPairs::DS: return std::make_tuple(val1, val2, 'S');
1232// case PropertyPairs::DU: return std::make_tuple(val1, val2, 'U');
1233// default:
1234// throw std::invalid_argument("not valid");
1235// };
1236// };
1237
1238// // Given the arguments, unpack them into (xchar, xval, ychar, yval) where x is the
1239// // variable of convenience that will be used to do the interval intersection and then
1240// // the iteration will be in the other variable
1241// auto parse_HSU = [&](){
1242// switch (pair){
1243// case PropertyPairs::HS: return std::make_tuple('S', val2, 'H', val1);
1244// case PropertyPairs::SU: return std::make_tuple('S', val1, 'U', val2);
1245// case PropertyPairs::HU: return std::make_tuple('H', val1, 'U', val2);
1246// default:
1247// throw std::invalid_argument("not valid");
1248// };
1249// };
1250
1251// switch(pair){
1252// // Case 0, PT is always single-phase, by definition
1253// case PropertyPairs::PT: throw std::invalid_argument("PT inputs are trivial");
1254
1255// // Case 1, T is a given variable
1256// case PropertyPairs::DT:
1257// case PropertyPairs::HT:
1258// case PropertyPairs::ST:
1259// case PropertyPairs::TU:
1260// {
1261// auto [Tval, other, otherchar] = get_T_other();
1262// q = get_vaporquality(Tval, other, otherchar);
1263// T = Tval;
1264// if (0 <= q && q <= 1){
1265// return returner();
1266// }
1267// break;
1268// }
1269// // Case 2, p is a given variable, p gets converted to T, and then q calculated
1270// case PropertyPairs::DP:
1271// case PropertyPairs::HP:
1272// case PropertyPairs::PS:
1273// case PropertyPairs::PU:
1274// {
1275// auto [p, other, otherchar] = get_p_other();
1276// T = get_T_from_p(p);
1277// q = get_vaporquality(T, other, otherchar); // Based on the T and the other variable
1278// if (0 <= q && q <= 1){
1279// return returner();
1280// }
1281// break;
1282// }
1283// // Case 3, rho is a given variable, special case that because it is relatively simple
1284// // and only water and heavy water have more than one density solution along the saturation curve
1285// case PropertyPairs::DH:
1286// case PropertyPairs::DS:
1287// case PropertyPairs::DU:
1288// {
1289// auto [rho, otherval, otherchar] = get_rho_other();
1290// auto optsoln = solve_for_Tq_DX(rho, otherval, otherchar, 64, 100, boundsftol);
1291// if (optsoln){
1292// const auto& soln = optsoln.value();
1293// T = soln.T;
1294// q = soln.q;
1295// counter = soln.counter;
1296// return returner();
1297// }
1298// break;
1299// }
1300// // Case 4, handle all the cases where complicated interval checks are
1301// // necessary
1302// case PropertyPairs::HS:
1303// case PropertyPairs::HU:
1304// case PropertyPairs::SU:
1305// {
1306// auto [xchar, xval, ychar, yval] = parse_HSU();
1307// auto Tsat = get_all_intersections(xchar, xval, 64, 100, boundsftol);
1308// auto sort_predicate = [](const auto& x, const auto& y) {
1309// return std::get<0>(x) < std::get<0>(y);
1310// };
1311// std::sort(Tsat.begin(), Tsat.end(), sort_predicate);
1312// // std::cout << Tsat.size() << " T crossings for " << xval << std::endl;
1313// // for (auto & el : Tsat){
1314// // std::cout << std::get<0>(el) << std::endl;
1315// // }
1316// if (Tsat.size() == 1){
1317// // A single crossing, in which the other temperature bound for iteration
1318// // is assumed to be the minimum temperature of the superancillary
1319// double Tmin = get_approx1d('D', 0).xmin*0.9999;
1320// double Tmax = std::get<0>(Tsat[0]);
1321// auto o = iterate_for_Tq_XY(Tmin, Tmax, xchar, xval, ychar, yval, 64, 100, boundsftol);
1322// if (o){ return o.value(); }
1323// }
1324// else if (Tsat.size() % 2 == 0){ // Even number of crossings
1325// // neighboring intersections should bound the solution, or if not, the point is single-phase
1326// for (auto i = 0; i < Tsat.size(); i += 2){
1327// auto o = iterate_for_Tq_XY(std::get<0>(Tsat[i]), std::get<0>(Tsat[i+1]), xchar, xval, ychar, yval, 64, 100, boundsftol);
1328// if (o){ return o.value(); }
1329// }
1330// }
1331// else{
1332// throw std::invalid_argument("Don't know what to do about this number of T crossings ("+std::to_string(Tsat.size())+") yet");
1333// }
1334// break;
1335// }
1336// default:
1337// throw std::invalid_argument("These inputs are not yet supported in flash");
1338// }
1339// return std::nullopt;
1340// }
1341
1342// /// A vectorized version of the flash function used in the Python interface for profiling
1343// template <typename Container>
1344// void flash_many(const PropertyPairs ppair, const Container& val1, const Container& val2, Container& T, Container& q, Container& count){
1345// if (std::set<std::size_t>({val1.size(), val2.size(), T.size(), q.size(), count.size()}).size() != 1){
1346// throw std::invalid_argument("val1, val2, T, q, count are not all the same size");
1347// }
1348// for (auto i = 0U; i < T.size(); ++i){
1349// try{
1350// auto osoln = flash(ppair, val1(i), val2(i));
1351// if (osoln){
1352// const auto& soln = osoln.value();
1353// T(i) = soln.T;
1354// q(i) = soln.q;
1355// count(i) = soln.counter;
1356// }
1357// else{
1358// T(i) = -1;
1359// q(i) = -1;
1360// count(i) = -1;
1361// }
1362// }
1363// catch(std::exception&e){
1364// // std::cout << e.what() << " for " << val1(i) << "," << val2(i) << std::endl;
1365// T(i) = -2;
1366// q(i) = -2;
1367// count(i) = -2;
1368// }
1369
1370// }
1371// }
1372};
1373
1374static_assert(std::is_copy_constructible_v<SuperAncillary<std::vector<double>>>);
1375static_assert(std::is_copy_assignable_v<SuperAncillary<std::vector<double>>>);
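// Hypothetical end-to-end sketch (assumed calling code; the JSON layout is the one the
// constructor reads: "jexpansions_rhoL", "jexpansions_rhoV", "jexpansions_p" and a "meta"
// block holding the true critical point):
//
//   SuperAncillary<std::vector<double>> anc(json_string_for_fluid);
//   double T = 300.0;
//   double p = anc.eval_sat(T, 'P', 0);      // saturation pressure
//   double rhoL = anc.eval_sat(T, 'D', 0);   // saturated liquid density
//   double rhoV = anc.eval_sat(T, 'D', 1);   // saturated vapor density
//   double Tback = anc.get_T_from_p(p);      // inverse via the lazily-built ln(p) expansion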
1376
1377}
1378}