73 complex(const double &_x, const double &_y): x(_x), y(_y){};
91 if( fabs(z.y)<fabs(z.x) )
112 const complex operator/(const complex& lhs, const complex& rhs);
113 bool operator==(const complex& lhs, const complex& rhs);
114 bool operator!=(const complex& lhs, const complex& rhs);
115 const complex operator+(const complex& lhs);
116 const complex operator-(const complex& lhs);
117 const complex operator+(const complex& lhs, const complex& rhs);
118 const complex operator+(const complex& lhs, const double& rhs);
119 const complex operator+(const double& lhs, const complex& rhs);
120 const complex operator-(const complex& lhs, const complex& rhs);
121 const complex operator-(const complex& lhs, const double& rhs);
122 const complex operator-(const double& lhs, const complex& rhs);
123 const complex operator*(const complex& lhs, const complex& rhs);
124 const complex operator*(const complex& lhs, const double& rhs);
125 const complex operator*(const double& lhs, const complex& rhs);
126 const complex operator/(const complex& lhs, const complex& rhs);
127 const complex operator/(const double& lhs, const complex& rhs);
128 const complex operator/(const complex& lhs, const double& rhs);
130 const complex conj(const complex &z);
131 const complex csqr(const complex &z);
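A minimal usage sketch of the free operators declared above (hypothetical snippet, not part of the header; it assumes the surrounding ap namespace and the public x/y fields of complex):

    // Hypothetical usage of the complex operators and helpers declared above.
    ap::complex a(1.0, 2.0), b(3.0, -1.0);
    ap::complex s = a + b;          // componentwise sum: (4, 1)
    ap::complex p = a*conj(b);      // (1*3 - 2*1) + (1*1 + 2*3)i = (1, 7)
    ap::complex q = csqr(a);        // a*a = (1 - 4) + (2*1*2)i = (-3, 4)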
145 class const_raw_vector
193 if( v1.GetStep()==1 && v2.GetStep()==1 )
199 const T *p1 = v1.GetData();
200 const T *p2 = v2.GetData();
201 int imax = v1.GetLength()/4;
203 for(i=imax; i!=0; i--)
205 r += (*p1)*(*p2) + p1[1]*p2[1] + p1[2]*p2[2] + p1[3]*p2[3];
209 for(i=0; i<v1.GetLength()%4; i++)
210 r += (*(p1++))*(*(p2++));
218 int offset11 = v1.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
219 int offset21 = v2.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
221 const T *p1 = v1.GetData();
222 const T *p2 = v2.GetData();
223 int imax = v1.GetLength()/4;
225 for(i=0; i<imax; i++)
227 r += (*p1)*(*p2) + p1[offset11]*p2[offset21] + p1[offset12]*p2[offset22] + p1[offset13]*p2[offset23];
231 for(i=0; i<v1.GetLength()%4; i++)
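Both branches above accumulate four products per iteration, then finish the remaining length%4 elements one by one. A stand-alone sketch of the same 4-way unrolled dot product for plain double arrays, unit stride only (hypothetical helper, not part of the header):

    // Hypothetical sketch of the unrolled accumulation used by the loops above.
    double dot4(const double *p1, const double *p2, int n)
    {
        double r = 0;
        int i;
        for(i = n/4; i != 0; i--)        // main loop: four products per pass
        {
            r += p1[0]*p2[0] + p1[1]*p2[1] + p1[2]*p2[2] + p1[3]*p2[3];
            p1 += 4;
            p2 += 4;
        }
        for(i = 0; i < n%4; i++)         // tail: remaining 0..3 elements
            r += (*(p1++))*(*(p2++));
        return r;
    }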
249 if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
255 const T *p2 = vsrc.GetData();
258 for(i=imax; i!=0; i--)
274 int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
275 int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
277 const T *p2 = vsrc.GetData();
280 for(i=0; i<imax; i++)
283 p1[offset11] = p2[offset21];
284 p1[offset12] = p2[offset22];
285 p1[offset13] = p2[offset23];
293 p2 += vsrc.GetStep();
304 void vmoveneg(raw_vector<T> vdst, const_raw_vector<T> vsrc)
307 if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
312 T *p1 = vdst.GetData();
313 const T *p2 = vsrc.GetData();
314 int imax = vdst.GetLength()/2;
316 for(i=0; i<imax; i++)
323 if(vdst.GetLength()%2 != 0)
332 int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
333 int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
334 T *p1 = vdst.GetData();
335 const T *p2 = vsrc.GetData();
336 int imax = vdst.GetLength()/4;
338 for(i=imax; i!=0; i--)
341 p1[offset11] = -p2[offset21];
342 p1[offset12] = -p2[offset22];
343 p1[offset13] = -p2[offset23];
347 for(i=0; i<vdst.GetLength()%4; i++)
350 p1 += vdst.GetStep();
351 p2 += vsrc.GetStep();
361 template<class T, class T2>
362 void vmove(raw_vector<T> vdst, const_raw_vector<T> vsrc, T2 alpha)
365 if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
370 T *p1 = vdst.GetData();
371 const T *p2 = vsrc.GetData();
372 int imax = vdst.GetLength()/4;
374 for(i=imax; i!=0; i--)
383 for(i=0; i<vdst.GetLength()%4; i++)
384 *(p1++) = alpha*(*(p2++));
392 int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
393 int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
394 T *p1 = vdst.GetData();
395 const T *p2 = vsrc.GetData();
396 int imax = vdst.GetLength()/4;
398 for(i=0; i<imax; i++)
401 p1[offset11] = alpha*p2[offset21];
402 p1[offset12] = alpha*p2[offset22];
403 p1[offset13] = alpha*p2[offset23];
407 for(i=0; i<vdst.GetLength()%4; i++)
410 p1 += vdst.GetStep();
411 p2 += vsrc.GetStep();
422 void vadd(raw_vector<T> vdst, const_raw_vector<T> vsrc)
425 if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
430 T *p1 = vdst.GetData();
431 const T *p2 = vsrc.GetData();
432 int imax = vdst.GetLength()/4;
434 for(i=imax; i!=0; i--)
443 for(i=0; i<vdst.GetLength()%4; i++)
452 int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
453 int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
454 T *p1 = vdst.GetData();
455 const T *p2 = vsrc.GetData();
456 int imax = vdst.GetLength()/4;
458 for(i=0; i<imax; i++)
461 p1[offset11] += p2[offset21];
462 p1[offset12] += p2[offset22];
463 p1[offset13] += p2[offset23];
467 for(i=0; i<vdst.GetLength()%4; i++)
470 p1 += vdst.GetStep();
471 p2 += vsrc.GetStep();
481 template<class T, class T2>
482 void vadd(raw_vector<T> vdst, const_raw_vector<T> vsrc, T2 alpha)
485 if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
490 T *p1 = vdst.GetData();
491 const T *p2 = vsrc.GetData();
492 int imax = vdst.GetLength()/4;
494 for(i=imax; i!=0; i--)
497 p1[1] += alpha*p2[1];
498 p1[2] += alpha*p2[2];
499 p1[3] += alpha*p2[3];
503 for(i=0; i<vdst.GetLength()%4; i++)
504 *(p1++) += alpha*(*(p2++));
512 int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
513 int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
514 T *p1 = vdst.GetData();
515 const T *p2 = vsrc.GetData();
516 int imax = vdst.GetLength()/4;
518 for(i=0; i<imax; i++)
521 p1[offset11] += alpha*p2[offset21];
522 p1[offset12] += alpha*p2[offset22];
523 p1[offset13] += alpha*p2[offset23];
527 for(i=0; i<vdst.GetLength()%4; i++)
530 p1 += vdst.GetStep();
531 p2 += vsrc.GetStep();
542 void vsub(raw_vector<T> vdst, const_raw_vector<T> vsrc)
545 if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
550 T *p1 = vdst.GetData();
551 const T *p2 = vsrc.GetData();
552 int imax = vdst.GetLength()/4;
554 for(i=imax; i!=0; i--)
563 for(i=0; i<vdst.GetLength()%4; i++)
572 int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
573 int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
574 T *p1 = vdst.GetData();
575 const T *p2 = vsrc.GetData();
576 int imax = vdst.GetLength()/4;
578 for(i=0; i<imax; i++)
581 p1[offset11] -= p2[offset21];
582 p1[offset12] -= p2[offset22];
583 p1[offset13] -= p2[offset23];
587 for(i=0; i<vdst.GetLength()%4; i++)
590 p1 += vdst.GetStep();
591 p2 += vsrc.GetStep();
601 template<class T, class T2>
602 void vsub(raw_vector<T> vdst, const_raw_vector<T> vsrc, T2 alpha)
611 template<class T, class T2>
614 if( vdst.GetStep()==1 )
619 T *p1 = vdst.GetData();
620 int imax = vdst.GetLength()/4;
622 for(i=imax; i!=0; i--)
630 for(i=0; i<vdst.GetLength()%4; i++)
639 int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
640 T *p1 = vdst.GetData();
641 int imax = vdst.GetLength()/4;
643 for(i=0; i<imax; i++)
646 p1[offset11] *= alpha;
647 p1[offset12] *= alpha;
648 p1[offset13] *= alpha;
651 for(i=0; i<vdst.GetLength()%4; i++)
654 p1 += vdst.GetStep();
688 #ifndef UNSAFE_MEM_COPY
713 #ifndef UNSAFE_MEM_COPY
755 void setcontent(int iLow, int iHigh, const T *pContent )
758 for(int i=iLow; i<=iHigh; i++)
759 (*this)(i) = pContent[i-iLow];
842 #ifndef UNSAFE_MEM_COPY
869 #ifndef UNSAFE_MEM_COPY
899 void setbounds(int iLow1, int iHigh1, int iLow2, int iHigh2 )
903 m_iVecSize = (iHigh1-iLow1+1)*(iHigh2-iLow2+1);
913 void setcontent(int iLow1, int iHigh1, int iLow2, int iHigh2, const T *pContent )
940 raw_vector<T> getcolumn(int iColumn, int iRowStart, int iRowEnd)
953 return raw_vector<T>(&((*this)(iRow, iColumnStart)), iColumnEnd-iColumnStart+1, 1);
1007 double sqr(double x);
1008 int maxint(int m1, int m2);
1009 int minint(int m1, int m2);
1010 double maxreal(double m1, double m2);
1011 double minreal(double m1, double m2);
1022 #include <stdexcept>
1036 class incorrectPrecision : public exception {};
1037 class overflow : public exception {};
1038 class divisionByZero : public exception {};
1039 class sqrtOfNegativeNumber : public exception {};
1040 class invalidConversion : public exception {};
1041 class invalidString : public exception {};
1042 class internalError : public exception {};
1043 class domainError : public exception {};
1064 static mpfr_record* newMpfr(unsigned int Precision);
1075 class mpfr_reference
1095 template<unsigned int Precision>
1142 #ifndef _AMP_NO_TEMPLATE_CONSTRUCTORS
1143 template<unsigned int Precision2>
1148 mpfr_set(getWritePtr(), r.getReadPtr(), GMP_RNDN);
1183 #ifndef _AMP_NO_TEMPLATE_CONSTRUCTORS
1184 template<unsigned int Precision2>
1187 if( (void*)this==(void*)(&r) )
1189 mpfr_set(getWritePtr(), r.getReadPtr(), GMP_RNDN);
1198 template<class T> ampf& operator+=(const T& v){ *this = *this + v; return *this; };
1199 template<class T> ampf& operator-=(const T& v){ *this = *this - v; return *this; };
1200 template<class T> ampf& operator*=(const T& v){ *this = *this * v; return *this; };
1201 template<class T> ampf& operator/=(const T& v){ *this = *this / v; return *this; };
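These compound assignments simply rewrite *this through the binary operators, so anything convertible to ampf works on the right-hand side. A small usage sketch (hypothetical snippet; the 128-bit precision argument and the toDouble call are assumptions about the surrounding class):

    // Hypothetical usage of ampf<Precision> via the operators above.
    amp::ampf<128> a("1.25");   // constructed from a decimal string
    amp::ampf<128> b = 3;       // constructed from an integer
    a += b;                     // *this = *this + v
    a /= 2;                     // mixed ampf/int arithmetic
    double d = a.toDouble();    // round to the nearest double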
1258 template<unsigned int Precision>
1263 WerrorS("incorrectPrecision");
1266 template<unsigned int Precision>
1271 mpfr_set_ui(getWritePtr(), 0, GMP_RNDN);
1274 template<unsigned int Precision>
1279 mpfr_set_si(getWritePtr(), sv, GMP_RNDN);
1282 template<unsigned int Precision>
1287 mpfr_set_ui(getWritePtr(), v, GMP_RNDN);
1290 template<unsigned int Precision>
1295 mpfr_set_ld(getWritePtr(), v, GMP_RNDN);
1298 template<unsigned int Precision>
1303 mpfr_strtofr(getWritePtr(), s, NULL, 0, GMP_RNDN);
1306 template<unsigned int Precision>
1312 template<unsigned int Precision>
1315 if( rval->refCount==1 )
1318 mpfr_set(newrval->value, rval->value, GMP_RNDN);
1324 template<unsigned int Precision>
1327 return mpfr_number_p(getReadPtr())!=0;
1330 template<unsigned int Precision>
1333 if( !isFiniteNumber() )
1335 return mpfr_sgn(getReadPtr())>0;
1338 template<unsigned int Precision>
1341 return mpfr_zero_p(getReadPtr())!=0;
1344 template<unsigned int Precision>
1347 if( !isFiniteNumber() )
1349 return mpfr_sgn(getReadPtr())<0;
1352 template<unsigned int Precision>
1355 return getUlpOf(*this);
1358 template<unsigned int Precision>
1361 return mpfr_get_d(getReadPtr(), GMP_RNDN);
1364 template<unsigned int Precision>
1370 if( !isFiniteNumber() )
1375 ptr = mpfr_get_str(NULL, &_e, 16, 0, getReadPtr(), GMP_RNDN);
1386 signed long iexpval;
1390 ptr = mpfr_get_str(NULL, &expval, 16, 0, getReadPtr(), GMP_RNDN);
1393 if( iexpval!=expval )
1396 sprintf(buf_e, "%ld", long(iexpval));
1406 mpfr_free_str(ptr2);
1410 template<unsigned int Precision>
1418 if( !isFiniteNumber() )
1423 ptr = mpfr_get_str(NULL, &_e, 10, 0, getReadPtr(), GMP_RNDN);
1434 signed long iexpval;
1438 ptr = mpfr_get_str(NULL, &expval, 10, 0, getReadPtr(), GMP_RNDN);
1441 if( iexpval!=expval )
1444 sprintf(buf_e, "%ld", long(iexpval));
1454 mpfr_free_str(ptr2);
1457 template<unsigned int Precision>
1460 char *toString_Block=(char *)omAlloc(256);
1464 if( !isFiniteNumber() )
1468 ptr = mpfr_get_str(NULL, &_e, 10, 0, getReadPtr(), GMP_RNDN);
1469 strcpy(toString_Block, ptr);
1471 return toString_Block;
1479 signed long iexpval;
1483 ptr = mpfr_get_str(NULL, &expval, 10, 0, getReadPtr(), GMP_RNDN);
1486 if( iexpval!=expval )
1489 sprintf(buf_e, "%ld", long(iexpval));
1493 sprintf(toString_Block, "-0.%sE%s",ptr,buf_e);
1496 sprintf(toString_Block, "0.%sE%s",ptr,buf_e);
1497 mpfr_free_str(ptr2);
1498 return toString_Block;
1501 template<unsigned int Precision>
1504 if( !x.isFiniteNumber() )
1508 ampf<Precision> r(1);
1509 mpfr_nextabove(r.getWritePtr());
1510 mpfr_sub_ui(r.getWritePtr(), r.getWritePtr(), 1, GMP_RNDN);
1514 mpfr_get_exp(x.getReadPtr()),
1524 template<unsigned int Precision>
1527 ampf<Precision> r(1);
1528 mpfr_nextabove(r.getWritePtr());
1529 mpfr_sub_ui(r.getWritePtr(), r.getWritePtr(), 1, GMP_RNDN);
1533 template<unsigned int Precision>
1536 ampf<Precision> r(1);
1537 mpfr_nextabove(r.getWritePtr());
1538 mpfr_sub_ui(r.getWritePtr(), r.getWritePtr(), 1, GMP_RNDN);
1547 template<unsigned int Precision>
1550 ampf<Precision> r(1);
1551 mpfr_nextabove(r.getWritePtr());
1552 mpfr_sub_ui(r.getWritePtr(), r.getWritePtr(), 1, GMP_RNDN);
1561 template<unsigned int Precision>
1564 ampf<Precision> r(1);
1565 mpfr_nextbelow(r.getWritePtr());
1566 mpfr_set_exp(r.getWritePtr(),mpfr_get_emax());
1570 template<unsigned int Precision>
1573 ampf<Precision> r(1);
1574 mpfr_set_exp(r.getWritePtr(),mpfr_get_emin());
1578 template<unsigned int Precision>
1584 template<unsigned int Precision>
1587 ampf<Precision> r(1);
1588 mp_exp_t e1 = mpfr_get_emax();
1589 mp_exp_t e2 = -mpfr_get_emin();
1590 mp_exp_t e = e1>e2 ? e1 : e2;
1591 mpfr_set_exp(r.getWritePtr(), e-5);
1595 template<unsigned int Precision>
1598 ampf<Precision> r(1);
1599 mp_exp_t e1 = mpfr_get_emax();
1600 mp_exp_t e2 = -mpfr_get_emin();
1601 mp_exp_t e = e1>e2 ? e1 : e2;
1602 mpfr_set_exp(r.getWritePtr(), 2-(e-5));
1606 template<unsigned int Precision>
1617 template<unsigned int Precision>
1620 return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())==0;
1623 template<unsigned int Precision>
1624 bool operator!=(const ampf<Precision>& op1, const ampf<Precision>& op2)
1626 return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())!=0;
1629 template<unsigned int Precision>
1630 bool operator<(const ampf<Precision>& op1, const ampf<Precision>& op2)
1632 return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())<0;
1635 template<unsigned int Precision>
1636 bool operator>(const ampf<Precision>& op1, const ampf<Precision>& op2)
1638 return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())>0;
1641 template<unsigned int Precision>
1642 bool operator<=(const ampf<Precision>& op1, const ampf<Precision>& op2)
1644 return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())<=0;
1647 template<unsigned int Precision>
1648 bool operator>=(const ampf<Precision>& op1, const ampf<Precision>& op2)
1650 return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())>=0;
1656 template<unsigned int Precision>
1657 const ampf<Precision> operator+(const ampf<Precision>& op1)
1662 template<unsigned int Precision>
1663 const ampf<Precision> operator-(const ampf<Precision>& op1)
1666 mpfr_neg(v->value, op1.getReadPtr(), GMP_RNDN);
1670 template<unsigned int Precision>
1671 const ampf<Precision> operator+(const ampf<Precision>& op1, const ampf<Precision>& op2)
1674 mpfr_add(v->value, op1.getReadPtr(), op2.getReadPtr(), GMP_RNDN);
1678 template<unsigned int Precision>
1679 const ampf<Precision> operator-(const ampf<Precision>& op1, const ampf<Precision>& op2)
1682 mpfr_sub(v->value, op1.getReadPtr(), op2.getReadPtr(), GMP_RNDN);
1687 template<unsigned int Precision>
1688 const ampf<Precision> operator*(const ampf<Precision>& op1, const ampf<Precision>& op2)
1691 mpfr_mul(v->value, op1.getReadPtr(), op2.getReadPtr(), GMP_RNDN);
1695 template<unsigned int Precision>
1696 const ampf<Precision> operator/(const ampf<Precision>& op1, const ampf<Precision>& op2)
1699 mpfr_div(v->value, op1.getReadPtr(), op2.getReadPtr(), GMP_RNDN);
1706 template<unsigned int Precision>
1707 const ampf<Precision> sqr(const ampf<Precision> &x)
1710 ampf<Precision> res;
1711 mpfr_sqr(res.getWritePtr(), x.getReadPtr(), GMP_RNDN);
1715 template<unsigned int Precision>
1716 int sign(const ampf<Precision> &x)
1718 int s = mpfr_sgn(x.getReadPtr());
1726 template<unsigned int Precision>
1727 const ampf<Precision> abs(const ampf<Precision> &x)
1730 ampf<Precision> res;
1731 mpfr_abs(res.getWritePtr(), x.getReadPtr(), GMP_RNDN);
1735 template<unsigned int Precision>
1736 const ampf<Precision> maximum(const ampf<Precision> &x, const ampf<Precision> &y)
1739 ampf<Precision> res;
1740 mpfr_max(res.getWritePtr(), x.getReadPtr(), y.getReadPtr(), GMP_RNDN);
1744 template<unsigned int Precision>
1745 const ampf<Precision> minimum(const ampf<Precision> &x, const ampf<Precision> &y)
1748 ampf<Precision> res;
1749 mpfr_min(res.getWritePtr(), x.getReadPtr(), y.getReadPtr(), GMP_RNDN);
1753 template<unsigned int Precision>
1754 const ampf<Precision> sqrt(const ampf<Precision> &x)
1757 ampf<Precision> res;
1758 mpfr_sqrt(res.getWritePtr(), x.getReadPtr(), GMP_RNDN);
1762 template<unsigned int Precision>
1763 signed long trunc(const ampf<Precision> &x)
1765 ampf<Precision> tmp;
1767 mpfr_trunc(tmp.getWritePtr(), x.getReadPtr());
1768 if( mpfr_integer_p(tmp.getReadPtr())==0 )
1771 mpfr_clear_erangeflag();
1772 r = mpfr_get_si(tmp.getReadPtr(), GMP_RNDN);
1773 if( mpfr_erangeflag_p()!=0 )
1779 template<unsigned int Precision>
1780 const ampf<Precision> frac(const ampf<Precision> &x)
1784 mpfr_frac(r.getWritePtr(), x.getReadPtr(), GMP_RNDN);
1788 template<unsigned int Precision>
1789 signed long floor(const ampf<Precision> &x)
1791 ampf<Precision> tmp;
1793 mpfr_floor(tmp.getWritePtr(), x.getReadPtr());
1794 if( mpfr_integer_p(tmp.getReadPtr())==0 )
1797 mpfr_clear_erangeflag();
1798 r = mpfr_get_si(tmp.getReadPtr(), GMP_RNDN);
1799 if( mpfr_erangeflag_p()!=0 )
1805 template<unsigned int Precision>
1806 signed long ceil(const ampf<Precision> &x)
1808 ampf<Precision> tmp;
1810 mpfr_ceil(tmp.getWritePtr(), x.getReadPtr());
1811 if( mpfr_integer_p(tmp.getReadPtr())==0 )
1814 mpfr_clear_erangeflag();
1815 r = mpfr_get_si(tmp.getReadPtr(), GMP_RNDN);
1816 if( mpfr_erangeflag_p()!=0 )
1822 template<unsigned int Precision>
1823 signed long round(const ampf<Precision> &x)
1825 ampf<Precision> tmp;
1827 mpfr_round(tmp.getWritePtr(), x.getReadPtr());
1828 if( mpfr_integer_p(tmp.getReadPtr())==0 )
1831 mpfr_clear_erangeflag();
1832 r = mpfr_get_si(tmp.getReadPtr(), GMP_RNDN);
1833 if( mpfr_erangeflag_p()!=0 )
1839 template<unsigned int Precision>
1840 const ampf<Precision> frexp2(const ampf<Precision> &x, mp_exp_t *exponent)
1844 if( !x.isFiniteNumber() )
1854 *exponent = mpfr_get_exp(r.getReadPtr());
1855 mpfr_set_exp(r.getWritePtr(),0);
1859 template<unsigned int Precision>
1860 const ampf<Precision> ldexp2(const ampf<Precision> &x, mp_exp_t exponent)
1864 mpfr_mul_2si(r.getWritePtr(), x.getReadPtr(), exponent, GMP_RNDN);
1871 #define __AMP_BINARY_OPI(type) \
1872 template<unsigned int Precision> const ampf<Precision> operator+(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)+op2; } \
1873 template<unsigned int Precision> const ampf<Precision> operator+(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)+op2; } \
1874 template<unsigned int Precision> const ampf<Precision> operator+(const ampf<Precision>& op1, const signed type& op2) { return op1+ampf<Precision>(op2); } \
1875 template<unsigned int Precision> const ampf<Precision> operator+(const ampf<Precision>& op1, const unsigned type& op2) { return op1+ampf<Precision>(op2); } \
1876 template<unsigned int Precision> const ampf<Precision> operator-(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)-op2; } \
1877 template<unsigned int Precision> const ampf<Precision> operator-(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)-op2; } \
1878 template<unsigned int Precision> const ampf<Precision> operator-(const ampf<Precision>& op1, const signed type& op2) { return op1-ampf<Precision>(op2); } \
1879 template<unsigned int Precision> const ampf<Precision> operator-(const ampf<Precision>& op1, const unsigned type& op2) { return op1-ampf<Precision>(op2); } \
1880 template<unsigned int Precision> const ampf<Precision> operator*(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)*op2; } \
1881 template<unsigned int Precision> const ampf<Precision> operator*(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)*op2; } \
1882 template<unsigned int Precision> const ampf<Precision> operator*(const ampf<Precision>& op1, const signed type& op2) { return op1*ampf<Precision>(op2); } \
1883 template<unsigned int Precision> const ampf<Precision> operator*(const ampf<Precision>& op1, const unsigned type& op2) { return op1*ampf<Precision>(op2); } \
1884 template<unsigned int Precision> const ampf<Precision> operator/(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)/op2; } \
1885 template<unsigned int Precision> const ampf<Precision> operator/(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)/op2; } \
1886 template<unsigned int Precision> const ampf<Precision> operator/(const ampf<Precision>& op1, const signed type& op2) { return op1/ampf<Precision>(op2); } \
1887 template<unsigned int Precision> const ampf<Precision> operator/(const ampf<Precision>& op1, const unsigned type& op2) { return op1/ampf<Precision>(op2); } \
1888 template<unsigned int Precision> bool operator==(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)==op2; } \
1889 template<unsigned int Precision> bool operator==(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)==op2; } \
1890 template<unsigned int Precision> bool operator==(const ampf<Precision>& op1, const signed type& op2) { return op1==ampf<Precision>(op2); } \
1891 template<unsigned int Precision> bool operator==(const ampf<Precision>& op1, const unsigned type& op2) { return op1==ampf<Precision>(op2); } \
1892 template<unsigned int Precision> bool operator!=(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)!=op2; } \
1893 template<unsigned int Precision> bool operator!=(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)!=op2; } \
1894 template<unsigned int Precision> bool operator!=(const ampf<Precision>& op1, const signed type& op2) { return op1!=ampf<Precision>(op2); } \
1895 template<unsigned int Precision> bool operator!=(const ampf<Precision>& op1, const unsigned type& op2) { return op1!=ampf<Precision>(op2); } \
1896 template<unsigned int Precision> bool operator<=(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<=op2; } \
1897 template<unsigned int Precision> bool operator<=(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<=op2; } \
1898 template<unsigned int Precision> bool operator<=(const ampf<Precision>& op1, const signed type& op2) { return op1<=ampf<Precision>(op2); } \
1899 template<unsigned int Precision> bool operator<=(const ampf<Precision>& op1, const unsigned type& op2) { return op1<=ampf<Precision>(op2); } \
1900 template<unsigned int Precision> bool operator>=(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>=op2; } \
1901 template<unsigned int Precision> bool operator>=(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>=op2; } \
1902 template<unsigned int Precision> bool operator>=(const ampf<Precision>& op1, const signed type& op2) { return op1>=ampf<Precision>(op2); } \
1903 template<unsigned int Precision> bool operator>=(const ampf<Precision>& op1, const unsigned type& op2) { return op1>=ampf<Precision>(op2); } \
1904 template<unsigned int Precision> bool operator<(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<op2; } \
1905 template<unsigned int Precision> bool operator<(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<op2; } \
1906 template<unsigned int Precision> bool operator<(const ampf<Precision>& op1, const signed type& op2) { return op1<ampf<Precision>(op2); } \
1907 template<unsigned int Precision> bool operator<(const ampf<Precision>& op1, const unsigned type& op2) { return op1<ampf<Precision>(op2); } \
1908 template<unsigned int Precision> bool operator>(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>op2; } \
1909 template<unsigned int Precision> bool operator>(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>op2; } \
1910 template<unsigned int Precision> bool operator>(const ampf<Precision>& op1, const signed type& op2) { return op1>ampf<Precision>(op2); } \
1911 template<unsigned int Precision> bool operator>(const ampf<Precision>& op1, const unsigned type& op2) { return op1>ampf<Precision>(op2); }
1916 #undef __AMP_BINARY_OPI
1917 #define __AMP_BINARY_OPF(type) \
1918 template<unsigned int Precision> const ampf<Precision> operator+(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)+op2; } \
1919 template<unsigned int Precision> const ampf<Precision> operator+(const ampf<Precision>& op1, const type& op2) { return op1+ampf<Precision>(op2); } \
1920 template<unsigned int Precision> const ampf<Precision> operator-(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)-op2; } \
1921 template<unsigned int Precision> const ampf<Precision> operator-(const ampf<Precision>& op1, const type& op2) { return op1-ampf<Precision>(op2); } \
1922 template<unsigned int Precision> const ampf<Precision> operator*(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)*op2; } \
1923 template<unsigned int Precision> const ampf<Precision> operator*(const ampf<Precision>& op1, const type& op2) { return op1*ampf<Precision>(op2); } \
1924 template<unsigned int Precision> const ampf<Precision> operator/(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)/op2; } \
1925 template<unsigned int Precision> const ampf<Precision> operator/(const ampf<Precision>& op1, const type& op2) { return op1/ampf<Precision>(op2); } \
1926 template<unsigned int Precision> bool operator==(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)==op2; } \
1927 template<unsigned int Precision> bool operator==(const ampf<Precision>& op1, const type& op2) { return op1==ampf<Precision>(op2); } \
1928 template<unsigned int Precision> bool operator!=(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)!=op2; } \
1929 template<unsigned int Precision> bool operator!=(const ampf<Precision>& op1, const type& op2) { return op1!=ampf<Precision>(op2); } \
1930 template<unsigned int Precision> bool operator<=(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<=op2; } \
1931 template<unsigned int Precision> bool operator<=(const ampf<Precision>& op1, const type& op2) { return op1<=ampf<Precision>(op2); } \
1932 template<unsigned int Precision> bool operator>=(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>=op2; } \
1933 template<unsigned int Precision> bool operator>=(const ampf<Precision>& op1, const type& op2) { return op1>=ampf<Precision>(op2); } \
1934 template<unsigned int Precision> bool operator<(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<op2; } \
1935 template<unsigned int Precision> bool operator<(const ampf<Precision>& op1, const type& op2) { return op1<ampf<Precision>(op2); } \
1936 template<unsigned int Precision> bool operator>(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>op2; } \
1937 template<unsigned int Precision> bool operator>(const ampf<Precision>& op1, const type& op2) { return op1>ampf<Precision>(op2); }
1941 #undef __AMP_BINARY_OPF
1946 template<unsigned int Precision>
1947 const ampf<Precision> pi()
1950 mpfr_const_pi(v->value, GMP_RNDN);
1954 template<unsigned int Precision>
1955 const ampf<Precision> halfpi()
1958 mpfr_const_pi(v->value, GMP_RNDN);
1959 mpfr_mul_2si(v->value, v->value, -1, GMP_RNDN);
1963 template<unsigned int Precision>
1964 const ampf<Precision> twopi()
1967 mpfr_const_pi(v->value, GMP_RNDN);
1968 mpfr_mul_2si(v->value, v->value, +1, GMP_RNDN);
1972 template<unsigned int Precision>
1973 const ampf<Precision> sin(const ampf<Precision> &x)
1976 mpfr_sin(v->value, x.getReadPtr(), GMP_RNDN);
1980 template<unsigned int Precision>
1981 const ampf<Precision> cos(const ampf<Precision> &x)
1984 mpfr_cos(v->value, x.getReadPtr(), GMP_RNDN);
1988 template<unsigned int Precision>
1989 const ampf<Precision> tan(const ampf<Precision> &x)
1992 mpfr_tan(v->value, x.getReadPtr(), GMP_RNDN);
1996 template<unsigned int Precision>
1997 const ampf<Precision> asin(const ampf<Precision> &x)
2000 mpfr_asin(v->value, x.getReadPtr(), GMP_RNDN);
2004 template<unsigned int Precision>
2005 const ampf<Precision> acos(const ampf<Precision> &x)
2008 mpfr_acos(v->value, x.getReadPtr(), GMP_RNDN);
2012 template<unsigned int Precision>
2013 const ampf<Precision> atan(const ampf<Precision> &x)
2016 mpfr_atan(v->value, x.getReadPtr(), GMP_RNDN);
2020 template<unsigned int Precision>
2021 const ampf<Precision> atan2(const ampf<Precision> &y, const ampf<Precision> &x)
2024 mpfr_atan2(v->value, y.getReadPtr(), x.getReadPtr(), GMP_RNDN);
2028 template<unsigned int Precision>
2029 const ampf<Precision> log(const ampf<Precision> &x)
2032 mpfr_log(v->value, x.getReadPtr(), GMP_RNDN);
2036 template<unsigned int Precision>
2037 const ampf<Precision> log2(const ampf<Precision> &x)
2040 mpfr_log2(v->value, x.getReadPtr(), GMP_RNDN);
2044 template<unsigned int Precision>
2045 const ampf<Precision> log10(const ampf<Precision> &x)
2048 mpfr_log10(v->value, x.getReadPtr(), GMP_RNDN);
2052 template<unsigned int Precision>
2053 const ampf<Precision> exp(const ampf<Precision> &x)
2056 mpfr_exp(v->value, x.getReadPtr(), GMP_RNDN);
2060 template<unsigned int Precision>
2061 const ampf<Precision> sinh(const ampf<Precision> &x)
2064 mpfr_sinh(v->value, x.getReadPtr(), GMP_RNDN);
2068 template<unsigned int Precision>
2069 const ampf<Precision> cosh(const ampf<Precision> &x)
2072 mpfr_cosh(v->value, x.getReadPtr(), GMP_RNDN);
2076 template<unsigned int Precision>
2077 const ampf<Precision> tanh(const ampf<Precision> &x)
2080 mpfr_tanh(v->value, x.getReadPtr(), GMP_RNDN);
2084 template<unsigned int Precision>
2085 const ampf<Precision> pow(const ampf<Precision> &x, const ampf<Precision> &y)
2088 mpfr_pow(v->value, x.getReadPtr(), y.getReadPtr(), GMP_RNDN);
2095 template<unsigned int Precision>
2111 campf(const ampf<Precision> &_x): x(_x), y(0){};
2114 #ifndef _AMP_NO_TEMPLATE_CONSTRUCTORS
2115 template<unsigned int Prec2>
2138 #ifndef _AMP_NO_TEMPLATE_CONSTRUCTORS
2139 template<unsigned int Precision2>
2148 ampf<Precision> x, y;
2154 template<unsigned int Precision>
2156 { return lhs.x==rhs.x && lhs.y==rhs.y; }
2158 template<unsigned int Precision>
2159 bool operator!=(const campf<Precision>& lhs, const campf<Precision>& rhs)
2160 { return lhs.x!=rhs.x || lhs.y!=rhs.y; }
2162 template<unsigned int Precision>
2163 const campf<Precision> operator+(const campf<Precision>& lhs)
2166 template<unsigned int Precision>
2167 campf<Precision>& operator+=(campf<Precision>& lhs, const campf<Precision>& rhs)
2168 { lhs.x += rhs.x; lhs.y += rhs.y; return lhs; }
2170 template<unsigned int Precision>
2171 const campf<Precision> operator+(const campf<Precision>& lhs, const campf<Precision>& rhs)
2172 { campf<Precision> r = lhs; r += rhs; return r; }
2174 template<unsigned int Precision>
2175 const campf<Precision> operator-(const campf<Precision>& lhs)
2176 { return campf<Precision>(-lhs.x, -lhs.y); }
2178 template<unsigned int Precision>
2179 campf<Precision>& operator-=(campf<Precision>& lhs, const campf<Precision>& rhs)
2180 { lhs.x -= rhs.x; lhs.y -= rhs.y; return lhs; }
2182 template<unsigned int Precision>
2183 const campf<Precision> operator-(const campf<Precision>& lhs, const campf<Precision>& rhs)
2184 { campf<Precision> r = lhs; r -= rhs; return r; }
2186 template<unsigned int Precision>
2187 campf<Precision>& operator*=(campf<Precision>& lhs, const campf<Precision>& rhs)
2189 ampf<Precision> xx(lhs.x*rhs.x), yy(lhs.y*rhs.y), mm((lhs.x+lhs.y)*(rhs.x+rhs.y));
2195 template<unsigned int Precision>
2196 const campf<Precision> operator*(const campf<Precision>& lhs, const campf<Precision>& rhs)
2197 { campf<Precision> r = lhs; r *= rhs; return r; }
2199 template<unsigned int Precision>
2200 const campf<Precision> operator/(const campf<Precision>& lhs, const campf<Precision>& rhs)
2205 if( abs(rhs.y)<abs(rhs.x) )
2217 result.y = (-lhs.x+lhs.y*e)/f;
2222 template<unsigned int Precision>
2223 campf<Precision>& operator/=(campf<Precision>& lhs, const campf<Precision>& rhs)
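The division above scales by whichever component of rhs has the larger magnitude (e = smaller/larger, f = larger + smaller*e), so the intermediate products stay bounded; line 2217 is the imaginary part of the second branch. A plain-double sketch of the same two-branch scheme (hypothetical helper, needs <cmath> for fabs):

    // Hypothetical double-precision sketch of the scaled complex division above:
    // (ax+i*ay)/(bx+i*by) without forming bx*bx+by*by directly.
    void cdiv(double ax, double ay, double bx, double by, double &cx, double &cy)
    {
        if( fabs(by)<fabs(bx) )
        {
            double e = by/bx, f = bx+by*e;   // f = (bx^2+by^2)/bx
            cx = (ax+ay*e)/f;
            cy = (ay-ax*e)/f;
        }
        else
        {
            double e = bx/by, f = by+bx*e;   // f = (bx^2+by^2)/by
            cx = (ay+ax*e)/f;
            cy = (-ax+ay*e)/f;               // same form as line 2217
        }
    }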
2229 template<unsigned int Precision>
2230 const ampf<Precision> abscomplex(const campf<Precision> &z)
2232 ampf<Precision> w, xabs, yabs, v;
2236 w = xabs>yabs ? xabs : yabs;
2237 v = xabs<yabs ? xabs : yabs;
2242 ampf<Precision> t = v/w;
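abscomplex factors the larger magnitude w out of the square root, so the squared term v/w never exceeds 1. The same pattern in plain doubles (hypothetical helper, needs <cmath>):

    // Hypothetical sketch of the overflow-safe modulus computed by abscomplex.
    double cabs_safe(double x, double y)
    {
        double xabs = fabs(x), yabs = fabs(y);
        double w = xabs>yabs ? xabs : yabs;    // larger magnitude
        double v = xabs<yabs ? xabs : yabs;    // smaller magnitude
        if( v==0 )
            return w;                          // avoid 0/0
        double t = v/w;                        // t <= 1, so t*t cannot overflow
        return w*sqrt(1+t*t);
    }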
2247 template<unsigned int Precision>
2248 const campf<Precision> conj(const campf<Precision> &z)
2250 return campf<Precision>(z.x, -z.y);
2253 template<unsigned int Precision>
2254 const campf<Precision> csqr(const campf<Precision> &z)
2256 ampf<Precision> t = z.x*z.y;
2257 return campf<Precision>(sqr(z.x)-sqr(z.y), t+t);
2263 #define __AMP_BINARY_OPI(type) \
2264 template<unsigned int Precision> const campf<Precision> operator+ (const signed type& op1, const campf<Precision>& op2) { return campf<Precision>(op1+op2.x, op2.y); } \
2265 template<unsigned int Precision> const campf<Precision> operator+ (const unsigned type& op1, const campf<Precision>& op2) { return campf<Precision>(op1+op2.x, op2.y); } \
2266 template<unsigned int Precision> const campf<Precision> operator+ (const campf<Precision>& op1, const signed type& op2) { return campf<Precision>(op1.x+op2, op1.y); } \
2267 template<unsigned int Precision> const campf<Precision> operator+ (const campf<Precision>& op1, const unsigned type& op2) { return campf<Precision>(op1.x+op2, op1.y); } \
2268 template<unsigned int Precision> const campf<Precision> operator- (const signed type& op1, const campf<Precision>& op2) { return campf<Precision>(op1-op2.x, -op2.y); } \
2269 template<unsigned int Precision> const campf<Precision> operator- (const unsigned type& op1, const campf<Precision>& op2) { return campf<Precision>(op1-op2.x, -op2.y); } \
2270 template<unsigned int Precision> const campf<Precision> operator- (const campf<Precision>& op1, const signed type& op2) { return campf<Precision>(op1.x-op2, op1.y); } \
2271 template<unsigned int Precision> const campf<Precision> operator- (const campf<Precision>& op1, const unsigned type& op2) { return campf<Precision>(op1.x-op2, op1.y); } \
2272 template<unsigned int Precision> const campf<Precision> operator* (const signed type& op1, const campf<Precision>& op2) { return campf<Precision>(op1*op2.x, op1*op2.y); } \
2273 template<unsigned int Precision> const campf<Precision> operator* (const unsigned type& op1, const campf<Precision>& op2) { return campf<Precision>(op1*op2.x, op1*op2.y); } \
2274 template<unsigned int Precision> const campf<Precision> operator* (const campf<Precision>& op1, const signed type& op2) { return campf<Precision>(op2*op1.x, op2*op1.y); } \
2275 template<unsigned int Precision> const campf<Precision> operator* (const campf<Precision>& op1, const unsigned type& op2) { return campf<Precision>(op2*op1.x, op2*op1.y); } \
2276 template<unsigned int Precision> const campf<Precision> operator/ (const signed type& op1, const campf<Precision>& op2) { return campf<Precision>(ampf<Precision>(op1),ampf<Precision>(0))/op2; } \
2277 template<unsigned int Precision> const campf<Precision> operator/ (const unsigned type& op1, const campf<Precision>& op2) { return campf<Precision>(ampf<Precision>(op1),ampf<Precision>(0))/op2; } \
2278 template<unsigned int Precision> const campf<Precision> operator/ (const campf<Precision>& op1, const signed type& op2) { return campf<Precision>(op1.x/op2, op1.y/op2); } \
2279 template<unsigned int Precision> const campf<Precision> operator/ (const campf<Precision>& op1, const unsigned type& op2) { return campf<Precision>(op1.x/op2, op1.y/op2); } \
2280 template<unsigned int Precision> bool operator==(const signed type& op1, const campf<Precision>& op2) { return op1==op2.x && op2.y==0; } \
2281 template<unsigned int Precision> bool operator==(const unsigned type& op1, const campf<Precision>& op2) { return op1==op2.x && op2.y==0; } \
2282 template<unsigned int Precision> bool operator==(const campf<Precision>& op1, const signed type& op2) { return op1.x==op2 && op1.y==0; } \
2283 template<unsigned int Precision> bool operator==(const campf<Precision>& op1, const unsigned type& op2) { return op1.x==op2 && op1.y==0; } \
2284 template<unsigned int Precision> bool operator!=(const campf<Precision>& op1, const signed type& op2) { return op1.x!=op2 || op1.y!=0; } \
2285 template<unsigned int Precision> bool operator!=(const campf<Precision>& op1, const unsigned type& op2) { return op1.x!=op2 || op1.y!=0; } \
2286 template<unsigned int Precision> bool operator!=(const signed type& op1, const campf<Precision>& op2) { return op1!=op2.x || op2.y!=0; } \
2287 template<unsigned int Precision> bool operator!=(const unsigned type& op1, const campf<Precision>& op2) { return op1!=op2.x || op2.y!=0; }
2292 #undef __AMP_BINARY_OPI
2293 #define __AMP_BINARY_OPF(type) \
2294 template<unsigned int Precision> const campf<Precision> operator+ (const type& op1, const campf<Precision>& op2) { return campf<Precision>(op1+op2.x, op2.y); } \
2295 template<unsigned int Precision> const campf<Precision> operator+ (const campf<Precision>& op1, const type& op2) { return campf<Precision>(op1.x+op2, op1.y); } \
2296 template<unsigned int Precision> const campf<Precision> operator- (const type& op1, const campf<Precision>& op2) { return campf<Precision>(op1-op2.x, -op2.y); } \
2297 template<unsigned int Precision> const campf<Precision> operator- (const campf<Precision>& op1, const type& op2) { return campf<Precision>(op1.x-op2, op1.y); } \
2298 template<unsigned int Precision> const campf<Precision> operator* (const type& op1, const campf<Precision>& op2) { return campf<Precision>(op1*op2.x, op1*op2.y); } \
2299 template<unsigned int Precision> const campf<Precision> operator* (const campf<Precision>& op1, const type& op2) { return campf<Precision>(op2*op1.x, op2*op1.y); } \
2300 template<unsigned int Precision> const campf<Precision> operator/ (const type& op1, const campf<Precision>& op2) { return campf<Precision>(ampf<Precision>(op1),ampf<Precision>(0))/op2; } \
2301 template<unsigned int Precision> const campf<Precision> operator/ (const campf<Precision>& op1, const type& op2) { return campf<Precision>(op1.x/op2, op1.y/op2); } \
2302 template<unsigned int Precision> bool operator==(const type& op1, const campf<Precision>& op2) { return op1==op2.x && op2.y==0; } \
2303 template<unsigned int Precision> bool operator==(const campf<Precision>& op1, const type& op2) { return op1.x==op2 && op1.y==0; } \
2304 template<unsigned int Precision> bool operator!=(const type& op1, const campf<Precision>& op2) { return op1!=op2.x || op2.y!=0; } \
2305 template<unsigned int Precision> bool operator!=(const campf<Precision>& op1, const type& op2) { return op1.x!=op2 || op1.y!=0; }
2310 #undef __AMP_BINARY_OPF
2315 template<unsigned int Precision>
2319 int i, cnt = v1.GetLength();
2320 const ampf<Precision> *p1 = v1.GetData();
2321 const ampf<Precision> *p2 = v2.GetData();
2322 mpfr_record *r = NULL;
2323 mpfr_record *t = NULL;
2328 mpfr_set_ui(r->value, 0, GMP_RNDN);
2329 for(i=0; i<cnt; i++)
2331 mpfr_mul(t->value, p1->getReadPtr(), p2->getReadPtr(), GMP_RNDN);
2332 mpfr_add(r->value, r->value, t->value, GMP_RNDN);
2349 template<unsigned int Precision>
2353 int i, cnt = vDst.GetLength();
2354 ampf<Precision> *pDst = vDst.GetData();
2355 const ampf<Precision> *pSrc = vSrc.GetData();
2358 for(i=0; i<cnt; i++)
2361 pDst += vDst.GetStep();
2362 pSrc += vSrc.GetStep();
2366 template<unsigned int Precision>
2370 int i, cnt = vDst.GetLength();
2371 ampf<Precision> *pDst = vDst.GetData();
2372 const ampf<Precision> *pSrc = vSrc.GetData();
2373 for(i=0; i<cnt; i++)
2376 mpfr_ptr v = pDst->getWritePtr();
2377 mpfr_neg(v, v, GMP_RNDN);
2378 pDst += vDst.GetStep();
2379 pSrc += vSrc.GetStep();
2383 template<unsigned int Precision, class T2>
2387 int i, cnt = vDst.GetLength();
2388 ampf<Precision> *pDst = vDst.GetData();
2389 const ampf<Precision> *pSrc = vSrc.GetData();
2390 ampf<Precision> a(alpha);
2391 for(i=0; i<cnt; i++)
2394 mpfr_ptr v = pDst->getWritePtr();
2395 mpfr_mul(v, v, a.getReadPtr(), GMP_RNDN);
2396 pDst += vDst.GetStep();
2397 pSrc += vSrc.GetStep();
2401 template<unsigned int Precision>
2405 int i, cnt = vDst.GetLength();
2406 ampf<Precision> *pDst = vDst.GetData();
2407 const ampf<Precision> *pSrc = vSrc.GetData();
2408 for(i=0; i<cnt; i++)
2410 mpfr_ptr v = pDst->getWritePtr();
2411 mpfr_srcptr vs = pSrc->getReadPtr();
2412 mpfr_add(v, v, vs, GMP_RNDN);
2413 pDst += vDst.GetStep();
2414 pSrc += vSrc.GetStep();
2418 template<unsigned int Precision, class T2>
2422 int i, cnt = vDst.GetLength();
2423 ampf<Precision> *pDst = vDst.GetData();
2424 const ampf<Precision> *pSrc = vSrc.GetData();
2425 ampf<Precision> a(alpha), tmp;
2426 for(i=0; i<cnt; i++)
2428 mpfr_ptr v = pDst->getWritePtr();
2429 mpfr_srcptr vs = pSrc->getReadPtr();
2430 mpfr_mul(tmp.getWritePtr(), a.getReadPtr(), vs, GMP_RNDN);
2431 mpfr_add(v, v, tmp.getWritePtr(), GMP_RNDN);
2432 pDst += vDst.GetStep();
2433 pSrc += vSrc.GetStep();
2437 template<unsigned int Precision>
2441 int i, cnt = vDst.GetLength();
2442 ampf<Precision> *pDst = vDst.GetData();
2443 const ampf<Precision> *pSrc = vSrc.GetData();
2444 for(i=0; i<cnt; i++)
2446 mpfr_ptr v = pDst->getWritePtr();
2447 mpfr_srcptr vs = pSrc->getReadPtr();
2448 mpfr_sub(v, v, vs, GMP_RNDN);
2449 pDst += vDst.GetStep();
2450 pSrc += vSrc.GetStep();
2454 template<unsigned int Precision, class T2>
2460 template<unsigned int Precision, class T2>
2463 int i, cnt = vDst.GetLength();
2464 ampf<Precision> *pDst = vDst.GetData();
2465 ampf<Precision> a(alpha);
2466 for(i=0; i<cnt; i++)
2468 mpfr_ptr v = pDst->getWritePtr();
2469 mpfr_mul(v, a.getReadPtr(), v, GMP_RNDN);
2470 pDst += vDst.GetStep();
2517 template<unsigned int Precision>
2521 template<unsigned int Precision>
2530 template<unsigned int Precision>
2581 template<unsigned int Precision>
2611 mx = amp::maximum<Precision>(amp::abs<Precision>(x(j)), mx);
2618 xnorm = xnorm+amp::sqr<Precision>(x(j)/mx);
2620 xnorm = amp::sqrt<Precision>(xnorm)*mx;
2635 mx = amp::maximum<Precision>(amp::abs<Precision>(alpha), amp::abs<Precision>(xnorm));
2636 beta = -mx*amp::sqrt<Precision>(amp::sqr<Precision>(alpha/mx)+amp::sqr<Precision>(xnorm/mx));
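The reflection generator guards against overflow by scaling with the largest entry mx before squaring (lines 2611-2620), and beta is then formed from alpha/mx and xnorm/mx (line 2636). A plain-double sketch of that scaled 2-norm (hypothetical helper, needs <cmath>):

    // Hypothetical sketch of the scaled norm used when generating the reflection.
    double scaled_norm2(const double *x, int n)
    {
        double mx = 0, s = 0;
        for(int j = 0; j < n; j++)
            mx = fabs(x[j])>mx ? fabs(x[j]) : mx;   // largest magnitude
        if( mx==0 )
            return 0;
        for(int j = 0; j < n; j++)
            s += (x[j]/mx)*(x[j]/mx);               // every term is <= 1
        return sqrt(s)*mx;                          // equals ||x||_2
    }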
2676 template<unsigned int Precision>
2689 if( tau==0 || n1>n2 || m1>m2 )
2697 for(i=n1; i<=n2; i++)
2701 for(i=m1; i<=m2; i++)
2704 ap::vadd(work.getvector(n1, n2), c.getrow(i, n1, n2), t);
2710 for(i=m1; i<=m2; i++)
2713 ap::vsub(c.getrow(i, n1, n2), work.getvector(n1, n2), t);
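This routine applies H = I - tau*v*v^T from the left in two passes: the loop at 2701-2704 accumulates w = C^T v into work (row i contributes v(i) times row i), and the loop at 2710-2713 subtracts tau*v(i)*w from each row. A dense, unit-stride sketch of the same update (hypothetical helper; c is a row-major m x n array, w has length n):

    // Hypothetical sketch of the two-pass rank-1 update used above:
    // w = C^T v, then C = C - tau * v * w^T.
    void apply_reflection_left(double *c, int m, int n,
                               const double *v, double tau, double *w)
    {
        for(int j = 0; j < n; j++)
            w[j] = 0;
        for(int i = 0; i < m; i++)          // first pass: w += v[i]*row_i
            for(int j = 0; j < n; j++)
                w[j] += v[i]*c[i*n+j];
        for(int i = 0; i < m; i++)          // second pass: row_i -= tau*v[i]*w
            for(int j = 0; j < n; j++)
                c[i*n+j] -= tau*v[i]*w[j];
    }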
2746 template<unsigned int Precision>
2761 if( tau==0 || n1>n2 || m1>m2 )
2770 for(i=m1; i<=m2; i++)
2779 for(i=m1; i<=m2; i++)
2782 ap::vsub(c.getrow(i, n1, n2), v.getvector(1, vm), t);
2829 template<unsigned int Precision>
2835 template<unsigned int Precision>
2842 template<unsigned int Precision>
2852 template<unsigned int Precision>
2859 template<unsigned int Precision>
2869 template<unsigned int Precision>
2876 template<unsigned int Precision>
2882 template<unsigned int Precision>
2889 template<unsigned int Precision>
2899 template<unsigned int Precision>
2906 template<unsigned int Precision>
2916 template<unsigned int Precision>
2975 template<unsigned int Precision>
3005 tauq.setbounds(0, n-1);
3006 taup.setbounds(0, n-1);
3010 tauq.setbounds(0, m-1);
3011 taup.setbounds(0, m-1);
3019 for(i=0; i<=n-1; i++)
3026 reflections::generatereflection<Precision>(t, m-i, ltau);
3034 reflections::applyreflectionfromtheleft<Precision>(a, ltau, t, i, m-1, i+1, n-1, work);
3043 reflections::generatereflection<Precision>(t, n-1-i, ltau);
3051 reflections::applyreflectionfromtheright<Precision>(a, ltau, t, i+1, m-1, i+1, n-1, work);
3065 for(i=0; i<=m-1; i++)
3072 reflections::generatereflection<Precision>(t, n-i, ltau);
3080 reflections::applyreflectionfromtheright<Precision>(a, ltau, t, i+1, m-1, i, n-1, work);
3089 reflections::generatereflection<Precision>(t, m-1-i, ltau);
3097 reflections::applyreflectionfromtheleft<Precision>(a, ltau, t, i+1, m-1, i+1, n-1, work);
3129 template<unsigned int Precision>
3143 if( m==0 || n==0 || qcolumns==0 )
3151 q.setbounds(0, m-1, 0, qcolumns-1);
3152 for(i=0; i<=m-1; i++)
3154 for(j=0; j<=qcolumns-1; j++)
3170 rmatrixbdmultiplybyq<Precision>(qp, m, n, tauq, q, m, qcolumns, false, false);
3203 template<unsigned int Precision>
3223 if( m<=0 || n<=0 || zrows<=0 || zcolumns<=0 )
3273 reflections::applyreflectionfromtheright<Precision>(z, tauq(i), v, 0, zrows-1, i, m-1, work);
3277 reflections::applyreflectionfromtheleft<Precision>(z, tauq(i), v, i, m-1, 0, zcolumns-1, work);
3281 while( i!=i2+istep );
3321 reflections::applyreflectionfromtheright<Precision>(z, tauq(i), v, 0, zrows-1, i+1, m-1, work);
3325 reflections::applyreflectionfromtheleft<Precision>(z, tauq(i), v, i+1, m-1, 0, zcolumns-1, work);
3329 while( i!=i2+istep );
3356 template<unsigned int Precision>
3370 if( m==0 || n==0 || ptrows==0 )
3378 pt.setbounds(0, ptrows-1, 0, n-1);
3379 for(i=0; i<=ptrows-1; i++)
3381 for(j=0; j<=n-1; j++)
3397 rmatrixbdmultiplybyp<Precision>(qp, m, n, taup, pt, ptrows, n, true, true);
3430 template<unsigned int Precision>
3450 if( m<=0 || n<=0 || zrows<=0 || zcolumns<=0 )
3504 reflections::applyreflectionfromtheright<Precision>(z, taup(i), v, 0, zrows-1, i+1, n-1, work);
3508 reflections::applyreflectionfromtheleft<Precision>(z, taup(i), v, i+1, n-1, 0, zcolumns-1, work);
3512 while( i!=i2+istep );
3551 reflections::applyreflectionfromtheright<Precision>(z, taup(i), v, 0, zrows-1, i, n-1, work);
3555 reflections::applyreflectionfromtheleft<Precision>(z, taup(i), v, i, n-1, 0, zcolumns-1, work);
3559 while( i!=i2+istep );
3586 template<unsigned int Precision>
3604 d.setbounds(0, n-1);
3605 e.setbounds(0, n-1);
3606 for(i=0; i<=n-2; i++)
3611 d(n-1) = b(n-1,n-1);
3615 d.setbounds(0, m-1);
3616 e.setbounds(0, m-1);
3617 for(i=0; i<=m-2; i++)
3622 d(m-1) = b(m-1,m-1);
3631 template<unsigned int Precision>
3655 taup.setbounds(1, minmn);
3656 tauq.setbounds(1, minmn);
3671 reflections::generatereflection<Precision>(t, mmip1, ltau);
3679 reflections::applyreflectionfromtheleft<Precision>(a, ltau, t, i, m, i+1, n, work);
3690 reflections::generatereflection<Precision>(t, nmi, ltau);
3698 reflections::applyreflectionfromtheright<Precision>(a, ltau, t, i+1, m, i+1, n, work);
3720 reflections::generatereflection<Precision>(t, nmip1, ltau);
3728 reflections::applyreflectionfromtheright<Precision>(a, ltau, t, i+1, m, i, n, work);
3739 reflections::generatereflection<Precision>(t, mmi, ltau);
3747 reflections::applyreflectionfromtheleft<Precision>(a, ltau, t, i+1, m, i+1, n, work);
3762 template<unsigned int Precision>
3779 if( m==0 || n==0 || qcolumns==0 )
3787 q.setbounds(1, m, 1, qcolumns);
3796 for(j=1; j<=qcolumns; j++)
3815 reflections::applyreflectionfromtheleft<Precision>(q, tauq(i), v, i, m, 1, qcolumns, work);
3824 ap::vmove(v.getvector(1, vm), qp.getcolumn(i, ip1, m));
3826 reflections::applyreflectionfromtheleft<Precision>(q, tauq(i), v, i+1, m, 1, qcolumns, work);
3836 template<unsigned int Precision>
3858 if( m<=0 || n<=0 || zrows<=0 || zcolumns<=0 )
3909 reflections::applyreflectionfromtheright<Precision>(z, tauq(i), v, 1, zrows, i, m, work);
3913 reflections::applyreflectionfromtheleft<Precision>(z, tauq(i), v, i, m, 1, zcolumns, work);
3917 while( i!=i2+istep );
3955 ap::vmove(v.getvector(1, vm), qp.getcolumn(i, ip1, m));
3959 reflections::applyreflectionfromtheright<Precision>(z, tauq(i), v, 1, zrows, i+1, m, work);
3963 reflections::applyreflectionfromtheleft<Precision>(z, tauq(i), v, i+1, m, 1, zcolumns, work);
3967 while( i!=i2+istep );
3977 template<unsigned int Precision>
3994 if( m==0 || n==0 || ptrows==0 )
4002 pt.setbounds(1, ptrows, 1, n);
4009 for(i=1; i<=ptrows; i++)
4029 ap::vmove(v.getvector(1, vm), qp.getrow(i, ip1, n));
4031 reflections::applyreflectionfromtheright<Precision>(pt, taup(i), v, 1, ptrows, i+1, n, work);
4041 reflections::applyreflectionfromtheright<Precision>(pt, taup(i), v, 1, ptrows, i, n, work);
4051 template<unsigned int Precision>
4073 if( m<=0 || n<=0 || zrows<=0 || zcolumns<=0 )
4125 ap::vmove(v.getvector(1, vm), qp.getrow(i, ip1, n));
4129 reflections::applyreflectionfromtheright<Precision>(z, taup(i), v, 1, zrows, i+1, n, work);
4133 reflections::applyreflectionfromtheleft<Precision>(z, taup(i), v, i+1, n, 1, zcolumns, work);
4137 while( i!=i2+istep );
4177 reflections::applyreflectionfromtheright<Precision>(z, taup(i), v, 1, zrows, i, n, work);
4181 reflections::applyreflectionfromtheleft<Precision>(z, taup(i), v, i, n, 1, zcolumns, work);
4185 while( i!=i2+istep );
4194 template<unsigned int Precision>
4214 for(i=1; i<=n-1; i++)
4225 for(i=1; i<=m-1; i++)
4277 template<unsigned int Precision>
4282 template<unsigned int Precision>
4289 template<unsigned int Precision>
4294 template<unsigned int Precision>
4299 template<unsigned int Precision>
4306 template<unsigned int Precision>
4352 template<unsigned int Precision>
4373 tau.setbounds(0, minmn-1);
4379 for(i=0; i<=k-1; i++)
4386 reflections::generatereflection<Precision>(t, m-i, tmp);
4396 reflections::applyreflectionfromtheleft<Precision>(a, tau(i), t, i, m-1, i+1, n-1, work);
4422 template<unsigned int Precision>
4439 if( m<=0 || n<=0 || qcolumns<=0 )
4449 q.setbounds(0, m-1, 0, qcolumns-1);
4452 for(i=0; i<=m-1; i++)
4454 for(j=0; j<=qcolumns-1; j++)
4470 for(i=k-1; i>=0; i--)
4478 reflections::applyreflectionfromtheleft<Precision>(q, tau(i), v, i, m-1, 0, qcolumns-1, work);
4498 template<unsigned int Precision>
4513 r.setbounds(0, m-1, 0, n-1);
4514 for(i=0; i<=n-1; i++)
4518 for(i=1; i<=m-1; i++)
4520 ap::vmove(r.getrow(i, 0, n-1), r.getrow(0, 0, n-1));
4522 for(i=0; i<=k-1; i++)
4532 template<unsigned int Precision>
4550 tau.setbounds(1, minmn);
4564 reflections::generatereflection<Precision>(t, mmip1, tmp);
4574 reflections::applyreflectionfromtheleft<Precision>(a, tau(i), t, i, m, i+1, n, work);
4583 template<unsigned int Precision>
4601 if( m==0 || n==0 || qcolumns==0 )
4611 q.setbounds(1, m, 1, qcolumns);
4616 for(j=1; j<=qcolumns; j++)
4641 reflections::applyreflectionfromtheleft<Precision>(q, tau(i), v, i, m, 1, qcolumns, work);
4649 template<unsigned int Precision>
4670 q.setbounds(1, m, 1, m);
4671 r.setbounds(1, m, 1, n);
4676 qrdecomposition<Precision>(a, m, n, tau);
4687 ap::vmove(r.getrow(i, 1, n), r.getrow(1, 1, n));
4697 unpackqfromqr<Precision>(a, m, n, tau, m, q);
4737 template<unsigned int Precision>
4742 template<unsigned int Precision>
4749 template<unsigned int Precision>
4754 template<unsigned int Precision>
4759 template<unsigned int Precision>
4766 template<unsigned int Precision>
4808 template<unsigned int Precision>
4827 tau.setbounds(0, minmn-1);
4829 for(i=0; i<=k-1; i++)
4836 reflections::generatereflection<Precision>(t, n-i, tmp);
4846 reflections::applyreflectionfromtheright<Precision>(a, tau(i), t, i+1, m-1, i, n-1, work);
4872 template<unsigned int Precision>
4889 if( m<=0 || n<=0 || qrows<=0 )
4899 q.setbounds(0, qrows-1, 0, n-1);
4902 for(i=0; i<=qrows-1; i++)
4904 for(j=0; j<=n-1; j++)
4920 for(i=k-1; i>=0; i--)
4928 reflections::applyreflectionfromtheright<Precision>(q, tau(i), v, 0, qrows-1, i, n-1, work);
4948 template<unsigned int Precision>
4962 l.setbounds(0, m-1, 0, n-1);
4963 for(i=0; i<=n-1; i++)
4967 for(i=1; i<=m-1; i++)
4971 for(i=0; i<=m-1; i++)
4983 template<unsigned int Precision>
5003 tau.setbounds(1, minmn);
5017 reflections::generatereflection<Precision>(t, nmip1, tmp);
5027 reflections::applyreflectionfromtheright<Precision>(a, tau(i), t, i+1, m, i, n, work);
5037 template<unsigned int Precision>
5055 if( m==0 || n==0 || qrows==0 )
5065 q.setbounds(1, qrows, 1, n);
5068 for(i=1; i<=qrows; i++)
5095 reflections::applyreflectionfromtheright<Precision>(q, tau(i), v, 1, qrows, i, n, work);
5103 template<unsigned int Precision>
5119 q.setbounds(1, n, 1, n);
5120 l.setbounds(1, m, 1, n);
5125 lqdecomposition<Precision>(a, m, n, tau);
5148 unpackqfromlq<Precision>(a, m, n, tau, n, q);
5188 template<unsigned int Precision>
5192 template<unsigned int Precision>
5196 template<unsigned int Precision>
5201 template<unsigned int Precision>
5206 template<unsigned int Precision>
5213 template<unsigned int Precision>
5224 template<unsigned int Precision>
5231 template<unsigned int Precision>
5242 template<unsigned int Precision>
5257 template<unsigned int Precision>
5260 template<unsigned int Precision>
5283 template<unsigned int Precision>
5304 result = amp::abs<Precision>(x(i1));
5309 for(ix=i1; ix<=i2; ix++)
5313 absxi = amp::abs<Precision>(x(ix));
5316 ssq = 1+ssq*amp::sqr<Precision>(scl/absxi);
5321 ssq = ssq+amp::sqr<Precision>(absxi/scl);
5325 result = scl*amp::sqrt<Precision>(ssq);
5330 template<unsigned int Precision>
5341 a = amp::abs<Precision>(x(result));
5342 for(i=i1+1; i<=i2; i++)
5344 if( amp::abs<Precision>(x(i))>amp::abs<Precision>(x(result)) )
5353 template<unsigned int Precision>
5365 a = amp::abs<Precision>(x(result,j));
5366 for(i=i1+1; i<=i2; i++)
5368 if( amp::abs<Precision>(x(i,j))>amp::abs<Precision>(x(result,j)) )
5377 template<unsigned int Precision>
5389 a = amp::abs<Precision>(x(i,result));
5390 for(j=j1+1; j<=j2; j++)
5392 if( amp::abs<Precision>(x(i,j))>amp::abs<Precision>(x(i,result)) )
5401 template<unsigned int Precision>
5415 for(j=j1; j<=j2; j++)
5419 for(i=i1; i<=i2; i++)
5423 work(j) = work(j)+amp::abs<Precision>(a(i,j));
5427 for(j=j1; j<=j2; j++)
5435 template<unsigned int Precision>
5451 if( is1>is2 || js1>js2 )
5457 for(isrc=is1; isrc<=is2; isrc++)
5459 idst = isrc-is1+id1;
5460 ap::vmove(b.getrow(idst, jd1, jd2), a.getrow(isrc, js1, js2));
5465 template<unsigned int Precision>
5480 if( i1>i2 || j1>j2 )
5485 for(i=i1; i<=i2-1; i++)
5492 ap::vmove(a.getcolumn(j, ips, i2), a.getrow(i, jps, j2));
5498 template<unsigned int Precision>
5514 if( is1>is2 || js1>js2 )
5520 for(isrc=is1; isrc<=is2; isrc++)
5522 jdst = isrc-is1+jd1;
5523 ap::vmove(b.getcolumn(jdst, id1, id2), a.getrow(isrc, js1, js2));
5528 template<unsigned int Precision>
5554 if( i1>i2 || j1>j2 )
5566 for(i=iy1; i<=iy2; i++)
5579 for(i=i1; i<=i2; i++)
5591 if( i1>i2 || j1>j2 )
5603 for(i=iy1; i<=iy2; i++)
5616 for(i=i1; i<=i2; i++)
5619 ap::vadd(y.getvector(iy1, iy2), a.getrow(i, j1, j2), v);
5625 template<unsigned int Precision>
5636 xabs = amp::abs<Precision>(x);
5637 yabs = amp::abs<Precision>(y);
5638 w = amp::maximum<Precision>(xabs, yabs);
5639 z = amp::minimum<Precision>(xabs, yabs);
5646 result = w*amp::sqrt<Precision>(1+amp::sqr<Precision>(z/w));
5652 template<unsigned int Precision>
5713 if( arows<=0 || acols<=0 || brows<=0 || bcols<=0 )
5734 for(i=ci1; i<=ci2; i++)
5736 for(j=cj1; j<=cj2; j++)
5744 for(i=ci1; i<=ci2; i++)
5753 if( !transa && !transb )
5755 for(l=ai1; l<=ai2; l++)
5757 for(r=bi1; r<=bi2; r++)
5761 ap::vadd(c.getrow(k, cj1, cj2), b.getrow(r, bj1, bj2), v);
5770 if( !transa && transb )
5772 if( arows*acols<brows*bcols )
5774 for(r=bi1; r<=bi2; r++)
5776 for(l=ai1; l<=ai2; l++)
5779 c(ci1+l-ai1,cj1+r-bi1) = c(ci1+l-ai1,cj1+r-bi1)+alpha*v;
5786 for(l=ai1; l<=ai2; l++)
5788 for(r=bi1; r<=bi2; r++)
5791 c(ci1+l-ai1,cj1+r-bi1) = c(ci1+l-ai1,cj1+r-bi1)+alpha*v;
5801 if( transa && !transb )
5803 for(l=aj1; l<=aj2; l++)
5805 for(r=bi1; r<=bi2; r++)
5809 ap::vadd(c.getrow(k, cj1, cj2), b.getrow(r, bj1, bj2), v);
5818 if( transa && transb )
5820 if( arows*acols<brows*bcols )
5822 for(r=bi1; r<=bi2; r++)
5824 for(i=1; i<=crows; i++)
5828 for(l=ai1; l<=ai2; l++)
5840 for(l=aj1; l<=aj2; l++)
5844 for(r=bi1; r<=bi2; r++)
5847 c(ci1+l-aj1,cj1+r-bi1) = c(ci1+l-aj1,cj1+r-bi1)+alpha*v;
5898 template<unsigned int Precision>
5908 template<unsigned int Precision>
5918 template<unsigned int Precision>
5951 template<unsigned int Precision>
5969 if( m1>m2 || n1>n2 )
5985 for(j=m1; j<=m2-1; j++)
5989 if( ctemp!=1 || stemp!=0 )
5995 ap::vadd(a.getrow(j, n1, n2), a.getrow(jp1, n1, n2), stemp);
6006 for(j=m1; j<=m2-1; j++)
6010 if( ctemp!=1 || stemp!=0 )
6013 a(
j+1,n1) = ctemp*temp-stemp*a(
j,n1);
6014 a(
j,n1) = stemp*temp+ctemp*a(
j,n1);
6027 for(
j=m2-1;
j>=m1;
j--)
6031 if( ctemp!=1 || stemp!=0 )
6037 ap::vadd(a.getrow(
j, n1, n2), a.getrow(jp1, n1, n2), stemp);
6048 for(
j=m2-1;
j>=m1;
j--)
6052 if( ctemp!=1 || stemp!=0 )
6055 a(
j+1,n1) = ctemp*temp-stemp*a(
j,n1);
6056 a(
j,n1) = stemp*temp+ctemp*a(
j,n1);
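// Illustrative sketch: apply a sequence of 2x2 Givens rotations to adjacent
// row pairs of a matrix, the pattern used by the left-side rotation routine
// above. Self-contained; the flat row-major storage and the function name are
// assumptions for illustration.
#include <vector>

void apply_rotations_left(int m, int n,
                          const std::vector<double>& cs,  // m-1 cosines
                          const std::vector<double>& sn,  // m-1 sines
                          std::vector<double>& a)         // m*n, row-major
{
    // Rotation j mixes rows j and j+1 of a.
    for(int j = 0; j+1 < m; j++)
    {
        double c = cs[j], s = sn[j];
        if( c==1.0 && s==0.0 )
            continue;                    // identity rotation, nothing to do
        for(int k = 0; k < n; k++)
        {
            double t1 = a[j*n+k];
            double t2 = a[(j+1)*n+k];
            a[j*n+k]     = c*t1+s*t2;    // same update as a(j,.)   above
            a[(j+1)*n+k] = c*t2-s*t1;    // same update as a(j+1,.) above
        }
    }
}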
template<unsigned int Precision>
for(j=n1; j<=n2-1; j++)
    if( ctemp!=1 || stemp!=0 )
        ap::vmul(a.getcolumn(j, m1, m2), ctemp);
        ap::vadd(a.getcolumn(j, m1, m2), a.getcolumn(jp1, m1, m2), stemp);
for(j=n1; j<=n2-1; j++)
    if( ctemp!=1 || stemp!=0 )
        a(m1,j+1) = ctemp*temp-stemp*a(m1,j);
        a(m1,j) = stemp*temp+ctemp*a(m1,j);
for(j=n2-1; j>=n1; j--)
    if( ctemp!=1 || stemp!=0 )
        ap::vmul(a.getcolumn(j, m1, m2), ctemp);
        ap::vadd(a.getcolumn(j, m1, m2), a.getcolumn(jp1, m1, m2), stemp);
for(j=n2-1; j>=n1; j--)
    if( ctemp!=1 || stemp!=0 )
        a(m1,j+1) = ctemp*temp-stemp*a(m1,j);
        a(m1,j) = stemp*temp+ctemp*a(m1,j);
template<unsigned int Precision>
r = amp::sqrt<Precision>(amp::sqr<Precision>(f1)+amp::sqr<Precision>(g1));
if( amp::abs<Precision>(f)>amp::abs<Precision>(g) && cs<0 )
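// Illustrative sketch: generate a Givens rotation (cs, sn) and r such that
//   [ cs  sn ] [ f ]   [ r ]
//   [-sn  cs ] [ g ] = [ 0 ],
// including the sign fix-up visible above that keeps the cosine non-negative.
// Self-contained; the name generate_rotation is for illustration only.
#include <cmath>

void generate_rotation(double f, double g, double& cs, double& sn, double& r)
{
    if( g==0.0 )
    {
        cs = 1.0;
        sn = 0.0;
        r = f;
        return;
    }
    r = std::sqrt(f*f+g*g);
    cs = f/r;
    sn = g/r;
    if( std::fabs(f)>std::fabs(g) && cs<0.0 )
    {
        cs = -cs;   // flip all three signs so that cs >= 0
        sn = -sn;
        r = -r;
    }
}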
template<unsigned int Precision>
    bool isfractionalaccuracyrequired,
template<unsigned int Precision>
    bool isfractionalaccuracyrequired,
template<unsigned int Precision>
    bool isfractionalaccuracyrequired,
template<unsigned int Precision>
template<unsigned int Precision>
template<unsigned int Precision>
template<unsigned int Precision>
    bool isfractionalaccuracyrequired,
result = bidiagonalsvddecompositioninternal<Precision>(d1, e1, n, isupper, isfractionalaccuracyrequired, u, 0, nru, c, 0, ncc, vt, 0, ncvt);
template<unsigned int Precision>
    bool isfractionalaccuracyrequired,
result = bidiagonalsvddecompositioninternal<Precision>(d, e, n, isupper, isfractionalaccuracyrequired, u, 1, nru, c, 1, ncc, vt, 1, ncvt);
template<unsigned int Precision>
    bool isfractionalaccuracyrequired,
bool matrixsplitflag;
ap::vmul(vt.getrow(vstart, vstart, vstart+ncvt-1), -1);
for(i=1; i<=n-1; i++)
for(i=1; i<=n-1; i++)
for(i=1; i<=n-1; i++)
    rotations::generaterotation<Precision>(d(i), e(i), cs, sn, r);
rotations::applyrotationsfromtheright<Precision>(fwddir, ustart, uend, 1+ustart-1, n+ustart-1, work0, work1, u, utemp);
rotations::applyrotationsfromtheleft<Precision>(fwddir, 1+cstart-1, n+cstart-1, cstart, cend, work0, work1, c, ctemp);
tolmul = amp::maximum<Precision>(10, amp::minimum<Precision>(100, amp::pow<Precision>(eps, -amp::ampf<Precision>("0.125"))));
if( !isfractionalaccuracyrequired )
smax = amp::maximum<Precision>(smax, amp::abs<Precision>(d(i)));
for(i=1; i<=n-1; i++)
    smax = amp::maximum<Precision>(smax, amp::abs<Precision>(e(i)));
sminoa = amp::abs<Precision>(d(1));
mu = amp::abs<Precision>(d(i))*(mu/(mu+amp::abs<Precision>(e(i-1))));
sminoa = amp::minimum<Precision>(sminoa, mu);
sminoa = sminoa/amp::sqrt<Precision>(n);
thresh = amp::maximum<Precision>(tol*sminoa, maxitr*n*n*unfl);
thresh = amp::maximum<Precision>(amp::abs<Precision>(tol)*smax, maxitr*n*n*unfl);
if( tol<0 && amp::abs<Precision>(d(m))<=thresh )
smax = amp::abs<Precision>(d(m));
matrixsplitflag = false;
for(lll=1; lll<=m-1; lll++)
    abss = amp::abs<Precision>(d(ll));
    abse = amp::abs<Precision>(e(ll));
    if( tol<0 && abss<=thresh )
        matrixsplitflag = true;
    smin = amp::minimum<Precision>(smin, abss);
    smax = amp::maximum<Precision>(smax, amp::maximum<Precision>(abss, abse));
if( !matrixsplitflag )
svdv2x2<Precision>(d(m-1), e(m-1), d(m), sigmn, sigmx, sinr, cosr, sinl, cosl);
mm1 = m-1+(vstart-1);
ap::vmul(vt.getrow(mm0, vstart, vend), cosr);
ap::vsub(vt.getrow(mm0, vstart, vend), vt.getrow(mm1, vstart, vend), sinr);
ap::vmul(u.getcolumn(mm0, ustart, uend), cosl);
ap::vsub(u.getcolumn(mm0, ustart, uend), u.getcolumn(mm1, ustart, uend), sinl);
ap::vmul(c.getrow(mm0, cstart, cend), cosl);
ap::vsub(c.getrow(mm0, cstart, cend), c.getrow(mm1, cstart, cend), sinl);
if( idir==1 && amp::abs<Precision>(d(ll))<amp::ampf<Precision>("1.0E-3")*amp::abs<Precision>(d(m)) )
if( idir==2 && amp::abs<Precision>(d(m))<amp::ampf<Precision>("1.0E-3")*amp::abs<Precision>(d(ll)) )
if( ll!=oldll || m!=oldm || bchangedir )
if( amp::abs<Precision>(d(ll))>=amp::abs<Precision>(d(m)) )
if( amp::abs<Precision>(e(m-1))<=amp::abs<Precision>(tol)*amp::abs<Precision>(d(m)) || tol<0 && amp::abs<Precision>(e(m-1))<=thresh )
mu = amp::abs<Precision>(d(ll));
for(lll=ll; lll<=m-1; lll++)
    if( amp::abs<Precision>(e(lll))<=tol*mu )
    mu = amp::abs<Precision>(d(lll+1))*(mu/(mu+amp::abs<Precision>(e(lll))));
    sminl = amp::minimum<Precision>(sminl, mu);
if( amp::abs<Precision>(e(ll))<=amp::abs<Precision>(tol)*amp::abs<Precision>(d(ll)) || tol<0 && amp::abs<Precision>(e(ll))<=thresh )
mu = amp::abs<Precision>(d(m));
for(lll=m-1; lll>=ll; lll--)
    if( amp::abs<Precision>(e(lll))<=tol*mu )
    mu = amp::abs<Precision>(d(lll))*(mu/(mu+amp::abs<Precision>(e(lll))));
    sminl = amp::minimum<Precision>(sminl, mu);
if( tol>=0 && n*tol*(sminl/smax)<=amp::maximum<Precision>(eps, amp::ampf<Precision>("0.01")*tol) )
sll = amp::abs<Precision>(d(ll));
svd2x2<Precision>(d(m-1), e(m-1), d(m), shift, r);
sll = amp::abs<Precision>(d(m));
svd2x2<Precision>(d(ll), e(ll), d(ll+1), shift, r);
if( amp::sqr<Precision>(shift/sll)<eps )
for(i=ll; i<=m-1; i++)
    rotations::generaterotation<Precision>(d(i)*cs, e(i), cs, sn, r);
    rotations::generaterotation<Precision>(oldcs*r, d(i+1)*sn, oldcs, oldsn, tmp);
    work2(i-ll+1) = oldcs;
    work3(i-ll+1) = oldsn;
rotations::applyrotationsfromtheleft<Precision>(fwddir, ll+vstart-1, m+vstart-1, vstart, vend, work0, work1, vt, vttemp);
rotations::applyrotationsfromtheright<Precision>(fwddir, ustart, uend, ll+ustart-1, m+ustart-1, work2, work3, u, utemp);
rotations::applyrotationsfromtheleft<Precision>(fwddir, ll+cstart-1, m+cstart-1, cstart, cend, work2, work3, c, ctemp);
if( amp::abs<Precision>(e(m-1))<=thresh )
for(i=m; i>=ll+1; i--)
    rotations::generaterotation<Precision>(d(i)*cs, e(i-1), cs, sn, r);
    rotations::generaterotation<Precision>(oldcs*r, d(i-1)*sn, oldcs, oldsn, tmp);
    work2(i-ll) = oldcs;
    work3(i-ll) = -oldsn;
rotations::applyrotationsfromtheleft<Precision>(!fwddir, ll+vstart-1, m+vstart-1, vstart, vend, work2, work3, vt, vttemp);
rotations::applyrotationsfromtheright<Precision>(!fwddir, ustart, uend, ll+ustart-1, m+ustart-1, work0, work1, u, utemp);
rotations::applyrotationsfromtheleft<Precision>(!fwddir, ll+cstart-1, m+cstart-1, cstart, cend, work0, work1, c, ctemp);
if( amp::abs<Precision>(e(ll))<=thresh )
f = (amp::abs<Precision>(d(ll))-shift)*(extsignbdsqr<Precision>(1, d(ll))+shift/d(ll));
for(i=ll; i<=m-1; i++)
    rotations::generaterotation<Precision>(f, g, cosr, sinr, r);
    f = cosr*d(i)+sinr*e(i);
    e(i) = cosr*e(i)-sinr*d(i);
    d(i+1) = cosr*d(i+1);
    rotations::generaterotation<Precision>(f, g, cosl, sinl, r);
    f = cosl*e(i)+sinl*d(i+1);
    d(i+1) = cosl*d(i+1)-sinl*e(i);
    e(i+1) = cosl*e(i+1);
    work0(i-ll+1) = cosr;
    work1(i-ll+1) = sinr;
    work2(i-ll+1) = cosl;
    work3(i-ll+1) = sinl;
rotations::applyrotationsfromtheleft<Precision>(fwddir, ll+vstart-1, m+vstart-1, vstart, vend, work0, work1, vt, vttemp);
rotations::applyrotationsfromtheright<Precision>(fwddir, ustart, uend, ll+ustart-1, m+ustart-1, work2, work3, u, utemp);
rotations::applyrotationsfromtheleft<Precision>(fwddir, ll+cstart-1, m+cstart-1, cstart, cend, work2, work3, c, ctemp);
if( amp::abs<Precision>(e(m-1))<=thresh )
f = (amp::abs<Precision>(d(m))-shift)*(extsignbdsqr<Precision>(1, d(m))+shift/d(m));
for(i=m; i>=ll+1; i--)
    rotations::generaterotation<Precision>(f, g, cosr, sinr, r);
    f = cosr*d(i)+sinr*e(i-1);
    e(i-1) = cosr*e(i-1)-sinr*d(i);
    d(i-1) = cosr*d(i-1);
    rotations::generaterotation<Precision>(f, g, cosl, sinl, r);
    f = cosl*e(i-1)+sinl*d(i-1);
    d(i-1) = cosl*d(i-1)-sinl*e(i-1);
    e(i-2) = cosl*e(i-2);
    work1(i-ll) = -sinr;
    work3(i-ll) = -sinl;
if( amp::abs<Precision>(e(ll))<=thresh )
rotations::applyrotationsfromtheleft<Precision>(!fwddir, ll+vstart-1, m+vstart-1, vstart, vend, work2, work3, vt, vttemp);
rotations::applyrotationsfromtheright<Precision>(!fwddir, ustart, uend, ll+ustart-1, m+ustart-1, work0, work1, u, utemp);
rotations::applyrotationsfromtheleft<Precision>(!fwddir, ll+cstart-1, m+cstart-1, cstart, cend, work0, work1, c, ctemp);
ap::vmul(vt.getrow(i+vstart-1, vstart, vend), -1);
for(i=1; i<=n-1; i++)
    for(j=2; j<=n+1-i; j++)
        ap::vmove(vt.getrow(isub+vstart-1, vstart, vend), vt.getrow(j+vstart-1, vstart, vend));
        ap::vmove(u.getcolumn(isub+ustart-1, ustart, uend), u.getcolumn(j+ustart-1, ustart, uend));
        ap::vmove(c.getrow(isub+cstart-1, cstart, cend), c.getrow(j+cstart-1, cstart, cend));
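// Illustrative sketch of the final reordering step: sort the singular values
// in decreasing order and apply the same permutation to the columns of U and
// the rows of V^T, as the fragments above do with explicit row/column moves.
// Self-contained; flat row-major storage and all names are assumptions.
#include <algorithm>
#include <numeric>
#include <vector>

void sort_svd_descending(std::vector<double>& w,          // k singular values
                         std::vector<double>& u, int m,   // m x k, row-major
                         std::vector<double>& vt, int n)  // k x n, row-major
{
    int k = (int)w.size();
    std::vector<int> p(k);
    std::iota(p.begin(), p.end(), 0);
    std::sort(p.begin(), p.end(), [&](int a, int b){ return w[a] > w[b]; });

    std::vector<double> w2(k), u2(u.size()), vt2(vt.size());
    for(int jnew = 0; jnew < k; jnew++)
    {
        int jold = p[jnew];
        w2[jnew] = w[jold];
        for(int i = 0; i < m; i++)          // permute columns of U
            u2[i*k+jnew] = u[i*k+jold];
        for(int j = 0; j < n; j++)          // permute rows of V^T
            vt2[jnew*n+j] = vt[jold*n+j];
    }
    w.swap(w2); u.swap(u2); vt.swap(vt2);
}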
template<unsigned int Precision>
result = amp::abs<Precision>(a);
result = -amp::abs<Precision>(a);
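// Illustrative sketch: the Fortran-style sign transfer used above
// (extsignbdsqr): return |a| carrying the sign of b. Self-contained;
// the name ext_sign is for illustration only.
#include <cmath>

double ext_sign(double a, double b)
{
    return b >= 0.0 ? std::fabs(a) : -std::fabs(a);
}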
template<unsigned int Precision>
fa = amp::abs<Precision>(f);
ga = amp::abs<Precision>(g);
ha = amp::abs<Precision>(h);
fhmn = amp::minimum<Precision>(fa, ha);
fhmx = amp::maximum<Precision>(fa, ha);
ssmax = amp::maximum<Precision>(fhmx, ga)*amp::sqrt<Precision>(1+amp::sqr<Precision>(amp::minimum<Precision>(fhmx, ga)/amp::maximum<Precision>(fhmx, ga)));
at = (fhmx-fhmn)/fhmx;
au = amp::sqr<Precision>(ga/fhmx);
c = 2/(amp::sqrt<Precision>(aas*aas+au)+amp::sqrt<Precision>(at*at+au));
ssmin = fhmn*fhmx/ga;
at = (fhmx-fhmn)/fhmx;
c = 1/(amp::sqrt<Precision>(1+amp::sqr<Precision>(aas*au))+amp::sqrt<Precision>(1+amp::sqr<Precision>(at*au)));
ssmin = ssmin+ssmin;
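// Illustrative sketch: singular values of the 2x2 upper-triangular block
//   [ f  g ]
//   [ 0  h ],
// using the same scaled formulas that appear in the fragment above
// (LAPACK DLAS2-style). Self-contained; svd_2x2_values is an illustrative name.
#include <algorithm>
#include <cmath>

void svd_2x2_values(double f, double g, double h, double& ssmin, double& ssmax)
{
    double fa = std::fabs(f), ga = std::fabs(g), ha = std::fabs(h);
    double fhmn = std::min(fa, ha), fhmx = std::max(fa, ha);
    if( fhmn==0.0 )
    {
        ssmin = 0.0;
        ssmax = (fhmx==0.0) ? ga
              : std::max(fhmx, ga)*std::sqrt(1.0+std::pow(std::min(fhmx, ga)/std::max(fhmx, ga), 2));
        return;
    }
    if( ga<fhmx )
    {
        double as = 1.0+fhmn/fhmx;
        double at = (fhmx-fhmn)/fhmx;
        double au = (ga/fhmx)*(ga/fhmx);
        double c = 2.0/(std::sqrt(as*as+au)+std::sqrt(at*at+au));
        ssmin = fhmn*c;
        ssmax = fhmx/c;
    }
    else
    {
        double au = fhmx/ga;
        if( au==0.0 )
        {
            ssmin = fhmn*fhmx/ga;   // ga dominates so strongly that fhmx/ga underflows
            ssmax = ga;
        }
        else
        {
            double as = 1.0+fhmn/fhmx;
            double at = (fhmx-fhmn)/fhmx;
            double c = 1.0/(std::sqrt(1.0+(as*au)*(as*au))+std::sqrt(1.0+(at*au)*(at*au)));
            ssmin = 2.0*(fhmn*c*au);
            ssmax = ga/(c+c);
        }
    }
}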
template<unsigned int Precision>
fa = amp::abs<Precision>(ft);
ha = amp::abs<Precision>(h);
ga = amp::abs<Precision>(gt);
s = amp::sqrt<Precision>(tt+mm);
r = amp::abs<Precision>(m);
r = amp::sqrt<Precision>(l*l+mm);
t = extsignbdsqr<Precision>(2, ft)*extsignbdsqr<Precision>(1, gt);
t = gt/extsignbdsqr<Precision>(d, ft)+m/t;
t = (m/(s+t)+m/(r+l))*(1+a);
l = amp::sqrt<Precision>(t*t+4);
clt = (crt+srt*m)/a;
tsign = extsignbdsqr<Precision>(1, csr)*extsignbdsqr<Precision>(1, csl)*extsignbdsqr<Precision>(1, f);
tsign = extsignbdsqr<Precision>(1, snr)*extsignbdsqr<Precision>(1, csl)*extsignbdsqr<Precision>(1, g);
tsign = extsignbdsqr<Precision>(1, snr)*extsignbdsqr<Precision>(1, snl)*extsignbdsqr<Precision>(1, h);
ssmax = extsignbdsqr<Precision>(ssmax, tsign);
ssmin = extsignbdsqr<Precision>(ssmin, tsign*extsignbdsqr<Precision>(1, f)*extsignbdsqr<Precision>(1, h));
template<unsigned int Precision>
    int additionalmemory,
template<unsigned int Precision>
    int additionalmemory,
template<unsigned int Precision>
    int additionalmemory,
w.setbounds(1, minmn);
u.setbounds(0, nru-1, 0, ncu-1);
u.setbounds(0, nru-1, 0, ncu-1);
vt.setbounds(0, nrvt-1, 0, ncvt-1);
vt.setbounds(0, nrvt-1, 0, ncvt-1);
qr::rmatrixqr<Precision>(a, m, n, tau);
for(i=0; i<=n-1; i++)
    for(j=0; j<=i-1; j++)
bidiagonal::rmatrixbd<Precision>(a, n, n, tauq, taup);
bidiagonal::rmatrixbdunpackpt<Precision>(a, n, n, taup, nrvt, vt);
bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, n, n, isupper, w, e);
result = bdsvd::rmatrixbdsvd<Precision>(w, e, n, isupper, false, u, 0, a, 0, vt, ncvt);
qr::rmatrixqr<Precision>(a, m, n, tau);
qr::rmatrixqrunpackq<Precision>(a, m, n, tau, ncu, u);
for(i=0; i<=n-1; i++)
    for(j=0; j<=i-1; j++)
bidiagonal::rmatrixbd<Precision>(a, n, n, tauq, taup);
bidiagonal::rmatrixbdunpackpt<Precision>(a, n, n, taup, nrvt, vt);
bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, n, n, isupper, w, e);
if( additionalmemory<1 )
    bidiagonal::rmatrixbdmultiplybyq<Precision>(a, n, n, tauq, u, m, n, true, false);
    result = bdsvd::rmatrixbdsvd<Precision>(w, e, n, isupper, false, u, m, a, 0, vt, ncvt);
bidiagonal::rmatrixbdunpackq<Precision>(a, n, n, tauq, n, t2);
blas::copymatrix<Precision>(u, 0, m-1, 0, n-1, a, 0, m-1, 0, n-1);
blas::inplacetranspose<Precision>(t2, 0, n-1, 0, n-1, work);
result = bdsvd::rmatrixbdsvd<Precision>(w, e, n, isupper, false, u, 0, t2, n, vt, ncvt);
blas::matrixmatrixmultiply<Precision>(a, 0, m-1, 0, n-1, false, t2, 0, n-1, 0, n-1, true, amp::ampf<Precision>("1.0"), u, 0, m-1, 0, n-1, amp::ampf<Precision>("0.0"), work);
lq::rmatrixlq<Precision>(a, m, n, tau);
for(i=0; i<=m-1; i++)
    for(j=i+1; j<=m-1; j++)
bidiagonal::rmatrixbd<Precision>(a, m, m, tauq, taup);
bidiagonal::rmatrixbdunpackq<Precision>(a, m, m, tauq, ncu, u);
bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, m, m, isupper, w, e);
blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
result = bdsvd::rmatrixbdsvd<Precision>(w, e, m, isupper, false, a, 0, u, nru, vt, 0);
blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
lq::rmatrixlq<Precision>(a, m, n, tau);
lq::rmatrixlqunpackq<Precision>(a, m, n, tau, nrvt, vt);
for(i=0; i<=m-1; i++)
    for(j=i+1; j<=m-1; j++)
bidiagonal::rmatrixbd<Precision>(a, m, m, tauq, taup);
bidiagonal::rmatrixbdunpackq<Precision>(a, m, m, tauq, ncu, u);
bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, m, m, isupper, w, e);
blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
if( additionalmemory<1 )
    bidiagonal::rmatrixbdmultiplybyp<Precision>(a, m, m, taup, vt, m, n, false, true);
    result = bdsvd::rmatrixbdsvd<Precision>(w, e, m, isupper, false, a, 0, u, nru, vt, n);
bidiagonal::rmatrixbdunpackpt<Precision>(a, m, m, taup, m, t2);
result = bdsvd::rmatrixbdsvd<Precision>(w, e, m, isupper, false, a, 0, u, nru, t2, m);
blas::copymatrix<Precision>(vt, 0, m-1, 0, n-1, a, 0, m-1, 0, n-1);
blas::matrixmatrixmultiply<Precision>(t2, 0, m-1, 0, m-1, false, a, 0, m-1, 0, n-1, false, amp::ampf<Precision>("1.0"), vt, 0, m-1, 0, n-1, amp::ampf<Precision>("0.0"), work);
blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
bidiagonal::rmatrixbd<Precision>(a, m, n, tauq, taup);
bidiagonal::rmatrixbdunpackq<Precision>(a, m, n, tauq, ncu, u);
bidiagonal::rmatrixbdunpackpt<Precision>(a, m, n, taup, nrvt, vt);
bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, m, n, isupper, w, e);
blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
result = bdsvd::rmatrixbdsvd<Precision>(w, e, minmn, isupper, false, a, 0, u, nru, vt, ncvt);
blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
bidiagonal::rmatrixbd<Precision>(a, m, n, tauq, taup);
bidiagonal::rmatrixbdunpackq<Precision>(a, m, n, tauq, ncu, u);
bidiagonal::rmatrixbdunpackpt<Precision>(a, m, n, taup, nrvt, vt);
bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, m, n, isupper, w, e);
if( additionalmemory<2 || uneeded==0 )
    result = bdsvd::rmatrixbdsvd<Precision>(w, e, minmn, isupper, false, u, nru, a, 0, vt, ncvt);
blas::copyandtranspose<Precision>(u, 0, m-1, 0, minmn-1, t2, 0, minmn-1, 0, m-1);
result = bdsvd::rmatrixbdsvd<Precision>(w, e, minmn, isupper, false, u, 0, t2, m, vt, ncvt);
blas::copyandtranspose<Precision>(t2, 0, minmn-1, 0, m-1, u, 0, m-1, 0, minmn-1);
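// Illustrative sketch of the dispatch logic visible in the fragments above:
// the driver picks a preprocessing path based on the shape of A before running
// the bidiagonal SVD. The 1.6 aspect-ratio threshold and the enum are
// assumptions for illustration, not the library's exact constants.
enum class SvdPath
{
    QrThenBidiagonal,   // m much larger than n: QR first, then bidiagonalize R
    LqThenBidiagonal,   // n much larger than m: LQ first, then bidiagonalize L
    BidiagonalDirect    // roughly square: bidiagonalize A directly
};

SvdPath choose_svd_path(int m, int n, double ratio = 1.6 /* assumed threshold */)
{
    if( m > ratio*n )
        return SvdPath::QrThenBidiagonal;
    if( n > ratio*m )
        return SvdPath::LqThenBidiagonal;
    return SvdPath::BidiagonalDirect;
}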
template<unsigned int Precision>
    int additionalmemory,
w.setbounds(1, minmn);
u.setbounds(1, nru, 1, ncu);
u.setbounds(1, nru, 1, ncu);
vt.setbounds(1, nrvt, 1, ncvt);
vt.setbounds(1, nrvt, 1, ncvt);
qr::qrdecomposition<Precision>(a, m, n, tau);
for(j=1; j<=i-1; j++)
bidiagonal::tobidiagonal<Precision>(a, n, n, tauq, taup);
bidiagonal::unpackptfrombidiagonal<Precision>(a, n, n, taup, nrvt, vt);
bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, n, n, isupper, w, e);
result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, n, isupper, false, u, 0, a, 0, vt, ncvt);
qr::qrdecomposition<Precision>(a, m, n, tau);
qr::unpackqfromqr<Precision>(a, m, n, tau, ncu, u);
for(j=1; j<=i-1; j++)
bidiagonal::tobidiagonal<Precision>(a, n, n, tauq, taup);
bidiagonal::unpackptfrombidiagonal<Precision>(a, n, n, taup, nrvt, vt);
bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, n, n, isupper, w, e);
if( additionalmemory<1 )
    bidiagonal::multiplybyqfrombidiagonal<Precision>(a, n, n, tauq, u, m, n, true, false);
    result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, n, isupper, false, u, m, a, 0, vt, ncvt);
bidiagonal::unpackqfrombidiagonal<Precision>(a, n, n, tauq, n, t2);
blas::copymatrix<Precision>(u, 1, m, 1, n, a, 1, m, 1, n);
blas::inplacetranspose<Precision>(t2, 1, n, 1, n, work);
result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, n, isupper, false, u, 0, t2, n, vt, ncvt);
blas::matrixmatrixmultiply<Precision>(a, 1, m, 1, n, false, t2, 1, n, 1, n, true, amp::ampf<Precision>("1.0"), u, 1, m, 1, n, amp::ampf<Precision>("0.0"), work);
lq::lqdecomposition<Precision>(a, m, n, tau);
for(i=1; i<=m-1; i++)
    for(j=i+1; j<=m; j++)
bidiagonal::tobidiagonal<Precision>(a, m, m, tauq, taup);
bidiagonal::unpackqfrombidiagonal<Precision>(a, m, m, tauq, ncu, u);
bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, m, m, isupper, w, e);
blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, m, isupper, false, a, 0, u, nru, vt, 0);
blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
lq::lqdecomposition<Precision>(a, m, n, tau);
lq::unpackqfromlq<Precision>(a, m, n, tau, nrvt, vt);
for(i=1; i<=m-1; i++)
    for(j=i+1; j<=m; j++)
bidiagonal::tobidiagonal<Precision>(a, m, m, tauq, taup);
bidiagonal::unpackqfrombidiagonal<Precision>(a, m, m, tauq, ncu, u);
bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, m, m, isupper, w, e);
blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
if( additionalmemory<1 )
    bidiagonal::multiplybypfrombidiagonal<Precision>(a, m, m, taup, vt, m, n, false, true);
    result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, m, isupper, false, a, 0, u, nru, vt, n);
bidiagonal::unpackptfrombidiagonal<Precision>(a, m, m, taup, m, t2);
result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, m, isupper, false, a, 0, u, nru, t2, m);
blas::copymatrix<Precision>(vt, 1, m, 1, n, a, 1, m, 1, n);
blas::matrixmatrixmultiply<Precision>(t2, 1, m, 1, m, false, a, 1, m, 1, n, false, amp::ampf<Precision>("1.0"), vt, 1, m, 1, n, amp::ampf<Precision>("0.0"), work);
blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
bidiagonal::tobidiagonal<Precision>(a, m, n, tauq, taup);
bidiagonal::unpackqfrombidiagonal<Precision>(a, m, n, tauq, ncu, u);
bidiagonal::unpackptfrombidiagonal<Precision>(a, m, n, taup, nrvt, vt);
bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, m, n, isupper, w, e);
blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, minmn, isupper, false, a, 0, u, nru, vt, ncvt);
blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
bidiagonal::tobidiagonal<Precision>(a, m, n, tauq, taup);
bidiagonal::unpackqfrombidiagonal<Precision>(a, m, n, tauq, ncu, u);
bidiagonal::unpackptfrombidiagonal<Precision>(a, m, n, taup, nrvt, vt);
bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, m, n, isupper, w, e);
if( additionalmemory<2 || uneeded==0 )
    result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, minmn, isupper, false, u, nru, a, 0, vt, ncvt);
blas::copyandtranspose<Precision>(u, 1, m, 1, minmn, t2, 1, minmn, 1, m);
result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, minmn, isupper, false, u, 0, t2, m, vt, ncvt);
blas::copyandtranspose<Precision>(t2, 1, minmn, 1, m, u, 1, m, 1, minmn);