383 $ AF, LDAF, COLEQU, C, B, LDB, Y,
384 $ LDY, BERR_OUT, N_NORMS,
385 $ ERR_BNDS_NORM, ERR_BNDS_COMP, RES,
386 $ AYB, DY, Y_TAIL, RCOND, ITHRESH,
387 $ RTHRESH, DZ_UB, IGNORE_CWISE,
396 INTEGER INFO, LDA, LDAF, LDB, LDY, N, NRHS, PREC_TYPE,
399 LOGICAL COLEQU, IGNORE_CWISE
400 DOUBLE PRECISION RTHRESH, DZ_UB
403 COMPLEX*16 A( lda, * ), AF( ldaf, * ), B( ldb, * ),
404 $ y( ldy, * ), res( * ), dy( * ), y_tail( * )
405 DOUBLE PRECISION C( * ), AYB( * ), RCOND, BERR_OUT( * ),
406 $ err_bnds_norm( nrhs, * ),
407 $ err_bnds_comp( nrhs, * )
413 INTEGER UPLO2, CNT, I, J, X_STATE, Z_STATE,
415 DOUBLE PRECISION YK, DYK, YMIN, NORMY, NORMX, NORMDX, DXRAT,
416 $ dzrat, prevnormdx, prev_dz_z, dxratmax,
417 $ dzratmax, dx_x, dz_z, final_dx_x, final_dz_z,
418 $ eps, hugeval, incr_thresh
423 INTEGER UNSTABLE_STATE, WORKING_STATE, CONV_STATE,
424 $ noprog_state, base_residual, extra_residual,
426 parameter( unstable_state = 0, working_state = 1,
427 $ conv_state = 2, noprog_state = 3 )
428 parameter( base_residual = 0, extra_residual = 1,
430 INTEGER FINAL_NRM_ERR_I, FINAL_CMP_ERR_I, BERR_I
431 INTEGER RCOND_I, NRM_RCOND_I, NRM_ERR_I, CMP_RCOND_I
432 INTEGER CMP_ERR_I, PIV_GROWTH_I
433 parameter( final_nrm_err_i = 1, final_cmp_err_i = 2,
435 parameter( rcond_i = 4, nrm_rcond_i = 5, nrm_err_i = 6 )
436 parameter( cmp_rcond_i = 7, cmp_err_i = 8,
438 INTEGER LA_LINRX_ITREF_I, LA_LINRX_ITHRESH_I,
440 parameter( la_linrx_itref_i = 1,
441 $ la_linrx_ithresh_i = 2 )
442 parameter( la_linrx_cwise_i = 3 )
443 INTEGER LA_LINRX_TRUST_I, LA_LINRX_ERR_I,
445 parameter( la_linrx_trust_i = 1, la_linrx_err_i = 2 )
446 parameter( la_linrx_rcond_i = 3 )
457 DOUBLE PRECISION DLAMCH
460 INTRINSIC abs, dble, dimag, max, min
463 DOUBLE PRECISION CABS1
466 cabs1( zdum ) = abs( dble( zdum ) ) + abs( dimag( zdum ) )
470 IF (info.NE.0)
RETURN 471 eps = dlamch(
'Epsilon' )
472 hugeval = dlamch(
'Overflow' )
474 hugeval = hugeval * hugeval
476 incr_thresh = dble(n) * eps
478 IF (lsame(uplo,
'L'))
THEN 479 uplo2 = ilauplo(
'L' )
481 uplo2 = ilauplo(
'U' )
485 y_prec_state = extra_residual
486 IF (y_prec_state .EQ. extra_y)
THEN 503 x_state = working_state
504 z_state = unstable_state
512 CALL zcopy( n, b( 1, j ), 1, res, 1 )
513 IF (y_prec_state .EQ. base_residual)
THEN 514 CALL zhemv(uplo, n, dcmplx(-1.0d+0), a, lda, y(1,j), 1,
515 $ dcmplx(1.0d+0), res, 1)
516 ELSE IF (y_prec_state .EQ. extra_residual)
THEN 517 CALL blas_zhemv_x(uplo2, n, dcmplx(-1.0d+0), a, lda,
518 $ y( 1, j ), 1, dcmplx(1.0d+0), res, 1, prec_type)
520 CALL blas_zhemv2_x(uplo2, n, dcmplx(-1.0d+0), a, lda,
521 $ y(1, j), y_tail, 1, dcmplx(1.0d+0), res, 1,
526 CALL zcopy( n, res, 1, dy, 1 )
527 CALL zpotrs( uplo, n, 1, af, ldaf, dy, n, info)
541 IF (yk .NE. 0.0d+0)
THEN 542 dz_z = max( dz_z, dyk / yk )
543 ELSE IF (dyk .NE. 0.0d+0)
THEN 547 ymin = min( ymin, yk )
549 normy = max( normy, yk )
552 normx = max(normx, yk * c(i))
553 normdx = max(normdx, dyk * c(i))
556 normdx = max(normdx, dyk)
560 IF (normx .NE. 0.0d+0)
THEN 561 dx_x = normdx / normx
562 ELSE IF (normdx .EQ. 0.0d+0)
THEN 568 dxrat = normdx / prevnormdx
569 dzrat = dz_z / prev_dz_z
573 IF (ymin*rcond .LT. incr_thresh*normy
574 $ .AND. y_prec_state .LT. extra_y)
577 IF (x_state .EQ. noprog_state .AND. dxrat .LE. rthresh)
578 $ x_state = working_state
579 IF (x_state .EQ. working_state)
THEN 580 IF (dx_x .LE. eps)
THEN 582 ELSE IF (dxrat .GT. rthresh)
THEN 583 IF (y_prec_state .NE. extra_y)
THEN 586 x_state = noprog_state
589 IF (dxrat .GT. dxratmax) dxratmax = dxrat
591 IF (x_state .GT. working_state) final_dx_x = dx_x
594 IF (z_state .EQ. unstable_state .AND. dz_z .LE. dz_ub)
595 $ z_state = working_state
596 IF (z_state .EQ. noprog_state .AND. dzrat .LE. rthresh)
597 $ z_state = working_state
598 IF (z_state .EQ. working_state)
THEN 599 IF (dz_z .LE. eps)
THEN 601 ELSE IF (dz_z .GT. dz_ub)
THEN 602 z_state = unstable_state
605 ELSE IF (dzrat .GT. rthresh)
THEN 606 IF (y_prec_state .NE. extra_y)
THEN 609 z_state = noprog_state
612 IF (dzrat .GT. dzratmax) dzratmax = dzrat
614 IF (z_state .GT. working_state) final_dz_z = dz_z
617 IF ( x_state.NE.working_state.AND.
618 $ (ignore_cwise.OR.z_state.NE.working_state) )
623 y_prec_state = y_prec_state + 1
634 IF (y_prec_state .LT. extra_y)
THEN 635 CALL zaxpy( n, dcmplx(1.0d+0), dy, 1, y(1,j), 1 )
646 IF (x_state .EQ. working_state) final_dx_x = dx_x
647 IF (z_state .EQ. working_state) final_dz_z = dz_z
651 IF (n_norms .GE. 1)
THEN 652 err_bnds_norm( j, la_linrx_err_i ) =
653 $ final_dx_x / (1 - dxratmax)
655 IF (n_norms .GE. 2)
THEN 656 err_bnds_comp( j, la_linrx_err_i ) =
657 $ final_dz_z / (1 - dzratmax)
668 CALL zcopy( n, b( 1, j ), 1, res, 1 )
669 CALL zhemv(uplo, n, dcmplx(-1.0d+0), a, lda, y(1,j), 1,
670 $ dcmplx(1.0d+0), res, 1)
673 ayb( i ) = cabs1( b( i, j ) )
679 $ a, lda, y(1, j), 1, 1.0d+0, ayb, 1)
double precision function dlamch(CMACH)
DLAMCH
subroutine zhemv(UPLO, N, ALPHA, A, LDA, X, INCX, BETA, Y, INCY)
ZHEMV
subroutine zla_porfsx_extended(PREC_TYPE, UPLO, N, NRHS, A, LDA, AF, LDAF, COLEQU, C, B, LDB, Y, LDY, BERR_OUT, N_NORMS, ERR_BNDS_NORM, ERR_BNDS_COMP, RES, AYB, DY, Y_TAIL, RCOND, ITHRESH, RTHRESH, DZ_UB, IGNORE_CWISE, INFO)
ZLA_PORFSX_EXTENDED improves the computed solution to a system of linear equations for symmetric or Hermitian positive-definite matrices by performing extra-precise iterative refinement and provides error bounds and backward error estimates for the solution.
subroutine zcopy(N, ZX, INCX, ZY, INCY)
ZCOPY
subroutine zla_heamv(UPLO, N, ALPHA, A, LDA, X, INCX, BETA, Y, INCY)
ZLA_HEAMV computes a matrix-vector product using a Hermitian indefinite matrix to calculate error bounds.
subroutine zla_lin_berr(N, NZ, NRHS, RES, AYB, BERR)
ZLA_LIN_BERR computes a component-wise relative backward error.
integer function ilauplo(UPLO)
ILAUPLO
subroutine zla_wwaddw(N, X, Y, W)
ZLA_WWADDW adds a vector into a doubled-single vector.
subroutine zaxpy(N, ZA, ZX, INCX, ZY, INCY)
ZAXPY
subroutine zpotrs(UPLO, N, NRHS, A, LDA, B, LDB, INFO)
ZPOTRS