/* chbmv.f -- translated by f2c (version 20100827).
   You must link the resulting object file with libf2c:
	on Microsoft Windows system, link with libf2c.lib;
	on Linux or Unix systems, link with .../path/to/libf2c.a -lm
	or, if you install libf2c.a in a standard place, with -lf2c -lm
	-- in that order, at the end of the command line, as in
		cc *.o -lf2c -lm
	Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,

		http://www.netlib.org/f2c/libf2c.zip
*/

#include "datatypes.h"

/* Subroutine */ int chbmv_(char *uplo, integer *n, integer *k, complex *
	alpha, complex *a, integer *lda, complex *x, integer *incx, complex *
	beta, complex *y, integer *incy, ftnlen uplo_len)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
    real r__1;
    complex q__1, q__2, q__3, q__4;

    /* Builtin functions */
    void r_cnjg(complex *, complex *);

    /* Local variables */
    integer i__, j, l, ix, iy, jx, jy, kx, ky, info;
    complex temp1, temp2;
    extern logical lsame_(char *, char *, ftnlen, ftnlen);
    integer kplus1;
    extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen);

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CHBMV  performs the matrix-vector  operation */

/*     y := alpha*A*x + beta*y, */

/*  where alpha and beta are scalars, x and y are n element vectors and */
/*  A is an n by n hermitian band matrix, with k super-diagonals. */

/*  Arguments */
/*  ========== */

/*  UPLO   - CHARACTER*1. */
/*           On entry, UPLO specifies whether the upper or lower */
/*           triangular part of the band matrix A is being supplied as */
/*           follows: */

/*              UPLO = 'U' or 'u'   The upper triangular part of A is */
/*                                  being supplied. */

/*              UPLO = 'L' or 'l'   The lower triangular part of A is */
/*                                  being supplied. */

/*           Unchanged on exit. */

/*  N      - INTEGER. */
/*           On entry, N specifies the order of the matrix A. */
/*           N must be at least zero. */
/*           Unchanged on exit. */

/*  K      - INTEGER. */
/*           On entry, K specifies the number of super-diagonals of the */
/*           matrix A. K must satisfy  0 .le. K. */
/*           Unchanged on exit. */

/*  ALPHA  - COMPLEX         . */
/*           On entry, ALPHA specifies the scalar alpha. */
/*           Unchanged on exit. */

/*  A      - COMPLEX          array of DIMENSION ( LDA, n ). */
/*           Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
/*           by n part of the array A must contain the upper triangular */
/*           band part of the hermitian matrix, supplied column by */
/*           column, with the leading diagonal of the matrix in row */
/*           ( k + 1 ) of the array, the first super-diagonal starting at */
/*           position 2 in row k, and so on. The top left k by k triangle */
/*           of the array A is not referenced. */
/*           The following program segment will transfer the upper */
/*           triangular part of a hermitian band matrix from conventional */
/*           full matrix storage to band storage: */

/*                 DO 20, J = 1, N */
/*                    M = K + 1 - J */
/*                    DO 10, I = MAX( 1, J - K ), J */
/*                       A( M + I, J ) = matrix( I, J ) */
/*              10    CONTINUE */
/*              20 CONTINUE */

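/*           For reference, an equivalent packing loop in the 0-based, */
/*           column-major indexing used by this C translation is sketched */
/*           below; it assumes a hypothetical full matrix "mat" stored */
/*           column-major with leading dimension n, and lda >= k + 1: */

/*                 for (j = 0; j < n; j++) */
/*                    for (i = (j > k ? j - k : 0); i <= j; i++) */
/*                       a[(k + i - j) + j*lda] = mat[i + j*n]; */
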
/*           Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
/*           by n part of the array A must contain the lower triangular */
/*           band part of the hermitian matrix, supplied column by */
/*           column, with the leading diagonal of the matrix in row 1 of */
/*           the array, the first sub-diagonal starting at position 1 in */
/*           row 2, and so on. The bottom right k by k triangle of the */
/*           array A is not referenced. */
/*           The following program segment will transfer the lower */
/*           triangular part of a hermitian band matrix from conventional */
/*           full matrix storage to band storage: */

/*                 DO 20, J = 1, N */
/*                    M = 1 - J */
/*                    DO 10, I = J, MIN( N, J + K ) */
/*                       A( M + I, J ) = matrix( I, J ) */
/*              10    CONTINUE */
/*              20 CONTINUE */

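/*           The same packing in 0-based C indexing, again only a sketch */
/*           under the assumptions stated for the upper-triangular case: */

/*                 for (j = 0; j < n; j++) */
/*                    for (i = j; i < n && i <= j + k; i++) */
/*                       a[(i - j) + j*lda] = mat[i + j*n]; */
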
/*           Note that the imaginary parts of the diagonal elements need */
/*           not be set and are assumed to be zero. */
/*           Unchanged on exit. */

/*  LDA    - INTEGER. */
/*           On entry, LDA specifies the first dimension of A as declared */
/*           in the calling (sub) program. LDA must be at least */
/*           ( k + 1 ). */
/*           Unchanged on exit. */

/*  X      - COMPLEX          array of DIMENSION at least */
/*           ( 1 + ( n - 1 )*abs( INCX ) ). */
/*           Before entry, the incremented array X must contain the */
/*           vector x. */
/*           Unchanged on exit. */

/*  INCX   - INTEGER. */
/*           On entry, INCX specifies the increment for the elements of */
/*           X. INCX must not be zero. */
/*           Unchanged on exit. */

/*  BETA   - COMPLEX         . */
/*           On entry, BETA specifies the scalar beta. */
/*           Unchanged on exit. */

/*  Y      - COMPLEX          array of DIMENSION at least */
/*           ( 1 + ( n - 1 )*abs( INCY ) ). */
/*           Before entry, the incremented array Y must contain the */
/*           vector y. On exit, Y is overwritten by the updated vector y. */

/*  INCY   - INTEGER. */
/*           On entry, INCY specifies the increment for the elements of */
/*           Y. INCY must not be zero. */
/*           Unchanged on exit. */

/*  Further Details */
/*  =============== */

/*  Level 2 Blas routine. */

/*  -- Written on 22-October-1986. */
/*     Jack Dongarra, Argonne National Lab. */
/*     Jeremy Du Croz, Nag Central Office. */
/*     Sven Hammarling, Nag Central Office. */
/*     Richard Hanson, Sandia National Labs. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */

/*     Test the input parameters. */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --x;
    --y;

    /* Function Body */
    info = 0;
    if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(uplo, "L", (
	    ftnlen)1, (ftnlen)1)) {
	info = 1;
    } else if (*n < 0) {
	info = 2;
    } else if (*k < 0) {
	info = 3;
    } else if (*lda < *k + 1) {
	info = 6;
    } else if (*incx == 0) {
	info = 8;
    } else if (*incy == 0) {
	info = 11;
    }
    if (info != 0) {
	xerbla_("CHBMV ", &info, (ftnlen)6);
	return 0;
    }

/*     Quick return if possible. */

    if (*n == 0 || (alpha->r == 0.f && alpha->i == 0.f && (beta->r == 1.f &&
                                                           beta->i == 0.f))) {
	return 0;
    }

/*     Set up the start points in  X  and  Y. */

    if (*incx > 0) {
	kx = 1;
    } else {
	kx = 1 - (*n - 1) * *incx;
    }
    if (*incy > 0) {
	ky = 1;
    } else {
	ky = 1 - (*n - 1) * *incy;
    }

/*     Start the operations. In this version the elements of the array A */
/*     are accessed sequentially with one pass through A. */

/*     First form  y := beta*y. */

    if (beta->r != 1.f || beta->i != 0.f) {
	if (*incy == 1) {
	    if (beta->r == 0.f && beta->i == 0.f) {
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    i__2 = i__;
		    y[i__2].r = 0.f, y[i__2].i = 0.f;
/* L10: */
		}
	    } else {
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    i__2 = i__;
		    i__3 = i__;
		    q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
			    q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
			    .r;
		    y[i__2].r = q__1.r, y[i__2].i = q__1.i;
/* L20: */
		}
	    }
	} else {
	    iy = ky;
	    if (beta->r == 0.f && beta->i == 0.f) {
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    i__2 = iy;
		    y[i__2].r = 0.f, y[i__2].i = 0.f;
		    iy += *incy;
/* L30: */
		}
	    } else {
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    i__2 = iy;
		    i__3 = iy;
		    q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
			    q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
			    .r;
		    y[i__2].r = q__1.r, y[i__2].i = q__1.i;
		    iy += *incy;
/* L40: */
		}
	    }
	}
    }
    if (alpha->r == 0.f && alpha->i == 0.f) {
	return 0;
    }
    if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) {

/*        Form  y  when upper triangle of A is stored. */

	kplus1 = *k + 1;
	if (*incx == 1 && *incy == 1) {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = j;
		q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
			 alpha->r * x[i__2].i + alpha->i * x[i__2].r;
		temp1.r = q__1.r, temp1.i = q__1.i;
		temp2.r = 0.f, temp2.i = 0.f;
		l = kplus1 - j;
/* Computing MAX */
		i__2 = 1, i__3 = j - *k;
		i__4 = j - 1;
		for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
		    i__2 = i__;
		    i__3 = i__;
		    i__5 = l + i__ + j * a_dim1;
		    q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
			    q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
			    .r;
		    q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
		    y[i__2].r = q__1.r, y[i__2].i = q__1.i;
		    r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
		    i__2 = i__;
		    q__2.r = q__3.r * x[i__2].r - q__3.i * x[i__2].i, q__2.i =
			     q__3.r * x[i__2].i + q__3.i * x[i__2].r;
		    q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
		    temp2.r = q__1.r, temp2.i = q__1.i;
/* L50: */
		}
		i__4 = j;
		i__2 = j;
		i__3 = kplus1 + j * a_dim1;
		r__1 = a[i__3].r;
		q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i;
		q__2.r = y[i__2].r + q__3.r, q__2.i = y[i__2].i + q__3.i;
		q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
			alpha->r * temp2.i + alpha->i * temp2.r;
		q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
		y[i__4].r = q__1.r, y[i__4].i = q__1.i;
/* L60: */
	    }
	} else {
	    jx = kx;
	    jy = ky;
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__4 = jx;
		q__1.r = alpha->r * x[i__4].r - alpha->i * x[i__4].i, q__1.i =
			 alpha->r * x[i__4].i + alpha->i * x[i__4].r;
		temp1.r = q__1.r, temp1.i = q__1.i;
		temp2.r = 0.f, temp2.i = 0.f;
		ix = kx;
		iy = ky;
		l = kplus1 - j;
/* Computing MAX */
		i__4 = 1, i__2 = j - *k;
		i__3 = j - 1;
		for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
		    i__4 = iy;
		    i__2 = iy;
		    i__5 = l + i__ + j * a_dim1;
		    q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
			    q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
			    .r;
		    q__1.r = y[i__2].r + q__2.r, q__1.i = y[i__2].i + q__2.i;
		    y[i__4].r = q__1.r, y[i__4].i = q__1.i;
		    r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
		    i__4 = ix;
		    q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i, q__2.i =
			     q__3.r * x[i__4].i + q__3.i * x[i__4].r;
		    q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
		    temp2.r = q__1.r, temp2.i = q__1.i;
		    ix += *incx;
		    iy += *incy;
/* L70: */
		}
		i__3 = jy;
		i__4 = jy;
		i__2 = kplus1 + j * a_dim1;
		r__1 = a[i__2].r;
		q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i;
		q__2.r = y[i__4].r + q__3.r, q__2.i = y[i__4].i + q__3.i;
		q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
			alpha->r * temp2.i + alpha->i * temp2.r;
		q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
		y[i__3].r = q__1.r, y[i__3].i = q__1.i;
		jx += *incx;
		jy += *incy;
		if (j > *k) {
		    kx += *incx;
		    ky += *incy;
		}
/* L80: */
	    }
	}
    } else {

/*        Form  y  when lower triangle of A is stored. */

	if (*incx == 1 && *incy == 1) {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__3 = j;
		q__1.r = alpha->r * x[i__3].r - alpha->i * x[i__3].i, q__1.i =
			 alpha->r * x[i__3].i + alpha->i * x[i__3].r;
		temp1.r = q__1.r, temp1.i = q__1.i;
		temp2.r = 0.f, temp2.i = 0.f;
		i__3 = j;
		i__4 = j;
		i__2 = j * a_dim1 + 1;
		r__1 = a[i__2].r;
		q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i;
		q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
		y[i__3].r = q__1.r, y[i__3].i = q__1.i;
		l = 1 - j;
/* Computing MIN */
		i__4 = *n, i__2 = j + *k;
		i__3 = min(i__4,i__2);
		for (i__ = j + 1; i__ <= i__3; ++i__) {
		    i__4 = i__;
		    i__2 = i__;
		    i__5 = l + i__ + j * a_dim1;
		    q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
			    q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
			    .r;
		    q__1.r = y[i__2].r + q__2.r, q__1.i = y[i__2].i + q__2.i;
		    y[i__4].r = q__1.r, y[i__4].i = q__1.i;
		    r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
		    i__4 = i__;
		    q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i, q__2.i =
			     q__3.r * x[i__4].i + q__3.i * x[i__4].r;
		    q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
		    temp2.r = q__1.r, temp2.i = q__1.i;
/* L90: */
		}
		i__3 = j;
		i__4 = j;
		q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
			alpha->r * temp2.i + alpha->i * temp2.r;
		q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
		y[i__3].r = q__1.r, y[i__3].i = q__1.i;
/* L100: */
	    }
	} else {
	    jx = kx;
	    jy = ky;
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__3 = jx;
		q__1.r = alpha->r * x[i__3].r - alpha->i * x[i__3].i, q__1.i =
			 alpha->r * x[i__3].i + alpha->i * x[i__3].r;
		temp1.r = q__1.r, temp1.i = q__1.i;
		temp2.r = 0.f, temp2.i = 0.f;
		i__3 = jy;
		i__4 = jy;
		i__2 = j * a_dim1 + 1;
		r__1 = a[i__2].r;
		q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i;
		q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
		y[i__3].r = q__1.r, y[i__3].i = q__1.i;
		l = 1 - j;
		ix = jx;
		iy = jy;
/* Computing MIN */
		i__4 = *n, i__2 = j + *k;
		i__3 = min(i__4,i__2);
		for (i__ = j + 1; i__ <= i__3; ++i__) {
		    ix += *incx;
		    iy += *incy;
		    i__4 = iy;
		    i__2 = iy;
		    i__5 = l + i__ + j * a_dim1;
		    q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
			    q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
			    .r;
		    q__1.r = y[i__2].r + q__2.r, q__1.i = y[i__2].i + q__2.i;
		    y[i__4].r = q__1.r, y[i__4].i = q__1.i;
		    r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
		    i__4 = ix;
		    q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i, q__2.i =
			     q__3.r * x[i__4].i + q__3.i * x[i__4].r;
		    q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
		    temp2.r = q__1.r, temp2.i = q__1.i;
/* L110: */
		}
		i__3 = jy;
		i__4 = jy;
		q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
			alpha->r * temp2.i + alpha->i * temp2.r;
		q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
		y[i__3].r = q__1.r, y[i__3].i = q__1.i;
		jx += *incx;
		jy += *incy;
/* L120: */
	    }
	}
    }

    return 0;

/*     End of CHBMV . */

} /* chbmv_ */

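/* A minimal usage sketch, not part of the original f2c translation: it
   assumes the f2c-style types from "datatypes.h" (integer, real, complex,
   ftnlen) and computes y = A*x for a 4 x 4 Hermitian band matrix with one
   super-diagonal supplied in upper band storage. The guard macro
   CHBMV_EXAMPLE is illustrative only; compile with -DCHBMV_EXAMPLE to
   enable it. */
#ifdef CHBMV_EXAMPLE
#include <stdio.h>

int main(void)
{
    integer n = 4, k = 1, lda = 2, incx = 1, incy = 1;
    complex alpha = {1.f, 0.f}, beta = {0.f, 0.f};
    /* Upper band storage, column by column: row k+1 (here row 2) holds the
       diagonal, row k (here row 1) holds the super-diagonal; the entry above
       the first diagonal element is never referenced. */
    complex a[8] = {
	{0.f, 0.f}, {2.f, 0.f},   /* column 1: unused,  a(1,1) */
	{1.f, 1.f}, {2.f, 0.f},   /* column 2: a(1,2), a(2,2) */
	{1.f, 1.f}, {2.f, 0.f},   /* column 3: a(2,3), a(3,3) */
	{1.f, 1.f}, {2.f, 0.f}    /* column 4: a(3,4), a(4,4) */
    };
    complex x[4] = {{1.f, 0.f}, {1.f, 0.f}, {1.f, 0.f}, {1.f, 0.f}};
    complex y[4];
    integer i;

    chbmv_("U", &n, &k, &alpha, a, &lda, x, &incx, &beta, y, &incy, (ftnlen)1);
    for (i = 0; i < n; ++i) {
	printf("y[%ld] = %g + %gi\n", (long)i, (double)y[i].r, (double)y[i].i);
    }
    return 0;
}
#endif /* CHBMV_EXAMPLE */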