/[ascend]/trunk/base/generic/solver/mtx_perms.c
Revision 708 - Tue Jun 27 07:34:31 2006 UTC by johnpye
File MIME type: text/x-csrc
File size: 52507 byte(s)
Log message: Replaced some references to ascmalloc with ASC_NEW_ARRAY
/* mtx: Ascend Sparse Matrix Package
 * by Karl Michael Westerberg
 * Created: 2/6/90
 * Version: $Revision: 1.12 $
 * Version control file: $RCSfile: mtx_perms.c,v $
 * Date last modified: $Date: 1998/05/06 17:28:51 $
 * Last modified by: $Author: ballan $
 *
 * This file is part of the SLV solver.
 *
 * Copyright (C) 1990 Karl Michael Westerberg
 * Copyright (C) 1993 Joseph Zaher
 * Copyright (C) 1994 Joseph Zaher, Benjamin Andrew Allan
 * Copyright (C) 1995 Kirk Andre Abbott, Benjamin Andrew Allan
 * Copyright (C) 1996 Benjamin Andrew Allan
 *
 * The SLV solver is free software; you can redistribute
 * it and/or modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * The SLV solver is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * the program; if not, write to the Free Software Foundation, Inc., 675
 * Mass Ave, Cambridge, MA 02139 USA. Check the file named COPYING.
 * COPYING is found in ../compiler.
 */
#include <utilities/ascConfig.h>
#include <utilities/ascMalloc.h>
#include <utilities/mem.h>
#include "mtx.h"
/* grab our private parts */
#define __MTX_C_SEEN__
#include "mtx_use_only.h"

static mtx_block_perm_t alloc_block_perm()
/**
 *** Allocates a block_perm header (including its structural data)
 *** and returns a pointer to it.
 **/
{
  mtx_block_perm_t bp;
  bp = (mtx_block_perm_t)asccalloc(1,sizeof(struct mtx_block_perm_structure));
  bp->integrity = OK;
  bp->data =
    (struct structural_data_t *)ascmalloc(sizeof(struct structural_data_t));
  return(bp);
}
#define free_block_perm(bp) ascfree(bp)

/**
 *** This is a strange little function which, at the expense of much array
 *** logic, sets an org_to_cur permutation array based on the data in a
 *** cur_to_org. Method:
 ***   for i = start..len-1
 ***     o2c[c2o[i]] = i;
 *** If the majority of an org_to_cur is being copied, it is probably much
 *** better to copy the whole permutation from the relevant original
 *** org_to_cur rather than piecewise as is done here.
 *** WARNING: ABSOLUTELY NOTHING is done to ensure sanity in this function.
 *** If you can't draw the picture of what this function does by hand and
 *** compute an example result, don't use it. That said, this saves lots of
 *** otherwise very annoying time spent in re-reordering.
 **/
static void fix_org2cur_from_cur2org(int32 *c2o, int32 start,
                                     int32 *o2c, int32 len)
{
  int32 i;
  if (ISNULL(c2o) || ISNULL(o2c) || start < 0 || len < 1) {
    FPRINTF(g_mtxerr,
      "ERROR: (mtx-internal) fix_org2cur_from_cur2org got bogus args.\n");
    return;
  }
  for (i=start; i < len; i++) {
    o2c[c2o[i]] = i;
  }
  return;
}
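
/*
 * Worked example (illustrative only, not part of the library): suppose
 * len = 4, start = 0, and cur_to_org is c2o = {2,0,3,1}, i.e. current
 * position 0 holds original index 2, and so on. The loop above visits
 * i = 0..3 and writes o2c[2]=0, o2c[0]=1, o2c[3]=2, o2c[1]=3, leaving
 * o2c = {1,3,0,2}, which is exactly the inverse permutation. A sketch
 * of the same check in plain C, under those assumptions:
 *
 *   int32 c2o[4] = {2,0,3,1}, o2c[4];
 *   int32 i;
 *   fix_org2cur_from_cur2org(c2o, 0, o2c, 4);
 *   for (i = 0; i < 4; i++) {
 *     assert(o2c[c2o[i]] == i);   (needs <assert.h> if compiled)
 *   }
 */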

extern mtx_block_perm_t mtx_create_block_perm(mtx_matrix_t mtx)
{
  mtx_block_perm_t bp = NULL;
  int32 bnum;

  if (ISSLAVE(mtx)) {
    return (mtx_create_block_perm(mtx->master));
  }
  if (!mtx_check_matrix(mtx)) return NULL;
  if (mtx->capacity < 2) {
    FPRINTF(g_mtxerr,"ERROR: mtx_create_block_perm: mtx given is too\n");
    FPRINTF(g_mtxerr," small to permute.\n");
    FPRINTF(g_mtxerr," Returning NULL.\n");
    return NULL;
  }
  if (mtx->data->symbolic_rank < 0) {
    FPRINTF(g_mtxerr,"ERROR: mtx_create_block_perm: mtx given is not\n");
    FPRINTF(g_mtxerr," output assigned.\n");
    FPRINTF(g_mtxerr," Returning NULL.\n");
    return NULL;
  }

  /* create same size matrix information */
  bp = alloc_block_perm();
  if (ISNULL(bp)) {
    FPRINTF(g_mtxerr,"ERROR: mtx_create_block_perm: insufficient memory\n");
    FPRINTF(g_mtxerr," allocating bperm (1).\n");
    FPRINTF(g_mtxerr," Returning NULL.\n");
    return NULL;
  }
  bp->order = mtx->order;
  bp->capacity = mtx->capacity;
  bp->mtx = mtx;

  /* copy permutation information */
  bp->perm.row.org_to_cur = mtx_alloc_perm(bp->capacity);
  bp->perm.row.cur_to_org = mtx_alloc_perm(bp->capacity);
  bp->perm.col.org_to_cur = mtx_alloc_perm(bp->capacity);
  bp->perm.col.cur_to_org = mtx_alloc_perm(bp->capacity);
  if (ISNULL(bp->perm.row.org_to_cur) ||
      ISNULL(bp->perm.row.cur_to_org) ||
      ISNULL(bp->perm.col.org_to_cur) ||
      ISNULL(bp->perm.col.cur_to_org) ) {
    FPRINTF(g_mtxerr,"ERROR: mtx_create_block_perm: insufficient memory\n");
    FPRINTF(g_mtxerr," allocating bperm (2).\n");
    FPRINTF(g_mtxerr," Returning NULL.\n");
    mtx_free_perm(bp->perm.row.org_to_cur);
    mtx_free_perm(bp->perm.row.cur_to_org);
    mtx_free_perm(bp->perm.col.org_to_cur);
    mtx_free_perm(bp->perm.col.cur_to_org);
    free_block_perm(bp);
    return NULL;
  }

  mtx_copy_perm(bp->perm.row.org_to_cur,
                mtx->perm.row.org_to_cur,bp->capacity);
  mtx_copy_perm(bp->perm.row.cur_to_org,
                mtx->perm.row.cur_to_org,bp->capacity);
  bp->perm.row.parity = mtx_row_parity(mtx);

  mtx_copy_perm(bp->perm.col.org_to_cur,
                mtx->perm.col.org_to_cur,bp->capacity);
  mtx_copy_perm(bp->perm.col.cur_to_org,
                mtx->perm.col.cur_to_org,bp->capacity);
  bp->perm.col.parity = mtx_col_parity(mtx);

  /* bp->data was already allocated by alloc_block_perm; allocating it
   * again here (as older revisions did) leaked the first allocation. */
  bp->data->symbolic_rank = mtx->data->symbolic_rank;
  bp->data->nblocks = mtx->data->nblocks;
  bp->data->block = mtx->data->nblocks > 0 ? (mtx_region_t *)
    ascmalloc( mtx->data->nblocks*sizeof(mtx_region_t) ) : NULL;
  for( bnum=0; bnum < mtx->data->nblocks; bnum++ ) {
    bp->data->block[bnum] = mtx->data->block[bnum];
  }
  return bp;
}

int mtx_update_block_perm(mtx_matrix_t mtx, int32 bnum,
                          mtx_block_perm_t bp)
{
  if (ISSLAVE(mtx)) {
    return (mtx_update_block_perm(mtx->master,bnum,bp));
  }
  /* updating an existing bperm is not implemented for masters;
   * callers always get the generic error code back. */
  return 3;
}

int mtx_restore_block_perm(mtx_matrix_t mtx, int32 bnum,
                           mtx_block_perm_t bp)
{
  int32 len;
  if (mtx_check_matrix(mtx) != 1) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) restore_block_perm: Called with bad\n");
    FPRINTF(g_mtxerr," mtx.\n");
    return 1;
  }
  if (ISSLAVE(mtx)) {
    /* forward to the master; recursing on mtx itself (as older
     * revisions did) would never terminate. */
    return(mtx_restore_block_perm(mtx->master,bnum,bp));
  }
  if (ISNULL(bp) || bp->integrity != OK) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) restore_block_perm: Called with bad\n");
    FPRINTF(g_mtxerr," bperm.\n");
    return 2;
  }
  if (bp->mtx != mtx) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) restore_block_perm: Mismatched mtx\n");
    FPRINTF(g_mtxerr," and bperm given.\n");
    return 3;
  }
  if (bp->order != mtx->order) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) restore_block_perm: Mismatched order mtx\n");
    FPRINTF(g_mtxerr," and bperm given.\n");
    return 3;
  }
  if (bnum != mtx_ALL_BLOCKS && bp->data->nblocks != 0) {
    /* check block sanity */
    if (mtx->data->symbolic_rank < 0) {
      FPRINTF(g_mtxerr,"ERROR: mtx_restore_block_perm: mtx given is not\n");
      FPRINTF(g_mtxerr," output assigned.\n");
      FPRINTF(g_mtxerr," Returning 3.\n");
      return 3;
    }
    if ( bp->data->nblocks != mtx->data->nblocks ||
         bp->data->block[bnum].row.low != mtx->data->block[bnum].row.low ||
         bp->data->block[bnum].col.low != mtx->data->block[bnum].col.low ||
         bp->data->block[bnum].row.high != mtx->data->block[bnum].row.high ||
         bp->data->block[bnum].col.high != mtx->data->block[bnum].col.high ) {
      FPRINTF(g_mtxerr,
              "ERROR: (mtx) restore_block_perm: Mismatched block mtx\n");
      FPRINTF(g_mtxerr,
              " and bperm given.\n");
      return 3;
    }
    /* now copy block bnumth info accordingly, resetting parity */
    /* remember that copy_perm is for some weird reason cp(targ,src,len) */

    len = bp->data->block[bnum].row.high - bp->data->block[bnum].row.low + 1;

    /* block given is in cur coordinates, so we can copy relevant stretches
       of the cur_to_org permutations sequentially. pointer arithmetic. */
    mtx_copy_perm((mtx->perm.row.cur_to_org) + bp->data->block[bnum].row.low,
                  (bp->perm.row.cur_to_org) + bp->data->block[bnum].row.low,
                  len);
    /* the helper's last argument is an exclusive end index (it loops
       i = start..len-1), so pass low+len rather than the bare length. */
    fix_org2cur_from_cur2org(mtx->perm.row.cur_to_org,
                             bp->data->block[bnum].row.low,
                             mtx->perm.row.org_to_cur,
                             bp->data->block[bnum].row.low + len);
    mtx->perm.row.parity = bp->perm.row.parity;

    len = bp->data->block[bnum].col.high - bp->data->block[bnum].col.low + 1;
    mtx_copy_perm((mtx->perm.col.cur_to_org) + bp->data->block[bnum].col.low,
                  (bp->perm.col.cur_to_org) + bp->data->block[bnum].col.low,
                  len);
    fix_org2cur_from_cur2org(mtx->perm.col.cur_to_org,
                             bp->data->block[bnum].col.low,
                             mtx->perm.col.org_to_cur,
                             bp->data->block[bnum].col.low + len);
    mtx->perm.col.parity = bp->perm.col.parity;

  } else {
    /* copy all perm info */
    mtx_copy_perm(mtx->perm.row.cur_to_org,
                  bp->perm.row.cur_to_org,bp->capacity);
    mtx_copy_perm(mtx->perm.row.org_to_cur,
                  bp->perm.row.org_to_cur,bp->capacity);
    mtx->perm.row.parity = bp->perm.row.parity;

    mtx_copy_perm(mtx->perm.col.org_to_cur,
                  bp->perm.col.org_to_cur,bp->capacity);
    mtx_copy_perm(mtx->perm.col.cur_to_org,
                  bp->perm.col.cur_to_org,bp->capacity);
    mtx->perm.col.parity = bp->perm.col.parity;

    /* copy all oa/block info if any. */
    mtx->data->symbolic_rank = bp->data->symbolic_rank;
    if ( mtx->data->nblocks != bp->data->nblocks) {
      if (NOTNULL(mtx->data->block)) {
        ascfree(mtx->data->block);
      }
      mtx->data->nblocks = bp->data->nblocks;
      mtx->data->block = bp->data->nblocks > 0 ? (mtx_region_t *)
        ascmalloc( mtx->data->nblocks*sizeof(mtx_region_t) ) : NULL;
    }
    for( bnum=0; bnum < bp->data->nblocks; bnum++ ) {
      mtx->data->block[bnum] = bp->data->block[bnum];
    }
  }
#if MTX_DEBUG
  if (super_check_matrix(mtx)) {
    FPRINTF(g_mtxerr,"ERROR: mtx_restore_block_perm: mtx restored now\n");
    FPRINTF(g_mtxerr," fails sanity.\n");
    return 3;
  }
#endif
  return 0;
}

extern int mtx_destroy_block_perm(mtx_block_perm_t bp)
{
  if (bp->integrity != OK) {
    if (bp->integrity == DESTROYED) {
      FPRINTF(g_mtxerr,
              "ERROR: mtx_destroy_block_perm: Called with recently\n");
      FPRINTF(g_mtxerr," destroyed bperm.\n");
    } else {
      FPRINTF(g_mtxerr,
              "ERROR: mtx_destroy_block_perm: Called with apparently\n");
      FPRINTF(g_mtxerr," corrupted bperm.\n");
    }
    return 1;
  }

  mtx_free_perm(bp->perm.row.org_to_cur);
  mtx_free_perm(bp->perm.row.cur_to_org);
  mtx_free_perm(bp->perm.col.org_to_cur);
  mtx_free_perm(bp->perm.col.cur_to_org);

  if( NOTNULL(bp->data->block) )
    ascfree(bp->data->block);
  ascfree(bp->data);

  bp->integrity=DESTROYED;
  free_block_perm(bp);
  return 0;
}
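
/*
 * Typical save/restore lifecycle for a block permutation, sketched here
 * for illustration only (assumes mtx is an output-assigned master matrix
 * obtained elsewhere; error handling abbreviated):
 *
 *   mtx_block_perm_t bp = mtx_create_block_perm(mtx);
 *   if (bp != NULL) {
 *     ...reorder rows/columns of mtx, e.g. during factorization...
 *     mtx_restore_block_perm(mtx, mtx_ALL_BLOCKS, bp);   undoes everything
 *     mtx_destroy_block_perm(bp);
 *   }
 *
 * Passing a block number instead of mtx_ALL_BLOCKS restores only that
 * block's stretch of the permutations, which is the cheap path intended
 * for block-by-block solvers.
 */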


extern size_t mtx_block_perm_size(mtx_block_perm_t bp)
{
  size_t size=0;
  if (ISNULL(bp)) return size;
  size += sizeof(struct mtx_block_perm_structure);
  /* data */
  size += sizeof(struct structural_data_t);
  /* block data: the block array holds one region per block */
  if (bp->data->nblocks > 0)
    size += sizeof(mtx_region_t)*(size_t)(bp->data->nblocks);
  /* permutations */
  size += (size_t)4*(sizeof(int32)*(size_t)bp->capacity);
  return size;
}

/* DO NOT CALL this on the perm of a matrix that is a slave */
static void swap( struct permutation_t *perm, int32 cur1, int32 cur2)
{
  register int32 org1,org2;

  if( cur1 == cur2 ) return;
  org1 = perm->cur_to_org[cur1];
  org2 = perm->cur_to_org[cur2];
  perm->cur_to_org[cur1] = org2;
  perm->cur_to_org[cur2] = org1;
  perm->org_to_cur[org1] = cur2;
  perm->org_to_cur[org2] = cur1;
  perm->parity = !(perm->parity);
}
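
/*
 * Example of the invariant swap maintains (illustrative only): start from
 * the identity, where cur_to_org = org_to_cur = {0,1,2} and parity = 0.
 * After swap(perm,0,2):
 *
 *   cur_to_org = {2,1,0}   current 0 now holds original 2, and vice versa
 *   org_to_cur = {2,1,0}   kept consistent as the exact inverse
 *   parity     = 1         one transposition flips the permutation sign
 *
 * Both arrays are updated together so org_to_cur[cur_to_org[i]] == i
 * holds at all times, and parity counts transpositions mod 2.
 */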

void mtx_swap_rows( mtx_matrix_t mtx, int32 cur1, int32 cur2)
{
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if(!mtx_check_matrix(mtx)) return;
#endif
  }
  swap(&(mtx->perm.row),cur1,cur2);
}

void mtx_swap_cols( mtx_matrix_t mtx, int32 cur1, int32 cur2)
{
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if(!mtx_check_matrix(mtx)) return;
#endif
  }
  swap(&(mtx->perm.col),cur1,cur2);
}

void mtx_drag( mtx_matrix_t mtx, int32 begin, int32 end)
{
  register int32 *rcto, *ccto, *rotc, *cotc;
  int32 holdrow,holdcol;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if(!mtx_check_matrix(mtx)) return;
#endif
  }
  if( begin == end ) return;
  if( abs(begin-end) % 2 ) {
    mtx->perm.row.parity = !(mtx->perm.row.parity);
    mtx->perm.col.parity = !(mtx->perm.col.parity);
  }
  rcto = mtx->perm.row.cur_to_org;
  ccto = mtx->perm.col.cur_to_org;
  rotc = mtx->perm.row.org_to_cur;
  cotc = mtx->perm.col.org_to_cur;
  holdrow = rcto[begin];
  holdcol = ccto[begin];
  while( begin < end ) {
    rotc[rcto[begin]=rcto[begin+1]] = begin;
    cotc[ccto[begin]=ccto[begin+1]] = begin;
    begin++;
  }
  while( begin > end ) {
    rotc[rcto[begin]=rcto[begin-1]] = begin;
    cotc[ccto[begin]=ccto[begin-1]] = begin;
    begin--;
  }
  rotc[rcto[end]=holdrow] = end;
  cotc[ccto[end]=holdcol] = end;
}
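
/*
 * Worked example of drag semantics (illustrative only): with
 * cur_to_org = {A,B,C,D} for both rows and columns, mtx_drag(mtx,0,2)
 * cyclically rotates positions 0..2, giving cur_to_org = {B,C,A,D}.
 * The diagonal entry that was at position 0 ends up at position 2 and
 * everything in between shifts up by one. A drag across a distance of
 * abs(begin-end) is that many transpositions, which is why parity flips
 * above only when abs(begin-end) is odd.
 */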

void mtx_reverse_diagonal( mtx_matrix_t mtx, int32 begin, int32 end)
{
  register int32 center,j;
  register struct permutation_t *rperm, *cperm;
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if(!mtx_check_matrix(mtx)) return;
#endif
  }
  if (begin > end) return;
  if (end >= mtx->order) return;
  center=(end-begin+1)/2;
  rperm = &(mtx->perm.row);
  cperm = &(mtx->perm.col);
  for (j=0; j<center; j++) {
    swap(cperm,begin,end);
    swap(rperm,begin,end);
    begin++;
    end--;
  }
}

int32 mtx_row_to_org( mtx_matrix_t mtx, int32 row)
{
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return -1;
#endif
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if(!mtx_check_matrix(mtx)) return -1;
#endif
  }
  return( mtx->perm.row.cur_to_org[row] );
}

int32 mtx_col_to_org(mtx_matrix_t mtx, int32 col)
{
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return -1;
#endif
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if(!mtx_check_matrix(mtx)) return -1;
#endif
  }
  return( mtx->perm.col.cur_to_org[col] );
}

int32 mtx_org_to_row( mtx_matrix_t mtx, int32 org)
{
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return -1;
#endif
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if(!mtx_check_matrix(mtx)) return -1;
#endif
  }
  return( mtx->perm.row.org_to_cur[org] );
}

int32 mtx_org_to_col( mtx_matrix_t mtx, int32 org)
{
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return -1;
#endif
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if(!mtx_check_matrix(mtx)) return -1;
#endif
  }
  return( mtx->perm.col.org_to_cur[org] );
}

boolean mtx_row_parity(mtx_matrix_t mtx)
{
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return -1;
  if (ISSLAVE(mtx)) {
    if(!mtx_check_matrix(mtx->master)) return -1;
  }
#endif
  if (ISSLAVE(mtx)) {
    return( mtx->master->perm.row.parity );
  } else {
    return( mtx->perm.row.parity );
  }
}

boolean mtx_col_parity( mtx_matrix_t mtx)
{
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return -1;
  if (ISSLAVE(mtx)) {
    if(!mtx_check_matrix(mtx->master)) return -1;
  }
#endif
  if (ISSLAVE(mtx)) {
    return( mtx->master->perm.col.parity );
  } else {
    return( mtx->perm.col.parity );
  }
}
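
/*
 * The parities matter when the permuted matrix is used to compute a
 * determinant: each row or column transposition flips the sign. A sketch
 * of the usual correction, assuming det_of_factors (a hypothetical name)
 * was computed from the permuted factors elsewhere:
 *
 *   double det = det_of_factors;
 *   if (mtx_row_parity(mtx)) det = -det;
 *   if (mtx_col_parity(mtx)) det = -det;
 */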

/***********************************************************************\
  structural analysis stuff
\***********************************************************************/
struct assign_row_vars {
  int32 rv_indicator;          /* rows visited indicator */
  int32 *row_visited;
  mtx_range_t unassigned_cols;
  mtx_range_t assigned_cols;
};

/* MUST NOT BE CALLED ON slave matrix */
static int32 assign_row( mtx_matrix_t mtx, int32 row,
                         struct assign_row_vars *vars)
/**
 *** Attempts to assign one of the columns in the range
 *** vars->unassigned_cols to the given row (exchange that column with
 *** column "row" in order to place a non-zero on the diagonal). Returns
 *** the column that is assigned, or < 0 if not possible.
 ***
 *** Assignment is done row-wise by starting with the top row, finding the
 *** nearest column in the row to the right of the diagonal to assign to it,
 *** and advancing to the next row. If no column exists to the right of the
 *** diagonal, then it proceeds along a Steward's path to find, amongst the
 *** previously assigned columns, one which can be more suitably assigned to
 *** it, thereby forcing a row which was visited earlier to be re-assigned to
 *** some new column. Termination occurs when there are no more unvisited
 *** rows. If non-empty yet unassigned rows remain, they will
 *** be positioned just after the last assigned row. Same goes for any
 *** remaining non-empty unassigned columns. The condensed grouping of
 *** assigned rows and columns will be of dimension equal to the symbolic
 *** rank of the matrix and is used to form an initial block.
 **/
{
  struct element_t Rewind, *elt;
  int32 *tocur;

  tocur = mtx->perm.col.org_to_cur;
  Rewind.next.col = mtx->hdr.row[mtx->perm.row.cur_to_org[row]];
  if( NULL != (elt=mtx_next_col(&Rewind,&(vars->unassigned_cols),tocur)) ) {
    /* Cheap assignment */
    register int32 col;
    col = tocur[elt->col];
    swap(&(mtx->perm.col),col,row);
    return(col);
  }

  vars->row_visited[row] = vars->rv_indicator;
  for( elt = &Rewind ;
       NULL != (elt=mtx_next_col(elt,&(vars->assigned_cols),tocur)) ; ) {
    int32 col;
    col = tocur[elt->col];
    if( vars->row_visited[col] != vars->rv_indicator ) {
      register int32 assigned_col;
      if( (assigned_col = assign_row(mtx,col,vars)) >= ZERO ) {
        swap(&(mtx->perm.col),assigned_col,row);
        return( assigned_col );
      }
    }
  }
  return( -1 );
}
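
/*
 * Worked example of the augmenting (Steward's) path above, illustrative
 * only. Consider a 2x2 structure where row 0 has non-zeros in columns
 * {0,1} and row 1 only in column {0}. Row 0 cheaply takes column 0.
 * Row 1 then finds nothing unassigned, so it recurses through the
 * already-assigned column 0 back to row 0, which re-assigns itself to
 * column 1; the freed column 0 is then swapped onto row 1's diagonal.
 * Both rows end up assigned even though row 1's only choice was taken.
 * The rv_indicator trick avoids clearing row_visited[] between rows:
 * bumping the indicator makes every earlier mark stale in O(1).
 */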

/*
 * This function is very similar to mtx_output_assign. However, it
 * takes a region in which to do this output assignment.
 * It returns the number of rows that could be assigned.
 * Remember that rv stands for row_visited, for newcomers to this file.
 */
int mtx_output_assign_region(mtx_matrix_t mtx,
                             mtx_region_t *region,
                             int *orphaned_rows)
{
  struct assign_row_vars vars;
  struct element_t Rewind;
  int32 nrows, rowlow, *tocur;
  mtx_range_t single_col;
  mtx_coord_t nz;
  int notassigned = 0;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return 0;
#endif

  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if( !mtx_check_matrix(mtx) ) return 0;
#endif
  }

  vars.row_visited = mtx->order > ZERO ?
    (int32 *)asccalloc(mtx->order, sizeof(int32)) : NULL;
  vars.rv_indicator = 1;

  if (region != mtx_ENTIRE_MATRIX) {
    vars.unassigned_cols.high = region->col.high;
    vars.assigned_cols.low = region->col.low;
    tocur = mtx->perm.col.org_to_cur;
    rowlow = region->row.low;
    nrows = region->row.high - region->row.low + 1;
  } else {
    /* region is NULL here, so take the bounds from the matrix itself
       rather than dereferencing it below. */
    vars.unassigned_cols.high = mtx->order - 1;
    vars.assigned_cols.low = ZERO;
    tocur = mtx->perm.col.org_to_cur;
    rowlow = ZERO;
    nrows = mtx->order;
  }

  /* NOTE: the loop below treats nrows as an exclusive end index, so it
     is only self-consistent for regions anchored at row 0. */
  for( nz.row = rowlow ; nz.row < nrows ; ) {
    single_col.low = single_col.high = nz.row;
    vars.unassigned_cols.low = nz.row;
    vars.assigned_cols.high = nz.row - 1;

    /*
     * Find the next nnz in the row and attempt to assign it using the
     * recursive function assign_row. If successful, proceed
     * to the next row. Otherwise, move the row down to the end of the
     * matrix; we are structurally singular.
     */
    Rewind.next.col = mtx->hdr.row[mtx->perm.row.cur_to_org[nz.row]];
    if (mtx_next_col(&Rewind, &single_col, tocur) ||
        assign_row(mtx, nz.row, &vars) >= ZERO) {
      ++nz.row;                     /* Go to next row */
    } else {
      swap(&(mtx->perm.row), nz.row, --nrows);   /* Move row to the end */
      notassigned++;
    }
    ++vars.rv_indicator;            /* Effectively clear vars.row_visited */
  }

  if (NOTNULL(vars.row_visited)) ascfree(vars.row_visited);
  *orphaned_rows = notassigned;     /* return count of unassigned rows */
  return nrows;
}

void mtx_output_assign( mtx_matrix_t mtx, int32 hirow, int32 hicol)
{
  struct assign_row_vars vars;
  struct element_t Rewind;
  int32 nrows, ndxhigh, *tocur;
  mtx_range_t single_col;
  mtx_coord_t nz;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return; /*ben*/
#endif
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if( !mtx_check_matrix(mtx) ) return;
#endif
  }
  vars.row_visited = mtx->order > ZERO ?
    (int32 *)asccalloc( mtx->order,sizeof(int32) ) : NULL;
  vars.rv_indicator = 1;
  vars.unassigned_cols.high = mtx->order-1;
  vars.assigned_cols.low = ZERO;
  tocur = mtx->perm.col.org_to_cur;

  nrows = mtx->order;
  for( nz.row = ZERO ; nz.row < nrows ; ) {
    single_col.low = single_col.high = nz.row;
    vars.unassigned_cols.low = nz.row;
    vars.assigned_cols.high = nz.row-1;
    Rewind.next.col = mtx->hdr.row[mtx->perm.row.cur_to_org[nz.row]];
    if( mtx_next_col(&Rewind,&single_col,tocur) ||
        assign_row(mtx,nz.row,&vars) >= ZERO ) {
      ++nz.row;                     /* Go to next row */
    } else {
      swap(&(mtx->perm.row),nz.row,--nrows);     /* Move row to the end */
    }
    ++vars.rv_indicator;            /* Effectively clear vars.row_visited */
  }

  mtx->data->symbolic_rank = nrows;
  if( NOTNULL(mtx->data->block) )
    ascfree(mtx->data->block);

  /**
   *** Place all assigned rows and columns into one
   *** initial block.
   **/
  if( mtx->data->symbolic_rank > 0 ) {
    mtx->data->nblocks = 1;
    mtx->data->block = (mtx_region_t *)ascmalloc( sizeof(mtx_region_t) );
    mtx->data->block[0].row.low = mtx->data->block[0].col.low = 0;
    mtx->data->block[0].row.high = mtx->data->block[0].col.high = nrows - 1;
  } else {
    mtx->data->nblocks = 0;
    mtx->data->block = NULL;
  }

  /**
   *** Condense all unassigned rows on the outside of the block.
   **/
  ndxhigh = mtx->order-1;
  for (nz.row = nrows; nz.row <= ndxhigh; ) {
    if( NOTNULL(mtx->hdr.row[mtx->perm.row.cur_to_org[nz.row]]) ) ++nz.row;
    else swap(&(mtx->perm.row),nz.row,ndxhigh--);
  }
  /**
   *** Condense any incidenceless but wanted orgrows on
   *** the outside of the assigned and unassigned block if sensible.
   **/
  if (nz.row < hirow) {
    ndxhigh = mtx->order-1;
    for (; nz.row <= ndxhigh; ) {
      if( mtx->perm.row.cur_to_org[nz.row] < hirow ) ++nz.row;
      else swap(&(mtx->perm.row),nz.row,ndxhigh--);
    }
  }
  /**
   *** Condense all unassigned columns on the outside of the block.
   **/
  ndxhigh = mtx->order-1;
  for (nz.col = nrows; nz.col <= ndxhigh; ) {
    if( NOTNULL(mtx->hdr.col[mtx->perm.col.cur_to_org[nz.col]]) ) ++nz.col;
    else swap(&(mtx->perm.col),nz.col,ndxhigh--);
  }
  /**
   *** Condense any incidenceless but wanted orgcols on
   *** the outside of the assigned and unassigned block if sensible.
   **/
  if (nz.col < hicol) {
    ndxhigh = mtx->order-1;
    for (; nz.col <= ndxhigh; ) {
      if( mtx->perm.col.cur_to_org[nz.col] < hicol ) ++nz.col;
      else swap(&(mtx->perm.col),nz.col,ndxhigh--);
    }
  }

  if( NOTNULL(vars.row_visited) )
    ascfree( vars.row_visited );
}
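
/*
 * The usual analysis sequence, sketched for illustration (mtx assumed to
 * be a populated master matrix; passing the matrix order as hirow/hicol
 * keeps all original rows and columns "wanted"):
 *
 *   mtx_output_assign(mtx, mtx_order(mtx), mtx_order(mtx));
 *   if (mtx_output_assigned(mtx)) {
 *     mtx_partition(mtx);
 *     ...iterate blocks with mtx_number_of_blocks() and mtx_block()...
 *   }
 *
 * mtx_order() is assumed from the public mtx.h interface; if it differs,
 * substitute whatever accessor reports the matrix order.
 */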

boolean mtx_output_assigned( mtx_matrix_t mtx)
{
  if(!mtx_check_matrix(mtx)) return 0;
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if( !mtx_check_matrix(mtx) ) return 0;
#endif
  }
  return (mtx->data->symbolic_rank > -1);
}

int32 mtx_symbolic_rank( mtx_matrix_t mtx)
{
#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return -2;
#endif
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if( !mtx_check_matrix(mtx) ) return -2;
#endif
  }

  if( !mtx_output_assigned(mtx) ) {   /* matrix will be checked */
#if MTX_DEBUG
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_symbolic_rank\n");
    FPRINTF(g_mtxerr," Matrix not output assigned.\n");
#endif
  }
  return (mtx->data->symbolic_rank);
}

void mtx_set_symbolic_rank( mtx_matrix_t mtx, int32 rank)
{
  mtx->data->symbolic_rank = rank;
}

boolean mtx_make_col_independent( mtx_matrix_t mtx, int32 col, mtx_range_t *rng)
{
  struct assign_row_vars vars;
  mtx_coord_t nz;
  int32 ncol;

  if (mtx!=NULL && ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if( !mtx_check_matrix(mtx) ) return -2;
#endif
  }
  if( !mtx_output_assigned(mtx) ) {   /* matrix will be checked */
#if MTX_DEBUG
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_make_col_independent\n");
    FPRINTF(g_mtxerr," Matrix not output assigned.\n");
#endif
    if(!mtx_check_matrix(mtx)) return -2;
  }
  if( col >= mtx->data->symbolic_rank ) return TRUE;      /* col already ind */
  if( rng->high < rng->low ) return FALSE;                /* nobody to choose from */
  if( rng->low < mtx->data->symbolic_rank ) return FALSE; /* bad choices */

  vars.row_visited = mtx->order > ZERO ?
    (int32 *)asccalloc( mtx->order,sizeof(int32) ) : NULL;
  vars.rv_indicator = 1;
  vars.unassigned_cols.low = rng->low;
  vars.unassigned_cols.high = rng->high;
  vars.assigned_cols.low = ZERO;
  vars.assigned_cols.high = mtx->data->symbolic_rank-2;

  /**
   *** Move (col,col) to the bottom right corner
   **/
  nz.row = nz.col = col;
  swap(&(mtx->perm.row),nz.row,mtx->data->symbolic_rank-1);
  swap(&(mtx->perm.col),nz.col,mtx->data->symbolic_rank-1);

  ncol = assign_row(mtx,mtx->data->symbolic_rank-1,&vars);
  /* put the corner back the way it was */
  swap(&(mtx->perm.row),nz.row,mtx->data->symbolic_rank-1);
  swap(&(mtx->perm.col),nz.col,mtx->data->symbolic_rank-1);
  if( ncol < ZERO ) {
    /* no replacement column was found; free scratch space and bail */
    if( NOTNULL(vars.row_visited) )
      ascfree( vars.row_visited );
    return FALSE;
  }

  if( NOTNULL(mtx->data->block) && mtx->data->nblocks > 1 ) {
    ascfree(mtx->data->block);
    mtx->data->nblocks = 1;
    mtx->data->block = (mtx_region_t *)ascmalloc( sizeof(mtx_region_t) );
    mtx->data->block[0].row.low = ZERO;
    mtx->data->block[0].col.low = ZERO;
    mtx->data->block[0].row.high = mtx->data->symbolic_rank-1;
    mtx->data->block[0].col.high = mtx->data->symbolic_rank-1;
  }
  if( NOTNULL(vars.row_visited) )
    ascfree( vars.row_visited );
  return TRUE;
}

struct analyze_row_vars {
  int32 done;
  int32 vstack;
  int32 *lowlink;
  int32 *blocksize;
  int32 nblocks;
};

static
int32 analyze_row(mtx_matrix_t mtx, int32 row, struct analyze_row_vars *vars,
                  mtx_range_t *rng)
/*
 * Places the row on the stack (row must be initially unvisited) and
 * then determines lowlink[row]. If, in the end, lowlink[row] = row,
 * then this row and all rows "above it on the stack" (i.e.
 * vstack <= ndx <= row) belong to the same block, which is then moved
 * from the stack to done (nblocks and blocksize updated). The new row
 * number is returned (this may be different from the given row number
 * since rows tend to move around).
 */
{
  mtx_range_t cols;
  struct element_t Rewind, *elt;
  int32 *tocur;

  /* Push row onto the stack */
  --(vars->vstack);
  swap(&(mtx->perm.row),row,vars->vstack);
  swap(&(mtx->perm.col),row,vars->vstack);
  row = vars->vstack;
  vars->lowlink[row] = row;   /* Starting value of lowlink */

  /*
   * Scan the row for non-zeros. If unvisited indices are found, analyze
   * them first (recursive call to this function) and in any case, if they
   * are still on the stack, update lowlink[row] according to the new
   * information. When the entire row is scanned, lowlink[row] should be
   * completely computed.
   */

  /*
   * THESE WERE: KAA_MODIFICATION
   * cols.low = 0; and
   * cols.high = mtx->data->symbolic_rank-1;
   */
  cols.low = rng->low;
  cols.high = rng->high;
  tocur = mtx->perm.col.org_to_cur;
  Rewind.next.col = mtx->hdr.row[mtx->perm.row.cur_to_org[row]];
  for( elt = &Rewind ; NULL != (elt=mtx_next_col(elt,&cols,tocur)) ; ) {
    int32 col = tocur[elt->col];
    if( vars->done <= col && col < vars->vstack ) {
      /* Column unvisited, compute lowlink[col] */
#if SWAPS_PRESERVE_ORDER
      col = analyze_row(mtx,col,vars,rng);
#else
      if( col != analyze_row(mtx,col,vars,rng) ) {
        elt = &Rewind;
        continue;
      }
#endif
    }

    if( col >= vars->vstack ) {
      /* Still on stack: update lowlink[row] */
      if( vars->lowlink[col] > vars->lowlink[row] )
        vars->lowlink[row] = vars->lowlink[col];
    } else {
      /**
       *** col is done (already in a block) and therefore
       *** won't contribute anything to lowlink[row]
       **/
    }
  }

  if( vars->lowlink[row] == row ) {
    /* Rows currently in vstack..row form the next block */
    ++(vars->nblocks);
    vars->blocksize[vars->done] = row - vars->vstack + 1;   /* Block size */
    /* Move rows/columns from the stack to done */
    for( ; vars->vstack <= row ; ++(vars->vstack),++(vars->done) ) {
      swap(&(mtx->perm.row),vars->vstack,vars->done);
      swap(&(mtx->perm.col),vars->vstack,vars->done);
    }
    return( vars->done - 1 );   /* That's where the row ends up */
  }
  return(row);
}
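
/*
 * analyze_row is a Tarjan-style strongly-connected-components pass over
 * the directed graph with an edge r -> c for every structural non-zero
 * (r,c): each SCC of that graph becomes one diagonal block. A tiny
 * illustration (not library code): for a 3x3 matrix with non-zeros at
 * (0,0),(0,1),(1,0),(1,1),(2,2), rows 0 and 1 reach each other, so
 * lowlink ties them into a single 2x2 block, while row 2 closes a block
 * of size 1; the result is nblocks == 2 with blocksize recorded at each
 * block's starting index.
 */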

static
int32 analyze_col(mtx_matrix_t mtx, int32 col, struct analyze_row_vars *vars,
                  mtx_range_t *rng)
/* Column analogue of analyze_row:
 * Places the col on the stack (col must be initially unvisited) and
 * then determines lowlink[col]. If, in the end, lowlink[col] = col,
 * then this col and all cols "above it on the stack" (i.e.
 * vstack <= ndx <= col) belong to the same block, which is then moved
 * from the stack to done (nblocks and blocksize updated). The new col
 * number is returned (this may be different from the given col number
 * since cols tend to move around).
 */
{
  mtx_range_t rows;
  struct element_t Rewind, *elt;
  int32 *tocur;

  /* Push col onto the stack */
  --(vars->vstack);
  swap(&(mtx->perm.col),col,vars->vstack);
  swap(&(mtx->perm.row),col,vars->vstack);
  col = vars->vstack;
  vars->lowlink[col] = col;   /* Starting value of lowlink */

  /*
   * Scan the column for non-zeros. If unvisited indices are found, analyze
   * them first (recursive call to this function) and in any case, if they
   * are still on the stack, update lowlink[col] according to the new
   * information. When the entire column is scanned, lowlink[col] should be
   * completely computed.
   */

  /* for full version:
   * rows.low = 0; and
   * rows.high = mtx->data->symbolic_rank-1;
   */
  rows.low = rng->low;
  rows.high = rng->high;
  tocur = mtx->perm.row.org_to_cur;
  Rewind.next.row = mtx->hdr.col[mtx->perm.col.cur_to_org[col]];
  for( elt = &Rewind ; NULL != (elt=mtx_next_row(elt,&rows,tocur)) ; ) {
    int32 row = tocur[elt->row];
    if( vars->done <= row && row < vars->vstack ) {
      /* Row unvisited, compute lowlink[row] */
#if SWAPS_PRESERVE_ORDER
      row = analyze_col(mtx,row,vars,rng);
#else
      if( row != analyze_col(mtx,row,vars,rng) ) {
        elt = &Rewind;
        continue;
      }
#endif
    }

    if( row >= vars->vstack ) {
      /* Still on stack: update lowlink[col] */
      if( vars->lowlink[row] > vars->lowlink[col] ) {
        vars->lowlink[col] = vars->lowlink[row];
      }
    } else {
      /*
       * row is done (already in a block) and therefore
       * won't contribute anything to lowlink[col]
       */
    }
  }

  if( vars->lowlink[col] == col ) {
    /* Cols currently in vstack..col form the next block */
    ++(vars->nblocks);
    vars->blocksize[vars->done] = col - vars->vstack + 1;   /* Block size */
    /* Move rows/columns from the stack to done */
    for( ; vars->vstack <= col ; ++(vars->vstack),++(vars->done) ) {
      swap(&(mtx->perm.col),vars->vstack,vars->done);
      swap(&(mtx->perm.row),vars->vstack,vars->done);
    }
    return( vars->done - 1 );   /* That's where the col ends up */
  }
  return(col);
}


/* a function to check the diagonal for holes. if symbolic_rank is
 * set, and rng->high < rank, returns immediately.
 * Worst Cost: O(nnz) where nnz = incidence count sum over rows in rng.
 * Does not pass calls up to master.
 * Returns number of holes detected in diagonal in rng given.
 * If noisy != 0, writes the hole locations to g_mtxerr.
 * On detectably bad input, returns -1.
 */
int32 mtx_full_diagonal(mtx_matrix_t mtx, mtx_range_t *rng, int noisy)
{
  mtx_coord_t nz;
  mtx_range_t diag;
  int32 holecount=0;

  if (mtx==NULL || rng == NULL) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_full_diagonal sent NULL.\n");
    return -1;
  }
  if (mtx_output_assigned(mtx) && rng->high < mtx->data->symbolic_rank) {
    return 0;
  }
  if (!mtx_check_matrix(mtx)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_full_diagonal sent bad mtx.\n");
    return -1;
  }
  if (rng->low < 0 || rng->high >= mtx->order) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_full_diagonal sent bad rng.\n");
    return -1;
  }
  /* single scan; the noisy flag only controls reporting */
  for (nz.row=rng->low; nz.row <= rng->high; nz.row++) {
    diag.high = diag.low = nz.row;
    nz.col = mtx_FIRST;
    mtx_next_in_row(mtx,&nz,&diag);   /* find diagonal, if there */
    if (nz.col==mtx_LAST) {
      holecount++;
      if (noisy) {
        FPRINTF(g_mtxerr,"mtx: mtx_full_diagonal found hole at %d.\n",nz.row);
      }
    }
  }
  return holecount;
}

/* So long as the mesh is sane, traversing it along one axis transposes
 * it in both directions. This must be called just before swapping headers
 * or the semantics are not right.
 */
static
void transpose_mesh(mtx_matrix_t mtx)
{
  struct element_t *elttmp;   /* swap var for nextelt pointers in elements */
  struct element_t *next, *elt;
  int32 ndxtmp;               /* swap var for orgrow/orgcol indices */
  int32 nrows, ndx;

  if (mtx == NULL) {
    return;
  }
  nrows = mtx->order;
  for( ndx = ZERO ; ndx < nrows ; ndx++) {
    next = mtx->hdr.row[ndx];
    /* traverse the row, turning it into a column behind us as we go. */
    while (next != NULL) {
      elt = next;   /* the use of elt is redundant, but much more readable. */
      /* swap org row/col indices */
      ndxtmp = elt->col;
      elt->col = elt->row;
      elt->row = ndxtmp;
      /* swap next elt pointers */
      elttmp = elt->next.col;
      elt->next.col = elt->next.row;
      elt->next.row = elttmp;
      next = elttmp;
    }
  }
}

int32 mtx_transpose(mtx_matrix_t mtx)
{
  struct element_t **hdrtmp;   /* swap var for headers */
  int32 *permtmp;              /* swap var for permutation vectors. */
  int32 ndxtmp,bnum;           /* swap var for indices */
  mtx_matrix_t master;
  int32 slave;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_transpose\n");
    FPRINTF(g_mtxerr," Matrix given is in error.\n");
    return mtx_NONE; /*ben*/
  }
#endif
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if( !mtx_check_matrix(mtx) ) {
      FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_transpose\n");
      FPRINTF(g_mtxerr," Matrix given is in error.\n");
      return mtx_NONE;
    }
#endif
  }
  master = mtx;
  if (master->perm.transpose == mtx_NONE) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_transpose\n");
    FPRINTF(g_mtxerr," Matrix given is 0 order.\n");
    return mtx_NONE;
  }
  master->perm.transpose = !(master->perm.transpose);
  /* swap perms on master */
  permtmp = mtx->perm.col.org_to_cur;   /* do org_to_cur */
  mtx->perm.col.org_to_cur = mtx->perm.row.org_to_cur;
  mtx->perm.row.org_to_cur = permtmp;
  permtmp = mtx->perm.col.cur_to_org;   /* do cur_to_org */
  mtx->perm.col.cur_to_org = mtx->perm.row.cur_to_org;
  mtx->perm.row.cur_to_org = permtmp;

  /* transpose mesh on master */
  transpose_mesh(mtx);

  /* swap headers on master */
  hdrtmp = mtx->hdr.col;
  mtx->hdr.col = mtx->hdr.row;
  mtx->hdr.row = hdrtmp;

  /* transpose blocks on master */
  if (mtx->data->block != NULL) {
    for (bnum = 0; bnum < mtx->data->nblocks; bnum++) {
      /* upper left */
      ndxtmp = mtx->data->block[bnum].row.low;
      mtx->data->block[bnum].row.low = mtx->data->block[bnum].col.low;
      mtx->data->block[bnum].col.low = ndxtmp;
      /* lower right */
      ndxtmp = mtx->data->block[bnum].row.high;
      mtx->data->block[bnum].row.high = mtx->data->block[bnum].col.high;
      mtx->data->block[bnum].col.high = ndxtmp;
    }
  }

  /* swap hdrs, update perm references, and do mesh on slaves */
  for (slave = 0; slave < master->nslaves; slave++) {
    mtx = master->slaves[slave];
    mtx->perm.row.org_to_cur = master->perm.row.org_to_cur;
    mtx->perm.row.cur_to_org = master->perm.row.cur_to_org;
    mtx->perm.col.org_to_cur = master->perm.col.org_to_cur;
    mtx->perm.col.cur_to_org = master->perm.col.cur_to_org;
    transpose_mesh(mtx);
    hdrtmp = mtx->hdr.col;
    mtx->hdr.col = mtx->hdr.row;
    mtx->hdr.row = hdrtmp;
  }

  return 0;
}
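
/*
 * Usage note (illustrative): mtx_transpose works in place on the master
 * (and all its slaves), so transposing twice restores the original
 * matrix. mtx_isa_transpose below reports which state the matrix is in:
 *
 *   mtx_transpose(mtx);
 *   ...work with the transposed structure...
 *   if (mtx_isa_transpose(mtx)) {
 *     mtx_transpose(mtx);   (flip back before handing it to callers)
 *   }
 */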

int32 mtx_isa_transpose(mtx_matrix_t mtx)
{
#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_isa_transpose\n");
    FPRINTF(g_mtxerr," Matrix given is in error.\n");
    return mtx_NONE; /*ben*/
  }
#endif
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
#if MTX_DEBUG
    if( !mtx_check_matrix(mtx) ) {
      FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_isa_transpose\n");
      FPRINTF(g_mtxerr," Matrix given's master is in error.\n");
      return mtx_NONE;
    }
#endif
  }
  return mtx->perm.transpose;
}

/*
 * This function uses the same algorithm as its sister function
 * mtx_partition (at the moment). It is more general in that it
 * explicitly takes a region, and will return a block_list
 * structure rather than manipulating the one associated with
 * the matrix. It now works (we hope) on an arbitrary square
 * region.
 */
mtx_block_t *mtx_block_partition( mtx_matrix_t mtx, mtx_region_t *reg)
{
  struct analyze_row_vars vars;
  int32 blocknum, start, size;
  mtx_block_t *blocklist;

  if (mtx!=NULL && ISSLAVE(mtx)) {
    mtx = mtx->master;
  }

  if (!mtx_output_assigned(mtx)) {
    int mc = mtx_check_matrix(mtx);
    if (!mc) return NULL;
    if (mtx_full_diagonal(mtx,&(reg->row),1)) {
      FPRINTF(g_mtxerr,"WARNING: (mtx) mtx_block_partition\n");
      FPRINTF(g_mtxerr," Assignment bad. Partitioning may be bad.\n");
    }
  }

  vars.vstack = reg->row.high + 1;   /* was symbolic_rank */
  if (vars.vstack > 0) {
    vars.lowlink = vars.blocksize = ASC_NEW_ARRAY(int32,vars.vstack);
  } else {
    vars.lowlink = vars.blocksize = NULL;
  }
  vars.done = 0;
  vars.nblocks = 0;

  /*
   * The core of the partition routine. analyze_row
   * now takes a range, which represents a square region.
   */
  while( vars.done < vars.vstack ) {
    analyze_row(mtx,vars.done,&vars,&(reg->row));
  }

  /*
   * Prepare the block list structure and copy the data
   * into it from vars.
   */
  if (vars.nblocks > 0) {
    blocklist = (mtx_block_t *)ascmalloc(sizeof(mtx_block_t));
    blocklist->nblocks = vars.nblocks;
    blocklist->block = (mtx_region_t *)
      ascmalloc(vars.nblocks * sizeof(mtx_region_t));
  } else {
    /* free the scratch space before bailing out */
    if (NOTNULL(vars.lowlink))
      ascfree((POINTER)vars.lowlink);
    return NULL;
  }

  for(start = blocknum = 0; blocknum < vars.nblocks; blocknum++) {
    size = vars.blocksize[start];
    blocklist->block[blocknum].row.low =
      blocklist->block[blocknum].col.low = start;
    blocklist->block[blocknum].row.high =
      blocklist->block[blocknum].col.high = start + size - 1;
    start += size;
  }
  if(NOTNULL(vars.lowlink))
    ascfree((POINTER)vars.lowlink);

  return blocklist;
}
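
/*
 * A minimal usage sketch (illustrative; assumes mtx has been output
 * assigned over the region of interest):
 *
 *   mtx_region_t reg;
 *   mtx_block_t *bl;
 *   int32 b;
 *   reg.row.low = reg.col.low = 0;
 *   reg.row.high = reg.col.high = mtx_symbolic_rank(mtx) - 1;
 *   bl = mtx_block_partition(mtx,&reg);
 *   if (bl != NULL) {
 *     for (b = 0; b < bl->nblocks; b++) {
 *       ...process the square diagonal block bl->block[b]...
 *     }
 *     ascfree(bl->block);
 *     ascfree(bl);
 *   }
 *
 * The caller owns the returned list; freeing both the region array and
 * the list itself, as above, matches how they are allocated here.
 */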


void mtx_partition( mtx_matrix_t mtx)
/**
 *** Matrix must be previously output assigned. Then the assigned
 *** region (symbolic basis) portion of the matrix is partitioned
 *** leaving unassigned rows and columns on the outside.
 ***
 ***   vstack -------------+
 ***   done ---+           |
 ***           V           V
 ***      done | unvisited | stack (visited)
 ***
 *** 0 <= "done" < done <= "unvisited" < vstack <= "stack" < order
 ***
 *** For all ndx on the stack, lowlink[ndx] "points to" the deepest part of
 *** the stack (highest index) which can be reached from row ndx by the
 *** associated directed graph (with edges r-->c, where (r,c) is non-zero).
 ***
 *** For blocks which are already completed, blocksize[start] = block size,
 *** where start is the starting row/column index of the block. In every
 *** case, 0 <= start < done, so lowlink and blocksize can be the same
 *** array.
 **/
{
  struct analyze_row_vars vars;
  int32 blocknum, start, size;
  mtx_range_t rng;

  if (mtx!=NULL && ISSLAVE(mtx)) {
    mtx = mtx->master;
  }
  if( !mtx_output_assigned(mtx) ) {
    int mc=mtx_check_matrix(mtx);
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_partition\n");
    FPRINTF(g_mtxerr," Matrix not output assigned.\n");
    if(!mc) return;
  }

  vars.vstack = mtx->data->symbolic_rank;
  vars.lowlink = vars.blocksize = vars.vstack > 0 ?
    ASC_NEW_ARRAY(int32,vars.vstack) : NULL;
  vars.done = 0;
  vars.nblocks = 0;

  rng.low = 0;   /* KAA_MODIFICATION */
  rng.high = mtx->data->symbolic_rank-1;
  while( vars.done < vars.vstack )
    analyze_row(mtx,vars.done,&vars,&rng);

  if( NOTNULL(mtx->data->block) )
    ascfree(mtx->data->block);
  mtx->data->nblocks = vars.nblocks;
  mtx->data->block = vars.nblocks > 0 ?
    ASC_NEW_ARRAY(mtx_region_t,vars.nblocks) : NULL;
  for( start=blocknum=0 ; blocknum < vars.nblocks ; blocknum++ ) {
    size = vars.blocksize[start];
    mtx->data->block[blocknum].row.low =
      mtx->data->block[blocknum].col.low = start;
    mtx->data->block[blocknum].row.high =
      mtx->data->block[blocknum].col.high = start + size - 1;
    start += size;
  }
  if( NOTNULL(vars.lowlink) )
    ascfree( (POINTER)vars.lowlink );
}

void mtx_ut_partition( mtx_matrix_t mtx)
/*
 * This is the upper triangular version of the blt mtx_partition function.
 * Swap 'row' and 'column' or r and c in all related comments.
 * Matrix must be previously output assigned. Then the assigned
 * region (symbolic basis) portion of the matrix is partitioned
 * leaving unassigned rows and columns on the outside.
 *
 *   vstack -------------+
 *   done ---+           |
 *           V           V
 *      done | unvisited | stack (visited)
 *
 * 0 <= "done" < done <= "unvisited" < vstack <= "stack" < order
 *
 * For all ndx on the stack, lowlink[ndx] "points to" the deepest part of
 * the stack (highest index) which can be reached from row ndx by the
 * associated directed graph (with edges r-->c, where (r,c) is non-zero).
 *
 * For blocks which are already completed, blocksize[start] = block size,
 * where start is the starting row/column index of the block. In every
 * case, 0 <= start < done, so lowlink and blocksize can be the same
 * array.
 */
{
  struct analyze_row_vars vars;
  int32 blocknum, start, size;
  mtx_range_t rng;

  if (mtx!=NULL && ISSLAVE(mtx)) {
    mtx = mtx->master;
  }
  if( !mtx_output_assigned(mtx) ) {
    int mc=mtx_check_matrix(mtx);
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_ut_partition\n");
    FPRINTF(g_mtxerr," Matrix not output assigned.\n");
    if(!mc) return;
  }

  vars.vstack = mtx->data->symbolic_rank;
  vars.lowlink = vars.blocksize = vars.vstack > 0 ?
    ASC_NEW_ARRAY(int32,vars.vstack) : NULL;
  vars.done = 0;
  vars.nblocks = 0;

  rng.low = 0;
  rng.high = mtx->data->symbolic_rank-1;
  while( vars.done < vars.vstack ) {
    analyze_col(mtx,vars.done,&vars,&rng);
  }

  if( NOTNULL(mtx->data->block) ) {
    ascfree(mtx->data->block);
  }
  mtx->data->nblocks = vars.nblocks;
  mtx->data->block = vars.nblocks > 0 ?
    ASC_NEW_ARRAY(mtx_region_t,vars.nblocks) : NULL;
  for( start=blocknum=0 ; blocknum < vars.nblocks ; blocknum++ ) {
    size = vars.blocksize[start];
    mtx->data->block[blocknum].row.low =
      mtx->data->block[blocknum].col.low = start;
    mtx->data->block[blocknum].row.high =
      mtx->data->block[blocknum].col.high = start + size - 1;
    start += size;
  }
  if( NOTNULL(vars.lowlink) ) {
    ascfree( (POINTER)vars.lowlink );
  }
}

#ifdef DELETE_THIS_DEAD_FUNCTION
/* a comparator meeting the qsort spec exactly for any pair i1,i2 */
static int mtx_cmpint(int *i1, int *i2)
{
  if (*i1 < *i2) return -1;
  return (*i1 > *i2);
}
#endif /* DELETE_THIS_DEAD_FUNCTION */

/* a faster comparator which assumes *i1 == *i2 never occurs */
static int mtx_cmpintfast(int *i1, int *i2)
{
  if (*i1 < *i2) return -1;
  return 1;
}

/* This could be faster, but as it's a one-time deal, who cares?
   We're at the mercy of the system qsort. */
/* ah, don't you hate prototypes? now it looks like this
   function matters... */
void mtx_org_permute(mtx_matrix_t mtx, mtx_region_t *reg)
{
  if (ISNULL(mtx)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_org_permute\n");
    FPRINTF(g_mtxerr," NULL matrix given.\n");
    return;
  }
  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
  }
  if (ISNULL(reg)) {
    mtx_reset_perm(mtx);
  } else {
    int32 i,j,len;
    int32 *sort,*top;
    len = reg->col.high - reg->col.low + 1;
    if (len > 1) {
      /* create sort space */
      top = sort = ASC_NEW_ARRAY(int,len);
      if (ISNULL(sort)) {
        FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_org_permute\n");
        FPRINTF(g_mtxerr," Insufficient memory. Not permuted.\n");
        return;
      }
      /* copy current org ordering into array */
      for (i = reg->col.low; i <= reg->col.high; i++) {
        *top = mtx->perm.col.cur_to_org[i];
        top++;
      }
      /* sort org columns */
      qsort((void *)sort,(size_t)len,sizeof(int),
            (int (*)(const void *,const void *))mtx_cmpintfast);
      /* permute columns */
      top = sort;
      for (i = reg->col.low; i < reg->col.high; i++) {
        if (mtx->perm.col.cur_to_org[i] != *top) {
          j = mtx->perm.col.org_to_cur[*top];
          swap(&(mtx->perm.col),i,j);
        }
        top++;
      }
      ascfree(sort);
    }
    /* ditto for rows */
    len = reg->row.high - reg->row.low + 1;
    if (len > 1) {
      top = sort = ASC_NEW_ARRAY(int,len);
      if (ISNULL(sort)) {
        FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_org_permute\n");
        FPRINTF(g_mtxerr," Insufficient memory. Not permuted.\n");
        return;
      }
      for (i = reg->row.low; i <= reg->row.high; i++) {
        *top = mtx->perm.row.cur_to_org[i];
        top++;
      }
      qsort((void *)sort,(size_t)len,sizeof(int),
            (int (*)(const void *,const void *))mtx_cmpintfast);
      top = sort;
      for (i = reg->row.low; i < reg->row.high; i++) {
        if (mtx->perm.row.cur_to_org[i] != *top) {
          j = mtx->perm.row.org_to_cur[*top];
          swap(&(mtx->perm.row),i,j);
        }
        top++;
      }
      ascfree(sort);
    }
  }
}

/***********************************************************************\
  end structural analysis stuff
\***********************************************************************/
boolean mtx_check_blocks( mtx_matrix_t mtx)
{
  mtx_range_t invalid;
  int32 blocknum;

  if (ISSLAVE(mtx)) {
    mtx = mtx->master;
  }
  if( !mtx_output_assigned(mtx) ) {   /* matrix will be checked */
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_check_blocks\n");
    FPRINTF(g_mtxerr," Matrix not output assigned.\n");
    return( FALSE );
  }

  invalid.high = mtx->data->symbolic_rank-1;
  /* Last block need not be checked */
  for( blocknum = 0 ; blocknum < mtx->data->nblocks-1 ; ++blocknum ) {
    mtx_coord_t nz;
    invalid.low = mtx->data->block[blocknum].row.high + 1;
    for( nz.row = mtx->data->block[blocknum].row.low ;
         nz.row <= mtx->data->block[blocknum].row.high ;
         ++nz.row ) {
      struct element_t Rewind;
      Rewind.next.col = mtx->hdr.row[mtx->perm.row.cur_to_org[nz.row]];
      if( mtx_next_col(&Rewind,&invalid,mtx->perm.col.org_to_cur) ) {
        mtx->data->nblocks = -1;
        if( NOTNULL(mtx->data->block) ) {
          ascfree(mtx->data->block);
          mtx->data->block = NULL;   /* don't leave a dangling pointer */
        }
        return(FALSE);
      }
    }
  }
  return(TRUE);
}

int32 mtx_number_of_blocks( mtx_matrix_t mtx)
{
  if( !mtx_output_assigned(mtx) ) {   /* matrix will be checked */
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_number_of_blocks\n");
    FPRINTF(g_mtxerr," Matrix not output assigned.\n");
  }

#if MTX_DEBUG
  if( !mtx_check_blocks(mtx) ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_number_of_blocks\n");
    FPRINTF(g_mtxerr," Invalid partition.\n");
    return mtx_NONE;
  }
#endif

  return (mtx->data->nblocks);
}

int32 mtx_block( mtx_matrix_t mtx, int32 block_number, mtx_region_t *block)
{
  mtx_region_t *reg;
  int error=0;

  if( !mtx_output_assigned(mtx) ) {   /* matrix will be checked */
#if MTX_DEBUG
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_block\n");
    FPRINTF(g_mtxerr," Matrix not output assigned.\n");
#endif
    error=1;
  }

#if MTX_DEBUG
  if( !mtx_check_blocks(mtx) ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_block\n");
    FPRINTF(g_mtxerr," Invalid partition.\n");
  }
#endif

  if( (block_number > mtx->data->nblocks-1) || (block_number < 0) ) {
#if MTX_DEBUG
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_block\n");
    FPRINTF(g_mtxerr," Block number doesn't exist.\n");
#endif
    error=1;
  }
  if (error) {
    block->row.low=0;
    block->col.low=0;
    block->row.high=mtx->order-1;
    block->col.high=mtx->order-1;
    return mtx_NONE;
  }

  reg = &(mtx->data->block[block_number]);
  mem_copy_cast(reg,block,sizeof(mtx_region_t));
  return 0;
}

int32 mtx_block_containing_row(mtx_matrix_t mtx, int32 row, mtx_region_t *block)
{
  int32 blow,bhigh;
  mtx_region_t *reg;

  if( !mtx_output_assigned(mtx) ) {   /* matrix will be checked */
#if MTX_DEBUG
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_block_containing_row\n");
    FPRINTF(g_mtxerr," Matrix not output assigned.\n");
#endif
    return 0;
  }

#if MTX_DEBUG
  if( !mtx_check_blocks(mtx) ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_block_containing_row\n");
    FPRINTF(g_mtxerr," Invalid partition.\n");
  }
#endif

  /* binary search: blocks are stored in ascending row order */
  blow = 0;
  bhigh = mtx->data->nblocks-1;
  while( blow <= bhigh ) {
    int32 block_number = (blow+bhigh)/2;
    if( row > mtx->data->block[block_number].row.high ) {
      blow = block_number+1;
    } else if( row < mtx->data->block[block_number].row.low ) {
      bhigh = block_number-1;
    } else {
      reg = &(mtx->data->block[block_number]);
      mem_copy_cast(reg,block,sizeof(mtx_region_t));
      return(block_number);
    }
  }
  return(mtx_NONE);
}

int32 mtx_block_containing_col(mtx_matrix_t mtx, int32 col, mtx_region_t *block)
{
  int32 blow,bhigh;
  mtx_region_t *reg;

  if( !mtx_output_assigned(mtx) ) {   /* matrix will be checked */
#if MTX_DEBUG
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_block_containing_col\n");
    FPRINTF(g_mtxerr," Matrix not output assigned.\n");
#endif
    return 0;
  }

#if MTX_DEBUG
  if( !mtx_check_blocks(mtx) ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) mtx_block_containing_col\n");
    FPRINTF(g_mtxerr," Invalid partition.\n");
  }
#endif

  /* binary search: blocks are stored in ascending column order */
  blow = 0;
  bhigh = mtx->data->nblocks-1;
  while( blow <= bhigh ) {
    int32 block_number = (blow+bhigh)/2;
    if( col > mtx->data->block[block_number].col.high ) {
      blow = block_number+1;
    } else if( col < mtx->data->block[block_number].col.low ) {
      bhigh = block_number-1;
    } else {
      reg = &(mtx->data->block[block_number]);
      mem_copy_cast(reg,block,sizeof(mtx_region_t));
      return(block_number);
    }
  }
  return(mtx_NONE);
}

#undef __MTX_C_SEEN__
