/[ascend]/trunk/ascend4/solver/mtx_basic.c

Contents of /trunk/ascend4/solver/mtx_basic.c

Revision 1
Fri Oct 29 20:54:12 2004 UTC (19 years, 1 month ago) by aw0a
File MIME type: text/x-csrc
File size: 106688 byte(s)
Setting up web subdirectory in repository
/*
 *  mtx: Ascend Sparse Matrix Package
 *  by Karl Michael Westerberg
 *  Created: 2/6/90
 *  Version: $Revision: 1.20 $
 *  Version control file: $RCSfile: mtx_basic.c,v $
 *  Date last modified: $Date: 2000/01/25 02:27:07 $
 *  Last modified by: $Author: ballan $
 *
 *  This file is part of the SLV solver.
 *
 *  Copyright (C) 1990 Karl Michael Westerberg
 *  Copyright (C) 1993 Joseph Zaher
 *  Copyright (C) 1994 Joseph Zaher, Benjamin Andrew Allan
 *  Copyright (C) 1995 Kirk Andre Abbott, Benjamin Andrew Allan
 *  Copyright (C) 1996 Benjamin Andrew Allan
 *
 *  The SLV solver is free software; you can redistribute
 *  it and/or modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2 of the
 *  License, or (at your option) any later version.
 *
 *  The SLV solver is distributed in the hope that it will be
 *  useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139 USA.  Check the file named COPYING.
 *  COPYING is found in ../compiler.
 */

#include <math.h>
#include "utilities/ascConfig.h"
#include "utilities/ascMalloc.h"
#include "utilities/mem.h"
#include "solver/mtx.h"
/* grab our private parts */
#define __MTX_C_SEEN__
#include "solver/mtx_use_only.h"

/* this returns an error count */
int super_check_matrix( mtx_matrix_t mtx)
/**
 ***  Really check the matrix.
 **/
{
  int32 ndx,errcnt=0;
  int32 rowc,colc;

  /* Test consistency of permutation arrays */
  for( ndx=ZERO ; ndx < mtx->capacity ; ++ndx ) {
    if( mtx->perm.row.org_to_cur[mtx->perm.row.cur_to_org[ndx]] != ndx ) {
      FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
      FPRINTF(g_mtxerr,"       Permutation violation in row %d.\n", ndx);
      errcnt++;
    }
    if( mtx->perm.col.org_to_cur[mtx->perm.col.cur_to_org[ndx]] != ndx ) {
      FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
      FPRINTF(g_mtxerr,"       Permutation violation in col %d.\n",ndx);
      errcnt++;
    }
  }

  if( mtx->order > mtx->capacity ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
    FPRINTF(g_mtxerr,"       Capacity %d is less than order %d\n",
            mtx->capacity,mtx->order);
    errcnt++;
  }

  /* Make sure rows and columns which don't exist are blank */
  for( ndx = mtx->order ; ndx < mtx->capacity ; ++ndx ) {
    int32 org_row,org_col;
    org_row = mtx->perm.row.cur_to_org[ndx];
    org_col = mtx->perm.col.cur_to_org[ndx];
    if( NOTNULL(mtx->hdr.row[org_row]) ) {
      FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
      FPRINTF(g_mtxerr,"       Non-zeros found in non-existent row %d.\n",ndx);
      errcnt++;
    }
    if( NOTNULL(mtx->hdr.col[org_col]) ) {
      FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
      FPRINTF(g_mtxerr,"       Non-zeros found in non-existent col %d.\n",ndx);
      errcnt++;
    }
  }

  /* Verify fishnet structure */
  for( ndx=rowc=colc=ZERO ; ndx < mtx->capacity ; ++ndx ) {
    struct element_t *elt;
    int32 org_row,org_col;
    org_row = mtx->perm.row.cur_to_org[ndx];
    org_col = mtx->perm.col.cur_to_org[ndx];
    for( elt = mtx->hdr.row[org_row] ; NOTNULL(elt) ; elt = elt->next.col ) {
      if( elt->row != mtx_NONE ) {
        ++rowc;
        if( elt->row != org_row ) {
          FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
          FPRINTF(g_mtxerr,"       Element mismatch in row %d.\n", ndx);
          errcnt++;
        }
      } else {
        FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
        FPRINTF(g_mtxerr,"       Disclaimed element in row %d.\n", ndx);
        errcnt++;
      }
    }
    for( elt = mtx->hdr.col[org_col] ; NOTNULL(elt) ; elt = elt->next.row ) {
      if( elt->col != mtx_NONE ) {
        ++colc;
        if( elt->col != org_col ) {
          FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
          FPRINTF(g_mtxerr,"       Element mismatch in col %d.\n", ndx);
          errcnt++;
        }
      } else {
        FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
        FPRINTF(g_mtxerr,"       Disclaimed element in col %d.\n", ndx);
        errcnt++;
      }
    }
  }
  if( rowc != colc ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
    FPRINTF(g_mtxerr,"       Non-zero discrepancy, %d by row, %d by col.\n",
            rowc, colc);
    errcnt++;
  }
  return errcnt;
}

boolean check_matrix( mtx_matrix_t mtx, char *file, int line)
/**
 ***  Checks the integrity flag of the matrix.
 **/
{
  if( ISNULL(mtx) ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) check_matrix\n");
    FPRINTF(g_mtxerr,"       NULL matrix in %s line %d.\n",file,line);
    return 0;
  }

  switch( mtx->integrity ) {
  case OK:
    break;
  case DESTROYED:
    FPRINTF(g_mtxerr,"ERROR: (mtx) check_matrix\n");
    FPRINTF(g_mtxerr,
            "       Deceased matrix found in %s line %d.\n",file,line);
    return 0;
  default:
    FPRINTF(g_mtxerr,"ERROR: (mtx) check_matrix\n");
    FPRINTF(g_mtxerr,"       Matrix garbage found in %s line %d.\n",
            file,line);
    return 0;
  }

#if MTX_DEBUG
  super_check_matrix(mtx);
#endif

  if (ISSLAVE(mtx)) {
    return (check_matrix(mtx->master,file,line));
  }
  return 1;
}

/* this function checks the consistency of a sparse as best it can.
   returns FALSE if something is weird. */
boolean check_sparse(const mtx_sparse_t * const sp, char *file, int line)
{
  if ( ISNULL(sp) ||
       sp->len > sp->cap ||
       ( sp->cap > 0 && ( ISNULL(sp->idata) || ISNULL(sp->data) ) )
     ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) check_sparse - Inconsistent or\n");
    FPRINTF(g_mtxerr,"       NULL sparse in %s line %d.\n",file,line);
    return 0;
  }
  return 1;
}
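
/*
 * Illustrative sketch (an assumption, not from this file): the checks
 * above are normally reached through wrapper macros, presumably defined
 * in mtx_use_only.h, which supply the call site automatically:
 *
 *   #define mtx_check_matrix(m) check_matrix((m),__FILE__,__LINE__)
 *
 *   if( !mtx_check_matrix(mtx) ) return;
 */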

#define free_unless_null(ptr) if( NOTNULL(ptr) ) ascfree(ptr)
/*
static void free_unless_null(POINTER ptr)
{
  if( NOTNULL(ptr) )
    ascfree(ptr);
}
*/

void mtx_zero_int32(int32 *data, int len)
{
  int i;
  if (data==NULL) return;
  for (i=0; i<len; i++) data[i]=0;
}

void mtx_zero_real64(real64 *data, int len)
{
  int i;
  if (data==NULL) return;
  for (i=0; i<len; i++) data[i]=0.0;
}

void mtx_zero_ptr(void **data, int len)
{
  int i;
  if (data==NULL) return;
  for (i=0; i<len; i++) data[i]=NULL;
}

static mtx_matrix_t alloc_header(void)
/**
 ***  Allocates a matrix header and returns a pointer to it.
 **/
{
  mtx_matrix_t mtx;
  mtx = (mtx_matrix_t)ascmalloc( sizeof(struct mtx_header) );
  mtx->integrity = OK;
  return(mtx);
}

static void free_header(mtx_matrix_t mtx)
/**
 ***  Frees a matrix header.
 **/
{
  mtx->integrity = DESTROYED;
  ascfree(mtx);
}

#ifdef DELETE_THIS_UNUSED_FUNCTION
/* not in use? */
static struct element_t **alloc_nz_header(int32 cap)
/**
 ***  Allocates a row or column header with uninitialized contents.
 **/
{
  return(cap > 0 ? (struct element_t **)
         ascmalloc(cap*sizeof(struct element_t *)) : NULL);
}
#endif /* DELETE_THIS_UNUSED_FUNCTION */


static struct element_t **calloc_nz_header(int32 cap)
/**
 ***  Allocates a zeroed row or column header.
 **/
{
  return(cap > 0 ? (struct element_t **)
         asccalloc(cap,sizeof(struct element_t *)) : NULL);
}

static void copy_nz_header(struct element_t **tarhdr,
                           struct element_t **srchdr, int32 cap)
/**
 ***  Copies srchdr to tarhdr given the capacity of srchdr.
 **/
{
  mem_copy_cast(srchdr,tarhdr,cap*sizeof(struct element_t *));
}

#define free_nz_header(hdr) free_unless_null( (POINTER)(hdr) )
/**
 ***  Frees a row or column header
 **/

/********************************************************************/
/*
  Any method of deallocating elements that doesn't use
  delete_from_row/col or nuke_fishnet is DOOMED to die.
  On ANY entry to delete_from_row or delete_from_col,
  last_value_matrix should already be set to the mtx being deleted from.
  What this really means is that before any of the following list of
  functions is called, last_value_matrix should be set:

    disclaim_element
    delete_from_row/col
    blast_row/col
    clean_row/col

  Note that the above functions call each other, with blast_/clean_ being
  the top housekeeping/garbage collection pair.  Users of delete, blast or
  clean should set last_value_matrix at the scope where the matrix is known.

  Excuses for this kluge:
  1) mtx_value/set_value need it.
  2) the mem_store scheme REQUIRES it.
  3) it is faster than passing the mtx up and down through the functions
     listed above, and we do a LOT of these operations.
*/
static mtx_matrix_t last_value_matrix = NULL;
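
/*
 * Illustrative sketch (not from the original source): every caller that
 * deletes elements pins the matrix first, then invokes the housekeeping
 * routines, e.g.
 *
 *   last_value_matrix = mtx;
 *   for( ndx = rng->low ; ndx <= rng->high ; ++ndx )
 *     blast_row(mtx, mtx->perm.row.cur_to_org[ndx], mtx_ALL_COLS);
 */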

static void disclaim_element(struct element_t *element)
/**
 ***  Indicates that one of the row or column (it doesn't matter) in which
 ***  this element appears will no longer point to this element (it has
 ***  already been removed from the list).  Elements disclaimed once have
 ***  their row,col indices set to mtx_NONE.  Elements disclaimed twice are
 ***  discarded.
 ***  NOTE WELL: last_value_matrix must be set before this function is called.
 **/
{
  if( element->row == mtx_NONE ) {
    mem_free_element(last_value_matrix->ms,(void *)element);
  } else {
    element->row = element->col = mtx_NONE;
  }
}

static void delete_from_row(struct element_t **link)
/**
 ***  Deletes the element from the row it is in, given a pointer to
 ***  the row link which leads to this element.
 ***  On return the pointer pointed to by link points to the new next element.
 ***  NOTE WELL: last_value_matrix must be set before this function is called.
 **/
{
  struct element_t *p;
  p = *link;
  *link = p->next.col;
  disclaim_element(p);
  /* conservatively cause mtx_set_value to forget */
  last_value_matrix->last_value = NULL;
}

static void delete_from_col(struct element_t **link)
/**
 ***  Deletes the element from the col it is in, given a pointer to
 ***  the col link which leads to this element.
 ***  NOTE WELL: last_value_matrix must be set before this function is called.
 **/
{
  struct element_t *p;
  p = *link;
  *link = p->next.row;
  disclaim_element(p);
  /* conservatively cause mtx_set_value to forget */
  last_value_matrix->last_value = NULL;
}

static void nuke_fishnet(mtx_matrix_t mtx)
/**
 ***  Deletes every element even remotely associated with a matrix
 ***  and nulls the headers.  Also nulls the headers of slaves
 ***  and cleans out their elements.
 **/
{
  int32 i;

  if ( ISNULL(mtx) || ISNULL(mtx->hdr.row) ||
       ISNULL(mtx->hdr.col) || ISSLAVE(mtx)) {
    FPRINTF(g_mtxerr,"WARNING: (mtx) nuke_fishnet called\n");
    FPRINTF(g_mtxerr,"         with bad matrix.\n");
    return;
  }
  if (mtx->capacity<1) return;
  mtx->last_value = NULL;
  mem_clear_store(mtx->ms);
  zero(mtx->hdr.row,mtx->capacity,struct element_t *);
  zero(mtx->hdr.col,mtx->capacity,struct element_t *);
  for (i = 0; i < mtx->nslaves; i++) {
    zero(mtx->slaves[i]->hdr.row,mtx->capacity,struct element_t *);
    zero(mtx->slaves[i]->hdr.col,mtx->capacity,struct element_t *);
  }
}

static void blast_row( mtx_matrix_t mtx, int32 org, mtx_range_t *rng)
/**
 ***  Destroys all elements in the given row with (current)
 ***  col index in the given col range: if rng == mtx_ALL_COLS,
 ***  the entire row is destroyed.
 ***  NOTE WELL: last_value_matrix must be set before this function is called.
 **/
{
  struct element_t **link;

  link = &(mtx->hdr.row[org]);
  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*link) )
      delete_from_row(link);
  } else {
    int32 *tocur = mtx->perm.col.org_to_cur;
    int32 col;
    while( NOTNULL(*link) ) {
      col = (*link)->col;
      if( col == mtx_NONE || in_range(rng,tocur[col]) )
        delete_from_row(link);
      else
        link = &((*link)->next.col);
    }
  }
}

static void blast_col( mtx_matrix_t mtx, int32 org, mtx_range_t *rng)
/**
 ***  Destroys all elements in the given col with (current)
 ***  row index in the given row range: if rng == mtx_ALL_ROWS,
 ***  the entire col is destroyed.
 ***  NOTE WELL: last_value_matrix must be set before this function is called.
 **/
{
  struct element_t **link;

  link = &(mtx->hdr.col[org]);
  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*link) )
      delete_from_col(link);
  } else {
    int32 *tocur = mtx->perm.row.org_to_cur, row;

    while( NOTNULL(*link) ) {
      row = (*link)->row;
      if( row == mtx_NONE || in_range(rng,tocur[row]) )
        delete_from_col(link);
      else
        link = &((*link)->next.row);
    }
  }
}

static void clean_row(mtx_matrix_t mtx, int32 org)
/**
 ***  Disclaims all elements in the given row which have already been
 ***  disclaimed once.
 ***  NOTE WELL: last_value_matrix must be set before this function is called.
 **/
{
  struct element_t **link;

  link = &(mtx->hdr.row[org]);
  while( NOTNULL(*link) )
    if( (*link)->row == mtx_NONE )
      delete_from_row(link);
    else
      link = &((*link)->next.col);
}

static void clean_col(mtx_matrix_t mtx, int32 org)
/**
 ***  Disclaims all elements in the given col which have already been
 ***  disclaimed once.
 ***  NOTE WELL: last_value_matrix must be set before this function is called.
 **/
{
  struct element_t **link;

  link = &(mtx->hdr.col[org]);
  while( NOTNULL(*link) )
    if( (*link)->col == mtx_NONE )
      delete_from_col(link);
    else
      link = &((*link)->next.row);
}
/********************************************************************/

mtx_coord_t *mtx_coord(mtx_coord_t *coordp, int32 row, int32 col)
{
  coordp->row = row;
  coordp->col = col;
  return(coordp);
}

mtx_range_t *mtx_range(mtx_range_t *rangep, int32 low, int32 high)
{
  rangep->low = low;
  rangep->high = high;
  return(rangep);
}

mtx_region_t *mtx_region( mtx_region_t *regionp, int32 rowlow, int32 rowhigh,
                          int32 collow, int32 colhigh)
{
  mtx_range(&(regionp->row),rowlow,rowhigh);
  mtx_range(&(regionp->col),collow,colhigh);
  return(regionp);
}
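
/*
 * Illustrative sketch (not from the original source): the constructors
 * above fill caller-owned structs in place and return them, so they
 * nest conveniently inside other calls:
 *
 *   mtx_coord_t nz;
 *   real64 v = mtx_value(mtx, mtx_coord(&nz,row,col));
 *
 * (row and col are hypothetical current indices)
 */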

static int g_mtx_debug_redirect = 0;

void mtx_debug_redirect_freeze(void) {
  g_mtx_debug_redirect = 1;
}

static void mtx_redirectErrors(FILE *f) {
  if (!g_mtx_debug_redirect) {
    assert(f != NULL);
    g_mtxerr = f;
  }
}

mtx_matrix_t mtx_create(void)
{
  mtx_matrix_t mtx;

  mtx_redirectErrors(stderr); /* call mtx_debug_redirect_freeze() to bypass */

  mtx = alloc_header();
  mtx->order = ZERO;
  mtx->capacity = ZERO;
  mtx->hdr.row = mtx->hdr.col = NULL;
  mtx->perm.row.org_to_cur = mtx_alloc_perm(ZERO);
  mtx->perm.row.cur_to_org = mtx_alloc_perm(ZERO);
  mtx->perm.row.parity = EVEN;
  mtx->perm.col.org_to_cur = mtx_alloc_perm(ZERO);
  mtx->perm.col.cur_to_org = mtx_alloc_perm(ZERO);
  mtx->perm.col.parity = EVEN;
  mtx->perm.transpose = mtx_NONE;
  mtx->data =
    (struct structural_data_t *)ascmalloc(sizeof(struct structural_data_t));
  mtx->data->symbolic_rank = -1;
  mtx->data->nblocks = -1;
  mtx->data->block = NULL;
  mtx->master = NULL;
  mtx->nslaves = ZERO;
  mtx->slaves = NULL;
  mtx->last_value = NULL;
  mtx->ms = mem_create_store(LENMAGIC, WIDTHMAGIC/sizeof(struct element_t) - 1,
                             sizeof(struct element_t),10,2048);
  return(mtx);
}

/**
 **  Slave headers share the master's:
 **    perm.anything, data->anything, ms->anything, order and capacity.
 **  Unique to the slave are:
 **    hdr.anything
 **    last_value.
 **  All slaves of a master appear as pointers in the master's slaves list,
 **  which is nslaves long.  Slaves do not have slaves.
 **/
mtx_matrix_t mtx_create_slave(mtx_matrix_t master)
{
  mtx_matrix_t mtx, *mlist;
  int32 newcnt;
  mtx_redirectErrors(stderr); /* call mtx_debug_redirect_freeze() to bypass */
  if(!mtx_check_matrix(master) || ISSLAVE(master)) return NULL;

  mtx = alloc_header();
  if (ISNULL(mtx)) return mtx;
  mtx->order = master->order;
  mtx->capacity = master->capacity;
  mtx->nslaves = ZERO;
  mtx->hdr.row = calloc_nz_header(master->capacity);
  if (ISNULL(mtx->hdr.row)) {
    ascfree(mtx);
    FPRINTF(g_mtxerr,"mtx_create_slave: Insufficient memory.\n");
    return NULL;
  }
  mtx->hdr.col = calloc_nz_header(master->capacity);
  if (ISNULL(mtx->hdr.col)) {
    ascfree(mtx->hdr.row);
    ascfree(mtx);
    FPRINTF(g_mtxerr,"mtx_create_slave: Insufficient memory.\n");
    return NULL;
  }
  mtx->perm.row.org_to_cur = master->perm.row.org_to_cur;
  mtx->perm.row.cur_to_org = master->perm.row.cur_to_org;
  mtx->perm.row.parity = EVEN; /* don't look at this again */
  mtx->perm.col.org_to_cur = master->perm.col.org_to_cur;
  mtx->perm.col.cur_to_org = master->perm.col.cur_to_org;
  mtx->perm.col.parity = EVEN; /* don't look at this again */
  mtx->perm.transpose = mtx_NONE; /* don't look at this again */
  mtx->slaves = NULL;
  mtx->data = master->data;
  mtx->master = master;
  mtx->last_value = NULL;
  mtx->ms = master->ms;
  newcnt = master->nslaves + 1;
  if (newcnt == 1) {
    mlist = (mtx_matrix_t *)ascmalloc(sizeof(mtx_matrix_t));
  } else {
    mlist =
      (mtx_matrix_t *)ascrealloc(master->slaves,newcnt*sizeof(mtx_matrix_t));
  }
  if (ISNULL(mlist)) {
    ascfree(mtx->hdr.row);
    ascfree(mtx->hdr.col);
    ascfree(mtx);
    FPRINTF(g_mtxerr,"mtx_create_slave: Insufficient memory.\n");
    return NULL;
  } else {
    master->slaves = mlist;
    master->slaves[master->nslaves] = mtx;
    master->nslaves = newcnt;
  }
  return(mtx);
}
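
/*
 * Illustrative sketch (not from the original source): a master owns the
 * permutations and the element store; slaves share those and keep only
 * their own incidence headers.  Destroying the master reclaims its
 * slaves as well:
 *
 *   mtx_matrix_t master = mtx_create();
 *   mtx_matrix_t slave;
 *   mtx_set_order(master, n);            (n is a hypothetical order)
 *   slave = mtx_create_slave(master);
 *   ...
 *   mtx_destroy(master);
 */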

static void destroy_slave(mtx_matrix_t master, mtx_matrix_t mtx)
{
  int32 sid,i;

  if(!mtx_check_matrix(mtx) || !mtx_check_matrix(master)) {
    FPRINTF(g_mtxerr,
            "destroy_slave(mtx.c) called with corrupt slave or master\n");
    return;
  }
  if (ISSLAVE(master) || !ISSLAVE(mtx)) {
    FPRINTF(g_mtxerr,
            "destroy_slave(mtx.c) called with mismatched slave/master pair\n");
    return;
  }
  /* get slave index */
  sid = -1;
  for (i=0; i<master->nslaves; i++) {
    if (master->slaves[i] == mtx) {
      sid = i;
    }
  }
  if (sid < 0) {
    FPRINTF(g_mtxerr,
            "destroy_slave(mtx.c) called with mismatched slave/master pair\n");
    return;
  }
  /* move the rest of the slaves up the list */
  for (i = sid; i < master->nslaves - 1; i++) {
    master->slaves[i] = master->slaves[i+1];
  }
  (master->nslaves)--;
  /* we will not realloc smaller. */
  if (master->nslaves == 0 && NOTNULL(master->slaves)) {
    ascfree(master->slaves);
    master->slaves = NULL;
  }
  mtx_clear_region(mtx,mtx_ENTIRE_MATRIX);
  free_nz_header(mtx->hdr.row);
  free_nz_header(mtx->hdr.col);
  free_header(mtx);
}

void mtx_destroy(mtx_matrix_t mtx)
{
  int32 i;
  if(!mtx_check_matrix(mtx)) return;
  if (ISSLAVE(mtx)) {
    destroy_slave(mtx->master,mtx);
    return;
  }

  for (i=0; i<mtx->nslaves; i++) {
    if (mtx_check_matrix(mtx->slaves[i])) {
      free_nz_header(mtx->slaves[i]->hdr.row);
      free_nz_header(mtx->slaves[i]->hdr.col);
      free_header(mtx->slaves[i]);
      mtx->slaves[i] = NULL;
    } else {
      FPRINTF(g_mtxerr,
              "mtx_destroy: Corrupt slave found while destroying master.\n");
      FPRINTF(g_mtxerr,
              "             Slave %d being abandoned.\n",i);
    }
  }
  mtx->nslaves = 0;

  mtx_clear_region(mtx,mtx_ENTIRE_MATRIX);
  /* mtx_check_matrix is called again if MTX_DEBUG */
  /**
   ***  The fishnet structure is
   ***  destroyed, just leaving
   ***  the headers and maybe ms.
   **/
  mem_destroy_store(mtx->ms);

  free_nz_header(mtx->hdr.row);
  free_nz_header(mtx->hdr.col);

  mtx_free_perm(mtx->perm.row.org_to_cur);
  mtx_free_perm(mtx->perm.row.cur_to_org);
  mtx_free_perm(mtx->perm.col.org_to_cur);
  mtx_free_perm(mtx->perm.col.cur_to_org);

  if( NOTNULL(mtx->data->block) ) {
    ascfree(mtx->data->block);
  }
  ascfree(mtx->data);

  free_header(mtx);
}

mtx_sparse_t *mtx_create_sparse(int32 cap) {
  mtx_sparse_t *ret;
  ret = (mtx_sparse_t *)ascmalloc(sizeof(mtx_sparse_t));
  if (ISNULL(ret)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx_create_sparse) Insufficient memory.\n");
    return ret;
  }
  ret->data = (real64 *)ascmalloc(cap*sizeof(real64));
  if (ISNULL(ret->data)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx_create_sparse) Insufficient memory.\n");
    ascfree(ret);
    return NULL;
  }
  ret->idata = (int32 *)ascmalloc(cap*sizeof(int32));
  if (ISNULL(ret->idata)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx_create_sparse) Insufficient memory.\n");
    ascfree(ret->data);
    ascfree(ret);
    return NULL;
  }
  ret->cap = cap;
  ret->len = 0;
  return ret;
}

void mtx_destroy_sparse(mtx_sparse_t *ret)
{
  if (ISNULL(ret)) return;
  if (NOTNULL(ret->idata)) ascfree(ret->idata);
  if (NOTNULL(ret->data)) ascfree(ret->data);
  ascfree(ret);
}
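
/*
 * Illustrative sketch (not from the original source): the typical
 * lifecycle of a sparse sized to hold any single row or column:
 *
 *   mtx_sparse_t *sp = mtx_create_sparse(mtx_order(mtx));
 *   if (NOTNULL(sp)) {
 *     mtx_steal_org_row_sparse(mtx, row, sp, mtx_ALL_COLS);
 *     ... use sp->idata[0..sp->len-1] and sp->data[0..sp->len-1] ...
 *     mtx_destroy_sparse(sp);
 *   }
 *
 * (row is a hypothetical current row index)
 */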

void mtx_destroy_blocklist(mtx_block_t *bl)
{
  if (ISNULL(bl)) return;
  if (NOTNULL(bl->block) && bl->nblocks>0) ascfree(bl->block);
  ascfree(bl);
}

mtx_matrix_t mtx_duplicate_region(mtx_matrix_t mtx, mtx_region_t *reg,
                                  real64 drop)
{
  mtx_matrix_t new;
  int32 org_row;
  struct element_t *elt;

  if (!mtx_check_matrix(mtx) ||
      (mtx->master != NULL && !mtx_check_matrix(mtx->master))) {
    return NULL;
  }
  drop = fabs(drop);
  if (ISSLAVE(mtx)) {
    new = mtx_create_slave(mtx->master);
  } else {
    new = mtx_create_slave(mtx);
  }
  if (new == NULL) {
    FPRINTF(g_mtxerr,"ERROR: (mtx_duplicate_region) Insufficient memory.\n");
    return NULL;
  }
  if (reg==mtx_ENTIRE_MATRIX){
    if (drop >0.0) { /* copy with drop tolerance */
      for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
        for( elt = mtx->hdr.row[org_row] ;
             NOTNULL(elt) ; elt = elt->next.col ) {
          if( elt->row != mtx_NONE && fabs(elt->value) > drop) {
            mtx_create_element_value(new,elt->row,elt->col,elt->value);
          }
        }
      }
    } else {
      for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
        for( elt = mtx->hdr.row[org_row] ;
             NOTNULL(elt) ; elt = elt->next.col ) {
          if( elt->row != mtx_NONE ) {
            mtx_create_element_value(new,elt->row,elt->col,elt->value);
          }
        }
      }
    }
  } else {
    int32 *rowcur, *colcur, row,col;
    mtx_range_t *colrng,*rowrng;
    rowcur=mtx->perm.row.org_to_cur;
    colcur=mtx->perm.col.org_to_cur;
    rowrng=&(reg->row);
    colrng=&(reg->col);
    if (drop >0.0) { /* copy with drop tolerance */
      for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
        row=rowcur[org_row];
        if (in_range(rowrng,row)) {
          for( elt = mtx->hdr.row[org_row] ;
               NOTNULL(elt) ; elt = elt->next.col ) {
            col=colcur[elt->col];
            if( in_range(colrng,col) && elt->row != mtx_NONE &&
                fabs(elt->value) > drop ) {
              /* don't copy bad elements */
              mtx_create_element_value(new,elt->row,elt->col,elt->value);
            }
          }
        }
      }
    } else {
      for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
        row=rowcur[org_row];
        if (in_range(rowrng,row)) {
          for( elt = mtx->hdr.row[org_row] ;
               NOTNULL(elt) ; elt = elt->next.col ) {
            col=colcur[elt->col];
            if( in_range(colrng,col) && elt->row != mtx_NONE ) {
              /* don't copy bad elements */
              mtx_create_element_value(new,elt->row,elt->col,elt->value);
            }
          }
        }
      }
    }
  }

  return new;
}

/* WARNING: this function assumes sufficient memory is available.
 * It needs to check for NULL returns from its allocations, and doesn't.
 */
mtx_matrix_t mtx_copy_options(mtx_matrix_t mtx, boolean blocks,
                              boolean incidence, mtx_region_t *reg,
                              real64 drop)
{
  mtx_matrix_t copy;
  int32 org_row;
  struct element_t *elt;
  int32 bnum;

  if (!mtx_check_matrix(mtx)) return NULL;
  drop = fabs(drop);

  /* create same size matrix */
  copy = alloc_header();
  copy->order = mtx->order;
  copy->capacity = mtx->capacity;
  copy->master = NULL;
  copy->nslaves = ZERO;
  copy->slaves = NULL;
  copy->last_value = NULL;

  /* copy headers and fishnet */
  copy->hdr.row = calloc_nz_header(copy->capacity);
  copy->hdr.col = calloc_nz_header(copy->capacity);
  if (incidence) {
    struct mem_statistics stat;
    int s_expool;
    mem_get_stats(&stat,mtx->ms);
    s_expool = MAX(2048,stat.elt_total/stat.str_wid);
    copy->ms = mem_create_store(LENMAGIC,
                                WIDTHMAGIC/sizeof(struct element_t) - 1,
                                sizeof(struct element_t),
                                10,s_expool-LENMAGIC);
    /* copy of a slave or master matrix will end up with an
     * initial mem_store that is the cumulative size of the
     * master and all its slaves since they share the ms.
     */
    if (reg==mtx_ENTIRE_MATRIX){
      if (drop >0.0) { /* copy with drop tolerance */
        for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
          for( elt = mtx->hdr.row[org_row] ;
               NOTNULL(elt) ; elt = elt->next.col ) {
            if( elt->row != mtx_NONE && fabs(elt->value) > drop) {
              mtx_create_element_value(copy,elt->row,elt->col,elt->value);
            }
          }
        }
      } else {
        for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
          for( elt = mtx->hdr.row[org_row] ;
               NOTNULL(elt) ; elt = elt->next.col ) {
            if( elt->row != mtx_NONE ) {
              mtx_create_element_value(copy,elt->row,elt->col,elt->value);
            }
          }
        }
      }
    } else {
      int32 *rowcur, *colcur, row,col;
      mtx_range_t *colrng,*rowrng;
      rowcur=mtx->perm.row.org_to_cur;
      colcur=mtx->perm.col.org_to_cur;
      rowrng=&(reg->row);
      colrng=&(reg->col);
      if (drop >0.0) { /* copy with drop tolerance */
        for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
          row=rowcur[org_row];
          if (in_range(rowrng,row)) {
            for( elt = mtx->hdr.row[org_row] ;
                 NOTNULL(elt) ; elt = elt->next.col ) {
              col=colcur[elt->col];
              if( in_range(colrng,col) && elt->row != mtx_NONE &&
                  fabs(elt->value) > drop ) {
                /* don't copy bad elements */
                mtx_create_element_value(copy,elt->row,elt->col,elt->value);
              }
            }
          }
        }
      } else {
        for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
          row=rowcur[org_row];
          if (in_range(rowrng,row)) {
            for( elt = mtx->hdr.row[org_row] ;
                 NOTNULL(elt) ; elt = elt->next.col ) {
              col=colcur[elt->col];
              if( in_range(colrng,col) && elt->row != mtx_NONE ) {
                /* don't copy bad elements */
                mtx_create_element_value(copy,elt->row,elt->col,elt->value);
              }
            }
          }
        }
      }
    }
  } else {
    copy->ms = mem_create_store(LENMAGIC,
                                WIDTHMAGIC/sizeof(struct element_t) - 1,
                                sizeof(struct element_t),10,2048);
    /* copy of a slave or master matrix will end up with an
       initial mem_store that is the default size */
  }

  /* copy permutation information */
  copy->perm.row.org_to_cur = mtx_alloc_perm(copy->capacity);
  mtx_copy_perm(copy->perm.row.org_to_cur,
                mtx->perm.row.org_to_cur,copy->capacity);
  copy->perm.row.cur_to_org = mtx_alloc_perm(copy->capacity);
  mtx_copy_perm(copy->perm.row.cur_to_org,
                mtx->perm.row.cur_to_org,copy->capacity);
  copy->perm.row.parity = mtx_row_parity(mtx);

  copy->perm.col.org_to_cur = mtx_alloc_perm(copy->capacity);
  mtx_copy_perm(copy->perm.col.org_to_cur,
                mtx->perm.col.org_to_cur,copy->capacity);
  copy->perm.col.cur_to_org = mtx_alloc_perm(copy->capacity);
  mtx_copy_perm(copy->perm.col.cur_to_org,
                mtx->perm.col.cur_to_org,copy->capacity);
  copy->perm.col.parity = mtx_col_parity(mtx);

  /* copy (or not) block data */
  copy->data =
    (struct structural_data_t *)ascmalloc(sizeof(struct structural_data_t));
  if (blocks) {
    copy->data->symbolic_rank = mtx->data->symbolic_rank;
    copy->data->nblocks = mtx->data->nblocks;
    copy->data->block = mtx->data->nblocks > 0 ? (mtx_region_t *)
      ascmalloc( mtx->data->nblocks*sizeof(mtx_region_t) ) : NULL;
    for( bnum=0; bnum < mtx->data->nblocks; bnum++ ) {
      copy->data->block[bnum].row.low = mtx->data->block[bnum].row.low;
      copy->data->block[bnum].row.high = mtx->data->block[bnum].row.high;
      copy->data->block[bnum].col.low = mtx->data->block[bnum].col.low;
      copy->data->block[bnum].col.high = mtx->data->block[bnum].col.high;
    }
  } else {
    copy->data->symbolic_rank=0;
    copy->data->nblocks=0;
    copy->data->block =NULL;
  }

  return(copy);
}
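
/*
 * Illustrative sketch (not from the original source): a full structural
 * copy with incidence and block data and no drop tolerance:
 *
 *   mtx_matrix_t dup = mtx_copy_options(mtx, TRUE, TRUE,
 *                                       mtx_ENTIRE_MATRIX, 0.0);
 */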

int32 mtx_order( mtx_matrix_t mtx)
{
  if (!mtx_check_matrix(mtx)) return -1;
  return(mtx->order);
}

int32 mtx_capacity( mtx_matrix_t mtx)
{
  if (!mtx_check_matrix(mtx)) return -1;
  return(mtx->capacity);
}

/* this is only to be called from within mtx_set_order */
static void trim_incidence(mtx_matrix_t mtx, int32 order)
{
  int32 ndx;
  int32 *toorg;

  last_value_matrix = mtx;
  toorg = mtx->perm.col.cur_to_org;
  for( ndx = order ; ndx < mtx->order ; ++ndx )
    blast_col(mtx,toorg[ndx],mtx_ALL_ROWS);
  for( ndx = ZERO ; ndx < order ; ++ndx )
    clean_row(mtx,toorg[ndx]);

  toorg = mtx->perm.row.cur_to_org;
  for( ndx = order ; ndx < mtx->order ; ++ndx )
    blast_row(mtx,toorg[ndx],mtx_ALL_COLS);
  for( ndx = ZERO ; ndx < order ; ++ndx )
    clean_col(mtx,toorg[ndx]);
}

/* this is only to be called from within mtx_set_order */
static void enlarge_nzheaders(mtx_matrix_t mtx, int32 order)
{
  struct element_t **newhdr;

  /* realloc does not initialize, so calloc is the best we can do here */

  newhdr = calloc_nz_header(order);
  copy_nz_header(newhdr,mtx->hdr.row,mtx->capacity);
  free_nz_header(mtx->hdr.row);
  mtx->hdr.row = newhdr;

  newhdr = calloc_nz_header(order);
  copy_nz_header(newhdr,mtx->hdr.col,mtx->capacity);
  free_nz_header(mtx->hdr.col);
  mtx->hdr.col = newhdr;
}

void mtx_set_order( mtx_matrix_t mtx, int32 order)
/**
 ***  This function will preserve the fact that all of the arrays are
 ***  "correct" out to the capacity of the arrays, not just out to the order
 ***  of the matrix.  In other words, extensions to orders which still do
 ***  not exceed the capacity of the arrays are trivial.
 **/
{
  int32 i;
  if(!mtx_check_matrix(mtx)) return;
  if (ISSLAVE(mtx)) {
    mtx_set_order(mtx->master,order);
    return;
  }
  /* we now have a master matrix */
  if( order < mtx->order ) { /* Truncate */
    trim_incidence(mtx,order); /* clean master */
    for (i = 0; i < mtx->nslaves; i++) {
      trim_incidence(mtx->slaves[i],order);
    }
  }
  if (mtx->perm.transpose == mtx_NONE) {
    mtx->perm.transpose = 0;
  }

  if( order > mtx->capacity ) {
    int32 *newperm;
    int32 ndx;

    enlarge_nzheaders(mtx,order);
    for (i = 0; i < mtx->nslaves; i++) {
      enlarge_nzheaders(mtx->slaves[i],order);
    }

    /* realloc not in order here.  Happens only on the master. */
    newperm = mtx_alloc_perm(order);
    mtx_copy_perm(newperm,mtx->perm.row.org_to_cur,mtx->capacity);
    for( ndx=mtx->capacity ; ndx < order ; ++ndx )
      newperm[ndx] = ndx;
    mtx_free_perm(mtx->perm.row.org_to_cur);
    mtx->perm.row.org_to_cur = newperm;

    newperm = mtx_alloc_perm(order);
    mtx_copy_perm(newperm,mtx->perm.row.cur_to_org,mtx->capacity);
    for( ndx=mtx->capacity ; ndx < order ; ++ndx )
      newperm[ndx] = ndx;
    mtx_free_perm(mtx->perm.row.cur_to_org);
    mtx->perm.row.cur_to_org = newperm;

    newperm = mtx_alloc_perm(order);
    mtx_copy_perm(newperm,mtx->perm.col.org_to_cur,mtx->capacity);
    for( ndx=mtx->capacity ; ndx < order ; ++ndx )
      newperm[ndx] = ndx;
    mtx_free_perm(mtx->perm.col.org_to_cur);
    mtx->perm.col.org_to_cur = newperm;

    newperm = mtx_alloc_perm(order);
    mtx_copy_perm(newperm,mtx->perm.col.cur_to_org,mtx->capacity);
    for( ndx=mtx->capacity ; ndx < order ; ++ndx )
      newperm[ndx] = ndx;
    mtx_free_perm(mtx->perm.col.cur_to_org);
    mtx->perm.col.cur_to_org = newperm;

    mtx->capacity = order;
    for (i = 0; i < mtx->nslaves; i++) {
      mtx->slaves[i]->capacity = order;
      /* point slaves at master perm again in case anything has changed */
      mtx->slaves[i]->perm.row.org_to_cur = mtx->perm.row.org_to_cur;
      mtx->slaves[i]->perm.row.cur_to_org = mtx->perm.row.cur_to_org;
      mtx->slaves[i]->perm.col.org_to_cur = mtx->perm.col.org_to_cur;
      mtx->slaves[i]->perm.col.cur_to_org = mtx->perm.col.cur_to_org;
    }
  }
  mtx->order = order;
  for (i = 0; i < mtx->nslaves; i++) {
    mtx->slaves[i]->order = order;
  }
}
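
/*
 * Illustrative sketch (not from the original source): growing within the
 * current capacity only bumps the order; exceeding capacity reallocates
 * the headers and permutations on the master and repoints every slave:
 *
 *   mtx_set_order(mtx, 2*mtx_order(mtx));
 */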

void mtx_clear_coord(mtx_matrix_t mtx, int32 row, int32 col)
{
  struct element_t **rlink, **clink;
  int32 org_row, org_col;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return; /*ben*/
#endif

  last_value_matrix = mtx;
  org_row = mtx->perm.row.cur_to_org[row];
  org_col = mtx->perm.col.cur_to_org[col];
  rlink = &(mtx->hdr.row[org_row]);
  for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
    rlink = &((*rlink)->next.col);
  if( ISNULL(*rlink) ) return;
  clink = &(mtx->hdr.col[org_col]);
  for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
    clink = &((*clink)->next.row);
  delete_from_row(rlink);
  delete_from_col(clink);
}

void mtx_clear_row( mtx_matrix_t mtx, int32 row, mtx_range_t *rng)
{
  struct element_t **rlink, **clink;
  int32 org_row;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return; /*ben*/
#endif

  last_value_matrix = mtx;
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,mtx->perm.col.org_to_cur[(*rlink)->col]) ) {
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else
        rlink = &((*rlink)->next.col);
    }
  }
}

void mtx_clear_col( mtx_matrix_t mtx, int32 col, mtx_range_t *rng)
{
  struct element_t **clink, **rlink;
  int32 org_col;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  last_value_matrix = mtx;
  org_col = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org_col]);
  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*clink) ) {
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*clink) ) {
      if( in_range(rng,mtx->perm.row.org_to_cur[(*clink)->row]) ) {
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else
        clink = &((*clink)->next.row);
    }
  }
}

void mtx_clear_region(mtx_matrix_t mtx, mtx_region_t *region)
{
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif

  if( region == mtx_ENTIRE_MATRIX && !ISSLAVE(mtx)) {
    /* damn the torpedoes, wipe that sucker and slaves fast */
    nuke_fishnet(mtx);
  } else {
    int32 ndx;
    int32 *toorg;
    mtx_region_t reg;

    if (region == mtx_ENTIRE_MATRIX) {
      reg.row.low = reg.col.low = 0;
      reg.row.high = reg.col.high = mtx->order-1;
    } else {
      reg = *region;
    }
    last_value_matrix = mtx;
    toorg = mtx->perm.row.cur_to_org;
    for( ndx = reg.row.low ; ndx <= reg.row.high ; ++ndx )
      blast_row(mtx,toorg[ndx],&(reg.col));
    toorg = mtx->perm.col.cur_to_org;
    for( ndx = reg.col.low ; ndx <= reg.col.high ; ++ndx )
      clean_col(mtx,toorg[ndx]);
  }
}
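
/*
 * Illustrative sketch (not from the original source): clearing a
 * sub-block by current coordinates takes the blast_row/clean_col path;
 * only mtx_ENTIRE_MATRIX on a master gets the fast nuke_fishnet wipe:
 *
 *   mtx_region_t reg;
 *   mtx_clear_region(mtx, mtx_region(&reg, rlo, rhi, clo, chi));
 *
 * (rlo, rhi, clo, chi are hypothetical current index bounds)
 */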

void mtx_reset_perm(mtx_matrix_t mtx)
{
  int32 ndx;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  if (ISSLAVE(mtx)) {
    mtx_reset_perm(mtx->master);
    return;
  }

  for( ndx=ZERO ; ndx < mtx->capacity ; ++ndx ) {
    mtx->perm.row.org_to_cur[ndx] = ndx;
    mtx->perm.row.cur_to_org[ndx] = ndx;
    mtx->perm.col.org_to_cur[ndx] = ndx;
    mtx->perm.col.cur_to_org[ndx] = ndx;
  }
  mtx->perm.row.parity = mtx->perm.col.parity = EVEN;

  mtx->data->symbolic_rank = -1;
  mtx->data->nblocks = -1;
  if( NOTNULL(mtx->data->block) ) {
    ascfree(mtx->data->block);
    mtx->data->block = NULL;
  }
}

void mtx_clear(mtx_matrix_t mtx)
{
  if (ISSLAVE(mtx)) {
    mtx_clear_region(mtx->master,mtx_ENTIRE_MATRIX);
  }
  mtx_clear_region(mtx,mtx_ENTIRE_MATRIX);
  mtx_reset_perm(mtx);
}

real64 mtx_value(mtx_matrix_t mtx, mtx_coord_t *coord)
{
  struct element_t *elt;
  register int32 org_row,org_col;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return D_ZERO;
#endif
  org_row = mtx->perm.row.cur_to_org[coord->row];
  org_col = mtx->perm.col.cur_to_org[coord->col];
  elt = mtx_find_element(mtx,org_row,org_col);
  mtx->last_value = elt;
  return( ISNULL(elt) ? D_ZERO : elt->value );
}

void mtx_set_value( mtx_matrix_t mtx, mtx_coord_t *coord, real64 value)
{
  register int32 org_row,org_col;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  org_row = mtx->perm.row.cur_to_org[coord->row];
  org_col = mtx->perm.col.cur_to_org[coord->col];
  if ( NOTNULL(mtx->last_value) &&
       mtx->last_value->row==org_row &&
       mtx->last_value->col==org_col ) {
    mtx->last_value->value = value;
  } else {
    struct element_t *elt;
    if( ISNULL(elt = mtx_find_element(mtx,org_row,org_col)) ) {
      if (value != D_ZERO ) {
        elt = mtx_create_element_value(mtx,org_row,org_col,value);
      }
    } else {
      elt->value = value;
    }
  }
}

void mtx_fill_value(mtx_matrix_t mtx, mtx_coord_t *coord, real64 value)
{
  register int32 org_row,org_col;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  org_row = mtx->perm.row.cur_to_org[coord->row];
  org_col = mtx->perm.col.cur_to_org[coord->col];
  mtx_create_element_value(mtx,org_row,org_col,value);
}

void mtx_fill_org_value(mtx_matrix_t mtx, const mtx_coord_t *coord,
                        real64 value)
{
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  mtx_create_element_value(mtx,coord->row,coord->col,value);
}

/* sparse matrix assembly of potentially duplicate fills */
/* Takes a matrix, assumed to have redundant and otherwise insane incidences
   created by 'misusing mtx_fill_value', and sums all like entries,
   eliminating the duplicates and the zeroes.  Returns -(# of elements
   removed), or 1 if it fails for some reason.
   Could stand to have the error messages it emits improved.
   Could stand to take a rowrange or a rowlist, a colrange or a collist,
   and a droptol.
   Algorithm: O(3*tau)
     establish a vector mv of columns needing cleaning markers
     set lowmark=-1, highmark=-2
     for rows
       if row empty continue
       establish a vector ev of elt pointers set null
       prevlink = &header 1st elt pointer
       for each elt in row
         if elt is not marked in ev
           mark ev with elt, update prevlink
         else
           add value to elt pointed at by ev
           mark col in mv as needing cleaning, updating lowmark,highmark
           delete elt
         endif
       endfor
       for each elt in row
         unmark ev with elt
         if elt is 0, delete and mark col in mv.
       endfor
     endfor
     for i = lowmark to highmark
       clean col
     endfor
   My god, the tricks we play on a linked list.
*/
int32 mtx_assemble(mtx_matrix_t mtx)
{
  char *real_mv=NULL, *mv;
  struct element_t **real_ev=NULL, **ev, **prevlink, *elt;
  int32 orgrow, orgcol, lowmark,highmark,dup;
  /* note elt and prevlink could be combined, but the code is
     utterly unreadable if you do so. */

  if (ISNULL(mtx)) return 1;
  if (mtx->order <1) return 0;
  real_mv = mtx_null_mark(mtx->order+1);
  mv = real_mv+1;
  if (ISNULL(real_mv)) return 1;
  real_ev = mtx_null_vector(mtx->order+1);
  ev = real_ev+1;
  if (ISNULL(real_ev)) {
    mtx_null_mark_release();
    return 1;
  }
  /* we have allocated arrays which include a -1 element to buy
     ourselves an awfully convenient lot of safety. */
  lowmark=-1;
  highmark=-2;
  dup = 0;
  last_value_matrix = mtx;
  for (orgrow=0; orgrow < mtx->order; orgrow++) {
    elt = mtx->hdr.row[orgrow];
    prevlink = &(mtx->hdr.row[orgrow]);
    while (NOTNULL(elt)) {
      if (ISNULL(ev[elt->col])) {
        ev[elt->col] = elt;         /* mark first elt found for this col */
        prevlink= &(elt->next.col); /* collect pointer to where we go next */
        elt = elt->next.col;        /* go there */
      } else {
        /* elt is a duplicate and must die */
        dup++;
        ev[elt->col]->value += elt->value; /* accumulate value */
        /* update lowmark, highmark.  this is a debatable implementation. */
        /* for small mods on large matrices, this is sane */
        if (lowmark > -1) { /* not first mark */
          if (elt->col < lowmark && elt->col > -1) {
            lowmark = elt->col;
          }
          if (elt->col > highmark && elt->col < mtx->order) {
            highmark = elt->col;
          }
        } else { /* very first mark */
          if (elt->col > -1 && elt->col < mtx->order) {
            lowmark = highmark = elt->col;
          }
        }
        mv[elt->col] = 1; /* mark column as to be cleaned */
        delete_from_row(prevlink);
        elt = *prevlink;
      }
    }
    elt = mtx->hdr.row[orgrow];
    prevlink = &(mtx->hdr.row[orgrow]);
    while (NOTNULL(elt)) { /* renull the accumulator and trash 0s */
      ev[elt->col] = NULL; /* regardless, reset accum */
      if (elt->value != D_ZERO) {
        prevlink= &(elt->next.col); /* collect pointer to where we go next */
        elt = elt->next.col;        /* go there */
      } else {
        /* this is still a debatable implementation. */
        if (lowmark > -1) { /* not first mark */
          if (elt->col < lowmark && elt->col > -1) {
            lowmark = elt->col;
          }
          if (elt->col > highmark && elt->col < mtx->order) {
            highmark = elt->col;
          }
        } else { /* very first mark */
          if (elt->col > -1 && elt->col < mtx->order) {
            lowmark = highmark = elt->col;
          }
        }
        mv[elt->col] = 1; /* mark column as to be cleaned */
        delete_from_row(prevlink);
        elt = *prevlink;
      }
    }
  }
  for (orgcol = lowmark; orgcol <= highmark; orgcol++) {
    if (mv[orgcol]) {
      clean_col(mtx,orgcol); /* scrap dups and 0s */
    }
  }
  mtx_null_mark_release();
  mtx_null_vector_release();
  return -dup;
}
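
/*
 * Illustrative sketch (not from the original source): summing duplicate
 * fills made by repeated mtx_fill_value calls at the same coordinate:
 *
 *   mtx_coord_t nz;
 *   mtx_fill_value(mtx, mtx_coord(&nz,i,j), 1.0);
 *   mtx_fill_value(mtx, mtx_coord(&nz,i,j), 2.0);   (duplicate incidence)
 *   dups = -mtx_assemble(mtx);   (one element remains, with value 3.0)
 *
 * (i and j are hypothetical current indices)
 */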

void mtx_del_zr_in_row(mtx_matrix_t mtx, int32 row)
{
  register int32 org;
  struct element_t **rlink, **clink;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  org = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org]);

  last_value_matrix = mtx;
  while( NOTNULL(*rlink) )
    if( (*rlink)->value == D_ZERO ) {
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    } else
      rlink = &((*rlink)->next.col);
}

void mtx_del_zr_in_col(mtx_matrix_t mtx, int32 col)
{
  register int32 org;
  struct element_t **clink, **rlink;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  org = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org]);

  last_value_matrix = mtx;
  while( NOTNULL(*clink) )
    if( (*clink)->value == D_ZERO ) {
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    } else
      clink = &((*clink)->next.row);
}

void mtx_del_zr_in_rowrange(mtx_matrix_t mtx, mtx_range_t *rng)
{
  register int32 org,row,rowhi, *toorg;
  struct element_t **rlink, **clink;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  rowhi=rng->high;
  toorg= mtx->perm.row.cur_to_org;
  last_value_matrix = mtx;
  for (row=rng->low; row <=rowhi; row++) {
    org = toorg[row];
    rlink = &(mtx->hdr.row[org]);

    while( NOTNULL(*rlink) ) {
      if( (*rlink)->value == D_ZERO ) {
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col);
      }
    }
  }
}

void mtx_del_zr_in_colrange(mtx_matrix_t mtx, mtx_range_t *rng)
{
  register int32 org,col,colhi, *toorg;
  struct element_t **clink, **rlink;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  colhi=rng->high;
  toorg= mtx->perm.col.cur_to_org;
  last_value_matrix = mtx;
  for (col=rng->low; col <=colhi; col++) {
    org = toorg[col];
    clink = &(mtx->hdr.col[org]);

    while( NOTNULL(*clink) ) {
      if( (*clink)->value == D_ZERO ) {
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else {
        clink = &((*clink)->next.row);
      }
    }
  }
}

void mtx_steal_org_row_vec(mtx_matrix_t mtx, int32 row,
                           real64 *vec, mtx_range_t *rng)
{
  struct element_t **rlink, **clink;
  int32 org_row;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  last_value_matrix = mtx;
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      vec[(*rlink)->col] = (*rlink)->value;
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    }
  } else if( rng->high >= rng->low ) {
    int32 *tocur;
    tocur = mtx->perm.col.org_to_cur;
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,tocur[(*rlink)->col]) ) {
        vec[(*rlink)->col] = (*rlink)->value;
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col);
      }
    }
  }
}

void mtx_steal_org_col_vec(mtx_matrix_t mtx, int32 col,
                           real64 *vec, mtx_range_t *rng)
{
  struct element_t **clink, **rlink;
  int32 org_col;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  last_value_matrix = mtx;
  org_col = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org_col]);
  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*clink) ) {
      vec[(*clink)->row] = (*clink)->value;
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    }
  } else if( rng->high >= rng->low ) {
    int32 *tocur;
    tocur = mtx->perm.row.org_to_cur;
    while( NOTNULL(*clink) ) {
      if( in_range(rng,tocur[(*clink)->row]) ) {
        vec[(*clink)->row] = (*clink)->value;
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else {
        clink = &((*clink)->next.row);
      }
    }
  }
}

void mtx_steal_cur_row_vec(mtx_matrix_t mtx, int32 row,
                           real64 *vec, mtx_range_t *rng)
{
  struct element_t **rlink, **clink;
  int32 org_row, *tocur;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  tocur = mtx->perm.col.org_to_cur;
  last_value_matrix = mtx;
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      vec[tocur[(*rlink)->col]] = (*rlink)->value;
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,tocur[(*rlink)->col]) ) {
        vec[tocur[(*rlink)->col]] = (*rlink)->value;
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col);
      }
    }
  }
}

void mtx_steal_cur_col_vec(mtx_matrix_t mtx, int32 col,
                           real64 *vec, mtx_range_t *rng)
{
  struct element_t **clink, **rlink;
  int32 org_col, *tocur;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  tocur = mtx->perm.row.org_to_cur;
  last_value_matrix = mtx;
  org_col = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org_col]);
  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*clink) ) {
      vec[tocur[(*clink)->row]] = (*clink)->value;
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*clink) ) {
      if( in_range(rng,tocur[(*clink)->row]) ) {
        vec[tocur[(*clink)->row]] = (*clink)->value;
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else {
        clink = &((*clink)->next.row);
      }
    }
  }
}
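
/*
 * Illustrative sketch (not from the original source): "stealing" moves a
 * line of the matrix into a dense vector, deleting the elements from the
 * matrix as it goes.  The vector must be preallocated and zeroed:
 *
 *   real64 *vec = (real64 *)ascmalloc(mtx_order(mtx)*sizeof(real64));
 *   mtx_zero_real64(vec, mtx_order(mtx));
 *   mtx_steal_cur_row_vec(mtx, row, vec, mtx_ALL_COLS);
 *
 * (row is a hypothetical current row index)
 */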

#ifdef DELETE_THIS_UNUSED_FUNCTION
/* Little function to enlarge the capacity of a sparse to the len given.
 * New memory allocated, if any, is not initialized in any way.
 * The pointer given is not itself changed, only its data.
 * This function in no way shrinks the data in a sparse.
 * (exception: buggy realloc implementations may shrink it, ack!)
 * Data/idata values up to the old capacity (ret->cap) are preserved.
 */
static int enlarge_sparse(mtx_sparse_t *ret, int32 len)
{
  int32 *inew;
  real64 *dnew;
  if (ISNULL(ret)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) enlarge_sparse called with NULL.\n");
    return 1;
  }
  if (len <= ret->cap || len <1) return 0; /* already big enough, or bogus len */

  if (ret->idata == NULL) {
    inew = (int32 *)ascmalloc(sizeof(int32)*len);
  } else {
    inew = (int32 *)ascrealloc(ret->idata,sizeof(int32)*len);
  }
  if (ISNULL(inew)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) enlarge_sparse\n");
    FPRINTF(g_mtxerr,"       Insufficient memory.\n");
    return 1;
  }
  ret->idata = inew; /* dnew can still fail without losing inew memory. */

  if (ret->data == NULL) {
    dnew = (real64 *)ascmalloc(sizeof(real64)*len);
  } else {
    dnew = (real64 *)ascrealloc(ret->data,sizeof(real64)*len);
  }
  if (ISNULL(dnew)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) enlarge_sparse\n");
    FPRINTF(g_mtxerr,"       Insufficient memory.\n");
    return 1;
  }
  ret->data = dnew; /* we succeeded */
  ret->cap = len;
  return 0;
}
#endif /* DELETE_THIS_UNUSED_FUNCTION */


/* going to try to make steal also handle sparse creation ... */
/* don't you dare, whoever you are! */
boolean mtx_steal_org_row_sparse(mtx_matrix_t mtx, int32 row,
                                 mtx_sparse_t *sp, mtx_range_t *rng)
{
  struct element_t **rlink, **clink;
  int32 org_row;
  int32 len,k;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return TRUE;
#endif
  if (sp == mtx_CREATE_SPARSE) {
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_org_row_sparse called with\n");
    FPRINTF(g_mtxerr,"       mtx_CREATE_SPARSE.  Not supported.\n");
    return TRUE;
  }
  if (rng == mtx_ALL_COLS) {
    len = mtx->order;
  } else {
    len = rng->high - rng->low +1;
  }
  if (sp->cap < len) {
    sp->len = 0;
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_org_row_sparse called with\n");
    FPRINTF(g_mtxerr,"       sparse of insufficient capacity.\n");
    return TRUE;
  }

  last_value_matrix = mtx;
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  k = 0;

  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      sp->idata[k] = (*rlink)->col;
      sp->data[k] = (*rlink)->value;
      k++;
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    }
  } else if( rng->high >= rng->low ) {
    int32 *tocur;
    tocur = mtx->perm.col.org_to_cur;
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,tocur[(*rlink)->col]) ) {
        sp->idata[k] = (*rlink)->col;
        sp->data[k] = (*rlink)->value;
        k++;
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col);
      }
    }
  }
  sp->len = k;
  return FALSE;
}

boolean mtx_steal_org_col_sparse(mtx_matrix_t mtx, int32 col,
                                 mtx_sparse_t *sp, mtx_range_t *rng)
{
  struct element_t **clink, **rlink;
  int32 org_col;
  int32 len,k;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return TRUE;
#endif
  if (sp == mtx_CREATE_SPARSE) {
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_org_col_sparse called with\n");
    FPRINTF(g_mtxerr,"       mtx_CREATE_SPARSE.  Not supported.\n");
    return TRUE;
  }
  if (rng == mtx_ALL_ROWS) {
    len = mtx->order;
  } else {
    len = rng->high - rng->low +1;
  }
  if (sp->cap < len) {
    sp->len = 0;
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_org_col_sparse called with\n");
    FPRINTF(g_mtxerr,"       sparse of insufficient capacity.\n");
    return TRUE;
  }

  last_value_matrix = mtx;
  org_col = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org_col]);
  k = 0;

  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*clink) ) {
      sp->idata[k] = (*clink)->row;
      sp->data[k] = (*clink)->value;
      k++;
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    }
  } else if( rng->high >= rng->low ) {
    int32 *tocur;
    tocur = mtx->perm.row.org_to_cur;
    while( NOTNULL(*clink) ) {
      if( in_range(rng,tocur[(*clink)->row]) ) {
        sp->idata[k] = (*clink)->row;
        sp->data[k] = (*clink)->value;
        k++;
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else {
        clink = &((*clink)->next.row);
      }
    }
  }
  sp->len = k;
  return FALSE;
}
1873
1874 boolean mtx_steal_cur_row_sparse(mtx_matrix_t mtx, int32 row,
1875 mtx_sparse_t *sp, mtx_range_t *rng)
1876 {
1877 struct element_t **rlink, **clink;
1878 int32 org_row, *tocur;
1879 int32 len,k;
1880
1881 #if MTX_DEBUG
1882 if( !mtx_check_matrix(mtx) ) return TRUE;
1883 #endif
1884 if (sp == mtx_CREATE_SPARSE) {
1885 FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_cur_row_sparse called with\n");
1886 FPRINTF(g_mtxerr," mtx_CREATE_SPARSE. Not supported.\n");
1887 return TRUE;
1888 }
1889 if (rng == mtx_ALL_COLS) {
1890 len = mtx->order;
1891 } else {
1892 len = rng->high - rng->low +1;
1893 }
1894 if (sp->cap < len) {
1895 sp->len = 0;
1896 FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_cur_row_sparse called with\n");
1897 FPRINTF(g_mtxerr," sparse of insufficient capacity.\n");
1898 return TRUE;
1899 }
1900
1901 tocur = mtx->perm.col.org_to_cur;
1902 last_value_matrix = mtx;
1903 org_row = mtx->perm.row.cur_to_org[row];
1904 rlink = &(mtx->hdr.row[org_row]);
1905 k = 0;
1906
1907 if( rng == mtx_ALL_COLS ) {
1908 while( NOTNULL(*rlink) ) {
1909 sp->idata[k] = tocur[(*rlink)->col];
1910 sp->data[k] = (*rlink)->value;
1911 k++;
1912 clink = &(mtx->hdr.col[(*rlink)->col]);
1913 for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
1914 clink = &((*clink)->next.row);
1915 delete_from_row(rlink);
1916 delete_from_col(clink);
1917 }
1918 } else if( rng->high >= rng->low ) {
1919 while( NOTNULL(*rlink) ) {
1920 if( in_range(rng,tocur[(*rlink)->col]) ) {
1921 sp->idata[k] = tocur[(*rlink)->col];
1922 sp->data[k] = (*rlink)->value;
1923 k++;
1924 clink = &(mtx->hdr.col[(*rlink)->col]);
1925 for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
1926 clink = &((*clink)->next.row);
1927 delete_from_row(rlink);
1928 delete_from_col(clink);
1929 } else {
1930 rlink = &((*rlink)->next.col);
1931 }
1932 }
1933 }
1934 sp->len = k;
1935 return FALSE;
1936 }
1937
1938 boolean mtx_steal_cur_col_sparse(mtx_matrix_t mtx, int32 col,
1939 mtx_sparse_t *sp, mtx_range_t *rng)
1940 {
1941 struct element_t **clink, **rlink;
1942 int32 org_col, *tocur;
1943 int32 len,k;
1944
1945 #if MTX_DEBUG
1946 if( !mtx_check_matrix(mtx) ) return TRUE;
1947 #endif
1948 if (sp == mtx_CREATE_SPARSE) {
1949 FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_cur_col_sparse called with\n");
1950 FPRINTF(g_mtxerr," mtx_CREATE_SPARSE. Not supported.\n");
1951 return TRUE;
1952 }
1953 if (rng == mtx_ALL_ROWS) {
1954 len = mtx->order;
1955 } else {
1956 len = rng->high - rng->low +1;
1957 }
1958 if (sp->cap < len) {
1959 sp->len = 0;
1960 FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_cur_col_sparse called with\n");
1961 FPRINTF(g_mtxerr," sparse of insufficient capacity.\n");
1962 return TRUE;
1963 }
1964
1965 tocur = mtx->perm.row.org_to_cur;
1966 last_value_matrix = mtx;
1967 org_col = mtx->perm.col.cur_to_org[col];
1968 clink = &(mtx->hdr.col[org_col]);
1969 k = 0;
1970
1971 if( rng == mtx_ALL_ROWS ) {
1972 while( NOTNULL(*clink) ) {
1973 sp->idata[k] = tocur[(*clink)->row];
1974 sp->data[k] = (*clink)->value;
1975 k++;
1976 rlink = &(mtx->hdr.row[(*clink)->row]);
1977 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1978 rlink = &((*rlink)->next.col);
1979 delete_from_col(clink);
1980 delete_from_row(rlink);
1981 }
1982 } else if( rng->high >= rng->low ) {
1983 while( NOTNULL(*clink) ) {
1984 if( in_range(rng,tocur[(*clink)->row]) ) {
1985 sp->idata[k] = tocur[(*clink)->row];
1986 sp->data[k] = (*clink)->value;
1987 k++;
1988 rlink = &(mtx->hdr.row[(*clink)->row]);
1989 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1990 rlink = &((*rlink)->next.col);
1991 delete_from_col(clink);
1992 delete_from_row(rlink);
1993 } else {
1994 clink = &((*clink)->next.row);
1995 }
1996 }
1997 }
1998 sp->len = k;
1999 return FALSE;
2000 }
2001
2002 void mtx_fill_org_row_vec(mtx_matrix_t mtx, int32 row,
2003 real64 *vec, mtx_range_t *rng)
2004 {
2005 int32 org_row,highcol, *toorg;
2006 register int32 org_col;
2007 register real64 value;
2008
2009 #if MTX_DEBUG
2010 if(!mtx_check_matrix(mtx)) return;
2011 #endif
2012
2013 org_row = mtx->perm.row.cur_to_org[row];
2014 if( rng == mtx_ALL_COLS ) {
2015 highcol=mtx->order;
2016 for( org_col=0 ; org_col <highcol ; org_col++ ) {
2017 if ((value=vec[org_col]) != D_ZERO)
2018 mtx_create_element_value(mtx,org_row,org_col,value);
2019 }
2020 } else {
2021 register int32 cur_col;
2022
2023 toorg = mtx->perm.col.cur_to_org;
2024 highcol= rng->high;
2025 for ( cur_col=rng->low; cur_col<=highcol; cur_col++) {
2026 if ((value=vec[(org_col=toorg[cur_col])]) != D_ZERO)
2027 mtx_create_element_value(mtx,org_row,org_col,value);
2028 }
2029 }
2030 }
2031
2032 void mtx_fill_org_col_vec(mtx_matrix_t mtx, int32 col,
2033 real64 *vec, mtx_range_t *rng)
2034 {
2035 int32 org_col,highrow, *toorg;
2036 register int32 org_row;
2037 register real64 value;
2038
2039 #if MTX_DEBUG
2040 if(!mtx_check_matrix(mtx)) return;
2041 #endif
2042
2043 org_col = mtx->perm.col.cur_to_org[col];
2044 if( rng == mtx_ALL_ROWS ) {
2045 highrow=mtx->order;
2046 for( org_row=0 ; org_row <highrow ; org_row++ ) {
2047 if ((value=vec[org_row]) != D_ZERO)
2048 mtx_create_element_value(mtx,org_row,org_col,value);
2049 }
2050 } else {
2051 register int32 cur_row;
2052
2053 toorg = mtx->perm.row.cur_to_org;
2054 highrow= rng->high;
2055 for ( cur_row=rng->low; cur_row<=highrow; cur_row++) {
2056 if ((value=vec[(org_row=toorg[cur_row])]) != D_ZERO)
2057 mtx_create_element_value(mtx,org_row,org_col,value);
2058 }
2059 }
2060 }
2061
2062 void mtx_fill_cur_row_vec(mtx_matrix_t mtx, int32 row,
2063 real64 *vec, mtx_range_t *rng)
2064 {
2065 int32 org_row,highcol,lowcol, *toorg;
2066 register int32 cur_col;
2067 register real64 value;
2068
2069 #if MTX_DEBUG
2070 if(!mtx_check_matrix(mtx)) return;
2071 #endif
2072
2073 org_row = mtx->perm.row.cur_to_org[row];
2074 toorg=mtx->perm.col.cur_to_org;
2075 if( rng == mtx_ALL_COLS ) {
2076 highcol = mtx->order-1;
2077 lowcol = 0;
2078 } else {
2079 highcol= rng->high;
2080 lowcol=rng->low;
2081 }
2082 for( cur_col=lowcol ; cur_col <= highcol ; cur_col++ ) {
2083 if ((value=vec[cur_col]) != D_ZERO)
2084 mtx_create_element_value(mtx,org_row,toorg[cur_col],value);
2085 }
2086 }
2087
2088 void mtx_fill_cur_col_vec(mtx_matrix_t mtx, int32 col,
2089 real64 *vec, mtx_range_t *rng)
2090 {
2091 int32 org_col,highrow,lowrow, *toorg;
2092 register int32 cur_row;
2093 register real64 value;
2094
2095 #if MTX_DEBUG
2096 if(!mtx_check_matrix(mtx)) return;
2097 #endif
2098
2099 org_col = mtx->perm.col.cur_to_org[col];
2100 toorg=mtx->perm.row.cur_to_org;
2101 if( rng == mtx_ALL_ROWS ) {
2102 highrow=mtx->order-1;
2103 lowrow=0;
2104 } else {
2105 highrow= rng->high;
2106 lowrow=rng->low;
2107 }
2108 for( cur_row=lowrow ; cur_row <= highrow ; cur_row++ ) {
2109 if ((value=vec[cur_row]) != D_ZERO)
2110 mtx_create_element_value(mtx,toorg[cur_row],org_col,value);
2111 }
2112 }
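/*
 * Usage sketch (not compiled in): scattering a dense cur-indexed
 * vector into cur row r. The fill primitives create elements
 * unconditionally, so the caller should clear the target range
 * first to avoid duplicate elements. The range bounds below are
 * illustrative only.
 */
#if 0
static void example_fill_row(mtx_matrix_t mtx, int32 r, real64 *vec)
{
  mtx_range_t rng;

  rng.low = 0;
  rng.high = 9;                /* first ten cur columns */
  mtx_clear_row(mtx,r,&rng);   /* avoid duplicate incidence */
  mtx_fill_cur_row_vec(mtx,r,vec,&rng);
}
#endif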
2113
2114 void mtx_dropfill_cur_col_vec(mtx_matrix_t mtx, int32 col,
2115 real64 *vec, mtx_range_t *rng,
2116 real64 tol)
2117 {
2118 int32 org_col,highrow,lowrow, *toorg;
2119 register int32 cur_row;
2120 register real64 value;
2121
2122 if (tol==0.0) {
2123 mtx_fill_cur_col_vec(mtx,col,vec,rng);
2124 return;
2125 }
2126
2127 #if MTX_DEBUG
2128 if(!mtx_check_matrix(mtx)) return;
2129 #endif
2130
2131 org_col = mtx->perm.col.cur_to_org[col];
2132 toorg=mtx->perm.row.cur_to_org;
2133 if( rng == mtx_ALL_ROWS ) {
2134 highrow=mtx->order-1;
2135 lowrow=0;
2136 } else {
2137 highrow= rng->high;
2138 lowrow=rng->low;
2139 }
2140 for( cur_row=lowrow ; cur_row <= highrow ; cur_row++ ) {
2141 if (fabs(value=vec[cur_row]) > tol)
2142 mtx_create_element_value(mtx,toorg[cur_row],org_col,value);
2143 }
2144 }
2145
2146 void mtx_dropfill_cur_row_vec(mtx_matrix_t mtx, int32 row,
2147 real64 *vec, mtx_range_t *rng,
2148 real64 tol)
2149 {
2150 int32 org_row,highcol,lowcol, *toorg;
2151 register int32 cur_col;
2152 register real64 value;
2153
2154 if (tol==0.0) {
2155 mtx_fill_cur_row_vec(mtx,row,vec,rng);
2156 return;
2157 }
2158
2159 #if MTX_DEBUG
2160 if(!mtx_check_matrix(mtx)) return;
2161 #endif
2162
2163 org_row = mtx->perm.row.cur_to_org[row];
2164 toorg=mtx->perm.col.cur_to_org;
2165 if( rng == mtx_ALL_COLS ) {
2166 highcol = mtx->order-1;
2167 lowcol = 0;
2168 } else {
2169 highcol= rng->high;
2170 lowcol=rng->low;
2171 }
2172 for( cur_col=lowcol ; cur_col <= highcol ; cur_col++ ) {
2173 if (fabs(value=vec[cur_col]) > tol)
2174 mtx_create_element_value(mtx,org_row,toorg[cur_col],value);
2175 }
2176 }
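/*
 * Usage sketch (not compiled in): like mtx_fill_cur_row_vec, except
 * entries with magnitude <= tol are dropped instead of created.
 * tol == 0.0 falls through to the plain fill above. The threshold
 * below is illustrative, not a recommendation.
 */
#if 0
static void example_dropfill_row(mtx_matrix_t mtx, int32 r, real64 *vec)
{
  mtx_dropfill_cur_row_vec(mtx,r,vec,mtx_ALL_COLS,1e-12);
}
#endif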
2177
2178 void mtx_fill_org_row_sparse(mtx_matrix_t mtx, int32 row,
2179 const mtx_sparse_t *sp)
2180 {
2181 int32 orgrow,i;
2182 #if MTX_DEBUG
2183 if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
2184 #endif
2185 orgrow = mtx->perm.row.cur_to_org[row];
2186 for (i=0; i < sp->len; i++) {
2187 if (sp->data[i] != D_ZERO
2188 #if MTX_DEBUG
2189 && sp->idata[i] >=0 && sp->idata[i] < mtx->order
2190 #endif
2191 ) {
2192 mtx_create_element_value(mtx,orgrow,sp->idata[i],sp->data[i]);
2193 }
2194 }
2195 }
2196
2197 void mtx_fill_org_col_sparse(mtx_matrix_t mtx, int32 col,
2198 const mtx_sparse_t *sp)
2199 {
2200 int32 orgcol,i;
2201 #if MTX_DEBUG
2202 if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
2203 #endif
2204 orgcol = mtx->perm.col.cur_to_org[col];
2205 for (i=0; i < sp->len; i++) {
2206 if (sp->data[i] != D_ZERO
2207 #if MTX_DEBUG
2208 && sp->idata[i] >=0 && sp->idata[i] < mtx->order
2209 #endif
2210 ) {
2211 mtx_create_element_value(mtx,sp->idata[i],orgcol,sp->data[i]);
2212 }
2213 }
2214 }
2215
2216 void mtx_fill_cur_row_sparse(mtx_matrix_t mtx, int32 row,
2217 const mtx_sparse_t *sp)
2218 {
2219 int32 orgrow,i;
2220 #if MTX_DEBUG
2221 if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
2222 #endif
2223 orgrow = mtx->perm.row.cur_to_org[row];
2224 for (i=0; i < sp->len; i++) {
2225 if (
2226 #if MTX_DEBUG
2227 sp->idata[i] >=0 && sp->idata[i] < mtx->order &&
2228 #endif
2229 sp->data[i] != D_ZERO) {
2230 mtx_create_element_value(mtx,orgrow,
2231 mtx->perm.col.cur_to_org[sp->idata[i]],
2232 sp->data[i]);
2233 }
2234 }
2235 }
2236
2237 void mtx_fill_cur_col_sparse(mtx_matrix_t mtx, int32 col,
2238 const mtx_sparse_t *sp)
2239 {
2240 int32 orgcol,i;
2241 #if MTX_DEBUG
2242 if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
2243 #endif
2244 orgcol = mtx->perm.col.cur_to_org[col];
2245 for (i=0; i < sp->len; i++) {
2246 if (
2247 #if MTX_DEBUG
2248 sp->idata[i] >=0 && sp->idata[i] < mtx->order &&
2249 #endif
2250 sp->data[i] != D_ZERO) {
2251 mtx_create_element_value(mtx,
2252 mtx->perm.row.cur_to_org[sp->idata[i]], /* idata holds cur row indices */
2253 orgcol,
2254 sp->data[i]);
2255 }
2256 }
2257 }
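/*
 * Usage sketch (not compiled in): the steal/fill pairs round-trip a
 * row through a sparse, provided the same indexing flavor (org or
 * cur) is used on both ends and the permutation does not change in
 * between. The helper name is illustrative only.
 */
#if 0
static void example_move_row(mtx_matrix_t mtx, int32 from, int32 to,
                             mtx_sparse_t *sp)
{
  /* sp->cap must already be >= the order of mtx */
  if (!mtx_steal_cur_row_sparse(mtx,from,sp,mtx_ALL_COLS)) {
    mtx_fill_cur_row_sparse(mtx,to,sp);
  }
}
#endif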
2258
2259 void mtx_mult_row(mtx_matrix_t mtx, int32 row, real64 factor, mtx_range_t *rng)
2260 {
2261 struct element_t *elt;
2262 int32 *tocur;
2263
2264 if( factor == D_ZERO ) {
2265 mtx_clear_row(mtx,row,rng);
2266 return;
2267 }
2268
2269 if (factor == D_ONE) return;
2270 #if MTX_DEBUG
2271 if(!mtx_check_matrix(mtx)) return;
2272 #endif
2273
2274 tocur = mtx->perm.col.org_to_cur;
2275 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[row]];
2276 if( rng == mtx_ALL_COLS )
2277 for( ; NOTNULL(elt); elt = elt->next.col ) elt->value *= factor;
2278 else if( rng->high >= rng->low )
2279 for( ; NOTNULL(elt); elt = elt->next.col )
2280 if( in_range(rng,tocur[elt->col]) ) elt->value *= factor;
2281 }
2282
2283 void mtx_mult_col(mtx_matrix_t mtx, int32 col, real64 factor, mtx_range_t *rng)
2284 {
2285 struct element_t *elt;
2286 int32 *tocur;
2287
2288 if( factor == D_ZERO ) {
2289 mtx_clear_col(mtx,col,rng);
2290 return;
2291 }
2292 if (factor == D_ONE) return;
2293
2294 #if MTX_DEBUG
2295 if(!mtx_check_matrix(mtx)) return;
2296 #endif
2297
2298 tocur = mtx->perm.row.org_to_cur;
2299 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[col]];
2300 if( rng == mtx_ALL_ROWS )
2301 for( ; NOTNULL(elt); elt = elt->next.row ) elt->value *= factor;
2302 else if( rng->high >= rng->low )
2303 for( ; NOTNULL(elt); elt = elt->next.row )
2304 if( in_range(rng,tocur[elt->row]) ) elt->value *= factor;
2305 }
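/*
 * Usage sketch (not compiled in): normalizing cur row r by a
 * nonzero pivot value, as a factorization might. Factors of 0.0
 * and 1.0 are short-circuited above (clear and no-op respectively).
 */
#if 0
static void example_scale_row(mtx_matrix_t mtx, int32 r, real64 pivot)
{
  mtx_mult_row(mtx,r,D_ONE/pivot,mtx_ALL_COLS);
}
#endif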
2306
2307 void mtx_mult_row_zero(mtx_matrix_t mtx, int32 row, mtx_range_t *rng)
2308 {
2309 struct element_t *elt;
2310 int32 *tocur;
2311
2312 #if MTX_DEBUG
2313 if(!mtx_check_matrix(mtx)) return;
2314 #endif
2315
2316 tocur = mtx->perm.col.org_to_cur;
2317 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[row]];
2318 if( rng == mtx_ALL_COLS )
2319 for( ; NOTNULL(elt); elt = elt->next.col ) elt->value = D_ZERO;
2320 else if( rng->high >= rng->low )
2321 for( ; NOTNULL(elt); elt = elt->next.col )
2322 if( in_range(rng,tocur[elt->col]) ) elt->value = D_ZERO;
2323 }
2324
2325 void mtx_mult_col_zero(mtx_matrix_t mtx, int32 col, mtx_range_t *rng)
2326 {
2327 struct element_t *elt;
2328 int32 *tocur;
2329
2330 #if MTX_DEBUG
2331 if(!mtx_check_matrix(mtx)) return;
2332 #endif
2333
2334 tocur = mtx->perm.row.org_to_cur;
2335 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[col]];
2336 if( rng == mtx_ALL_ROWS )
2337 for( ; NOTNULL(elt); elt = elt->next.row ) elt->value = D_ZERO;
2338 else if( rng->high >= rng->low )
2339 for( ; NOTNULL(elt); elt = elt->next.row )
2340 if( in_range(rng,tocur[elt->row]) ) elt->value = D_ZERO;
2341 }
2342
2343 void mtx_add_row(mtx_matrix_t mtx, int32 s_cur, int32 t_cur, real64 factor,
2344 mtx_range_t *rng)
2345 {
2346 register int32 org_col;
2347 int32 t_org,*tocur;
2348 struct element_t **arr,*elt;
2349
2350 #if MTX_DEBUG
2351 if(!mtx_check_matrix(mtx)) return;
2352 #endif
2353
2354 t_org = mtx->perm.row.cur_to_org[t_cur];
2355 if( rng == mtx_ALL_COLS ) {
2356 arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
2357 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[s_cur]];
2358 for( ; NOTNULL(elt); elt = elt->next.col ) {
2359 if( ISNULL(arr[(org_col=elt->col)]) ) {
2360 arr[org_col] =
2361 mtx_create_element_value(mtx,t_org,org_col,(factor * elt->value));
2362 } else {
2363 arr[org_col]->value += factor * elt->value;
2364 }
2365 }
2366 mtx_renull_using_row(mtx,t_org,arr);
2367 } else if( rng->high >= rng->low ) {
2368 register int32 cur_col;
2369
2370 tocur = mtx->perm.col.org_to_cur;
2371 arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
2372 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[s_cur]];
2373 for( ; NOTNULL(elt); elt = elt->next.col ) {
2374 cur_col=tocur[(org_col=elt->col)];
2375 if( in_range(rng,cur_col) )
2376 if( NOTNULL(arr[org_col]) ) {
2377 arr[org_col]->value += factor * elt->value;
2378 } else {
2379 arr[org_col] =
2380 mtx_create_element_value(mtx,t_org,org_col,
2381 (factor * elt->value));
2382 /* hit rate usually lower */
2383 }
2384 }
2385 mtx_renull_using_row(mtx,t_org,arr);
2386 }
2387 mtx_null_vector_release();
2388 }
2389
2390 void mtx_add_col(mtx_matrix_t mtx, int32 s_cur, int32 t_cur, real64 factor,
2391 mtx_range_t *rng)
2392 {
2393 int32 t_org,*tocur;
2394 struct element_t **arr,*elt;
2395
2396 #if MTX_DEBUG
2397 if(!mtx_check_matrix(mtx)) return;
2398 #endif
2399
2400 if( rng == mtx_ALL_ROWS ) {
2401 t_org = mtx->perm.col.cur_to_org[t_cur];
2402 arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
2403 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[s_cur]];
2404 for( ; NOTNULL(elt); elt = elt->next.row )
2405 if( ISNULL(arr[elt->row]) ) {
2406 arr[elt->row] =
2407 mtx_create_element_value(mtx,elt->row,t_org,
2408 (factor * elt->value));
2409 } else arr[elt->row]->value += factor * elt->value;
2410 mtx_renull_using_col(mtx,t_org,arr);
2411 } else if( rng->high >= rng->low ) {
2412 tocur = mtx->perm.row.org_to_cur;
2413 t_org = mtx->perm.col.cur_to_org[t_cur];
2414 arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
2415 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[s_cur]];
2416 for( ; NOTNULL(elt); elt = elt->next.row )
2417 if( in_range(rng,tocur[elt->row]) )
2418 if( NOTNULL(arr[elt->row]) ) {
2419 arr[elt->row]->value += factor * elt->value;
2420 } else {
2421 arr[elt->row] =
2422 mtx_create_element_value(mtx,elt->row,t_org,
2423 (factor * elt->value));
2424 }
2425 mtx_renull_using_col(mtx,t_org,arr);
2426 }
2427 mtx_null_vector_release();
2428 }
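/*
 * Usage sketch (not compiled in): one elimination update,
 * t_row += factor * s_row, restricted to the columns right of a
 * pivot at cur column s. Rows and range are all cur-indexed; ncols
 * and the pivot position are illustrative assumptions.
 */
#if 0
static void example_eliminate(mtx_matrix_t mtx, int32 s, int32 t,
                              real64 factor, int32 ncols)
{
  mtx_range_t rest;

  rest.low = s + 1;            /* columns right of the pivot */
  rest.high = ncols - 1;
  mtx_add_row(mtx,s,t,factor,&rest);
}
#endif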
2429
2430 static struct element_t **mtx_expand_row_series( mtx_matrix_t mtx, int32 org)
2431 /**
2432 *** Expands the given row into an array of pointers, indexed on original
2433 *** col number. The array is obtained from mtx_null_row_vector().
2434 *** Be sure to call mtx_null_row_vector_release() when done with the vector,
2435 *** after you have rezeroed it.
2436 **/
2437 {
2438 struct element_t **arr;
2439 struct element_t *elt;
2440
2441 arr = mtx_null_row_vector(mtx->order);
2442 for( elt=mtx->hdr.row[org]; NOTNULL(elt); elt=elt->next.col )
2443 arr[elt->col] = elt;
2444 return(arr);
2445 }
2446
2447 static struct element_t **mtx_expand_col_series(mtx_matrix_t mtx, int32 org)
2448 /**
2449 *** Expands the given col into an array of pointers, indexed on original
2450 *** row number. The array is obtained from mtx_null_col_vector().
2451 *** Be sure to call mtx_null_col_vector_release() when done with the vector,
2452 *** after you have rezeroed it.
2453 **/
2454 {
2455 struct element_t **arr;
2456 struct element_t *elt;
2457
2458 arr = mtx_null_col_vector(mtx->order);
2459 for( elt = mtx->hdr.col[org] ; NOTNULL(elt) ; elt = elt->next.row )
2460 arr[elt->row] = elt;
2461 return(arr);
2462 }
2463
2464 struct add_series_data {
2465 mtx_matrix_t mtx; /* matrix we're operating on */
2466 struct element_t **arr; /* target row/col expansion array */
2467 int32 *tocur; /* col/row permutation vector */
2468 int32 t_org; /* org row/col which is expanded */
2469 };
2470
2471 static struct add_series_data
2472 rsdata={NULL,NULL,NULL,mtx_NONE},
2473 csdata={NULL,NULL,NULL,mtx_NONE};
2474
2475 static void add_series_data_release(void)
2476 {
2477 /* if apparently sane and an expansion array is held, release it */
2478 if (NOTNULL(rsdata.mtx) && NOTNULL(rsdata.tocur)
2479 && rsdata.t_org >= 0 && NOTNULL(rsdata.arr)) {
2480 mtx_null_row_vector_release();
2481 }
2482 if (NOTNULL(csdata.mtx) && NOTNULL(csdata.tocur)
2483 && csdata.t_org >= 0 && NOTNULL(csdata.arr)) {
2484 mtx_null_col_vector_release();
2485 }
2486 /* reinit data */
2487
2488 rsdata.mtx = NULL;
2489 rsdata.arr = NULL;
2490 rsdata.tocur = NULL;
2491 rsdata.t_org = mtx_NONE;
2492
2493 csdata.mtx = NULL;
2494 csdata.arr = NULL;
2495 csdata.tocur = NULL;
2496 csdata.t_org = mtx_NONE;
2497 }
2498
2499 void mtx_add_row_series_init(mtx_matrix_t mtx,int32 t_cur,boolean use)
2500 {
2501
2502 #if MTX_DEBUG
2503 if(!mtx_check_matrix(mtx)) return;
2504 #endif
2505
2506 if ( !(rsdata.mtx) ) {
2507 /* this is a grabbing call due to memory reuse */
2508 if ( mtx && (t_cur >= 0) && (t_cur < mtx->order) ) {
2509 rsdata.mtx = mtx;
2510 rsdata.tocur = mtx->perm.col.org_to_cur;
2511 rsdata.t_org = mtx->perm.row.cur_to_org[t_cur];
2512 rsdata.arr = mtx_expand_row_series(mtx,rsdata.t_org);
2513 } else {
2514 FPRINTF(g_mtxerr,"ERROR: mtx_add_row_series_init called\n");
2515 FPRINTF(g_mtxerr," for grab with invalid row or mtx.\n");
2516 FPRINTF(g_mtxerr," row number %d.\n",t_cur);
2517 }
2518 return;
2519 } else {
2520 /* this is supposed to be a releasing call */
2521 if (t_cur != mtx_NONE) {
2522 FPRINTF(g_mtxerr,"ERROR: mtx_add_row_series_init called for\n");
2523 FPRINTF(g_mtxerr," release without mtx_NONE.\n");
2524 return;
2525 }
2526 if (mtx != rsdata.mtx) {
2527 FPRINTF(g_mtxerr,"ERROR: mtx_add_row_series_init called for\n");
2528 FPRINTF(g_mtxerr," release with ungrabbed matrix.\n");
2529 return;
2530 }
2531 if (use) {
2532 mtx_renull_using_row(rsdata.mtx, rsdata.t_org, rsdata.arr);
2533 } else {
2534 mtx_renull_all(rsdata.mtx, rsdata.arr);
2535 }
2536 mtx_null_row_vector_release();
2537 rsdata.mtx = NULL;
2538 rsdata.arr = NULL;
2539 rsdata.tocur = NULL;
2540 rsdata.t_org = mtx_NONE;
2541 }
2542 }
2543
2544 void mtx_add_row_series(int32 s_cur,real64 factor,mtx_range_t *rng)
2545 {
2546 register int32 org_col;
2547 int32 t_org,*tocur;
2548 struct element_t **arr,*elt;
2549 mtx_matrix_t mtx;
2550
2551 if ( !(rsdata.mtx) ) {
2552 FPRINTF(g_mtxerr,"ERROR: mtx_add_row_series called\n");
2553 FPRINTF(g_mtxerr," without grabbing target row first.\n");
2554 return;
2555 }
2556 mtx = rsdata.mtx;
2557 arr = rsdata.arr;
2558 tocur = rsdata.tocur;
2559 t_org = rsdata.t_org;
2560
2561 #if MTX_DEBUG
2562 if(!mtx_check_matrix(mtx)) return;
2563 #endif
2564
2565 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[s_cur]];
2566 if( rng == mtx_ALL_COLS ) {
2567 for( ; NOTNULL(elt); elt = elt->next.col ) {
2568 if( ISNULL(arr[(org_col=elt->col)]) ) {
2569 arr[org_col] =
2570 mtx_create_element_value(mtx,t_org,org_col,(factor * elt->value));
2571 } else {
2572 arr[org_col]->value += factor * elt->value;
2573 }
2574 }
2575 return;
2576 }
2577 /* fast_in_range is a 10% winner on the alpha, and should be even more
2578 of a win on other platforms. */
2579 if( rng->high >= rng->low ) {
2580 register int32 cur_col, lo,hi;
2581 lo=rng->low; hi= rng->high;
2582 for( ; NOTNULL(elt); elt = elt->next.col ) {
2583 cur_col=tocur[(org_col=elt->col)];
2584 if( fast_in_range(lo,hi,cur_col) ) {
2585 if( NOTNULL(arr[org_col]) ) {
2586 arr[org_col]->value += factor * elt->value;
2587 } else {
2588 arr[org_col] =
2589 mtx_create_element_value(mtx,t_org,org_col,(factor*elt->value));
2590 }
2591 }
2592 }
2593 }
2594 }
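/*
 * Usage sketch (not compiled in): the series protocol amortizes the
 * target-row expansion across several additions. Grab the target
 * once (the use argument is ignored on the grab), add any number of
 * source rows, then release by calling the init again with mtx_NONE;
 * use == TRUE renulls via the row's remaining incidence.
 */
#if 0
static void example_row_series(mtx_matrix_t mtx, int32 t,
                               int32 *srows, real64 *factors, int32 n)
{
  int32 i;

  mtx_add_row_series_init(mtx,t,FALSE);       /* grab target row t */
  for (i = 0; i < n; i++) {
    mtx_add_row_series(srows[i],factors[i],mtx_ALL_COLS);
  }
  mtx_add_row_series_init(mtx,mtx_NONE,TRUE); /* release */
}
#endif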
2595
2596 void mtx_add_col_series_init(mtx_matrix_t mtx,int32 t_cur,boolean use)
2597 {
2598
2599 #if MTX_DEBUG
2600 if(!mtx_check_matrix(mtx)) return;
2601 #endif
2602
2603 if ( !(csdata.mtx) ) {
2604 /* this is a grabbing call */
2605 if ( mtx && (t_cur >= 0) && (t_cur < mtx->order) ) {
2606 csdata.mtx = mtx;
2607 csdata.tocur = mtx->perm.row.org_to_cur;
2608 csdata.t_org = mtx->perm.col.cur_to_org[t_cur];
2609 csdata.arr = mtx_expand_col_series(mtx,csdata.t_org);
2610 } else {
2611 FPRINTF(g_mtxerr,"ERROR: mtx_add_col_series_init called\n");
2612 FPRINTF(g_mtxerr," for grab with invalid col or mtx.\n");
2613 FPRINTF(g_mtxerr," col number %d.\n",t_cur);
2614 }
2615 return;
2616 } else {
2617 /* this is supposed to be a releasing call */
2618 if (t_cur != mtx_NONE) {
2619 FPRINTF(g_mtxerr,"ERROR: mtx_add_col_series_init called for\n");
2620 FPRINTF(g_mtxerr," release without mtx_NONE.\n");
2621 return;
2622 }
2623 if (mtx != csdata.mtx) {
2624 FPRINTF(g_mtxerr,"ERROR: mtx_add_col_series_init called for\n");
2625 FPRINTF(g_mtxerr," release with ungrabbed matrix.\n");
2626 return;
2627 }
2628 if (use) {
2629 mtx_renull_using_col(csdata.mtx, csdata.t_org, csdata.arr);
2630 } else {
2631 mtx_renull_all(csdata.mtx, csdata.arr);
2632 }
2633 mtx_null_col_vector_release();
2634 csdata.mtx = NULL;
2635 csdata.arr = NULL;
2636 csdata.tocur = NULL;
2637 csdata.t_org = mtx_NONE;
2638 }
2639 }
2640
2641 void mtx_add_col_series(int32 s_cur,real64 factor,mtx_range_t *rng)
2642 {
2643 register int32 org_row;
2644 int32 t_org,*tocur;
2645 struct element_t **arr,*elt;
2646 mtx_matrix_t mtx;
2647
2648 if ( !(csdata.mtx) ) {
2649 FPRINTF(g_mtxerr,"ERROR: mtx_add_col_series called\n");
2650 FPRINTF(g_mtxerr," without grabbing target col first.\n");
2651 return;
2652 }
2653 mtx = csdata.mtx;
2654 arr = csdata.arr;
2655 tocur = csdata.tocur;
2656 t_org = csdata.t_org;
2657
2658 #if MTX_DEBUG
2659 if(!mtx_check_matrix(mtx)) return;
2660 #endif
2661
2662 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[s_cur]];
2663 if( rng == mtx_ALL_ROWS ) {
2664 for( ; NOTNULL(elt); elt = elt->next.row ) {
2665 if( ISNULL(arr[(org_row=elt->row)]) ) {
2666 arr[org_row] =
2667 mtx_create_element_value(mtx,org_row,t_org,(factor * elt->value));
2668 } else {
2669 arr[org_row]->value += factor * elt->value;
2670 }
2671 }
2672 return;
2673 }
2674 if( rng->high >= rng->low ) {
2675 register int32 cur_row;
2676
2677 for( ; NOTNULL(elt); elt = elt->next.row ) {
2678 cur_row=tocur[(org_row=elt->row)];
2679 if( in_range(rng,cur_row) ) {
2680 if( NOTNULL(arr[org_row]) ) {
2681 arr[org_row]->value += factor * elt->value;
2682 } else {
2683 arr[org_row] =
2684 mtx_create_element_value(mtx,org_row,t_org,(factor*elt->value));
2685 }
2686 }
2687 }
2688 }
2689 }
2690
2691 void mtx_old_add_row_sparse(mtx_matrix_t mtx,
2692 int32 t_cur, /* cur index of target row */
2693 real64 *s_cur, /* dense source row, curindexed */
2694 real64 factor, /* coefficient */
2695 mtx_range_t *rng, /* range to add over or */
2696 int32 *ilist) /* list to add over */
2697 {
2698 int32 t_org,*toorg,cindex,orgcindex,hilim;
2699 struct element_t **arr;
2700 real64 value;
2701
2702 #if MTX_DEBUG
2703 if(!mtx_check_matrix(mtx)) return;
2704 #endif
2705
2706 if (factor==D_ZERO) return; /* adding 0 is rather silly */
2707
2708 if( rng == mtx_ALL_COLS ) {
2709 t_org = mtx->perm.row.cur_to_org[t_cur]; /* org of target row */
2710 arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
2711 toorg = mtx->perm.col.cur_to_org; /* current col perm */
2712 hilim=mtx->order;
2713
2714 if (!ilist) { /* FULL ROW CASE no ilist */
2715 for (cindex=0; cindex< hilim; cindex++) {
2716 if ((value=s_cur[cindex])!=D_ZERO) {
2717 if ( ISNULL(arr[(orgcindex=toorg[cindex])]) ) { /* add element */
2718 arr[orgcindex] =
2719 mtx_create_element_value(mtx,t_org,orgcindex,(factor * value));
2720 } else { /* increment element */
2721 arr[orgcindex]->value += factor * value;
2722 }
2723 }
2724 }
2725 } else { /* SPARSE ROW CASE with ilist */
2726 int32 i;
2727 i=0;
2728 while ((cindex=ilist[i])>=0) {
2729 value=s_cur[cindex];
2730 if ( ISNULL(arr[(orgcindex=toorg[cindex])]) ) { /* add element */
2731 arr[orgcindex] =
2732 mtx_create_element_value(mtx,t_org,orgcindex,(factor * value));
2733 } else { /* increment element */
2734 arr[orgcindex]->value += factor * value;
2735 }
2736 i++;
2737 }
2738 }
2739 mtx_renull_using_row(mtx,t_org,arr);
2740 } else if( rng->high >= rng->low ) { /* DENSE RANGE CASE */
2741 t_org = mtx->perm.row.cur_to_org[t_cur]; /* org of target row */
2742 arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
2743 toorg = mtx->perm.col.cur_to_org; /* current col perm */
2744 hilim = rng->high;
2745
2746 for (cindex=rng->low; cindex<= hilim; cindex++) {
2747 if ((value=s_cur[cindex])!=D_ZERO) {
2748 if ( ISNULL(arr[(orgcindex=toorg[cindex])]) ) { /* add element */
2749 arr[orgcindex] =
2750 mtx_create_element_value(mtx,t_org,orgcindex,(factor * value));
2751 } else { /* increment element */
2752 arr[orgcindex]->value += factor * value;
2753 }
2754 }
2755 }
2756 mtx_renull_using_row(mtx,t_org,arr);
2757 }
2758 mtx_null_vector_release();
2759 }
2760
2761 void mtx_old_add_col_sparse(mtx_matrix_t mtx,
2762 int32 t_cur, /* cur index of target col */
2763 real64 *s_cur, /* dense source col, curindexed */
2764 real64 factor, /* coefficient */
2765 mtx_range_t *rng, /* range to add over or */
2766 int32 *ilist) /* list to add over */
2767 {
2768 int32 t_org,*toorg,rindex,orgrindex,hilim;
2769 struct element_t **arr;
2770 real64 value;
2771
2772 #if MTX_DEBUG
2773 if(!mtx_check_matrix(mtx)) return;
2774 #endif
2775
2776 if (factor==D_ZERO) return;
2777
2778 if( rng == mtx_ALL_ROWS ) {
2779 t_org = mtx->perm.col.cur_to_org[t_cur]; /* org of target col */
2780 arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
2781 toorg = mtx->perm.row.cur_to_org; /* current row perm */
2782 hilim=mtx->order;
2783
2784 if (!ilist) { /* FULL COL CASE no ilist */
2785 for (rindex=0; rindex< hilim; rindex++) {
2786 if ((value=s_cur[rindex])!=D_ZERO) {
2787 if ( ISNULL(arr[(orgrindex=toorg[rindex])]) ) { /* add element */
2788 arr[orgrindex] =
2789 mtx_create_element_value(mtx,orgrindex,t_org,(factor * value));
2790 } else { /* increment element */
2791 arr[orgrindex]->value += factor * value;
2792 }
2793 }
2794 }
2795 } else { /* SPARSE COL CASE with ilist */
2796 int32 i;
2797 i=0;
2798 while ((rindex=ilist[i])>=0) {
2799 value=s_cur[rindex];
2800 if ( ISNULL(arr[(orgrindex=toorg[rindex])]) ) { /* add element */
2801 arr[orgrindex] =
2802 mtx_create_element_value(mtx,orgrindex,t_org,(factor * value));
2803 } else { /* increment element */
2804 arr[orgrindex]->value += factor * value;
2805 }
2806 i++;
2807 }
2808 }
2809 mtx_renull_using_col(mtx,t_org,arr);
2810
2811 } else if( rng->high >= rng->low ) { /* DENSE RANGE CASE */
2812 t_org = mtx->perm.col.cur_to_org[t_cur]; /* org of target col */
2813 arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
2814 toorg = mtx->perm.row.cur_to_org; /* current row perm */
2815 hilim = rng->high;
2816
2817 for (rindex=rng->low; rindex<= hilim; rindex++) {
2818 if ((value=s_cur[rindex])!=D_ZERO) {
2819 if ( ISNULL(arr[(orgrindex=toorg[rindex])]) ) { /* add element */
2820 arr[orgrindex] =
2821 mtx_create_element_value(mtx,orgrindex,t_org,(factor * value));
2822 } else { /* increment element */
2823 arr[orgrindex]->value += factor * value;
2824 }
2825 }
2826 }
2827 mtx_renull_using_col(mtx,t_org,arr);
2828 }
2829 mtx_null_vector_release();
2830 }
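/*
 * Usage sketch (not compiled in): the old dense-source add driven by
 * an ilist. The ilist names the cur columns to visit and must end
 * with a negative entry; it is honored only when rng is mtx_ALL_COLS.
 * Indices and the factor below are illustrative only.
 */
#if 0
static void example_old_add(mtx_matrix_t mtx, int32 t, real64 *dense)
{
  int32 ilist[4];

  ilist[0] = 0;
  ilist[1] = 5;
  ilist[2] = 7;
  ilist[3] = -1;               /* terminator */
  mtx_old_add_row_sparse(mtx,t,dense,2.0,mtx_ALL_COLS,ilist);
}
#endif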
2831
2832 size_t mtx_size(mtx_matrix_t mtx) {
2833 size_t size=0;
2834 if (ISNULL(mtx)) return size;
2835 size += (1+mtx->nslaves)*sizeof(struct mtx_header);
2836 if (ISSLAVE(mtx)) {
2837 return 2*sizeof(struct element_t *)*(size_t)mtx->capacity +size;
2838 }
2839 size += mem_sizeof_store(mtx->ms);
2840 /* headers */
2841 size += (1 + mtx->nslaves) * (size_t)mtx->capacity *
2842 (size_t)2 * sizeof(struct element_t *);
2843 /* block data */
2844 if (mtx->data->nblocks >0)
2845 size += sizeof(mtx_region_t)*(size_t)(mtx->data->nblocks-1);
2846 /* permutations */
2847 size += (size_t)4*(sizeof(int32)*(size_t)mtx->capacity);
2848 return size;
2849 }
2850
2851 size_t mtx_chattel_size(mtx_matrix_t mtx) {
2852 size_t size=0;
2853 int32 i;
2854 if (ISNULL(mtx) || ISSLAVE(mtx)) return size;
2855 /* headers */
2856 size += (mtx->nslaves)*sizeof(struct mtx_header);
2857 /* incidence */
2858 for (i=0; i <mtx->nslaves; i++) {
2859 size += (size_t)mtx_nonzeros_in_region(mtx->slaves[i],mtx_ENTIRE_MATRIX);
2860 }
2861 /*nz headers */
2862 size += mtx->nslaves * (size_t)mtx->capacity *
2863 (