/[ascend]/trunk/base/generic/solver/mtx_basic.c
ViewVC logotype

Contents of /trunk/base/generic/solver/mtx_basic.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1175 - (show annotations) (download) (as text)
Thu Jan 18 00:34:22 2007 UTC (13 years, 10 months ago) by johnpye
File MIME type: text/x-csrc
File size: 109276 byte(s)
A few easily-fixed warnings dispelled
1 /*
2 * mtx: Ascend Sparse Matrix Package
3 * by Karl Michael Westerberg
4 * Created: 2/6/90
5 * Version: $Revision: 1.20 $
6 * Version control file: $RCSfile: mtx_basic.c,v $
7 * Date last modified: $Date: 2000/01/25 02:27:07 $
8 * Last modified by: $Author: ballan $
9 *
10 * This file is part of the SLV solver.
11 *
12 * Copyright (C) 1990 Karl Michael Westerberg
13 * Copyright (C) 1993 Joseph Zaher
14 * Copyright (C) 1994 Joseph Zaher, Benjamin Andrew Allan
15 * Copyright (C) 1995 Kirk Andre Abbott, Benjamin Andrew Allan
16 * Copyright (C) 1996 Benjamin Andrew Allan
17 *
18 * The SLV solver is free software; you can redistribute
19 * it and/or modify it under the terms of the GNU General Public License as
20 * published by the Free Software Foundation; either version 2 of the
21 * License, or (at your option) any later version.
22 *
23 * The SLV solver is distributed in hope that it will be
24 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26 * General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License along with
29 * the program; if not, write to the Free Software Foundation, Inc., 675
30 * Mass Ave, Cambridge, MA 02139 USA. Check the file named COPYING.
31 * COPYING is found in ../compiler.
32 */
33
34 #include <math.h>
35 #include <utilities/ascConfig.h>
36 #include <utilities/ascMalloc.h>
37 #include <utilities/mem.h>
38 #include "mtx.h"
39 #ifdef ASC_WITH_MMIO
40 # include <mmio.h>
41 #endif
42
43 /* grab our private parts */
44 #define __MTX_C_SEEN__
45 #include "mtx_use_only.h"
46 #include <general/mathmacros.h>
47
48 /* this returns an error count */
/* this returns an error count */
int super_check_matrix( mtx_matrix_t mtx)
/**
 *** Really check the matrix.  Exhaustive consistency audit:
 ***  - the row and col permutation arrays must be mutual inverses;
 ***  - capacity must be at least order;
 ***  - rows/cols with current index >= order must hold no elements;
 ***  - every live element reachable from a row header must claim that
 ***    org row (and likewise for columns), and the row-wise and
 ***    col-wise walks must count the same number of live elements.
 *** Returns the number of problems found (0 means the matrix is clean).
 **/
{
  int32 ndx,errcnt=0;
  int32 rowc,colc;   /* live-element counts from row-wise / col-wise walks */

  /* Test consistency of permutation arrays */
  for( ndx=ZERO ; ndx < mtx->capacity ; ++ndx ) {
    if( mtx->perm.row.org_to_cur[mtx->perm.row.cur_to_org[ndx]] != ndx ) {
      FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
      FPRINTF(g_mtxerr," Permutation violation in row %d.\n", ndx);
      errcnt++;
    }
    if( mtx->perm.col.org_to_cur[mtx->perm.col.cur_to_org[ndx]] != ndx ) {
      FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
      FPRINTF(g_mtxerr," Permutation violation in col %d.\n",ndx);
      errcnt++;
    }
  }

  if( mtx->order > mtx->capacity ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
    FPRINTF(g_mtxerr," Capacity %d is less than order %d\n",
            mtx->capacity,mtx->order);
    errcnt++;
  }

  /* Make sure rows and columns which don't exist are blank */
  for( ndx = mtx->order ; ndx < mtx->capacity ; ++ndx ) {
    int32 org_row,org_col;
    org_row = mtx->perm.row.cur_to_org[ndx];
    org_col = mtx->perm.col.cur_to_org[ndx];
    if( NOTNULL(mtx->hdr.row[org_row]) ) {
      FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
      FPRINTF(g_mtxerr," Non-zeros found in non-existent row %d.\n",ndx);
      errcnt++;
    }
    if( NOTNULL(mtx->hdr.col[org_col]) ) {
      FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
      FPRINTF(g_mtxerr," Non-zeros found in non-existent col %d.\n",ndx);
      errcnt++;
    }
  }

  /* Verify fishnet structure: each element must claim the row (col) list
   * it is linked into; elements with row == mtx_NONE are half-deleted
   * ("disclaimed") and should not appear here. */
  for( ndx=rowc=colc=ZERO ; ndx < mtx->capacity ; ++ndx ) {
    struct element_t *elt;
    int32 org_row,org_col;
    org_row = mtx->perm.row.cur_to_org[ndx];
    org_col = mtx->perm.col.cur_to_org[ndx];
    for( elt = mtx->hdr.row[org_row] ; NOTNULL(elt) ; elt = elt->next.col ) {
      if( elt->row != mtx_NONE ) {
        ++rowc;
        if( elt->row != org_row ) {
          FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
          FPRINTF(g_mtxerr," Element mismatch in row %d.\n", ndx);
          errcnt++;
        }
      } else {
        FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
        FPRINTF(g_mtxerr," Disclaimed element in row %d.\n", ndx);
        errcnt++;
      }
    }
    for( elt = mtx->hdr.col[org_col] ; NOTNULL(elt) ; elt = elt->next.row ) {
      if( elt->col != mtx_NONE ) {
        ++colc;
        if( elt->col != org_col ) {
          FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
          FPRINTF(g_mtxerr," Element mismatch in col %d.\n", ndx);
          errcnt++;
        }
      } else {
        FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
        FPRINTF(g_mtxerr," Disclaimed element in col %d.\n", ndx);
        errcnt++;
      }
    }
  }
  /* every live element is linked in exactly one row and one col list */
  if( rowc != colc ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) super_check_matrix\n");
    FPRINTF(g_mtxerr," Non-zero discrepancy, %d by row, %d by col.\n",
            rowc, colc);
    errcnt++;
  }
  return errcnt;
}
138
boolean check_matrix( mtx_matrix_t mtx, char *file, int line)
/**
 *** Checks the integrity flag of the matrix.  Returns 1 if the matrix
 *** looks sane, 0 otherwise (with a message to g_mtxerr naming the
 *** caller's file/line).  With MTX_DEBUG it also runs the exhaustive
 *** super_check_matrix audit.  For a slave matrix, the master is
 *** checked recursively as well.
 **/
{

  if( ISNULL(mtx) ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) check_matrix\n");
    FPRINTF(g_mtxerr," NULL matrix in %s line %d.\n",file,line);
    return 0;
  }

  switch( mtx->integrity ) {
  case OK:
    break;
  case DESTROYED:
    /* header was stamped by free_header: use-after-destroy */
    FPRINTF(g_mtxerr,"ERROR: (mtx) check_matrix\n");
    FPRINTF(g_mtxerr,
      " Matrix deceased found in %s line %d.\n",file,line);
    return 0;
  default:
    /* neither stamp: pointer to something that was never a matrix */
    FPRINTF(g_mtxerr,"ERROR: (mtx) check_matrix\n");
    FPRINTF(g_mtxerr," Matrix garbage found in %s line %d.\n",
      file,line);
    return 0;
  }

#if MTX_DEBUG
  super_check_matrix(mtx);
#endif

  if (ISSLAVE(mtx)) {
    return (check_matrix(mtx->master,file,line));
  }
  return 1;
}
175
176 /* this function checks the consistency of a sparse as best it can.
177 returns FALSE if something wierd. */
178 boolean check_sparse(const mtx_sparse_t * const sp, char *file, int line)
179 {
180 if ( ISNULL(sp) ||
181 sp->len > sp->cap ||
182 ( sp->cap > 0 && ( ISNULL(sp->idata) || ISNULL(sp->data) ) )
183 ) {
184 FPRINTF(g_mtxerr,"ERROR: (mtx) check_sparse - Inconsistent or\n");
185 FPRINTF(g_mtxerr," NULL sparse in %s line %d.\n",file,line);
186 return 0;
187 }
188 return 1;
189 }
190
/* Convenience wrapper: ascfree(ptr) only when ptr is non-NULL.
 * Macro form of the commented-out function below. */
#define free_unless_null(ptr) if( NOTNULL(ptr) ) ascfree(ptr)
/*
static void free_unless_null(POINTER ptr)
{
  if( NOTNULL(ptr) )
    ascfree(ptr);
}
*/
199
200 void mtx_zero_int32(int32 *data, int len)
201 {
202 int i;
203 if (data==NULL) return;
204 for (i=0; i<len; i++) data[i]=0;
205 }
206
207 void mtx_zero_real64(real64 *data, int len)
208 {
209 int i;
210 if (data==NULL) return;
211 for (i=0; i<len; i++) data[i]=0.0;
212 }
213
214 void mtx_zero_ptr(void **data, int len)
215 {
216 int i;
217 if (data==NULL) return;
218 for (i=0; i<len; i++) data[i]=NULL;
219 }
220
221 static mtx_matrix_t alloc_header(void)
222 /**
223 *** Allocates a matrix header and returns a pointer to it.
224 **/
225 {
226 mtx_matrix_t mtx;
227 mtx = (mtx_matrix_t)ascmalloc( sizeof(struct mtx_header) );
228 mtx->integrity = OK;
229 return(mtx);
230 }
231
static void free_header(mtx_matrix_t mtx)
/**
 *** Frees a matrix header.  The integrity field is stamped DESTROYED
 *** first so a stale pointer is recognized by check_matrix.
 **/
{
  mtx->integrity = DESTROYED;
  ascfree(mtx);
}
240
#ifdef DELETE_THIS_UNUSED_FUNCTION
/* not IN use? */
static struct element_t **alloc_nz_header(int32 cap)
/**
 *** Allocates a row or column header randomly initialized.
 *** (Uninitialized-malloc variant of calloc_nz_header; compiled out.)
 **/
{
  return(cap > 0 ? (struct element_t **)
    ascmalloc(cap*sizeof(struct element_t *)) : NULL);
}
#endif /* DELETE_THIS_UNUSED_FUNCTION */
252
253
254 static struct element_t **calloc_nz_header(int32 cap)
255 /**
256 *** Allocates a zeroed row or column header.
257 **/
258 {
259 return(cap > 0 ? ASC_NEW_ARRAY_CLEAR(struct element_t *,cap) : NULL);
260 }
261
static void copy_nz_header(struct element_t **tarhdr,
                           struct element_t **srchdr, int32 cap)
/**
 *** Copies srchdr to tarhdr given the capacity of srchdr.
 *** NOTE(review): mem_copy_cast appears to take (source, target, bytes)
 *** given this call's intent -- confirm against mem.h before reusing.
 **/
{
  mem_copy_cast(srchdr,tarhdr,cap*sizeof(struct element_t *));
}
270
/* Frees a row or column header; safe to call with a NULL header. */
#define free_nz_header(hdr) free_unless_null( (POINTER)(hdr) )
/**
 *** Frees a row or column header
 **/
276
/********************************************************************/
/*
Any method of deallocating elements that doesn't use
delete_from_row/col or nuke_fishnet is DOOMED to die.
On ANY entry to delete_from_row or delete_from_col,
last_value_matrix should be already set to the mtx being deleted from.
What this really means is that before any of the following list of functions
is called, the last_value_matrix should be set:

disclaim element
delete_from_row/col
blast_row/col
clean_row/col

Note that the above functions call each other, with blast_/clean_ being
the top housekeeping/garbage collection pair. Users of delete, blast or
clean should set last_value_matrix at the scope where the matrix is known.

Excuses for this kluge:
1) mtx_value/set_value need it.
2) the mem_store scheme REQUIRES it.
3) it is faster than passing the mtx
up and down through the functions listed above, and we do a LOT of
these operations.
*/
/* Target matrix for the element-deletion helpers above; see kluge note. */
static mtx_matrix_t last_value_matrix = NULL;
303
static void disclaim_element(struct element_t *element)
/**
 *** Indicates that one of the row or column (it doesn't matter) in which
 *** this element appears will no longer point to this element (it has
 *** already been removed from the list). Elements disclaimed once have
 *** their row,col indices set to mtx_NONE. Elements disclaimed twice are
 *** discarded.
 *** NOTE WELL: last_value_mtx must be set before this function is called.
 **/
{
  if( element->row == mtx_NONE ) {
    /* second disclaim: hand the element back to the shared pool */
    mem_free_element(last_value_matrix->ms,(void *)element);
  } else {
    /* first disclaim: mark dead; it is still linked in the other list */
    element->row = element->col = mtx_NONE;
  }
}
320
321 static void delete_from_row(struct element_t **link)
322 /**
323 *** Deletes the element from the row it is in, given a pointer to
324 *** the row link which leads to this element.
325 *** On return the pointer pointed to by link points to the new next element.
326 *** NOTE WELL: last_value_mtx must be set before this function is called.
327 **/
328 {
329 struct element_t *p;
330 p = *link;
331 *link = p->next.col;
332 disclaim_element(p);
333 /* conservatively cause mtx_set_value to forget */
334 last_value_matrix->last_value = NULL;
335 }
336
337 static void delete_from_col(struct element_t **link)
338 /**
339 *** Deletes the element from the col it is in, given a pointer to
340 *** the col link which leads to this element.
341 *** NOTE WELL: last_value_mtx must be set before this function is called.
342 **/
343 {
344 struct element_t *p;
345 p = *link;
346 *link = p->next.row;
347 disclaim_element(p);
348 /* conservatively cause mtx_set_value to forget */
349 last_value_matrix->last_value = NULL;
350 }
351
static void nuke_fishnet(mtx_matrix_t mtx)
/**
 *** Deletes every element even remotely associated with a matrix
 *** and nulls the headers. Also nulls the headers of slaves
 *** and cleans out their elements.
 *** Must be called on a master matrix with valid headers; complains to
 *** g_mtxerr and returns if given NULL headers or a slave.
 **/
{
  int32 i;

  if ( ISNULL(mtx) || ISNULL(mtx->hdr.row) ||
       ISNULL(mtx->hdr.col) || ISSLAVE(mtx)) {
    FPRINTF(g_mtxerr,"WARNING: (mtx) nuke_fishnet called\n");
    FPRINTF(g_mtxerr," with bad matrix.\n");
    return;
  }
  if (mtx->capacity<1) return;  /* nothing ever allocated */
  mtx->last_value = NULL;
  /* bulk-free every element: master and slaves share one mem store */
  mem_clear_store(mtx->ms);
  zero(mtx->hdr.row,mtx->capacity,struct element_t *);
  zero(mtx->hdr.col,mtx->capacity,struct element_t *);
  for (i = 0; i < mtx->nslaves; i++) {
    /* slaves only need their headers nulled; elements went with the store */
    zero(mtx->slaves[i]->hdr.row,mtx->capacity,struct element_t *);
    zero(mtx->slaves[i]->hdr.col,mtx->capacity,struct element_t *);
  }
}
377
static void blast_row( mtx_matrix_t mtx, int32 org, mtx_range_t *rng)
/**
 *** Destroys all elements in the given row with (current)
 *** col index in the given col range: if rng == mtx_ALL_COLS,
 *** entire row is destroyed.  Elements already disclaimed once
 *** (col == mtx_NONE) are deleted regardless of range.
 *** NOTE WELL: last_value_mtx must be set before this function is called.
 **/
{
  struct element_t **link;

  link = &(mtx->hdr.row[org]);
  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*link) )
      delete_from_row(link);
  } else {
    int32 *tocur = mtx->perm.col.org_to_cur;
    int32 col;
    while( NOTNULL(*link) ) {
      col = (*link)->col;
      /* mtx_NONE tested first so tocur is never indexed with it */
      if( col == mtx_NONE || in_range(rng,tocur[col]) )
        delete_from_row(link);   /* *link advances to the next element */
      else
        link = &((*link)->next.col);
    }
  }
}
404
static void blast_col( mtx_matrix_t mtx, int32 org, mtx_range_t *rng)
/**
 *** Destroys all elements in the given col with (current)
 *** row index in the given row range: if rng == mtx_ALL_ROWS,
 *** entire col is destroyed.  Elements already disclaimed once
 *** (row == mtx_NONE) are deleted regardless of range.
 *** NOTE WELL: last_value_mtx must be set before this function is called.
 **/
{
  struct element_t **link;

  link = &(mtx->hdr.col[org]);
  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*link) )
      delete_from_col(link);
  } else {
    int32 *tocur = mtx->perm.row.org_to_cur, row;

    while( NOTNULL(*link) ) {
      row = (*link)->row;
      /* mtx_NONE tested first so tocur is never indexed with it */
      if( row == mtx_NONE || in_range(rng,tocur[row]) )
        delete_from_col(link);   /* *link advances to the next element */
      else
        link = &((*link)->next.row);
    }
  }
}
431
432 static void clean_row(mtx_matrix_t mtx, int32 org)
433 /**
434 *** Disclaims all elements in the given row which have already been
435 *** disclaimed once.
436 *** NOTE WELL: last_value_mtx must be set before this function is called.
437 **/
438 {
439 struct element_t **link;
440
441 link = &(mtx->hdr.row[org]);
442 while( NOTNULL(*link) )
443 if( (*link)->row == mtx_NONE )
444 delete_from_row(link);
445 else
446 link = &((*link)->next.col);
447 }
448
449 static void clean_col(mtx_matrix_t mtx, int32 org)
450 /**
451 *** Disclaims all elements in the given col which have already been
452 *** disclaimed once.
453 *** NOTE WELL: last_value_mtx must be set before this function is called.
454 **/
455 {
456 struct element_t **link;
457
458 link = &(mtx->hdr.col[org]);
459 while( NOTNULL(*link) )
460 if( (*link)->col == mtx_NONE )
461 delete_from_col(link);
462 else
463 link = &((*link)->next.row);
464 }
465 /********************************************************************/
466
467 mtx_coord_t *mtx_coord(mtx_coord_t *coordp, int32 row, int32 col)
468 {
469 coordp->row = row;
470 coordp->col = col;
471 return(coordp);
472 }
473
474 mtx_range_t *mtx_range(mtx_range_t *rangep, int32 low, int32 high)
475 {
476 rangep->low = low;
477 rangep->high = high;
478 return(rangep);
479 }
480
481 mtx_region_t *mtx_region( mtx_region_t *regionp, int32 rowlow, int32 rowhigh,
482 int32 collow, int32 colhigh)
483 {
484 mtx_range(&(regionp->row),rowlow,rowhigh);
485 mtx_range(&(regionp->col),collow,colhigh);
486 return(regionp);
487 }
488
/* Once set, g_mtxerr is frozen and mtx_redirectErrors becomes a no-op. */
static int g_mtx_debug_redirect = 0;

/* Freeze the current g_mtxerr destination: subsequent mtx_create /
 * mtx_create_slave calls will no longer redirect errors to stderr. */
void mtx_debug_redirect_freeze() {
  g_mtx_debug_redirect = 1;
}

/* Point g_mtxerr at f unless the destination has been frozen. */
static void mtx_redirectErrors(FILE *f) {
  if (!g_mtx_debug_redirect) {
    assert(f != NULL);
    g_mtxerr = f;
  }
}
501
/* Create a new empty master matrix of order/capacity zero, with fresh
 * (empty) permutations, structural data, and element store.
 * NOTE(review): allocation results (alloc_header, ascmalloc,
 * mtx_alloc_perm, mem_create_store) are not checked -- on OOM this
 * dereferences NULL; confirm whether callers rely on a NULL return. */
mtx_matrix_t mtx_create(void)
{
  mtx_matrix_t mtx;

  mtx_redirectErrors(stderr); /* call mtx_debug_redirect_freeze() to bypass */

  mtx = alloc_header();
  mtx->order = ZERO;
  mtx->capacity = ZERO;
  mtx->hdr.row = mtx->hdr.col = NULL;
  mtx->perm.row.org_to_cur = mtx_alloc_perm(ZERO);
  mtx->perm.row.cur_to_org = mtx_alloc_perm(ZERO);
  mtx->perm.row.parity = EVEN;
  mtx->perm.col.org_to_cur = mtx_alloc_perm(ZERO);
  mtx->perm.col.cur_to_org = mtx_alloc_perm(ZERO);
  mtx->perm.col.parity = EVEN;
  mtx->perm.transpose = mtx_NONE;
  mtx->data =
    (struct structural_data_t *)ascmalloc(sizeof(struct structural_data_t));
  mtx->data->symbolic_rank = -1;   /* no symbolic analysis done yet */
  mtx->data->nblocks = -1;
  mtx->data->block = NULL;
  mtx->master = NULL;              /* this matrix is a master */
  mtx->nslaves = ZERO;
  mtx->slaves = NULL;
  mtx->last_value = NULL;
  mtx->ms = mem_create_store(LENMAGIC, WIDTHMAGIC/sizeof(struct element_t) - 1,
                             sizeof(struct element_t),10,2048);
  return(mtx);
}
532
/**
 ** Slave headers share the master's:
 ** perm.anything, data->anything, ms->anything, order and capacity.
 ** Unique to the slave are:
 ** hdr.anything
 ** last_value.
 ** All slaves of a master appear as pointers in the masters slaves list
 ** which is nslaves long. slaves do not have slaves.
 **/
mtx_matrix_t mtx_create_slave(mtx_matrix_t master)
{
  mtx_matrix_t mtx, *mlist;
  int32 newcnt;
  mtx_redirectErrors(stderr); /* call mtx_debug_redirect_freeze() to bypass */
  /* a corrupt master, or a slave posing as one, cannot take slaves */
  if(!mtx_check_matrix(master) || ISSLAVE(master)) return NULL;

  mtx = alloc_header();
  if (ISNULL(mtx)) return mtx;
  mtx->order = master->order;
  mtx->capacity = master->capacity;
  mtx->nslaves = ZERO;
  mtx->hdr.row = calloc_nz_header(master->capacity);
  if (ISNULL(mtx->hdr.row)) {
    ascfree(mtx);
    FPRINTF(g_mtxerr,"mtx_create_slave: Insufficient memory.\n");
    return NULL;
  }
  mtx->hdr.col = calloc_nz_header(master->capacity);
  if (ISNULL(mtx->hdr.col)) {
    ascfree(mtx->hdr.row);
    ascfree(mtx);
    FPRINTF(g_mtxerr,"mtx_create_slave: Insufficient memory.\n");
    return NULL;
  }
  /* share the master's permutations; parity/transpose are dummies here */
  mtx->perm.row.org_to_cur = master->perm.row.org_to_cur;
  mtx->perm.row.cur_to_org = master->perm.row.cur_to_org;
  mtx->perm.row.parity = EVEN; /* dont look at this again*/
  mtx->perm.col.org_to_cur = master->perm.col.org_to_cur;
  mtx->perm.col.cur_to_org = master->perm.col.cur_to_org;
  mtx->perm.col.parity = EVEN; /* dont look at this again*/
  mtx->perm.transpose = mtx_NONE; /* dont look at this again*/
  mtx->slaves = NULL;
  mtx->data = master->data;    /* shared structural data */
  mtx->master = master;
  mtx->last_value = NULL;
  mtx->ms = master->ms;        /* shared element store */
  /* register the new slave in the master's (growing) slave list */
  newcnt = master->nslaves + 1;
  if (newcnt == 1) {
    mlist = (mtx_matrix_t *)ascmalloc(sizeof(mtx_matrix_t));
  } else {
    mlist =
      (mtx_matrix_t *)ascrealloc(master->slaves,newcnt*sizeof(mtx_matrix_t));
  }
  if (ISNULL(mlist)) {
    ascfree(mtx->hdr.row);
    ascfree(mtx->hdr.col);
    ascfree(mtx);
    FPRINTF(g_mtxerr,"mtx_create_slave: Insufficient memory.\n");
    return NULL;
  } else {
    master->slaves = mlist;
    master->slaves[master->nslaves] = mtx;
    master->nslaves = newcnt;
  }
  return(mtx);
}
599
/* Unregister mtx from master's slave list, destroy its elements and
 * headers, and free it.  Both arguments are verified; mismatched or
 * corrupt pairs are reported to g_mtxerr and left alone. */
static void destroy_slave(mtx_matrix_t master, mtx_matrix_t mtx)
{
  int32 sid,i;

  if(!mtx_check_matrix(mtx) || !mtx_check_matrix(master)) {
    FPRINTF(g_mtxerr,
      "destroy_slave(mtx.c) called with corrupt slave or master\n");
    return;
  }
  if (ISSLAVE(master) || !ISSLAVE(mtx)) {
    FPRINTF(g_mtxerr,
      "destroy_slave(mtx.c) called with mismatched slave/master pair\n");
    return;
  }
  /* get slave index */
  sid = -1;
  for (i=0; i<master->nslaves; i++) {
    if (master->slaves[i] == mtx) {
      sid = i;
    }
  }
  if (sid < 0) {
    /* mtx claims to be a slave but is not in this master's list */
    FPRINTF(g_mtxerr,
      "destroy_slave(mtx.c) called with mismatched slave/master pair\n");
    return;
  }
  /* move the rest of the slaves up the list */
  /* old implementation - possible undefined operation on i - new code follows - JDS
  for (i = sid; i < master->nslaves -1;) {
    master->slaves[i] = master->slaves[++i];
  }
  */
  for (i = sid; i < master->nslaves -1; ++i) {
    master->slaves[i] = master->slaves[i+1];
  }
  (master->nslaves)--;
  /* we will not realloc smaller. */
  if (master->nslaves == 0 && NOTNULL(master->slaves)) {
    ascfree(master->slaves);
    master->slaves = NULL;
  }
  /* give the slave's elements back to the shared store, then free it */
  mtx_clear_region(mtx,mtx_ENTIRE_MATRIX);
  free_nz_header(mtx->hdr.row);
  free_nz_header(mtx->hdr.col);
  free_header(mtx);
}
646
/* Destroy a matrix.  A slave is unhooked from its master and freed;
 * a master is freed along with all of its slaves' headers (the slaves'
 * elements live in the shared store and die with it). */
void mtx_destroy(mtx_matrix_t mtx)
{
  int32 i;
  if(!mtx_check_matrix(mtx)) return;
  if (ISSLAVE(mtx)) {
    destroy_slave(mtx->master,mtx);
    return;
  }

  for (i=0; i<mtx->nslaves; i++) {
    if (mtx_check_matrix(mtx->slaves[i])) {
      free_nz_header(mtx->slaves[i]->hdr.row);
      free_nz_header(mtx->slaves[i]->hdr.col);
      free_header(mtx->slaves[i]);
      mtx->slaves[i] = NULL;
    } else {
      /* leak rather than free through a corrupt header */
      FPRINTF(g_mtxerr,
        "mtx_destroy: Corrupt slave found while destroying master.\n");
      FPRINTF(g_mtxerr,
        " Slave %d being abandoned.\n",i);
    }
  }
  mtx->nslaves = 0;

  mtx_clear_region(mtx,mtx_ENTIRE_MATRIX);
  /* mtx_check_matrix is called again if MTX_DEBUG*/
  /**
   *** The fishnet structure is
   *** destroyed, just leaving
   *** the headers and maybe ms.
   **/
  mem_destroy_store(mtx->ms);

  free_nz_header(mtx->hdr.row);
  free_nz_header(mtx->hdr.col);

  mtx_free_perm(mtx->perm.row.org_to_cur);
  mtx_free_perm(mtx->perm.row.cur_to_org);
  mtx_free_perm(mtx->perm.col.org_to_cur);
  mtx_free_perm(mtx->perm.col.cur_to_org);

  if( NOTNULL(mtx->data->block) ) {
    ascfree(mtx->data->block);
  }
  ascfree(mtx->data);

  free_header(mtx);
}
695
/* Allocate a sparse vector with capacity cap and length 0.
 * Returns NULL (after freeing partial allocations) on OOM.
 * NOTE(review): with cap == 0, an allocator returning NULL for
 * zero-size requests would make this report OOM for a request that
 * check_sparse would otherwise consider valid -- confirm intent. */
mtx_sparse_t *mtx_create_sparse(int32 cap) {
  mtx_sparse_t *ret;
  ret = (mtx_sparse_t *)ascmalloc(sizeof(mtx_sparse_t));
  if (ISNULL(ret)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx_create_sparse) Insufficient memory.\n");
    return ret;
  }
  ret->data = ASC_NEW_ARRAY(real64,cap);
  if (ISNULL(ret->data)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx_create_sparse) Insufficient memory.\n");
    ascfree(ret);
    return NULL;
  }
  ret->idata = ASC_NEW_ARRAY(int32,cap);
  if (ISNULL(ret->idata)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx_create_sparse) Insufficient memory.\n");
    ascfree(ret->data);
    ascfree(ret);
    return NULL;
  }
  ret->cap = cap;
  ret->len = 0;
  return ret;
}
720
721 void mtx_destroy_sparse(mtx_sparse_t *ret)
722 {
723 if (ISNULL(ret)) return;
724 if (NOTNULL(ret->idata)) ascfree(ret->idata);
725 if (NOTNULL(ret->data)) ascfree(ret->data);
726 ascfree(ret);
727 }
728
729 void mtx_destroy_blocklist(mtx_block_t *bl)
730 {
731 if (ISNULL(bl)) return;
732 if (NOTNULL(bl->block) && bl->nblocks>0) ascfree(bl->block);
733 ascfree(bl);
734 }
735
736 mtx_matrix_t mtx_duplicate_region(mtx_matrix_t mtx, mtx_region_t *reg,
737 real64 drop)
738 {
739 mtx_matrix_t new;
740 int32 org_row;
741 struct element_t *elt;
742
743 if (!mtx_check_matrix(mtx) ||
744 (mtx->master != NULL && !mtx_check_matrix(mtx->master))) {
745 return NULL;
746 }
747 drop = fabs(drop);
748 if (ISSLAVE(mtx)) {
749 new = mtx_create_slave(mtx->master);
750 } else {
751 new = mtx_create_slave(mtx);
752 }
753 if (new == NULL) {
754 FPRINTF(g_mtxerr,"ERROR: (mtx_duplicate_region) Insufficient memory.\n");
755 return NULL;
756 }
757 if (reg==mtx_ENTIRE_MATRIX){
758 if (drop >0.0) { /* copy with drop tolerance */
759 for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
760 for( elt = mtx->hdr.row[org_row] ;
761 NOTNULL(elt) ; elt = elt->next.col ) {
762 if( elt->row != mtx_NONE && fabs(elt->value) > drop) {
763 mtx_create_element_value(new,elt->row,elt->col,elt->value);
764 }
765 }
766 }
767 } else {
768 for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
769 for( elt = mtx->hdr.row[org_row] ;
770 NOTNULL(elt) ; elt = elt->next.col ) {
771 if( elt->row != mtx_NONE ) {
772 mtx_create_element_value(new,elt->row,elt->col,elt->value);
773 }
774 }
775 }
776 }
777 } else {
778 int32 *rowcur, *colcur, row,col;
779 mtx_range_t *colrng,*rowrng;
780 rowcur=mtx->perm.row.org_to_cur;
781 colcur=mtx->perm.col.org_to_cur;
782 rowrng=&(reg->row);
783 colrng=&(reg->col);
784 if (drop >0.0) { /* copy with drop tolerance */
785 for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
786 row=rowcur[org_row];
787 if (in_range(rowrng,row)) {
788 for( elt = mtx->hdr.row[org_row] ;
789 NOTNULL(elt) ; elt = elt->next.col ) {
790 col=colcur[elt->col];
791 if( in_range(colrng,col) && elt->row != mtx_NONE &&
792 fabs(elt->value) > drop ) {
793 /* don't copy^bad^elements */
794 mtx_create_element_value(new,elt->row,elt->col,elt->value);
795 }
796 }
797 }
798 }
799 } else {
800 for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
801 row=rowcur[org_row];
802 if (in_range(rowrng,row)) {
803 for( elt = mtx->hdr.row[org_row] ;
804 NOTNULL(elt) ; elt = elt->next.col ) {
805 col=colcur[elt->col];
806 if( in_range(colrng,col) && elt->row != mtx_NONE ) {
807 /* don't copy^bad^elements */
808 mtx_create_element_value(new,elt->row,elt->col,elt->value);
809 }
810 }
811 }
812 }
813 }
814 }
815
816 return new;
817 }
818
/* WARNING: this function assumes sufficient memory is available.
 * it needs to be check for null returns in allocs and doesn't.
 */
/* Deep-copy mtx into a brand-new master matrix.
 *   blocks    - copy the structural (block decomposition) data too;
 *               otherwise the copy's data is zeroed.
 *   incidence - copy the elements (optionally restricted to region reg,
 *               optionally dropping |value| <= drop); otherwise the copy
 *               has empty incidence with a default-sized store.
 * Permutations are always copied.
 * NOTE(review): in the bounded-region branches colcur[elt->col] is read
 * before elt->row is compared to mtx_NONE; if once-disclaimed elements
 * can be present, that indexes the permutation array with mtx_NONE --
 * same pattern as mtx_duplicate_region; confirm. */
mtx_matrix_t mtx_copy_options(mtx_matrix_t mtx, boolean blocks,
                              boolean incidence, mtx_region_t *reg,
                              real64 drop)
{
  mtx_matrix_t copy;
  int32 org_row;
  struct element_t *elt;
  int32 bnum;

  if (!mtx_check_matrix(mtx)) return NULL;
  drop = fabs(drop);   /* tolerance is a magnitude */

  /* create same size matrix */
  copy = alloc_header();
  copy->order = mtx->order;
  copy->capacity = mtx->capacity;
  copy->master = NULL;       /* the copy is always a master */
  copy->nslaves = ZERO;
  copy->slaves = NULL;
  copy->last_value = NULL;

  /* copy headers and fishnet */
  copy->hdr.row = calloc_nz_header(copy->capacity);
  copy->hdr.col = calloc_nz_header(copy->capacity);
  if (incidence) {
    struct mem_statistics stat;
    int s_expool;
    /* size the copy's element pool from the source's usage statistics */
    mem_get_stats(&stat,mtx->ms);
    s_expool = MAX(2048,stat.elt_total/stat.str_wid);
    copy->ms = mem_create_store(LENMAGIC,
                                WIDTHMAGIC/sizeof(struct element_t) - 1,
                                sizeof(struct element_t),
                                10,s_expool-LENMAGIC);
    /* copy of a slave or master matrix will end up with an
     * initial mem_store that is the size cumulative of the
     * master and all its slaves since they share the ms.
     */
    if (reg==mtx_ENTIRE_MATRIX){
      if (drop >0.0) { /* copy with drop tolerance */
        for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
          for( elt = mtx->hdr.row[org_row] ;
               NOTNULL(elt) ; elt = elt->next.col ) {
            if( elt->row != mtx_NONE && fabs(elt->value) > drop) {
              mtx_create_element_value(copy,elt->row,elt->col,elt->value);
            }
          }
        }
      } else {
        for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
          for( elt = mtx->hdr.row[org_row] ;
               NOTNULL(elt) ; elt = elt->next.col ) {
            if( elt->row != mtx_NONE ) {
              mtx_create_element_value(copy,elt->row,elt->col,elt->value);
            }
          }
        }
      }
    } else {
      int32 *rowcur, *colcur, row,col;
      mtx_range_t *colrng,*rowrng;
      rowcur=mtx->perm.row.org_to_cur;
      colcur=mtx->perm.col.org_to_cur;
      rowrng=&(reg->row);
      colrng=&(reg->col);
      if (drop >0.0) { /* copy with drop tolerance */
        for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
          row=rowcur[org_row];
          if (in_range(rowrng,row)) {
            for( elt = mtx->hdr.row[org_row] ;
                 NOTNULL(elt) ; elt = elt->next.col ) {
              col=colcur[elt->col];
              if( in_range(colrng,col) && elt->row != mtx_NONE &&
                  fabs(elt->value) > drop ) {
                /* don't copy^bad^elements */
                mtx_create_element_value(copy,elt->row,elt->col,elt->value);
              }
            }
          }
        }
      } else {
        for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
          row=rowcur[org_row];
          if (in_range(rowrng,row)) {
            for( elt = mtx->hdr.row[org_row] ;
                 NOTNULL(elt) ; elt = elt->next.col ) {
              col=colcur[elt->col];
              if( in_range(colrng,col) && elt->row != mtx_NONE ) {
                /* don't copy^bad^elements */
                mtx_create_element_value(copy,elt->row,elt->col,elt->value);
              }
            }
          }
        }
      }
    }
  } else {
    copy->ms = mem_create_store(LENMAGIC,
                                WIDTHMAGIC/sizeof(struct element_t) - 1,
                                sizeof(struct element_t),10,2048);
    /* copy of a slave or master matrix will end up with an
       initial mem_store that is the default size */
  }

  /* copy permutation information */
  copy->perm.row.org_to_cur = mtx_alloc_perm(copy->capacity);
  mtx_copy_perm(copy->perm.row.org_to_cur,
                mtx->perm.row.org_to_cur,copy->capacity);
  copy->perm.row.cur_to_org = mtx_alloc_perm(copy->capacity);
  mtx_copy_perm(copy->perm.row.cur_to_org,
                mtx->perm.row.cur_to_org,copy->capacity);
  copy->perm.row.parity = mtx_row_parity(mtx);

  copy->perm.col.org_to_cur = mtx_alloc_perm(copy->capacity);
  mtx_copy_perm(copy->perm.col.org_to_cur,
                mtx->perm.col.org_to_cur,copy->capacity);
  copy->perm.col.cur_to_org = mtx_alloc_perm(copy->capacity);
  mtx_copy_perm(copy->perm.col.cur_to_org,
                mtx->perm.col.cur_to_org,copy->capacity);
  copy->perm.col.parity = mtx_col_parity(mtx);

  /* copy (or not) block data */
  copy->data =
    (struct structural_data_t *)ascmalloc(sizeof(struct structural_data_t));
  if (blocks) {
    copy->data->symbolic_rank = mtx->data->symbolic_rank;
    copy->data->nblocks = mtx->data->nblocks;
    copy->data->block = mtx->data->nblocks > 0 ? (mtx_region_t *)
      ascmalloc( mtx->data->nblocks*sizeof(mtx_region_t) ) : NULL;
    for( bnum=0; bnum < mtx->data->nblocks; bnum++ ) {
      copy->data->block[bnum].row.low = mtx->data->block[bnum].row.low;
      copy->data->block[bnum].row.high = mtx->data->block[bnum].row.high;
      copy->data->block[bnum].col.low = mtx->data->block[bnum].col.low;
      copy->data->block[bnum].col.high = mtx->data->block[bnum].col.high;
    }
  } else {
    copy->data->symbolic_rank=0;
    copy->data->nblocks=0;
    copy->data->block =NULL;
  }

  return(copy);
}
964
965 int32 mtx_order( mtx_matrix_t mtx)
966 {
967 if (!mtx_check_matrix(mtx)) return -1;
968 return(mtx->order);
969 }
970
971 int32 mtx_capacity( mtx_matrix_t mtx)
972 {
973 if (!mtx_check_matrix(mtx)) return -1;
974 return(mtx->capacity);
975 }
976
/* this is only to be called from in mtx_set_order */
/* Removes all elements whose (current) row or col index is >= order,
 * then sweeps the surviving lists for once-disclaimed leftovers.
 * NOTE(review): the clean_row loop indexes with the COL cur_to_org array
 * and clean_col with the ROW cur_to_org array -- if the two permutations
 * differ this sweeps an odd selection of rows/cols; confirm whether the
 * perm arrays were intended here or this is a long-standing swap. */
static void trim_incidence(mtx_matrix_t mtx, int32 order)
{
  int32 ndx;
  int32 *toorg;

  last_value_matrix = mtx;   /* required by the deletion helpers */
  toorg = mtx->perm.col.cur_to_org;
  for( ndx = order ; ndx < mtx->order ; ++ndx )
    blast_col(mtx,toorg[ndx],mtx_ALL_ROWS);
  for( ndx = ZERO ; ndx < order ; ++ndx )
    clean_row(mtx,toorg[ndx]);

  toorg = mtx->perm.row.cur_to_org;
  for( ndx = order ; ndx < mtx->order ; ++ndx )
    blast_row(mtx,toorg[ndx],mtx_ALL_COLS);
  for( ndx = ZERO ; ndx < order ; ++ndx )
    clean_col(mtx,toorg[ndx]);
}
996
997 /* this is only to be called from in mtx_set_order */
998 static void enlarge_nzheaders(mtx_matrix_t mtx, int32 order)
999 {
1000 struct element_t **newhdr;
1001
1002 /* realloc does not initialize, so calloc is the best we can do here */
1003
1004 newhdr = calloc_nz_header(order);
1005 copy_nz_header(newhdr,mtx->hdr.row,mtx->capacity);
1006 free_nz_header(mtx->hdr.row);
1007 mtx->hdr.row = newhdr;
1008
1009 newhdr = calloc_nz_header(order);
1010 copy_nz_header(newhdr,mtx->hdr.col,mtx->capacity);
1011 free_nz_header(mtx->hdr.col);
1012 mtx->hdr.col = newhdr;
1013 }
1014
1015 void mtx_set_order( mtx_matrix_t mtx, int32 order)
1016 /**
1017 *** This function will preserve the fact that all of the arrays are
1018 *** "correct" out to the capacity of the arrays, not just out to the order
1019 *** of the matrix. In other words, extensions to orders which still do
1020 *** not exceed the capacity of the arrays are trivial.
1021 **/
1022 {
1023 int32 i;
1024 if(!mtx_check_matrix(mtx)) return;
1025 if (ISSLAVE(mtx)) {
1026 mtx_set_order(mtx->master,order);
1027 return;
1028 }
1029 /* we now have a master matrix */
1030 if( order < mtx->order ) { /* Truncate */
1031 trim_incidence(mtx,order); /* clean master */
1032 for (i = 0; i < mtx->nslaves; i++) {
1033 trim_incidence(mtx->slaves[i],order);
1034 }
1035 }
1036 if (mtx->perm.transpose == mtx_NONE) {
1037 mtx->perm.transpose = 0;
1038 }
1039
1040 if( order > mtx->capacity ) {
1041 int32 *newperm;
1042 int32 ndx;
1043
1044 enlarge_nzheaders(mtx,order);
1045 for (i = 0; i < mtx->nslaves; i++) {
1046 enlarge_nzheaders(mtx->slaves[i],order);
1047 }
1048
1049 /* realloc not in order here. Happens only on the master. */
1050 newperm = mtx_alloc_perm(order);
1051 mtx_copy_perm(newperm,mtx->perm.row.org_to_cur,mtx->capacity);
1052 for( ndx=mtx->capacity ; ndx < order ; ++ndx )
1053 newperm[ndx] = ndx;
1054 mtx_free_perm(mtx->perm.row.org_to_cur);
1055 mtx->perm.row.org_to_cur = newperm;
1056
1057 newperm = mtx_alloc_perm(order);
1058 mtx_copy_perm(newperm,mtx->perm.row.cur_to_org,mtx->capacity);
1059 for( ndx=mtx->capacity ; ndx < order ; ++ndx )
1060 newperm[ndx] = ndx;
1061 mtx_free_perm(mtx->perm.row.cur_to_org);
1062 mtx->perm.row.cur_to_org = newperm;
1063
1064 newperm = mtx_alloc_perm(order);
1065 mtx_copy_perm(newperm,mtx->perm.col.org_to_cur,mtx->capacity);
1066 for( ndx=mtx->capacity ; ndx < order ; ++ndx )
1067 newperm[ndx] = ndx;
1068 mtx_free_perm(mtx->perm.col.org_to_cur);
1069 mtx->perm.col.org_to_cur = newperm;
1070
1071 newperm = mtx_alloc_perm(order);
1072 mtx_copy_perm(newperm,mtx->perm.col.cur_to_org,mtx->capacity);
1073 for( ndx=mtx->capacity ; ndx < order ; ++ndx )
1074 newperm[ndx] = ndx;
1075 mtx_free_perm(mtx->perm.col.cur_to_org);
1076 mtx->perm.col.cur_to_org = newperm;
1077
1078 mtx->capacity = order;
1079 for (i = 0; i < mtx->nslaves; i++) {
1080 mtx->slaves[i]->capacity = order;
1081 /* point slaves at master perm again in case anything has changed */
1082 mtx->slaves[i]->perm.row.org_to_cur = mtx->perm.row.org_to_cur;
1083 mtx->slaves[i]->perm.row.cur_to_org = mtx->perm.row.cur_to_org;
1084 mtx->slaves[i]->perm.col.org_to_cur = mtx->perm.col.org_to_cur;
1085 mtx->slaves[i]->perm.col.cur_to_org = mtx->perm.col.cur_to_org;
1086 }
1087 }
1088 mtx->order = order;
1089 for (i = 0; i < mtx->nslaves; i++) {
1090 mtx->slaves[i]->order = order;
1091 }
1092 }
1093
/**
 *** Remove the single element at (row,col), given in CURRENT coordinates,
 *** unlinking it from both its row list and its column list.
 *** A no-op if no element exists at that coordinate.
 **/
void mtx_clear_coord(mtx_matrix_t mtx, int32 row, int32 col)
{
  struct element_t **rlink, **clink;
  int32 org_row, org_col;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return; /*ben*/
#endif

  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  org_row = mtx->perm.row.cur_to_org[row];
  org_col = mtx->perm.col.cur_to_org[col];
  /* walk the row list to the link that points at the (org_row,org_col) elt */
  rlink = &(mtx->hdr.row[org_row]);
  for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
    rlink = &((*rlink)->next.col);
  if( ISNULL(*rlink) ) return; /* no such element: nothing to delete */
  /* find the matching link in the column list */
  clink = &(mtx->hdr.col[org_col]);
  for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
    clink = &((*clink)->next.row);
  delete_from_row(rlink);
  delete_from_col(clink);
}
1116
/**
 *** Remove all elements of 'row' (CURRENT coordinates) whose current column
 *** lies in rng, or the entire row when rng == mtx_ALL_COLS.
 *** Each deleted element is unlinked from its column list as well.
 *** An empty range (high < low) does nothing.
 **/
void mtx_clear_row( mtx_matrix_t mtx, int32 row, mtx_range_t *rng)
{
  struct element_t **rlink, **clink;
  int32 org_row;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return; /*ben*/
#endif

  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      /* locate the column-list link of the element about to die */
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
      /* *rlink now refers to the successor, so no advance is needed */
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,mtx->perm.col.org_to_cur[(*rlink)->col]) ) {
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else
        rlink = &((*rlink)->next.col); /* keep this element; step past it */
    }
  }
}
1150
/**
 *** Remove all elements of 'col' (CURRENT coordinates) whose current row
 *** lies in rng, or the entire column when rng == mtx_ALL_ROWS.
 *** Each deleted element is unlinked from its row list as well.
 *** An empty range (high < low) does nothing.  Mirror of mtx_clear_row.
 **/
void mtx_clear_col( mtx_matrix_t mtx, int32 col, mtx_range_t *rng)
{
  struct element_t **clink, **rlink;
  int32 org_col;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  org_col = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org_col]);
  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*clink) ) {
      /* locate the row-list link of the element about to die */
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
      /* *clink now refers to the successor, so no advance is needed */
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*clink) ) {
      if( in_range(rng,mtx->perm.row.org_to_cur[(*clink)->row]) ) {
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else
        clink = &((*clink)->next.row); /* keep this element; step past it */
    }
  }
}
1184
/**
 *** Remove every element whose CURRENT (row,col) lies in 'region'.
 *** mtx_ENTIRE_MATRIX on a master matrix takes the fast path (nuke_fishnet);
 *** otherwise rows in range are blasted over the column range and the
 *** affected columns are then cleaned of dangling links
 *** (same blast/clean pairing used elsewhere in this file).
 **/
void mtx_clear_region(mtx_matrix_t mtx, mtx_region_t *region)
{
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif

  if( region == mtx_ENTIRE_MATRIX && !ISSLAVE(mtx)) {
    /* damn the torpedos, wipe that sucker and slaves fast */
    nuke_fishnet(mtx);
  } else {
    int32 ndx;
    int32 *toorg;
    mtx_region_t reg;

    if (region == mtx_ENTIRE_MATRIX) {
      /* slave given ENTIRE_MATRIX: synthesize the full region explicitly */
      reg.row.low = reg.col.low = 0;
      reg.row.high = reg.col.high = mtx->order-1;
    } else {
      reg = *region;
    }
    last_value_matrix = mtx;
    toorg = mtx->perm.row.cur_to_org;
    for( ndx = reg.row.low ; ndx <= reg.row.high ; ++ndx )
      blast_row(mtx,toorg[ndx],&(reg.col));
    toorg = mtx->perm.col.cur_to_org;
    for( ndx = reg.col.low ; ndx <= reg.col.high ; ++ndx )
      clean_col(mtx,toorg[ndx]);
  }
}
1214
1215
1216 void mtx_reset_perm(mtx_matrix_t mtx)
1217 {
1218 int32 ndx;
1219
1220 #if MTX_DEBUG
1221 if(!mtx_check_matrix(mtx)) return;
1222 #endif
1223 if (ISSLAVE(mtx)) {
1224 mtx_reset_perm(mtx->master);
1225 return;
1226 }
1227
1228 for( ndx=ZERO ; ndx < mtx->capacity ; ++ndx ) {
1229 mtx->perm.row.org_to_cur[ndx] = ndx;
1230 mtx->perm.row.cur_to_org[ndx] = ndx;
1231 mtx->perm.col.org_to_cur[ndx] = ndx;
1232 mtx->perm.col.cur_to_org[ndx] = ndx;
1233 }
1234 mtx->perm.row.parity = mtx->perm.col.parity = EVEN;
1235
1236 mtx->data->symbolic_rank = -1;
1237 mtx->data->nblocks = -1;
1238 if( NOTNULL(mtx->data->block) ) {
1239 ascfree(mtx->data->block);
1240 mtx->data->block = NULL;
1241 }
1242 }
1243
/**
 *** Wipe every element of mtx (clearing the whole family via the master
 *** when mtx is a slave) and reset the permutations to the identity.
 **/
void mtx_clear(mtx_matrix_t mtx)
{
  if (ISSLAVE(mtx)) {
    mtx_clear_region(mtx->master,mtx_ENTIRE_MATRIX);
  }
  mtx_clear_region(mtx,mtx_ENTIRE_MATRIX);
  mtx_reset_perm(mtx);
}
1252
1253 real64 mtx_value(mtx_matrix_t mtx, mtx_coord_t *coord)
1254 {
1255 struct element_t *elt;
1256 register int32 org_row,org_col;
1257
1258 #if MTX_DEBUG
1259 if(!mtx_check_matrix(mtx)) return D_ZERO;
1260 #endif
1261 org_row = mtx->perm.row.cur_to_org[coord->row];
1262 org_col = mtx->perm.col.cur_to_org[coord->col];
1263 elt = mtx_find_element(mtx,org_row,org_col);
1264 mtx->last_value = elt;
1265 return( ISNULL(elt) ? D_ZERO : elt->value );
1266 }
1267
1268 void mtx_set_value( mtx_matrix_t mtx, mtx_coord_t *coord, real64 value)
1269 {
1270 register int32 org_row,org_col;
1271
1272 #if MTX_DEBUG
1273 if(!mtx_check_matrix(mtx)) return;
1274 #endif
1275 org_row = mtx->perm.row.cur_to_org[coord->row];
1276 org_col = mtx->perm.col.cur_to_org[coord->col];
1277 if ( NOTNULL(mtx->last_value) &&
1278 mtx->last_value->row==org_row &&
1279 mtx->last_value->col==org_col ) {
1280 mtx->last_value->value = value;
1281 } else {
1282 struct element_t *elt;
1283 if( ISNULL(elt = mtx_find_element(mtx,org_row,org_col)) ) {
1284 if (value != D_ZERO ) {
1285 elt = mtx_create_element_value(mtx,org_row,org_col,value);
1286 }
1287 } else {
1288 elt->value = value;
1289 }
1290 }
1291 }
1292
1293 void mtx_fill_value(mtx_matrix_t mtx, mtx_coord_t *coord, real64 value)
1294 {
1295 register int32 org_row,org_col;
1296
1297 #if MTX_DEBUG
1298 if(!mtx_check_matrix(mtx)) return;
1299 #endif
1300 org_row = mtx->perm.row.cur_to_org[coord->row];
1301 org_col = mtx->perm.col.cur_to_org[coord->col];
1302 mtx_create_element_value(mtx,org_row,org_col,value);
1303 }
1304
/**
 *** Like mtx_fill_value, but *coord is given in ORGANIZATIONAL
 *** (permutation-independent) coordinates, so no permutation lookups occur.
 *** Unconditionally creates a new element; duplicates are possible.
 **/
void mtx_fill_org_value(mtx_matrix_t mtx, const mtx_coord_t *coord,
                        real64 value)
{
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  mtx_create_element_value(mtx,coord->row,coord->col,value);
}
1313
1314 /* sparse matrix assembly of potentially duplicate fills */
1315 /* takes a matrix, assumed to have redundant and otherwise insane incidences
1316 created by 'misusing mtx_fill_value' and sums all like entries, eliminating
the duplicates and the zeroes. Returns 1 if it fails for some reason
(NULL matrix or allocation failure); otherwise returns -(number of
duplicate elements merged), which is 0 or negative.
1319 Could stand to have the error messages it emits improved.
1320 Could stand to take a rowrange or a rowlist, a colrange or a collist,droptol.
1321 algorithm: O(3tau)
1322 establish a vector mv of columns needing cleaning markers
1323 set lowmark=-1, highmark= -2
1324 for rows
1325 if row empty continue
1326 establish a vector ev of elt pointers set null
1327 prevlink = &header 1st elt pointer
1328 for each elt in row
1329 if elt is not marked in ev
1330 mark ev with elt, update prevlink
1331 else
1332 add value to elt pointed at by ev
1333 mark col in mv as needing cleaning, updating lowmark,highmark
1334 delete elt
1335 endif
1336 endfor
1337 for each elt in row
1338 unmark ev with elt
1339 if elt is 0, delete and mark col in mv.
1340 endfor
1341 endfor
1342 for i = lowmark to highmark
1343 clean col
1344 endfor
1345 My god, the tricks we play on a linked list.
1346 */
1347 int32 mtx_assemble(mtx_matrix_t mtx)
1348 {
1349 char *real_mv=NULL, *mv;
1350 struct element_t **real_ev=NULL, **ev, **prevlink, *elt;
1351 int32 orgrow, orgcol, lowmark,highmark,dup;
1352 /* note elt and prevlink could be combined, but the code is
1353 unreadable utterly if you do so. */
1354
1355 if (ISNULL(mtx)) return 1;
1356 if (mtx->order <1) return 0;
1357 real_mv = mtx_null_mark(mtx->order+1);
1358 mv = real_mv+1;
1359 if (ISNULL(real_mv)) return 1;
1360 real_ev = mtx_null_vector(mtx->order+1);
1361 ev = real_ev+1;
1362 if (ISNULL(real_ev)) {
1363 mtx_null_mark_release();
1364 return 1;
1365 }
1366 /* we have allocated arrays which include a -1 element to buy
1367 ourselves an awful convenient lot of safety. */
1368 lowmark=-1;
1369 highmark=-2;
1370 dup = 0;
1371 last_value_matrix = mtx;
1372 for (orgrow=0; orgrow < mtx->order; orgrow++) {
1373 elt = mtx->hdr.row[orgrow];
1374 prevlink = &(mtx->hdr.row[orgrow]);
1375 while (NOTNULL(elt)) {
1376 if (ISNULL(ev[elt->col])) {
1377 ev[elt->col] = elt; /* mark first elt found for this col */
1378 prevlink= &(elt->next.col); /* collect pointer to where we go next */
1379 elt = elt->next.col; /* go there */
1380 } else {
1381 /* elt is duplicate and must die */
1382 dup++;
1383 ev[elt->col]->value += elt->value; /* accumulate value */
1384 /* update lowmark, highmark . this is a debatable implementation. */
1385 /* for small mods on large matrices, this is sane */
1386 if (lowmark > -1) { /* not first mark */
1387 if (elt->col < lowmark && elt->col > -1) {
1388 lowmark = elt->col;
1389 }
1390 if (elt->col > highmark && elt->col < mtx->order) {
1391 highmark = elt->col;
1392 }
1393 } else { /* very first mark */
1394 if (elt->col > -1 && elt->col < mtx->order) {
1395 lowmark = highmark = elt->col;
1396 }
1397 }
1398 mv[elt->col] = 1; /* mark column as to be cleaned */
1399 delete_from_row(prevlink);
1400 elt = *prevlink;
1401 }
1402 }
1403 elt = mtx->hdr.row[orgrow];
1404 prevlink = &(mtx->hdr.row[orgrow]);
1405 while (NOTNULL(elt)) { /* renull the accumulator and trash 0s */
1406 ev[elt->col] = NULL; /* regardless, reset accum */
1407 if (elt->value != D_ZERO) {
1408 prevlink= &(elt->next.col); /* collect pointer to where we go next */
1409 elt = elt->next.col; /* go there */
1410 } else {
1411 /* this is still a debatable implementation. */
1412 if (lowmark > -1) { /* not first mark */
1413 if (elt->col < lowmark && elt->col > -1) {
1414 lowmark = elt->col;
1415 }
1416 if (elt->col > highmark && elt->col < mtx->order) {
1417 highmark = elt->col;
1418 }
1419 } else { /* very first mark */
1420 if (elt->col > -1 && elt->col < mtx->order) {
1421 lowmark = highmark = elt->col;
1422 }
1423 }
1424 mv[elt->col] = 1; /* mark column as to be cleaned */
1425 delete_from_row(prevlink);
1426 elt = *prevlink;
1427 }
1428 }
1429 }
1430 for (orgcol = lowmark; orgcol <= highmark; orgcol++) {
1431 if (mv[orgcol]) {
1432 clean_col(mtx,orgcol); /* scrap dups and 0s */
1433 }
1434 }
1435 mtx_null_mark_release();
1436 mtx_null_vector_release();
1437 return -dup;
1438 }
1439
/**
 *** Delete every element of 'row' (CURRENT coordinates) whose value is
 *** exactly 0.0, unlinking each from its column list as well.
 **/
void mtx_del_zr_in_row(mtx_matrix_t mtx, int32 row)
{
  register int32 org;
  struct element_t **rlink, **clink;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  org = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org]);

  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  while( NOTNULL(*rlink) )
    if( (*rlink)->value == D_ZERO ) {
      /* find the column-list link of the doomed element */
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    } else
      rlink = &((*rlink)->next.col); /* nonzero: keep and advance */
}
1462
/**
 *** Delete every element of 'col' (CURRENT coordinates) whose value is
 *** exactly 0.0, unlinking each from its row list as well.
 *** Mirror of mtx_del_zr_in_row.
 **/
void mtx_del_zr_in_col(mtx_matrix_t mtx, int32 col)
{
  register int32 org;
  struct element_t **clink, **rlink;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  org = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org]);

  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  while( NOTNULL(*clink) )
    if( (*clink)->value == D_ZERO ) {
      /* find the row-list link of the doomed element */
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    } else
      clink = &((*clink)->next.row); /* nonzero: keep and advance */
}
1485
/**
 *** Delete every exactly-zero element in the CURRENT rows rng->low..rng->high.
 *** rng must be a real range: the mtx_ALL_ROWS sentinel is not handled here
 *** (rng->high is dereferenced unconditionally).
 **/
void mtx_del_zr_in_rowrange(mtx_matrix_t mtx, mtx_range_t *rng)
{
  register int32 org,row,rowhi, *toorg;
  struct element_t **rlink, **clink;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  rowhi=rng->high;
  toorg= mtx->perm.row.cur_to_org;
  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  for (row=rng->low; row <=rowhi; row++) {
    org = toorg[row];
    rlink = &(mtx->hdr.row[org]);

    while( NOTNULL(*rlink) ) {
      if( (*rlink)->value == D_ZERO ) {
        /* find the column-list link of the doomed element */
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col); /* nonzero: keep and advance */
      }
    }

  }
}
1515
/**
 *** Delete every exactly-zero element in the CURRENT cols rng->low..rng->high.
 *** rng must be a real range: the mtx_ALL_COLS sentinel is not handled here
 *** (rng->high is dereferenced unconditionally).  Mirror of the row version.
 **/
void mtx_del_zr_in_colrange(mtx_matrix_t mtx, mtx_range_t *rng)
{
  register int32 org,col,colhi, *toorg;
  struct element_t **clink, **rlink;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  colhi=rng->high;
  toorg= mtx->perm.col.cur_to_org;
  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  for (col=rng->low; col <=colhi; col++) {
    org = toorg[col];
    clink = &(mtx->hdr.col[org]);

    while( NOTNULL(*clink) ) {
      if( (*clink)->value == D_ZERO ) {
        /* find the row-list link of the doomed element */
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else {
        clink = &((*clink)->next.row); /* nonzero: keep and advance */
      }
    }

  }
}
1545
/**
 *** Move the elements of 'row' (CURRENT coordinates) whose current column is
 *** in rng (or all of them, for mtx_ALL_COLS) into vec, indexed by
 *** ORGANIZATIONAL column number, deleting them from the matrix.
 *** vec slots with no corresponding element are left untouched, so callers
 *** normally pre-zero vec.
 **/
void mtx_steal_org_row_vec(mtx_matrix_t mtx, int32 row,
                           real64 *vec, mtx_range_t *rng)
{
  struct element_t **rlink, **clink;
  int32 org_row;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      vec[(*rlink)->col] = (*rlink)->value; /* org column index */
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    }
  } else if( rng->high >= rng->low ) {
    int32 *tocur;
    tocur = mtx->perm.col.org_to_cur; /* range test is in CURRENT coords */
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,tocur[(*rlink)->col]) ) {
        vec[(*rlink)->col] = (*rlink)->value;
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col); /* out of range: keep and advance */
      }
    }
  }
}
1585
/**
 *** Move the elements of 'col' (CURRENT coordinates) whose current row is
 *** in rng (or all of them, for mtx_ALL_ROWS) into vec, indexed by
 *** ORGANIZATIONAL row number, deleting them from the matrix.
 *** vec slots with no corresponding element are left untouched.
 *** Mirror of mtx_steal_org_row_vec.
 **/
void mtx_steal_org_col_vec(mtx_matrix_t mtx, int32 col,
                           real64 *vec, mtx_range_t *rng)
{
  struct element_t **clink, **rlink;
  int32 org_col;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  org_col = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org_col]);
  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*clink) ) {
      vec[(*clink)->row] = (*clink)->value; /* org row index */
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    }
  } else if( rng->high >= rng->low ) {
    int32 *tocur;
    tocur = mtx->perm.row.org_to_cur; /* range test is in CURRENT coords */
    while( NOTNULL(*clink) ) {
      if( in_range(rng,tocur[(*clink)->row]) ) {
        vec[(*clink)->row] = (*clink)->value;
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else {
        clink = &((*clink)->next.row); /* out of range: keep and advance */
      }
    }
  }
}
1625
/**
 *** Move the elements of 'row' (CURRENT coordinates) whose current column is
 *** in rng (or all of them, for mtx_ALL_COLS) into vec, indexed by
 *** CURRENT column number, deleting them from the matrix.
 *** vec slots with no corresponding element are left untouched.
 **/
void mtx_steal_cur_row_vec(mtx_matrix_t mtx, int32 row,
                           real64 *vec, mtx_range_t *rng)
{
  struct element_t **rlink, **clink;
  int32 org_row, *tocur;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  tocur = mtx->perm.col.org_to_cur; /* used for both vec index and range test */
  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      vec[tocur[(*rlink)->col]] = (*rlink)->value; /* cur column index */
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,tocur[(*rlink)->col]) ) {
        vec[tocur[(*rlink)->col]] = (*rlink)->value;
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col); /* out of range: keep and advance */
      }
    }
  }
}
1664
/**
 *** Move the elements of 'col' (CURRENT coordinates) whose current row is
 *** in rng (or all of them, for mtx_ALL_ROWS) into vec, indexed by
 *** CURRENT row number, deleting them from the matrix.
 *** vec slots with no corresponding element are left untouched.
 *** Mirror of mtx_steal_cur_row_vec.
 **/
void mtx_steal_cur_col_vec(mtx_matrix_t mtx, int32 col,
                           real64 *vec, mtx_range_t *rng)
{
  struct element_t **clink, **rlink;
  int32 org_col, *tocur;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  tocur = mtx->perm.row.org_to_cur; /* used for both vec index and range test */
  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  org_col = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org_col]);
  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*clink) ) {
      vec[tocur[(*clink)->row]] = (*clink)->value; /* cur row index */
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*clink) ) {
      if( in_range(rng,tocur[(*clink)->row]) ) {
        vec[tocur[(*clink)->row]] = (*clink)->value;
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else {
        clink = &((*clink)->next.row); /* out of range: keep and advance */
      }
    }
  }
}
1703
1704 #ifdef DELETE_THIS_UNUSED_FUNCTION
1705 /* Little function to enlarge the capacity of a sparse to the len given.
1706 * New memory allocated, if any, is not initialized in any way.
1707 * The pointer given is not itself changed, only its data.
1708 * This function in no way shrinks the data in a sparse.
1709 * (exception: buggy realloc implementations may shrink it, ack!)
1710 * Data/idata values up to the old capacity (ret->cap) are preserved.
1711 */
static int enlarge_sparse(mtx_sparse_t *ret, int32 len)
{
  /* NOTE: currently compiled out (see the DELETE_THIS_UNUSED_FUNCTION guard).
     Returns 1 on error, 0 on success (including the no-op case). */
  int32 *inew;
  real64 *dnew;
  if (ISNULL(ret)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) enlarge_sparse called with NULL.\n");
    return 1;
  }
  if (len <= ret->cap || len <1) return 0; /* already big enough */

  /* grow (or first-allocate) the index array */
  if (ret->idata == NULL) {
    inew = ASC_NEW_ARRAY(int32,len);
  } else {
    inew = (int32 *)ascrealloc(ret->idata,sizeof(int32)*len);
  }
  if (ISNULL(inew)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) enlarge_sparse\n");
    FPRINTF(g_mtxerr," Insufficient memory.\n");
    return 1;
  }
  ret->idata = inew; /* dnew can still fail without losing inew memory. */

  /* grow (or first-allocate) the value array */
  if (ret->data == NULL) {
    dnew = ASC_NEW_ARRAY(real64,len);
  } else {
    dnew = (real64 *)ascrealloc(ret->data,sizeof(real64)*len);
  }
  if (ISNULL(dnew)) {
    /* cap deliberately not updated: idata grew but the sparse is unchanged
       logically, since cap still reflects the usable paired length */
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) enlarge_sparse\n");
    FPRINTF(g_mtxerr," Insufficient memory.\n");
    return 1;
  }
  ret->data = dnew; /* we succeeded */
  ret->cap = len;
  return 0;
}
1748 #endif /* DELETE_THIS_UNUSED_FUNCTION */
1749
1750
1751 /* going to try to make steal also handle sparse creation ...*/
1752 /* don't you dare, whoever you are! */
/**
 *** Move the elements of 'row' (CURRENT coordinates) whose current column is
 *** in rng (or all, for mtx_ALL_COLS) into sp, deleting them from the matrix.
 *** sp->idata receives ORGANIZATIONAL column numbers, sp->data the values,
 *** and sp->len the count.  sp must be preallocated with sufficient capacity;
 *** mtx_CREATE_SPARSE is not supported.  Returns TRUE on error, FALSE on
 *** success (sp->len is zeroed on the capacity error).
 **/
boolean mtx_steal_org_row_sparse(mtx_matrix_t mtx, int32 row,
                                 mtx_sparse_t *sp, mtx_range_t *rng)
{
  struct element_t **rlink, **clink;
  int32 org_row;
  int32 len,k;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return TRUE;
#endif
  if (sp == mtx_CREATE_SPARSE) {
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_org_row_sparse called with\n");
    FPRINTF(g_mtxerr," mtx_CREATE_SPARSE. Not supported.\n");
    return TRUE;
  }
  /* worst-case number of elements that could be stolen */
  if (rng == mtx_ALL_COLS) {
    len = mtx->order;
  } else {
    len = rng->high - rng->low +1;
  }
  if (sp->cap < len) {
    sp->len = 0;
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_org_row_sparse called with\n");
    FPRINTF(g_mtxerr," sparse of insufficient capacity.\n");
    return TRUE;
  }

  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  k = 0;

  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      sp->idata[k] = (*rlink)->col; /* org column index */
      sp->data[k] = (*rlink)->value;
      k++;
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    }
  } else if( rng->high >= rng->low ) {
    int32 *tocur;
    tocur = mtx->perm.col.org_to_cur; /* range test is in CURRENT coords */
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,tocur[(*rlink)->col]) ) {
        sp->idata[k] = (*rlink)->col;
        sp->data[k] = (*rlink)->value;
        k++;
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col); /* out of range: keep and advance */
      }
    }
  }
  sp->len = k;
  return FALSE;
}
1817
/**
 *** Move the elements of 'col' (CURRENT coordinates) whose current row is
 *** in rng (or all, for mtx_ALL_ROWS) into sp, deleting them from the matrix.
 *** sp->idata receives ORGANIZATIONAL row numbers, sp->data the values,
 *** and sp->len the count.  sp must be preallocated with sufficient capacity;
 *** mtx_CREATE_SPARSE is not supported.  Returns TRUE on error, FALSE on
 *** success.  Mirror of mtx_steal_org_row_sparse.
 **/
boolean mtx_steal_org_col_sparse(mtx_matrix_t mtx, int32 col,
                                 mtx_sparse_t *sp, mtx_range_t *rng)
{
  struct element_t **clink, **rlink;
  int32 org_col;
  int32 len,k;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return TRUE;
#endif
  if (sp == mtx_CREATE_SPARSE) {
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_org_col_sparse called with\n");
    FPRINTF(g_mtxerr," mtx_CREATE_SPARSE. Not supported.\n");
    return TRUE;
  }
  /* worst-case number of elements that could be stolen */
  if (rng == mtx_ALL_ROWS) {
    len = mtx->order;
  } else {
    len = rng->high - rng->low +1;
  }
  if (sp->cap < len) {
    sp->len = 0;
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_org_col_sparse called with\n");
    FPRINTF(g_mtxerr," sparse of insufficient capacity.\n");
    return TRUE;
  }

  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  org_col = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org_col]);
  k = 0;

  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*clink) ) {
      sp->idata[k] = (*clink)->row; /* org row index */
      sp->data[k] = (*clink)->value;
      k++;
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    }
  } else if( rng->high >= rng->low ) {
    int32 *tocur;
    tocur = mtx->perm.row.org_to_cur; /* range test is in CURRENT coords */
    while( NOTNULL(*clink) ) {
      if( in_range(rng,tocur[(*clink)->row]) ) {
        sp->idata[k] = (*clink)->row;
        sp->data[k] = (*clink)->value;
        k++;
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else {
        clink = &((*clink)->next.row); /* out of range: keep and advance */
      }
    }
  }
  sp->len = k;
  return FALSE;
}
1882
/**
 *** Move the elements of 'row' (CURRENT coordinates) whose current column is
 *** in rng (or all, for mtx_ALL_COLS) into sp, deleting them from the matrix.
 *** sp->idata receives CURRENT column numbers, sp->data the values, and
 *** sp->len the count.  sp must be preallocated with sufficient capacity;
 *** mtx_CREATE_SPARSE is not supported.  Returns TRUE on error, FALSE on
 *** success (sp->len is zeroed on the capacity error).
 **/
boolean mtx_steal_cur_row_sparse(mtx_matrix_t mtx, int32 row,
                                 mtx_sparse_t *sp, mtx_range_t *rng)
{
  struct element_t **rlink, **clink;
  int32 org_row, *tocur;
  int32 len,k;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return TRUE;
#endif
  if (sp == mtx_CREATE_SPARSE) {
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_cur_row_sparse called with\n");
    FPRINTF(g_mtxerr," mtx_CREATE_SPARSE. Not supported.\n");
    return TRUE;
  }
  /* worst-case number of elements that could be stolen */
  if (rng == mtx_ALL_COLS) {
    len = mtx->order;
  } else {
    len = rng->high - rng->low +1;
  }
  if (sp->cap < len) {
    sp->len = 0;
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_cur_row_sparse called with\n");
    FPRINTF(g_mtxerr," sparse of insufficient capacity.\n");
    return TRUE;
  }

  tocur = mtx->perm.col.org_to_cur; /* used for both idata and range test */
  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  k = 0;

  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      sp->idata[k] = tocur[(*rlink)->col]; /* cur column index */
      sp->data[k] = (*rlink)->value;
      k++;
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,tocur[(*rlink)->col]) ) {
        sp->idata[k] = tocur[(*rlink)->col];
        sp->data[k] = (*rlink)->value;
        k++;
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col); /* out of range: keep and advance */
      }
    }
  }
  sp->len = k;
  return FALSE;
}
1946
/**
 *** Move the elements of 'col' (CURRENT coordinates) whose current row is
 *** in rng (or all, for mtx_ALL_ROWS) into sp, deleting them from the matrix.
 *** sp->idata receives CURRENT row numbers, sp->data the values, and
 *** sp->len the count.  sp must be preallocated with sufficient capacity;
 *** mtx_CREATE_SPARSE is not supported.  Returns TRUE on error, FALSE on
 *** success.  Mirror of mtx_steal_cur_row_sparse.
 **/
boolean mtx_steal_cur_col_sparse(mtx_matrix_t mtx, int32 col,
                                 mtx_sparse_t *sp, mtx_range_t *rng)
{
  struct element_t **clink, **rlink;
  int32 org_col, *tocur;
  int32 len,k;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return TRUE;
#endif
  if (sp == mtx_CREATE_SPARSE) {
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_cur_col_sparse called with\n");
    FPRINTF(g_mtxerr," mtx_CREATE_SPARSE. Not supported.\n");
    return TRUE;
  }
  /* worst-case number of elements that could be stolen */
  if (rng == mtx_ALL_ROWS) {
    len = mtx->order;
  } else {
    len = rng->high - rng->low +1;
  }
  if (sp->cap < len) {
    sp->len = 0;
    FPRINTF(g_mtxerr,"ERROR: (mtx.c) mtx_steal_cur_col_sparse called with\n");
    FPRINTF(g_mtxerr," sparse of insufficient capacity.\n");
    return TRUE;
  }

  tocur = mtx->perm.row.org_to_cur; /* used for both idata and range test */
  last_value_matrix = mtx; /* every deleting routine sets this before unlinking */
  org_col = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org_col]);
  k = 0;

  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*clink) ) {
      sp->idata[k] = tocur[(*clink)->row]; /* cur row index */
      sp->data[k] = (*clink)->value;
      k++;
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*clink) ) {
      if( in_range(rng,tocur[(*clink)->row]) ) {
        sp->idata[k] = tocur[(*clink)->row];
        sp->data[k] = (*clink)->value;
        k++;
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else {
        clink = &((*clink)->next.row); /* out of range: keep and advance */
      }
    }
  }
  sp->len = k;
  return FALSE;
}
2010
2011 void mtx_fill_org_row_vec(mtx_matrix_t mtx, int32 row,
2012 real64 *vec, mtx_range_t *rng)
2013 {
2014 int32 org_row,highcol, *toorg;
2015 register int32 org_col;
2016 register real64 value;
2017
2018 #if MTX_DEBUG
2019 if(!mtx_check_matrix(mtx)) return;
2020 #endif
2021
2022 org_row = mtx->perm.row.cur_to_org[row];
2023 if( rng == mtx_ALL_COLS ) {
2024 highcol=mtx->order;
2025 for( org_col=0 ; org_col <highcol ; org_col++ ) {
2026 if ((value=vec[org_col]) != D_ZERO)
2027 mtx_create_element_value(mtx,org_row,org_col,value);
2028 }
2029 } else {
2030 register int32 cur_col;
2031
2032 toorg = mtx->perm.col.cur_to_org;
2033 highcol= rng->high;
2034 for ( cur_col=rng->low; cur_col<=highcol; cur_col++) {
2035 if ((value=vec[(org_col=toorg[cur_col])]) != D_ZERO)
2036 mtx_create_element_value(mtx,org_row,org_col,value);
2037 }
2038 }
2039 }
2040
2041 void mtx_fill_org_col_vec(mtx_matrix_t mtx, int32 col,
2042 real64 *vec, mtx_range_t *rng)
2043 {
2044 int32 org_col,highrow, *toorg;
2045 register int32 org_row;
2046 register real64 value;
2047
2048 #if MTX_DEBUG
2049 if(!mtx_check_matrix(mtx)) return;
2050 #endif
2051
2052 org_col = mtx->perm.col.cur_to_org[col];
2053 if( rng == mtx_ALL_ROWS ) {
2054 highrow=mtx->order;
2055 for( org_row=0 ; org_row <highrow ; org_row++ ) {
2056 if ((value=vec[org_row]) != D_ZERO)
2057 mtx_create_element_value(mtx,org_row,org_col,value);
2058 }
2059 } else {
2060 register int32 cur_row;
2061
2062 toorg = mtx->perm.row.cur_to_org;
2063 highrow= rng->high;
2064 for ( cur_row=rng->low; cur_row<=highrow; cur_row++) {
2065 if ((value=vec[(org_row=toorg[cur_row])]) != D_ZERO)
2066 mtx_create_element_value(mtx,org_row,org_col,value);
2067 }
2068 }
2069 }
2070
2071 void mtx_fill_cur_row_vec(mtx_matrix_t mtx, int32 row,
2072 real64 *vec, mtx_range_t *rng)
2073 {
2074 int32 org_row,highcol,lowcol, *toorg;
2075 register int32 cur_col;
2076 register real64 value;
2077
2078 #if MTX_DEBUG
2079 if(!mtx_check_matrix(mtx)) return;
2080 #endif
2081
2082 org_row = mtx->perm.row.cur_to_org[row];
2083 toorg=mtx->perm.col.cur_to_org;
2084 if( rng == mtx_ALL_COLS ) {
2085 highcol = mtx->order-1;
2086 lowcol = 0;
2087 } else {
2088 highcol= rng->high;
2089 lowcol=rng->low;
2090 }
2091 for( cur_col=lowcol ; cur_col <= highcol ; cur_col++ ) {
2092 if ((value=vec[cur_col]) != D_ZERO)
2093 mtx_create_element_value(mtx,org_row,toorg[cur_col],value);
2094 }
2095 }
2096
2097 void mtx_fill_cur_col_vec(mtx_matrix_t mtx, int32 col,
2098 real64 *vec, mtx_range_t *rng)
2099 {
2100 int32 org_col,highrow,lowrow, *toorg;
2101 register int32 cur_row;
2102 register real64 value;
2103
2104 #if MTX_DEBUG
2105 if(!mtx_check_matrix(mtx)) return;
2106 #endif
2107
2108 org_col = mtx->perm.col.cur_to_org[col];
2109 toorg=mtx->perm.row.cur_to_org;
2110 if( rng == mtx_ALL_ROWS ) {
2111 highrow=mtx->order-1;
2112 lowrow=0;
2113 } else {
2114 highrow= rng->high;
2115 lowrow=rng->low;
2116 }
2117 for( cur_row=lowrow ; cur_row <= highrow ; cur_row++ ) {
2118 if ((value=vec[cur_row]) != D_ZERO)
2119 mtx_create_element_value(mtx,toorg[cur_row],org_col,value);
2120 }
2121 }
2122
2123 void mtx_dropfill_cur_col_vec(mtx_matrix_t mtx, int32 col,
2124 real64 *vec, mtx_range_t *rng,
2125 real64 tol)
2126 {
2127 int32 org_col,highrow,lowrow, *toorg;
2128 register int32 cur_row;
2129 register real64 value;
2130
2131 if (tol==0.0) {
2132 mtx_fill_cur_col_vec(mtx,col,vec,rng);
2133 return;
2134 }
2135
2136 #if MTX_DEBUG
2137 if(!mtx_check_matrix(mtx)) return;
2138 #endif
2139
2140 org_col = mtx->perm.col.cur_to_org[col];
2141 toorg=mtx->perm.row.cur_to_org;
2142 if( rng == mtx_ALL_ROWS ) {
2143 highrow=mtx->order-1;
2144 lowrow=0;
2145 } else {
2146 highrow= rng->high;
2147 lowrow=rng->low;
2148 }
2149 for( cur_row=lowrow ; cur_row <= highrow ; cur_row++ ) {
2150 if (fabs(value=vec[cur_row]) > tol)
2151 mtx_create_element_value(mtx,toorg[cur_row],org_col,value);
2152 }
2153 }
2154
2155 void mtx_dropfill_cur_row_vec(mtx_matrix_t mtx, int32 row,
2156 real64 *vec, mtx_range_t *rng,
2157 real64 tol)
2158 {
2159 int32 org_row,highcol,lowcol, *toorg;
2160 register int32 cur_col;
2161 register real64 value;
2162
2163 if (tol==0.0) {
2164 mtx_fill_cur_row_vec(mtx,row,vec,rng);
2165 return;
2166 }
2167
2168 #if MTX_DEBUG
2169 if(!mtx_check_matrix(mtx)) return;
2170 #endif
2171
2172 org_row = mtx->perm.row.cur_to_org[row];
2173 toorg=mtx->perm.col.cur_to_org;
2174 if( rng == mtx_ALL_COLS ) {
2175 highcol = mtx->order-1;
2176 lowcol = 0;
2177 } else {
2178 highcol= rng->high;
2179 lowcol=rng->low;
2180 }
2181 for( cur_col=lowcol ; cur_col <= highcol ; cur_col++ ) {
2182 if (fabs(value=vec[cur_col]) > tol)
2183 mtx_create_element_value(mtx,org_row,toorg[cur_col],value);
2184 }
2185 }
2186
/* Fills cur row `row` of mtx from sparse vector sp, whose idata[] are
 * org column indices.  Zero data entries are skipped; debug builds also
 * discard out-of-range indices.  Elements are created unconditionally,
 * so the row is presumably expected to be empty in those positions
 * (NOTE(review): mtx_create_element_value semantics not visible here). */
void mtx_fill_org_row_sparse(mtx_matrix_t mtx, int32 row,
                             const mtx_sparse_t *sp)
{
  int32 orgrow,i;
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
#endif
  orgrow = mtx->perm.row.cur_to_org[row];
  for (i=0; i < sp->len; i++) {
    if (sp->data[i] != D_ZERO
#if MTX_DEBUG
        /* debug builds silently drop indices outside 0..order-1 */
        && sp->idata[i] >=0 && sp->idata[i] < mtx->order
#endif
       ) {
      mtx_create_element_value(mtx,orgrow,sp->idata[i],sp->data[i]);
    }
  }
}
2205
/* Fills cur column col of mtx from sparse vector sp, whose idata[] are
 * org row indices.  Zero data entries are skipped; debug builds also
 * discard out-of-range indices. */
void mtx_fill_org_col_sparse(mtx_matrix_t mtx, int32 col,
                             const mtx_sparse_t *sp)
{
  int32 orgcol,i;
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
#endif
  orgcol = mtx->perm.col.cur_to_org[col];
  for (i=0; i < sp->len; i++) {
    if (sp->data[i] != D_ZERO
#if MTX_DEBUG
        /* debug builds silently drop indices outside 0..order-1 */
        && sp->idata[i] >=0 && sp->idata[i] < mtx->order
#endif
       ) {
      mtx_create_element_value(mtx,sp->idata[i],orgcol,sp->data[i]);
    }
  }
}
2224
/* Fills cur row `row` of mtx from sparse vector sp, whose idata[] are
 * CUR column indices (translated here through perm.col.cur_to_org).
 * Zero data entries are skipped; debug builds also discard
 * out-of-range indices. */
void mtx_fill_cur_row_sparse(mtx_matrix_t mtx, int32 row,
                             const mtx_sparse_t *sp)
{
  int32 orgrow,i;
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
#endif
  orgrow = mtx->perm.row.cur_to_org[row];
  for (i=0; i < sp->len; i++) {
    if (
#if MTX_DEBUG
        sp->idata[i] >=0 && sp->idata[i] < mtx->order &&
#endif
        sp->data[i] != D_ZERO) {
      mtx_create_element_value(mtx,orgrow,
                               mtx->perm.col.cur_to_org[sp->idata[i]],
                               sp->data[i]);
    }
  }
}
2245
2246 void mtx_fill_cur_col_sparse(mtx_matrix_t mtx, int32 col,
2247 const mtx_sparse_t *sp)
2248 {
2249 int32 orgcol,i;
2250 #if MTX_DEBUG
2251 if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
2252 #endif
2253 orgcol = mtx->perm.col.cur_to_org[col];
2254 for (i=0; i < sp->len; i++) {
2255 if (
2256 #if MTX_DEBUG
2257 sp->idata[i] >=0 && sp->idata[i] < mtx->order &&
2258 #endif
2259 sp->data[i] != D_ZERO) {
2260 mtx_create_element_value(mtx,
2261 mtx->perm.col.cur_to_org[sp->idata[i]],
2262 orgcol,
2263 sp->data[i]);
2264 }
2265 }
2266 }
2267
2268 void mtx_mult_row(mtx_matrix_t mtx, int32 row, real64 factor, mtx_range_t *rng)
2269 {
2270 struct element_t *elt;
2271 int32 *tocur;
2272
2273 if( factor == D_ZERO ) {
2274 mtx_clear_row(mtx,row,rng);
2275 return;
2276 }
2277
2278 if (factor == D_ONE) return;
2279 #if MTX_DEBUG
2280 if(!mtx_check_matrix(mtx)) return;
2281 #endif
2282
2283 tocur = mtx->perm.col.org_to_cur;
2284 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[row]];
2285 if( rng == mtx_ALL_COLS )
2286 for( ; NOTNULL(elt); elt = elt->next.col ) elt->value *= factor;
2287 else if( rng->high >= rng->low )
2288 for( ; NOTNULL(elt); elt = elt->next.col )
2289 if( in_range(rng,tocur[elt->col]) ) elt->value *= factor;
2290 }
2291
2292 void mtx_mult_col(mtx_matrix_t mtx, int32 col, real64 factor, mtx_range_t *rng)
2293 {
2294 struct element_t *elt;
2295 int32 *tocur;
2296
2297 if( factor == D_ZERO ) {
2298 mtx_clear_col(mtx,col,rng);
2299 return;
2300 }
2301 if (factor == D_ONE) return;
2302
2303 #if MTX_DEBUG
2304 if(!mtx_check_matrix(mtx)) return;
2305 #endif
2306
2307 tocur = mtx->perm.row.org_to_cur;
2308 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[col]];
2309 if( rng == mtx_ALL_ROWS )
2310 for( ; NOTNULL(elt); elt = elt->next.row ) elt->value *= factor;
2311 else if( rng->high >= rng->low )
2312 for( ; NOTNULL(elt); elt = elt->next.row )
2313 if( in_range(rng,tocur[elt->row]) ) elt->value *= factor;
2314 }
2315
2316 void mtx_mult_row_zero(mtx_matrix_t mtx, int32 row, mtx_range_t *rng)
2317 {
2318 struct element_t *elt;
2319 int32 *tocur;
2320
2321 #if MTX_DEBUG
2322 if(!mtx_check_matrix(mtx)) return;
2323 #endif
2324
2325 tocur = mtx->perm.col.org_to_cur;
2326 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[row]];
2327 if( rng == mtx_ALL_COLS )
2328 for( ; NOTNULL(elt); elt = elt->next.col ) elt->value = D_ZERO;
2329 else if( rng->high >= rng->low )
2330 for( ; NOTNULL(elt); elt = elt->next.col )
2331 if( in_range(rng,tocur[elt->col]) ) elt->value = D_ZERO;
2332 }
2333
2334 void mtx_mult_col_zero(mtx_matrix_t mtx, int32 col, mtx_range_t *rng)
2335 {
2336 struct element_t *elt;
2337 int32 *tocur;
2338
2339 #if MTX_DEBUG
2340 if(!mtx_check_matrix(mtx)) return;
2341 #endif
2342
2343 tocur = mtx->perm.row.org_to_cur;
2344 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[col]];
2345 if( rng == mtx_ALL_ROWS )
2346 for( ; NOTNULL(elt); elt = elt->next.row ) elt->value = D_ZERO;
2347 else if( rng->high >= rng->low )
2348 for( ; NOTNULL(elt); elt = elt->next.row )
2349 if( in_range(rng,tocur[elt->row]) ) elt->value = D_ZERO;
2350 }
2351
/* Adds factor times cur row s_cur into cur row t_cur, over all cols
 * (rng == mtx_ALL_COLS) or the cur col range rng.  The target row is
 * expanded into an org-col-indexed pointer array so each source element
 * either increments an existing target element or creates a new one.
 * The expansion array comes from the shared null vector and must be
 * rezeroed (mtx_renull_using_row) and released before returning. */
void mtx_add_row(mtx_matrix_t mtx, int32 s_cur, int32 t_cur, real64 factor,
                 mtx_range_t *rng)
{
  register int32 org_col;
  int32 t_org,*tocur;
  struct element_t **arr,*elt;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif

  t_org = mtx->perm.row.cur_to_org[t_cur];
  if( rng == mtx_ALL_COLS ) {
    arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
    elt = mtx->hdr.row[mtx->perm.row.cur_to_org[s_cur]];
    for( ; NOTNULL(elt); elt = elt->next.col ) {
      if( ISNULL(arr[(org_col=elt->col)]) ) {
        /* no target element at this org col yet: create one */
        arr[org_col] =
          mtx_create_element_value(mtx,t_org,org_col,(factor * elt->value));
      } else {
        arr[org_col]->value += factor * elt->value;
      }
    }
    mtx_renull_using_row(mtx,t_org,arr);
  } else if( rng->high >= rng->low ) {
    register int32 cur_col;

    tocur = mtx->perm.col.org_to_cur;
    arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
    elt = mtx->hdr.row[mtx->perm.row.cur_to_org[s_cur]];
    for( ; NOTNULL(elt); elt = elt->next.col ) {
      cur_col=tocur[(org_col=elt->col)];
      if( in_range(rng,cur_col) ) {
        if( NOTNULL(arr[org_col]) ) {
          arr[org_col]->value += factor * elt->value;
        } else {
          arr[org_col] =
            mtx_create_element_value(mtx,t_org,org_col,
                                     (factor * elt->value));
          /* hit rate usually lower */
        }
      }
    }
    mtx_renull_using_row(mtx,t_org,arr);
  }
  /* release the shared expansion array in every case */
  mtx_null_vector_release();
}
2399
/* Adds factor times cur column s_cur into cur column t_cur, over all
 * rows (rng == mtx_ALL_ROWS) or the cur row range rng.  Mirror image
 * of mtx_add_row: the target col is expanded into an org-row-indexed
 * pointer array, each source element increments or creates a target
 * element, and the shared array is rezeroed and released. */
void mtx_add_col(mtx_matrix_t mtx, int32 s_cur, int32 t_cur, real64 factor,
                 mtx_range_t *rng)
{
  int32 t_org,*tocur;
  struct element_t **arr,*elt;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif

  if( rng == mtx_ALL_ROWS ) {
    t_org = mtx->perm.col.cur_to_org[t_cur];
    arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
    elt = mtx->hdr.col[mtx->perm.col.cur_to_org[s_cur]];
    for( ; NOTNULL(elt); elt = elt->next.row )
      if( ISNULL(arr[elt->row]) ) {
        /* no target element at this org row yet: create one */
        arr[elt->row] =
          mtx_create_element_value(mtx,elt->row,t_org,
                                   (factor * elt->value));
      } else arr[elt->row]->value += factor * elt->value;
    mtx_renull_using_col(mtx,t_org,arr);
  } else if( rng->high >= rng->low ) {
    tocur = mtx->perm.row.org_to_cur;
    t_org = mtx->perm.col.cur_to_org[t_cur];
    arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
    elt = mtx->hdr.col[mtx->perm.col.cur_to_org[s_cur]];
    for( ; NOTNULL(elt); elt = elt->next.row ) {
      if( in_range(rng,tocur[elt->row]) ) {
        if( NOTNULL(arr[elt->row]) ) {
          arr[elt->row]->value += factor * elt->value;
        } else {
          arr[elt->row] =
            mtx_create_element_value(mtx,elt->row,t_org,
                                     (factor * elt->value));
        }
      }
    }
    mtx_renull_using_col(mtx,t_org,arr);
  }
  /* release the shared expansion array in every case */
  mtx_null_vector_release();
}
2441
2442 static struct element_t **mtx_expand_row_series( mtx_matrix_t mtx, int32 org)
2443 /**
2444 *** Expands the given row into an array of pointers, indexed on original
2445 *** col number. The array is obtained from mtx_null_row_vector().
2446 *** Be sure to call mtx_null_row_vector_release() when done with the vector and
2447 *** you have rezeroed it.
2448 **/
2449 {
2450 struct element_t **arr;
2451 struct element_t *elt;
2452
2453 arr = mtx_null_row_vector(mtx->order);
2454 for( elt=mtx->hdr.row[org]; NOTNULL(elt); elt=elt->next.col )
2455 arr[elt->col] = elt;
2456 return(arr);
2457 }
2458
2459 static struct element_t **mtx_expand_col_series(mtx_matrix_t mtx, int32 org)
2460 /**
2461 *** Expands the given col into an array of pointers, indexed on original
2462 *** row number. The array is obtained from mtx_null_col_vector().
2463 *** Be sure to call mtx_null_col_vector_release() when done with the vector and
2464 *** you have rezeroed it.
2465 **/
2466 {
2467 struct element_t **arr;
2468 struct element_t *elt;
2469
2470 arr = mtx_null_col_vector(mtx->order);
2471 for( elt = mtx->hdr.col[org] ; NOTNULL(elt) ; elt = elt->next.row )
2472 arr[elt->row] = elt;
2473 return(arr);
2474 }
2475
/* State for the grab/add/release protocol of the row/col series
 * operations: at most one target row (rsdata) and one target col
 * (csdata) may be grabbed at a time. */
struct add_series_data {
  mtx_matrix_t mtx; /* matrix we're operating on */
  struct element_t **arr; /* target row/col expansion array */
  int32 *tocur; /* col/row permutation vector */
  int32 t_org; /* org row/col which is expanded */
};

static struct add_series_data
  rsdata={NULL,NULL,NULL,mtx_NONE},  /* row series state: initially idle */
  csdata={NULL,NULL,NULL,mtx_NONE};  /* col series state: initially idle */
2486
/* Releases any null-vector expansion arrays still held by the row/col
 * series state records and resets both records to the idle state.
 * An array is only released when its record looks fully grabbed
 * (matrix, perm vector, nonnegative org index and array all present). */
static void add_series_data_release(void)
{
  /* if apparently sane, and have array release the arr */
  if (NOTNULL(rsdata.mtx) && NOTNULL(rsdata.tocur)
      && rsdata.t_org >= 0 && NOTNULL(rsdata.arr)) {
    mtx_null_row_vector_release();
  }
  if (NOTNULL(csdata.mtx) && NOTNULL(csdata.tocur)
      && csdata.t_org >= 0 && NOTNULL(csdata.arr)) {
    mtx_null_col_vector_release();
  }
  /* reinit data */

  rsdata.mtx = NULL;
  rsdata.arr = NULL;
  rsdata.tocur = NULL;
  rsdata.t_org = mtx_NONE;

  csdata.mtx = NULL;
  csdata.arr = NULL;
  csdata.tocur = NULL;
  csdata.t_org = mtx_NONE;
}
2510
2511 void mtx_add_row_series_init(mtx_matrix_t mtx,int32 t_cur,boolean use)
2512 {
2513
2514 #if MTX_DEBUG
2515 if(!mtx_check_matrix(mtx)) return;
2516 #endif
2517
2518 if ( !(rsdata.mtx) ) {
2519 /* this is a grabbing call due to memory reuse */
2520 if ( mtx && (t_cur >= 0) && (t_cur < mtx->order) ) {
2521 rsdata.mtx = mtx;
2522 rsdata.tocur = mtx->perm.col.org_to_cur;
2523 rsdata.t_org = mtx->perm.row.cur_to_org[t_cur];
2524 rsdata.arr = mtx_expand_row_series(mtx,rsdata.t_org);
2525 } else {
2526 FPRINTF(g_mtxerr,"ERROR: mtx_add_row_series_init called\n");
2527 FPRINTF(g_mtxerr," for grab with invalid column or mtx.\n");
2528 FPRINTF(g_mtxerr," col number %d.\n",t_cur);
2529 }
2530 return;
2531 } else {
2532 /* this is supposed to be a releasing call */
2533 if (t_cur != mtx_NONE) {
2534 FPRINTF(g_mtxerr,"ERROR: mtx_add_row_series_init called for\n");
2535 FPRINTF(g_mtxerr," release without mtx_NONE.\n");
2536 return;
2537 }
2538 if (mtx != rsdata.mtx) {
2539 FPRINTF(g_mtxerr,"ERROR: mtx_add_row_series_init called for\n");
2540 FPRINTF(g_mtxerr," release with ungrabbed matrix.\n");
2541 return;
2542 }
2543 if (use) {
2544 mtx_renull_using_row(rsdata.mtx, rsdata.t_org, rsdata.arr);
2545 } else {
2546 mtx_renull_all(rsdata.mtx, rsdata.arr);
2547 }
2548 mtx_null_row_vector_release();
2549 rsdata.mtx = NULL;
2550 rsdata.arr = NULL;
2551 rsdata.tocur = NULL;
2552 rsdata.t_org = mtx_NONE;
2553 }
2554 }
2555
/* Adds factor times cur row s_cur of the grabbed matrix into the row
 * grabbed by mtx_add_row_series_init, over all cols or the cur col
 * range rng.  Errors to g_mtxerr and returns if no row is grabbed. */
void mtx_add_row_series(int32 s_cur,real64 factor,mtx_range_t *rng)
{
  register int32 org_col;
  int32 t_org,*tocur;
  struct element_t **arr,*elt;
  mtx_matrix_t mtx;

  if ( !(rsdata.mtx) ) {
    FPRINTF(g_mtxerr,"ERROR: mtx_add_row_series called for\n");
    FPRINTF(g_mtxerr,"       without grabbing target row first.\n");
    return;
  }
  /* pull the grabbed state into locals */
  mtx = rsdata.mtx;
  arr = rsdata.arr;
  tocur = rsdata.tocur;
  t_org = rsdata.t_org;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif

  elt = mtx->hdr.row[mtx->perm.row.cur_to_org[s_cur]];
  if( rng == mtx_ALL_COLS ) {
    for( ; NOTNULL(elt); elt = elt->next.col ) {
      if( ISNULL(arr[(org_col=elt->col)]) ) {
        /* no target element at this org col yet: create one */
        arr[org_col] =
          mtx_create_element_value(mtx,t_org,org_col,(factor * elt->value));
      } else {
        arr[org_col]->value += factor * elt->value;
      }
    }
    return;
  }
  /* fast_in_range is a 10% winner on the alpha, and should be even more
     on the other platforms. */
  if( rng->high >= rng->low ) {
    register int32 cur_col, lo,hi;
    lo=rng->low; hi= rng->high;
    for( ; NOTNULL(elt); elt = elt->next.col ) {
      cur_col=tocur[(org_col=elt->col)];
      if( fast_in_range(lo,hi,cur_col) ) {
        if( NOTNULL(arr[org_col]) ) {
          arr[org_col]->value += factor * elt->value;
        } else {
          arr[org_col] =
            mtx_create_element_value(mtx,t_org,org_col,(factor*elt->value));
        }
      }
    }
  }
}
2607
2608 void mtx_add_col_series_init(mtx_matrix_t mtx,int32 t_cur,boolean use)
2609 {
2610
2611 #if MTX_DEBUG
2612 if(!mtx_check_matrix(mtx)) return;
2613 #endif
2614
2615 if ( !(csdata.mtx) ) {
2616 /* this is a grabbing call */
2617 if ( mtx && (t_cur >= 0) && (t_cur < mtx->order) ) {
2618 csdata.mtx = mtx;
2619 csdata.tocur = mtx->perm.row.org_to_cur;
2620 csdata.t_org = mtx->perm.col.cur_to_org[t_cur];
2621 csdata.arr = mtx_expand_col_series(mtx,csdata.t_org);
2622 } else {
2623 FPRINTF(g_mtxerr,"ERROR: mtx_add_col_series_init called\n");
2624 FPRINTF(g_mtxerr," for grab with invalid row or mtx.\n");
2625 FPRINTF(g_mtxerr," row number %d.\n",t_cur);
2626 }
2627 return;
2628 } else {
2629 /* this is supposed to be a releasing call */
2630 if (t_cur != mtx_NONE) {
2631 FPRINTF(g_mtxerr,"ERROR: mtx_add_col_series_init called for\n");
2632 FPRINTF(g_mtxerr," release without mtx_NONE.\n");
2633 return;
2634 }
2635 if (mtx != csdata.mtx) {
2636 FPRINTF(g_mtxerr,"ERROR: mtx_add_col_series_init called for\n");
2637 FPRINTF(g_mtxerr," release with ungrabbed matrix.\n");
2638 return;
2639 }
2640 if (use) {
2641 mtx_renull_using_col(csdata.mtx, csdata.t_org, csdata.arr);
2642 } else {
2643 mtx_renull_all(csdata.mtx, csdata.arr);
2644 }
2645 mtx_null_col_vector_release();
2646 csdata.mtx = NULL;
2647 csdata.arr = NULL;
2648 csdata.tocur = NULL;
2649 csdata.t_org = mtx_NONE;
2650 }
2651 }
2652
/* Adds factor times cur col s_cur of the grabbed matrix into the col
 * grabbed by mtx_add_col_series_init, over all rows or the cur row
 * range rng.  Errors to g_mtxerr and returns if no col is grabbed. */
void mtx_add_col_series(int32 s_cur,real64 factor,mtx_range_t *rng)
{
  register int32 org_row;
  int32 t_org,*tocur;
  struct element_t **arr,*elt;
  mtx_matrix_t mtx;

  if ( !(csdata.mtx) ) {
    FPRINTF(g_mtxerr,"ERROR: mtx_add_col_series called for\n");
    FPRINTF(g_mtxerr,"       without grabbing target col first.\n");
    return;
  }
  /* pull the grabbed state into locals */
  mtx = csdata.mtx;
  arr = csdata.arr;
  tocur = csdata.tocur;
  t_org = csdata.t_org;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif

  elt = mtx->hdr.col[mtx->perm.col.cur_to_org[s_cur]];
  if( rng == mtx_ALL_ROWS ) {
    for( ; NOTNULL(elt); elt = elt->next.row ) {
      if( ISNULL(arr[(org_row=elt->row)]) ) {
        /* no target element at this org row yet: create one */
        arr[org_row] =
          mtx_create_element_value(mtx,org_row,t_org,(factor * elt->value));
      } else {
        arr[org_row]->value += factor * elt->value;
      }
    }
    return;
  }
  if( rng->high >= rng->low ) {
    register int32 cur_row;

    for( ; NOTNULL(elt); elt = elt->next.row ) {
      cur_row=tocur[(org_row=elt->row)];
      if( in_range(rng,cur_row) ) {
        if( NOTNULL(arr[org_row]) ) {
          arr[org_row]->value += factor * elt->value;
        } else {
          arr[org_row] =
            mtx_create_element_value(mtx,org_row,t_org,(factor*elt->value));
        }
      }
    }
  }
}
2702
/* Adds factor times the dense cur-indexed source row s_cur into cur
 * row t_cur.  Coverage is chosen by exactly one of:
 *   rng == mtx_ALL_COLS, ilist == NULL -> every cur col (zeros skipped)
 *   rng == mtx_ALL_COLS, ilist != NULL -> the cur col indices in ilist,
 *        terminated by a negative entry; NOTE no zero test on the data
 *   otherwise                          -> cur cols rng->low..rng->high
 * A zero factor is a no-op. */
void mtx_old_add_row_sparse(mtx_matrix_t mtx,
                            int32 t_cur, /* cur index of target row */
                            real64 *s_cur, /* dense source row, curindexed */
                            real64 factor, /* coefficient */
                            mtx_range_t *rng,
                            int32 *ilist) /* list to add over */
{
  int32 t_org,*toorg,cindex,orgcindex,hilim;
  struct element_t **arr;
  real64 value;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif

  if (factor==D_ZERO) return; /* adding 0 rather silly */

  if( rng == mtx_ALL_COLS ) {
    t_org = mtx->perm.row.cur_to_org[t_cur]; /* org of target row */
    arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
    toorg = mtx->perm.col.cur_to_org; /* current col perm */
    hilim=mtx->order;

    if (!ilist) { /* FULL ROW CASE no ilist */
      for (cindex=0; cindex< hilim; cindex++) {
        if ((value=s_cur[cindex])!=D_ZERO) {
          if ( ISNULL(arr[(orgcindex=toorg[cindex])]) ) { /* add element */
            arr[orgcindex] =
              mtx_create_element_value(mtx,t_org,orgcindex,(factor * value));
          } else { /* increment element */
            arr[orgcindex]->value += factor * value;
          }
        }
      }
    } else { /* SPARSE ROW CASE with ilist */
      int32 i;
      i=0;
      /* ilist entries are cur col indices; a negative entry ends it */
      while ((cindex=ilist[i])>=0) {
        value=s_cur[cindex];
        if ( ISNULL(arr[(orgcindex=toorg[cindex])]) ) { /* add element */
          arr[orgcindex] =
            mtx_create_element_value(mtx,t_org,orgcindex,(factor * value));
        } else { /* increment element */
          arr[orgcindex]->value += factor * value;
        }
        i++;
      }
    }
    mtx_renull_using_row(mtx,t_org,arr);
  } else if( rng->high >= rng->low ) { /* DENSE RANGE CASE */
    t_org = mtx->perm.row.cur_to_org[t_cur]; /* org of target row */
    arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
    toorg = mtx->perm.col.cur_to_org; /* current col perm */
    hilim = rng->high;

    for (cindex=rng->low; cindex<= hilim; cindex++) {
      if ((value=s_cur[cindex])!=D_ZERO) {
        if ( ISNULL(arr[(orgcindex=toorg[cindex])]) ) { /* add element */
          arr[orgcindex] =
            mtx_create_element_value(mtx,t_org,orgcindex,(factor * value));
        } else { /* increment element */
          arr[orgcindex]->value += factor * value;
        }
      }
    }
    mtx_renull_using_row(mtx,t_org,arr);
  }
  /* release the shared expansion array in every case */
  mtx_null_vector_release();
}
2772
/* Adds factor times the dense cur-indexed source col s_cur into cur
 * col t_cur.  Mirror image of mtx_old_add_row_sparse; coverage:
 *   rng == mtx_ALL_ROWS, ilist == NULL -> every cur row (zeros skipped)
 *   rng == mtx_ALL_ROWS, ilist != NULL -> the cur row indices in ilist,
 *        terminated by a negative entry; NOTE no zero test on the data
 *   otherwise                          -> cur rows rng->low..rng->high
 * A zero factor is a no-op. */
void mtx_old_add_col_sparse(mtx_matrix_t mtx,
                            int32 t_cur, /* cur index of target col */
                            real64 *s_cur, /* dense source col, curindexed */
                            real64 factor, /* coefficient */
                            mtx_range_t *rng, /* range to add over or */
                            int32 *ilist) /* list to add over */
{
  int32 t_org,*toorg,rowindex,orgrowindex,hilim;
  struct element_t **arr;
  real64 value;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif

  if (factor==D_ZERO) return;

  if( rng == mtx_ALL_ROWS ) {
    t_org = mtx->perm.col.cur_to_org[t_cur]; /* org of target col */
    arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
    toorg = mtx->perm.row.cur_to_org; /* current row perm */
    hilim=mtx->order;

    if (!ilist) { /* FULL COL CASE no ilist */
      for (rowindex=0; rowindex< hilim; rowindex++) {
        if ((value=s_cur[rowindex])!=D_ZERO) {
          if ( ISNULL(arr[(orgrowindex=toorg[rowindex])]) ) { /* add element */
            arr[orgrowindex] =
              mtx_create_element_value(mtx,orgrowindex,t_org,(factor * value));
          } else { /* increment element */
            arr[orgrowindex]->value += factor * value;
          }
        }
      }
    } else { /* SPARSE COL CASE with ilist */
      int32 i;
      i=0;
      /* ilist entries are cur row indices; a negative entry ends it */
      while ((rowindex=ilist[i])>=0) {
        value=s_cur[rowindex];
        if ( ISNULL(arr[(orgrowindex=toorg[rowindex])]) ) { /* add element */
          arr[orgrowindex] =
            mtx_create_element_value(mtx,orgrowindex,t_org,(factor * value));
        } else { /* increment element */
          arr[orgrowindex]->value += factor * value;
        }
        i++;
      }
    }
    mtx_renull_using_col(mtx,t_org,arr);

  } else if( rng->high >= rng->low ) { /* DENSE RANGE CASE */
    t_org = mtx->perm.col.cur_to_org[t_cur]; /* org of target col */
    arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
    toorg = mtx->perm.row.cur_to_org; /* current row perm */
    hilim = rng->high;

    for (rowindex=rng->low; rowindex<= hilim; rowindex++) {
      if ((value=s_cur[rowindex])!=D_ZERO) {
        if ( ISNULL(arr[(orgrowindex=toorg[rowindex])]) ) { /* add element */
          arr[orgrowindex] =
            mtx_create_element_value(mtx,orgrowindex,t_org,(factor * value));
        } else { /* increment element */
          arr[orgrowindex]->value += factor * value;
        }
      }
    }
    mtx_renull_using_col(mtx,t_org,arr);
  }
  /* release the shared expansion array in every case */
  mtx_null_vector_release();
}
2843
/* Returns a byte-count estimate of the memory owned by mtx: the
 * headers of the matrix and its slaves, the element store, the row/col
 * element-pointer arrays, block-structure data and the four permutation
 * vectors.  For a slave matrix only its header(s) and element-pointer
 * arrays are counted (the master presumably owns the rest).  Zero for
 * a NULL mtx. */
size_t mtx_size(mtx_matrix_t mtx) {
  size_t size=0;
  if (ISNULL(mtx)) return size;
  size += (1+mtx->nslaves)*sizeof(struct mtx_header);
  /* a slave contributes only its two element-pointer arrays beyond
     the headers counted above */
  if (ISSLAVE(mtx)) {
    return 2*sizeof(struct element_t *)*(size_t)mtx->capacity +size;
  }
  size += mem_sizeof_store(mtx->ms);
  /* headers */
  size += (1 + mtx->nslaves) * (size_t)mtx->capacity *
    (size_t)2 * sizeof(struct element_t *);
  /* block data */
  if (mtx->data->nblocks >0)
    size += sizeof(mtx_region_t)*(size_t)(mtx->data->nblocks-1);
  /* permutations */
  size += (size_t)4*(sizeof(int32)*(size_t)mtx->capacity);
  return size;
}
2862
2863 size_t mtx_chattel_size(mtx_matrix_t mtx) {
2864 size_t size=0;
2865 int32 i;
2866 if (ISNULL(mtx) || ISSLAVE(mtx)) return size;
2867 /* headers */
2868 size += (mtx->nslaves)*sizeof(struct mtx_header);
2869 /* incidence */
2870 for (i=0; i <mtx->nslaves; i++) {