
Contents of /trunk/ascend/linear/mtx_basic.c



Revision 2542
Mon Jan 30 18:42:14 2012 UTC by jpye
File MIME type: text/x-csrc
File size: 106087 byte(s)
Add missing #include 
/* ASCEND modelling environment
  Copyright (C) 1990 Karl Michael Westerberg
  Copyright (C) 1993 Joseph Zaher
  Copyright (C) 1994 Joseph Zaher, Benjamin Andrew Allan
  Copyright (C) 1995 Benjamin Andrew Allan, Kirk Andre' Abbott
  Copyright (C) 1996 Benjamin Andrew Allan
  Copyright (C) 2007 Carnegie Mellon University

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2, or (at your option)
  any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place - Suite 330,
  Boston, MA 02111-1307, USA.
*//** @file
  Basic operations with mtx matrix types, including file i/o.
*//*
  mtx: Ascend Sparse Matrix Package
  by Karl Michael Westerberg, Created: 2/6/90
  last in CVS: $Revision: 1.20 $ $Date: 2000/01/25 02:27:07 $ $Author: ballan $
*/

#include <math.h>
#include <ascend/utilities/config.h>
#include <ascend/general/platform.h>
#include <ascend/general/ascMalloc.h>
#include <ascend/general/mem.h>
#include "mtx.h"
#ifdef ASC_WITH_MMIO
# include <mmio.h>
#endif

/* grab our private parts */
#define __MTX_C_SEEN__
#include "mtx_use_only.h"
#include <ascend/general/mathmacros.h>

/**
  *Really* check the matrix.
  @return an error count
*/
int super_check_matrix( mtx_matrix_t mtx){
  int32 ndx,errcnt=0;
  int32 rowc,colc;

  /* Test consistency of permutation arrays */
  for( ndx=ZERO ; ndx < mtx->capacity ; ++ndx ) {
    if( mtx->perm.row.org_to_cur[mtx->perm.row.cur_to_org[ndx]] != ndx ) {
      ERROR_REPORTER_HERE(ASC_PROG_ERR,"Permutation violation in row %d.\n", ndx);
      errcnt++;
    }
    if( mtx->perm.col.org_to_cur[mtx->perm.col.cur_to_org[ndx]] != ndx ) {
      ERROR_REPORTER_HERE(ASC_PROG_ERR,"Permutation violation in col %d.\n",ndx);
      errcnt++;
    }
  }

  if( mtx->order > mtx->capacity ) {
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"Capacity %d is less than order %d\n",
      mtx->capacity,mtx->order);
    errcnt++;
  }

  /* Make sure rows and columns which don't exist are blank */
  for( ndx = mtx->order ; ndx < mtx->capacity ; ++ndx ) {
    int32 org_row,org_col;
    org_row = mtx->perm.row.cur_to_org[ndx];
    org_col = mtx->perm.col.cur_to_org[ndx];
    if( NOTNULL(mtx->hdr.row[org_row]) ) {
      ERROR_REPORTER_HERE(ASC_PROG_ERR,"Non-zeros found in non-existent row %d.\n",ndx);
      errcnt++;
    }
    if( NOTNULL(mtx->hdr.col[org_col]) ) {
      ERROR_REPORTER_HERE(ASC_PROG_ERR,"Non-zeros found in non-existent col %d.\n",ndx);
      errcnt++;
    }
  }

  /* Verify fishnet structure */
  for( ndx=rowc=colc=ZERO ; ndx < mtx->capacity ; ++ndx ) {
    struct element_t *elt;
    int32 org_row,org_col;
    org_row = mtx->perm.row.cur_to_org[ndx];
    org_col = mtx->perm.col.cur_to_org[ndx];
    for( elt = mtx->hdr.row[org_row] ; NOTNULL(elt) ; elt = elt->next.col ) {
      if( elt->row != mtx_NONE ) {
        ++rowc;
        if( elt->row != org_row ) {
          ERROR_REPORTER_HERE(ASC_PROG_ERR,"Element mismatch in row %d.\n", ndx);
          errcnt++;
        }
      } else {
        ERROR_REPORTER_HERE(ASC_PROG_ERR,"Disclaimed element in row %d.\n", ndx);
        errcnt++;
      }
    }
    for( elt = mtx->hdr.col[org_col] ; NOTNULL(elt) ; elt = elt->next.row ) {
      if( elt->col != mtx_NONE ) {
        ++colc;
        if( elt->col != org_col ) {
          ERROR_REPORTER_HERE(ASC_PROG_ERR,"Element mismatch in col %d.\n", ndx);
          errcnt++;
        }
      } else {
        ERROR_REPORTER_HERE(ASC_PROG_ERR,"Disclaimed element in col %d.\n", ndx);
        errcnt++;
      }
    }
  }
  if( rowc != colc ) {
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"Non-zero discrepancy, %d by row, %d by col.\n",
      rowc, colc);
    errcnt++;
  }
  return errcnt;
}

/**
  Checks the integrity flag of the matrix.
*/
boolean check_matrix( mtx_matrix_t mtx, char *file, int line){

  if( ISNULL(mtx) ) {
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"NULL matrix in %s line %d.\n",file,line);
    return 0;
  }

  switch( mtx->integrity ) {
  case OK:
    break;
  case DESTROYED:
    FPRINTF(g_mtxerr,
      "  Deceased matrix found in %s line %d.\n",file,line);
    return 0;
  default:
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"Matrix garbage found in %s line %d.\n",
      file,line);
    return 0;
  }

#if MTX_DEBUG
  super_check_matrix(mtx);
#endif

  if (ISSLAVE(mtx)) {
    return (check_matrix(mtx->master,file,line));
  }
  return 1;
}

/*
  This function checks the consistency of a sparse as best it can.
  @return FALSE if something is weird
*/
boolean check_sparse(const mtx_sparse_t * const sp, char *file, int line)
{
  if ( ISNULL(sp) ||
       sp->len > sp->cap ||
       ( sp->cap > 0 && ( ISNULL(sp->idata) || ISNULL(sp->data) ) )
     ) {
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"Inconsistent or NULL sparse in %s line %d.\n",file,line);
    return 0;
  }
  return 1;
}

#define free_unless_null(ptr) if( NOTNULL(ptr) ) ascfree(ptr)
/*
static void free_unless_null(POINTER ptr)
{
  if( NOTNULL(ptr) )
    ascfree(ptr);
}
*/

void mtx_zero_int32(int32 *data, int len){
  int i;
  if (data==NULL) return;
  for (i=0; i<len; i++) data[i]=0;
}

void mtx_zero_real64(real64 *data, int len){
  int i;
  if (data==NULL) return;
  for (i=0; i<len; i++) data[i]=0.0;
}

void mtx_zero_ptr(void **data, int len){
  int i;
  if (data==NULL) return;
  for (i=0; i<len; i++) data[i]=NULL;
}

/*
  Allocates a matrix header and returns a pointer to it.
*/
static mtx_matrix_t alloc_header(void){
  mtx_matrix_t mtx;
  mtx = (mtx_matrix_t)ascmalloc( sizeof(struct mtx_header) );
  mtx->integrity = OK;
  return(mtx);
}

/**
  Frees a matrix header.
*/
static void free_header(mtx_matrix_t mtx){
  mtx->integrity = DESTROYED;
  ascfree(mtx);
}

/**
  Allocates a zeroed row or column header.
*/
static struct element_t **calloc_nz_header(int32 cap){
  return(cap > 0 ? ASC_NEW_ARRAY_CLEAR(struct element_t *,cap) : NULL);
}

/**
  Copies srchdr to tarhdr given the capacity of srchdr.
*/
static void copy_nz_header(struct element_t **tarhdr,
    struct element_t **srchdr, int32 cap
){
  mem_copy_cast(srchdr,tarhdr,cap*sizeof(struct element_t *));
}

/**
  Frees a row or column header.
**/
#define free_nz_header(hdr) free_unless_null( (POINTER)(hdr) )

/********************************************************************/
/*
  Any method of deallocating elements that doesn't use
  delete_from_row/col or nuke_fishnet is DOOMED to die.
  On ANY entry to delete_from_row or delete_from_col,
  last_value_matrix should already be set to the mtx being deleted from.
  What this really means is that before any of the following functions
  is called, last_value_matrix should be set:

    disclaim_element
    delete_from_row/col
    blast_row/col
    clean_row/col

  Note that the above functions call each other, with blast_/clean_ being
  the top housekeeping/garbage collection pair. Users of delete, blast or
  clean should set last_value_matrix at the scope where the matrix is known.

  Excuses for this kluge:
  1) mtx_value/set_value need it.
  2) the mem_store scheme REQUIRES it.
  3) it is faster than passing the mtx up and down through the
     functions listed above, and we do a LOT of these operations.
*/
static mtx_matrix_t last_value_matrix = NULL;

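/*
  A minimal sketch (not part of the original file) of the convention just
  described, as followed by in-file callers such as trim_incidence() and
  mtx_clear_region() below. The mtx/org_row/org_col variables here are
  hypothetical stand-ins; the block is #if 0'd out so it is never compiled.
*/
#if 0
  last_value_matrix = mtx;               /* set BEFORE any deletion below */
  blast_row(mtx, org_row, mtx_ALL_COLS); /* calls delete_from_row() internally */
  clean_col(mtx, org_col);               /* sweeps elements disclaimed once already */
#endif
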
/**
  Indicates that one of the row or column lists (it doesn't matter which)
  in which this element appears will no longer point to this element (it
  has already been removed from the list). Elements disclaimed once have
  their row,col indices set to mtx_NONE. Elements disclaimed twice are
  discarded.
  NOTE WELL: last_value_matrix must be set before this function is called.
*/
static void disclaim_element(struct element_t *element){
  if( element->row == mtx_NONE ) {
    mem_free_element(last_value_matrix->ms,(void *)element);
  } else {
    element->row = element->col = mtx_NONE;
  }
}

/**
  Deletes the element from the row it is in, given a pointer to
  the row link which leads to this element.
  On return the pointer pointed to by link points to the new next element.
  NOTE WELL: last_value_matrix must be set before this function is called.
*/
static void delete_from_row(struct element_t **link){
  struct element_t *p;
  p = *link;
  *link = p->next.col;
  disclaim_element(p);
  /* conservatively cause mtx_set_value to forget */
  last_value_matrix->last_value = NULL;
}

/**
  Deletes the element from the col it is in, given a pointer to
  the col link which leads to this element.
  NOTE WELL: last_value_matrix must be set before this function is called.
*/
static void delete_from_col(struct element_t **link){
  struct element_t *p;
  p = *link;
  *link = p->next.row;
  disclaim_element(p);
  /* conservatively cause mtx_set_value to forget */
  last_value_matrix->last_value = NULL;
}

/**
  Deletes every element even remotely associated with a matrix
  and nulls the headers. Also nulls the headers of slaves
  and cleans out their elements.
*/
static void nuke_fishnet(mtx_matrix_t mtx){
  int32 i;

  if ( ISNULL(mtx) || ISNULL(mtx->hdr.row) ||
       ISNULL(mtx->hdr.col) || ISSLAVE(mtx)) {
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with bad matrix.");
    return;
  }
  if (mtx->capacity<1) return;
  mtx->last_value = NULL;
  mem_clear_store(mtx->ms);
  zero(mtx->hdr.row,mtx->capacity,struct element_t *);
  zero(mtx->hdr.col,mtx->capacity,struct element_t *);
  for (i = 0; i < mtx->nslaves; i++) {
    zero(mtx->slaves[i]->hdr.row,mtx->capacity,struct element_t *);
    zero(mtx->slaves[i]->hdr.col,mtx->capacity,struct element_t *);
  }
}

/**
  Destroys all elements in the given row with (current)
  col index in the given col range: if rng == mtx_ALL_COLS,
  the entire row is destroyed.
  NOTE WELL: last_value_matrix must be set before this function is called.
*/
static void blast_row( mtx_matrix_t mtx, int32 org, mtx_range_t *rng){
  struct element_t **link;

  link = &(mtx->hdr.row[org]);
  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*link) )
      delete_from_row(link);
  } else {
    int32 *tocur = mtx->perm.col.org_to_cur;
    int32 col;
    while( NOTNULL(*link) ) {
      col = (*link)->col;
      if( col == mtx_NONE || in_range(rng,tocur[col]) )
        delete_from_row(link);
      else
        link = &((*link)->next.col);
    }
  }
}

/**
  Destroys all elements in the given col with (current)
  row index in the given row range: if rng == mtx_ALL_ROWS,
  the entire col is destroyed.
  NOTE WELL: last_value_matrix must be set before this function is called.
*/
static void blast_col( mtx_matrix_t mtx, int32 org, mtx_range_t *rng){
  struct element_t **link;

  link = &(mtx->hdr.col[org]);
  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*link) )
      delete_from_col(link);
  } else {
    int32 *tocur = mtx->perm.row.org_to_cur, row;

    while( NOTNULL(*link) ) {
      row = (*link)->row;
      if( row == mtx_NONE || in_range(rng,tocur[row]) )
        delete_from_col(link);
      else
        link = &((*link)->next.row);
    }
  }
}

/**
  Disclaims all elements in the given row which have already been
  disclaimed once.
  NOTE WELL: last_value_matrix must be set before this function is called.
*/
static void clean_row(mtx_matrix_t mtx, int32 org){
  struct element_t **link;

  link = &(mtx->hdr.row[org]);
  while( NOTNULL(*link) )
    if( (*link)->row == mtx_NONE )
      delete_from_row(link);
    else
      link = &((*link)->next.col);
}

/**
  Disclaims all elements in the given col which have already been
  disclaimed once.
  NOTE WELL: last_value_matrix must be set before this function is called.
*/
static void clean_col(mtx_matrix_t mtx, int32 org){
  struct element_t **link;

  link = &(mtx->hdr.col[org]);
  while( NOTNULL(*link) )
    if( (*link)->col == mtx_NONE )
      delete_from_col(link);
    else
      link = &((*link)->next.row);
}
/********************************************************************/

mtx_coord_t *mtx_coord(mtx_coord_t *coordp, int32 row, int32 col){
  coordp->row = row;
  coordp->col = col;
  return(coordp);
}

mtx_range_t *mtx_range(mtx_range_t *rangep, int32 low, int32 high){
  rangep->low = low;
  rangep->high = high;
  return(rangep);
}

mtx_region_t *mtx_region( mtx_region_t *regionp, int32 rowlow, int32 rowhigh,
    int32 collow, int32 colhigh
){
  mtx_range(&(regionp->row),rowlow,rowhigh);
  mtx_range(&(regionp->col),collow,colhigh);
  return(regionp);
}

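/*
  Usage sketch (illustrative, not in the original file): building a region
  covering rows 0..4 and columns 2..6 with the helpers above, then clearing
  it. The 'mtx' variable is a hypothetical, already-created matrix.
*/
#if 0
  mtx_region_t reg;
  mtx_region(&reg, 0, 4, 2, 6);  /* rows 0..4, cols 2..6 */
  mtx_clear_region(mtx, &reg);   /* defined later in this file */
#endif
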
static int g_mtx_debug_redirect = 0;

void mtx_debug_redirect_freeze(void){
  g_mtx_debug_redirect = 1;
}

static void mtx_redirectErrors(FILE *f){
  if (!g_mtx_debug_redirect) {
    assert(f != NULL);
    g_mtxerr = f;
  }
}

mtx_matrix_t mtx_create(void){
  mtx_matrix_t mtx;

  mtx_redirectErrors(stderr); /* call mtx_debug_redirect_freeze() to bypass */

  mtx = alloc_header();
  mtx->order = ZERO;
  mtx->capacity = ZERO;
  mtx->hdr.row = mtx->hdr.col = NULL;
  mtx->perm.row.org_to_cur = mtx_alloc_perm(ZERO);
  mtx->perm.row.cur_to_org = mtx_alloc_perm(ZERO);
  mtx->perm.row.parity = EVEN;
  mtx->perm.col.org_to_cur = mtx_alloc_perm(ZERO);
  mtx->perm.col.cur_to_org = mtx_alloc_perm(ZERO);
  mtx->perm.col.parity = EVEN;
  mtx->perm.transpose = mtx_NONE;
  mtx->data =
    (struct structural_data_t *)ascmalloc(sizeof(struct structural_data_t));
  mtx->data->symbolic_rank = -1;
  mtx->data->nblocks = -1;
  mtx->data->block = NULL;
  mtx->master = NULL;
  mtx->nslaves = ZERO;
  mtx->slaves = NULL;
  mtx->last_value = NULL;
  mtx->ms = mem_create_store(LENMAGIC, WIDTHMAGIC/sizeof(struct element_t) - 1,
    sizeof(struct element_t),10,2048);
  return(mtx);
}

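/*
  Lifecycle sketch (illustrative only, assuming "mtx.h" is included): a
  master matrix is created empty with order 0, sized with mtx_set_order(),
  filled, then destroyed. All functions used here are defined in this file.
*/
#if 0
  mtx_matrix_t m = mtx_create();
  mtx_coord_t c;
  mtx_set_order(m, 10);                       /* 10x10 permutation space */
  mtx_fill_value(m, mtx_coord(&c,3,4), 1.5);  /* incidence at cur (3,4) */
  mtx_destroy(m);                             /* frees elements, perms, headers */
#endif
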
/**
  Slave headers share the master's:
    perm.anything, data->anything, ms->anything, order and capacity.
  Unique to the slave are:
    hdr.anything and last_value.
  All slaves of a master appear as pointers in the master's slaves list,
  which is nslaves long. Slaves do not have slaves.
**/
mtx_matrix_t mtx_create_slave(mtx_matrix_t master){
  mtx_matrix_t mtx, *mlist;
  int32 newcnt;
  mtx_redirectErrors(stderr); /* call mtx_debug_redirect_freeze() to bypass */
  if(!mtx_check_matrix(master) || ISSLAVE(master)) return NULL;

  mtx = alloc_header();
  if (ISNULL(mtx)) return mtx;
  mtx->order = master->order;
  mtx->capacity = master->capacity;
  mtx->nslaves = ZERO;
  mtx->hdr.row = calloc_nz_header(master->capacity);
  if (ISNULL(mtx->hdr.row)) {
    ascfree(mtx);
    FPRINTF(g_mtxerr,"mtx_create_slave: Insufficient memory.\n");
    return NULL;
  }
  mtx->hdr.col = calloc_nz_header(master->capacity);
  if (ISNULL(mtx->hdr.col)) {
    ascfree(mtx->hdr.row);
    ascfree(mtx);
    FPRINTF(g_mtxerr,"mtx_create_slave: Insufficient memory.\n");
    return NULL;
  }
  mtx->perm.row.org_to_cur = master->perm.row.org_to_cur;
  mtx->perm.row.cur_to_org = master->perm.row.cur_to_org;
  mtx->perm.row.parity = EVEN; /* don't look at this again */
  mtx->perm.col.org_to_cur = master->perm.col.org_to_cur;
  mtx->perm.col.cur_to_org = master->perm.col.cur_to_org;
  mtx->perm.col.parity = EVEN; /* don't look at this again */
  mtx->perm.transpose = mtx_NONE; /* don't look at this again */
  mtx->slaves = NULL;
  mtx->data = master->data;
  mtx->master = master;
  mtx->last_value = NULL;
  mtx->ms = master->ms;
  newcnt = master->nslaves + 1;
  if (newcnt == 1) {
    mlist = (mtx_matrix_t *)ascmalloc(sizeof(mtx_matrix_t));
  } else {
    mlist =
      (mtx_matrix_t *)ascrealloc(master->slaves,newcnt*sizeof(mtx_matrix_t));
  }
  if (ISNULL(mlist)) {
    ascfree(mtx->hdr.row);
    ascfree(mtx->hdr.col);
    ascfree(mtx);
    FPRINTF(g_mtxerr,"mtx_create_slave: Insufficient memory.\n");
    return NULL;
  } else {
    master->slaves = mlist;
    master->slaves[master->nslaves] = mtx;
    master->nslaves = newcnt;
  }
  return(mtx);
}

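/*
  Slave usage sketch (illustrative): a slave shares the master's
  permutations and element store, so permuting the master permutes the
  slave's view as well. Only masters may be passed to mtx_create_slave();
  destroying the master (see mtx_destroy below) also destroys its slaves.
*/
#if 0
  mtx_matrix_t master = mtx_create();
  mtx_matrix_t slave;
  mtx_set_order(master, 100);
  slave = mtx_create_slave(master);  /* NULL on bad master or no memory */
  mtx_destroy(master);               /* frees the slave's headers too */
#endif
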
static void destroy_slave(mtx_matrix_t master, mtx_matrix_t mtx){
  int32 sid,i;

  if(!mtx_check_matrix(mtx) || !mtx_check_matrix(master)) {
    FPRINTF(g_mtxerr,
      "destroy_slave(mtx.c) called with corrupt slave or master\n");
    return;
  }
  if (ISSLAVE(master) || !ISSLAVE(mtx)) {
    FPRINTF(g_mtxerr,
      "destroy_slave(mtx.c) called with mismatched slave/master pair\n");
    return;
  }
  /* get slave index */
  sid = -1;
  for (i=0; i<master->nslaves; i++) {
    if (master->slaves[i] == mtx) {
      sid = i;
    }
  }
  if (sid < 0) {
    FPRINTF(g_mtxerr,
      "destroy_slave(mtx.c) called with mismatched slave/master pair\n");
    return;
  }
  /* move the rest of the slaves up the list */
  /* old implementation - possible undefined operation on i - new code follows - JDS
  for (i = sid; i < master->nslaves -1;) {
    master->slaves[i] = master->slaves[++i];
  }
  */
  for (i = sid; i < master->nslaves -1; ++i) {
    master->slaves[i] = master->slaves[i+1];
  }
  (master->nslaves)--;
  /* we will not realloc smaller. */
  if (master->nslaves == 0 && NOTNULL(master->slaves)) {
    ascfree(master->slaves);
    master->slaves = NULL;
  }
  mtx_clear_region(mtx,mtx_ENTIRE_MATRIX);
  free_nz_header(mtx->hdr.row);
  free_nz_header(mtx->hdr.col);
  free_header(mtx);
}

void mtx_destroy(mtx_matrix_t mtx){
  int32 i;
  if(!mtx_check_matrix(mtx)) return;
  if (ISSLAVE(mtx)) {
    destroy_slave(mtx->master,mtx);
    return;
  }

  for (i=0; i<mtx->nslaves; i++) {
    if (mtx_check_matrix(mtx->slaves[i])) {
      free_nz_header(mtx->slaves[i]->hdr.row);
      free_nz_header(mtx->slaves[i]->hdr.col);
      free_header(mtx->slaves[i]);
      mtx->slaves[i] = NULL;
    } else {
      FPRINTF(g_mtxerr,
        "mtx_destroy: Corrupt slave found while destroying master.\n");
      FPRINTF(g_mtxerr,
        "             Slave %d being abandoned.\n",i);
    }
  }
  mtx->nslaves = 0;

  mtx_clear_region(mtx,mtx_ENTIRE_MATRIX);
  /* mtx_check_matrix is called again if MTX_DEBUG */
  /*
    The fishnet structure is destroyed, just leaving
    the headers and maybe ms.
  */
  mem_destroy_store(mtx->ms);

  free_nz_header(mtx->hdr.row);
  free_nz_header(mtx->hdr.col);

  mtx_free_perm(mtx->perm.row.org_to_cur);
  mtx_free_perm(mtx->perm.row.cur_to_org);
  mtx_free_perm(mtx->perm.col.org_to_cur);
  mtx_free_perm(mtx->perm.col.cur_to_org);

  if( NOTNULL(mtx->data->block) ) {
    ascfree(mtx->data->block);
  }
  ascfree(mtx->data);

  free_header(mtx);
}

mtx_sparse_t *mtx_create_sparse(int32 cap){
  mtx_sparse_t *ret;
  ret = (mtx_sparse_t *)ascmalloc(sizeof(mtx_sparse_t));
  if (ISNULL(ret)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx_create_sparse) Insufficient memory.\n");
    return ret;
  }
  ret->data = ASC_NEW_ARRAY(real64,cap);
  if (ISNULL(ret->data)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx_create_sparse) Insufficient memory.\n");
    ascfree(ret);
    return NULL;
  }
  ret->idata = ASC_NEW_ARRAY(int32,cap);
  if (ISNULL(ret->idata)) {
    FPRINTF(g_mtxerr,"ERROR: (mtx_create_sparse) Insufficient memory.\n");
    ascfree(ret->data);
    ascfree(ret);
    return NULL;
  }
  ret->cap = cap;
  ret->len = 0;
  return ret;
}

void mtx_destroy_sparse(mtx_sparse_t *ret){
  if (ISNULL(ret)) return;
  if (NOTNULL(ret->idata)) ascfree(ret->idata);
  if (NOTNULL(ret->data)) ascfree(ret->data);
  ascfree(ret);
}

void mtx_destroy_blocklist(mtx_block_t *bl){
  if (ISNULL(bl)) return;
  if (NOTNULL(bl->block) && bl->nblocks>0) ascfree(bl->block);
  ascfree(bl);
}

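/*
  Sparse vector lifecycle sketch (illustrative): mtx_create_sparse()
  allocates cap-long data/idata arrays with len 0; the steal functions
  further below fill them. Sizing the capacity is the caller's problem.
*/
#if 0
  mtx_sparse_t *sp = mtx_create_sparse(50);  /* NULL if out of memory */
  if (sp != NULL) {
    /* ... e.g. mtx_steal_org_row_sparse(mtx, row, sp, mtx_ALL_COLS); ... */
    mtx_destroy_sparse(sp);
  }
#endif
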
mtx_matrix_t mtx_duplicate_region(mtx_matrix_t mtx
  , mtx_region_t *reg
  , real64 drop
){
  mtx_matrix_t new;
  int32 org_row;
  struct element_t *elt;

  if (!mtx_check_matrix(mtx) ||
      (mtx->master != NULL && !mtx_check_matrix(mtx->master))) {
    return NULL;
  }
  drop = fabs(drop);
  if (ISSLAVE(mtx)) {
    new = mtx_create_slave(mtx->master);
  } else {
    new = mtx_create_slave(mtx);
  }
  if (new == NULL) {
    FPRINTF(g_mtxerr,"ERROR: (mtx_duplicate_region) Insufficient memory.\n");
    return NULL;
  }
  if (reg==mtx_ENTIRE_MATRIX){
    if (drop >0.0) { /* copy with drop tolerance */
      for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
        for( elt = mtx->hdr.row[org_row] ;
            NOTNULL(elt) ; elt = elt->next.col ) {
          if( elt->row != mtx_NONE && fabs(elt->value) > drop) {
            mtx_create_element_value(new,elt->row,elt->col,elt->value);
          }
        }
      }
    } else {
      for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
        for( elt = mtx->hdr.row[org_row] ;
            NOTNULL(elt) ; elt = elt->next.col ) {
          if( elt->row != mtx_NONE ) {
            mtx_create_element_value(new,elt->row,elt->col,elt->value);
          }
        }
      }
    }
  } else {
    int32 *rowcur, *colcur, row,col;
    mtx_range_t *colrng,*rowrng;
    rowcur=mtx->perm.row.org_to_cur;
    colcur=mtx->perm.col.org_to_cur;
    rowrng=&(reg->row);
    colrng=&(reg->col);
    if (drop >0.0) { /* copy with drop tolerance */
      for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
        row=rowcur[org_row];
        if (in_range(rowrng,row)) {
          for( elt = mtx->hdr.row[org_row] ;
              NOTNULL(elt) ; elt = elt->next.col ) {
            col=colcur[elt->col];
            if( in_range(colrng,col) && elt->row != mtx_NONE &&
                fabs(elt->value) > drop ) {
              /* don't copy bad elements */
              mtx_create_element_value(new,elt->row,elt->col,elt->value);
            }
          }
        }
      }
    } else {
      for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
        row=rowcur[org_row];
        if (in_range(rowrng,row)) {
          for( elt = mtx->hdr.row[org_row] ;
              NOTNULL(elt) ; elt = elt->next.col ) {
            col=colcur[elt->col];
            if( in_range(colrng,col) && elt->row != mtx_NONE ) {
              /* don't copy bad elements */
              mtx_create_element_value(new,elt->row,elt->col,elt->value);
            }
          }
        }
      }
    }
  }

  return new;
}

/* WARNING: this function assumes sufficient memory is available.
 * It needs to check for NULL returns from its allocations, but doesn't.
 */
mtx_matrix_t mtx_copy_options(mtx_matrix_t mtx
  , boolean blocks
  , boolean incidence, mtx_region_t *reg
  , real64 drop
){
  mtx_matrix_t copy;
  int32 org_row;
  struct element_t *elt;
  int32 bnum;

  if (!mtx_check_matrix(mtx)) return NULL;
  drop = fabs(drop);

  /* create same size matrix */
  copy = alloc_header();
  copy->order = mtx->order;
  copy->capacity = mtx->capacity;
  copy->master = NULL;
  copy->nslaves = ZERO;
  copy->slaves = NULL;
  copy->last_value = NULL;

  /* copy headers and fishnet */
  copy->hdr.row = calloc_nz_header(copy->capacity);
  copy->hdr.col = calloc_nz_header(copy->capacity);
  if (incidence) {
    struct mem_statistics stat;
    int s_expool;
    mem_get_stats(&stat,mtx->ms);
    s_expool = MAX(2048,stat.elt_total/stat.str_wid);
    copy->ms = mem_create_store(LENMAGIC,
      WIDTHMAGIC/sizeof(struct element_t) - 1,
      sizeof(struct element_t),
      10,s_expool-LENMAGIC);
    /* copy of a slave or master matrix will end up with an
     * initial mem_store that is the cumulative size of the
     * master and all its slaves, since they share the ms.
     */
    if (reg==mtx_ENTIRE_MATRIX){
      if (drop >0.0) { /* copy with drop tolerance */
        for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
          for( elt = mtx->hdr.row[org_row] ;
              NOTNULL(elt) ; elt = elt->next.col ) {
            if( elt->row != mtx_NONE && fabs(elt->value) > drop) {
              mtx_create_element_value(copy,elt->row,elt->col,elt->value);
            }
          }
        }
      } else {
        for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
          for( elt = mtx->hdr.row[org_row] ;
              NOTNULL(elt) ; elt = elt->next.col ) {
            if( elt->row != mtx_NONE ) {
              mtx_create_element_value(copy,elt->row,elt->col,elt->value);
            }
          }
        }
      }
    } else {
      int32 *rowcur, *colcur, row,col;
      mtx_range_t *colrng,*rowrng;
      rowcur=mtx->perm.row.org_to_cur;
      colcur=mtx->perm.col.org_to_cur;
      rowrng=&(reg->row);
      colrng=&(reg->col);
      if (drop >0.0) { /* copy with drop tolerance */
        for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
          row=rowcur[org_row];
          if (in_range(rowrng,row)) {
            for( elt = mtx->hdr.row[org_row] ;
                NOTNULL(elt) ; elt = elt->next.col ) {
              col=colcur[elt->col];
              if( in_range(colrng,col) && elt->row != mtx_NONE &&
                  fabs(elt->value) > drop ) {
                /* don't copy bad elements */
                mtx_create_element_value(copy,elt->row,elt->col,elt->value);
              }
            }
          }
        }
      } else {
        for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
          row=rowcur[org_row];
          if (in_range(rowrng,row)) {
            for( elt = mtx->hdr.row[org_row] ;
                NOTNULL(elt) ; elt = elt->next.col ) {
              col=colcur[elt->col];
              if( in_range(colrng,col) && elt->row != mtx_NONE ) {
                /* don't copy bad elements */
                mtx_create_element_value(copy,elt->row,elt->col,elt->value);
              }
            }
          }
        }
      }
    }
  } else {
    copy->ms = mem_create_store(LENMAGIC,
      WIDTHMAGIC/sizeof(struct element_t) - 1,
      sizeof(struct element_t),10,2048);
    /* copy of a slave or master matrix will end up with an
     * initial mem_store that is the default size */
  }

  /* copy permutation information */
  copy->perm.row.org_to_cur = mtx_alloc_perm(copy->capacity);
  mtx_copy_perm(copy->perm.row.org_to_cur,
    mtx->perm.row.org_to_cur,copy->capacity);
  copy->perm.row.cur_to_org = mtx_alloc_perm(copy->capacity);
  mtx_copy_perm(copy->perm.row.cur_to_org,
    mtx->perm.row.cur_to_org,copy->capacity);
  copy->perm.row.parity = mtx_row_parity(mtx);

  copy->perm.col.org_to_cur = mtx_alloc_perm(copy->capacity);
  mtx_copy_perm(copy->perm.col.org_to_cur,
    mtx->perm.col.org_to_cur,copy->capacity);
  copy->perm.col.cur_to_org = mtx_alloc_perm(copy->capacity);
  mtx_copy_perm(copy->perm.col.cur_to_org,
    mtx->perm.col.cur_to_org,copy->capacity);
  copy->perm.col.parity = mtx_col_parity(mtx);

  /* copy (or not) block data */
  copy->data =
    (struct structural_data_t *)ascmalloc(sizeof(struct structural_data_t));
  if (blocks) {
    copy->data->symbolic_rank = mtx->data->symbolic_rank;
    copy->data->nblocks = mtx->data->nblocks;
    copy->data->block = mtx->data->nblocks > 0 ? (mtx_region_t *)
      ascmalloc( mtx->data->nblocks*sizeof(mtx_region_t) ) : NULL;
    for( bnum=0; bnum < mtx->data->nblocks; bnum++ ) {
      copy->data->block[bnum].row.low = mtx->data->block[bnum].row.low;
      copy->data->block[bnum].row.high = mtx->data->block[bnum].row.high;
      copy->data->block[bnum].col.low = mtx->data->block[bnum].col.low;
      copy->data->block[bnum].col.high = mtx->data->block[bnum].col.high;
    }
  } else {
    copy->data->symbolic_rank=0;
    copy->data->nblocks=0;
    copy->data->block =NULL;
  }

  return(copy);
}

int32 mtx_order( mtx_matrix_t mtx){
  if (!mtx_check_matrix(mtx)) return -1;
  return(mtx->order);
}

int32 mtx_capacity( mtx_matrix_t mtx){
  if (!mtx_check_matrix(mtx)) return -1;
  return(mtx->capacity);
}

/* only to be called from within mtx_set_order */
static void trim_incidence(mtx_matrix_t mtx, int32 order){
  int32 ndx;
  int32 *toorg;

  last_value_matrix = mtx;
  toorg = mtx->perm.col.cur_to_org;
  for( ndx = order ; ndx < mtx->order ; ++ndx )
    blast_col(mtx,toorg[ndx],mtx_ALL_ROWS);
  for( ndx = ZERO ; ndx < order ; ++ndx )
    clean_row(mtx,toorg[ndx]);

  toorg = mtx->perm.row.cur_to_org;
  for( ndx = order ; ndx < mtx->order ; ++ndx )
    blast_row(mtx,toorg[ndx],mtx_ALL_COLS);
  for( ndx = ZERO ; ndx < order ; ++ndx )
    clean_col(mtx,toorg[ndx]);
}

/* only to be called from within mtx_set_order */
static void enlarge_nzheaders(mtx_matrix_t mtx, int32 order){
  struct element_t **newhdr;

  /* realloc does not initialize, so calloc is the best we can do here */

  newhdr = calloc_nz_header(order);
  copy_nz_header(newhdr,mtx->hdr.row,mtx->capacity);
  free_nz_header(mtx->hdr.row);
  mtx->hdr.row = newhdr;

  newhdr = calloc_nz_header(order);
  copy_nz_header(newhdr,mtx->hdr.col,mtx->capacity);
  free_nz_header(mtx->hdr.col);
  mtx->hdr.col = newhdr;
}

/**
  This function preserves the fact that all of the arrays are
  "correct" out to the capacity of the arrays, not just out to the order
  of the matrix. In other words, extensions to orders which still do
  not exceed the capacity of the arrays are trivial.
*/
void mtx_set_order( mtx_matrix_t mtx, int32 order){
  int32 i;
  if(!mtx_check_matrix(mtx)) return;
  if (ISSLAVE(mtx)) {
    mtx_set_order(mtx->master,order);
    return;
  }
  /* we now have a master matrix */
  if( order < mtx->order ) { /* Truncate */
    trim_incidence(mtx,order); /* clean master */
    for (i = 0; i < mtx->nslaves; i++) {
      trim_incidence(mtx->slaves[i],order);
    }
  }
  if (mtx->perm.transpose == mtx_NONE) {
    mtx->perm.transpose = 0;
  }

  if( order > mtx->capacity ) {
    int32 *newperm;
    int32 ndx;

    enlarge_nzheaders(mtx,order);
    for (i = 0; i < mtx->nslaves; i++) {
      enlarge_nzheaders(mtx->slaves[i],order);
    }

    /* realloc is not in order here. Happens only on the master. */
    newperm = mtx_alloc_perm(order);
    mtx_copy_perm(newperm,mtx->perm.row.org_to_cur,mtx->capacity);
    for( ndx=mtx->capacity ; ndx < order ; ++ndx )
      newperm[ndx] = ndx;
    mtx_free_perm(mtx->perm.row.org_to_cur);
    mtx->perm.row.org_to_cur = newperm;

    newperm = mtx_alloc_perm(order);
    mtx_copy_perm(newperm,mtx->perm.row.cur_to_org,mtx->capacity);
    for( ndx=mtx->capacity ; ndx < order ; ++ndx )
      newperm[ndx] = ndx;
    mtx_free_perm(mtx->perm.row.cur_to_org);
    mtx->perm.row.cur_to_org = newperm;

    newperm = mtx_alloc_perm(order);
    mtx_copy_perm(newperm,mtx->perm.col.org_to_cur,mtx->capacity);
    for( ndx=mtx->capacity ; ndx < order ; ++ndx )
      newperm[ndx] = ndx;
    mtx_free_perm(mtx->perm.col.org_to_cur);
    mtx->perm.col.org_to_cur = newperm;

    newperm = mtx_alloc_perm(order);
    mtx_copy_perm(newperm,mtx->perm.col.cur_to_org,mtx->capacity);
    for( ndx=mtx->capacity ; ndx < order ; ++ndx )
      newperm[ndx] = ndx;
    mtx_free_perm(mtx->perm.col.cur_to_org);
    mtx->perm.col.cur_to_org = newperm;

    mtx->capacity = order;
    for (i = 0; i < mtx->nslaves; i++) {
      mtx->slaves[i]->capacity = order;
      /* point slaves at master perm again in case anything has changed */
      mtx->slaves[i]->perm.row.org_to_cur = mtx->perm.row.org_to_cur;
      mtx->slaves[i]->perm.row.cur_to_org = mtx->perm.row.cur_to_org;
      mtx->slaves[i]->perm.col.org_to_cur = mtx->perm.col.org_to_cur;
      mtx->slaves[i]->perm.col.cur_to_org = mtx->perm.col.cur_to_org;
    }
  }
  mtx->order = order;
  for (i = 0; i < mtx->nslaves; i++) {
    mtx->slaves[i]->order = order;
  }
}

void mtx_clear_coord(mtx_matrix_t mtx, int32 row, int32 col){
  struct element_t **rlink, **clink;
  int32 org_row, org_col;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return; /*ben*/
#endif

  last_value_matrix = mtx;
  org_row = mtx->perm.row.cur_to_org[row];
  org_col = mtx->perm.col.cur_to_org[col];
  rlink = &(mtx->hdr.row[org_row]);
  for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
    rlink = &((*rlink)->next.col);
  if( ISNULL(*rlink) ) return;
  clink = &(mtx->hdr.col[org_col]);
  for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
    clink = &((*clink)->next.row);
  delete_from_row(rlink);
  delete_from_col(clink);
}

void mtx_clear_row( mtx_matrix_t mtx, int32 row, mtx_range_t *rng){
  struct element_t **rlink, **clink;
  int32 org_row;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return; /*ben*/
#endif

  last_value_matrix = mtx;
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,mtx->perm.col.org_to_cur[(*rlink)->col]) ) {
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else
        rlink = &((*rlink)->next.col);
    }
  }
}

void mtx_clear_col( mtx_matrix_t mtx, int32 col, mtx_range_t *rng){
  struct element_t **clink, **rlink;
  int32 org_col;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  last_value_matrix = mtx;
  org_col = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org_col]);
  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*clink) ) {
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*clink) ) {
      if( in_range(rng,mtx->perm.row.org_to_cur[(*clink)->row]) ) {
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else
        clink = &((*clink)->next.row);
    }
  }
}

void mtx_clear_region(mtx_matrix_t mtx, mtx_region_t *region){
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif

  if( region == mtx_ENTIRE_MATRIX && !ISSLAVE(mtx)) {
    /* damn the torpedoes, wipe that sucker and slaves fast */
    nuke_fishnet(mtx);
  } else {
    int32 ndx;
    int32 *toorg;
    mtx_region_t reg;

    if (region == mtx_ENTIRE_MATRIX) {
      reg.row.low = reg.col.low = 0;
      reg.row.high = reg.col.high = mtx->order-1;
    } else {
      reg = *region;
    }
    last_value_matrix = mtx;
    toorg = mtx->perm.row.cur_to_org;
    for( ndx = reg.row.low ; ndx <= reg.row.high ; ++ndx )
      blast_row(mtx,toorg[ndx],&(reg.col));
    toorg = mtx->perm.col.cur_to_org;
    for( ndx = reg.col.low ; ndx <= reg.col.high ; ++ndx )
      clean_col(mtx,toorg[ndx]);
  }
}

void mtx_reset_perm(mtx_matrix_t mtx){
  int32 ndx;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  if (ISSLAVE(mtx)) {
    mtx_reset_perm(mtx->master);
    return;
  }

  for( ndx=ZERO ; ndx < mtx->capacity ; ++ndx ) {
    mtx->perm.row.org_to_cur[ndx] = ndx;
    mtx->perm.row.cur_to_org[ndx] = ndx;
    mtx->perm.col.org_to_cur[ndx] = ndx;
    mtx->perm.col.cur_to_org[ndx] = ndx;
  }
  mtx->perm.row.parity = mtx->perm.col.parity = EVEN;

  mtx->data->symbolic_rank = -1;
  mtx->data->nblocks = -1;
  if( NOTNULL(mtx->data->block) ) {
    ascfree(mtx->data->block);
    mtx->data->block = NULL;
  }
}

void mtx_clear(mtx_matrix_t mtx){
  if (ISSLAVE(mtx)) {
    mtx_clear_region(mtx->master,mtx_ENTIRE_MATRIX);
  }
  mtx_clear_region(mtx,mtx_ENTIRE_MATRIX);
  mtx_reset_perm(mtx);
}

real64 mtx_value(mtx_matrix_t mtx, mtx_coord_t *coord){
  struct element_t *elt;
  register int32 org_row,org_col;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return D_ZERO;
#endif
  org_row = mtx->perm.row.cur_to_org[coord->row];
  org_col = mtx->perm.col.cur_to_org[coord->col];
  elt = mtx_find_element(mtx,org_row,org_col);
  mtx->last_value = elt;
  return( ISNULL(elt) ? D_ZERO : elt->value );
}

void mtx_set_value( mtx_matrix_t mtx, mtx_coord_t *coord, real64 value){
  register int32 org_row,org_col;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  org_row = mtx->perm.row.cur_to_org[coord->row];
  org_col = mtx->perm.col.cur_to_org[coord->col];
  if ( NOTNULL(mtx->last_value) &&
       mtx->last_value->row==org_row &&
       mtx->last_value->col==org_col ) {
    mtx->last_value->value = value;
  } else {
    struct element_t *elt;
    if( ISNULL(elt = mtx_find_element(mtx,org_row,org_col)) ) {
      if (value != D_ZERO ) {
        elt = mtx_create_element_value(mtx,org_row,org_col,value);
      }
    } else {
      elt->value = value;
    }
  }
}

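/*
  Read/write sketch (illustrative): mtx_value() caches the element it finds
  in mtx->last_value, so an immediate mtx_set_value() at the same coordinate
  skips the element search. 'mtx', 'row' and 'col' are hypothetical.
*/
#if 0
  mtx_coord_t c;
  real64 v = mtx_value(mtx, mtx_coord(&c,row,col));  /* D_ZERO if absent */
  mtx_set_value(mtx, &c, v + 1.0);                   /* reuses last_value */
#endif
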
void mtx_fill_value(mtx_matrix_t mtx, mtx_coord_t *coord, real64 value){
  register int32 org_row,org_col;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  org_row = mtx->perm.row.cur_to_org[coord->row];
  org_col = mtx->perm.col.cur_to_org[coord->col];
  mtx_create_element_value(mtx,org_row,org_col,value);
}

void mtx_fill_org_value(mtx_matrix_t mtx
  , const mtx_coord_t *coord
  , real64 value
){
#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  mtx_create_element_value(mtx,coord->row,coord->col,value);
}

/* sparse matrix assembly of potentially duplicate fills */
/*
  Takes a matrix assumed to have redundant and otherwise insane incidences
  created by misusing mtx_fill_value, and sums all like entries, eliminating
  the duplicates and the zeros. Returns the negative of the number of
  elements removed, or 1 if it fails for some reason.
  Could stand to have the error messages it emits improved.
  Could stand to take a rowrange or a rowlist, a colrange or a collist,
  and a droptol.
  Algorithm: O(3*tau)
    establish a vector mv of columns-needing-cleaning markers
    set lowmark = -1, highmark = -2
    for rows
      if row empty continue
      establish a vector ev of elt pointers, set null
      prevlink = &header 1st elt pointer
      for each elt in row
        if elt is not marked in ev
          mark ev with elt, update prevlink
        else
          add value to elt pointed at by ev
          mark col in mv as needing cleaning, updating lowmark, highmark
          delete elt
        endif
      endfor
      for each elt in row
        unmark ev with elt
        if elt is 0, delete and mark col in mv
      endfor
    endfor
    for i = lowmark to highmark
      clean col
    endfor
  My god, the tricks we play on a linked list.
*/
int32 mtx_assemble(mtx_matrix_t mtx){
  char *real_mv=NULL, *mv;
  struct element_t **real_ev=NULL, **ev, **prevlink, *elt;
  int32 orgrow, orgcol, lowmark,highmark,dup;
  /* note elt and prevlink could be combined, but the code is
     utterly unreadable if you do so. */

  if (ISNULL(mtx)) return 1;
  if (mtx->order <1) return 0;
  real_mv = mtx_null_mark(mtx->order+1);
  mv = real_mv+1;
  if (ISNULL(real_mv)) return 1;
  real_ev = mtx_null_vector(mtx->order+1);
  ev = real_ev+1;
  if (ISNULL(real_ev)) {
    mtx_null_mark_release();
    return 1;
  }
  /* we have allocated arrays which include a -1 element to buy
     ourselves an awfully convenient lot of safety. */
  lowmark=-1;
  highmark=-2;
  dup = 0;
  last_value_matrix = mtx;
  for (orgrow=0; orgrow < mtx->order; orgrow++) {
    elt = mtx->hdr.row[orgrow];
    prevlink = &(mtx->hdr.row[orgrow]);
    while (NOTNULL(elt)) {
      if (ISNULL(ev[elt->col])) {
        ev[elt->col] = elt; /* mark first elt found for this col */
        prevlink= &(elt->next.col); /* collect pointer to where we go next */
        elt = elt->next.col; /* go there */
      } else {
        /* elt is duplicate and must die */
        dup++;
        ev[elt->col]->value += elt->value; /* accumulate value */
        /* update lowmark, highmark. this is a debatable implementation. */
        /* for small mods on large matrices, this is sane */
        if (lowmark > -1) { /* not first mark */
          if (elt->col < lowmark && elt->col > -1) {
            lowmark = elt->col;
          }
          if (elt->col > highmark && elt->col < mtx->order) {
            highmark = elt->col;
          }
        } else { /* very first mark */
          if (elt->col > -1 && elt->col < mtx->order) {
            lowmark = highmark = elt->col;
          }
        }
        mv[elt->col] = 1; /* mark column as to be cleaned */
        delete_from_row(prevlink);
        elt = *prevlink;
      }
    }
    elt = mtx->hdr.row[orgrow];
    prevlink = &(mtx->hdr.row[orgrow]);
    while (NOTNULL(elt)) { /* renull the accumulator and trash 0s */
      ev[elt->col] = NULL; /* regardless, reset accum */
      if (elt->value != D_ZERO) {
        prevlink= &(elt->next.col); /* collect pointer to where we go next */
        elt = elt->next.col; /* go there */
      } else {
        /* this is still a debatable implementation. */
        if (lowmark > -1) { /* not first mark */
          if (elt->col < lowmark && elt->col > -1) {
            lowmark = elt->col;
          }
          if (elt->col > highmark && elt->col < mtx->order) {
            highmark = elt->col;
          }
        } else { /* very first mark */
          if (elt->col > -1 && elt->col < mtx->order) {
            lowmark = highmark = elt->col;
          }
        }
        mv[elt->col] = 1; /* mark column as to be cleaned */
        delete_from_row(prevlink);
        elt = *prevlink;
      }
    }
  }
  for (orgcol = lowmark; orgcol <= highmark; orgcol++) {
    if (mv[orgcol]) {
      clean_col(mtx,orgcol); /* scrap dups and 0s */
    }
  }
  mtx_null_mark_release();
  mtx_null_vector_release();
  return -dup;
}

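/*
  Assembly sketch (illustrative): repeated mtx_fill_value() calls at the
  same coordinate leave duplicate elements; mtx_assemble() sums them and
  drops zeros. 'mtx' is a hypothetical, already-sized matrix.
*/
#if 0
  mtx_coord_t c;
  int32 dups_removed;
  mtx_fill_value(mtx, mtx_coord(&c,2,2), 1.0);
  mtx_fill_value(mtx, &c, 2.0);          /* duplicate at (2,2) */
  dups_removed = -mtx_assemble(mtx);     /* (2,2) now holds 3.0 */
#endif
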
void mtx_del_zr_in_row(mtx_matrix_t mtx, int32 row){
  register int32 org;
  struct element_t **rlink, **clink;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  org = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org]);

  last_value_matrix = mtx;
  while( NOTNULL(*rlink) )
    if( (*rlink)->value == D_ZERO ) {
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    } else
      rlink = &((*rlink)->next.col);
}

void mtx_del_zr_in_col(mtx_matrix_t mtx, int32 col){
  register int32 org;
  struct element_t **clink, **rlink;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  org = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org]);

  last_value_matrix = mtx;
  while( NOTNULL(*clink) )
    if( (*clink)->value == D_ZERO ) {
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    } else
      clink = &((*clink)->next.row);
}

void mtx_del_zr_in_rowrange(mtx_matrix_t mtx, mtx_range_t *rng){
  register int32 org,row,rowhi, *toorg;
  struct element_t **rlink, **clink;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  rowhi=rng->high;
  toorg= mtx->perm.row.cur_to_org;
  last_value_matrix = mtx;
  for (row=rng->low; row <=rowhi; row++) {
    org = toorg[row];
    rlink = &(mtx->hdr.row[org]);

    while( NOTNULL(*rlink) ) {
      if( (*rlink)->value == D_ZERO ) {
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col);
      }
    }

  }
}

void mtx_del_zr_in_colrange(mtx_matrix_t mtx, mtx_range_t *rng){
  register int32 org,col,colhi, *toorg;
  struct element_t **clink, **rlink;

#if MTX_DEBUG
  if(!mtx_check_matrix(mtx)) return;
#endif
  colhi=rng->high;
  toorg= mtx->perm.col.cur_to_org;
  last_value_matrix = mtx;
  for (col=rng->low; col <=colhi; col++) {
    org = toorg[col];
    clink = &(mtx->hdr.col[org]);

    while( NOTNULL(*clink) ) {
      if( (*clink)->value == D_ZERO ) {
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else {
        clink = &((*clink)->next.row);
      }
    }

  }
}

void mtx_steal_org_row_vec(mtx_matrix_t mtx, int32 row
  , real64 *vec, mtx_range_t *rng
){
  struct element_t **rlink, **clink;
  int32 org_row;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  last_value_matrix = mtx;
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      vec[(*rlink)->col] = (*rlink)->value;
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    }
  } else if( rng->high >= rng->low ) {
    int32 *tocur;
    tocur = mtx->perm.col.org_to_cur;
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,tocur[(*rlink)->col]) ) {
        vec[(*rlink)->col] = (*rlink)->value;
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col);
      }
    }
  }
}

void mtx_steal_org_col_vec(mtx_matrix_t mtx, int32 col
  , real64 *vec, mtx_range_t *rng
){
  struct element_t **clink, **rlink;
  int32 org_col;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  last_value_matrix = mtx;
  org_col = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org_col]);
  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*clink) ) {
      vec[(*clink)->row] = (*clink)->value;
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    }
  } else if( rng->high >= rng->low ) {
    int32 *tocur;
    tocur = mtx->perm.row.org_to_cur;
    while( NOTNULL(*clink) ) {
      if( in_range(rng,tocur[(*clink)->row]) ) {
        vec[(*clink)->row] = (*clink)->value;
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else {
        clink = &((*clink)->next.row);
      }
    }
  }
}

void mtx_steal_cur_row_vec(mtx_matrix_t mtx, int32 row
  , real64 *vec, mtx_range_t *rng
){
  struct element_t **rlink, **clink;
  int32 org_row, *tocur;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  tocur = mtx->perm.col.org_to_cur;
  last_value_matrix = mtx;
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      vec[tocur[(*rlink)->col]] = (*rlink)->value;
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,tocur[(*rlink)->col]) ) {
        vec[tocur[(*rlink)->col]] = (*rlink)->value;
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col);
      }
    }
  }
}

void mtx_steal_cur_col_vec(mtx_matrix_t mtx, int32 col
  , real64 *vec, mtx_range_t *rng
){
  struct element_t **clink, **rlink;
  int32 org_col, *tocur;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return;
#endif

  tocur = mtx->perm.row.org_to_cur;
  last_value_matrix = mtx;
  org_col = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org_col]);
  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*clink) ) {
      vec[tocur[(*clink)->row]] = (*clink)->value;
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*clink) ) {
      if( in_range(rng,tocur[(*clink)->row]) ) {
        vec[tocur[(*clink)->row]] = (*clink)->value;
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else {
        clink = &((*clink)->next.row);
      }
    }
  }
}

#ifdef DELETE_THIS_UNUSED_FUNCTION
/* Little function to enlarge the capacity of a sparse to the len given.
 * New memory allocated, if any, is not initialized in any way.
 * The pointer given is not itself changed, only its data.
 * This function in no way shrinks the data in a sparse.
 * (exception: buggy realloc implementations may shrink it, ack!)
 * Data/idata values up to the old capacity (ret->cap) are preserved.
 */
static int enlarge_sparse(mtx_sparse_t *ret, int32 len){
  int32 *inew;
  real64 *dnew;
  if (ISNULL(ret)) {
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with NULL.\n");
    return 1;
  }
  if (len <= ret->cap || len <1) return 0; /* already big enough */

  if (ret->idata == NULL) {
    inew = ASC_NEW_ARRAY(int32,len);
  } else {
    inew = (int32 *)ascrealloc(ret->idata,sizeof(int32)*len);
  }
  if (ISNULL(inew)) {
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"Insufficient memory.\n");
    return 1;
  }
  ret->idata = inew; /* dnew can still fail without losing inew memory. */

  if (ret->data == NULL) {
    dnew = ASC_NEW_ARRAY(real64,len);
  } else {
    dnew = (real64 *)ascrealloc(ret->data,sizeof(real64)*len);
  }
  if (ISNULL(dnew)) {
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"Insufficient memory.\n");
    return 1;
  }
  ret->data = dnew; /* we succeeded */
  ret->cap = len;
  return 0;
}
#endif /* DELETE_THIS_UNUSED_FUNCTION */

/* going to try to make steal also handle sparse creation ... */
/* don't you dare, whoever you are! */
boolean mtx_steal_org_row_sparse(mtx_matrix_t mtx, int32 row
  , mtx_sparse_t *sp, mtx_range_t *rng
){
  struct element_t **rlink, **clink;
  int32 org_row;
  int32 len,k;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return TRUE;
#endif
  if (sp == mtx_CREATE_SPARSE) {
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with mtx_CREATE_SPARSE. Not supported.\n");
    return TRUE;
  }
  if (rng == mtx_ALL_COLS) {
    len = mtx->order;
  } else {
    len = rng->high - rng->low +1;
  }
  if (sp->cap < len) {
    sp->len = 0;
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with sparse of insufficient capacity.\n");
    return TRUE;
  }

  last_value_matrix = mtx;
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  k = 0;

  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      sp->idata[k] = (*rlink)->col;
      sp->data[k] = (*rlink)->value;
      k++;
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    }
  } else if( rng->high >= rng->low ) {
    int32 *tocur;
    tocur = mtx->perm.col.org_to_cur;
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,tocur[(*rlink)->col]) ) {
        sp->idata[k] = (*rlink)->col;
        sp->data[k] = (*rlink)->value;
        k++;
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col);
      }
    }
  }
  sp->len = k;
  return FALSE;
}

boolean mtx_steal_org_col_sparse(mtx_matrix_t mtx, int32 col
  , mtx_sparse_t *sp, mtx_range_t *rng
){
  struct element_t **clink, **rlink;
  int32 org_col;
  int32 len,k;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return TRUE;
#endif
  if (sp == mtx_CREATE_SPARSE) {
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with mtx_CREATE_SPARSE. Not supported.\n");
    return TRUE;
  }
  if (rng == mtx_ALL_ROWS) {
    len = mtx->order;
  } else {
    len = rng->high - rng->low +1;
  }
  if (sp->cap < len) {
    sp->len = 0;
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with sparse of insufficient capacity.\n");
    return TRUE;
  }

  last_value_matrix = mtx;
  org_col = mtx->perm.col.cur_to_org[col];
  clink = &(mtx->hdr.col[org_col]);
  k = 0;

  if( rng == mtx_ALL_ROWS ) {
    while( NOTNULL(*clink) ) {
      sp->idata[k] = (*clink)->row;
      sp->data[k] = (*clink)->value;
      k++;
      rlink = &(mtx->hdr.row[(*clink)->row]);
      for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
        rlink = &((*rlink)->next.col);
      delete_from_col(clink);
      delete_from_row(rlink);
    }
  } else if( rng->high >= rng->low ) {
    int32 *tocur;
    tocur = mtx->perm.row.org_to_cur;
    while( NOTNULL(*clink) ) {
      if( in_range(rng,tocur[(*clink)->row]) ) {
        sp->idata[k] = (*clink)->row;
        sp->data[k] = (*clink)->value;
        k++;
        rlink = &(mtx->hdr.row[(*clink)->row]);
        for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
          rlink = &((*rlink)->next.col);
        delete_from_col(clink);
        delete_from_row(rlink);
      } else {
        clink = &((*clink)->next.row);
      }
    }
  }
  sp->len = k;
  return FALSE;
}

boolean mtx_steal_cur_row_sparse(mtx_matrix_t mtx, int32 row
  , mtx_sparse_t *sp, mtx_range_t *rng
){
  struct element_t **rlink, **clink;
  int32 org_row, *tocur;
  int32 len,k;

#if MTX_DEBUG
  if( !mtx_check_matrix(mtx) ) return TRUE;
#endif
  if (sp == mtx_CREATE_SPARSE) {
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with mtx_CREATE_SPARSE. Not supported.\n");
    return TRUE;
  }
  if (rng == mtx_ALL_COLS) {
    len = mtx->order;
  } else {
    len = rng->high - rng->low +1;
  }
  if (sp->cap < len) {
    sp->len = 0;
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with sparse of insufficient capacity.\n");
    return TRUE;
  }

  tocur = mtx->perm.col.org_to_cur;
  last_value_matrix = mtx;
  org_row = mtx->perm.row.cur_to_org[row];
  rlink = &(mtx->hdr.row[org_row]);
  k = 0;

  if( rng == mtx_ALL_COLS ) {
    while( NOTNULL(*rlink) ) {
      sp->idata[k] = tocur[(*rlink)->col];
      sp->data[k] = (*rlink)->value;
      k++;
      clink = &(mtx->hdr.col[(*rlink)->col]);
      for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
        clink = &((*clink)->next.row);
      delete_from_row(rlink);
      delete_from_col(clink);
    }
  } else if( rng->high >= rng->low ) {
    while( NOTNULL(*rlink) ) {
      if( in_range(rng,tocur[(*rlink)->col]) ) {
        sp->idata[k] = tocur[(*rlink)->col];
        sp->data[k] = (*rlink)->value;
        k++;
        clink = &(mtx->hdr.col[(*rlink)->col]);
        for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
          clink = &((*clink)->next.row);
        delete_from_row(rlink);
        delete_from_col(clink);
      } else {
        rlink = &((*rlink)->next.col);
      }
    }
  }
  sp->len = k;
  return FALSE;
}

1885 boolean mtx_steal_cur_col_sparse(mtx_matrix_t mtx, int32 col
1886 , mtx_sparse_t *sp, mtx_range_t *rng
1887 ){
1888 struct element_t **clink, **rlink;
1889 int32 org_col, *tocur;
1890 int32 len,k;
1891
1892 #if MTX_DEBUG
1893 if( !mtx_check_matrix(mtx) ) return TRUE;
1894 #endif
1895 if (sp == mtx_CREATE_SPARSE) {
1896 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with mtx_CREATE_SPARSE. Not supported.\n");
1897 return TRUE;
1898 }
1899 if (rng == mtx_ALL_ROWS) {
1900 len = mtx->order;
1901 } else {
1902 len = rng->high - rng->low +1;
1903 }
1904 if (sp->cap < len) {
1905 sp->len = 0;
1906 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with sparse of insufficient capacity.\n");
1907 return TRUE;
1908 }
1909
1910 tocur = mtx->perm.row.org_to_cur;
1911 last_value_matrix = mtx;
1912 org_col = mtx->perm.col.cur_to_org[col];
1913 clink = &(mtx->hdr.col[org_col]);
1914 k = 0;
1915
1916 if( rng == mtx_ALL_ROWS ) {
1917 while( NOTNULL(*clink) ) {
1918 sp->idata[k] = tocur[(*clink)->row];
1919 sp->data[k] = (*clink)->value;
1920 k++;
1921 rlink = &(mtx->hdr.row[(*clink)->row]);
1922 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1923 rlink = &((*rlink)->next.col);
1924 delete_from_col(clink);
1925 delete_from_row(rlink);
1926 }
1927 } else if( rng->high >= rng->low ) {
1928 while( NOTNULL(*clink) ) {
1929 if( in_range(rng,tocur[(*clink)->row]) ) {
1930 sp->idata[k] = tocur[(*clink)->row];
1931 sp->data[k] = (*clink)->value;
1932 k++;
1933 rlink = &(mtx->hdr.row[(*clink)->row]);
1934 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1935 rlink = &((*rlink)->next.col);
1936 delete_from_col(clink);
1937 delete_from_row(rlink);
1938 } else {
1939 clink = &((*clink)->next.row);
1940 }
1941 }
1942 }
1943 sp->len = k;
1944 return FALSE;
1945 }
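
/*
   Usage sketch for the mtx_steal_*_sparse family (hypothetical sizes and
   indices, not part of the library). The sparse must be caller-allocated,
   since mtx_CREATE_SPARSE is rejected above, with cap at least the width
   of the range swept. For the cur variants idata receives CURRENT
   indices; for the org variants it receives ORG indices.

     enum { N = 128 };                    assume N >= mtx order
     int32 idx[N];
     real64 val[N];
     mtx_sparse_t sp;
     sp.cap = N; sp.len = 0; sp.idata = idx; sp.data = val;
     if( !mtx_steal_cur_row_sparse(mtx,row,&sp,mtx_ALL_COLS) ) {
       ... sp.len pairs (sp.idata[k],sp.data[k]) now hold the row ...
     }

   The steal is destructive: each element copied out is also deleted from
   both its row and column lists, so the swept part of the row is empty
   afterward.
*/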
1946
1947 void mtx_fill_org_row_vec(mtx_matrix_t mtx, int32 row
1948 , real64 *vec, mtx_range_t *rng
1949 ){
1950 int32 org_row,highcol, *toorg;
1951 register int32 org_col;
1952 register real64 value;
1953
1954 #if MTX_DEBUG
1955 if(!mtx_check_matrix(mtx)) return;
1956 #endif
1957
1958 org_row = mtx->perm.row.cur_to_org[row];
1959 if( rng == mtx_ALL_COLS ) {
1960 highcol=mtx->order;
1961 for( org_col=0 ; org_col <highcol ; org_col++ ) {
1962 if ((value=vec[org_col]) != D_ZERO)
1963 mtx_create_element_value(mtx,org_row,org_col,value);
1964 }
1965 } else {
1966 register int32 cur_col;
1967
1968 toorg = mtx->perm.col.cur_to_org;
1969 highcol= rng->high;
1970 for ( cur_col=rng->low; cur_col<=highcol; cur_col++) {
1971 if ((value=vec[(org_col=toorg[cur_col])]) != D_ZERO)
1972 mtx_create_element_value(mtx,org_row,org_col,value);
1973 }
1974 }
1975 }
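
/*
   Sketch (hypothetical names): scatter an org-indexed dense vector into a
   row. In both branches above the lookup is vec[org_col]; a non-NULL rng
   only limits which CURRENT columns are visited. Entries equal to D_ZERO
   are skipped, so no explicit zeros are created.

     real64 vec[ORDER];                   ORDER >= mtx order, org-indexed
     ... set some entries of vec ...
     mtx_fill_org_row_vec(mtx,row,vec,mtx_ALL_COLS);
*/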
1976
1977 void mtx_fill_org_col_vec(mtx_matrix_t mtx
1978 , int32 col, real64 *vec, mtx_range_t *rng
1979 ){
1980 int32 org_col,highrow, *toorg;
1981 register int32 org_row;
1982 register real64 value;
1983
1984 #if MTX_DEBUG
1985 if(!mtx_check_matrix(mtx)) return;
1986 #endif
1987
1988 org_col = mtx->perm.col.cur_to_org[col];
1989 if( rng == mtx_ALL_ROWS ) {
1990 highrow=mtx->order;
1991 for( org_row=0 ; org_row <highrow ; org_row++ ) {
1992 if ((value=vec[org_row]) != D_ZERO)
1993 mtx_create_element_value(mtx,org_row,org_col,value);
1994 }
1995 } else {
1996 register int32 cur_row;
1997
1998 toorg = mtx->perm.row.cur_to_org;
1999 highrow= rng->high;
2000 for ( cur_row=rng->low; cur_row<=highrow; cur_row++) {
2001 if ((value=vec[(org_row=toorg[cur_row])]) != D_ZERO)
2002 mtx_create_element_value(mtx,org_row,org_col,value);
2003 }
2004 }
2005 }
2006
2007 void mtx_fill_cur_row_vec(mtx_matrix_t mtx
2008 , int32 row, real64 *vec, mtx_range_t *rng
2009 ){
2010 int32 org_row,highcol,lowcol, *toorg;
2011 register int32 cur_col;
2012 register real64 value;
2013
2014 #if MTX_DEBUG
2015 if(!mtx_check_matrix(mtx)) return;
2016 #endif
2017
2018 org_row = mtx->perm.row.cur_to_org[row];
2019 toorg=mtx->perm.col.cur_to_org;
2020 if( rng == mtx_ALL_COLS ) {
2021 highcol = mtx->order-1;
2022 lowcol = 0;
2023 } else {
2024 highcol= rng->high;
2025 lowcol=rng->low;
2026 }
2027 for( cur_col=lowcol ; cur_col <= highcol ; cur_col++ ) {
2028 if ((value=vec[cur_col]) != D_ZERO)
2029 mtx_create_element_value(mtx,org_row,toorg[cur_col],value);
2030 }
2031 }
2032
2033 void mtx_fill_cur_col_vec(mtx_matrix_t mtx
2034 , int32 col, real64 *vec, mtx_range_t *rng
2035 ){
2036 int32 org_col,highrow,lowrow, *toorg;
2037 register int32 cur_row;
2038 register real64 value;
2039
2040 #if MTX_DEBUG
2041 if(!mtx_check_matrix(mtx)) return;
2042 #endif
2043
2044 org_col = mtx->perm.col.cur_to_org[col];
2045 toorg=mtx->perm.row.cur_to_org;
2046 if( rng == mtx_ALL_ROWS ) {
2047 highrow=mtx->order-1;
2048 lowrow=0;
2049 } else {
2050 highrow= rng->high;
2051 lowrow=rng->low;
2052 }
2053 for( cur_row=lowrow ; cur_row <= highrow ; cur_row++ ) {
2054 if ((value=vec[cur_row]) != D_ZERO)
2055 mtx_create_element_value(mtx,toorg[cur_row],org_col,value);
2056 }
2057 }
2058
2059 void mtx_dropfill_cur_col_vec(mtx_matrix_t mtx, int32 col,
2060 real64 *vec, mtx_range_t *rng,
2061 real64 tol
2062 ){
2063 int32 org_col,highrow,lowrow, *toorg;
2064 register int32 cur_row;
2065 register real64 value;
2066
2067 if (tol==0.0) {
2068 mtx_fill_cur_col_vec(mtx,col,vec,rng);
2069 return;
2070 }
2071
2072 #if MTX_DEBUG
2073 if(!mtx_check_matrix(mtx)) return;
2074 #endif
2075
2076 org_col = mtx->perm.col.cur_to_org[col];
2077 toorg=mtx->perm.row.cur_to_org;
2078 if( rng == mtx_ALL_ROWS ) {
2079 highrow=mtx->order-1;
2080 lowrow=0;
2081 } else {
2082 highrow= rng->high;
2083 lowrow=rng->low;
2084 }
2085 for( cur_row=lowrow ; cur_row <= highrow ; cur_row++ ) {
2086 if (fabs(value=vec[cur_row]) > tol)
2087 mtx_create_element_value(mtx,toorg[cur_row],org_col,value);
2088 }
2089 }
2090
2091 void mtx_dropfill_cur_row_vec(mtx_matrix_t mtx, int32 row,
2092 real64 *vec, mtx_range_t *rng,
2093 real64 tol
2094 ){
2095 int32 org_row,highcol,lowcol, *toorg;
2096 register int32 cur_col;
2097 register real64 value;
2098
2099 if (tol==0.0) {
2100 mtx_fill_cur_row_vec(mtx,row,vec,rng);
2101 return;
2102 }
2103
2104 #if MTX_DEBUG
2105 if(!mtx_check_matrix(mtx)) return;
2106 #endif
2107
2108 org_row = mtx->perm.row.cur_to_org[row];
2109 toorg=mtx->perm.col.cur_to_org;
2110 if( rng == mtx_ALL_COLS ) {
2111 highcol = mtx->order-1;
2112 lowcol = 0;
2113 } else {
2114 highcol= rng->high;
2115 lowcol=rng->low;
2116 }
2117 for( cur_col=lowcol ; cur_col <= highcol ; cur_col++ ) {
2118 if (fabs(value=vec[cur_col]) > tol)
2119 mtx_create_element_value(mtx,org_row,toorg[cur_col],value);
2120 }
2121 }
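
/*
   Sketch (hypothetical numbers): fill a current-indexed row but drop
   anything at or below a magnitude threshold.

     mtx_range_t rng;
     rng.low = 0; rng.high = 99;
     mtx_dropfill_cur_row_vec(mtx,row,vec,&rng,1e-12);

   Entries with fabs(value) <= tol are discarded. With tol == 0.0 the
   call falls through to mtx_fill_cur_row_vec, which keeps any value not
   exactly D_ZERO.
*/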
2122
2123 void mtx_fill_org_row_sparse(mtx_matrix_t mtx, int32 row,
2124 const mtx_sparse_t *sp
2125 ){
2126 int32 orgrow,i;
2127 #if MTX_DEBUG
2128 if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
2129 #endif
2130 orgrow = mtx->perm.row.cur_to_org[row];
2131 for (i=0; i < sp->len; i++) {
2132 if (sp->data[i] != D_ZERO
2133 #if MTX_DEBUG
2134 && sp->idata[i] >=0 && sp->idata[i] < mtx->order
2135 #endif
2136 ) {
2137 mtx_create_element_value(mtx,orgrow,sp->idata[i],sp->data[i]);
2138 }
2139 }
2140 }
2141
2142 void mtx_fill_org_col_sparse(mtx_matrix_t mtx, int32 col,
2143 const mtx_sparse_t *sp
2144 ){
2145 int32 orgcol,i;
2146 #if MTX_DEBUG
2147 if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
2148 #endif
2149 orgcol = mtx->perm.col.cur_to_org[col];
2150 for (i=0; i < sp->len; i++) {
2151 if (sp->data[i] != D_ZERO
2152 #if MTX_DEBUG
2153 && sp->idata[i] >=0 && sp->idata[i] < mtx->order
2154 #endif
2155 ) {
2156 mtx_create_element_value(mtx,sp->idata[i],orgcol,sp->data[i]);
2157 }
2158 }
2159 }
2160
2161 void mtx_fill_cur_row_sparse(mtx_matrix_t mtx, int32 row,
2162 const mtx_sparse_t *sp
2163 ){
2164 int32 orgrow,i;
2165 #if MTX_DEBUG
2166 if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
2167 #endif
2168 orgrow = mtx->perm.row.cur_to_org[row];
2169 for (i=0; i < sp->len; i++) {
2170 if (
2171 #if MTX_DEBUG
2172 sp->idata[i] >=0 && sp->idata[i] < mtx->order &&
2173 #endif
2174 sp->data[i] != D_ZERO) {
2175 mtx_create_element_value(mtx,orgrow,
2176 mtx->perm.col.cur_to_org[sp->idata[i]],
2177 sp->data[i]);
2178 }
2179 }
2180 }
2181
2182 void mtx_fill_cur_col_sparse(mtx_matrix_t mtx, int32 col,
2183 const mtx_sparse_t *sp
2184 ){
2185 int32 orgcol,i;
2186 #if MTX_DEBUG
2187 if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
2188 #endif
2189 orgcol = mtx->perm.col.cur_to_org[col];
2190 for (i=0; i < sp->len; i++) {
2191 if (
2192 #if MTX_DEBUG
2193 sp->idata[i] >=0 && sp->idata[i] < mtx->order &&
2194 #endif
2195 sp->data[i] != D_ZERO) {
2196 mtx_create_element_value(mtx,
2197 mtx->perm.row.cur_to_org[sp->idata[i]],
2198 orgcol,
2199 sp->data[i]);
2200 }
2201 }
2202 }
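
/*
   Sketch (hypothetical data): scatter a sparse into a column. For the
   cur variants idata carries CURRENT indices (mapped through cur_to_org
   above); for the org variants it carries ORG indices. D_ZERO entries
   are skipped.

     int32 idx[2] = {3, 7};               current row numbers
     real64 val[2] = {1.0, -2.5};
     mtx_sparse_t sp;
     sp.len = 2; sp.cap = 2; sp.idata = idx; sp.data = val;
     mtx_fill_cur_col_sparse(mtx,col,&sp);
*/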
2203
2204 void mtx_mult_row(mtx_matrix_t mtx, int32 row, real64 factor, mtx_range_t *rng){
2205 struct element_t *elt;
2206 int32 *tocur;
2207
2208 if( factor == D_ZERO ) {
2209 mtx_clear_row(mtx,row,rng);
2210 return;
2211 }
2212
2213 if (factor == D_ONE) return;
2214 #if MTX_DEBUG
2215 if(!mtx_check_matrix(mtx)) return;
2216 #endif
2217
2218 tocur = mtx->perm.col.org_to_cur;
2219 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[row]];
2220 if( rng == mtx_ALL_COLS )
2221 for( ; NOTNULL(elt); elt = elt->next.col ) elt->value *= factor;
2222 else if( rng->high >= rng->low )
2223 for( ; NOTNULL(elt); elt = elt->next.col )
2224 if( in_range(rng,tocur[elt->col]) ) elt->value *= factor;
2225 }
2226
2227 void mtx_mult_col(mtx_matrix_t mtx, int32 col, real64 factor, mtx_range_t *rng){
2228 struct element_t *elt;
2229 int32 *tocur;
2230
2231 if( factor == D_ZERO ) {
2232 mtx_clear_col(mtx,col,rng);
2233 return;
2234 }
2235 if (factor == D_ONE) return;
2236
2237 #if MTX_DEBUG
2238 if(!mtx_check_matrix(mtx)) return;
2239 #endif
2240
2241 tocur = mtx->perm.row.org_to_cur;
2242 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[col]];
2243 if( rng == mtx_ALL_ROWS )
2244 for( ; NOTNULL(elt); elt = elt->next.row ) elt->value *= factor;
2245 else if( rng->high >= rng->low )
2246 for( ; NOTNULL(elt); elt = elt->next.row )
2247 if( in_range(rng,tocur[elt->row]) ) elt->value *= factor;
2248 }
2249
2250 void mtx_mult_row_zero(mtx_matrix_t mtx, int32 row, mtx_range_t *rng){
2251 struct element_t *elt;
2252 int32 *tocur;
2253
2254 #if MTX_DEBUG
2255 if(!mtx_check_matrix(mtx)) return;
2256 #endif
2257
2258 tocur = mtx->perm.col.org_to_cur;
2259 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[row]];
2260 if( rng == mtx_ALL_COLS )
2261 for( ; NOTNULL(elt); elt = elt->next.col ) elt->value = D_ZERO;
2262 else if( rng->high >= rng->low )
2263 for( ; NOTNULL(elt); elt = elt->next.col )
2264 if( in_range(rng,tocur[elt->col]) ) elt->value = D_ZERO;
2265 }
2266
2267 void mtx_mult_col_zero(mtx_matrix_t mtx, int32 col, mtx_range_t *rng){
2268 struct element_t *elt;
2269 int32 *tocur;
2270
2271 #if MTX_DEBUG
2272 if(!mtx_check_matrix(mtx)) return;
2273 #endif
2274
2275 tocur = mtx->perm.row.org_to_cur;
2276 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[col]];
2277 if( rng == mtx_ALL_ROWS )
2278 for( ; NOTNULL(elt); elt = elt->next.row ) elt->value = D_ZERO;
2279 else if( rng->high >= rng->low )
2280 for( ; NOTNULL(elt); elt = elt->next.row )
2281 if( in_range(rng,tocur[elt->row]) ) elt->value = D_ZERO;
2282 }
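
/*
   Sketch (hypothetical row index): the difference between the two
   zeroing paths above. mtx_mult_row with factor == D_ZERO removes the
   elements via mtx_clear_row; mtx_mult_row_zero writes D_ZERO into the
   existing elements and leaves the incidence pattern intact, which
   matters to callers that rely on the nonzero structure.

     mtx_mult_row(mtx,row,0.5,mtx_ALL_COLS);      scale values by 1/2
     mtx_mult_row_zero(mtx,row,mtx_ALL_COLS);     zero values, keep pattern
*/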
2283
2284 void mtx_add_row(mtx_matrix_t mtx, int32 s_cur, int32 t_cur, real64 factor,
2285 mtx_range_t *rng
2286 ){
2287 register int32 org_col;
2288 int32 t_org,*tocur;
2289 struct element_t **arr,*elt;
2290
2291 #if MTX_DEBUG
2292 if(!mtx_check_matrix(mtx)) return;
2293 #endif
2294
2295 t_org = mtx->perm.row.cur_to_org[t_cur];
2296 if( rng == mtx_ALL_COLS ) {
2297 arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
2298 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[s_cur]];
2299 for( ; NOTNULL(elt); elt = elt->next.col ) {
2300 if( ISNULL(arr[(org_col=elt->col)]) ) {
2301 arr[org_col] =
2302 mtx_create_element_value(mtx,t_org,org_col,(factor * elt->value));
2303 } else {
2304 arr[org_col]->value += factor * elt->value;
2305 }
2306 }
2307 mtx_renull_using_row(mtx,t_org,arr);
2308 } else if( rng->high >= rng->low ) {
2309 register int32 cur_col;
2310
2311 tocur = mtx->perm.col.org_to_cur;
2312 arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
2313 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[s_cur]];
2314 for( ; NOTNULL(elt); elt = elt->next.col ) {
2315 cur_col=tocur[(org_col=elt->col)];
2316 if( in_range(rng,cur_col) ) {
2317 if( NOTNULL(arr[org_col]) ) {
2318 arr[org_col]->value += factor * elt->value;
2319 } else {
2320 arr[org_col] =
2321 mtx_create_element_value(mtx,t_org,org_col,
2322 (factor * elt->value));
2323 /* creation path; hit rate is usually lower than the increment path */
2324 }
2325 }
2326 }
2327 mtx_renull_using_row(mtx,t_org,arr);
2328 }
2329 mtx_null_vector_release();
2330 }
2331
2332 void mtx_add_col(mtx_matrix_t mtx, int32 s_cur, int32 t_cur, real64 factor,
2333 mtx_range_t *rng
2334 ){
2335 int32 t_org,*tocur;
2336 struct element_t **arr,*elt;
2337
2338 #if MTX_DEBUG
2339 if(!mtx_check_matrix(mtx)) return;
2340 #endif
2341
2342 if( rng == mtx_ALL_ROWS ) {
2343 t_org = mtx->perm.col.cur_to_org[t_cur];
2344 arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
2345 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[s_cur]];
2346 for( ; NOTNULL(elt); elt = elt->next.row )
2347 if( ISNULL(arr[elt->row]) ) {
2348 arr[elt->row] =
2349 mtx_create_element_value(mtx,elt->row,t_org,
2350 (factor * elt->value));
2351 } else arr[elt->row]->value += factor * elt->value;
2352 mtx_renull_using_col(mtx,t_org,arr);
2353 } else if( rng->high >= rng->low ) {
2354 tocur = mtx->perm.row.org_to_cur;
2355 t_org = mtx->perm.col.cur_to_org[t_cur];
2356 arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
2357 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[s_cur]];
2358 for( ; NOTNULL(elt); elt = elt->next.row ) {
2359 if( in_range(rng,tocur[elt->row]) ) {
2360 if( NOTNULL(arr[elt->row]) ) {
2361 arr[elt->row]->value += factor * elt->value;
2362 } else {
2363 arr[elt->row] =
2364 mtx_create_element_value(mtx,elt->row,t_org,
2365 (factor * elt->value));
2366 }
2367 }
2368 }
2369 mtx_renull_using_col(mtx,t_org,arr);
2370 }
2371 mtx_null_vector_release();
2372 }
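
/*
   Sketch (hypothetical indices): accumulate target += factor*source for
   whole rows; fill-in elements are created wherever the target row has
   no element in a column the source touches.

     mtx_add_row(mtx,4,9,2.5,mtx_ALL_COLS);       row 9 += 2.5 * row 4
*/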
2373
2374 /**
2375 Expands the given row into an array of pointers, indexed on original
2376 col number. The array is obtained from mtx_null_row_vector().
2377 Be sure to call mtx_null_row_vector_release() once you are done with the
2378 vector and have rezeroed it.
2379 */
2380 static struct element_t **mtx_expand_row_series( mtx_matrix_t mtx, int32 org){
2381 struct element_t **arr;
2382 struct element_t *elt;
2383
2384 arr = mtx_null_row_vector(mtx->order);
2385 for( elt=mtx->hdr.row[org]; NOTNULL(elt); elt=elt->next.col )
2386 arr[elt->col] = elt;
2387 return(arr);
2388 }
2389
2390 /**
2391 Expands the given col into an array of pointers, indexed on original
2392 row number. The array is obtained from mtx_null_col_vector().
2393 Be sure to call mtx_null_col_vector_release() once you are done with the
2394 vector and have rezeroed it.
2395 */
2396 static struct element_t **mtx_expand_col_series(mtx_matrix_t mtx, int32 org){
2397 struct element_t **arr;
2398 struct element_t *elt;
2399
2400 arr = mtx_null_col_vector(mtx->order);
2401 for( elt = mtx->hdr.col[org] ; NOTNULL(elt) ; elt = elt->next.row )
2402 arr[elt->row] = elt;
2403 return(arr);
2404 }
2405
2406 struct add_series_data {
2407 mtx_matrix_t mtx; /* matrix we're operating on */
2408 struct element_t **arr; /* target row/col expansion array */
2409 int32 *tocur; /* col/row permutation vector */
2410 int32 t_org; /* org row/col which is expanded */
2411 };
2412
2413 static struct add_series_data
2414 rsdata={NULL,NULL,NULL,mtx_NONE},
2415 csdata={NULL,NULL,NULL,mtx_NONE};
2416
2417 static void add_series_data_release(void){
2418 /* if the saved state looks sane and still holds an array, release it */
2419 if (NOTNULL(rsdata.mtx) && NOTNULL(rsdata.tocur)
2420 && rsdata.t_org >= 0 && NOTNULL(rsdata.arr)) {
2421 mtx_null_row_vector_release();
2422 }
2423 if (NOTNULL(csdata.mtx) && NOTNULL(csdata.tocur)
2424 && csdata.t_org >= 0 && NOTNULL(csdata.arr)) {
2425 mtx_null_col_vector_release();
2426 }
2427 /* reinit data */
2428
2429 rsdata.mtx = NULL;
2430 rsdata.arr = NULL;
2431 rsdata.tocur = NULL;
2432 rsdata.t_org = mtx_NONE;
2433
2434 csdata.mtx = NULL;
2435 csdata.arr = NULL;
2436 csdata.tocur = NULL;
2437 csdata.t_org = mtx_NONE;
2438 }
2439
2440 void mtx_add_row_series_init(mtx_matrix_t mtx,int32 t_cur,boolean use){
2441
2442 #if MTX_DEBUG
2443 if(!mtx_check_matrix(mtx)) return;
2444 #endif
2445
2446 if ( !(rsdata.mtx) ) {
2447 /* no target currently held, so this is a grabbing call */
2448 if ( mtx && (t_cur >= 0) && (t_cur < mtx->order) ) {
2449 rsdata.mtx = mtx;
2450 rsdata.tocur = mtx->perm.col.org_to_cur;
2451 rsdata.t_org = mtx->perm.row.cur_to_org[t_cur];
2452 rsdata.arr = mtx_expand_row_series(mtx,rsdata.t_org);
2453 } else {
2454 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called for grab with invalid row or mtx. row number %d.\n",t_cur);
2455 }
2456 return;
2457 } else {
2458 /* this is supposed to be a releasing call */
2459 if (t_cur != mtx_NONE) {
2460 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called for release without mtx_NONE.\n");
2461 return;
2462 }
2463 if (mtx != rsdata.mtx) {
2464 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called for release with ungrabbed matrix.\n");
2465 return;
2466 }
2467 if (use) {
2468 mtx_renull_using_row(rsdata.mtx, rsdata.t_org, rsdata.arr);
2469 } else {
2470 mtx_renull_all(rsdata.mtx, rsdata.arr);
2471 }
2472 mtx_null_row_vector_release();
2473 rsdata.mtx = NULL;
2474 rsdata.arr = NULL;
2475 rsdata.tocur = NULL;
2476 rsdata.t_org = mtx_NONE;
2477 }
2478 }
2479
2480 void mtx_add_row_series(int32 s_cur,real64 factor,mtx_range_t *rng){
2481 register int32 org_col;
2482 int32 t_org,*tocur;
2483 struct element_t **arr,*elt;
2484 mtx_matrix_t mtx;
2485
2486 if ( !(rsdata.mtx) ) {
2487 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called without grabbing target row first.\n");
2488 return;
2489 }
2490 mtx = rsdata.mtx;
2491 arr = rsdata.arr;
2492 tocur = rsdata.tocur;
2493 t_org = rsdata.t_org;
2494
2495 #if MTX_DEBUG
2496 if(!mtx_check_matrix(mtx)) return;
2497 #endif
2498
2499 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[s_cur]];
2500 if( rng == mtx_ALL_COLS ) {
2501 for( ; NOTNULL(elt); elt = elt->next.col ) {
2502 if( ISNULL(arr[(org_col=elt->col)]) ) {
2503 arr[org_col] =
2504 mtx_create_element_value(mtx,t_org,org_col,(factor * elt->value));
2505 } else {
2506 arr[org_col]->value += factor * elt->value;
2507 }
2508 }
2509 return;
2510 }
2511 /* fast_in_range is a 10% winner on the alpha, and should win by even
2512 more on the other platforms. */
2513 if( rng->high >= rng->low ) {
2514 register int32 cur_col, lo,hi;
2515 lo=rng->low; hi= rng->high;
2516 for( ; NOTNULL(elt); elt = elt->next.col ) {
2517 cur_col=tocur[(org_col=elt->col)];
2518 if( fast_in_range(lo,hi,cur_col) ) {
2519 if( NOTNULL(arr[org_col]) ) {
2520 arr[org_col]->value += factor * elt->value;
2521 } else {
2522 arr[org_col] =
2523 mtx_create_element_value(mtx,t_org,org_col,(factor*elt->value));
2524 }
2525 }
2526 }
2527 }
2528 }
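
/*
   Sketch of the series protocol (hypothetical indices): grab the target
   row once, accumulate several source rows into it, then release by
   passing mtx_NONE. The boolean is consulted only on the release call;
   TRUE assumes every element written into the expansion array is on the
   target row, which holds when all adds went through mtx_add_row_series.

     mtx_add_row_series_init(mtx,t_cur,FALSE);        grab
     mtx_add_row_series(s1,f1,mtx_ALL_COLS);
     mtx_add_row_series(s2,f2,mtx_ALL_COLS);
     mtx_add_row_series_init(mtx,mtx_NONE,TRUE);      release
*/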
2529
2530 void mtx_add_col_series_init(mtx_matrix_t mtx,int32 t_cur,boolean use){
2531
2532 #if MTX_DEBUG
2533 if(!mtx_check_matrix(mtx)) return;
2534 #endif
2535
2536 if ( !(csdata.mtx) ) {
2537 /* this is a grabbing call */
2538 if ( mtx && (t_cur >= 0) && (t_cur < mtx->order) ) {
2539 csdata.mtx = mtx;
2540 csdata.tocur = mtx->perm.row.org_to_cur;
2541 csdata.t_org = mtx->perm.col.cur_to_org[t_cur];
2542 csdata.arr = mtx_expand_col_series(mtx,csdata.t_org);
2543 } else {
2544 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called for grab with invalid col or mtx. col number %d.\n",t_cur);
2545 }
2546 return;
2547 } else {
2548 /* this is supposed to be a releasing call */
2549 if (t_cur != mtx_NONE) {
2550 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called for release without mtx_NONE.\n");
2551 return;
2552 }
2553 if (mtx != csdata.mtx) {
2554 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called for release with ungrabbed matrix.\n");
2555 return;
2556 }
2557 if (use) {
2558 mtx_renull_using_col(csdata.mtx, csdata.t_org, csdata.arr);
2559 } else {
2560 mtx_renull_all(csdata.mtx, csdata.arr);
2561 }
2562 mtx_null_col_vector_release();
2563 csdata.mtx = NULL;
2564 csdata.arr = NULL;
2565 csdata.tocur = NULL;
2566 csdata.t_org = mtx_NONE;
2567 }
2568 }
2569
2570 void mtx_add_col_series(int32 s_cur,real64 factor,mtx_range_t *rng){
2571 register int32 org_row;
2572 int32 t_org,*tocur;
2573 struct element_t **arr,*elt;
2574 mtx_matrix_t mtx;
2575
2576 if ( !(csdata.mtx) ) {
2577 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called without grabbing target col first.\n");
2578 return;
2579 }
2580 mtx = csdata.mtx;
2581 arr = csdata.arr;
2582 tocur = csdata.tocur;
2583 t_org = csdata.t_org;
2584
2585 #if MTX_DEBUG
2586 if(!mtx_check_matrix(mtx)) return;
2587 #endif
2588
2589 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[s_cur]];
2590 if( rng == mtx_ALL_ROWS ) {
2591 for( ; NOTNULL(elt); elt = elt->next.row ) {
2592 if( ISNULL(arr[(org_row=elt->row)]) ) {
2593 arr[org_row] =
2594 mtx_create_element_value(mtx,org_row,t_org,(factor * elt->value));
2595 } else {
2596 arr[org_row]->value += factor * elt->value;
2597 }
2598 }
2599 return;
2600 }
2601 if( rng->high >= rng->low ) {
2602 register int32 cur_row;
2603
2604 for( ; NOTNULL(elt); elt = elt->next.row ) {
2605 cur_row=tocur[(org_row=elt->row)];
2606 if( in_range(rng,cur_row) ) {
2607 if( NOTNULL(arr[org_row]) ) {
2608 arr[org_row]->value += factor * elt->value;
2609 } else {
2610 arr[org_row] =
2611 mtx_create_element_value(mtx,org_row,t_org,(factor*elt->value));
2612 }
2613 }
2614 }
2615 }
2616 }
2617
2618 void mtx_old_add_row_sparse(mtx_matrix_t mtx,
2619 int32 t_cur, /* cur index of target row */
2620 real64 *s_cur, /* dense source row, curindexed */
2621 real64 factor, /* coefficient */
2622 mtx_range_t *rng, /* range to add over, or */
2623 int32 *ilist /* list to add over */
2624 ){
2625 int32 t_org,*toorg,cindex,orgcindex,hilim;
2626 struct element_t **arr;
2627 real64 value;
2628
2629 #if MTX_DEBUG
2630 if(!mtx_check_matrix(mtx)) return;
2631 #endif
2632
2633 if (factor==D_ZERO) return; /* adding 0 is a no-op */
2634
2635 if( rng == mtx_ALL_COLS ) {
2636 t_org = mtx->perm.row.cur_to_org[t_cur]; /* org of target row */
2637 arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
2638 toorg = mtx->perm.col.cur_to_org; /* current col perm */
2639 hilim=mtx->order;
2640
2641 if (!ilist) { /* FULL ROW CASE no ilist */
2642 for (cindex=0; cindex< hilim; cindex++) {
2643 if ((value=s_cur[cindex])!=D_ZERO) {
2644 if ( ISNULL(arr[(orgcindex=toorg[cindex])]) ) { /* add element */
2645 arr[orgcindex] =
2646 mtx_create_element_value(mtx,t_org,orgcindex,(factor * value));
2647 } else { /* increment element */
2648 arr[orgcindex]->value += factor * value;
2649 }
2650 }
2651 }
2652 } else { /* SPARSE ROW CASE with ilist */
2653 int32 i;
2654 i=0;
2655 while ((cindex=ilist[i])>=0) {
2656 value=s_cur[cindex];
2657 if ( ISNULL(arr[(orgcindex=toorg[cindex])]) ) { /* add element */
2658 arr[orgcindex] =
2659 mtx_create_element_value(mtx,t_org,orgcindex,(factor * value));
2660 } else { /* increment element */
2661 arr[orgcindex]->value += factor * value;
2662 }
2663 i++;
2664 }
2665 }
2666 mtx_renull_using_row(mtx,t_org,arr);
2667 } else if( rng->high >= rng->low ) { /* DENSE RANGE CASE */
2668 t_org = mtx->perm.row.cur_to_org[t_cur]; /* org of target row */
2669 arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
2670 toorg = mtx->perm.col.cur_to_org; /* current col perm */
2671 hilim = rng->high;
2672
2673 for (cindex=rng->low; cindex<= hilim; cindex++) {
2674 if ((value=s_cur[cindex])!=D_ZERO) {
2675 if ( ISNULL(arr[(orgcindex=toorg[cindex])]) ) { /* add element */
2676 arr[orgcindex] =
2677 mtx_create_element_value(mtx,t_org,orgcindex,(factor * value));
2678 } else { /* increment element */
2679 arr[orgcindex]->value += factor * value;
2680 }
2681 }
2682 }
2683 mtx_renull_using_row(mtx,t_org,arr);
2684 }
2685 mtx_null_vector_release();
2686 }
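
/*
   Sketch of the ilist variant (hypothetical data): ilist holds the
   CURRENT column indices to visit, terminated by a negative sentinel,
   and is honored only together with rng == mtx_ALL_COLS; with a real
   range the dense loop above is used instead.

     int32 ilist[4] = {2, 5, 11, -1};
     mtx_old_add_row_sparse(mtx,t_cur,s_row,3.0,mtx_ALL_COLS,ilist);
*/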
2687
2688 void mtx_old_add_col_sparse(mtx_matrix_t mtx,
2689 int32 t_cur, /* cur index of target col */
2690 real64 *s_cur, /* dense source col, curindexed */
2691 real64 factor, /* coefficient */
2692 mtx_range_t *rng, /* range to add over, or */
2693 int32 *ilist /* list to add over */
2694 ){
2695 int32 t_org,*toorg,rowindex,orgrowindex,hilim;
2696 struct element_t **arr;
2697 real64 value;
2698
2699 #if MTX_DEBUG
2700 if(!mtx_check_matrix(mtx)) return;
2701 #endif
2702
2703 if (factor==D_ZERO) return;
2704
2705 if( rng == mtx_ALL_ROWS ) {
2706 t_org = mtx->perm.col.cur_to_org[t_cur]; /* org of target col */
2707 arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
2708 toorg = mtx->perm.row.cur_to_org; /* current row perm */
2709 hilim=mtx->order;
2710
2711 if (!ilist) { /* FULL COL CASE no ilist */
2712 for (rowindex=0; rowindex< hilim; rowindex++) {
2713 if ((value=s_cur[rowindex])!=D_ZERO) {
2714 if ( ISNULL(arr[(orgrowindex=toorg[rowindex])]) ) { /* add element */
2715 arr[orgrowindex] =
2716 mtx_create_element_value(mtx,orgrowindex,t_org,(factor * value));
2717 } else { /* increment element */
2718 arr[orgrowindex]->value += factor * value;
2719 }
2720 }
2721 }
2722 } else { /* SPARSE COL CASE with ilist */
2723 int32 i;
2724 i=0;
2725 while ((rowindex=ilist[i])>=0) {
2726 value=s_cur[rowindex];
2727 if ( ISNULL(arr[(orgrowindex=toorg[rowindex])]) ) { /* add element */
2728 arr[orgrowindex] =
2729 mtx_create_element_value(mtx,orgrowindex,t_org,(factor * value));
2730 } else { /* increment element */
2731 arr[orgrowindex]->value += factor * value;
2732 }
2733 i++;
2734 }
2735 }
2736 mtx_renull_using_col(mtx,t_org,arr);
2737
2738 } else if( rng->high >= rng->low ) { /* DENSE RANGE CASE */
2739 t_org = mtx->perm.col.cur_to_org[t_cur]; /* org of target col */
2740 arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
2741 toorg = mtx->perm.row.cur_to_org; /* current row perm */
2742 hilim = rng->high;
2743
2744 for (rowindex=rng->low; rowindex<= hilim; rowindex++) {
2745 if ((value=s_cur[rowindex])!=D_ZERO) {
2746 if ( ISNULL(arr[(orgrowindex=toorg[rowindex])]) ) { /* add element */
2747 arr[orgrowindex] =
2748 mtx_create_element_value(mtx,orgrowindex,t_org,(factor * value));
2749 } else { /* increment element */
2750 arr[orgrowindex]->value += factor * value;
2751 }
2752 }
2753 }
2754 mtx_renull_using_col(mtx,t_org,arr);
2755 }
2756 mtx_null_vector_release();
2757 }
2758
2759 size_t mtx_size(mtx_matrix_t mtx) {
2760 size_t size=0;
2761 if (ISNULL(mtx)) return size;
2762 size += (1+mtx->nslaves)*sizeof(struct mtx_header);
2763 if (ISSLAVE(mtx)) {
2764 return 2*sizeof(struct element_t *)*(size_t)mtx->capacity +size;
2765 }
2766 size += mem_sizeof_store(mtx->ms);
2767 /* headers */
2768 size += (1 + mtx->nslaves) * (size_t)mtx->capacity *
2769 (size_t)2 * sizeof(struct element_t *);
2770 /* block data */
2771 if (mtx->data->nblocks >0)
2772 size += sizeof(mtx_region_t)*(size_t)(mtx->data->nblocks-1);
2773 /* permutations */
2774 size += (size_t)4*(sizeof(int32)*(size_t)mtx->capacity);
2775 return size;
2776 }
2777
2778 size_t mtx_chattel_size(mtx_matrix_t mtx) {
2779 size_t size=0;
2780 int32 i;
2781 if (ISNULL(mtx) || ISSLAVE(mtx)) return size;
2782 /* headers */
2783 size += (mtx->nslaves)*sizeof(struct mtx_header);
2784 /* incidence */
2785 for (i=0; i <mtx->nslaves; i++) {
2786 size += sizeof(struct element_t)*(size_t)mtx_nonzeros_in_region(mtx->slaves[i],mtx_ENTIRE_MATRIX);
2787 }
2788 /*nz headers */
2789 size += mtx->nslaves * (size_t)mtx->capacity *
2790 (size_t)2 * sizeof(struct element_t *);
2791 /* block data: slaves have none of their own */
2792 /* permutations: shared with the master, not counted */
2793 return size;
2794 }
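
/*
   Usage sketch for the two accounting calls above (names hypothetical):

     size_t total = mtx_size(master);              whole-family footprint
     size_t slaves = mtx_chattel_size(master);     slave-attributable part

   mtx_chattel_size returns 0 for a NULL or slave argument, so it is only
   meaningful on a master.
*/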
2795
2796 void mtx_free_reused_mem(void){
2797 (void)mtx_null_mark((int32)0);
2798 (void)mtx_null_vector((int32)0);
2799 (void)mtx_null_row_vector((int32)0);
2800 (void)mtx_null_col_vector((int32)0);
2801 }
2802
2803
2804 void mtx_write_sparse(FILE *fp,mtx_sparse_t *v){
2805 int32 k;
2806 if (NOTNULL(v) && NOTNULL(fp)) {
2807 FPRINTF(fp,"len %d, cap %d\n",v->len,v->cap);
2808 } else {
2809 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with NULL.\n");
2810 return;
2811 }
2812 if ( NOTNULL(v->data) && NOTNULL(v->idata) ){
2813 FPRINTF(fp,"valid data:\n");
2814 for (k=0; k < v->len; k++) {
2815 FPRINTF(fp,"(%d) %d %.18g\n",k,v->idata[k],v->data[k]);
2816 }
2817 } else {
2818 FPRINTF(fp,"Invalid data pointers found.\n");
2819 }
2820 }
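
/*
   Debugging sketch: dump a sparse to a stream; prints the len/cap header
   and then one "(k) idata[k] data[k]" line per stored entry.

     mtx_write_sparse(stderr,&sp);
*/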
2821
2822 void mtx_write_region_human_f(FILE *fp,mtx_matrix_t mtx,
2823 mtx_region_t *greg,int colwise,int orgmajor
2824 ){
2825 mtx_coord_t nz,org;
2826 real64 value;
2827 mtx_region_t reg;
2828
2829 if (greg!=mtx_ENTIRE_MATRIX) {
2830 reg=(*greg);
2831 } else {
2832 mtx_region(&reg,0,mtx->order-1,0,mtx->order-1);
2833 }
2834 /* current col perm driven */
2835 if (colwise && !orgmajor) {
2836 for(nz.col = reg.col.low; nz.col <= reg.col.high; ++(nz.col) ) {
2837 nz.row = mtx_FIRST;
2838 /* if col not empty, print header and first element */
2839 if( value = mtx_next_in_col(mtx,&nz,&(reg.row)), nz.row != mtx_LAST ) {
2840 FPRINTF(fp," Col %d (org %d)\n", nz.col, mtx_col_to_org(mtx,nz.col));
2841 FPRINTF(fp," Row %d (org %d) has value %g\n",
2842 nz.row, mtx_row_to_org(mtx,nz.row), value);
2843 }
2844 while( value = mtx_next_in_col(mtx,&nz,&(reg.row)),
2845 nz.row != mtx_LAST ) {
2846 FPRINTF(fp," Row %d (org %d) has value %g\n",
2847 nz.row, mtx_row_to_org(mtx,nz.row), value);
2848 }
2849 }
2850 return;
2851 }
2852 /* current row perm driven */
2853 if (!colwise && !orgmajor) {
2854 for(nz.row = reg.row.low; nz.row <= reg.row.high; ++(nz.row) ) {
2855 nz.col = mtx_FIRST;
2856 if ( value = mtx_next_in_row(mtx,&nz,