
Contents of /trunk/base/generic/linear/mtx_basic.c



Revision 1380
Fri Apr 6 04:07:32 2007 UTC by jpye
File MIME type: text/x-csrc
File size: 106029 byte(s)
Fixed build problem on MinGW GCC 3.4.2
Fixed problem with font in IPython console on Windows
Some attempts to fix problems with FILE* support in Python bindings
gfortran is now the preferred FORTRAN compiler.
Comment out failing FILE* tests for the moment.
1 /* ASCEND modelling environment
2 Copyright (C) 1990 Karl Michael Westerberg
3 Copyright (C) 1993 Joseph Zaher
4 Copyright (C) 1994 Joseph Zaher, Benjamin Andrew Allan
5 Copyright (C) 1995 Benjamin Andrew Allan, Kirk Andre' Abbott
6 Copyright (C) 1996 Benjamin Andrew Allan
7 Copyright (C) 2007 Carnegie Mellon University
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
12 any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA.
23 *//** @file
24 Basic operations with mtx matrix types, including file i/o.
25 *//*
26 mtx: Ascend Sparse Matrix Package
27 by Karl Michael Westerberg, Created: 2/6/90
28 last in CVS: $Revision: 1.20 $ $Date: 2000/01/25 02:27:07 $ $Author: ballan $
29 */
30
31 #include <math.h>
32 #include <utilities/ascConfig.h>
33 #include <utilities/ascMalloc.h>
34 #include <utilities/mem.h>
35 #include "mtx.h"
36 #ifdef ASC_WITH_MMIO
37 # include <mmio.h>
38 #endif
39
40 /* grab our private parts */
41 #define __MTX_C_SEEN__
42 #include "mtx_use_only.h"
43 #include <general/mathmacros.h>
44
45 /**
46 *Really* check the matrix.
47 @return an error count
48 */
49 int super_check_matrix( mtx_matrix_t mtx){
50 int32 ndx,errcnt=0;
51 int32 rowc,colc;
52
53 /* Test consistency of permutation arrays */
54 for( ndx=ZERO ; ndx < mtx->capacity ; ++ndx ) {
55 if( mtx->perm.row.org_to_cur[mtx->perm.row.cur_to_org[ndx]] != ndx ) {
56
57 ERROR_REPORTER_HERE(ASC_PROG_ERR,"Permutation violation in row %d.\n", ndx);
58 errcnt++;
59 }
60 if( mtx->perm.col.org_to_cur[mtx->perm.col.cur_to_org[ndx]] != ndx ) {
61
62 ERROR_REPORTER_HERE(ASC_PROG_ERR,"Permutation violation in col %d.\n",ndx);
63 errcnt++;
64 }
65 }
66
67 if( mtx->order > mtx->capacity ) {
68
69 ERROR_REPORTER_HERE(ASC_PROG_ERR,"Capacity %d is less than order %d\n",
70 mtx->capacity,mtx->order);
71 errcnt++;
72 }
73
74 /* Make sure rows and columns which don't exist are blank */
75 for( ndx = mtx->order ; ndx < mtx->capacity ; ++ndx ) {
76 int32 org_row,org_col;
77 org_row = mtx->perm.row.cur_to_org[ndx];
78 org_col = mtx->perm.col.cur_to_org[ndx];
79 if( NOTNULL(mtx->hdr.row[org_row]) ) {
80
81 ERROR_REPORTER_HERE(ASC_PROG_ERR,"Non-zeros found in non-existent row %d.\n",ndx);
82 errcnt++;
83 }
84 if( NOTNULL(mtx->hdr.col[org_col]) ) {
85
86 ERROR_REPORTER_HERE(ASC_PROG_ERR,"Non-zeros found in non-existent col %d.\n",ndx);
87 errcnt++;
88 }
89 }
90
91 /* Verify fishnet structure */
92 for( ndx=rowc=colc=ZERO ; ndx < mtx->capacity ; ++ndx ) {
93 struct element_t *elt;
94 int32 org_row,org_col;
95 org_row = mtx->perm.row.cur_to_org[ndx];
96 org_col = mtx->perm.col.cur_to_org[ndx];
97 for( elt = mtx->hdr.row[org_row] ; NOTNULL(elt) ; elt = elt->next.col ) {
98 if( elt->row != mtx_NONE ) {
99 ++rowc;
100 if( elt->row != org_row ) {
101
102 ERROR_REPORTER_HERE(ASC_PROG_ERR,"Element mismatch in row %d.\n", ndx);
103 errcnt++;
104 }
105 } else {
106
107 ERROR_REPORTER_HERE(ASC_PROG_ERR,"Disclaimed element in row %d.\n", ndx);
108 errcnt++;
109 }
110 }
111 for( elt = mtx->hdr.col[org_col] ; NOTNULL(elt) ; elt = elt->next.row ) {
112 if( elt->col != mtx_NONE ) {
113 ++colc;
114 if( elt->col != org_col ) {
115
116 ERROR_REPORTER_HERE(ASC_PROG_ERR,"Element mismatch in col %d.\n", ndx);
117 errcnt++;
118 }
119 } else {
120
121 ERROR_REPORTER_HERE(ASC_PROG_ERR,"Disclaimed element in col %d.\n", ndx);
122 errcnt++;
123 }
124 }
125 }
126 if( rowc != colc ) {
127
128 ERROR_REPORTER_HERE(ASC_PROG_ERR,"Non-zero discrepancy, %d by row, %d by col.\n",
129 rowc, colc);
130 errcnt++;
131 }
132 return errcnt;
133 }
134
135 /**
136 Checks the integrity flag of the matrix.
137 */
138 boolean check_matrix( mtx_matrix_t mtx, char *file, int line){
139
140 if( ISNULL(mtx) ) {
141
142 ERROR_REPORTER_HERE(ASC_PROG_ERR,"NULL matrix in %s line %d.\n",file,line);
143 return 0;
144 }
145
146 switch( mtx->integrity ) {
147 case OK:
148 break;
149 case DESTROYED:
150
151 FPRINTF(g_mtxerr,
152 " Matrix deceased found in %s line %d.\n",file,line);
153 return 0;
154 default:
155
156 ERROR_REPORTER_HERE(ASC_PROG_ERR,"Matrix garbage found in %s line %d.\n",
157 file,line);
158 return 0;
159 }
160
161 #if MTX_DEBUG
162 super_check_matrix(mtx);
163 #endif
164
165 if (ISSLAVE(mtx)) {
166 return (check_matrix(mtx->master,file,line));
167 }
168 return 1;
169 }
170
171 /*
172 This function checks the consistency of a sparse as best it can.
173 @return FALSE if something is weird
174 */
175 boolean check_sparse(const mtx_sparse_t * const sp, char *file, int line)
176 {
177 if ( ISNULL(sp) ||
178 sp->len > sp->cap ||
179 ( sp->cap > 0 && ( ISNULL(sp->idata) || ISNULL(sp->data) ) )
180 ) {
181 ERROR_REPORTER_HERE(ASC_PROG_ERR,"Inconsistent or NULL sparse in %s line %d.\n",file,line);
182 return 0;
183 }
184 return 1;
185 }
186
187 #define free_unless_null(ptr) if( NOTNULL(ptr) ) ascfree(ptr)
188 /*
189 static void free_unless_null(POINTER ptr)
190 {
191 if( NOTNULL(ptr) )
192 ascfree(ptr);
193 }
194 */
195
196 void mtx_zero_int32(int32 *data, int len){
197 int i;
198 if (data==NULL) return;
199 for (i=0; i<len; i++) data[i]=0;
200 }
201
202 void mtx_zero_real64(real64 *data, int len){
203 int i;
204 if (data==NULL) return;
205 for (i=0; i<len; i++) data[i]=0.0;
206 }
207
208 void mtx_zero_ptr(void **data, int len){
209 int i;
210 if (data==NULL) return;
211 for (i=0; i<len; i++) data[i]=NULL;
212 }
213
214 /*
215 Allocates a matrix header and returns a pointer to it.
216 */
217 static mtx_matrix_t alloc_header(void){
218 mtx_matrix_t mtx;
219 mtx = (mtx_matrix_t)ascmalloc( sizeof(struct mtx_header) );
220 mtx->integrity = OK;
221 return(mtx);
222 }
223
224 /**
225 Frees a matrix header.
226 */
227 static void free_header(mtx_matrix_t mtx){
228 mtx->integrity = DESTROYED;
229 ascfree(mtx);
230 }
231
232 /**
233 Allocates a zeroed row or column header.
234 */
235 static struct element_t **calloc_nz_header(int32 cap){
236 return(cap > 0 ? ASC_NEW_ARRAY_CLEAR(struct element_t *,cap) : NULL);
237 }
238
239 /**
240 Copies srchdr to tarhdr given the capacity of srchdr.
241 */
242 static void copy_nz_header(struct element_t **tarhdr,
243 struct element_t **srchdr, int32 cap
244 ){
245 mem_copy_cast(srchdr,tarhdr,cap*sizeof(struct element_t *));
246 }
247
248 #define free_nz_header(hdr) free_unless_null( (POINTER)(hdr) )
249 /**
250 Frees a row or column header
251 **/
252
253
254 /********************************************************************/
255 /*
256 Any method of deallocating elements that doesn't use
257 delete_from_row/col or nuke_fishnet is DOOMED to die.
258 On ANY entry to delete_from_row or delete_from_col,
259 last_value_matrix should be already set to the mtx being deleted from.
260 What this really means is that before any of the following list of functions
261 is called, the last_value_matrix should be set:
262
263 disclaim element
264 delete_from_row/col
265 blast_row/col
266 clean_row/col
267
268 Note that the above functions call each other, with blast_/clean_ being
269 the top housekeeping/garbage collection pair. Users of delete, blast or
270 clean should set last_value_matrix at the scope where the matrix is known.
271
272 Excuses for this kluge:
273 1) mtx_value/set_value need it.
274 2) the mem_store scheme REQUIRES it.
275 3) it is faster than passing the mtx
276 up and down through the functions listed above, and we do a LOT of
277 these operations.
278 */
279 static mtx_matrix_t last_value_matrix = NULL;
280
281 /**
282 Indicates that one of the row or column lists in which this element
283 appears (it doesn't matter which) no longer points to it (the element
284 has already been removed from that list). Elements disclaimed once have
285 their row,col indices set to mtx_NONE. Elements disclaimed twice are
286 discarded.
287 NOTE WELL: last_value_matrix must be set before this function is called.
288 */
289 static void disclaim_element(struct element_t *element){
290 if( element->row == mtx_NONE ) {
291 mem_free_element(last_value_matrix->ms,(void *)element);
292 } else {
293 element->row = element->col = mtx_NONE;
294 }
295 }
296
297 /**
298 Deletes the element from the row it is in, given a pointer to
299 the row link which leads to this element.
300 On return the pointer pointed to by link points to the new next element.
301 NOTE WELL: last_value_matrix must be set before this function is called.
302 */
303 static void delete_from_row(struct element_t **link){
304 struct element_t *p;
305 p = *link;
306 *link = p->next.col;
307 disclaim_element(p);
308 /* conservatively cause mtx_set_value to forget */
309 last_value_matrix->last_value = NULL;
310 }
311
312 /**
313 Deletes the element from the col it is in, given a pointer to
314 the col link which leads to this element.
315 NOTE WELL: last_value_matrix must be set before this function is called.
316 */
317 static void delete_from_col(struct element_t **link){
318 struct element_t *p;
319 p = *link;
320 *link = p->next.row;
321 disclaim_element(p);
322 /* conservatively cause mtx_set_value to forget */
323 last_value_matrix->last_value = NULL;
324 }
325
326 /**
327 Deletes every element even remotely associated with a matrix
328 and nulls the headers. Also nulls the headers of slaves
329 and cleans out their elements.
330 */
331 static void nuke_fishnet(mtx_matrix_t mtx){
332 int32 i;
333
334 if ( ISNULL(mtx) || ISNULL(mtx->hdr.row) ||
335 ISNULL(mtx->hdr.col) || ISSLAVE(mtx)) {
336 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with bad matrix.");
337 return;
338 }
339 if (mtx->capacity<1) return;
340 mtx->last_value = NULL;
341 mem_clear_store(mtx->ms);
342 zero(mtx->hdr.row,mtx->capacity,struct element_t *);
343 zero(mtx->hdr.col,mtx->capacity,struct element_t *);
344 for (i = 0; i < mtx->nslaves; i++) {
345 zero(mtx->slaves[i]->hdr.row,mtx->capacity,struct element_t *);
346 zero(mtx->slaves[i]->hdr.col,mtx->capacity,struct element_t *);
347 }
348 }
349
350 /**
351 Destroys all elements in the given row with (current)
352 col index in the given col range: if rng == mtx_ALL_COLS,
353 entire row is destroyed.
354 NOTE WELL: last_value_matrix must be set before this function is called.
355 */
356 static void blast_row( mtx_matrix_t mtx, int32 org, mtx_range_t *rng){
357 struct element_t **link;
358
359 link = &(mtx->hdr.row[org]);
360 if( rng == mtx_ALL_COLS ) {
361 while( NOTNULL(*link) )
362 delete_from_row(link);
363 } else {
364 int32 *tocur = mtx->perm.col.org_to_cur;
365 int32 col;
366 while( NOTNULL(*link) ) {
367 col = (*link)->col;
368 if( col == mtx_NONE || in_range(rng,tocur[col]) )
369 delete_from_row(link);
370 else
371 link = &((*link)->next.col);
372 }
373 }
374 }
375
376 /**
377 Destroys all elements in the given col with (current)
378 row index in the given row range: if rng == mtx_ALL_ROWS,
379 entire col is destroyed.
380 NOTE WELL: last_value_matrix must be set before this function is called.
381 */
382 static void blast_col( mtx_matrix_t mtx, int32 org, mtx_range_t *rng){
383 struct element_t **link;
384
385 link = &(mtx->hdr.col[org]);
386 if( rng == mtx_ALL_ROWS ) {
387 while( NOTNULL(*link) )
388 delete_from_col(link);
389 } else {
390 int32 *tocur = mtx->perm.row.org_to_cur, row;
391
392 while( NOTNULL(*link) ) {
393 row = (*link)->row;
394 if( row == mtx_NONE || in_range(rng,tocur[row]) )
395 delete_from_col(link);
396 else
397 link = &((*link)->next.row);
398 }
399 }
400 }
401
402 /**
403 Disclaims all elements in the given row which have already been
404 disclaimed once.
405 NOTE WELL: last_value_matrix must be set before this function is called.
406 */
407 static void clean_row(mtx_matrix_t mtx, int32 org){
408 struct element_t **link;
409
410 link = &(mtx->hdr.row[org]);
411 while( NOTNULL(*link) )
412 if( (*link)->row == mtx_NONE )
413 delete_from_row(link);
414 else
415 link = &((*link)->next.col);
416 }
417
418 /**
419 Disclaims all elements in the given col which have already been
420 disclaimed once.
421 NOTE WELL: last_value_matrix must be set before this function is called.
422 */
423 static void clean_col(mtx_matrix_t mtx, int32 org){
424 struct element_t **link;
425
426 link = &(mtx->hdr.col[org]);
427 while( NOTNULL(*link) )
428 if( (*link)->col == mtx_NONE )
429 delete_from_col(link);
430 else
431 link = &((*link)->next.row);
432 }
433 /********************************************************************/
434
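/*
    Illustrative sketch (added commentary, not part of the original
    source): the calling protocol described in the comment block above.
    Any caller of delete_from_*, blast_* or clean_* must first aim
    last_value_matrix at the matrix being edited, exactly as
    trim_incidence() and mtx_clear_region() do further below.
*/
static void example_wipe_org_row(mtx_matrix_t mtx, int32 org_row){
    last_value_matrix = mtx;                /* required first step */
    blast_row(mtx, org_row, mtx_ALL_COLS);  /* calls delete_from_row */
    /* the column lists now hold once-disclaimed elements; sweep each
       affected column with clean_col(), as trim_incidence() does. */
}
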
435 mtx_coord_t *mtx_coord(mtx_coord_t *coordp, int32 row, int32 col){
436 coordp->row = row;
437 coordp->col = col;
438 return(coordp);
439 }
440
441 mtx_range_t *mtx_range(mtx_range_t *rangep, int32 low, int32 high){
442 rangep->low = low;
443 rangep->high = high;
444 return(rangep);
445 }
446
447 mtx_region_t *mtx_region( mtx_region_t *regionp, int32 rowlow, int32 rowhigh,
448 int32 collow, int32 colhigh
449 ){
450 mtx_range(&(regionp->row),rowlow,rowhigh);
451 mtx_range(&(regionp->col),collow,colhigh);
452 return(regionp);
453 }
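
/*
    Example (added illustration, not part of the original source): the
    three constructors above just stuff their arguments into caller-owned
    structs and return the pointer, so they nest inside one expression.
*/
static void example_build_region(void){
    mtx_region_t reg;
    mtx_coord_t org;
    mtx_region(&reg, 0, 2, 0, 2);  /* rows 0..2 by cols 0..2 */
    mtx_coord(&org, 0, 0);         /* top-left coordinate of the block */
    (void)reg; (void)org;
}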
454
455 static int g_mtx_debug_redirect = 0;
456
457 void mtx_debug_redirect_freeze(){
458 g_mtx_debug_redirect = 1;
459 }
460
461 static void mtx_redirectErrors(FILE *f){
462 if (!g_mtx_debug_redirect) {
463 assert(f != NULL);
464 g_mtxerr = f;
465 }
466 }
467
468 mtx_matrix_t mtx_create(void){
469 mtx_matrix_t mtx;
470
471 mtx_redirectErrors(stderr); /* call mtx_debug_redirect_freeze() to bypass */
472
473 mtx = alloc_header();
474 mtx->order = ZERO;
475 mtx->capacity = ZERO;
476 mtx->hdr.row = mtx->hdr.col = NULL;
477 mtx->perm.row.org_to_cur = mtx_alloc_perm(ZERO);
478 mtx->perm.row.cur_to_org = mtx_alloc_perm(ZERO);
479 mtx->perm.row.parity = EVEN;
480 mtx->perm.col.org_to_cur = mtx_alloc_perm(ZERO);
481 mtx->perm.col.cur_to_org = mtx_alloc_perm(ZERO);
482 mtx->perm.col.parity = EVEN;
483 mtx->perm.transpose = mtx_NONE;
484 mtx->data =
485 (struct structural_data_t *)ascmalloc(sizeof(struct structural_data_t));
486 mtx->data->symbolic_rank = -1;
487 mtx->data->nblocks = -1;
488 mtx->data->block = NULL;
489 mtx->master = NULL;
490 mtx->nslaves = ZERO;
491 mtx->slaves = NULL;
492 mtx->last_value = NULL;
493 mtx->ms = mem_create_store(LENMAGIC, WIDTHMAGIC/sizeof(struct element_t) - 1,
494 sizeof(struct element_t),10,2048);
495 return(mtx);
496 }
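
/*
    Example (added illustration, not part of the original source):
    minimal lifecycle of a master matrix. mtx_create() yields an
    order-0 matrix; mtx_set_order() must grow it before any coordinate
    becomes addressable.
*/
static void example_matrix_lifecycle(void){
    mtx_matrix_t m = mtx_create();
    mtx_coord_t c;
    mtx_set_order(m, 10);                        /* 10x10 workspace */
    mtx_fill_value(m, mtx_coord(&c, 3, 4), 1.5); /* element at (3,4) */
    mtx_destroy(m);
}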
497
498 /**
499 ** Slave headers share the master's:
500 ** perm.anything, data->anything, ms->anything, order and capacity.
501 ** Unique to the slave are:
502 ** hdr.anything
503 ** last_value.
504 ** All slaves of a master appear as pointers in the master's slaves list
505 ** which is nslaves long. Slaves do not have slaves.
506 **/
507 mtx_matrix_t mtx_create_slave(mtx_matrix_t master){
508 mtx_matrix_t mtx, *mlist;
509 int32 newcnt;
510 mtx_redirectErrors(stderr); /* call mtx_debug_redirect_freeze() to bypass */
511 if(!mtx_check_matrix(master) || ISSLAVE(master)) return NULL;
512
513 mtx = alloc_header();
514 if (ISNULL(mtx)) return mtx;
515 mtx->order = master->order;
516 mtx->capacity = master->capacity;
517 mtx->nslaves = ZERO;
518 mtx->hdr.row = calloc_nz_header(master->capacity);
519 if (ISNULL(mtx->hdr.row)) {
520 ascfree(mtx);
521 FPRINTF(g_mtxerr,"mtx_create_slave: Insufficient memory.\n");
522 return NULL;
523 }
524 mtx->hdr.col = calloc_nz_header(master->capacity);
525 if (ISNULL(mtx->hdr.col)) {
526 ascfree(mtx->hdr.row);
527 ascfree(mtx);
528 FPRINTF(g_mtxerr,"mtx_create_slave: Insufficient memory.\n");
529 return NULL;
530 }
531 mtx->perm.row.org_to_cur = master->perm.row.org_to_cur;
532 mtx->perm.row.cur_to_org = master->perm.row.cur_to_org;
533 mtx->perm.row.parity = EVEN; /* don't look at this again */
534 mtx->perm.col.org_to_cur = master->perm.col.org_to_cur;
535 mtx->perm.col.cur_to_org = master->perm.col.cur_to_org;
536 mtx->perm.col.parity = EVEN; /* don't look at this again */
537 mtx->perm.transpose = mtx_NONE; /* don't look at this again */
538 mtx->slaves = NULL;
539 mtx->data = master->data;
540 mtx->master = master;
541 mtx->last_value = NULL;
542 mtx->ms = master->ms;
543 newcnt = master->nslaves + 1;
544 if (newcnt == 1) {
545 mlist = (mtx_matrix_t *)ascmalloc(sizeof(mtx_matrix_t));
546 } else {
547 mlist =
548 (mtx_matrix_t *)ascrealloc(master->slaves,newcnt*sizeof(mtx_matrix_t));
549 }
550 if (ISNULL(mlist)) {
551 ascfree(mtx->hdr.row);
552 ascfree(mtx->hdr.col);
553 ascfree(mtx);
554 FPRINTF(g_mtxerr,"mtx_create_slave: Insufficient memory.\n");
555 return NULL;
556 } else {
557 master->slaves = mlist;
558 master->slaves[master->nslaves] = mtx;
559 master->nslaves = newcnt;
560 }
561 return(mtx);
562 }
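
/*
    Example (added illustration, not part of the original source): a
    slave shares the master's permutations, structural data and element
    store but owns its incidence headers. Destroying the master also
    reaps its slaves (see mtx_destroy() below).
*/
static void example_master_slave(void){
    mtx_matrix_t master = mtx_create();
    mtx_matrix_t slave;
    mtx_set_order(master, 5);
    slave = mtx_create_slave(master); /* order/capacity track master */
    (void)slave;
    mtx_destroy(master);              /* frees the slave as well */
}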
563
564 static void destroy_slave(mtx_matrix_t master, mtx_matrix_t mtx){
565 int32 sid,i;
566
567 if(!mtx_check_matrix(mtx) || !mtx_check_matrix(master)) {
568 FPRINTF(g_mtxerr,
569 "destroy_slave(mtx.c) called with corrupt slave or master\n");
570 return;
571 }
572 if (ISSLAVE(master) || !ISSLAVE(mtx)) {
573 FPRINTF(g_mtxerr,
574 "destroy_slave(mtx.c) called with mismatched slave/master pair\n");
575 return;
576 }
577 /* get slave index */
578 sid = -1;
579 for (i=0; i<master->nslaves; i++) {
580 if (master->slaves[i] == mtx) {
581 sid = i;
582 }
583 }
584 if (sid < 0) {
585 FPRINTF(g_mtxerr,
586 "destroy_slave(mtx.c) called with mismatched slave/master pair\n");
587 return;
588 }
589 /* move the rest of the slaves up the list */
590 /* old implementation - possible undefined operation on i - new code follows - JDS
591 for (i = sid; i < master->nslaves -1;) {
592 master->slaves[i] = master->slaves[++i];
593 }
594 */
595 for (i = sid; i < master->nslaves -1; ++i) {
596 master->slaves[i] = master->slaves[i+1];
597 }
598 (master->nslaves)--;
599 /* we will not realloc smaller. */
600 if (master->nslaves == 0 && NOTNULL(master->slaves)) {
601 ascfree(master->slaves);
602 master->slaves = NULL;
603 }
604 mtx_clear_region(mtx,mtx_ENTIRE_MATRIX);
605 free_nz_header(mtx->hdr.row);
606 free_nz_header(mtx->hdr.col);
607 free_header(mtx);
608 }
609
610 void mtx_destroy(mtx_matrix_t mtx){
611 int32 i;
612 if(!mtx_check_matrix(mtx)) return;
613 if (ISSLAVE(mtx)) {
614 destroy_slave(mtx->master,mtx);
615 return;
616 }
617
618 for (i=0; i<mtx->nslaves; i++) {
619 if (mtx_check_matrix(mtx->slaves[i])) {
620 free_nz_header(mtx->slaves[i]->hdr.row);
621 free_nz_header(mtx->slaves[i]->hdr.col);
622 free_header(mtx->slaves[i]);
623 mtx->slaves[i] = NULL;
624 } else {
625 FPRINTF(g_mtxerr,
626 "mtx_destroy: Corrupt slave found while destroying master.\n");
627 FPRINTF(g_mtxerr,
628 " Slave %d being abandoned.\n",i);
629 }
630 }
631 mtx->nslaves = 0;
632
633 mtx_clear_region(mtx,mtx_ENTIRE_MATRIX);
634 /* mtx_check_matrix is called again if MTX_DEBUG*/
635 /**
636 The fishnet structure is
637 destroyed, just leaving
638 the headers and maybe ms.
639 **/
640 mem_destroy_store(mtx->ms);
641
642 free_nz_header(mtx->hdr.row);
643 free_nz_header(mtx->hdr.col);
644
645 mtx_free_perm(mtx->perm.row.org_to_cur);
646 mtx_free_perm(mtx->perm.row.cur_to_org);
647 mtx_free_perm(mtx->perm.col.org_to_cur);
648 mtx_free_perm(mtx->perm.col.cur_to_org);
649
650 if( NOTNULL(mtx->data->block) ) {
651 ascfree(mtx->data->block);
652 }
653 ascfree(mtx->data);
654
655 free_header(mtx);
656 }
657
658 mtx_sparse_t *mtx_create_sparse(int32 cap){
659 mtx_sparse_t *ret;
660 ret = (mtx_sparse_t *)ascmalloc(sizeof(mtx_sparse_t));
661 if (ISNULL(ret)) {
662 FPRINTF(g_mtxerr,"ERROR: (mtx_create_sparse) Insufficient memory.\n");
663 return ret;
664 }
665 ret->data = ASC_NEW_ARRAY(real64,cap);
666 if (ISNULL(ret->data)) {
667 FPRINTF(g_mtxerr,"ERROR: (mtx_create_sparse) Insufficient memory.\n");
668 ascfree(ret);
669 return NULL;
670 }
671 ret->idata = ASC_NEW_ARRAY(int32,cap);
672 if (ISNULL(ret->idata)) {
673 FPRINTF(g_mtxerr,"ERROR: (mtx_create_sparse) Insufficient memory.\n");
674 ascfree(ret->data);
675 ascfree(ret);
676 return NULL;
677 }
678 ret->cap = cap;
679 ret->len = 0;
680 return ret;
681 }
682
683 void mtx_destroy_sparse(mtx_sparse_t *ret){
684 if (ISNULL(ret)) return;
685 if (NOTNULL(ret->idata)) ascfree(ret->idata);
686 if (NOTNULL(ret->data)) ascfree(ret->data);
687 ascfree(ret);
688 }
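
/*
    Example (added illustration, not part of the original source): a
    sparse is a pair of parallel arrays sized once at creation; cap is
    the room available and len counts the entries currently in use.
*/
static void example_sparse_lifecycle(void){
    mtx_sparse_t *sp = mtx_create_sparse(8); /* room for 8 entries */
    if (sp != NULL) {
        /* sp->idata[i] and sp->data[i] are valid for i < sp->len */
        mtx_destroy_sparse(sp);
    }
}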
689
690 void mtx_destroy_blocklist(mtx_block_t *bl){
691 if (ISNULL(bl)) return;
692 if (NOTNULL(bl->block) && bl->nblocks>0) ascfree(bl->block);
693 ascfree(bl);
694 }
695
696 mtx_matrix_t mtx_duplicate_region(mtx_matrix_t mtx
697 , mtx_region_t *reg
698 , real64 drop
699 ){
700 mtx_matrix_t new;
701 int32 org_row;
702 struct element_t *elt;
703
704 if (!mtx_check_matrix(mtx) ||
705 (mtx->master != NULL && !mtx_check_matrix(mtx->master))) {
706 return NULL;
707 }
708 drop = fabs(drop);
709 if (ISSLAVE(mtx)) {
710 new = mtx_create_slave(mtx->master);
711 } else {
712 new = mtx_create_slave(mtx);
713 }
714 if (new == NULL) {
715 FPRINTF(g_mtxerr,"ERROR: (mtx_duplicate_region) Insufficient memory.\n");
716 return NULL;
717 }
718 if (reg==mtx_ENTIRE_MATRIX){
719 if (drop >0.0) { /* copy with drop tolerance */
720 for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
721 for( elt = mtx->hdr.row[org_row] ;
722 NOTNULL(elt) ; elt = elt->next.col ) {
723 if( elt->row != mtx_NONE && fabs(elt->value) > drop) {
724 mtx_create_element_value(new,elt->row,elt->col,elt->value);
725 }
726 }
727 }
728 } else {
729 for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
730 for( elt = mtx->hdr.row[org_row] ;
731 NOTNULL(elt) ; elt = elt->next.col ) {
732 if( elt->row != mtx_NONE ) {
733 mtx_create_element_value(new,elt->row,elt->col,elt->value);
734 }
735 }
736 }
737 }
738 } else {
739 int32 *rowcur, *colcur, row,col;
740 mtx_range_t *colrng,*rowrng;
741 rowcur=mtx->perm.row.org_to_cur;
742 colcur=mtx->perm.col.org_to_cur;
743 rowrng=&(reg->row);
744 colrng=&(reg->col);
745 if (drop >0.0) { /* copy with drop tolerance */
746 for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
747 row=rowcur[org_row];
748 if (in_range(rowrng,row)) {
749 for( elt = mtx->hdr.row[org_row] ;
750 NOTNULL(elt) ; elt = elt->next.col ) {
751 col=colcur[elt->col];
752 if( in_range(colrng,col) && elt->row != mtx_NONE &&
753 fabs(elt->value) > drop ) {
754 /* don't copy bad elements */
755 mtx_create_element_value(new,elt->row,elt->col,elt->value);
756 }
757 }
758 }
759 }
760 } else {
761 for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
762 row=rowcur[org_row];
763 if (in_range(rowrng,row)) {
764 for( elt = mtx->hdr.row[org_row] ;
765 NOTNULL(elt) ; elt = elt->next.col ) {
766 col=colcur[elt->col];
767 if( in_range(colrng,col) && elt->row != mtx_NONE ) {
768 /* don't copy bad elements */
769 mtx_create_element_value(new,elt->row,elt->col,elt->value);
770 }
771 }
772 }
773 }
774 }
775 }
776
777 return new;
778 }
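
/*
    Example (added illustration, not part of the original source):
    duplicate only the numerically significant incidence of a matrix.
    Entries with magnitude <= drop are omitted, and the duplicate is
    created as a slave of mtx (or of mtx's master), sharing its perms.
*/
static void example_duplicate_big_entries(mtx_matrix_t mtx){
    mtx_matrix_t dup = mtx_duplicate_region(mtx, mtx_ENTIRE_MATRIX, 1e-8);
    if (dup != NULL) mtx_destroy(dup);
}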
779
780 /* WARNING: this function assumes sufficient memory is available.
781 * It needs to check for NULL returns from its allocations, but doesn't.
782 */
783 mtx_matrix_t mtx_copy_options(mtx_matrix_t mtx
784 , boolean blocks
785 , boolean incidence, mtx_region_t *reg
786 , real64 drop
787 ){
788 mtx_matrix_t copy;
789 int32 org_row;
790 struct element_t *elt;
791 int32 bnum;
792
793 if (!mtx_check_matrix(mtx)) return NULL;
794 drop = fabs(drop);
795
796 /* create same size matrix */
797 copy = alloc_header();
798 copy->order = mtx->order;
799 copy->capacity = mtx->capacity;
800 copy->master = NULL;
801 copy->nslaves = ZERO;
802 copy->slaves = NULL;
803 copy->last_value = NULL;
804
805 /* copy headers and fishnet */
806 copy->hdr.row = calloc_nz_header(copy->capacity);
807 copy->hdr.col = calloc_nz_header(copy->capacity);
808 if (incidence) {
809 struct mem_statistics stat;
810 int s_expool;
811 mem_get_stats(&stat,mtx->ms);
812 s_expool = MAX(2048,stat.elt_total/stat.str_wid);
813 copy->ms = mem_create_store(LENMAGIC,
814 WIDTHMAGIC/sizeof(struct element_t) - 1,
815 sizeof(struct element_t),
816 10,s_expool-LENMAGIC);
817 /* copy of a slave or master matrix will end up with an
818 * initial mem_store that is the cumulative size of the
819 * master and all its slaves since they share the ms.
820 */
821 if (reg==mtx_ENTIRE_MATRIX){
822 if (drop >0.0) { /* copy with drop tolerance */
823 for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
824 for( elt = mtx->hdr.row[org_row] ;
825 NOTNULL(elt) ; elt = elt->next.col ) {
826 if( elt->row != mtx_NONE && fabs(elt->value) > drop) {
827 mtx_create_element_value(copy,elt->row,elt->col,elt->value);
828 }
829 }
830 }
831 } else {
832 for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
833 for( elt = mtx->hdr.row[org_row] ;
834 NOTNULL(elt) ; elt = elt->next.col ) {
835 if( elt->row != mtx_NONE ) {
836 mtx_create_element_value(copy,elt->row,elt->col,elt->value);
837 }
838 }
839 }
840 }
841 } else {
842 int32 *rowcur, *colcur, row,col;
843 mtx_range_t *colrng,*rowrng;
844 rowcur=mtx->perm.row.org_to_cur;
845 colcur=mtx->perm.col.org_to_cur;
846 rowrng=&(reg->row);
847 colrng=&(reg->col);
848 if (drop >0.0) { /* copy with drop tolerance */
849 for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
850 row=rowcur[org_row];
851 if (in_range(rowrng,row)) {
852 for( elt = mtx->hdr.row[org_row] ;
853 NOTNULL(elt) ; elt = elt->next.col ) {
854 col=colcur[elt->col];
855 if( in_range(colrng,col) && elt->row != mtx_NONE &&
856 fabs(elt->value) > drop ) {
857 /* don't copy bad elements */
858 mtx_create_element_value(copy,elt->row,elt->col,elt->value);
859 }
860 }
861 }
862 }
863 } else {
864 for( org_row=0 ; org_row < mtx->order ; ++org_row ) {
865 row=rowcur[org_row];
866 if (in_range(rowrng,row)) {
867 for( elt = mtx->hdr.row[org_row] ;
868 NOTNULL(elt) ; elt = elt->next.col ) {
869 col=colcur[elt->col];
870 if( in_range(colrng,col) && elt->row != mtx_NONE ) {
871 /* don't copy bad elements */
872 mtx_create_element_value(copy,elt->row,elt->col,elt->value);
873 }
874 }
875 }
876 }
877 }
878 }
879 } else {
880 copy->ms = mem_create_store(LENMAGIC,
881 WIDTHMAGIC/sizeof(struct element_t) - 1,
882 sizeof(struct element_t),10,2048);
883 /* copy of a slave or master matrix will end up with an
884 initial mem_store that is the default size */
885 }
886
887 /* copy permutation information */
888 copy->perm.row.org_to_cur = mtx_alloc_perm(copy->capacity);
889 mtx_copy_perm(copy->perm.row.org_to_cur,
890 mtx->perm.row.org_to_cur,copy->capacity);
891 copy->perm.row.cur_to_org = mtx_alloc_perm(copy->capacity);
892 mtx_copy_perm(copy->perm.row.cur_to_org,
893 mtx->perm.row.cur_to_org,copy->capacity);
894 copy->perm.row.parity = mtx_row_parity(mtx);
895
896 copy->perm.col.org_to_cur = mtx_alloc_perm(copy->capacity);
897 mtx_copy_perm(copy->perm.col.org_to_cur,
898 mtx->perm.col.org_to_cur,copy->capacity);
899 copy->perm.col.cur_to_org = mtx_alloc_perm(copy->capacity);
900 mtx_copy_perm(copy->perm.col.cur_to_org,
901 mtx->perm.col.cur_to_org,copy->capacity);
902 copy->perm.col.parity = mtx_col_parity(mtx);
903
904 /* copy (or not) block data */
905 copy->data =
906 (struct structural_data_t *)ascmalloc(sizeof(struct structural_data_t));
907 if (blocks) {
908 copy->data->symbolic_rank = mtx->data->symbolic_rank;
909 copy->data->nblocks = mtx->data->nblocks;
910 copy->data->block = mtx->data->nblocks > 0 ? (mtx_region_t *)
911 ascmalloc( mtx->data->nblocks*sizeof(mtx_region_t) ) : NULL;
912 for( bnum=0; bnum < mtx->data->nblocks; bnum++ ) {
913 copy->data->block[bnum].row.low = mtx->data->block[bnum].row.low;
914 copy->data->block[bnum].row.high = mtx->data->block[bnum].row.high;
915 copy->data->block[bnum].col.low = mtx->data->block[bnum].col.low;
916 copy->data->block[bnum].col.high = mtx->data->block[bnum].col.high;
917 }
918 } else {
919 copy->data->symbolic_rank=0;
920 copy->data->nblocks=0;
921 copy->data->block =NULL;
922 }
923
924 return(copy);
925 }
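
/*
    Example (added illustration, not part of the original source): the
    two extremes of mtx_copy_options -- a full copy carrying incidence
    and block data, versus a skeleton copy carrying only order,
    capacity and permutations.
*/
static void example_copies(mtx_matrix_t mtx){
    mtx_matrix_t full, skel;
    full = mtx_copy_options(mtx, TRUE,  TRUE,  mtx_ENTIRE_MATRIX, 0.0);
    skel = mtx_copy_options(mtx, FALSE, FALSE, mtx_ENTIRE_MATRIX, 0.0);
    if (full != NULL) mtx_destroy(full);
    if (skel != NULL) mtx_destroy(skel);
}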
926
927 int32 mtx_order( mtx_matrix_t mtx){
928 if (!mtx_check_matrix(mtx)) return -1;
929 return(mtx->order);
930 }
931
932 int32 mtx_capacity( mtx_matrix_t mtx){
933 if (!mtx_check_matrix(mtx)) return -1;
934 return(mtx->capacity);
935 }
936
937 /* this is only to be called from within mtx_set_order */
938 static void trim_incidence(mtx_matrix_t mtx, int32 order){
939 int32 ndx;
940 int32 *toorg;
941
942 last_value_matrix = mtx;
943 toorg = mtx->perm.col.cur_to_org;
944 for( ndx = order ; ndx < mtx->order ; ++ndx )
945 blast_col(mtx,toorg[ndx],mtx_ALL_ROWS);
946 for( ndx = ZERO ; ndx < order ; ++ndx )
947 clean_row(mtx,toorg[ndx]);
948
949 toorg = mtx->perm.row.cur_to_org;
950 for( ndx = order ; ndx < mtx->order ; ++ndx )
951 blast_row(mtx,toorg[ndx],mtx_ALL_COLS);
952 for( ndx = ZERO ; ndx < order ; ++ndx )
953 clean_col(mtx,toorg[ndx]);
954 }
955
956 /* this is only to be called from within mtx_set_order */
957 static void enlarge_nzheaders(mtx_matrix_t mtx, int32 order){
958 struct element_t **newhdr;
959
960 /* realloc does not initialize, so calloc is the best we can do here */
961
962 newhdr = calloc_nz_header(order);
963 copy_nz_header(newhdr,mtx->hdr.row,mtx->capacity);
964 free_nz_header(mtx->hdr.row);
965 mtx->hdr.row = newhdr;
966
967 newhdr = calloc_nz_header(order);
968 copy_nz_header(newhdr,mtx->hdr.col,mtx->capacity);
969 free_nz_header(mtx->hdr.col);
970 mtx->hdr.col = newhdr;
971 }
972
973 /**
974 This function will preserve the fact that all of the arrays are
975 "correct" out to the capacity of the arrays, not just out to the order
976 of the matrix. In other words, extensions to orders which still do
977 not exceed the capacity of the arrays are trivial.
978 */
979 void mtx_set_order( mtx_matrix_t mtx, int32 order){
980 int32 i;
981 if(!mtx_check_matrix(mtx)) return;
982 if (ISSLAVE(mtx)) {
983 mtx_set_order(mtx->master,order);
984 return;
985 }
986 /* we now have a master matrix */
987 if( order < mtx->order ) { /* Truncate */
988 trim_incidence(mtx,order); /* clean master */
989 for (i = 0; i < mtx->nslaves; i++) {
990 trim_incidence(mtx->slaves[i],order);
991 }
992 }
993 if (mtx->perm.transpose == mtx_NONE) {
994 mtx->perm.transpose = 0;
995 }
996
997 if( order > mtx->capacity ) {
998 int32 *newperm;
999 int32 ndx;
1000
1001 enlarge_nzheaders(mtx,order);
1002 for (i = 0; i < mtx->nslaves; i++) {
1003 enlarge_nzheaders(mtx->slaves[i],order);
1004 }
1005
1006 /* realloc not in order here. Happens only on the master. */
1007 newperm = mtx_alloc_perm(order);
1008 mtx_copy_perm(newperm,mtx->perm.row.org_to_cur,mtx->capacity);
1009 for( ndx=mtx->capacity ; ndx < order ; ++ndx )
1010 newperm[ndx] = ndx;
1011 mtx_free_perm(mtx->perm.row.org_to_cur);
1012 mtx->perm.row.org_to_cur = newperm;
1013
1014 newperm = mtx_alloc_perm(order);
1015 mtx_copy_perm(newperm,mtx->perm.row.cur_to_org,mtx->capacity);
1016 for( ndx=mtx->capacity ; ndx < order ; ++ndx )
1017 newperm[ndx] = ndx;
1018 mtx_free_perm(mtx->perm.row.cur_to_org);
1019 mtx->perm.row.cur_to_org = newperm;
1020
1021 newperm = mtx_alloc_perm(order);
1022 mtx_copy_perm(newperm,mtx->perm.col.org_to_cur,mtx->capacity);
1023 for( ndx=mtx->capacity ; ndx < order ; ++ndx )
1024 newperm[ndx] = ndx;
1025 mtx_free_perm(mtx->perm.col.org_to_cur);
1026 mtx->perm.col.org_to_cur = newperm;
1027
1028 newperm = mtx_alloc_perm(order);
1029 mtx_copy_perm(newperm,mtx->perm.col.cur_to_org,mtx->capacity);
1030 for( ndx=mtx->capacity ; ndx < order ; ++ndx )
1031 newperm[ndx] = ndx;
1032 mtx_free_perm(mtx->perm.col.cur_to_org);
1033 mtx->perm.col.cur_to_org = newperm;
1034
1035 mtx->capacity = order;
1036 for (i = 0; i < mtx->nslaves; i++) {
1037 mtx->slaves[i]->capacity = order;
1038 /* point slaves at master perm again in case anything has changed */
1039 mtx->slaves[i]->perm.row.org_to_cur = mtx->perm.row.org_to_cur;
1040 mtx->slaves[i]->perm.row.cur_to_org = mtx->perm.row.cur_to_org;
1041 mtx->slaves[i]->perm.col.org_to_cur = mtx->perm.col.org_to_cur;
1042 mtx->slaves[i]->perm.col.cur_to_org = mtx->perm.col.cur_to_org;
1043 }
1044 }
1045 mtx->order = order;
1046 for (i = 0; i < mtx->nslaves; i++) {
1047 mtx->slaves[i]->order = order;
1048 }
1049 }
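
/*
    Example (added illustration, not part of the original source):
    growing within the existing capacity is trivial, since the perms
    are kept correct out to capacity; shrinking blasts all incidence
    outside the new order before the order is reduced.
*/
static void example_reorder(mtx_matrix_t mtx){
    mtx_set_order(mtx, 100); /* grow: headers/perms enlarged if needed */
    mtx_set_order(mtx, 50);  /* shrink: rows/cols 50..99 are emptied */
}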
1050
1051 void mtx_clear_coord(mtx_matrix_t mtx, int32 row, int32 col){
1052 struct element_t **rlink, **clink;
1053 int32 org_row, org_col;
1054
1055 #if MTX_DEBUG
1056 if( !mtx_check_matrix(mtx) ) return; /*ben*/
1057 #endif
1058
1059 last_value_matrix = mtx;
1060 org_row = mtx->perm.row.cur_to_org[row];
1061 org_col = mtx->perm.col.cur_to_org[col];
1062 rlink = &(mtx->hdr.row[org_row]);
1063 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1064 rlink = &((*rlink)->next.col);
1065 if( ISNULL(*rlink) ) return;
1066 clink = &(mtx->hdr.col[org_col]);
1067 for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
1068 clink = &((*clink)->next.row);
1069 delete_from_row(rlink);
1070 delete_from_col(clink);
1071 }
1072
1073 void mtx_clear_row( mtx_matrix_t mtx, int32 row, mtx_range_t *rng){
1074 struct element_t **rlink, **clink;
1075 int32 org_row;
1076
1077 #if MTX_DEBUG
1078 if( !mtx_check_matrix(mtx) ) return; /*ben*/
1079 #endif
1080
1081 last_value_matrix = mtx;
1082 org_row = mtx->perm.row.cur_to_org[row];
1083 rlink = &(mtx->hdr.row[org_row]);
1084 if( rng == mtx_ALL_COLS ) {
1085 while( NOTNULL(*rlink) ) {
1086 clink = &(mtx->hdr.col[(*rlink)->col]);
1087 for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
1088 clink = &((*clink)->next.row);
1089 delete_from_row(rlink);
1090 delete_from_col(clink);
1091 }
1092 } else if( rng->high >= rng->low ) {
1093 while( NOTNULL(*rlink) ) {
1094 if( in_range(rng,mtx->perm.col.org_to_cur[(*rlink)->col]) ) {
1095 clink = &(mtx->hdr.col[(*rlink)->col]);
1096 for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
1097 clink = &((*clink)->next.row);
1098 delete_from_row(rlink);
1099 delete_from_col(clink);
1100 } else
1101 rlink = &((*rlink)->next.col);
1102 }
1103 }
1104 }
1105
1106 void mtx_clear_col( mtx_matrix_t mtx, int32 col, mtx_range_t *rng){
1107 struct element_t **clink, **rlink;
1108 int32 org_col;
1109
1110 #if MTX_DEBUG
1111 if( !mtx_check_matrix(mtx) ) return;
1112 #endif
1113
1114 last_value_matrix = mtx;
1115 org_col = mtx->perm.col.cur_to_org[col];
1116 clink = &(mtx->hdr.col[org_col]);
1117 if( rng == mtx_ALL_ROWS ) {
1118 while( NOTNULL(*clink) ) {
1119 rlink = &(mtx->hdr.row[(*clink)->row]);
1120 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1121 rlink = &((*rlink)->next.col);
1122 delete_from_col(clink);
1123 delete_from_row(rlink);
1124 }
1125 } else if( rng->high >= rng->low ) {
1126 while( NOTNULL(*clink) ) {
1127 if( in_range(rng,mtx->perm.row.org_to_cur[(*clink)->row]) ) {
1128 rlink = &(mtx->hdr.row[(*clink)->row]);
1129 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1130 rlink = &((*rlink)->next.col);
1131 delete_from_col(clink);
1132 delete_from_row(rlink);
1133 } else
1134 clink = &((*clink)->next.row);
1135 }
1136 }
1137 }
1138
1139 void mtx_clear_region(mtx_matrix_t mtx, mtx_region_t *region){
1140 #if MTX_DEBUG
1141 if(!mtx_check_matrix(mtx)) return;
1142 #endif
1143
1144 if( region == mtx_ENTIRE_MATRIX && !ISSLAVE(mtx)) {
1145 /* damn the torpedoes, wipe that sucker and its slaves fast */
1146 nuke_fishnet(mtx);
1147 } else {
1148 int32 ndx;
1149 int32 *toorg;
1150 mtx_region_t reg;
1151
1152 if (region == mtx_ENTIRE_MATRIX) {
1153 reg.row.low = reg.col.low = 0;
1154 reg.row.high = reg.col.high = mtx->order-1;
1155 } else {
1156 reg = *region;
1157 }
1158 last_value_matrix = mtx;
1159 toorg = mtx->perm.row.cur_to_org;
1160 for( ndx = reg.row.low ; ndx <= reg.row.high ; ++ndx )
1161 blast_row(mtx,toorg[ndx],&(reg.col));
1162 toorg = mtx->perm.col.cur_to_org;
1163 for( ndx = reg.col.low ; ndx <= reg.col.high ; ++ndx )
1164 clean_col(mtx,toorg[ndx]);
1165 }
1166 }
1167
1168
1169 void mtx_reset_perm(mtx_matrix_t mtx){
1170 int32 ndx;
1171
1172 #if MTX_DEBUG
1173 if(!mtx_check_matrix(mtx)) return;
1174 #endif
1175 if (ISSLAVE(mtx)) {
1176 mtx_reset_perm(mtx->master);
1177 return;
1178 }
1179
1180 for( ndx=ZERO ; ndx < mtx->capacity ; ++ndx ) {
1181 mtx->perm.row.org_to_cur[ndx] = ndx;
1182 mtx->perm.row.cur_to_org[ndx] = ndx;
1183 mtx->perm.col.org_to_cur[ndx] = ndx;
1184 mtx->perm.col.cur_to_org[ndx] = ndx;
1185 }
1186 mtx->perm.row.parity = mtx->perm.col.parity = EVEN;
1187
1188 mtx->data->symbolic_rank = -1;
1189 mtx->data->nblocks = -1;
1190 if( NOTNULL(mtx->data->block) ) {
1191 ascfree(mtx->data->block);
1192 mtx->data->block = NULL;
1193 }
1194 }
1195
1196 void mtx_clear(mtx_matrix_t mtx){
1197 if (ISSLAVE(mtx)) {
1198 mtx_clear_region(mtx->master,mtx_ENTIRE_MATRIX);
1199 }
1200 mtx_clear_region(mtx,mtx_ENTIRE_MATRIX);
1201 mtx_reset_perm(mtx);
1202 }
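
/*
    Example (added illustration, not part of the original source):
    clearing a sub-region leaves the permutations alone, while
    mtx_clear() wipes everything and resets the permutations too.
*/
static void example_clears(mtx_matrix_t mtx){
    mtx_region_t reg;
    mtx_clear_region(mtx, mtx_region(&reg, 0, 1, 0, 1)); /* 2x2 block */
    mtx_clear(mtx); /* entire matrix, then identity permutations */
}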
1203
1204 real64 mtx_value(mtx_matrix_t mtx, mtx_coord_t *coord){
1205 struct element_t *elt;
1206 register int32 org_row,org_col;
1207
1208 #if MTX_DEBUG
1209 if(!mtx_check_matrix(mtx)) return D_ZERO;
1210 #endif
1211 org_row = mtx->perm.row.cur_to_org[coord->row];
1212 org_col = mtx->perm.col.cur_to_org[coord->col];
1213 elt = mtx_find_element(mtx,org_row,org_col);
1214 mtx->last_value = elt;
1215 return( ISNULL(elt) ? D_ZERO : elt->value );
1216 }
1217
1218 void mtx_set_value( mtx_matrix_t mtx, mtx_coord_t *coord, real64 value){
1219 register int32 org_row,org_col;
1220
1221 #if MTX_DEBUG
1222 if(!mtx_check_matrix(mtx)) return;
1223 #endif
1224 org_row = mtx->perm.row.cur_to_org[coord->row];
1225 org_col = mtx->perm.col.cur_to_org[coord->col];
1226 if ( NOTNULL(mtx->last_value) &&
1227 mtx->last_value->row==org_row &&
1228 mtx->last_value->col==org_col ) {
1229 mtx->last_value->value = value;
1230 } else {
1231 struct element_t *elt;
1232 if( ISNULL(elt = mtx_find_element(mtx,org_row,org_col)) ) {
1233 if (value != D_ZERO ) {
1234 elt = mtx_create_element_value(mtx,org_row,org_col,value);
1235 }
1236 } else {
1237 elt->value = value;
1238 }
1239 }
1240 }
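
/*
    Example (added illustration, not part of the original source):
    mtx_value() caches the element it locates in mtx->last_value, so an
    immediate mtx_set_value() at the same coordinate skips the second
    search of the row's element list.
*/
static void example_read_modify_write(mtx_matrix_t mtx){
    mtx_coord_t c;
    real64 v = mtx_value(mtx, mtx_coord(&c, 2, 7)); /* caches element */
    mtx_set_value(mtx, &c, v + 1.0);                /* reuses cache */
}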
1241
1242 void mtx_fill_value(mtx_matrix_t mtx, mtx_coord_t *coord, real64 value){
1243 register int32 org_row,org_col;
1244
1245 #if MTX_DEBUG
1246 if(!mtx_check_matrix(mtx)) return;
1247 #endif
1248 org_row = mtx->perm.row.cur_to_org[coord->row];
1249 org_col = mtx->perm.col.cur_to_org[coord->col];
1250 mtx_create_element_value(mtx,org_row,org_col,value);
1251 }
1252
1253 void mtx_fill_org_value(mtx_matrix_t mtx
1254 , const mtx_coord_t *coord
1255 , real64 value
1256 ){
1257 #if MTX_DEBUG
1258 if(!mtx_check_matrix(mtx)) return;
1259 #endif
1260 mtx_create_element_value(mtx,coord->row,coord->col,value);
1261 }
1262
1263 /* sparse matrix assembly of potentially duplicate fills */
1264 /* takes a matrix, assumed to have redundant and otherwise insane incidences
1265 created by 'misusing mtx_fill_value' and sums all like entries, eliminating
1266 the duplicates and the zeroes. Returns the negated number of elements
1267 removed (<= 0) on success, or 1 if it fails for some reason.
1268 Could stand to have the error messages it emits improved.
1269 Could stand to take a rowrange or a rowlist, a colrange or a collist, droptol.
1270 algorithm: O(3tau)
1271   establish a vector mv of columns-needing-cleaning markers
1272   set lowmark=-1, highmark=-2
1273   for rows
1274     if row empty, continue
1275     establish a vector ev of elt pointers, set null
1276     prevlink = &header's 1st elt pointer
1277     for each elt in row
1278       if elt is not marked in ev
1279         mark ev with elt, update prevlink
1280       else
1281         add value to elt pointed at by ev
1282         mark col in mv as needing cleaning, updating lowmark,highmark
1283         delete elt
1284       endif
1285     endfor
1286     for each elt in row
1287       unmark ev with elt
1288       if elt is 0, delete and mark col in mv
1289     endfor
1290   endfor
1291   for i = lowmark to highmark
1292     clean col
1293   endfor
1294 My god, the tricks we play on a linked list.
1295 */
1296 int32 mtx_assemble(mtx_matrix_t mtx){
1297 char *real_mv=NULL, *mv;
1298 struct element_t **real_ev=NULL, **ev, **prevlink, *elt;
1299 int32 orgrow, orgcol, lowmark,highmark,dup;
1300 /* note elt and prevlink could be combined, but the code is
1301 utterly unreadable if you do so. */
1302
1303 if (ISNULL(mtx)) return 1;
1304 if (mtx->order <1) return 0;
1305 real_mv = mtx_null_mark(mtx->order+1);
1306 mv = real_mv+1;
1307 if (ISNULL(real_mv)) return 1;
1308 real_ev = mtx_null_vector(mtx->order+1);
1309 ev = real_ev+1;
1310 if (ISNULL(real_ev)) {
1311 mtx_null_mark_release();
1312 return 1;
1313 }
1314 /* we have allocated arrays which include a -1 element to buy
1315 ourselves an awfully convenient lot of safety. */
1316 lowmark=-1;
1317 highmark=-2;
1318 dup = 0;
1319 last_value_matrix = mtx;
1320 for (orgrow=0; orgrow < mtx->order; orgrow++) {
1321 elt = mtx->hdr.row[orgrow];
1322 prevlink = &(mtx->hdr.row[orgrow]);
1323 while (NOTNULL(elt)) {
1324 if (ISNULL(ev[elt->col])) {
1325 ev[elt->col] = elt; /* mark first elt found for this col */
1326 prevlink= &(elt->next.col); /* collect pointer to where we go next */
1327 elt = elt->next.col; /* go there */
1328 } else {
1329 /* elt is duplicate and must die */
1330 dup++;
1331 ev[elt->col]->value += elt->value; /* accumulate value */
1332 /* update lowmark, highmark . this is a debatable implementation. */
1333 /* for small mods on large matrices, this is sane */
1334 if (lowmark > -1) { /* not first mark */
1335 if (elt->col < lowmark && elt->col > -1) {
1336 lowmark = elt->col;
1337 }
1338 if (elt->col > highmark && elt->col < mtx->order) {
1339 highmark = elt->col;
1340 }
1341 } else { /* very first mark */
1342 if (elt->col > -1 && elt->col < mtx->order) {
1343 lowmark = highmark = elt->col;
1344 }
1345 }
1346 mv[elt->col] = 1; /* mark column as to be cleaned */
1347 delete_from_row(prevlink);
1348 elt = *prevlink;
1349 }
1350 }
1351 elt = mtx->hdr.row[orgrow];
1352 prevlink = &(mtx->hdr.row[orgrow]);
1353 while (NOTNULL(elt)) { /* renull the accumulator and trash 0s */
1354 ev[elt->col] = NULL; /* regardless, reset accum */
1355 if (elt->value != D_ZERO) {
1356 prevlink= &(elt->next.col); /* collect pointer to where we go next */
1357 elt = elt->next.col; /* go there */
1358 } else {
1359 /* this is still a debatable implementation. */
1360 if (lowmark > -1) { /* not first mark */
1361 if (elt->col < lowmark && elt->col > -1) {
1362 lowmark = elt->col;
1363 }
1364 if (elt->col > highmark && elt->col < mtx->order) {
1365 highmark = elt->col;
1366 }
1367 } else { /* very first mark */
1368 if (elt->col > -1 && elt->col < mtx->order) {
1369 lowmark = highmark = elt->col;
1370 }
1371 }
1372 mv[elt->col] = 1; /* mark column as to be cleaned */
1373 delete_from_row(prevlink);
1374 elt = *prevlink;
1375 }
1376 }
1377 }
1378 for (orgcol = lowmark; orgcol <= highmark; orgcol++) {
1379 if (mv[orgcol]) {
1380 clean_col(mtx,orgcol); /* scrap dups and 0s */
1381 }
1382 }
1383 mtx_null_mark_release();
1384 mtx_null_vector_release();
1385 return -dup;
1386 }
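
/*
    Example (added illustration, not part of the original source): the
    fill/assemble idiom. mtx_fill_value() never looks for an existing
    element, so repeated fills at one coordinate pile up duplicates;
    mtx_assemble() then sums the duplicates and drops exact zeros.
*/
static void example_assemble(mtx_matrix_t mtx){
    mtx_coord_t c;
    mtx_fill_value(mtx, mtx_coord(&c, 0, 0), 2.0);
    mtx_fill_value(mtx, &c, 3.0);  /* duplicate incidence at (0,0) */
    (void)mtx_assemble(mtx);       /* leaves one element valued 5.0 */
}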
1387
1388 void mtx_del_zr_in_row(mtx_matrix_t mtx, int32 row){
1389 register int32 org;
1390 struct element_t **rlink, **clink;
1391
1392 #if MTX_DEBUG
1393 if(!mtx_check_matrix(mtx)) return;
1394 #endif
1395 org = mtx->perm.row.cur_to_org[row];
1396 rlink = &(mtx->hdr.row[org]);
1397
1398 last_value_matrix = mtx;
1399 while( NOTNULL(*rlink) )
1400 if( (*rlink)->value == D_ZERO ) {
1401 clink = &(mtx->hdr.col[(*rlink)->col]);
1402 for( ; NOTNULL(*clink) && (*clink)->row != org ; )
1403 clink = &((*clink)->next.row);
1404 delete_from_row(rlink);
1405 delete_from_col(clink);
1406 } else
1407 rlink = &((*rlink)->next.col);
1408 }
1409
1410 void mtx_del_zr_in_col(mtx_matrix_t mtx, int32 col){
1411 register int32 org;
1412 struct element_t **clink, **rlink;
1413
1414 #if MTX_DEBUG
1415 if(!mtx_check_matrix(mtx)) return;
1416 #endif
1417 org = mtx->perm.col.cur_to_org[col];
1418 clink = &(mtx->hdr.col[org]);
1419
1420 last_value_matrix = mtx;
1421 while( NOTNULL(*clink) )
1422 if( (*clink)->value == D_ZERO ) {
1423 rlink = &(mtx->hdr.row[(*clink)->row]);
1424 for( ; NOTNULL(*rlink) && (*rlink)->col != org ; )
1425 rlink = &((*rlink)->next.col);
1426 delete_from_col(clink);
1427 delete_from_row(rlink);
1428 } else
1429 clink = &((*clink)->next.row);
1430 }
1431
1432 void mtx_del_zr_in_rowrange(mtx_matrix_t mtx, mtx_range_t *rng){
1433 register int32 org,row,rowhi, *toorg;
1434 struct element_t **rlink, **clink;
1435
1436 #if MTX_DEBUG
1437 if(!mtx_check_matrix(mtx)) return;
1438 #endif
1439 rowhi=rng->high;
1440 toorg= mtx->perm.row.cur_to_org;
1441 last_value_matrix = mtx;
1442 for (row=rng->low; row <=rowhi; row++) {
1443 org = toorg[row];
1444 rlink = &(mtx->hdr.row[org]);
1445
1446 while( NOTNULL(*rlink) ) {
1447 if( (*rlink)->value == D_ZERO ) {
1448 clink = &(mtx->hdr.col[(*rlink)->col]);
1449 for( ; NOTNULL(*clink) && (*clink)->row != org ; )
1450 clink = &((*clink)->next.row);
1451 delete_from_row(rlink);
1452 delete_from_col(clink);
1453 } else {
1454 rlink = &((*rlink)->next.col);
1455 }
1456 }
1457
1458 }
1459 }
1460
1461 void mtx_del_zr_in_colrange(mtx_matrix_t mtx, mtx_range_t *rng){
1462 register int32 org,col,colhi, *toorg;
1463 struct element_t **clink, **rlink;
1464
1465 #if MTX_DEBUG
1466 if(!mtx_check_matrix(mtx)) return;
1467 #endif
1468 colhi=rng->high;
1469 toorg= mtx->perm.col.cur_to_org;
1470 last_value_matrix = mtx;
1471 for (col=rng->low; col <=colhi; col++) {
1472 org = toorg[col];
1473 clink = &(mtx->hdr.col[org]);
1474
1475 while( NOTNULL(*clink) ) {
1476 if( (*clink)->value == D_ZERO ) {
1477 rlink = &(mtx->hdr.row[(*clink)->row]);
1478 for( ; NOTNULL(*rlink) && (*rlink)->col != org ; )
1479 rlink = &((*rlink)->next.col);
1480 delete_from_col(clink);
1481 delete_from_row(rlink);
1482 } else {
1483 clink = &((*clink)->next.row);
1484 }
1485 }
1486
1487 }
1488 }
1489
1490 void mtx_steal_org_row_vec(mtx_matrix_t mtx, int32 row
1491 , real64 *vec, mtx_range_t *rng
1492 ){
1493 struct element_t **rlink, **clink;
1494 int32 org_row;
1495
1496 #if MTX_DEBUG
1497 if( !mtx_check_matrix(mtx) ) return;
1498 #endif
1499
1500 last_value_matrix = mtx;
1501 org_row = mtx->perm.row.cur_to_org[row];
1502 rlink = &(mtx->hdr.row[org_row]);
1503 if( rng == mtx_ALL_COLS ) {
1504 while( NOTNULL(*rlink) ) {
1505 vec[(*rlink)->col] = (*rlink)->value;
1506 clink = &(mtx->hdr.col[(*rlink)->col]);
1507 for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
1508 clink = &((*clink)->next.row);
1509 delete_from_row(rlink);
1510 delete_from_col(clink);
1511 }
1512 } else if( rng->high >= rng->low ) {
1513 int32 *tocur;
1514 tocur = mtx->perm.col.org_to_cur;
1515 while( NOTNULL(*rlink) ) {
1516 if( in_range(rng,tocur[(*rlink)->col]) ) {
1517 vec[(*rlink)->col] = (*rlink)->value;
1518 clink = &(mtx->hdr.col[(*rlink)->col]);
1519 for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
1520 clink = &((*clink)->next.row);
1521 delete_from_row(rlink);
1522 delete_from_col(clink);
1523 } else {
1524 rlink = &((*rlink)->next.col);
1525 }
1526 }
1527 }
1528 }
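
/*
    Example (added illustration, not part of the original source):
    "stealing" moves a row's values into a dense vector indexed by
    ORGANIZED column number and removes those elements from the matrix.
    vec must span the matrix capacity and is only written at stolen
    positions, so zero it first if a clean dense image is wanted.
*/
static void example_steal_row(mtx_matrix_t mtx, real64 *vec){
    mtx_zero_real64(vec, mtx_capacity(mtx));
    mtx_steal_org_row_vec(mtx, 3, vec, mtx_ALL_COLS); /* current row 3 */
}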
1529
1530 void mtx_steal_org_col_vec(mtx_matrix_t mtx, int32 col
1531 , real64 *vec, mtx_range_t *rng
1532 ){
1533 struct element_t **clink, **rlink;
1534 int32 org_col;
1535
1536 #if MTX_DEBUG
1537 if( !mtx_check_matrix(mtx) ) return;
1538 #endif
1539
1540 last_value_matrix = mtx;
1541 org_col = mtx->perm.col.cur_to_org[col];
1542 clink = &(mtx->hdr.col[org_col]);
1543 if( rng == mtx_ALL_ROWS ) {
1544 while( NOTNULL(*clink) ) {
1545 vec[(*clink)->row] = (*clink)->value;
1546 rlink = &(mtx->hdr.row[(*clink)->row]);
1547 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1548 rlink = &((*rlink)->next.col);
1549 delete_from_col(clink);
1550 delete_from_row(rlink);
1551 }
1552 } else if( rng->high >= rng->low ) {
1553 int32 *tocur;
1554 tocur = mtx->perm.row.org_to_cur;
1555 while( NOTNULL(*clink) ) {
1556 if( in_range(rng,tocur[(*clink)->row]) ) {
1557 vec[(*clink)->row] = (*clink)->value;
1558 rlink = &(mtx->hdr.row[(*clink)->row]);
1559 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1560 rlink = &((*rlink)->next.col);
1561 delete_from_col(clink);
1562 delete_from_row(rlink);
1563 } else {
1564 clink = &((*clink)->next.row);
1565 }
1566 }
1567 }
1568 }
1569
1570 void mtx_steal_cur_row_vec(mtx_matrix_t mtx, int32 row
1571 , real64 *vec, mtx_range_t *rng
1572 ){
1573 struct element_t **rlink, **clink;
1574 int32 org_row, *tocur;
1575
1576 #if MTX_DEBUG
1577 if( !mtx_check_matrix(mtx) ) return;
1578 #endif
1579
1580 tocur = mtx->perm.col.org_to_cur;
1581 last_value_matrix = mtx;
1582 org_row = mtx->perm.row.cur_to_org[row];
1583 rlink = &(mtx->hdr.row[org_row]);
1584 if( rng == mtx_ALL_COLS ) {
1585 while( NOTNULL(*rlink) ) {
1586 vec[tocur[(*rlink)->col]] = (*rlink)->value;
1587 clink = &(mtx->hdr.col[(*rlink)->col]);
1588 for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
1589 clink = &((*clink)->next.row);
1590 delete_from_row(rlink);
1591 delete_from_col(clink);
1592 }
1593 } else if( rng->high >= rng->low ) {
1594 while( NOTNULL(*rlink) ) {
1595 if( in_range(rng,tocur[(*rlink)->col]) ) {
1596 vec[tocur[(*rlink)->col]] = (*rlink)->value;
1597 clink = &(mtx->hdr.col[(*rlink)->col]);
1598 for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
1599 clink = &((*clink)->next.row);
1600 delete_from_row(rlink);
1601 delete_from_col(clink);
1602 } else {
1603 rlink = &((*rlink)->next.col);
1604 }
1605 }
1606 }
1607 }
1608
1609 void mtx_steal_cur_col_vec(mtx_matrix_t mtx, int32 col
1610 , real64 *vec, mtx_range_t *rng
1611 ){
1612 struct element_t **clink, **rlink;
1613 int32 org_col, *tocur;
1614
1615 #if MTX_DEBUG
1616 if( !mtx_check_matrix(mtx) ) return;
1617 #endif
1618
1619 tocur = mtx->perm.row.org_to_cur;
1620 last_value_matrix = mtx;
1621 org_col = mtx->perm.col.cur_to_org[col];
1622 clink = &(mtx->hdr.col[org_col]);
1623 if( rng == mtx_ALL_ROWS ) {
1624 while( NOTNULL(*clink) ) {
1625 vec[tocur[(*clink)->row]] = (*clink)->value;
1626 rlink = &(mtx->hdr.row[(*clink)->row]);
1627 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1628 rlink = &((*rlink)->next.col);
1629 delete_from_col(clink);
1630 delete_from_row(rlink);
1631 }
1632 } else if( rng->high >= rng->low ) {
1633 while( NOTNULL(*clink) ) {
1634 if( in_range(rng,tocur[(*clink)->row]) ) {
1635 vec[tocur[(*clink)->row]] = (*clink)->value;
1636 rlink = &(mtx->hdr.row[(*clink)->row]);
1637 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1638 rlink = &((*rlink)->next.col);
1639 delete_from_col(clink);
1640 delete_from_row(rlink);
1641 } else {
1642 clink = &((*clink)->next.row);
1643 }
1644 }
1645 }
1646 }
1647
1648 #ifdef DELETE_THIS_UNUSED_FUNCTION
1649 /* Little function to enlarge the capacity of a sparse to the len given.
1650 * New memory allocated, if any, is not initialized in any way.
1651 * The pointer given is not itself changed, only its data.
1652 * This function in no way shrinks the data in a sparse.
1653 * (exception: buggy realloc implementations may shrink it, ack!)
1654 * Data/idata values up to the old capacity (ret->cap) are preserved.
1655 */
1656 static int enlarge_sparse(mtx_sparse_t *ret, int32 len){
1657 int32 *inew;
1658 real64 *dnew;
1659 if (ISNULL(ret)) {
1660 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with NULL.\n");
1661 return 1;
1662 }
1663 if (len <= ret->cap || len <1) return 0; /* already big enough */
1664
1665 if (ret->idata == NULL) {
1666 inew = ASC_NEW_ARRAY(int32,len);
1667 } else {
1668 inew = (int32 *)ascrealloc(ret->idata,sizeof(int32)*len);
1669 }
1670 if (ISNULL(inew)) {
1671
1672 ERROR_REPORTER_HERE(ASC_PROG_ERR," Insufficient memory.\n");
1673 return 1;
1674 }
1675 ret->idata = inew; /* dnew can still fail without losing inew memory. */
1676
1677 if (ret->data == NULL) {
1678 dnew = ASC_NEW_ARRAY(real64,len);
1679 } else {
1680 dnew = (real64 *)ascrealloc(ret->data,sizeof(real64)*len);
1681 }
1682 if (ISNULL(dnew)) {
1683
1684 ERROR_REPORTER_HERE(ASC_PROG_ERR," Insufficient memory.\n");
1685 return 1;
1686 }
1687 ret->data = dnew; /* we succeeded */
1688 ret->cap = len;
1689 return 0;
1690 }
1691 #endif /* DELETE_THIS_UNUSED_FUNCTION */
1692
1693
1694 /* going to try to make steal also handle sparse creation ...*/
1695 /* don't you dare, whoever you are! */
1696 boolean mtx_steal_org_row_sparse(mtx_matrix_t mtx, int32 row
1697 , mtx_sparse_t *sp, mtx_range_t *rng
1698 ){
1699 struct element_t **rlink, **clink;
1700 int32 org_row;
1701 int32 len,k;
1702
1703 #if MTX_DEBUG
1704 if( !mtx_check_matrix(mtx) ) return TRUE;
1705 #endif
1706 if (sp == mtx_CREATE_SPARSE) {
1707 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with mtx_CREATE_SPARSE. Not supported.\n");
1708 return TRUE;
1709 }
1710 if (rng == mtx_ALL_COLS) {
1711 len = mtx->order;
1712 } else {
1713 len = rng->high - rng->low +1;
1714 }
1715 if (sp->cap < len) {
1716 sp->len = 0;
1717 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with sparse of insufficient capacity.\n");
1718 return TRUE;
1719 }
1720
1721 last_value_matrix = mtx;
1722 org_row = mtx->perm.row.cur_to_org[row];
1723 rlink = &(mtx->hdr.row[org_row]);
1724 k = 0;
1725
1726 if( rng == mtx_ALL_COLS ) {
1727 while( NOTNULL(*rlink) ) {
1728 sp->idata[k] = (*rlink)->col;
1729 sp->data[k] = (*rlink)->value;
1730 k++;
1731 clink = &(mtx->hdr.col[(*rlink)->col]);
1732 for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
1733 clink = &((*clink)->next.row);
1734 delete_from_row(rlink);
1735 delete_from_col(clink);
1736 }
1737 } else if( rng->high >= rng->low ) {
1738 int32 *tocur;
1739 tocur = mtx->perm.col.org_to_cur;
1740 while( NOTNULL(*rlink) ) {
1741 if( in_range(rng,tocur[(*rlink)->col]) ) {
1742 sp->idata[k] = (*rlink)->col;
1743 sp->data[k] = (*rlink)->value;
1744 k++;
1745 clink = &(mtx->hdr.col[(*rlink)->col]);
1746 for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
1747 clink = &((*clink)->next.row);
1748 delete_from_row(rlink);
1749 delete_from_col(clink);
1750 } else {
1751 rlink = &((*rlink)->next.col);
1752 }
1753 }
1754 }
1755 sp->len = k;
1756 return FALSE;
1757 }
1758
1759 boolean mtx_steal_org_col_sparse(mtx_matrix_t mtx, int32 col
1760 , mtx_sparse_t *sp, mtx_range_t *rng
1761 ){
1762 struct element_t **clink, **rlink;
1763 int32 org_col;
1764 int32 len,k;
1765
1766 #if MTX_DEBUG
1767 if( !mtx_check_matrix(mtx) ) return TRUE;
1768 #endif
1769 if (sp == mtx_CREATE_SPARSE) {
1770 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with mtx_CREATE_SPARSE. Not supported.\n");
1771 return TRUE;
1772 }
1773 if (rng == mtx_ALL_ROWS) {
1774 len = mtx->order;
1775 } else {
1776 len = rng->high - rng->low +1;
1777 }
1778 if (sp->cap < len) {
1779 sp->len = 0;
1780 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with sparse of insufficient capacity.\n");
1781 return TRUE;
1782 }
1783
1784 last_value_matrix = mtx;
1785 org_col = mtx->perm.col.cur_to_org[col];
1786 clink = &(mtx->hdr.col[org_col]);
1787 k = 0;
1788
1789 if( rng == mtx_ALL_ROWS ) {
1790 while( NOTNULL(*clink) ) {
1791 sp->idata[k] = (*clink)->row;
1792 sp->data[k] = (*clink)->value;
1793 k++;
1794 rlink = &(mtx->hdr.row[(*clink)->row]);
1795 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1796 rlink = &((*rlink)->next.col);
1797 delete_from_col(clink);
1798 delete_from_row(rlink);
1799 }
1800 } else if( rng->high >= rng->low ) {
1801 int32 *tocur;
1802 tocur = mtx->perm.row.org_to_cur;
1803 while( NOTNULL(*clink) ) {
1804 if( in_range(rng,tocur[(*clink)->row]) ) {
1805 sp->idata[k] = (*clink)->row;
1806 sp->data[k] = (*clink)->value;
1807 k++;
1808 rlink = &(mtx->hdr.row[(*clink)->row]);
1809 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1810 rlink = &((*rlink)->next.col);
1811 delete_from_col(clink);
1812 delete_from_row(rlink);
1813 } else {
1814 clink = &((*clink)->next.row);
1815 }
1816 }
1817 }
1818 sp->len = k;
1819 return FALSE;
1820 }
1821
1822 boolean mtx_steal_cur_row_sparse(mtx_matrix_t mtx, int32 row
1823 , mtx_sparse_t *sp, mtx_range_t *rng
1824 ){
1825 struct element_t **rlink, **clink;
1826 int32 org_row, *tocur;
1827 int32 len,k;
1828
1829 #if MTX_DEBUG
1830 if( !mtx_check_matrix(mtx) ) return TRUE;
1831 #endif
1832 if (sp == mtx_CREATE_SPARSE) {
1833 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with mtx_CREATE_SPARSE. Not supported.\n");
1834 return TRUE;
1835 }
1836 if (rng == mtx_ALL_COLS) {
1837 len = mtx->order;
1838 } else {
1839 len = rng->high - rng->low +1;
1840 }
1841 if (sp->cap < len) {
1842 sp->len = 0;
1843 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with sparse of insufficient capacity.\n");
1844 return TRUE;
1845 }
1846
1847 tocur = mtx->perm.col.org_to_cur;
1848 last_value_matrix = mtx;
1849 org_row = mtx->perm.row.cur_to_org[row];
1850 rlink = &(mtx->hdr.row[org_row]);
1851 k = 0;
1852
1853 if( rng == mtx_ALL_COLS ) {
1854 while( NOTNULL(*rlink) ) {
1855 sp->idata[k] = tocur[(*rlink)->col];
1856 sp->data[k] = (*rlink)->value;
1857 k++;
1858 clink = &(mtx->hdr.col[(*rlink)->col]);
1859 for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
1860 clink = &((*clink)->next.row);
1861 delete_from_row(rlink);
1862 delete_from_col(clink);
1863 }
1864 } else if( rng->high >= rng->low ) {
1865 while( NOTNULL(*rlink) ) {
1866 if( in_range(rng,tocur[(*rlink)->col]) ) {
1867 sp->idata[k] = tocur[(*rlink)->col];
1868 sp->data[k] = (*rlink)->value;
1869 k++;
1870 clink = &(mtx->hdr.col[(*rlink)->col]);
1871 for( ; NOTNULL(*clink) && (*clink)->row != org_row ; )
1872 clink = &((*clink)->next.row);
1873 delete_from_row(rlink);
1874 delete_from_col(clink);
1875 } else {
1876 rlink = &((*rlink)->next.col);
1877 }
1878 }
1879 }
1880 sp->len = k;
1881 return FALSE;
1882 }
1883
1884 boolean mtx_steal_cur_col_sparse(mtx_matrix_t mtx, int32 col
1885 , mtx_sparse_t *sp, mtx_range_t *rng
1886 ){
1887 struct element_t **clink, **rlink;
1888 int32 org_col, *tocur;
1889 int32 len,k;
1890
1891 #if MTX_DEBUG
1892 if( !mtx_check_matrix(mtx) ) return TRUE;
1893 #endif
1894 if (sp == mtx_CREATE_SPARSE) {
1895 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with mtx_CREATE_SPARSE. Not supported.\n");
1896 return TRUE;
1897 }
1898 if (rng == mtx_ALL_ROWS) {
1899 len = mtx->order;
1900 } else {
1901 len = rng->high - rng->low +1;
1902 }
1903 if (sp->cap < len) {
1904 sp->len = 0;
1905 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with sparse of insufficient capacity.\n");
1906 return TRUE;
1907 }
1908
1909 tocur = mtx->perm.row.org_to_cur;
1910 last_value_matrix = mtx;
1911 org_col = mtx->perm.col.cur_to_org[col];
1912 clink = &(mtx->hdr.col[org_col]);
1913 k = 0;
1914
1915 if( rng == mtx_ALL_ROWS ) {
1916 while( NOTNULL(*clink) ) {
1917 sp->idata[k] = tocur[(*clink)->row];
1918 sp->data[k] = (*clink)->value;
1919 k++;
1920 rlink = &(mtx->hdr.row[(*clink)->row]);
1921 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1922 rlink = &((*rlink)->next.col);
1923 delete_from_col(clink);
1924 delete_from_row(rlink);
1925 }
1926 } else if( rng->high >= rng->low ) {
1927 while( NOTNULL(*clink) ) {
1928 if( in_range(rng,tocur[(*clink)->row]) ) {
1929 sp->idata[k] = tocur[(*clink)->row];
1930 sp->data[k] = (*clink)->value;
1931 k++;
1932 rlink = &(mtx->hdr.row[(*clink)->row]);
1933 for( ; NOTNULL(*rlink) && (*rlink)->col != org_col ; )
1934 rlink = &((*rlink)->next.col);
1935 delete_from_col(clink);
1936 delete_from_row(rlink);
1937 } else {
1938 clink = &((*clink)->next.row);
1939 }
1940 }
1941 }
1942 sp->len = k;
1943 return FALSE;
1944 }
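
/*
	A minimal usage sketch for the steal functions; not part of the
	library. It assumes the mtx_order() accessor from mtx.h, the
	ascmalloc()/ascfree() pair from ascMalloc.h, and that mtx_sparse_t
	is a complete type, as its use above suggests. The wrapper name
	example_steal_row is hypothetical.
*/
#if 0
static void example_steal_row(mtx_matrix_t mtx, int32 cur_row){
  mtx_sparse_t sp;
  int32 n = mtx_order(mtx);

  /* the caller owns and sizes the sparse; order always suffices */
  sp.cap = n;
  sp.len = 0;
  sp.idata = (int32 *)ascmalloc(n*sizeof(int32));
  sp.data = (real64 *)ascmalloc(n*sizeof(real64));

  if( !mtx_steal_cur_row_sparse(mtx,cur_row,&sp,mtx_ALL_COLS) ) {
    /* success: sp.len values were moved out and the row is now empty */
  }
  ascfree(sp.idata);
  ascfree(sp.data);
}
#endif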
1945
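/*
	Note on the mtx_fill_*_vec family: each scatters a dense vector into
	one row or col, creating an element for every nonzero entry in
	range. The org/cur in the name describes how vec is indexed
	(original or current). Elements are created without checking for
	existing incidence, so these are intended for rows/cols known to be
	empty, e.g. after a steal or clear.
*/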
1946 void mtx_fill_org_row_vec(mtx_matrix_t mtx, int32 row
1947 , real64 *vec, mtx_range_t *rng
1948 ){
1949 int32 org_row,highcol, *toorg;
1950 register int32 org_col;
1951 register real64 value;
1952
1953 #if MTX_DEBUG
1954 if(!mtx_check_matrix(mtx)) return;
1955 #endif
1956
1957 org_row = mtx->perm.row.cur_to_org[row];
1958 if( rng == mtx_ALL_COLS ) {
1959 highcol=mtx->order;
1960 for( org_col=0 ; org_col <highcol ; org_col++ ) {
1961 if ((value=vec[org_col]) != D_ZERO)
1962 mtx_create_element_value(mtx,org_row,org_col,value);
1963 }
1964 } else {
1965 register int32 cur_col;
1966
1967 toorg = mtx->perm.col.cur_to_org;
1968 highcol= rng->high;
1969 for ( cur_col=rng->low; cur_col<=highcol; cur_col++) {
1970 if ((value=vec[(org_col=toorg[cur_col])]) != D_ZERO)
1971 mtx_create_element_value(mtx,org_row,org_col,value);
1972 }
1973 }
1974 }
1975
1976 void mtx_fill_org_col_vec(mtx_matrix_t mtx
1977 , int32 col, real64 *vec, mtx_range_t *rng
1978 ){
1979 int32 org_col,highrow, *toorg;
1980 register int32 org_row;
1981 register real64 value;
1982
1983 #if MTX_DEBUG
1984 if(!mtx_check_matrix(mtx)) return;
1985 #endif
1986
1987 org_col = mtx->perm.col.cur_to_org[col];
1988 if( rng == mtx_ALL_ROWS ) {
1989 highrow=mtx->order;
1990 for( org_row=0 ; org_row <highrow ; org_row++ ) {
1991 if ((value=vec[org_row]) != D_ZERO)
1992 mtx_create_element_value(mtx,org_row,org_col,value);
1993 }
1994 } else {
1995 register int32 cur_row;
1996
1997 toorg = mtx->perm.row.cur_to_org;
1998 highrow= rng->high;
1999 for ( cur_row=rng->low; cur_row<=highrow; cur_row++) {
2000 if ((value=vec[(org_row=toorg[cur_row])]) != D_ZERO)
2001 mtx_create_element_value(mtx,org_row,org_col,value);
2002 }
2003 }
2004 }
2005
2006 void mtx_fill_cur_row_vec(mtx_matrix_t mtx
2007 , int32 row, real64 *vec, mtx_range_t *rng
2008 ){
2009 int32 org_row,highcol,lowcol, *toorg;
2010 register int32 cur_col;
2011 register real64 value;
2012
2013 #if MTX_DEBUG
2014 if(!mtx_check_matrix(mtx)) return;
2015 #endif
2016
2017 org_row = mtx->perm.row.cur_to_org[row];
2018 toorg=mtx->perm.col.cur_to_org;
2019 if( rng == mtx_ALL_COLS ) {
2020 highcol = mtx->order-1;
2021 lowcol = 0;
2022 } else {
2023 highcol= rng->high;
2024 lowcol=rng->low;
2025 }
2026 for( cur_col=lowcol ; cur_col <= highcol ; cur_col++ ) {
2027 if ((value=vec[cur_col]) != D_ZERO)
2028 mtx_create_element_value(mtx,org_row,toorg[cur_col],value);
2029 }
2030 }
2031
2032 void mtx_fill_cur_col_vec(mtx_matrix_t mtx
2033 , int32 col, real64 *vec, mtx_range_t *rng
2034 ){
2035 int32 org_col,highrow,lowrow, *toorg;
2036 register int32 cur_row;
2037 register real64 value;
2038
2039 #if MTX_DEBUG
2040 if(!mtx_check_matrix(mtx)) return;
2041 #endif
2042
2043 org_col = mtx->perm.col.cur_to_org[col];
2044 toorg=mtx->perm.row.cur_to_org;
2045 if( rng == mtx_ALL_ROWS ) {
2046 highrow=mtx->order-1;
2047 lowrow=0;
2048 } else {
2049 highrow= rng->high;
2050 lowrow=rng->low;
2051 }
2052 for( cur_row=lowrow ; cur_row <= highrow ; cur_row++ ) {
2053 if ((value=vec[cur_row]) != D_ZERO)
2054 mtx_create_element_value(mtx,toorg[cur_row],org_col,value);
2055 }
2056 }
2057
2058 void mtx_dropfill_cur_col_vec(mtx_matrix_t mtx, int32 col,
2059 real64 *vec, mtx_range_t *rng,
2060 real64 tol
2061 ){
2062 int32 org_col,highrow,lowrow, *toorg;
2063 register int32 cur_row;
2064 register real64 value;
2065
2066 if (tol==0.0) {
2067 mtx_fill_cur_col_vec(mtx,col,vec,rng);
2068 return;
2069 }
2070
2071 #if MTX_DEBUG
2072 if(!mtx_check_matrix(mtx)) return;
2073 #endif
2074
2075 org_col = mtx->perm.col.cur_to_org[col];
2076 toorg=mtx->perm.row.cur_to_org;
2077 if( rng == mtx_ALL_ROWS ) {
2078 highrow=mtx->order-1;
2079 lowrow=0;
2080 } else {
2081 highrow= rng->high;
2082 lowrow=rng->low;
2083 }
2084 for( cur_row=lowrow ; cur_row <= highrow ; cur_row++ ) {
2085 if (fabs(value=vec[cur_row]) > tol)
2086 mtx_create_element_value(mtx,toorg[cur_row],org_col,value);
2087 }
2088 }
2089
2090 void mtx_dropfill_cur_row_vec(mtx_matrix_t mtx, int32 row,
2091 real64 *vec, mtx_range_t *rng,
2092 real64 tol
2093 ){
2094 int32 org_row,highcol,lowcol, *toorg;
2095 register int32 cur_col;
2096 register real64 value;
2097
2098 if (tol==0.0) {
2099 mtx_fill_cur_row_vec(mtx,row,vec,rng);
2100 return;
2101 }
2102
2103 #if MTX_DEBUG
2104 if(!mtx_check_matrix(mtx)) return;
2105 #endif
2106
2107 org_row = mtx->perm.row.cur_to_org[row];
2108 toorg=mtx->perm.col.cur_to_org;
2109 if( rng == mtx_ALL_COLS ) {
2110 highcol = mtx->order-1;
2111 lowcol = 0;
2112 } else {
2113 highcol= rng->high;
2114 lowcol=rng->low;
2115 }
2116 for( cur_col=lowcol ; cur_col <= highcol ; cur_col++ ) {
2117 if (fabs(value=vec[cur_col]) > tol)
2118 mtx_create_element_value(mtx,org_row,toorg[cur_col],value);
2119 }
2120 }
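
/*
	The mtx_dropfill_* variants behave like the corresponding
	mtx_fill_*_vec calls but create elements only where
	fabs(vec[i]) > tol; with tol == 0.0 they delegate to the plain fill,
	which keeps every exact nonzero.
*/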
2121
2122 void mtx_fill_org_row_sparse(mtx_matrix_t mtx, int32 row,
2123 const mtx_sparse_t *sp
2124 ){
2125 int32 orgrow,i;
2126 #if MTX_DEBUG
2127 if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
2128 #endif
2129 orgrow = mtx->perm.row.cur_to_org[row];
2130 for (i=0; i < sp->len; i++) {
2131 if (sp->data[i] != D_ZERO
2132 #if MTX_DEBUG
2133 && sp->idata[i] >=0 && sp->idata[i] < mtx->order
2134 #endif
2135 ) {
2136 mtx_create_element_value(mtx,orgrow,sp->idata[i],sp->data[i]);
2137 }
2138 }
2139 }
2140
2141 void mtx_fill_org_col_sparse(mtx_matrix_t mtx, int32 col,
2142 const mtx_sparse_t *sp
2143 ){
2144 int32 orgcol,i;
2145 #if MTX_DEBUG
2146 if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
2147 #endif
2148 orgcol = mtx->perm.col.cur_to_org[col];
2149 for (i=0; i < sp->len; i++) {
2150 if (sp->data[i] != D_ZERO
2151 #if MTX_DEBUG
2152 && sp->idata[i] >=0 && sp->idata[i] < mtx->order
2153 #endif
2154 ) {
2155 mtx_create_element_value(mtx,sp->idata[i],orgcol,sp->data[i]);
2156 }
2157 }
2158 }
2159
2160 void mtx_fill_cur_row_sparse(mtx_matrix_t mtx, int32 row,
2161 const mtx_sparse_t *sp
2162 ){
2163 int32 orgrow,i;
2164 #if MTX_DEBUG
2165 if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
2166 #endif
2167 orgrow = mtx->perm.row.cur_to_org[row];
2168 for (i=0; i < sp->len; i++) {
2169 if (
2170 #if MTX_DEBUG
2171 sp->idata[i] >=0 && sp->idata[i] < mtx->order &&
2172 #endif
2173 sp->data[i] != D_ZERO) {
2174 mtx_create_element_value(mtx,orgrow,
2175 mtx->perm.col.cur_to_org[sp->idata[i]],
2176 sp->data[i]);
2177 }
2178 }
2179 }
2180
2181 void mtx_fill_cur_col_sparse(mtx_matrix_t mtx, int32 col,
2182 const mtx_sparse_t *sp
2183 ){
2184 int32 orgcol,i;
2185 #if MTX_DEBUG
2186 if(!mtx_check_matrix(mtx) || !mtx_check_sparse(sp)) return;
2187 #endif
2188 orgcol = mtx->perm.col.cur_to_org[col];
2189 for (i=0; i < sp->len; i++) {
2190 if (
2191 #if MTX_DEBUG
2192 sp->idata[i] >=0 && sp->idata[i] < mtx->order &&
2193 #endif
2194 sp->data[i] != D_ZERO) {
2195        mtx_create_element_value(mtx,
2196                                 mtx->perm.row.cur_to_org[sp->idata[i]], /* row perm: idata holds cur row indices */
2197                                 orgcol,
2198                                 sp->data[i]);
2199 }
2200 }
2201 }
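
/*
	A minimal sketch of filling a row from a sparse; not part of the
	library. Note the row argument is a current index, while the org/cur
	in the function name describes how sp->idata is read. The values
	below are purely illustrative.
*/
#if 0
static void example_fill_row(mtx_matrix_t mtx, int32 cur_row){
  int32 idata[3] = {0, 2, 5};          /* current column indices */
  real64 data[3] = {1.0, -2.5, 4.0};
  mtx_sparse_t sp;

  sp.cap = 3;
  sp.len = 3;
  sp.idata = idata;
  sp.data = data;
  /* creates three elements in row cur_row; zeros in data would be skipped */
  mtx_fill_cur_row_sparse(mtx,cur_row,&sp);
}
#endif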
2202
2203 void mtx_mult_row(mtx_matrix_t mtx, int32 row, real64 factor, mtx_range_t *rng){
2204 struct element_t *elt;
2205 int32 *tocur;
2206
2207 if( factor == D_ZERO ) {
2208 mtx_clear_row(mtx,row,rng);
2209 return;
2210 }
2211
2212 if (factor == D_ONE) return;
2213 #if MTX_DEBUG
2214 if(!mtx_check_matrix(mtx)) return;
2215 #endif
2216
2217 tocur = mtx->perm.col.org_to_cur;
2218 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[row]];
2219 if( rng == mtx_ALL_COLS )
2220 for( ; NOTNULL(elt); elt = elt->next.col ) elt->value *= factor;
2221 else if( rng->high >= rng->low )
2222 for( ; NOTNULL(elt); elt = elt->next.col )
2223 if( in_range(rng,tocur[elt->col]) ) elt->value *= factor;
2224 }
2225
2226 void mtx_mult_col(mtx_matrix_t mtx, int32 col, real64 factor, mtx_range_t *rng){
2227 struct element_t *elt;
2228 int32 *tocur;
2229
2230 if( factor == D_ZERO ) {
2231 mtx_clear_col(mtx,col,rng);
2232 return;
2233 }
2234 if (factor == D_ONE) return;
2235
2236 #if MTX_DEBUG
2237 if(!mtx_check_matrix(mtx)) return;
2238 #endif
2239
2240 tocur = mtx->perm.row.org_to_cur;
2241 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[col]];
2242 if( rng == mtx_ALL_ROWS )
2243 for( ; NOTNULL(elt); elt = elt->next.row ) elt->value *= factor;
2244 else if( rng->high >= rng->low )
2245 for( ; NOTNULL(elt); elt = elt->next.row )
2246 if( in_range(rng,tocur[elt->row]) ) elt->value *= factor;
2247 }
2248
2249 void mtx_mult_row_zero(mtx_matrix_t mtx, int32 row, mtx_range_t *rng){
2250 struct element_t *elt;
2251 int32 *tocur;
2252
2253 #if MTX_DEBUG
2254 if(!mtx_check_matrix(mtx)) return;
2255 #endif
2256
2257 tocur = mtx->perm.col.org_to_cur;
2258 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[row]];
2259 if( rng == mtx_ALL_COLS )
2260 for( ; NOTNULL(elt); elt = elt->next.col ) elt->value = D_ZERO;
2261 else if( rng->high >= rng->low )
2262 for( ; NOTNULL(elt); elt = elt->next.col )
2263 if( in_range(rng,tocur[elt->col]) ) elt->value = D_ZERO;
2264 }
2265
2266 void mtx_mult_col_zero(mtx_matrix_t mtx, int32 col, mtx_range_t *rng){
2267 struct element_t *elt;
2268 int32 *tocur;
2269
2270 #if MTX_DEBUG
2271 if(!mtx_check_matrix(mtx)) return;
2272 #endif
2273
2274 tocur = mtx->perm.row.org_to_cur;
2275 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[col]];
2276 if( rng == mtx_ALL_ROWS )
2277 for( ; NOTNULL(elt); elt = elt->next.row ) elt->value = D_ZERO;
2278 else if( rng->high >= rng->low )
2279 for( ; NOTNULL(elt); elt = elt->next.row )
2280 if( in_range(rng,tocur[elt->row]) ) elt->value = D_ZERO;
2281 }
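
/*
	Note on the mtx_mult_* family: only elements already present in the
	row/col (and in rng) are touched. A factor of 0.0 delegates to
	mtx_clear_row/mtx_clear_col, which removes the incidence, whereas
	the *_zero variants set existing values to 0.0 but leave the
	elements in place.
*/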
2282
2283 void mtx_add_row(mtx_matrix_t mtx, int32 s_cur, int32 t_cur, real64 factor,
2284 mtx_range_t *rng
2285 ){
2286 register int32 org_col;
2287 int32 t_org,*tocur;
2288 struct element_t **arr,*elt;
2289
2290 #if MTX_DEBUG
2291 if(!mtx_check_matrix(mtx)) return;
2292 #endif
2293
2294 t_org = mtx->perm.row.cur_to_org[t_cur];
2295 if( rng == mtx_ALL_COLS ) {
2296 arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
2297 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[s_cur]];
2298 for( ; NOTNULL(elt); elt = elt->next.col ) {
2299 if( ISNULL(arr[(org_col=elt->col)]) ) {
2300 arr[org_col] =
2301 mtx_create_element_value(mtx,t_org,org_col,(factor * elt->value));
2302 } else {
2303 arr[org_col]->value += factor * elt->value;
2304 }
2305 }
2306 mtx_renull_using_row(mtx,t_org,arr);
2307 } else if( rng->high >= rng->low ) {
2308 register int32 cur_col;
2309
2310 tocur = mtx->perm.col.org_to_cur;
2311 arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
2312 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[s_cur]];
2313 for( ; NOTNULL(elt); elt = elt->next.col ) {
2314 cur_col=tocur[(org_col=elt->col)];
2315 if( in_range(rng,cur_col) ) {
2316 if( NOTNULL(arr[org_col]) ) {
2317 arr[org_col]->value += factor * elt->value;
2318 } else {
2319 arr[org_col] =
2320 mtx_create_element_value(mtx,t_org,org_col,
2321 (factor * elt->value));
2322           /* in ranged adds the hit rate on existing elements is usually lower */
2323 }
2324 }
2325 }
2326 mtx_renull_using_row(mtx,t_org,arr);
2327 }
2328 mtx_null_vector_release();
2329 }
2330
2331 void mtx_add_col(mtx_matrix_t mtx, int32 s_cur, int32 t_cur, real64 factor,
2332 mtx_range_t *rng
2333 ){
2334 int32 t_org,*tocur;
2335 struct element_t **arr,*elt;
2336
2337 #if MTX_DEBUG
2338 if(!mtx_check_matrix(mtx)) return;
2339 #endif
2340
2341 if( rng == mtx_ALL_ROWS ) {
2342 t_org = mtx->perm.col.cur_to_org[t_cur];
2343 arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
2344 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[s_cur]];
2345 for( ; NOTNULL(elt); elt = elt->next.row )
2346 if( ISNULL(arr[elt->row]) ) {
2347 arr[elt->row] =
2348 mtx_create_element_value(mtx,elt->row,t_org,
2349 (factor * elt->value));
2350 } else arr[elt->row]->value += factor * elt->value;
2351 mtx_renull_using_col(mtx,t_org,arr);
2352 } else if( rng->high >= rng->low ) {
2353 tocur = mtx->perm.row.org_to_cur;
2354 t_org = mtx->perm.col.cur_to_org[t_cur];
2355 arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
2356 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[s_cur]];
2357 for( ; NOTNULL(elt); elt = elt->next.row ) {
2358 if( in_range(rng,tocur[elt->row]) ) {
2359 if( NOTNULL(arr[elt->row]) ) {
2360 arr[elt->row]->value += factor * elt->value;
2361 } else {
2362 arr[elt->row] =
2363 mtx_create_element_value(mtx,elt->row,t_org,
2364 (factor * elt->value));
2365 }
2366 }
2367 }
2368 mtx_renull_using_col(mtx,t_org,arr);
2369 }
2370 mtx_null_vector_release();
2371 }
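
/*
	mtx_add_row and mtx_add_col use the expand/renull idiom: the target
	row/col is scattered into an org-indexed array of element pointers
	(mtx_expand_row/mtx_expand_col) so each source element can be
	matched in O(1), after which the array is rezeroed with
	mtx_renull_using_row/col and handed back via
	mtx_null_vector_release().
*/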
2372
2373 /**
2374 	Expands the given row into an array of pointers, indexed on original
2375 	col number. The array is obtained from mtx_null_row_vector().
2376 	Be sure to call mtx_null_row_vector_release() once you are done with
2377 	the vector and have rezeroed it.
2378 */
2379 static struct element_t **mtx_expand_row_series( mtx_matrix_t mtx, int32 org){
2380 struct element_t **arr;
2381 struct element_t *elt;
2382
2383 arr = mtx_null_row_vector(mtx->order);
2384 for( elt=mtx->hdr.row[org]; NOTNULL(elt); elt=elt->next.col )
2385 arr[elt->col] = elt;
2386 return(arr);
2387 }
2388
2389 /**
2390 	Expands the given col into an array of pointers, indexed on original
2391 	row number. The array is obtained from mtx_null_col_vector().
2392 	Be sure to call mtx_null_col_vector_release() once you are done with
2393 	the vector and have rezeroed it.
2394 */
2395 static struct element_t **mtx_expand_col_series(mtx_matrix_t mtx, int32 org){
2396 struct element_t **arr;
2397 struct element_t *elt;
2398
2399 arr = mtx_null_col_vector(mtx->order);
2400 for( elt = mtx->hdr.col[org] ; NOTNULL(elt) ; elt = elt->next.row )
2401 arr[elt->row] = elt;
2402 return(arr);
2403 }
2404
2405 struct add_series_data {
2406 mtx_matrix_t mtx; /* matrix we're operating on */
2407 struct element_t **arr; /* target row/col expansion array */
2408 int32 *tocur; /* col/row permutation vector */
2409 int32 t_org; /* org row/col which is expanded */
2410 };
2411
2412 static struct add_series_data
2413 rsdata={NULL,NULL,NULL,mtx_NONE},
2414 csdata={NULL,NULL,NULL,mtx_NONE};
2415
2416 static void add_series_data_release(void){
2417     /* if the state looks sane and an expansion array is held, release it */
2418 if (NOTNULL(rsdata.mtx) && NOTNULL(rsdata.tocur)
2419 && rsdata.t_org >= 0 && NOTNULL(rsdata.arr)) {
2420 mtx_null_row_vector_release();
2421 }
2422 if (NOTNULL(csdata.mtx) && NOTNULL(csdata.tocur)
2423 && csdata.t_org >= 0 && NOTNULL(csdata.arr)) {
2424 mtx_null_col_vector_release();
2425 }
2426 /* reinit data */
2427
2428 rsdata.mtx = NULL;
2429 rsdata.arr = NULL;
2430 rsdata.tocur = NULL;
2431 rsdata.t_org = mtx_NONE;
2432
2433 csdata.mtx = NULL;
2434 csdata.arr = NULL;
2435 csdata.tocur = NULL;
2436 csdata.t_org = mtx_NONE;
2437 }
2438
2439 void mtx_add_row_series_init(mtx_matrix_t mtx,int32 t_cur,boolean use){
2440
2441 #if MTX_DEBUG
2442 if(!mtx_check_matrix(mtx)) return;
2443 #endif
2444
2445 if ( !(rsdata.mtx) ) {
2446     /* no target row is currently grabbed, so this must be a grabbing call */
2447 if ( mtx && (t_cur >= 0) && (t_cur < mtx->order) ) {
2448 rsdata.mtx = mtx;
2449 rsdata.tocur = mtx->perm.col.org_to_cur;
2450 rsdata.t_org = mtx->perm.row.cur_to_org[t_cur];
2451 rsdata.arr = mtx_expand_row_series(mtx,rsdata.t_org);
2452 } else {
2453 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called for grab with invalid column or mtx. col number %d.\n",t_cur);
2454 }
2455 return;
2456 } else {
2457 /* this is supposed to be a releasing call */
2458 if (t_cur != mtx_NONE) {
2459 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called for release without mtx_NONE.\n");
2460 return;
2461 }
2462 if (mtx != rsdata.mtx) {
2463 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called for release with ungrabbed matrix.\n");
2464 return;
2465 }
2466 if (use) {
2467 mtx_renull_using_row(rsdata.mtx, rsdata.t_org, rsdata.arr);
2468 } else {
2469 mtx_renull_all(rsdata.mtx, rsdata.arr);
2470 }
2471 mtx_null_row_vector_release();
2472 rsdata.mtx = NULL;
2473 rsdata.arr = NULL;
2474 rsdata.tocur = NULL;
2475 rsdata.t_org = mtx_NONE;
2476 }
2477 }
2478
2479 void mtx_add_row_series(int32 s_cur,real64 factor,mtx_range_t *rng){
2480 register int32 org_col;
2481 int32 t_org,*tocur;
2482 struct element_t **arr,*elt;
2483 mtx_matrix_t mtx;
2484
2485 if ( !(rsdata.mtx) ) {
2486 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called without grabbing target row first.\n");
2487 return;
2488 }
2489 mtx = rsdata.mtx;
2490 arr = rsdata.arr;
2491 tocur = rsdata.tocur;
2492 t_org = rsdata.t_org;
2493
2494 #if MTX_DEBUG
2495 if(!mtx_check_matrix(mtx)) return;
2496 #endif
2497
2498 elt = mtx->hdr.row[mtx->perm.row.cur_to_org[s_cur]];
2499 if( rng == mtx_ALL_COLS ) {
2500 for( ; NOTNULL(elt); elt = elt->next.col ) {
2501 if( ISNULL(arr[(org_col=elt->col)]) ) {
2502 arr[org_col] =
2503 mtx_create_element_value(mtx,t_org,org_col,(factor * elt->value));
2504 } else {
2505 arr[org_col]->value += factor * elt->value;
2506 }
2507 }
2508 return;
2509 }
2510   /* fast_in_range is about a 10% winner on the Alpha, and should gain
2511      even more on other platforms. */
2512 if( rng->high >= rng->low ) {
2513 register int32 cur_col, lo,hi;
2514 lo=rng->low; hi= rng->high;
2515 for( ; NOTNULL(elt); elt = elt->next.col ) {
2516 cur_col=tocur[(org_col=elt->col)];
2517 if( fast_in_range(lo,hi,cur_col) ) {
2518 if( NOTNULL(arr[org_col]) ) {
2519 arr[org_col]->value += factor * elt->value;
2520 } else {
2521 arr[org_col] =
2522 mtx_create_element_value(mtx,t_org,org_col,(factor*elt->value));
2523 }
2524 }
2525 }
2526 }
2527 }
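
/*
	A sketch of the grab/add/release protocol for the row series calls;
	not part of the library, and the wrapper name and argument arrays
	are hypothetical. The target row is expanded once, several source
	rows are accumulated into it, and the expansion is then released.
*/
#if 0
static void example_row_accumulate(mtx_matrix_t mtx, int32 t_cur,
	int32 *srows, real64 *coefs, int32 nsrc
){
  int32 i;
  mtx_add_row_series_init(mtx,t_cur,FALSE);  /* grab target row; use is ignored here */
  for( i = 0 ; i < nsrc ; i++ ) {
    mtx_add_row_series(srows[i],coefs[i],mtx_ALL_COLS);
  }
  /* release: t_cur must be mtx_NONE; TRUE rezeroes the expansion by
     walking the (usually short) target row instead of the whole array */
  mtx_add_row_series_init(mtx,mtx_NONE,TRUE);
}
#endif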
2528
2529 void mtx_add_col_series_init(mtx_matrix_t mtx,int32 t_cur,boolean use){
2530
2531 #if MTX_DEBUG
2532 if(!mtx_check_matrix(mtx)) return;
2533 #endif
2534
2535 if ( !(csdata.mtx) ) {
2536 /* this is a grabbing call */
2537 if ( mtx && (t_cur >= 0) && (t_cur < mtx->order) ) {
2538 csdata.mtx = mtx;
2539 csdata.tocur = mtx->perm.row.org_to_cur;
2540 csdata.t_org = mtx->perm.col.cur_to_org[t_cur];
2541 csdata.arr = mtx_expand_col_series(mtx,csdata.t_org);
2542 } else {
2543 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called for grab with invalid row or mtx. row number %d.\n",t_cur);
2544 }
2545 return;
2546 } else {
2547 /* this is supposed to be a releasing call */
2548 if (t_cur != mtx_NONE) {
2549 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called for release without mtx_NONE.\n");
2550 return;
2551 }
2552 if (mtx != csdata.mtx) {
2553 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called for release with ungrabbed matrix.\n");
2554 return;
2555 }
2556 if (use) {
2557 mtx_renull_using_col(csdata.mtx, csdata.t_org, csdata.arr);
2558 } else {
2559 mtx_renull_all(csdata.mtx, csdata.arr);
2560 }
2561 mtx_null_col_vector_release();
2562 csdata.mtx = NULL;
2563 csdata.arr = NULL;
2564 csdata.tocur = NULL;
2565 csdata.t_org = mtx_NONE;
2566 }
2567 }
2568
2569 void mtx_add_col_series(int32 s_cur,real64 factor,mtx_range_t *rng){
2570 register int32 org_row;
2571 int32 t_org,*tocur;
2572 struct element_t **arr,*elt;
2573 mtx_matrix_t mtx;
2574
2575 if ( !(csdata.mtx) ) {
2576 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called for without grabbing target col first.\n");
2577 return;
2578 }
2579 mtx = csdata.mtx;
2580 arr = csdata.arr;
2581 tocur = csdata.tocur;
2582 t_org = csdata.t_org;
2583
2584 #if MTX_DEBUG
2585 if(!mtx_check_matrix(mtx)) return;
2586 #endif
2587
2588 elt = mtx->hdr.col[mtx->perm.col.cur_to_org[s_cur]];
2589 if( rng == mtx_ALL_ROWS ) {
2590 for( ; NOTNULL(elt); elt = elt->next.row ) {
2591 if( ISNULL(arr[(org_row=elt->row)]) ) {
2592 arr[org_row] =
2593 mtx_create_element_value(mtx,org_row,t_org,(factor * elt->value));
2594 } else {
2595 arr[org_row]->value += factor * elt->value;
2596 }
2597 }
2598 return;
2599 }
2600 if( rng->high >= rng->low ) {
2601 register int32 cur_row;
2602
2603 for( ; NOTNULL(elt); elt = elt->next.row ) {
2604 cur_row=tocur[(org_row=elt->row)];
2605 if( in_range(rng,cur_row) ) {
2606 if( NOTNULL(arr[org_row]) ) {
2607 arr[org_row]->value += factor * elt->value;
2608 } else {
2609 arr[org_row] =
2610 mtx_create_element_value(mtx,org_row,t_org,(factor*elt->value));
2611 }
2612 }
2613 }
2614 }
2615 }
2616
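/*
	Note on the mtx_old_add_*_sparse pair: the source is a dense,
	cur-indexed vector. With rng == mtx_ALL_COLS (mtx_ALL_ROWS for the
	col version) a non-NULL ilist restricts the add to the cur indices
	it lists, terminated by the first negative entry. With a finite rng,
	ilist is ignored and the whole range is scanned for nonzeros.
*/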
2617 void mtx_old_add_row_sparse(mtx_matrix_t mtx,
2618 int32 t_cur, /* cur index of target row */
2619 real64 *s_cur, /* dense source row, curindexed */
2620 real64 factor, /* coefficient */
2621 mtx_range_t *rng,
2622 int32 *ilist /* list to add over */
2623 ){
2624 int32 t_org,*toorg,cindex,orgcindex,hilim;
2625 struct element_t **arr;
2626 real64 value;
2627
2628 #if MTX_DEBUG
2629 if(!mtx_check_matrix(mtx)) return;
2630 #endif
2631
2632   if (factor==D_ZERO) return; /* adding 0 is rather silly */
2633
2634 if( rng == mtx_ALL_COLS ) {
2635 t_org = mtx->perm.row.cur_to_org[t_cur]; /* org of target row */
2636 arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
2637 toorg = mtx->perm.col.cur_to_org; /* current col perm */
2638 hilim=mtx->order;
2639
2640 if (!ilist) { /* FULL ROW CASE no ilist */
2641 for (cindex=0; cindex< hilim; cindex++) {
2642 if ((value=s_cur[cindex])!=D_ZERO) {
2643 if ( ISNULL(arr[(orgcindex=toorg[cindex])]) ) { /* add element */
2644 arr[orgcindex] =
2645 mtx_create_element_value(mtx,t_org,orgcindex,(factor * value));
2646 } else { /* increment element */
2647 arr[orgcindex]->value += factor * value;
2648 }
2649 }
2650 }
2651 } else { /* SPARSE ROW CASE with ilist */
2652 int32 i;
2653 i=0;
2654 while ((cindex=ilist[i])>=0) {
2655 value=s_cur[cindex];
2656 if ( ISNULL(arr[(orgcindex=toorg[cindex])]) ) { /* add element */
2657 arr[orgcindex] =
2658 mtx_create_element_value(mtx,t_org,orgcindex,(factor * value));
2659 } else { /* increment element */
2660 arr[orgcindex]->value += factor * value;
2661 }
2662 i++;
2663 }
2664 }
2665 mtx_renull_using_row(mtx,t_org,arr);
2666 } else if( rng->high >= rng->low ) { /* DENSE RANGE CASE */
2667 t_org = mtx->perm.row.cur_to_org[t_cur]; /* org of target row */
2668 arr = mtx_expand_row(mtx,t_org); /* Expand the target row */
2669 toorg = mtx->perm.col.cur_to_org; /* current col perm */
2670 hilim = rng->high;
2671
2672 for (cindex=rng->low; cindex<= hilim; cindex++) {
2673 if ((value=s_cur[cindex])!=D_ZERO) {
2674 if ( ISNULL(arr[(orgcindex=toorg[cindex])]) ) { /* add element */
2675 arr[orgcindex] =
2676 mtx_create_element_value(mtx,t_org,orgcindex,(factor * value));
2677 } else { /* increment element */
2678 arr[orgcindex]->value += factor * value;
2679 }
2680 }
2681 }
2682 mtx_renull_using_row(mtx,t_org,arr);
2683 }
2684 mtx_null_vector_release();
2685 }
2686
2687 void mtx_old_add_col_sparse(mtx_matrix_t mtx,
2688 int32 t_cur, /* cur index of target col */
2689 real64 *s_cur, /* dense source col, curindexed */
2690 real64 factor, /* coefficient */
2691 mtx_range_t *rng, /* range to add over or */
2692 int32 *ilist /* list to add over */
2693 ){
2694 int32 t_org,*toorg,rowindex,orgrowindex,hilim;
2695 struct element_t **arr;
2696 real64 value;
2697
2698 #if MTX_DEBUG
2699 if(!mtx_check_matrix(mtx)) return;
2700 #endif
2701
2702 if (factor==D_ZERO) return;
2703
2704 if( rng == mtx_ALL_ROWS ) {
2705 t_org = mtx->perm.col.cur_to_org[t_cur]; /* org of target col */
2706 arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
2707 toorg = mtx->perm.row.cur_to_org; /* current row perm */
2708 hilim=mtx->order;
2709
2710 if (!ilist) { /* FULL COL CASE no ilist */
2711 for (rowindex=0; rowindex< hilim; rowindex++) {
2712 if ((value=s_cur[rowindex])!=D_ZERO) {
2713 if ( ISNULL(arr[(orgrowindex=toorg[rowindex])]) ) { /* add element */
2714 arr[orgrowindex] =
2715 mtx_create_element_value(mtx,orgrowindex,t_org,(factor * value));
2716 } else { /* increment element */
2717 arr[orgrowindex]->value += factor * value;
2718 }
2719 }
2720 }
2721 } else { /* SPARSE COL CASE with ilist */
2722 int32 i;
2723 i=0;
2724 while ((rowindex=ilist[i])>=0) {
2725 value=s_cur[rowindex];
2726 if ( ISNULL(arr[(orgrowindex=toorg[rowindex])]) ) { /* add element */
2727 arr[orgrowindex] =
2728 mtx_create_element_value(mtx,orgrowindex,t_org,(factor * value));
2729 } else { /* increment element */
2730 arr[orgrowindex]->value += factor * value;
2731 }
2732 i++;
2733 }
2734 }
2735 mtx_renull_using_col(mtx,t_org,arr);
2736
2737 } else if( rng->high >= rng->low ) { /* DENSE RANGE CASE */
2738 t_org = mtx->perm.col.cur_to_org[t_cur]; /* org of target col */
2739 arr = mtx_expand_col(mtx,t_org); /* Expand the target col */
2740 toorg = mtx->perm.row.cur_to_org; /* current row perm */
2741 hilim = rng->high;
2742
2743 for (rowindex=rng->low; rowindex<= hilim; rowindex++) {
2744 if ((value=s_cur[rowindex])!=D_ZERO) {
2745 if ( ISNULL(arr[(orgrowindex=toorg[rowindex])]) ) { /* add element */
2746 arr[orgrowindex] =
2747 mtx_create_element_value(mtx,orgrowindex,t_org,(factor * value));
2748 } else { /* increment element */
2749 arr[orgrowindex]->value += factor * value;
2750 }
2751 }
2752 }
2753 mtx_renull_using_col(mtx,t_org,arr);
2754 }
2755 mtx_null_vector_release();
2756 }
2757
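/**
	Returns an estimate, in bytes, of the memory held by mtx: the matrix
	and slave headers, the shared element store, the row/col header
	arrays, block data, and the four permutation arrays. A slave reports
	only its header and its row/col header arrays.
*/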
2758 size_t mtx_size(mtx_matrix_t mtx) {
2759 size_t size=0;
2760 if (ISNULL(mtx)) return size;
2761 size += (1+mtx->nslaves)*sizeof(struct mtx_header);
2762 if (ISSLAVE(mtx)) {
2763 return 2*sizeof(struct element_t *)*(size_t)mtx->capacity +size;
2764 }
2765 size += mem_sizeof_store(mtx->ms);
2766 /* headers */
2767 size += (1 + mtx->nslaves) * (size_t)mtx->capacity *
2768 (size_t)2 * sizeof(struct element_t *);
2769 /* block data */
2770 if (mtx->data->nblocks >0)
2771 size += sizeof(mtx_region_t)*(size_t)(mtx->data->nblocks-1);
2772 /* permutations */
2773 size += (size_t)4*(sizeof(int32)*(size_t)mtx->capacity);
2774 return size;
2775 }
2776
2777 size_t mtx_chattel_size(mtx_matrix_t mtx) {
2778 size_t size=0;
2779 int32 i;
2780 if (ISNULL(mtx) || ISSLAVE(mtx)) return size;
2781 /* headers */
2782 size += (mtx->nslaves)*sizeof(struct mtx_header);
2783 /* incidence */
2784 for (i=0; i <mtx->nslaves; i++) {
2785     size += (size_t)mtx_nonzeros_in_region(mtx->slaves[i],mtx_ENTIRE_MATRIX) * sizeof(struct element_t); /* bytes, not a bare element count */
2786 }
2787 /*nz headers */
2788 size += mtx->nslaves * (size_t)mtx->capacity *
2789 (size_t)2 * sizeof(struct element_t *);
2790 /* block data */
2791 /* permutations */
2792 return size;
2793 }
2794
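/**
	Flushes the statically cached scratch vectors (null mark, null
	vector, and the null row/col vectors) by requesting each at size 0.
*/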
2795 void mtx_free_reused_mem(void){
2796 (void)mtx_null_mark((int32)0);
2797 (void)mtx_null_vector((int32)0);
2798 (void)mtx_null_row_vector((int32)0);
2799 (void)mtx_null_col_vector((int32)0);
2800 }
2801
2802
2803 void mtx_write_sparse(FILE *fp,mtx_sparse_t *v){
2804 int32 k;
2805 if (NOTNULL(v) && NOTNULL(fp)) {
2806 FPRINTF(fp,"len %d, cap %d\n",v->len,v->cap);
2807 } else {
2808 ERROR_REPORTER_HERE(ASC_PROG_ERR,"called with NULL.\n");
2809 return;
2810 }
2811 if ( NOTNULL(v->data) && NOTNULL(v->idata) ){
2812 FPRINTF(fp,"valid data:\n");
2813 for (k=0; k < v->len; k++) {
2814 FPRINTF(fp,"(%d) %d %.18g\n",k,v->idata[k],v->data[k]);
2815 }
2816 } else {
2817 FPRINTF(fp,"Invalid data pointers found.\n");
2818 }
2819 }
2820
2821 void mtx_write_region_human_f(FILE *fp,mtx_matrix_t mtx,
2822 mtx_region_t *greg,int colwise,int orgmajor
2823 ){
2824 mtx_coord_t nz,org;
2825 real64 value;
2826 mtx_region_t reg;
2827
2828 if (greg!=mtx_ENTIRE_MATRIX) {
2829 reg=(*greg);
2830 } else {
2831 mtx_region(&reg,0,mtx->order-1,0,mtx->order-1);
2832 }
2833 /* current col perm driven */
2834 if (colwise && !orgmajor) {
2835 for(nz.col = reg.col.low; nz.col <= reg.col.high; ++(nz.col) ) {
2836 nz.row = mtx_FIRST;
2837 /* if col not empty, print header and first element */
2838 if( value = mtx_next_in_col(mtx,&nz,&(reg.row)), nz.row != mtx_LAST ) {
2839 FPRINTF(fp," Col %d (org %d)\n", nz.col, mtx_col_to_org(mtx,nz.col));
2840 FPRINTF(fp," Row %d (org %d) has value %g\n",
2841 nz.row, mtx_row_to_org(mtx,nz.row), value);
2842 }
2843 while( value = mtx_next_in_col(mtx,&nz,&(reg.row)),
2844 nz.row != mtx_LAST ) {
2845 FPRINTF(fp," Row %d (org %d) has value %g\n",
2846 nz.row, mtx_row_to_org(mtx,nz.row), value);
2847 }
2848 }
2849 return;
2850 }
2851 /* current row perm driven */
2852 if (!colwise && !orgmajor) {
2853 for(nz.row = reg.row.low; nz.row <= reg.row.high; ++(nz.row) ) {
2854 nz.col = mtx_FIRST;
2855 if ( value = mtx_next_in_row(mtx