/*
 *  mtx: Ascend Sparse Matrix Package
 *  by Karl Michael Westerberg
 *  Created: 2/6/90
 *  Version: $Revision: 1.9 $
 *  Version control file: $RCSfile: mtx_use_only.c,v $
 *  Date last modified: $Date: 2000/01/25 02:27:12 $
 *  Last modified by: $Author: ballan $
 *
 *  This file is part of the SLV solver.
 *
 *  Copyright (C) 1990 Karl Michael Westerberg
 *  Copyright (C) 1993 Joseph Zaher
 *  Copyright (C) 1994 Joseph Zaher, Benjamin Andrew Allan
 *  Copyright (C) 1995 Kirk Andre Abbott, Benjamin Andrew Allan
 *  Copyright (C) 1996 Benjamin Andrew Allan
 *
 *  The SLV solver is free software; you can redistribute
 *  it and/or modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2 of the
 *  License, or (at your option) any later version.
 *
 *  The SLV solver is distributed in hope that it will be
 *  useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  the program; if not, write to the Free Software Foundation, Inc., 675
 *  Mass Ave, Cambridge, MA 02139 USA.  Check the file named COPYING.
 *  COPYING is found in ../compiler.
 */
33 |
#include "utilities/ascConfig.h" |
34 |
#include "utilities/ascMalloc.h" |
35 |
#include "utilities/mem.h" |
36 |
#include "solver/mtx.h" |
37 |
/* grab our private parts */ |
38 |
#define __MTX_C_SEEN__ |
39 |
#include "solver/mtx_use_only.h" |
40 |
|
41 |
/* Free ptr only when it is non-NULL (ascfree may not tolerate NULL).
 * Wrapped in do/while(0) so the macro is safe as the body of an
 * if/else without braces — the bare `if` form it replaces was a
 * dangling-else hazard. */
#define free_unless_null(ptr) do { if( NOTNULL(ptr) ) ascfree(ptr); } while(0)

/* Stream that all mtx diagnostics are FPRINTFed to; set up elsewhere. */
FILE *g_mtxerr;
44 |
|
45 |
struct element_t *mtx_find_element(mtx_matrix_t mtx, int32 org_row,
                                   int32 org_col)
/**
 ***  Scans the linked list for row org_row and returns the element in
 ***  column org_col, or NULL if the row holds no such element.
 **/
{
  register struct element_t *e;

  e = mtx->hdr.row[org_row];
  while( NOTNULL(e) && e->col != org_col ) {
    e = e->next.col;
  }
  return e;
}
60 |
|
61 |
struct element_t *mtx_create_element(mtx_matrix_t mtx,
                                     int32 org_row, int32 org_col)
/**
 ***  Creates the given element and returns a pointer to it.  The value is
 ***  initially zero.  The new element is linked at the head of both its
 ***  row and column lists.  Returns NULL if allocation fails (previously
 ***  the allocation was used unchecked).
 **/
{
  register struct element_t *elt;

#if MTX_DEBUG
  /* duplicate elements corrupt sparse arithmetic; complain if found */
  if( NOTNULL(mtx_find_element(mtx,org_row,org_col)) ) {
    FPRINTF(g_mtxerr,"ERROR: (mtx) create_element\n");
    FPRINTF(g_mtxerr," Element (%d,%d) already exists.\n",
            org_row,org_col);
  }
#endif
  elt = (struct element_t *)mem_get_element(mtx->ms);
  if( ISNULL(elt) ) {
    /* alloc return was previously unchecked; fail loudly instead of
       dereferencing NULL. */
    FPRINTF(g_mtxerr,"ERROR: (mtx) create_element: allocation failed.\n");
    return NULL;
  }
  elt->value = 0.0;
  elt->row = org_row;
  elt->col = org_col;
  /* splice onto the head of both the row list and the column list */
  elt->next.col = mtx->hdr.row[org_row];
  elt->next.row = mtx->hdr.col[org_col];
  mtx->hdr.row[org_row] = mtx->hdr.col[org_col] = elt;
  return(elt);
}
87 |
|
88 |
struct element_t *mtx_create_element_value(mtx_matrix_t mtx, |
89 |
int32 org_row, |
90 |
int32 org_col, |
91 |
real64 val) |
92 |
{ |
93 |
register struct element_t *elt; |
94 |
|
95 |
#if MTX_DEBUG |
96 |
if( NOTNULL(mtx_find_element(mtx,org_row,org_col)) ) { |
97 |
FPRINTF(g_mtxerr,"ERROR: (mtx) create_element_value\n"); |
98 |
FPRINTF(g_mtxerr," Element (%d,%d) already exists.\n", |
99 |
org_row,org_col); |
100 |
} |
101 |
#endif |
102 |
elt = (struct element_t *)mem_get_element(mtx->ms); |
103 |
/* guess who didn't check for the alloc return. not needed here. */ |
104 |
elt->value = val; |
105 |
elt->row = org_row; |
106 |
elt->col = org_col; |
107 |
elt->next.col = mtx->hdr.row[org_row]; |
108 |
elt->next.row = mtx->hdr.col[org_col]; |
109 |
mtx->hdr.row[org_row] = mtx->hdr.col[org_col] = elt; |
110 |
return(elt); |
111 |
} |
112 |
|
113 |
/* new version */ |
114 |
struct element_t *mtx_next_col(register struct element_t *elt, |
115 |
mtx_range_t *rng, int32 *tocur) |
116 |
{ |
117 |
if( NOTNULL(elt) ) { |
118 |
elt = elt->next.col; |
119 |
} else { |
120 |
return elt; |
121 |
} |
122 |
/* use of not_in_range in the following is a loser on alphas */ |
123 |
/* due to the price of assigning the range limits */ |
124 |
if( rng != mtx_ALL_COLS ) { |
125 |
if( rng->high < rng->low ) return NULL; |
126 |
while( NOTNULL(elt) && !in_range(rng,tocur[elt->col]) ) |
127 |
elt = elt->next.col; |
128 |
} |
129 |
return( elt ); |
130 |
} |
131 |
|
132 |
/* original version */ |
133 |
struct element_t *mtx_next_row(register struct element_t *elt, |
134 |
mtx_range_t *rng, int32 *tocur) |
135 |
{ |
136 |
if( NOTNULL(elt) ) elt = elt->next.row; |
137 |
if( rng != mtx_ALL_ROWS ) { |
138 |
if( rng->high < rng->low ) return NULL; |
139 |
while( NOTNULL(elt) && !in_range(rng,tocur[elt->row]) ) |
140 |
elt = elt->next.row; |
141 |
} |
142 |
return( elt ); |
143 |
} |
144 |
|
145 |
|
146 |
int32 *mtx_alloc_perm(int32 cap)
/**
 ***  Allocates a permutation vector with cap usable entries preceded by
 ***  a -1 sentinel.  The returned pointer is one past the sentinel, so
 ***  perm[-1] == -1; release with mtx_free_perm.  Returns NULL if the
 ***  allocation fails (previously the result was written to unchecked).
 **/
{
  int32 *perm;
  perm = (int32 *)ascmalloc( (cap+1)*sizeof(int32) );
  if( ISNULL(perm) ) {
    return NULL;  /* was *perm = -1 on a NULL pointer before */
  }
  *perm = -1;       /* sentinel slot hidden before the visible vector */
  return( perm+1 );
}
153 |
|
154 |
void mtx_copy_perm( int32 *tarperm, int32 *srcperm, int32 cap)
/**
 ***  Copies the first cap entries of srcperm into tarperm.  When tarperm
 ***  came from mtx_alloc_perm the hidden -1 sentinel is already present,
 ***  so only the visible entries need copying.
 **/
{
  mem_copy_cast(srcperm,tarperm,cap*sizeof(int32));
}
163 |
|
164 |
#define free_perm(perm) ascfree( (POINTER)((perm)-1) ) |
165 |
/** |
166 |
*** Frees a permutation vector. |
167 |
**/ |
168 |
|
169 |
void mtx_free_perm(int32 *perm) |
170 |
{ |
171 |
free_perm(perm); |
172 |
} |
173 |
|
174 |
/* some local scope globals to keep memory so we aren't constantly
   reallocating */
/* Reuseable scratch buffers lent out by the mtx_null_* macros via
 * mtx_null_vector_f.  Initializer order is presumably
 * {arr, capacity, entry_size, last_line} — confirm against the struct
 * declaration in mtx_use_only.h.  last_line != 0 marks a buffer as
 * currently lent out. */
struct reusable_data_vector
  g_mtx_null_index_data = {NULL,0,sizeof(int32),0},
  g_mtx_null_sum_data = {NULL,0,sizeof(real64),0},
  g_mtx_null_mark_data = {NULL,0,sizeof(char),0},
  g_mtx_null_vector_data = {NULL,0,sizeof(struct element_t *),0},
  g_mtx_null_col_vector_data = {NULL,0,sizeof(struct element_t *),0},
  g_mtx_null_row_vector_data = {NULL,0,sizeof(struct element_t *),0};
183 |
|
184 |
/* the psychologist function. resets data to zero after some checking */ |
185 |
static void mtx_sanify_reuseable(struct reusable_data_vector *r) |
186 |
{ |
187 |
if (r->last_line) { |
188 |
r->last_line = ZERO; |
189 |
if (r->capacity > 0 && r->arr != NULL) { |
190 |
/* rezero the buffer */ |
191 |
switch (r->entry_size) { |
192 |
case sizeof(int32): |
193 |
mtx_zero_int32(r->arr,r->capacity); |
194 |
break; |
195 |
case sizeof(real64): |
196 |
mtx_zero_real64(r->arr,r->capacity); |
197 |
break; |
198 |
default: |
199 |
mtx_zero_char(r->arr, r->capacity*r->entry_size); |
200 |
break; |
201 |
} |
202 |
} |
203 |
} |
204 |
if (r->capacity && r->arr == NULL) { |
205 |
/* forget it exists (capacity && !arr --> insanity) */ |
206 |
r->capacity = 0; |
207 |
r->arr = NULL; |
208 |
} |
209 |
} |
210 |
|
211 |
/* a function to insure the sanity of all the reuseable vectors */ |
212 |
void mtx_reset_null_vectors(void) { |
213 |
mtx_sanify_reuseable( &g_mtx_null_index_data); |
214 |
mtx_sanify_reuseable( &g_mtx_null_sum_data); |
215 |
mtx_sanify_reuseable( &g_mtx_null_mark_data); |
216 |
mtx_sanify_reuseable( &g_mtx_null_row_vector_data); |
217 |
mtx_sanify_reuseable( &g_mtx_null_col_vector_data); |
218 |
mtx_sanify_reuseable( &g_mtx_null_vector_data); |
219 |
} |
220 |
|
221 |
/* get a chunk of memory (probably already allocated) and return
   to user who will reset it to 0s before releasing it. */
void *mtx_null_vector_f(int32 cap, int line, CONST char *file,
                        struct reusable_data_vector *ptr, char *fn)
/**
 ***  Call only via the macros.
 ***  Lends out ptr->arr, growing it to at least cap entries if needed.
 ***  cap == 0 instead frees the buffer and returns NULL.  The borrower
 ***  is expected to rezero the buffer before calling the matching
 ***  release function.  line/file/fn identify the caller for the
 ***  diagnostics below.
 ***  Bugs: does not check for null return.
 ***  but then if this returns null, a crash from some other cause
 ***  is imminent anyway.
 **/
{
  /* A nonzero last_line means the previous borrower never released the
     buffer, so it cannot be trusted to be zero.  Warn, and rezero it
     ourselves when it will be reused as-is (cap fits the current
     capacity); if cap forces a realloc below, asccalloc zeroes it. */
  if (ptr->last_line) {
    FPRINTF(g_mtxerr,"Warning: (%s) mtx_%s called while data in use\n",file,fn);
    FPRINTF(g_mtxerr," Last called line: %d, this call line %d\n",
            ptr->last_line,line);
    if (cap > 0 && cap <= ptr->capacity) {
      /* zeroing routine chosen by entry width, as in mtx_sanify_reuseable */
      switch (ptr->entry_size) {
      case sizeof(int32):
        mtx_zero_int32(ptr->arr,ptr->capacity);
        break;
      case sizeof(real64):
        mtx_zero_real64(ptr->arr,ptr->capacity);
        break;
      default:
        mtx_zero_char(ptr->arr, ptr->capacity*ptr->entry_size);
        break;
      }
    }
  }
  /* cap == 0 is the deallocation request: drop the buffer entirely */
  if (!cap) {
    free_unless_null( (POINTER)ptr->arr );
    ptr->capacity = ZERO;
    ptr->arr = NULL;
    ptr->last_line = ZERO;
    return( ptr->arr );
  }

  /* grow (never shrink) the buffer; asccalloc returns zeroed storage */
  if( cap > ptr->capacity ) {
    free_unless_null( (POINTER)ptr->arr );
    ptr->arr = asccalloc( cap, ptr->entry_size );
    if (ISNULL(ptr->arr)) {
      /* allocation failed: record the empty state and hand back NULL */
      ptr->capacity = ZERO;
      ptr->last_line = ZERO;
      FPRINTF(g_mtxerr,"ERROR: memory allocation failed in mtx_%s.\n",fn);
      return ptr->arr;
    }
    ptr->capacity = cap;
  }
  ptr->last_line = line;  /* mark the buffer as lent out from this line */
  return( ptr->arr );
}
272 |
|
273 |
/** |
274 |
*** Call only via the macros. |
275 |
**/ |
276 |
void mtx_null_vector_release_f(int line, CONST char *file, |
277 |
struct reusable_data_vector *ptr, char *fn) |
278 |
{ |
279 |
if (ptr->last_line == 0) { |
280 |
FPRINTF(g_mtxerr, |
281 |
"Warning: (%s) mtx_%s_release called (line %d) on vector not in use\n", |
282 |
file, fn, line); |
283 |
} |
284 |
ptr->last_line = 0; |
285 |
} |
286 |
|
287 |
/********************************************************************/ |
288 |
|
289 |
/* Expand row org into a dense array of mtx->order element pointers
 * indexed by org column; positions with no element stay NULL.  The
 * array comes from the mtx_null_vector pool, so the caller is expected
 * to renull and release it. */
struct element_t **mtx_expand_row(mtx_matrix_t mtx, int32 org)
{
  struct element_t **dense;
  struct element_t *e;

  dense = mtx_null_vector(mtx->order);
  e = mtx->hdr.row[org];
  while( NOTNULL(e) ) {
    dense[e->col] = e;
    e = e->next.col;
  }
  return dense;
}
299 |
|
300 |
/* Column analog of mtx_expand_row: expand column org into a dense
 * array of mtx->order element pointers indexed by org row; positions
 * with no element stay NULL.  The array comes from the mtx_null_vector
 * pool, so the caller is expected to renull and release it. */
struct element_t **mtx_expand_col(mtx_matrix_t mtx, int32 org)
{
  struct element_t **dense;
  struct element_t *e;

  dense = mtx_null_vector(mtx->order);
  e = mtx->hdr.col[org];
  while( NOTNULL(e) ) {
    dense[e->row] = e;
    e = e->next.row;
  }
  return dense;
}
310 |
|
311 |
/* Re-NULL exactly the entries of arr that mtx_expand_row(mtx,org)
 * filled, by rewalking the row's element list. */
void mtx_renull_using_row(mtx_matrix_t mtx, int32 org,
                          struct element_t **arr)
{
  struct element_t *e;

  e = mtx->hdr.row[org];
  while( NOTNULL(e) ) {
    arr[e->col] = NULL;
    e = e->next.col;
  }
}
319 |
|
320 |
/* Re-NULL exactly the entries of arr that mtx_expand_col(mtx,org)
 * filled, by rewalking the column's element list. */
void mtx_renull_using_col(mtx_matrix_t mtx, int32 org,
                          struct element_t **arr)
{
  struct element_t *e;

  e = mtx->hdr.col[org];
  while( NOTNULL(e) ) {
    arr[e->row] = NULL;
    e = e->next.row;
  }
}
328 |
|
329 |
void mtx_renull_all(mtx_matrix_t mtx, struct element_t **arr)
/**
 ***  Makes arr NULLed again, assuming it is size mtx->order.
 ***  Quietly does nothing when either argument is NULL.
 **/
{
  if (ISNULL(mtx) || ISNULL(arr)) {
    return;
  }
  zero(arr,mtx->order,struct element_t *);
}
337 |
|
338 |
#undef __MTX_C_SEEN__ |