/*
 *  mtx2: Ascend Sparse Matrix Package
 *  by Benjamin Andrew Allan
 *  Derived from mtx by Karl Michael Westerberg
 *  Created: 5/3/90
 *  Version: $Revision: 1.13 $
 *  Version control file: $RCSfile: mtx_basic.h,v $
 *  Date last modified: $Date: 2000/01/25 02:27:10 $
 *  Last modified by: $Author: ballan $
 *
 *  This file is part of the SLV solver.
 *
 *  Copyright (C) 1990 Karl Michael Westerberg
 *  Copyright (C) 1993 Joseph Zaher
 *  Copyright (C) 1994 Joseph Zaher, Benjamin Andrew Allan
 *  Copyright (C) 1995 Benjamin Andrew Allan, Kirk Andre' Abbott
 *  Copyright (C) 1996 Benjamin Andrew Allan
 *
 *  The SLV solver is free software; you can redistribute
 *  it and/or modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2 of the
 *  License, or (at your option) any later version.
 *
 *  The SLV solver is distributed in the hope that it will be
 *  useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  the program; if not, write to the Free Software Foundation, Inc., 675
 *  Mass Ave, Cambridge, MA 02139 USA.  Check the file named COPYING.
 *  COPYING is found in ../compiler.
 */
#ifndef __MTX_BASIC_H_SEEN__
#define __MTX_BASIC_H_SEEN__
/* requires #include "mtx.h" */


/***********************************************************************\
  mtx types creation/destruction/info routines
\***********************************************************************/
extern boolean check_matrix(mtx_matrix_t, char *, int);
#define mtx_check_matrix(m) check_matrix((m),__FILE__,__LINE__)
/* (mtx_matrix_t, char *, int); */
/**
 **  Use mtx_check_matrix in all cases.
 **  If the mtx given is ok, returns 1, otherwise returns 0.
 **  Spits complaints to stderr if not ok.
 **  If mtx is a slave, also checks integrity on the master mtx.
 mm macro extern boolean mtx_check_matrix(m)
 **/

extern boolean check_sparse(const mtx_sparse_t * const, char *, int);
#define mtx_check_sparse(sp) check_sparse((sp),__FILE__,__LINE__)
/* (mtx_sparse_t *, char *, int); */
/**
 **  Use mtx_check_sparse in all cases.
 **  If the sparse given is ok, returns 1, otherwise returns 0.
 **  Spits complaints to stderr if not ok.
 mm macro extern boolean mtx_check_sparse(sp)
 **/

extern mtx_coord_t *mtx_coord(mtx_coord_t *, int32, int32);
extern mtx_range_t *mtx_range(mtx_range_t *, int32, int32);
extern mtx_region_t *mtx_region(mtx_region_t *, int32, int32,
                                int32, int32);
/**
 ***  coordp  = mtx_coord(coordp,row,col)
 ***  rangep  = mtx_range(rangep,low,high)
 ***  regionp = mtx_region(regionp,rowlow,rowhigh,collow,colhigh)
 ***  mtx_coord_t *coordp;
 ***  mtx_range_t *rangep;
 ***  mtx_region_t *regionp;
 ***  int32 row,col,low,high,rowlow,rowhigh,collow,colhigh;
 ***
 ***  Places the values of arguments 2,3,... into the structure pointed to
 ***  by argument 1 and returns the pointer to it again.
 ***
 ***  Typical usage:
 ***  {
 ***    mtx_coord_t coord;
 ***    value = mtx_value(matrix,mtx_coord(&coord,row,col));
 ***  }
 **/

extern void mtx_zero_int32(int32 *, int);
extern void mtx_zero_real64(real64 *, int);
extern void mtx_zero_ptr(void **, int);
#define mtx_zero_char(ar,len) if ((ar)!=NULL) memset((ar),0,(len))
/**
 ***  mtx_zero_XXX(ptr,length);
 ***  Zeros a vector of the specified length and type.
 ***  Ignores NULL input vectors.
 **/

extern mtx_matrix_t mtx_create();
/**
 ***  matrix = mtx_create()
 ***  mtx_matrix_t matrix;
 ***
 ***  Creates a 0-order matrix and returns a handle to it.
 ***  The matrix created is a master with no slaves.
 **/

extern mtx_matrix_t mtx_create_slave(mtx_matrix_t);
/**
 ***  matrix = mtx_create_slave(master)
 ***  mtx_matrix_t matrix, master;
 ***
 ***  Creates and returns a matrix which shares all structural
 ***  information EXCEPT incidence pattern/values with the
 ***  master matrix given. A master may have as many slaves
 ***  as desired. Slaves cannot have slaves. Slaves cannot
 ***  become masters. Copies of slave matrices are totally
 ***  independent of both slave and master.
 ***
 ***  All structural manipulation/query function calls will
 ***  be passed up to the master. When the master is resized,
 ***  all its slaves are resized.
 ***
 ***  This function will return NULL if called on a matrix which
 ***  is a slave or otherwise bad.
 ***
 ***  Slave matrices exist to:
 ***  - Let the user keep a whole stack of matrices in permutation
 ***    synchrony for the price of structural operations on only
 ***    the master matrix. Principally, this reduces mtx_drag cost.
 ***  - Reduce the memory overhead of a matrix duplicate
 ***    when the duplicate is to be kept in structural synchrony.
 ***    The additional memory required to maintain a slave matrix
 ***    is the cost of the incidence stored in the slave plus
 ***    2*mtx_capacity(master)*sizeof(double) + sizeof(void *).
 ***  - Demonstrate that C can knock the cookies out of FORTRAN
 ***    in speed while delivering twice the semantic content
 ***    in the output of a matrix factorization routine.
 **/
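
/**
 ***  Example (an added sketch, not part of the original API notes):
 ***  keeping a cheap structural twin of a matrix. The names A, Asave
 ***  and the order n are illustrative assumptions only.
 ***
 ***     mtx_matrix_t A, Asave;
 ***     A = mtx_create();
 ***     mtx_set_order(A,n);
 ***     Asave = mtx_create_slave(A);
 ***
 ***  Asave now follows every resize and permutation applied to A but
 ***  holds its own incidence/values, so factorization results can be
 ***  parked there without structural bookkeeping of its own.
 **/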

extern void mtx_debug_redirect_freeze();
/**
 ***  Stops future mtx_create/mtx_create_slave from
 ***  redirecting errors back to stderr.
 **/

extern void mtx_destroy(mtx_matrix_t);
/**
 ***  mtx_destroy(matrix);
 ***  mtx_matrix_t matrix;
 ***
 ***  Destroys the matrix, freeing the memory it occupied.
 ***  Does nothing if matrix fails mtx_check_matrix.
 ***  Destroys all slaves associated with the matrix if
 ***  it is a master. If mtx is a slave, its master must
 ***  also pass check_matrix before the slave is destroyed.
 **/

extern mtx_sparse_t *mtx_create_sparse(int32);
/**
 ***  sp = mtx_create_sparse(capacity);
 ***  mtx_sparse_t *sp;
 ***  int32 capacity;
 ***
 ***  Creates a sparse vector with the capacity given and returns it.
 ***  The length of the sparse data is initialized to 0.
 ***  If insufficient memory is available, returns NULL.
 ***
 **/

extern void mtx_destroy_sparse(mtx_sparse_t *);
/**
 ***  mtx_destroy_sparse(sp);
 ***  mtx_sparse_t *sp;
 ***
 ***  Given a pointer to the sparse structure, deallocates everything to
 ***  do with the structure, including the structure itself. The pointer
 ***  sp is invalidated. Handles NULL gracefully.
 **/

extern void mtx_destroy_blocklist(mtx_block_t *);
/**
 ***  mtx_destroy_blocklist(bl);
 ***  mtx_block_t *bl;
 ***
 ***  Given a pointer to the block structure, deallocates everything to
 ***  do with the structure, including the structure itself. The pointer
 ***  bl is invalidated. Handles NULL gracefully.
 ***  If nblocks is 0 and the region array is not NULL, the region pointer
 ***  will be abandoned.
 **/

extern mtx_matrix_t mtx_duplicate_region(mtx_matrix_t, mtx_region_t *, real64);
/**
 -$- slave = mtx_duplicate_region(matrix,region,drop);
 ***
 ***  mtx_matrix_t slave, matrix;
 ***  mtx_region_t *region;
 ***  real64 drop;
 ***
 ***  Creates a slave of the matrix given (or of the master of the matrix
 ***  given). This operator provides a low overhead way of
 ***  saving a matrix region for later computation. The slave matrix
 ***  returned is kept permuted to the same ordering as the master from
 ***  which it was created.
 ***
 ***  The incidence is copied from the matrix given, even if the matrix
 ***  given is the slave of another matrix. During the copy, all Aij such
 ***  that abs(Aij) < drop in the matrix given are ignored.
 ***
 ***  If you want a slave but do not want to copy any incidence, you should
 ***  just use mtx_create_slave(matrix); instead.
 ***
 -$- Does nothing if matrix fails mtx_check_matrix; returns NULL.
 **/

extern mtx_matrix_t mtx_copy_options(mtx_matrix_t, boolean,
                                     boolean, mtx_region_t *, real64);
/**
 -$- copy = mtx_copy_options(matrix,blocks,incidence,region,drop);
 ***
 ***  All operations can take either a master or a slave matrix
 ***  and all return a NEW master. If you want a slave copy, see
 ***  mtx_duplicate_region.
 ***
 ***  MACROS:
 -$- copy = mtx_copy(matrix)
 -$- copy = mtx_copy_region(matrix,region)
 -$- copy = mtx_copy_region_drop(matrix,region,drop)
 -$- copy = mtx_copy_wo_incidence(matrix)
 -$- copy = mtx_copy_complete(matrix)
 ***  mtx_matrix_t copy,matrix;
 ***  boolean blocks, incidence;
 ***  mtx_region_t *region;
 ***  real64 drop;
 ***
 ***  copy = mtx_copy_complete(mtx):
 ***  Copies everything to do with a mtx. Copying the block information
 ***  is usually redundant because the caller should use the block info
 ***  from the original matrix, but in the odd event that such is not
 ***  the case, we provide the copy_complete operator.
 ***  Note that if you are copying a matrix created by mtx_copy
 ***  or mtx_copy_wo_incidence then the copy returned will not have
 ***  block data.
 ***
 ***  mtx_copy(mtx):
 ***  Copies the matrix except for the block structure and returns
 ***  a handle to the new copy. Most commonly used.
 ***
 ***  mtx_copy_region(matrix,region):
 ***  Copies the matrix excluding the block structure and any
 ***  incidence outside the region given.
 ***
 ***  mtx_copy_region_drop(matrix,region,drop):
 ***  As mtx_copy_region, except that incidence of magnitude < abs(drop) is
 ***  not copied. Note that in C, 0.0 is not < 0.0; use a really small
 ***  number if you want 0s suppressed in the copy.
 ***
 ***  mtx_copy_wo_incidence(mtx):
 ***  Copies the matrix except for the nonzero and block structure
 ***  and returns a handle to the new copy. If you find yourself
 ***  doing mtx_copy followed by mtx_clear, use this instead.
 ***
 ***
 -$- Does nothing if matrix fails mtx_check_matrix; returns NULL.
 ***
 mmm macro extern mtx_matrix_t mtx_copy(m)
 mmm macro extern mtx_matrix_t mtx_copy_region(m,r)
 mmm macro extern mtx_matrix_t mtx_copy_region_drop(m,r,d)
 mmm macro extern mtx_matrix_t mtx_copy_wo_incidence(m)
 mmm macro extern mtx_matrix_t mtx_copy_complete(m)
 **/
#define mtx_copy(m) mtx_copy_options((m),FALSE,TRUE,mtx_ENTIRE_MATRIX,0.0)
#define mtx_copy_region(m,r) mtx_copy_options((m),FALSE,TRUE,(r),0.0)
#define mtx_copy_region_drop(m,r,d) mtx_copy_options((m),FALSE,TRUE,(r),(d))
#define mtx_copy_wo_incidence(m) mtx_copy_options((m),FALSE,FALSE,NULL,0.0)
#define mtx_copy_complete(m) mtx_copy_options((m),TRUE,TRUE, \
                                              mtx_ENTIRE_MATRIX,0.0)
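
/**
 ***  Example (an added sketch): snapshot the square region covering
 ***  current rows/columns 0..n-1 of A, dropping entries with magnitude
 ***  below 1e-12. A, B and n are illustrative assumptions.
 ***
 ***     mtx_region_t reg;
 ***     mtx_matrix_t B;
 ***     B = mtx_copy_region_drop(A,mtx_region(&reg,0,n-1,0,n-1),1e-12);
 ***
 ***  B is a new master; release it with mtx_destroy when done.
 **/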

extern int32 mtx_order(mtx_matrix_t);
/**
 ***  order = mtx_order(matrix)
 ***  int32 order;
 ***  mtx_matrix_t matrix;
 ***
 ***  Returns the order of the matrix, be it master or slave.
 -$- The order of a corrupt matrix is -1.
 **/

extern int32 mtx_capacity(mtx_matrix_t);
/**
 ***  cap = mtx_capacity(matrix)
 ***  int32 cap;
 ***  mtx_matrix_t matrix;
 ***
 ***  Returns the capacity of the matrix, be it master or slave.
 ***  Original row/column numbers must lie in the range 0 <= ndx < cap.
 ***  cap may be larger than the matrix order if the order of the matrix
 ***  was ever at any time higher than it is now.
 -$- The capacity of a corrupt matrix is -1.
 **/

extern void mtx_set_order(mtx_matrix_t, int32);
/**
 ***  mtx_set_order(matrix,order)
 ***  mtx_matrix_t matrix;
 ***  int32 order;
 ***
 ***  Changes the order of the matrix to the new order, either truncating
 ***  the matrix, or extending it with blank rows and columns if necessary.
 ***  Does not change the incidence pattern if the matrix is being expanded.
 ***  Calls on slaves are passed up to the master. Calls on a master reset
 ***  the order for all of its slaves.
 ***
 ***  Bugs:
 ***  This should return a memory allocation status boolean, but doesn't.
 -$- Does nothing to a bad matrix.
 **/

extern void mtx_clear_coord(mtx_matrix_t, int32, int32);
extern void mtx_clear_row(mtx_matrix_t, int32, mtx_range_t *);
extern void mtx_clear_col(mtx_matrix_t, int32, mtx_range_t *);
extern void mtx_clear_rowlist(mtx_matrix_t,
                              mtx_sparse_t *, mtx_range_t *);
extern void mtx_clear_collist(mtx_matrix_t,
                              mtx_sparse_t *, mtx_range_t *);
extern void mtx_clear_region(mtx_matrix_t, mtx_region_t *);
extern void mtx_reset_perm(mtx_matrix_t);
extern void mtx_clear(mtx_matrix_t);
/**
 -$- mtx_clear_coord(matrix,row,col)
 -$- mtx_clear_row(matrix,row,rng)
 -$- mtx_clear_col(matrix,col,rng)
 -$- mtx_clear_rowlist(matrix,sp,rng)   !* not yet implemented *!
 -$- mtx_clear_collist(matrix,sp,rng)   !* not yet implemented *!
 -$- mtx_clear_region(matrix,region)
 -$- mtx_reset_perm(matrix)
 -$- mtx_clear(matrix)
 ***  mtx_matrix_t matrix;
 ***  mtx_sparse_t *sp;
 ***  mtx_region_t *region;
 ***  mtx_range_t *rng;
 ***
 ***  mtx_clear_coord will make sure a specific element doesn't exist.
 ***  mtx_clear_row and mtx_clear_col will erase a range of elements
 ***  in a single row or column.
 ***  mtx_clear_rowlist and mtx_clear_collist will erase a range of elements
 ***  in the list of rows or columns given in the idata of the sparse.
 ***  The data of the sparse need not be valid as it will not be referenced.
 ***  mtx_clear_region erases all elements in the given region.
 ***  mtx_reset_perm restores the original row/column ordering.
 ***  mtx_clear clears everything.
 -$- Does nothing to a bad matrix.
 ***
 ***  It is generally most efficient to clear as many incidences as possible
 ***  in a single call. When a set of adjacent rows/columns is to be cleared,
 ***  use mtx_clear_region. If a rather disjoint set is to be cleared (such
 ***  as the nonlinear rows of a mixed linear/nonlinear jacobian) use
 ***  mtx_clear_rowlist/collist.
 ***
 ***  Note that mtx_clear and mtx_reset_perm invalidate any data saved
 ***  with the mtx_*_block_perm functions.
 ***  Notes on masters/slaves:
 ***  mtx_clear_coord, mtx_clear_row, mtx_clear_col, mtx_clear_rowlist,
 ***  mtx_clear_collist work as usual.
 ***  mtx_clear_region(slave,region) can be used without affecting other
 ***  slaves or the master.
 ***  mtx_clear_region(master,region) affects only the master UNLESS
 ***  region is mtx_ENTIRE_MATRIX. mtx_ENTIRE_MATRIX clears the master
 ***  and ALL incidence in ALL its slaves.
 ***  mtx_reset_perm(master or slave) passes the reset up to the master.
 ***  mtx_clear(master or slave) passes up to the master.
 ***
 **/
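
/**
 ***  Example (an added sketch): wipe the incidence in one rectangular
 ***  block, e.g. before refilling the nonlinear rows of a jacobian.
 ***  A, rlow, rhigh, clow and chigh are illustrative assumptions.
 ***
 ***     mtx_region_t reg;
 ***     mtx_clear_region(A,mtx_region(&reg,rlow,rhigh,clow,chigh));
 **/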

extern real64 mtx_value(mtx_matrix_t, mtx_coord_t *);
extern void mtx_set_value(mtx_matrix_t, mtx_coord_t *, real64);
extern void mtx_fill_value(mtx_matrix_t, mtx_coord_t *, real64);
extern void mtx_fill_org_value(mtx_matrix_t, const mtx_coord_t *,
                               real64);
/**
 -$- value = mtx_value(matrix,coord)
 -$- mtx_set_value(matrix,coord,value)
 -$- mtx_fill_value(matrix,coord,value)
 -$- mtx_fill_org_value(matrix,orgcoord,value)
 ***  real64 value;
 ***  mtx_matrix_t matrix;
 ***  mtx_coord_t *coord, *orgcoord;
 ***
 ***  Use of mtx_value and mtx_set_value should be avoided if at all possible
 ***  inside loops. See mtx_next_in_* for doing mtx/vector operations.
 ***  Returns/sets the value of the given element in the matrix.
 ***  Because the only sane usage of mtx_value, mtx_set_value is for
 ***  things like getting and setting something in the diagonal,
 ***  mtx_set_value remembers the last place mtx_value returned
 ***  and checks it before starting a search. Any element destruction
 ***  causes mtx_set_value to forget, however.
 ***  This remembrance is matrix specific and is not affected by
 ***  element destruction in other unrelated matrices.
 ***
 ***  mtx_fill_value is
 ***  to be used in place of mtx_set_value in those instances where the
 ***  caller knows there currently exists no element at the coordinate
 ***  whose value is to be set. mtx_fill_value can also be used if a
 ***  mtx_assemble call will happen before ANY other numeric or structural
 ***  calls are made.
 ***
 ***  mtx_fill_org_value is
 ***  just like mtx_fill_value except the location given is understood as
 ***  the (orgrow,orgcol) location to put the fill.
 ***
 ***  mtx_set_value(mtx,coord,0.0) will create no incidence if none
 ***  is presently there, nor does it delete an incidence.
 ***
 ***  Doesn't matter whether a matrix is slave or master.
 ***
 -$- Returns 0.0/does nothing from/to a bad matrix.
 **/
/* grandfathering old linsol. remove when linsolqr replaces linsol */
#define mtx_add_value(a,b,c) mtx_fill_value((a),(b),(c))

extern int32 mtx_assemble(mtx_matrix_t);
/**
 ***  dinc = mtx_assemble(matrix);
 ***  mtx_matrix_t matrix;
 ***  int32 dinc;
 ***
 ***  Takes a matrix, assumed to have redundant and otherwise insane incidences
 ***  created by mtx_fill_value, and sums all like entries, eliminating
 ***  the duplicates and the zeroes. Returns -(# of duplicate elements removed).
 ***  Returns 1 if it fails for some reason.
 ***  Could stand to have the error messages it emits improved.
 ***  Could stand to take a rowrange or a rowlist,
 ***  a colrange or a collist, and a droptol. Zeroes are not counted as
 ***  duplicates.
 ***  Algorithm cost: O(3*nnz).
 **/
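
/**
 ***  Example (an added sketch): building a matrix from triplet-style
 ***  data with mtx_fill_value and then consolidating it. The arrays
 ***  ri, ci, val and the count nnz are illustrative assumptions.
 ***
 ***     mtx_coord_t c;
 ***     int32 k;
 ***     for (k = 0; k < nnz; k++) {
 ***       mtx_fill_value(A,mtx_coord(&c,ri[k],ci[k]),val[k]);
 ***     }
 ***     (void)mtx_assemble(A);
 ***
 ***  Because mtx_assemble sums duplicates and strips zeroes, the fills
 ***  above need not hit distinct coordinates.
 **/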

/***********************************************************************\
  mtx element routines
  None of these routines care about master/slave status.
\***********************************************************************/
extern void mtx_del_zr_in_row(mtx_matrix_t, int32);
extern void mtx_del_zr_in_col(mtx_matrix_t, int32);
extern void mtx_del_zr_in_rowrange(mtx_matrix_t, mtx_range_t *);
extern void mtx_del_zr_in_colrange(mtx_matrix_t, mtx_range_t *);
/**
 -$- mtx_del_zr_in_row(matrix,row)
 -$- mtx_del_zr_in_col(matrix,col)
 -$- mtx_del_zr_in_rowrange(matrix,rng)
 -$- mtx_del_zr_in_colrange(matrix,rng)
 ***  mtx_matrix_t matrix;
 ***  int32 row,col;
 ***  mtx_range_t *rng;
 ***
 ***  "Non-zeros" in the given row/col/rng of rows/cols which are actually
 ***  zero are effectively removed (i.e. the status of "non-zero" is revoked).
 -$- Does nothing to a bad matrix.
 **/
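
/**
 ***  Example (an added sketch): after numeric cancellation, revoke the
 ***  "non-zero" status of entries that are exactly 0.0 in rows
 ***  rlow..rhigh. A, rlow and rhigh are illustrative assumptions.
 ***
 ***     mtx_range_t rows;
 ***     mtx_del_zr_in_rowrange(A,mtx_range(&rows,rlow,rhigh));
 **/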

extern void mtx_steal_org_row_vec(mtx_matrix_t, int32,
                                  real64 *, mtx_range_t *);
extern void mtx_steal_org_col_vec(mtx_matrix_t, int32,
                                  real64 *, mtx_range_t *);
extern void mtx_steal_cur_row_vec(mtx_matrix_t, int32,
                                  real64 *, mtx_range_t *);
extern void mtx_steal_cur_col_vec(mtx_matrix_t, int32,
                                  real64 *, mtx_range_t *);
/**
 -$- mtx_steal_org_row_vec(mtx,row,vec,colrng)
 -$- mtx_steal_org_col_vec(mtx,col,vec,rowrng)
 -$- mtx_steal_cur_row_vec(mtx,row,vec,colrng)
 -$- mtx_steal_cur_col_vec(mtx,col,vec,rowrng)
 ***  mtx_matrix_t mtx;
 ***  int32 row,col;
 ***  real64 *vec;
 ***  mtx_range_t *colrng,*rowrng;
 ***
 ***  mtx_steal_org/cur_row_vec:
 ***  The user is expected to supply the vec; we cannot check it.
 ***  Copies the mtx nonzeros currently within colrng INTO array vec which is
 ***  indexed by org/cur column number. Does not affect other
 ***  entries of vec in or outside the range. In particular, vec
 ***  is NOT zeroed within the range unless there is a matrix element
 ***  with value zero at that location.
 ***  All incidence within colrng of the row given is removed from the matrix.
 ***
 ***  mtx_steal_org/cur_col_vec:
 ***  Switch row <--> col in above.
 ***
 ***  Notes: It is rather faster to call this with mtx_ALL_COLS/ROWS when
 ***  the row/col of interest is known to have incidence exclusively in
 ***  the range of interest.
 ***
 -$- Fetches nothing from a bad matrix.
 **/
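
/**
 ***  Example (an added sketch): pull current row r of A into a dense,
 ***  cur-column-indexed work array and clear that row's incidence in
 ***  one pass. A, r and work (length >= mtx_order(A)) are illustrative
 ***  assumptions; the array is zeroed first since only incident
 ***  positions are written.
 ***
 ***     mtx_zero_real64(work,mtx_order(A));
 ***     mtx_steal_cur_row_vec(A,r,work,mtx_ALL_COLS);
 **/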

/*****************************************************************\
  Sparse vector operations, rather analogous to the mtx_value suite.
  These are tools for data motion. No arithmetic operators
  are provided as yet. The deallocation of sparse vectors is
  the user's job. See the notes at the top of this header for the
  data semantics of a sparse vector.
  The user may construct a sparse vector.
  Calls which return a sparse vector may create or use and return
  a user supplied sparse vector. These calls are at liberty to
  reallocate the data memory if that supplied is insufficient to
  hold the data. The len and cap values of the vector will be reset
  as appropriate.

  Functions do not create a sparse unless it says in their header
  that mtx_CREATE_SPARSE is a valid argument.
\*****************************************************************/

extern boolean mtx_steal_org_row_sparse(mtx_matrix_t, int32,
                                        mtx_sparse_t *, mtx_range_t *);
extern boolean mtx_steal_org_col_sparse(mtx_matrix_t, int32,
                                        mtx_sparse_t *, mtx_range_t *);
extern boolean mtx_steal_cur_row_sparse(mtx_matrix_t, int32,
                                        mtx_sparse_t *, mtx_range_t *);
extern boolean mtx_steal_cur_col_sparse(mtx_matrix_t, int32,
                                        mtx_sparse_t *, mtx_range_t *);
/**
 -$- err = mtx_steal_org_row_sparse(mtx,row,sparse,colrng)
 -$- err = mtx_steal_org_col_sparse(mtx,col,sparse,rowrng)
 -$- err = mtx_steal_cur_row_sparse(mtx,row,sparse,colrng)
 -$- err = mtx_steal_cur_col_sparse(mtx,col,sparse,rowrng)
 ***  mtx_matrix_t mtx;
 ***  int32 row,col;
 ***  mtx_sparse_t *sparse;
 ***  mtx_range_t *colrng,*rowrng;
 ***  boolean err;
 ***
 ***  The user must supply the sparse; if it is too small, err will be
 ***  TRUE, data will not be collected, and incidence will not be cleared.
 ***  mtx_CREATE_SPARSE is not a valid argument to this function.
 ***  What is too small? For all flavors the sparse must
 ***  have at least the capacity indicated by the col/rowrng.
 ***
 ***  mtx_steal_org/cur_row_sparse:
 ***  Copies the mtx nonzeros currently within colrng to the sparse,
 ***  indexing by org/cur column number. Nonzeros with value 0.0 WILL NOT
 ***  be included in the sparse. sparse->len will be set accordingly.
 ***  All incidence within colrng will be deleted from the mtx.
 ***
 ***  mtx_steal_org/cur_col_sparse:
 ***  Switch row <--> col in above.
 ***
 ***  Notes: It is rather faster to call this with mtx_ALL_COLS/ROWS when
 ***  the row/col of interest is known to have incidence exclusively in
 ***  the range of interest.
 ***
 -$- Fetches nothing from a bad matrix.
 **/
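
/**
 ***  Example (an added sketch): move current row r of A into a
 ***  caller-owned sparse vector. A and r are illustrative assumptions;
 ***  sizing the sparse to mtx_capacity(A) satisfies the "big enough"
 ***  requirement for any range.
 ***
 ***     mtx_sparse_t *sp = mtx_create_sparse(mtx_capacity(A));
 ***     if (sp != NULL && !mtx_steal_cur_row_sparse(A,r,sp,mtx_ALL_COLS)) {
 ***       ... use the sp->len entries in sp->idata and sp->data ...
 ***     }
 ***     mtx_destroy_sparse(sp);
 **/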

/**
 extern wish ! ! ! NOT YET IMPLEMENTED. NO USE FOR THEM SO FAR.
 extern void ! ! ! mtx_set_org_row_vec(mtx_matrix_t, int32,
                                       real64 *, mtx_range_t *,boolean);
 extern void ! ! ! mtx_set_org_col_vec(mtx_matrix_t, int32,
                                       real64 *, mtx_range_t *,boolean);
 extern void ! ! ! mtx_set_cur_row_vec(mtx_matrix_t, int32,
                                       real64 *, mtx_range_t *,boolean);
 extern void ! ! ! mtx_set_cur_col_vec(mtx_matrix_t, int32,
                                       real64 *, mtx_range_t *,boolean);
 -$- mtx_set_org_row_vec(mtx,row,vec,colrng,destructive)
 -$- mtx_set_org_col_vec(mtx,col,vec,rowrng,destructive)
 -$- mtx_set_cur_row_vec(mtx,row,vec,colrng,destructive)
 -$- mtx_set_cur_col_vec(mtx,col,vec,rowrng,destructive)
 ***  mtx_matrix_t mtx;
 ***  int32 row,col;
 ***  real64 *vec;
 ***  mtx_range_t *colrng,*rowrng;
 ***  boolean destructive;
 ***
 ***  mtx_set_org/cur_row_vec:
 ***  Copies the nonzeros currently within colrng FROM array vec, which is
 ***  indexed by org/cur column number, INTO the matrix.
 ***  If destructive is TRUE, existing nonzeros which are given value 0.0
 ***  are removed from the matrix.
 ***
 ***  mtx_set_org/cur_col_vec:
 ***  Switch row <--> col in above.
 ***
 ***  Notes: It is faster to call this with a narrow range than with
 ***  mtx_ALL_COLS/ROWS. For ranges with very low sparsity, it may
 ***  be faster to call mtx_clear_row/col followed by mtx_fill_*_*_vec,
 ***  depending on the time cost of allocating and deallocating small
 ***  pieces of memory on the machine in question.
 ***
 ! !  Warning: If destructive is TRUE, care should be taken when using
 ! !  these operators within a mtx_next_in_* loop that the current element
 ! !  of the loop is not zeroed by the vec, causing it to be deallocated.
 ***
 -$- Puts nothing to a bad matrix.
 **/

extern void mtx_fill_org_row_vec(mtx_matrix_t, int32,
                                 real64 *, mtx_range_t *);
extern void mtx_fill_org_col_vec(mtx_matrix_t, int32,
                                 real64 *, mtx_range_t *);
extern void mtx_fill_cur_row_vec(mtx_matrix_t, int32,
                                 real64 *, mtx_range_t *);
extern void mtx_fill_cur_col_vec(mtx_matrix_t, int32,
                                 real64 *, mtx_range_t *);
extern void mtx_dropfill_cur_row_vec(mtx_matrix_t, int32,
                                     real64 *, mtx_range_t *,
                                     real64);
extern void mtx_dropfill_cur_col_vec(mtx_matrix_t, int32,
                                     real64 *, mtx_range_t *,
                                     real64);
/**
 -$- mtx_fill_org_row_vec(mtx,row,vec,colrng)
 -$- mtx_fill_org_col_vec(mtx,col,vec,rowrng)
 -$- mtx_fill_cur_row_vec(mtx,row,vec,colrng)
 -$- mtx_fill_cur_col_vec(mtx,col,vec,rowrng)
 -$- mtx_dropfill_cur_row_vec(mtx,row,vec,colrng,tolerance)
 -$- mtx_dropfill_cur_col_vec(mtx,col,vec,rowrng,tolerance)
 ***  mtx_matrix_t mtx;
 ***  int32 row, col;
 ***  real64 tolerance, *vec;
 ***  mtx_range_t *colrng, *rowrng;
 ***
 ***  mtx_fill_org/cur_row_vec:
 ***  Assumes that the colrng of row in the matrix is empty and
 ***  copies the nonzeros currently within colrng FROM array vec, which is
 ***  indexed by org/cur column number, INTO the matrix.
 ***
 ***  mtx_fill_org/cur_col_vec:
 ***  Switch row <--> col in above.
 ***
 ***  mtx_dropfill_cur_row/col_vec:
 ***  Assumes that the colrng of row in the matrix is empty and
 ***  copies the values such that abs(value) > tolerance currently
 ***  within colrng FROM array vec, which is
 ***  indexed by cur column/row number, INTO the matrix.
 ***
 ***  Notes: It is faster to call these with a narrow range than with
 ***  mtx_ALL_COLS/ROWS. It is very marginally faster to call the cur
 ***  rather than the org flavor of these functions.
 ! !  If you use this when the range is NOT empty, you will sooner or
 ! !  later certainly lose numerical integrity and you may lose memory
 ! !  integrity. The sparse matrix programmer cannot afford to be naive.
 ***
 -$- Puts nothing to a bad matrix.
 **/
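
/**
 ***  Example (an added sketch): write a dense, cur-indexed work row back
 ***  into row r of A, dropping magnitudes below 1e-14. The row is
 ***  cleared first so the "empty range" precondition holds. A, r and
 ***  work are illustrative assumptions.
 ***
 ***     mtx_clear_row(A,r,mtx_ALL_COLS);
 ***     mtx_dropfill_cur_row_vec(A,r,work,mtx_ALL_COLS,1e-14);
 **/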

extern void mtx_fill_org_row_sparse(mtx_matrix_t, int32,
                                    const mtx_sparse_t *);
extern void mtx_fill_org_col_sparse(mtx_matrix_t, int32,
                                    const mtx_sparse_t *);
extern void mtx_fill_cur_row_sparse(mtx_matrix_t, int32,
                                    const mtx_sparse_t *);
extern void mtx_fill_cur_col_sparse(mtx_matrix_t, int32,
                                    const mtx_sparse_t *);
/**
 -$- mtx_fill_org_row_sparse(mtx,row,sp)
 -$- mtx_fill_org_col_sparse(mtx,col,sp)
 -$- mtx_fill_cur_row_sparse(mtx,row,sp)
 -$- mtx_fill_cur_col_sparse(mtx,col,sp)
 ***  mtx_matrix_t mtx;
 ***  int32 row, col;
 ***  mtx_sparse_t *sp;
 ***
 ***  mtx_fill_org_row_sparse:
 ***  Fills the current row given with the data in the sparse given.
 ***  The indices in sp->idata are taken as org col indices.
 ***  It is assumed that the row is empty, at least where data is
 ***  being added.
 ***  mtx_fill_org_col_sparse:
 ***  Swap row/col in the previous description.
 ***
 ***  mtx_fill_cur_row_sparse:
 ***  Fills the current row given with the data in the sparse given.
 ***  The indices in sp->idata are taken as cur col indices.
 ***  It is assumed that the row is empty, at least where data is
 ***  being added.
 ***  mtx_fill_cur_col_sparse:
 ***  Swap row/col in the previous description.
 ***
 ***  All these functions ignore 0.0 in the data and do not create numbers
 ***  there in the mtx.
 **/


extern void mtx_mult_row(mtx_matrix_t, int32,
                         real64, mtx_range_t *);
extern void mtx_mult_col(mtx_matrix_t, int32,
                         real64, mtx_range_t *);
extern void mtx_mult_row_zero(mtx_matrix_t, int32, mtx_range_t *);
extern void mtx_mult_col_zero(mtx_matrix_t, int32, mtx_range_t *);
/**
 -$- mtx_mult_row(matrix,row,factor,colrng)
 -$- mtx_mult_col(matrix,col,factor,rowrng)
 -$- mtx_mult_row_zero(matrix,row,colrng)
 -$- mtx_mult_col_zero(matrix,col,rowrng)
 ***  mtx_matrix_t matrix;
 ***  int32 row,col;
 ***  real64 factor;
 ***  mtx_range_t *colrng,*rowrng;
 ***
 ***  Multiplies the given row/column by a given factor. Only those
 ***  elements with column/row index within the given range are multiplied.
 ***  mtx_mult_row/col tests for factor=0.0 and blows away the row if true.
 ***  mtx_mult_row/col_zero zeros without disturbing structure.
 -$- Does nothing on a bad matrix.
 **/

extern void mtx_add_row(mtx_matrix_t, int32, int32,
                        real64, mtx_range_t *);
extern void mtx_add_col(mtx_matrix_t, int32, int32,
                        real64, mtx_range_t *);
/**
 -$- mtx_add_row(matrix,srow,trow,factor,colrng)
 -$- mtx_add_col(matrix,scol,tcol,factor,rowrng)
 ***  mtx_matrix_t matrix;
 ***  int32 srow,trow,scol,tcol;
 ***  real64 factor;
 ***  mtx_range_t *colrng,*rowrng;
 ***
 ***  Adds a given multiple of row srow/column scol to row trow/column tcol.
 ***  Only those elements with column/row index within the given range are
 ***  so affected.
 -$- Does nothing on a bad matrix.
 **/
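
/**
 ***  Example (an added sketch): one elimination step, adding a multiple
 ***  of pivot row p to row r so that the entry at (r,p) cancels.
 ***  A, r and p are illustrative assumptions; the pivot A(p,p) is
 ***  assumed nonzero.
 ***
 ***     mtx_coord_t c;
 ***     real64 mult;
 ***     mult = mtx_value(A,mtx_coord(&c,r,p)) / mtx_value(A,mtx_coord(&c,p,p));
 ***     mtx_add_row(A,p,r,-mult,mtx_ALL_COLS);
 **/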

extern void mtx_add_row_series(int32, real64,
                               mtx_range_t *);
extern void mtx_add_col_series(int32, real64,
                               mtx_range_t *);
extern void mtx_add_row_series_init(mtx_matrix_t, int32,
                                    boolean);
extern void mtx_add_col_series_init(mtx_matrix_t, int32,
                                    boolean);
/**
 -$- mtx_add_row_series(srow,factor,colrng)
 -$- mtx_add_col_series(scol,factor,rowrng)
 -$- mtx_add_row_series_init(matrix,trow,userow)
 -$- mtx_add_col_series_init(matrix,tcol,usecol)
 ***  mtx_matrix_t matrix;
 ***  int32 srow,trow,scol,tcol;
 ***  real64 factor;
 ***  mtx_range_t *colrng,*rowrng;
 ***  boolean userow,usecol;
 ***
 ***  mtx_add_row/col_series_init(mtx, t>=0, bool):
 ***  Grab a row/column t of the matrix to have several rows/columns
 ***  from the same matrix added to it. bool is ignored for t>=0.
 ***  Only one row/col at a time may be grabbed for all existing
 ***  matrices. (One row and one col may be grabbed contemporaneously.)
 ***  You must release the row/col (see below) before you can grab another.
 ***
 ***  mtx_add_row/col_series_init(mtx, mtx_NONE, userow/col):
 ***  Release the last row/column which was grabbed for multiple adds.
 ***  If userow/col is TRUE, the current contents of row/col t
 ***  (from the previous call) will be used to release the row/col.
 ***  If userow/col is FALSE, a pessimistic release method will be
 ***  used instead of the row/col previously specified.
 ! !  If ANY destructive operations have been done on the row/col, call
 ! !  this with userow/col==FALSE.
 ! !  The mtx given to a release call must match that given in the grabbing
 ! !  call or the release is ignored.
 ***  For very dense rows/cols, it may be faster to call with userow/col
 ***  == FALSE since the release with TRUE requires a row/col traversal.
 ***
 ***  mtx_add_row/col_series(s,factor,rng):
 ***  Adds the given multiple of row/column s to trow/tcol.
 ***  Only those elements of s with column/row index within the
 ***  given range are added to trow/tcol.
 ***  When possible, range mtx_ALL_COLS/ROWS is faster for sparse rows.
 ! !  Calling these without a prior call to series_init is an error.
 ***
 ! !  Warning: You MUST release any grabbed row/col created on a matrix
 ! !  before destroying that matrix. Failure to do so is fatal.
 ***
 -$- Does nothing on a bad matrix.
 **/
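
/**
 ***  Example (an added sketch): add several source rows into target row
 ***  t with a single grab/release pair. A, t, nsrc and the arrays r[]
 ***  and m[] are illustrative assumptions; the release uses the
 ***  pessimistic (FALSE) method, which is always safe.
 ***
 ***     int32 k;
 ***     mtx_add_row_series_init(A,t,FALSE);
 ***     for (k = 0; k < nsrc; k++) {
 ***       mtx_add_row_series(r[k],m[k],mtx_ALL_COLS);
 ***     }
 ***     mtx_add_row_series_init(A,mtx_NONE,FALSE);
 **/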

/**
 extern wish ! ! ! NOT YET IMPLEMENTED. NO USE FOR THEM SO FAR.
 extern void ! ! ! mtx_add_row_org_vec(mtx_matrix_t,int32,
                                       real64 *,real64,
                                       mtx_range_t *, boolean);
 extern void ! ! ! mtx_add_col_org_vec(mtx_matrix_t,int32,
                                       real64 *,real64,
                                       mtx_range_t *, boolean);
 extern void ! ! ! mtx_add_row_cur_vec(mtx_matrix_t,int32,
                                       real64 *,real64,
                                       mtx_range_t *, boolean);
 extern void ! ! ! mtx_add_col_cur_vec(mtx_matrix_t,int32,
                                       real64 *,real64,
                                       mtx_range_t *, boolean);
 -$- mtx_add_row_org_vec(matrix,trow,svec,factor,colrng,transpose)
 -$- mtx_add_col_org_vec(matrix,tcol,svec,factor,rowrng,transpose)
 -$- mtx_add_row_cur_vec(matrix,trow,svec,factor,colrng,transpose)
 -$- mtx_add_col_cur_vec(matrix,tcol,svec,factor,rowrng,transpose)
 ***  mtx_matrix_t matrix;
 ***  int32 trow,tcol;
 ***  real64 *svec;
 ***  real64 factor;
 ***  mtx_range_t *colrng,*rowrng;
 ***  boolean transpose;
 ***
 ***  mtx_add_row_org/cur_vec:
 ***  In matrix, trow[col] += factor*svec[col] for elements of svec in colrng.
 ***  svec is org/cur indexed, by row if !transpose and by column if transpose.
 ***
 ***  mtx_add_col_org/cur_vec:
 ***  Reverse row <--> col in above.
 ***
 ***  This is an absurdly expensive thing to do. Think very carefully about
 ***  whether you are ignoring existing sparsity info before asking that this
 ***  mtx primitive be coded.
 -$- Does nothing on a bad matrix.
 **/


extern void mtx_old_add_row_sparse(mtx_matrix_t, int32, real64 *,
                                   real64, mtx_range_t *, int32 *);
extern void mtx_old_add_col_sparse(mtx_matrix_t, int32, real64 *,
                                   real64, mtx_range_t *, int32 *);
/**
 ***  Note: the interface of this operator needs some work!
 ***  In particular, it should be redone with consideration given
 ***  to the mtx_sparse_t recently added.
 ***
 -$- mtx_old_add_row_sparse(matrix,row,drow,factor,colrng,collist)
 -$- mtx_old_add_col_sparse(matrix,col,dcol,factor,rowrng,rowlist)
 ***  mtx_matrix_t matrix;
 ***  int32 row,col,*collist,*rowlist;
 ***  real64 *drow, *dcol;
 ***  real64 factor;
 ***  mtx_range_t *colrng,*rowrng;
 ***
 ***  Dense in these function names refers to the row/col data type,
 ***  a vector as opposed to an mtx row/col, not the information density.
 ***  Adds factor*(drow/dcol)[j] to row/col.
 ***  Drow/dcol are assumed to be the size of the matrix and in current
 ***  row/col order, not original order.
 ***
 ***  If colrng/rowrng is mtx_ALL_COLS/ROWS, then the following is TRUE:
 ***  If collist/rowlist is NOT NULL, it will be used instead of the rng
 ***  to determine which columns/rows j of drow/dcol are added to row/col.
 ***  Collist/rowlist must be an array terminated by -1. The sanity of
 ***  current indices in collist/rowlist is the user's responsibility.
 ***  In particular, in list mode the value of drow/dcol is added blindly
 ***  rather than being checked for 0.0, and the range of indices is not
 ***  checked against the matrix size.
 ***  If collist/rowlist IS NULL the range will be mtx_ALL_COLS/ROWS.
 ***  If a range is specified, collist/rowlist is ignored and only those
 ***  elements with column/row index j within the given range are affected.
 ***
 ***  List mode is faster if you are adding the same dense vector
 ***  (differing only by factor) across many (>2) rows/columns.
 ***  Range mode is more convenient. The caller should switch between the
 ***  two as appropriate.
 -$- Does nothing on a bad matrix.
 **/

extern void mtx_add_row_sparse(mtx_matrix_t, int32, real64, mtx_sparse_t *);
extern void mtx_add_col_sparse(mtx_matrix_t, int32, real64, mtx_sparse_t *);
/**
 -$- mtx_add_row_sparse(matrix,row,factor,sparse)
 -$- mtx_add_col_sparse(matrix,col,factor,sparse)
 ***  mtx_matrix_t matrix;
 ***  int32 row,col;
 ***  real64 factor;
 ***  mtx_sparse_t *sparse;  expected to be in org coords.
 ***  NOT IMPLEMENTED.
 **/

extern size_t mtx_size(mtx_matrix_t);
extern size_t mtx_chattel_size(mtx_matrix_t);
/**
 ***  size = mtx_size(mtx)
 ***  size = mtx_chattel_size(mtx)
 ***  size_t size;
 ***  mtx_matrix_t mtx;
 ***
 ***  mtx_size(mtx):
 ***  Returns the amount of memory in use by a matrix and all its
 ***  bits and pieces.
 ***  Slaves report only their incremental cost, that is, they do
 ***  not report the cost of the structures they share with a master.
 ***  Masters do report the cost of their slaves.
 ***
 ***  mtx_chattel_size(master):
 ***  Returns the memory used by all slaves of the matrix given
 ***  that is not shared with the master. Returns 0 from a slave.
 **/

extern void mtx_free_reused_mem();
/**
 ***  mtx_free_reused_mem()
 ***
 ***  Deallocates any memory that mtx may be squirrelling away for
 ***  internal reuse. Calling this while any slv_system_t exists
 ***  is likely to be fatal: handle with care.
 **/

/***********************************************************************\
  mtx io routines
\***********************************************************************/
extern void mtx_write_sparse(FILE *,mtx_sparse_t *);
/**
 ***  mtx_write_sparse(file,sparse);
 ***  Outputs the sizes and data of the sparse vector to the file given.
 **/

extern void mtx_write_region_human_f(FILE *,mtx_matrix_t,
                                     mtx_region_t *,int,int);
/**
 ***  MACROS:
 ***  mtx_write_region_human(file,mtx,region);
 ***  Grandfather support for the old usages.
 ***  mtx_write_region_human_rows(file,mtx,region);
 ***  Writes row oriented human readable output of a mtx region.
 ***  mtx_write_region_human_cols(file,mtx,region);
 ***  Writes column oriented human readable output of a mtx region.
 ***
 ***  Internal:
 ***  mtx_write_region_human_f(file,mtx,region,colwise,orgwise);
 ***  Outputs permutation and values of the nonzero elements in the
 ***  given region of the mtx to the file given.
 ***  If colwise != 0, output will be column grouped,
 ***  otherwise it will be row grouped.
 ***  If orgwise != 0, only org indices will be printed.
 ***  Doesn't care about master/slave status.
 ***
 mmm macro extern void mtx_write_region_human(file,mtx,region)
 mmm macro extern void mtx_write_region_human_rows(file,mtx,region)
 mmm macro extern void mtx_write_region_human_cols(file,mtx,region)
 mmm macro extern void mtx_write_region_human_orgrows(file,mtx,region)
 mmm macro extern void mtx_write_region_human_orgcols(file,mtx,region)
 **/
#define mtx_write_region_human(f,m,r) mtx_write_region_human_f((f),(m),(r),0,0)
#define mtx_write_region_human_rows(f,m,r) \
  mtx_write_region_human_f((f),(m),(r),0,0)
#define mtx_write_region_human_cols(f,m,r) \
  mtx_write_region_human_f((f),(m),(r),1,0)
#define mtx_write_region_human_orgrows(f,m,r) \
  mtx_write_region_human_f((f),(m),(r),0,1)
#define mtx_write_region_human_orgcols(f,m,r) \
  mtx_write_region_human_f((f),(m),(r),1,1)
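
/**
 ***  Example (an added sketch): dump the whole matrix to stderr in
 ***  row-grouped, human readable form. A is an illustrative assumption;
 ***  stderr requires <stdio.h>, which callers of these routines already
 ***  need for the FILE type.
 ***
 ***     mtx_write_region_human(stderr,A,mtx_ENTIRE_MATRIX);
 **/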

extern void mtx_write_region(FILE *,mtx_matrix_t,mtx_region_t *);
/**
 ***  mtx_write_region(file,mtx,region)
 ***  Outputs the permutation and the values of the nonzero elements in the
 ***  given region of the mtx to the file given, along with the matrix
 ***  size.
 ***  The coordinates of the nonzeros written will be in original
 ***  (unpermuted) indexing. This file is for mtx_read_region, but is
 ***  in ASCII for portability.
 ***  Doesn't care about master/slave status.
 ***  Not intended for human consumption, but just so you know,
 ***  the permutations are written in the order r2org org2r c2org org2c.
 ***  Parity is not written.
 ***  If the region given is mtx_ENTIRE_MATRIX, and there is a
 ***  block structure present in the matrix, it will be written as well
 ***  and the symbolic rank will go out with the block structure.
 **/

extern mtx_matrix_t mtx_read_region(FILE *,mtx_matrix_t,int);
/**
 ***  mtx_read_region(file,mtx,transposed)
 ***  From the file pointer, does the following:
 ***  Gets the matrix order of the data in the file.
 ***  If mtx is NULL, creates it.
 ***  Expands the matrix given or created to the data order if it is too small.
 ***  Permutes the matrix to the permutation found in the file.
 ***  Reads the region limits in the file and clears that region in the mtx.
 ***  Reads coefficients from the file.
 ***
 ***  transposed governs whether the matrix data should be treated as
 ***  (row, col) or (col, row).
 ! !  Warnings: Do not add incidence data to the
 ! !  file except in those org_rows and org_cols which are within the
 ! !  region specified (note that the region is given in CUR coordinates.)
 ! !  Adding incidence outside the region may lead to loss of internal
 ! !  consistency unless the entire mtx given is empty when this is called.
 ! !  If you send a matrix larger than the order of data in the file,
 ! !  you should do a reset to it first to undo the previous permutation.
 ***
 ***  The return value is the pointer to the mtx given or created.
 ***  If there is an error reading the matrix file, the mtx returned may be
 ***  incomplete, or NULL if the mtx given was NULL.
 ***  If the file read has block information in it and further the user sent
 ***  in NULL to this function, block info will be read and added to the
 ***  returned matrix.
 ***
 ***  Doesn't care about master/slave status.
 **/
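
/**
 ***  Example (an added sketch): save a matrix and read it back later.
 ***  The path "a.mtx" and the matrices A and B are illustrative
 ***  assumptions; error checking of fopen is omitted.
 ***
 ***     FILE *fp = fopen("a.mtx","w");
 ***     mtx_write_region(fp,A,mtx_ENTIRE_MATRIX);
 ***     fclose(fp);
 ***     ...
 ***     fp = fopen("a.mtx","r");
 ***     B = mtx_read_region(fp,NULL,0);
 ***     fclose(fp);
 **/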

extern void mtx_write_region_matlab(FILE *,mtx_matrix_t,mtx_region_t *);
/**
 ***  mtx_write_region_matlab(file,mtx,region)
 ***  Outputs the values of the nonzero elements in the
 ***  given region of the mtx to the file in matlab/harwell sparse format.
 ***  The "a=[\n" and trailing "];\n" are not supplied, since the caller
 ***  knows better what they should look like.
 ***  Row/column coordinates printed are the cur coordinates.
 **/

extern void mtx_write_region_plot(FILE *,mtx_matrix_t,mtx_region_t *);
/**
 ***  mtx_write_region_plot(file,mtx,region)
 ***  Outputs the coordinates of elements in the given region of the mtx
 ***  to the file in a format suitable for xgraph consumption.
 ***  A suitable xgraph invocation would be "xgraph -nl -m filename".
 ***  Doesn't care about master/slave status.
 **/

extern void mtx_write_region_csr(FILE *,mtx_matrix_t,
                                 mtx_region_t *, int);
extern void mtx_write_region_smms(FILE *,mtx_matrix_t,
                                  mtx_region_t *, int);
extern mtx_matrix_t mtx_read_smms(FILE *,mtx_matrix_t,int);

/**
 ***  mtx_write_region_csr(file,mtx,region,offset)
 ***  mtx_write_region_smms(file,mtx,region,offset)
 ***  mtx = mtx_read_smms(file,mtx,transpose);
 ***
 ***  Writes the given region of the matrix to the file given.
 ***  Will write the entire matrix if the region is mtx_ENTIRE_MATRIX.
 ***  The _csr version writes out the matrix in compressed row format.
 ***  The _smms version writes out the matrix in a form digestible by
 ***  Alvarado's Sparse Matrix Manipulation System.
 ***  There may be a _ccs version (column based) one day.
 ***  offset controls whether fortran (1) or c (0) style indexing is done.
 ***
 ***  mtx_read_smms reads a matrix in smms format. If a NULL matrix
 ***  is sent in, it will create and return it. If a non-NULL matrix
 ***  is given, the order will be increased if necessary. The contents
 ***  of the old mtx will be blown away. The transpose flag dictates
 ***  whether the transpose should be read in.
 ***  Doesn't care about master/slave status.
 **/

extern void mtx_exception_recover();
/**
 ***  mtx_exception_recover();
 ***  You don't need to know what this does, except that you should call
 ***  it any time it is probable that a floating point exception has
 ***  occurred during matrix manipulations. Several functions use
 ***  data structures that can cause insanity in the event of exception.
 ***
 ***  Just for the curious, it resets several internal data structures
 ***  needed, including ones used in the operators:
 ***  mtx_add_row/col
 ***  mtx_add_row/col_series
 ***  mtx_add_row/col_series_init
 ***  mtx_assemble
 ***  mtx_add_outer_product
 **/

extern void mtx__debug_output(FILE *,mtx_matrix_t);
/**
 ***  mtx__debug_output(fp,matrix)
 ***  FILE *fp;
 ***  mtx_matrix_t matrix;
 ***
 ***  Outputs all internal information about a matrix to the file, for
 ***  debugging. In the process, integrity checks are performed.
 ***  If file is NULL, output goes to the default (which is stderr).
 ***  Doesn't care about master/slave status.
 **/

#endif /* __MTX_BASIC_H_SEEN__ */