#ifdef __MTX_C_SEEN__
/*
 *  mtx2: Ascend Sparse Matrix Package
 *  by Benjamin Andrew Allan
 *  Derived from mtx by Karl Michael Westerberg
 *  Created: 5/3/90
 *  Version: $Revision: 1.9 $
 *  Version control file: $RCSfile: mtx_use_only.h,v $
 *  Date last modified: $Date: 2000/01/25 02:27:13 $
 *  Last modified by: $Author: ballan $
 *
 *  This file is part of the SLV solver.
 *
 *  Copyright (C) 1996 Benjamin Andrew Allan
 *  based (loosely) on mtx
 *  Copyright (C) 1990 Karl Michael Westerberg
 *  Copyright (C) 1993 Joseph Zaher
 *  Copyright (C) 1994 Joseph Zaher, Benjamin Andrew Allan
 *  Copyright (C) 1995 Kirk Andre Abbott, Benjamin Andrew Allan
 *
 *  The SLV solver is free software; you can redistribute
 *  it and/or modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2 of the
 *  License, or (at your option) any later version.
 *
 *  The SLV solver is distributed in the hope that it will be
 *  useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  the program; if not, write to the Free Software Foundation, Inc., 675
 *  Mass Ave, Cambridge, MA 02139 USA. Check the file named COPYING.
 *  COPYING is found in ../compiler.
 */
/*********************************************************************\
  This file defines the private parts of an mtx and is only for mtx*.c
  consumption. Any temptation to include this header in a linear or
  nonlinear solver package is a symptom of extremely bad programming
  and lack of proper task analysis. This header should be regarded as
  highly unstable. We make ABSOLUTELY NO commitment to maintain
  consistency between any two versions of this file.

  Note to third parties:
  mtx is PRODUCTION code in very long use at Carnegie Mellon University.
  As such, we keep a very tight hold on the internals of our data
  structure so that we can easily prove the code when apparent bugs arise.
  99.44% of "bugs" experienced in using mtx are the result of not
  reading the public headers carefully.

  The material in this file was never part of any header until the
  old mtx.c file got so big that we had to split it up to make it
  manageable.

  Note to future developers of the mtx module: if you change ANYTHING
  in this file it is YOUR job to:
  a) clear that change with all the other developers using this header;
  b) fix ALL the other mtx*.c files that depend on it.
  If you are not willing to do that much work, why the hell are you
  dabbling in sparse matrix math? Go work on GUIs.
\*********************************************************************/
#ifndef __MTX_INTERNAL_USE_ONLY_H__
#define __MTX_INTERNAL_USE_ONLY_H__
/* requires #include <stdio.h> */
/* requires #include "mem.h" */
/* requires #include "mtx.h" */

#ifndef FALSE
/* these should have come in from base.h. */
#define FALSE 0
#define TRUE 1
#endif

#define MTX_DEBUG FALSE
/* MTX_DEBUG is a no-holds-barred sanity checking flag for use when
 * nothing else is giving a clue why something is going wrong. It
 * slows down the code to a crawl. Do not under any conditions change
 * its value or undefine it except at this location. If you need some
 * other sort of debugging flag for debugging a particular function,
 * use a personal debugging flag instead.
 */

#define EVEN FALSE
#define ODD TRUE
#define SWAPS_PRESERVE_ORDER TRUE
/**
 *** Do row and column swaps preserve the ordering of non-zeros in rows
 *** and columns? Setting this to TRUE means swapping only entails the
 *** movement of integer row or column numbers and NOT the exchange of
 *** entire rows or columns.
 **/
#define WIDTHMAGIC 2048
#define LENMAGIC 10
/**
 *** WIDTHMAGIC is the byte size to aim for when allocating groups of elements.
 *** LENMAGIC is the initial number of groups of elements, hence the smallest
 *** possible number of elements a matrix will ever have is
 *** LENMAGIC*WIDTHMAGIC/eltsize.
 **/

extern FILE *g_mtxerr;
/**
 *** Global file pointer to which errors are reported. Should never be
 *** NULL. Also useful when running ascend in gdb and you can't find
 *** any other file pointer to use.
 **/

struct next_element_t {
  struct element_t *row;
  struct element_t *col;
};
/* just a struct to make the resulting code more readable. */

struct element_t {
  real64 value;
  int32 row;
  int32 col;
  struct next_element_t next;
};
/**
 *** This is the basic jacobian element of an mtx.
 *** Its size is 24 bytes on 4-byte pointer machines and
 *** 32 bytes on 8-byte pointer machines.
 *** Each element sits in two singly linked lists at once:
 *** one for its row and one for its column.
 *** The row and col indices in an element refer back to
 *** the header positions of the two lists that element is in.
 *** That is, each element knows its orgrow and orgcol.
 **/

struct nz_headers_t {
  struct element_t **row;
  struct element_t **col;
};
/**
 *** Each matrix is really just a pair of arrays of pointers to
 *** elements. The index of a row or column in THESE arrays is
 *** what is referred to as an org index. A value of NULL in
 *** either array means that that row (or col) is empty.
 ***
 *** When we insert elements in the matrix, we simply shove the
 *** element in at the head of its row/column lists.
 *** When we delete an element in the matrix, we search in one
 *** direction and unlink the element, marking it "dead". Then a
 *** general pass in the other direction unlinks all the "dead"
 *** elements.
 ***
 *** Special note: The -1th element of nz_header arrays is NOT allocated.
 **/
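/**
 *** A minimal sketch of the head insertion described above, for
 *** illustration only (it is NOT the body of any mtx routine, and it
 *** assumes next.col chains along a row while next.row chains along a
 *** column; mtx_create_element() below is the real interface):
 ***
 ***   elt->row = org_row;
 ***   elt->col = org_col;
 ***   elt->value = D_ZERO;
 ***   elt->next.col = mtx->hdr.row[org_row];   <- old head of the row list
 ***   elt->next.row = mtx->hdr.col[org_col];   <- old head of the col list
 ***   mtx->hdr.row[org_row] = elt;
 ***   mtx->hdr.col[org_col] = elt;
 **/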

struct permutation_t {
  int32 *org_to_cur;  /* org_to_cur[-1] = -1 */
  int32 *cur_to_org;  /* cur_to_org[-1] = -1 */
  boolean parity;
};
/**
 *** We maintain, rather than rederive, the information required to
 *** answer all possible permutation questions.
 *** This is a policy decision based on the fact that mtx is research
 *** code that needs maximal flexibility at reasonable speed.
 ***
 *** The -1th elements of org_to_cur and cur_to_org are defined because
 *** -1 is used all over mtx as an error return. It's easier to debug
 *** things without the memory access errors that would happen if
 *** -1 were not allocated or were part of memory in some other object.
 ***
 *** Special note: The -1th element of nz_header arrays, by contrast, is
 *** NOT allocated.
 **/
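/**
 *** A hedged sketch of what swapping two current rows amounts to when
 *** SWAPS_PRESERVE_ORDER is TRUE: only permutation integers move and the
 *** parity flips. Illustration only -- not the body of any mtx_swap_*
 *** routine; 'p' is a hypothetical (struct permutation_t *).
 ***
 ***   int32 org1 = p->cur_to_org[cur1], org2 = p->cur_to_org[cur2];
 ***   p->cur_to_org[cur1] = org2;   p->cur_to_org[cur2] = org1;
 ***   p->org_to_cur[org1] = cur2;   p->org_to_cur[org2] = cur1;
 ***   p->parity = !p->parity;
 **/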

/* Do not access the parity field of a slave matrix; refer to its master. */
/* Conduct all permuting operations on the master. */

struct permutations_t {
  struct permutation_t row;
  struct permutation_t col;
  int32 transpose;
};

struct structural_data_t {
  int32 symbolic_rank;   /* Symbolic rank (< 0 if invalid) */
  int32 nblocks;         /* # of blocks in the matrix */
  mtx_region_t *block;   /* Pointer to the array of blocks */
};
/**
 *** There is a list of blocks associated with a matrix.
 *** This is an artifact of POOR solver API design between
 *** Peter Piela and Karl Westerberg. The blockwise decomposition
 *** information properly belongs to a linear or nonlinear solver
 *** and not to the mtx.
 ***
 *** We intend to fix this soon.
 **/

struct mtx_header {
  int integrity;                   /* Integrity integer */
  int32 order;                     /* Order of the matrix */
  int32 capacity;                  /* Capacity of all the arrays */
  int32 nslaves;                   /* Number of slave matrices */
  struct nz_headers_t hdr;         /* Non-zero headers of the matrix */
  struct element_t *last_value;    /* value/set_value memory */
  mem_store_t ms;                  /* Element cache memory */
  struct permutations_t perm;      /* Permutation vectors */
  struct structural_data_t *data;  /* Pointer to structural information */
  mtx_matrix_t master;             /* The master of this mtx, if it is a slave */
  mtx_matrix_t *slaves;            /* Array of slave matrices */
};
/**
 *** capacity may be > order.
 *** A matrix of capacity 0 doesn't have a mem_store_t yet, and its elements
 *** cannot be queried without a core dump.
 **/

struct mtx_block_perm_structure {
  int integrity;
  int32 order;                     /* Order of the matrix */
  int32 capacity;                  /* Capacity of all the arrays */
  mtx_matrix_t mtx;                /* Matrix of origin */
  struct permutations_t perm;      /* Permutation vectors */
  struct structural_data_t *data;  /* Pointer to structural information */
};
/**
 *** If you want to save a permutation for restoration, you
 *** have to make a copy of that data, eh? Here's the place you
 *** put it. Note that the block list should be disappearing from
 *** the structural data soon.
 **/


#define OK ((int)201539237)
#define DESTROYED ((int)531503871)
/* matrix integrity values. */

#define ZERO ((int32)0)
#define D_ZERO ((real64)0.0)
#define D_ONE ((real64)1.0)
/* useful constants if your C compiler is not too bright about ANSI */

#define ISSLAVE(m) ((m)->master!=NULL)
/**
 *** Returns 1 if m is a slave matrix, 0 if not.
 **/

#define ordered3(a,b,c) ((a) <= (b) && (b) <= (c))
#define in_range(rng,ndx) ordered3((rng)->low,ndx,(rng)->high)
#define legal(mtx,ndx) ordered3(ZERO,ndx,(mtx)->order-1)
/**
 *** Boolean operators to compare a row or column
 *** index with some specified range or the maximum
 *** range of the matrix in which it is used.
 **/

#define fast_in_range(l,h,i) ( ordered3(l,i,h) )
#define not_in_range(l,h,i) ( (i)<(l) || (i)>(h) )
/**
 *** Boolean operators to compare 3 integers.
 *** l <= h must be TRUE or these will lie. In many cases,
 *** this condition can (or should) be established before in_range
 *** is called. These are not always faster, since fetching the lo,hi
 *** values has its own cost: queries like next_col do not profit, while
 *** calls which must traverse an entire row/col do.
 *** Gains in cycle count on DEC Alphas with cc are about 10% per function,
 *** but the gains in time are more like 1%, so Alpha pixie is lying a little.
 *** For compilers which are not as clever as DEC's (gcc, Sun acc) the
 *** gains should be much more visible. (Some do not realize rng->low
 *** is invariant even with -O.)
 *** Note that these are 'loose' comparisons if !(l<=h).
 **/
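/**
 *** For example, a loop that tests many indices against one fixed range
 *** can hoist the bounds once and then use fast_in_range. A sketch only;
 *** 'ndx' and 'count' are hypothetical names:
 ***
 ***   int32 i, lo = rng->low, hi = rng->high;
 ***   for (i = 0; i < count; i++) {
 ***     if (fast_in_range(lo,hi,ndx[i])) {
 ***       ... process index ndx[i] ...
 ***     }
 ***   }
 **/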

#define zero(ptr,nelts,type) \
  mem_zero_byte_cast((ptr),0,(nelts)*sizeof(type))
/**
 *** Zeros a vector of the specified length and type.
 *** It is inefficient to use, however, if you know the type
 *** is one of the basic types (int, double, ptr, char).
 **/


/**************************************************************************\
  Private check routines
\**************************************************************************/
extern int super_check_matrix(mtx_matrix_t);
/**
 *** After some very extensive checking, returns an error count.
 *** More or less assumes MTX_DEBUG is TRUE, and that is the only
 *** condition under which this should be called.
 **/


/**************************************************************************\
  Element CREATE/find routines. Please try to confine use of these to
  mtx_basic.c as much as possible.
  Use of find should be avoided at all costs, and in particular
  absolutely no one outside mtx should put their fingers on elements.

  These functions are not exported to generic users because they are
  on the critical path and we cannot afford the sanity checking required.
  They should only be called in contexts where the arguments are
  guaranteed valid.
\**************************************************************************/

struct element_t *mtx_find_element(mtx_matrix_t, int32, int32);
/**
 *** mtx_find_element(mtx,org_row,org_col)
 *** mtx_matrix_t mtx;
 *** int32 org_row;
 *** int32 org_col;
 ***
 *** Searches for the given element of the matrix and returns a pointer to it
 *** if it exists, or NULL if it doesn't exist.
 *** It is *ASSUMED* that org_row
 *** and org_col are legal indices. May crash if they are not.
 **/

struct element_t *mtx_create_element(mtx_matrix_t, int32, int32);
/**
 *** mtx_create_element(mtx,org_row,org_col);
 *** mtx_matrix_t mtx;
 *** int32 org_row;
 *** int32 org_col;
 *** Creates the given element and returns a pointer to it. The value is
 *** initially zero.
 *** It is *ASSUMED* that org_row
 *** and org_col are legal indices. May crash if they are not.
 *** If MTX_DEBUG is TRUE, then we will whine if the element already
 *** exists, but go ahead and create it anyway.
 **/

struct element_t *mtx_create_element_value(mtx_matrix_t, int32,
                                           int32, real64);
/**
 *** mtx_create_element_value(mtx,org_row,org_col,val);
 *** mtx_matrix_t mtx;
 *** int32 org_row;
 *** int32 org_col;
 *** real64 val;
 *** Creates the given element and returns a pointer to it. The value is
 *** initialized to val.
 *** It is *ASSUMED* that org_row
 *** and org_col are legal indices. May crash if they are not.
 *** If MTX_DEBUG is TRUE, then we will whine if the element already
 *** exists, but go ahead and create it anyway.
 **/
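/**
 *** A typical internal usage pattern is find-or-create (a sketch only;
 *** the helper name add_value_at is hypothetical and not part of mtx):
 ***
 ***   static void add_value_at(mtx_matrix_t mtx, int32 org_row,
 ***                            int32 org_col, real64 val)
 ***   {
 ***     struct element_t *elt = mtx_find_element(mtx,org_row,org_col);
 ***     if (elt == NULL) {
 ***       (void)mtx_create_element_value(mtx,org_row,org_col,val);
 ***     } else {
 ***       elt->value += val;
 ***     }
 ***   }
 **/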

/**************************************************************************\
  Element list traversals. No linear algebra programmer with an ounce of
  intelligence would ever need to use these in critical path functions.
\**************************************************************************/
extern struct element_t *mtx_next_col(register struct element_t *,
                                      mtx_range_t *, int32 *);
/**
 *** enext = mtx_next_col(elt,rng,tocur);
 *** struct element_t *elt, *enext;
 *** mtx_range_t *rng;
 *** int32 *tocur;
 ***
 *** Returns the next element after elt that is in the range
 *** rng according to the permutation vector tocur given. May return NULL.
 **/

extern struct element_t *mtx_next_row(register struct element_t *,
                                      mtx_range_t *, int32 *);
/**
 *** enext = mtx_next_row(elt,rng,tocur);
 *** struct element_t *elt, *enext;
 *** mtx_range_t *rng;
 *** int32 *tocur;
 ***
 *** Returns the next element after elt that is in the range
 *** rng according to the permutation vector tocur given. May return NULL.
 **/
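/**
 *** A hedged usage sketch: summing one row's values over a range of
 *** current column indices. It assumes the row list is entered through
 *** mtx->hdr.row[org_row] and that the tocur vector wanted here is
 *** mtx->perm.col.org_to_cur; the helper name row_sum is hypothetical.
 ***
 ***   static real64 row_sum(mtx_matrix_t mtx, int32 org_row, mtx_range_t *rng)
 ***   {
 ***     int32 *tocur = mtx->perm.col.org_to_cur;
 ***     struct element_t *elt = mtx->hdr.row[org_row];
 ***     real64 sum = D_ZERO;
 ***     if (elt != NULL && !in_range(rng,tocur[elt->col]))
 ***       elt = mtx_next_col(elt,rng,tocur);   <- skip an out-of-range head
 ***     while (elt != NULL) {
 ***       sum += elt->value;
 ***       elt = mtx_next_col(elt,rng,tocur);
 ***     }
 ***     return sum;
 ***   }
 **/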

/**************************************************************************\
  Permutation memory management.
\**************************************************************************/
extern int32 *mtx_alloc_perm(int32);
/**
 *** p = mtx_alloc_perm(cap);
 *** int32 cap, *p;
 *** Allocates a permutation vector. The user need
 *** not concern himself with the -1th element, which does exist.
 **/

extern void mtx_copy_perm(int32 *, int32 *, int32);
/**
 *** mtx_copy_perm(tarperm,srcperm,cap)
 *** int32 *tarperm;
 *** int32 *srcperm;
 *** int32 cap;
 *** Copies srcperm to tarperm given the capacity of srcperm.
 *** If tarperm was obtained from mtx_alloc_perm(), the -1th element has
 *** already been copied.
 **/

extern void mtx_free_perm(int32 *);
/**
 *** mtx_free_perm(perm);
 *** int32 *perm;
 **/
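/**
 *** A minimal lifecycle sketch (illustration only; which permutation you
 *** copy and the capacity you pass are up to the caller):
 ***
 ***   int32 *saved = mtx_alloc_perm(mtx->capacity);
 ***   if (saved != NULL) {
 ***     mtx_copy_perm(saved,mtx->perm.row.org_to_cur,mtx->capacity);
 ***     ... stash or use the copy ...
 ***     mtx_free_perm(saved);
 ***   }
 **/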

/**************************************************************************\
  It is advantageous in an interactive system to introduce reusable
  memory and monitor its integrity rather than to repeatedly allocate
  and zero it. The following code accomplishes this for mtx.
  A null_vector is an array of objects (size s, length n) with value 0.
  This sort of memory management is needed because there is always the chance
  that a floating point exception could cause premature return of an mtx
  client. This way we have a safe place to store pointers to the memory
  even if the user's algorithm loses them.
\**************************************************************************/

struct reusable_data_vector {
  void *arr;          /* pointer to an array of objects of size entry_size */
  int capacity;       /* number of object slots in the array */
  size_t entry_size;  /* size of the slots */
  int last_line;      /* line most recently associated with this structure;
                         should be 0 if the array is not in use. */
};

extern struct reusable_data_vector
  g_mtx_null_index_data,       /* bunch of int32 */
  g_mtx_null_sum_data,         /* bunch of mtx_value_t */
  g_mtx_null_mark_data,        /* bunch of char */
  g_mtx_null_vector_data,      /* bunch of element pointers */
  g_mtx_null_col_vector_data,  /* bunch of element pointers */
  g_mtx_null_row_vector_data;  /* bunch of element pointers */

/**
 *** vec = mtx_null_vector(nptrs);
 *** vec = mtx_null_col_vector(nptrs);
 *** vec = mtx_null_row_vector(nptrs);
 *** marks = mtx_null_mark(nchar);
 *** sums = mtx_null_sum(nnums);
 *** indexes = mtx_null_index(ninds);
 ***
 *** struct element_t **vec;
 *** char *marks;
 *** real64 *sums;
 *** int32 *indexes;
 *** int32 nptrs, nchar, nnums, ninds;
 ***
 *** Returns an array of chars, elt pointers, indexes or numbers, all NULL/0.
 *** We need these a lot, but seldom simultaneously, and we generally know
 *** how to rezero them when done with them.
 *** These functions should not be
 *** called again until the vector is re-NULLed and out of use.
 *** If we detect a double call, we will whine loudly, renull
 *** the array ourselves, and give it to you again.
 *** To avoid whining, call the corresponding release function
 *** each time you are done with one of these vectors.
 ***
 *** In the event of insufficient memory (alloc failed) we will
 *** return NULL. If we return NULL, you needn't call the release function.
 ***
 *** mtx_null_vector_release();
 *** mtx_null_col_vector_release();
 *** mtx_null_row_vector_release();
 *** mtx_null_mark_release();
 *** mtx_null_sum_release();
 *** mtx_null_index_release();
 ***
 *** These promote memory reuse.
 *** Calling with cap==0 frees any memory in use.
 *** Client lists -- PLEASE KEEP THIS UP TO DATE --
 *** mtx_null_vector:
 ***   expand_row, expand_col, mtx_assemble,
 ***   mtx_householder_transform
 *** mtx_null_row_vector:
 ***   expand_row_series
 *** mtx_null_col_vector:
 ***   expand_col_series
 *** mtx_null_mark:
 ***   mtx_householder_transform
 *** mtx_null_sum:
 ***   mtx_householder_transform
 *** mtx_null_index:
 ***   mtx_householder_transform
 **/
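/**
 *** The intended discipline, sketched (illustration only; the size passed
 *** and the entries touched depend on the caller):
 ***
 ***   struct element_t **vec = mtx_null_vector(mtx->order);
 ***   if (vec != NULL) {
 ***     ... set and use entries of vec ...
 ***     ... rezero every entry that was touched ...
 ***     mtx_null_vector_release();
 ***   }
 **/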
#define mtx_null_vector(c) \
  ((struct element_t **)mtx_null_vector_f(c,__LINE__,__FILE__, \
                                          &g_mtx_null_vector_data,"null_vector"))
#define mtx_null_row_vector(c) \
  ((struct element_t **)mtx_null_vector_f(c,__LINE__,__FILE__, \
                                          &g_mtx_null_row_vector_data, \
                                          "null_row_vector"))
#define mtx_null_col_vector(c) \
  ((struct element_t **)mtx_null_vector_f(c,__LINE__,__FILE__, \
                                          &g_mtx_null_col_vector_data, \
                                          "null_col_vector"))
#define mtx_null_mark(c) \
  ((char *)mtx_null_vector_f(c,__LINE__,__FILE__, \
                             &g_mtx_null_mark_data,"null_mark"))
#define mtx_null_sum(c) \
  ((real64 *)mtx_null_vector_f(c,__LINE__,__FILE__, \
                               &g_mtx_null_sum_data,"null_sum"))
#define mtx_null_index(c) \
  ((int32 *)mtx_null_vector_f(c,__LINE__,__FILE__, \
                              &g_mtx_null_index_data,"null_index"))

#define mtx_null_vector_release() \
  mtx_null_vector_release_f(__LINE__,__FILE__, \
                            &g_mtx_null_vector_data,"null_vector")
#define mtx_null_col_vector_release() \
  mtx_null_vector_release_f(__LINE__,__FILE__, \
                            &g_mtx_null_col_vector_data,"null_col_vector")
#define mtx_null_row_vector_release() \
  mtx_null_vector_release_f(__LINE__,__FILE__, \
                            &g_mtx_null_row_vector_data,"null_row_vector")
#define mtx_null_mark_release() \
  mtx_null_vector_release_f(__LINE__,__FILE__, \
                            &g_mtx_null_mark_data,"null_mark")
#define mtx_null_sum_release() \
  mtx_null_vector_release_f(__LINE__,__FILE__, \
                            &g_mtx_null_sum_data,"null_sum")
#define mtx_null_index_release() \
  mtx_null_vector_release_f(__LINE__,__FILE__, \
                            &g_mtx_null_index_data,"null_index")

extern void *mtx_null_vector_f(int32, int, CONST char *,
                               struct reusable_data_vector *, char *);
/**
 *** v = mtx_null_vector_f(cap,line,file,ptr,fn);
 *** int32 cap;
 *** int line;
 *** CONST char *file;
 *** struct reusable_data_vector *ptr;
 *** char *fn;
 ***
 *** Returns a pointer to cap*ptr->entry_size bytes, which must be cast.
 *** The memory pointed at is believed to be zero, and will be if the
 *** user is properly rezeroing the vector before it is released.
 *** If insufficient memory is available, this whines and returns NULL.
 *** Calling this with cap==0 causes the reused memory to be deallocated and
 *** returns NULL.
 *** Call this only via the macros, please.
 **/

extern void mtx_null_vector_release_f(int, CONST char *,
                                      struct reusable_data_vector *, char *);
/**
 *** mtx_null_vector_release_f(line,file,ptr,fn);
 *** int line;
 *** CONST char *file;
 *** struct reusable_data_vector *ptr;
 *** char *fn;
 ***
 *** Marks a vector as not in use, or whines if it wasn't in use.
 *** Does no other checking. Uses line, file and fn in error reporting.
 *** Please use the macros to access this function.
 **/

extern void mtx_reset_null_vectors(void);
/**
 *** This resets the reusable arrays of zeroes to zero in the event
 *** that they may have been corrupted.
 **/

/*
** INTERNAL element vector operations of some utility.
*/

extern struct element_t **mtx_expand_row(mtx_matrix_t, int32);
/**
 *** buf = mtx_expand_row(mtx,orgrow);
 *** mtx_matrix_t mtx;
 *** int32 orgrow;
 *** struct element_t **buf;
 ***
 *** Expands the given row into an array of pointers, indexed on original
 *** col number. The array is obtained from mtx_null_vector().
 *** Be sure to call mtx_null_vector_release() when you are done with the
 *** vector and have rezeroed it.
 *** You cannot call this twice without releasing first, nor call
 *** mtx_expand_col in the meantime.
 **/
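/**
 *** A hedged sketch of the expected discipline (illustration only):
 ***
 ***   struct element_t **buf = mtx_expand_row(mtx,orgrow);
 ***   if (buf != NULL) {
 ***     ... random access by original column number, e.g. buf[orgcol] ...
 ***     mtx_renull_using_row(mtx,orgrow,buf);
 ***     mtx_null_vector_release();
 ***   }
 **/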

extern struct element_t **mtx_expand_col(mtx_matrix_t, int32);
/**
 *** buf = mtx_expand_col(mtx,orgcol);
 *** mtx_matrix_t mtx;
 *** int32 orgcol;
 *** struct element_t **buf;
 ***
 *** Expands the given col into an array of pointers, indexed on original
 *** row number. The array is obtained from mtx_null_vector().
 *** Be sure to call mtx_null_vector_release() when you are done with the
 *** vector and have rezeroed it.
 *** You cannot call this twice without releasing first, nor call
 *** mtx_expand_row in the meantime.
 **/

extern void mtx_renull_using_row(mtx_matrix_t, int32,
                                 struct element_t **);
/**
 *** mtx_renull_using_row(mtx,orgrow,arr)
 *** mtx_matrix_t mtx;
 *** int32 orgrow;
 *** struct element_t **arr;
 ***
 *** Makes arr NULLed again, assuming that the only non-NULL elements
 *** must correspond to original col numbers that exist in the given
 *** orgrow.
 **/

extern void mtx_renull_using_col(mtx_matrix_t, int32,
                                 struct element_t **);
/**
 *** mtx_renull_using_col(mtx,orgcol,arr);
 *** mtx_matrix_t mtx;
 *** int32 orgcol;
 *** struct element_t **arr;
 ***
 *** Makes arr NULLed again, assuming that the only non-NULL elements
 *** must correspond to original row numbers that exist in the given
 *** orgcol.
 **/

extern void mtx_renull_all(mtx_matrix_t, struct element_t **);
/**
 *** mtx_renull_all(mtx,arr);
 *** mtx_matrix_t mtx;
 *** struct element_t **arr;
 ***
 *** Makes arr NULLed again, assuming it is of size mtx->order.
 **/

#endif /* __MTX_INTERNAL_USE_ONLY_H__ */
#endif /* none of your business if you aren't mtx_*.c */