1 |
/* |
2 |
* mtx2: Ascend Sparse Matrix Package |
3 |
* by Benjamin Andrew Allan |
4 |
* Derived from mtx by Karl Michael Westerberg |
5 |
* Created: 5/3/90 |
6 |
* Version: $Revision: 1.9 $ |
7 |
* Version control file: $RCSfile: mtx_query.h,v $ |
8 |
* Date last modified: $Date: 1997/07/18 12:15:12 $ |
9 |
* Last modified by: $Author: mthomas $ |
10 |
* |
11 |
* This file is part of the SLV solver. |
12 |
* |
13 |
* Copyright (C) 1996 Benjamin Andrew Allan |
14 |
* |
15 |
* The SLV solver is free software; you can redistribute |
16 |
* it and/or modify it under the terms of the GNU General Public License as |
17 |
* published by the Free Software Foundation; either version 2 of the |
18 |
* License, or (at your option) any later version. |
19 |
* |
20 |
* The SLV solver is distributed in hope that it will be |
21 |
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of |
22 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
23 |
* General Public License for more details. |
24 |
* |
25 |
* You should have received a copy of the GNU General Public License along with |
26 |
* the program; if not, write to the Free Software Foundation, Inc., 675 |
27 |
* Mass Ave, Cambridge, MA 02139 USA. Check the file named COPYING. |
28 |
* COPYING is found in ../compiler. |
29 |
*/ |
30 |
|
31 |
/** @file |
32 |
* mtx2: Ascend Sparse Matrix Package. |
33 |
* <pre> |
34 |
* requires: #include "utilities/ascConfig.h" |
35 |
* requires: #include "mtx.h" |
36 |
* </pre> |
37 |
*/ |
38 |
|
39 |
#ifndef __MTX_QUERY_H_SEEN__ |
40 |
#define __MTX_QUERY_H_SEEN__ |
41 |
|
42 |
extern real64 mtx_next_in_row(mtx_matrix_t matrix, |
43 |
mtx_coord_t *coord, |
44 |
mtx_range_t *colrng); |
45 |
/**< See mtx_next_in_col(), switching row & column references. */ |
46 |
extern real64 mtx_next_in_col(mtx_matrix_t matrix, |
47 |
mtx_coord_t *coord, |
48 |
mtx_range_t *rowrng); |
49 |
/**< |
50 |
<!-- -$- value = mtx_next_in_row(matrix,coord,colrng) --> |
51 |
<!-- -$- value = mtx_next_in_col(matrix,coord,rowrng) --> |
52 |
*** <!-- real64 value; --> |
53 |
*** <!-- mtx_matrix_t matrix; --> |
54 |
*** <!-- mtx_coord_t *coord; --> |
55 |
*** <!-- mtx_range_t *colrng,*rowrng; --> |
56 |
*** |
57 |
*** <!-- mtx_next_in_row(): --> |
58 |
*** Searches for the next non-zero in the given row, whose column index |
59 |
*** lies in the given range, and returns its value. Coord->col is set to |
60 |
*** the column in which it was found. To find the first non-zero in the |
61 |
*** row, set coord->col = mtx_FIRST. Coord->col will equal mtx_LAST and |
62 |
*** 0.0 will be returned if there is no next non-zero. The row (and mtx!) |
63 |
*** searched is always the one specified when mtx_next_in_row was most |
64 |
*** recently called with a coord such that coord->col == mtx_FIRST. |
65 |
*** If colrng.low > colrng.high, coord->col is set to mtx_LAST, which gives correct |
66 |
*** behaviour for mtx/vector processing loops. |
67 |
*** If nz.row is not in the matrix (<0 || >= mtx order) on an initial |
68 |
*** call (with nz.col == mtx_FIRST) returns 0.0 and nz.col == mtx_LAST. |
69 |
*** |
70 |
*** <!-- mtx_next_in_col(): --> |
71 |
*** <!-- Switch row <--> column in the discussion above. --> |
72 |
*** |
73 |
*** Example of canonical usage: <pre> |
74 |
*** nz.col = col; (* the col to be traversed *) |
75 |
*** nz.row = mtx_FIRST; (* initializer, which must be used *) |
76 |
*** while( value = mtx_next_in_col(mtx,&nz,rowrng), |
77 |
*** nz.row != mtx_LAST ) { (* your thing here *) } </pre> |
78 |
*** Note that the logic test of the while is ONLY nz.row != mtx_LAST. |
79 |
*** C lets you do whatever you choose before the comma.<br><br> |
80 |
*** |
81 |
-$- Returns FALSE from a bad matrix.<br><br> |
82 |
*** |
83 |
*** NOTE: The notion of "next non-zero" is arbitrary, but will not change |
84 |
*** until the next destructive operation on the matrix is performed. |
85 |
*** Neither the row/col number nor the rel/var index will |
86 |
*** behave monotonically while traversing a col/row. This means there |
87 |
*** is nothing you can do with the range to shrink the search space |
88 |
*** based on the last row/col returned. Basic matrix operations, when |
89 |
*** properly thought about in a general sparse context, do not care |
90 |
*** about the order in which the elements of vectors are processed. |
91 |
*** <pre> |
92 |
*** WARNINGS: |
93 |
!1! - You may NOT nest mtx_next_in_col within mtx_next_in_col |
94 |
! ! or mtx_next_in_row within mtx_next_in_row. We have yet to find a |
95 |
! ! process suggestive of doing so that was not better handled with |
96 |
! ! a for loop. This is a global constraint. Violation results in garbage. |
97 |
!2! - You may NOT safely perform operation mtx_del_zr_in_col while traversing |
98 |
! ! a column unless the value just returned from the previous next_in_col is |
99 |
! ! nonzero or the coord.row last returned is mtx_LAST. Also, you may NOT |
100 |
! ! safely perform operation mtx_del_zr_in_row on coord.row unless the |
101 |
! ! value just returned from the previous next_in_col is nonzero. |
102 |
! ! The proscription here is for mtx_next_in_ and mtx_del_zr_in_ operating |
103 |
! ! on the same matrix. mtx_del_zr_in_ is safe to call if the mtx_next_in_ |
104 |
! ! driving it is happening on a different matrix. |
105 |
!3! Transpose warning 2 for next_in_row. |
106 |
! ! Violation of 2 or 3 results, sooner or later, in a memory fault. |
107 |
*** You CAN nest mtx_next_in_col within mtx_next_in_row or vice versa, |
108 |
*** however. |
109 |
*** </pre> |
110 |
**/ |
111 |
|
112 |
extern real64 mtx_row_max(mtx_matrix_t matrix, |
113 |
mtx_coord_t *coord, |
114 |
mtx_range_t *colrng, |
115 |
real64 *signval); |
116 |
/**< See mtx_col_max(), switching row & column references. */ |
117 |
extern real64 mtx_col_max(mtx_matrix_t matrix, |
118 |
mtx_coord_t *coord, |
119 |
mtx_range_t *rowrng, |
120 |
real64 *signval); |
121 |
/**< |
122 |
<!-- -$- value = mtx_row_max(matrix,coord,colrng,signval) --> |
123 |
<!-- -$- value = mtx_col_max(matrix,coord,rowrng,signval) --> |
124 |
<!-- -$- value = mtx_row_min(matrix,coord,colrng,signval) --> |
125 |
<!-- -$- value = mtx_col_min(matrix,coord,rowrng,signval) --> |
126 |
*** <!-- real64 value, *signval; --> |
127 |
*** <!-- mtx_matrix_t matrix; --> |
128 |
*** <!-- mtx_coord_t *coord; --> |
129 |
*** <!-- mtx_range_t *colrng,*rowrng; --> |
130 |
*** |
131 |
*** <!-- mtx_row_max(): --> |
132 |
*** Searches for the element in the given row, with column index in the |
133 |
*** given column range, which has the largest absolute value. The user |
134 |
*** should set coord->row to the desired row to search, and this function |
135 |
*** will set coord->col to the column index where the maximum was found |
136 |
*** (or mtx_NONE if no non-zero was found in that range). The absolute |
137 |
*** value of that element is also returned. If the pointer signval is not |
138 |
*** NULL, the real64 pointed to will be stuffed with the signed |
139 |
*** value of the maximum sized element. |
140 |
*** In the event of ties, the element with the lowest current column |
141 |
*** index wins.<br><br> |
142 |
*** |
143 |
*** <!-- mtx_col_max(): --> |
144 |
*** <!-- Replace row <--> column above. --> |
145 |
*** |
146 |
-$- Returns -1.0 from a bad matrix. |
147 |
**/ |
148 |
extern real64 mtx_row_min(mtx_matrix_t matrix, |
149 |
mtx_coord_t *coord, |
150 |
mtx_range_t *colrng, |
151 |
real64 *signval, |
152 |
real64 minval); |
153 |
/**< See mtx_col_min(), switching row & column references. */ |
154 |
extern real64 mtx_col_min(mtx_matrix_t matrix, |
155 |
mtx_coord_t *coord, |
156 |
mtx_range_t *rowrng, |
157 |
real64 *signval, |
158 |
real64 minval); |
159 |
/**< |
160 |
*** Searches for the element in the given row, with column index in the |
161 |
*** given column range, which has the smallest absolute value. The user |
162 |
*** should set coord->row to the desired row to search, and this function |
163 |
*** will set coord->col to the column index where the minimum was found |
164 |
*** (or mtx_NONE if no non-zero was found in that range). The absolute |
165 |
*** value of that element is also returned. If the pointer signval is not |
166 |
*** NULL, the real64 pointed to will be stuffed with the signed |
167 |
*** value of the minimum sized element. |
168 |
*** In the event of ties, the element with the lowest current column |
169 |
*** index wins.<br><br> |
170 |
*** This function only looks at the nonzero elements, and will only |
171 |
*** find numbers between minval and 1e50. If no number is found signval |
172 |
*** will be zero and |
173 |
*** value will be one.<br><br> |
174 |
*** |
175 |
-$- Returns -1.0 from a bad matrix. |
176 |
**/ |
177 |
/* OLD GROUP COMMENTS */ |
178 |
/* |
179 |
-$- value = mtx_row_min(matrix,coord,colrng,signval,minval) |
180 |
-$- value = mtx_col_min(matrix,coord,rowrng,signval,minval) |
181 |
*** real64 value, *signval, minval; |
182 |
*** mtx_matrix_t matrix; |
183 |
*** mtx_coord_t *coord; |
184 |
*** mtx_range_t *colrng,*rowrng; |
185 |
*** |
186 |
*** These functions are basically defined as their max counterparts |
187 |
*** above with the appropriate swapping of max<-->min. |
188 |
*** This function only looks at the nonzero elements |
189 |
*** The min functions will only find numbers between minval |
190 |
*** and 1e50. If no number is found signval will be zero and |
191 |
*** value will be one. |
192 |
**/ |
193 |
|
194 |
extern real64 mtx_get_pivot_col(mtx_matrix_t matrix, |
195 |
mtx_coord_t *coord, |
196 |
mtx_range_t *colrng, |
197 |
real64 *signval, |
198 |
real64 tol, |
199 |
real64 eps); |
200 |
/**< See mtx_get_pivot_row(), switching row & column references. */ |
201 |
extern real64 mtx_get_pivot_row(mtx_matrix_t matrix, |
202 |
mtx_coord_t *coord, |
203 |
mtx_range_t *rowrng, |
204 |
real64 *signval, |
205 |
real64 tol, |
206 |
real64 eps); |
207 |
/**< |
208 |
<!-- -$- value = mtx_get_pivot_col(matrix,coord,colrng,signval,tol,eps) --> |
209 |
<!-- -$- value = mtx_get_pivot_row(matrix,coord,rowrng,signval,tol,eps) --> |
210 |
*** <!-- real64 value, *signval; --> |
211 |
*** <!-- mtx_matrix_t matrix; --> |
212 |
*** <!-- mtx_coord_t *coord; --> |
213 |
*** <!-- mtx_range_t *colrng,*rowrng; --> |
214 |
*** |
215 |
*** This implements efficiently the standard sparse modification |
216 |
*** of LU partial pivot selection. |
217 |
*** |
218 |
*** <!-- mtx_get_pivot_col(); --> |
219 |
*** Searches for the leftmost element in the colrng of the given row, |
220 |
*** which passes the sparse partial pivoting criteria: |
221 |
*** -# aij >= eps, |
222 |
*** -# aij >= tol * max_abs_element_value_in_colrng. |
223 |
*** The absolute value of the passing element is returned, or 0.0 if |
224 |
*** there are no entries that pass criterion 1. The user |
225 |
*** should set coord->row to the desired row to search. This function |
226 |
*** will set coord->col to the column index where the result was found |
227 |
*** (or mtx_NONE if nothing good was found in that range). The absolute |
228 |
*** value of that element is also returned. If the pointer signval is not |
229 |
*** NULL, the real64 pointed to will be stuffed with the signed |
230 |
*** value of the selected element. |
231 |
*** This function is faster when colrng == mtx_ALL_COLS can be used.<br><br> |
232 |
*** |
233 |
*** <!-- mtx_get_pivot_row(): --> |
234 |
*** <!-- Replace row <--> column above. --> |
235 |
-$- Returns -1.0 from a bad matrix. |
236 |
**/ |
237 |
|
238 |
extern int32 mtx_nonzeros_in_row(mtx_matrix_t matrix, |
239 |
int32 row, |
240 |
mtx_range_t *colrng); |
241 |
/**< |
242 |
*** Counts the number of incidences in the given row whose column index |
243 |
*** lies in the given column range. |
244 |
-$- Returns -1 from a bad matrix. |
245 |
**/ |
246 |
extern int32 mtx_nonzeros_in_col(mtx_matrix_t matrix, |
247 |
int32 col, |
248 |
mtx_range_t *rowrng); |
249 |
/**< |
250 |
*** Counts the number of incidences in the given column whose row index |
251 |
*** lies in the given row range. |
252 |
-$- Returns -1 from a bad matrix. |
253 |
**/ |
254 |
extern int32 mtx_nonzeros_in_region(mtx_matrix_t matrix, |
255 |
mtx_region_t *reg); |
256 |
/**< |
257 |
*** Counts the non-zero values in the given region. |
258 |
-$- Returns -1 from a bad matrix. |
259 |
**/ |
260 |
extern int32 mtx_numbers_in_row(mtx_matrix_t matrix, |
261 |
int32 row, |
262 |
mtx_range_t *colrng); |
263 |
/**< |
264 |
*** Counts the non-zero values in the given row whose column index |
265 |
*** lies in the given column range. |
266 |
-$- Returns -1 from a bad matrix. |
267 |
**/ |
268 |
extern int32 mtx_numbers_in_col(mtx_matrix_t matrix, |
269 |
int32 col, |
270 |
mtx_range_t *rowrng); |
271 |
/**< |
272 |
*** Counts the non-zero values in the given column whose row index |
273 |
*** lies in the given row range. |
274 |
-$- Returns -1 from a bad matrix. |
275 |
**/ |
276 |
extern int32 mtx_numbers_in_region(mtx_matrix_t matrix, |
277 |
mtx_region_t *reg); |
278 |
/**< |
279 |
*** Counts the number of incidences in the given region. |
280 |
-$- Returns -1 from a bad matrix. |
281 |
**/ |
282 |
/* OLD GROUP COMMENT */ |
283 |
/* |
284 |
-$- count = mtx_nonzeros_in_row(matrix,row,colrng) |
285 |
-$- count = mtx_nonzeros_in_col(matrix,col,rowrng) |
286 |
-$- count = mtx_nonzeros_in_region(matrix,reg) |
287 |
-$- count = mtx_numbers_in_row(matrix,row,colrng) |
288 |
-$- count = mtx_numbers_in_col(matrix,col,rowrng) |
289 |
-$- count = mtx_numbers_in_region(matrix,reg) |
290 |
*** int32 count; |
291 |
*** mtx_matrix_t matrix; |
292 |
*** int32 row,col; |
293 |
*** mtx_range_t *colrng,*rowrng; |
294 |
*** mtx_region_t *reg; |
295 |
*** |
296 |
*** mtx_nonzeros_in_row(): |
297 |
*** Counts the number of incidences in the given row whose column index |
298 |
*** lies in the given column range. |
299 |
*** |
300 |
*** mtx_nonzeros_in_col(): |
301 |
*** Replace row <--> column above. |
302 |
*** |
303 |
*** mtx_nonzeros_in_region(): |
304 |
*** Counts the number of incidences in the given region. |
305 |
*** |
306 |
*** mtx_numbers_in_row(): |
307 |
*** Counts the non-zero values in the given row whose column index |
308 |
*** lies in the given column range. |
309 |
*** |
310 |
*** mtx_numbers_in_col(): |
311 |
*** Replace row <--> column above. |
312 |
*** |
313 |
*** mtx_numbers_in_region(): |
314 |
*** Counts the non-zero values in the given region. |
315 |
*** |
316 |
-$- Returns -1 from a bad matrix. |
317 |
**/ |
318 |
|
319 |
/* ********************************************************************* *\ |
320 |
mtx vector operation routines |
321 |
None of these routines care about master/slave status. |
322 |
\* ********************************************************************* */ |
323 |
|
324 |
/* *************************************************************** *\ |
325 |
Dense vector operations, rather analogous to the mtx_value suite. |
326 |
These are tools for data motion. No dense-dense arithmetic operators |
327 |
are provided as these are best left to the user to design or steal |
328 |
from elsewhere, e.g. blas. The (de)allocation of dense vectors is |
329 |
  the user's job, as is ensuring that the dense vectors used are |
330 |
  large enough to accommodate operations in the range of the given |
331 |
mtx_range_t. mtx->order is a safe size to use if you can't think |
332 |
of something else. |
333 |
\* *************************************************************** */ |
334 |
|
335 |
extern void mtx_org_row_vec(mtx_matrix_t mtx, int32 row, |
336 |
real64 *vec, mtx_range_t *colrng); |
337 |
/**< See mtx_cur_col_vec(), switching row & column references. */ |
338 |
extern void mtx_org_col_vec(mtx_matrix_t mtx, int32 col, |
339 |
real64 *vec, mtx_range_t *rowrng); |
340 |
/**< See mtx_cur_col_vec(). */ |
341 |
extern void mtx_cur_row_vec(mtx_matrix_t mtx, int32 row, |
342 |
real64 *vec, mtx_range_t *colrng); |
343 |
/**< See mtx_cur_col_vec(), switching row & column references. */ |
344 |
extern void mtx_cur_col_vec(mtx_matrix_t mtx, int32 col, |
345 |
real64 *vec, mtx_range_t *rowrng); |
346 |
/**< |
347 |
<!-- -$- mtx_org_row_vec(mtx,row,vec,colrng) --> |
348 |
<!-- -$- mtx_org_col_vec(mtx,col,vec,rowrng) --> |
349 |
<!-- -$- mtx_cur_row_vec(mtx,row,vec,colrng) --> |
350 |
<!-- -$- mtx_cur_col_vec(mtx,col,vec,rowrng) --> |
351 |
*** <!-- mtx_matrix_t mtx; --> |
352 |
*** <!-- int32 row,col; --> |
353 |
*** <!-- real64 *vec; --> |
354 |
*** <!-- mtx_range_t *colrng,*rowrng; --> |
355 |
*** |
356 |
*** <!-- mtx_org/cur_col_vec: --> |
357 |
*** The user is expected to supply the vec; we cannot check it. |
358 |
*** Copies the mtx nonzeros currently within rowrng INTO array vec which is |
359 |
*** indexed by org/cur row number. Does not affect other |
360 |
*** entries of vec in or outside the range. In particular, vec |
361 |
*** is NOT zeroed within the range unless there is a matrix element |
362 |
*** with value zero at that location.<br><br> |
363 |
*** |
364 |
*** <!-- mtx_org/cur_row_vec: --> |
365 |
*** <!-- Switch row <--> col in above. --> |
366 |
*** |
367 |
*** Notes: It is faster to call this with mtx_ALL_COLS/ROWS when |
368 |
*** the row/col of interest is known to have incidence exclusively in |
369 |
*** the range of interest.<br><br> |
370 |
*** |
371 |
-$- Fetches nothing from a bad matrix. |
372 |
**/ |
373 |
|
374 |
/* *************************************************************** *\ |
375 |
Sparse vector operations, rather analogous to the mtx_value suite. |
376 |
These are tools for data motion. No arithmetic operators |
377 |
are provided as yet. The deallocation of sparse vectors is |
378 |
the user's job. See the notes at the top of this header for the |
379 |
data semantics of a sparse vector. |
380 |
The user may construct a sparse vector. |
381 |
Calls which return a sparse vector may create or use and return |
382 |
a user supplied sparse vector. These calls are at liberty to |
383 |
reallocate the data memory if that supplied is insufficient to |
384 |
hold the data. The len and cap values of the vector will be reset |
385 |
as appropriate. |
386 |
|
387 |
Functions do not create a sparse unless it says in their header |
388 |
that mtx_CREATE_SPARSE is a valid argument. |
389 |
\* *************************************************************** */ |
390 |
|
391 |
extern mtx_sparse_t *mtx_org_row_sparse(mtx_matrix_t mtx, |
392 |
int32 row, |
393 |
mtx_sparse_t * const sparse, |
394 |
mtx_range_t *colrng, |
395 |
int zeroes); |
396 |
/**< See mtx_cur_col_sparse(), switching row & column references. */ |
397 |
extern mtx_sparse_t *mtx_org_col_sparse(mtx_matrix_t mtx, |
398 |
int32 col, |
399 |
mtx_sparse_t * const sparse, |
400 |
mtx_range_t *rowrng, |
401 |
int zeroes); |
402 |
/**< See mtx_cur_col_sparse(). */ |
403 |
extern mtx_sparse_t *mtx_cur_row_sparse(mtx_matrix_t mtx, |
404 |
int32 row, |
405 |
mtx_sparse_t * const sparse, |
406 |
mtx_range_t *colrng, |
407 |
int zeroes); |
408 |
/**< See mtx_cur_col_sparse(), switching row & column references. */ |
409 |
extern mtx_sparse_t *mtx_cur_col_sparse(mtx_matrix_t mtx, |
410 |
int32 col, |
411 |
mtx_sparse_t * const sparse, |
412 |
mtx_range_t *rowrng, |
413 |
int zeroes); |
414 |
/**< |
415 |
<!-- -$- sparse = mtx_org_row_sparse(mtx,row,sparse,colrng,zeroes) --> |
416 |
<!-- -$- sparse = mtx_org_col_sparse(mtx,col,sparse,rowrng,zeroes) --> |
417 |
<!-- -$- sparse = mtx_cur_row_sparse(mtx,row,sparse,colrng,zeroes) --> |
418 |
<!-- -$- sparse = mtx_cur_col_sparse(mtx,col,sparse,rowrng,zeroes) --> |
419 |
*** <!-- mtx_matrix_t mtx; --> |
420 |
*** <!-- int32 row,col; --> |
421 |
*** <!-- mtx_sparse_t *sparse; --> |
422 |
*** <!-- mtx_range_t *colrng,*rowrng; --> |
423 |
*** <!-- int zeroes; --> |
424 |
*** |
425 |
*** <!-- mtx_org/cur_col_sparse: --> |
426 |
*** Copies the mtx nonzeros currently within rowrng to the sparse, |
427 |
*** indexing by org/cur row number. Nonzeros with value 0.0 WILL |
428 |
*** be included in the sparse iff the zeroes argument is mtx_SOFT_ZEROES. |
429 |
*** sparse->len will be set accordingly. |
430 |
*** |
431 |
*** <!-- Switch row <--> col in above. --> |
432 |
*** <!-- mtx_org/cur_row_sparse: --> |
433 |
*** |
434 |
*** The user must supply the sparse. It will not be enlarged. |
435 |
*** mtx_CREATE_SPARSE is not a valid argument. |
436 |
*** If capacity of the sparse given is insufficient, we will |
437 |
*** copy as much data as will fit into sparse and return NULL. |
438 |
*** User beware!<br><br> |
439 |
*** |
440 |
*** Notes: It is faster to call this with mtx_ALL_COLS/ROWS when |
441 |
*** the row/col of interest is known to have incidence exclusively in |
442 |
*** the range of interest.<br><br> |
443 |
*** |
444 |
-$- Fetches nothing from a bad matrix. |
445 |
**/ |
446 |
|
447 |
extern void mtx_zr_org_vec_using_row(mtx_matrix_t mtx, int32 row, |
448 |
real64 *vec, mtx_range_t *colrng); |
449 |
/**< See mtx_zr_cur_vec_using_col(), switching row & column references. */ |
450 |
extern void mtx_zr_org_vec_using_col(mtx_matrix_t mtx, int32 col, |
451 |
real64 *vec, mtx_range_t *rowrng); |
452 |
/**< See mtx_zr_cur_vec_using_col(). */ |
453 |
extern void mtx_zr_cur_vec_using_row(mtx_matrix_t mtx, int32 row, |
454 |
real64 *vec, mtx_range_t *colrng); |
455 |
/**< See mtx_zr_cur_vec_using_col(), switching row & column references. */ |
456 |
extern void mtx_zr_cur_vec_using_col(mtx_matrix_t mtx, int32 col, |
457 |
real64 *vec, mtx_range_t *rowrng); |
458 |
/**< |
459 |
<!-- -$- mtx_zr_org_vec_using_row(mtx,row,vec,colrng) --> |
460 |
<!-- -$- mtx_zr_org_vec_using_col(mtx,col,vec,rowrng) --> |
461 |
<!-- -$- mtx_zr_cur_vec_using_row(mtx,row,vec,colrng) --> |
462 |
<!-- -$- mtx_zr_cur_vec_using_col(mtx,col,vec,rowrng) --> |
463 |
*** <!-- mtx_matrix_t mtx; --> |
464 |
*** <!-- int32 row,col; --> |
465 |
*** <!-- real64 *vec; --> |
466 |
*** <!-- mtx_range_t *colrng,*rowrng; --> |
467 |
*** |
468 |
*** <!-- mtx_zr_org/cur_vec_using_col: --> |
469 |
*** Sets the values of vec (indexed by org/cur col) corresponding to |
470 |
*** incidences in rowrng to 0.0.<br><br> |
471 |
*** |
472 |
*** <!-- mtx_zr_org/cur_vec_using_row: --> |
473 |
*** <!-- Switch row <--> col in above. --> |
474 |
*** |
475 |
*** Notes: It is faster to call this with mtx_ALL_COLS/ROWS when |
476 |
*** practical, and the org flavor is faster than the cur flavor.<br><br> |
477 |
*** |
478 |
-$- Does nothing given a bad matrix. |
479 |
**/ |
480 |
|
481 |
extern real64 mtx_sum_sqrs_in_row(mtx_matrix_t mtx, int32 row, |
482 |
const mtx_range_t *colrng); |
483 |
/**< |
484 |
*** Compute sum of squares of non-zeros in the given row whose column index |
485 |
*** lies in the given column range. |
486 |
*** |
487 |
-$- Returns 0.0 from a bad matrix. |
488 |
**/ |
489 |
extern real64 mtx_sum_sqrs_in_col(mtx_matrix_t mtx, int32 col, |
490 |
const mtx_range_t *rowrng); |
491 |
/**< |
492 |
*** Compute sum of squares of non-zeros in the given column whose row index |
493 |
*** lies in the given row range. |
494 |
*** |
495 |
-$- Returns 0.0 from a bad matrix. |
496 |
**/ |
497 |
extern real64 mtx_sum_abs_in_row(mtx_matrix_t mtx, int32 row, |
498 |
const mtx_range_t *colrng); |
499 |
/**< |
500 |
*** Compute sum of absolute values of non-zeros in the |
501 |
*** given row whose column index lies in the given column range. |
502 |
*** |
503 |
-$- Returns 0.0 from a bad matrix. |
504 |
**/ |
505 |
extern real64 mtx_sum_abs_in_col(mtx_matrix_t mtx, int32 col, |
506 |
const mtx_range_t *rowrng); |
507 |
/**< |
508 |
*** Compute sum of absolute values of non-zeros in the |
509 |
*** given column whose row index lies in the given row range. |
510 |
*** |
511 |
-$- Returns 0.0 from a bad matrix. |
512 |
**/ |
513 |
/* OLD GROUP COMMENTS */ |
514 |
/* |
515 |
-$- sum = mtx_sum_sqrs_in_row(matrix,row,colrng) |
516 |
-$- sum = mtx_sum_sqrs_in_col(matrix,col,rowrng) |
517 |
-$- sum = mtx_sum_abs_in_row(matrix,row,colrng) |
518 |
-$- sum = mtx_sum_abs_in_col(matrix,col,rowrng) |
519 |
*** real64 sum; |
520 |
*** mtx_matrix_t matrix; |
521 |
*** int32 row,col; |
522 |
*** mtx_range_t *colrng,*rowrng; |
523 |
*** |
524 |
*** mtx_sum_sqrs_in_row(): |
525 |
*** Compute sum of squares of non-zeros in the given row whose column index |
526 |
*** lies in the given column range. |
527 |
*** mtx_sum_abs_in_row(): |
528 |
*** Compute sum of absolute values of non-zeros in the |
529 |
*** given row whose column index lies in the given column range. |
530 |
*** |
531 |
*** mtx_sum_sqrs_in_col(): |
532 |
*** Replace row <--> column above. |
533 |
*** mtx_sum_abs_in_col(): |
534 |
*** Replace row <--> column above. |
535 |
-$- Returns 0.0 from a bad matrix. |
536 |
**/ |
537 |
|
538 |
extern real64 mtx_col_dot_full_org_vec(mtx_matrix_t mtx, |
539 |
int32 col, |
540 |
real64 *orgvec, |
541 |
mtx_range_t *rowrng, |
542 |
boolean transpose); |
543 |
/**< See mtx_row_dot_full_org_vec(), switching row & column references. */ |
544 |
extern real64 mtx_row_dot_full_org_vec(mtx_matrix_t mtx, |
545 |
int32 row, |
546 |
real64 *orgvec, |
547 |
mtx_range_t *colrng, |
548 |
boolean transpose); |
549 |
/**< |
550 |
<!-- -$- sum = mtx_row_dot_full_org_vec(matrix,row,orgvec,colrng,transpose) --> |
551 |
<!-- -$- sum = mtx_col_dot_full_org_vec(matrix,col,orgvec,rowrng,transpose) --> |
552 |
*** <!-- real64 sum; --> |
553 |
*** <!-- mtx_matrix_t matrix; --> |
554 |
*** <!-- int32 row,col; --> |
555 |
*** <!-- real64 *orgvec; --> |
556 |
*** <!-- mtx_range_t *colrng,*rowrng; --> |
557 |
*** <!-- boolean transpose; --> |
558 |
*** |
559 |
*** <!-- mtx_row_dot_full_org_vec: --> |
560 |
*** <pre> |
561 |
*** For transpose==FALSE: |
562 |
*** Compute the dot product of the row given with the org_col indexed |
563 |
*** orgvec over the colrng given (colrng being the cur indexed |
564 |
*** limits as usual.) |
565 |
*** i.e. SUM( mtx(row,col_cur) * orgvec[mtx_col_to_org(mtx,col_cur)] ) |
566 |
*** for all inrange(col_cur). |
567 |
*** |
568 |
*** For transpose==TRUE: |
569 |
*** Compute the dot product of the row given with the org_row indexed |
570 |
*** orgvec over the colrng given (colrng being the cur indexed |
571 |
*** limits as usual.) |
572 |
*** i.e. SUM( mtx(row,col_cur) * orgvec[mtx_row_to_org(mtx,col_cur)] ) |
573 |
*** for all inrange(col_cur). |
574 |
*** </pre> |
575 |
*** <!-- mtx_col_dot_full_org_vec: --> |
576 |
*** <!-- Replace row <--> col above. --> |
577 |
-$- Returns 0.0 from a bad matrix. |
578 |
**/ |
579 |
|
580 |
extern real64 mtx_col_dot_full_cur_vec(mtx_matrix_t mtx, |
581 |
int32 col, |
582 |
real64 *curcolvec, |
583 |
mtx_range_t *rowrng, |
584 |
boolean transpose); |
585 |
/**< See mtx_row_dot_full_cur_vec(), switching row & column references. */ |
586 |
extern real64 mtx_row_dot_full_cur_vec(mtx_matrix_t mtx, |
587 |
int32 row, |
588 |
real64 *currowvec, |
589 |
mtx_range_t *colrng, |
590 |
boolean transpose); |
591 |
/**< |
592 |
<!-- -$- sum = mtx_row_dot_full_cur_vec(matrix,row,currowvec,colrng,transpose) --> |
593 |
<!-- -$- sum = mtx_col_dot_full_cur_vec(matrix,col,curcolvec,rowrng,transpose) --> |
594 |
*** <!-- real64 sum; --> |
595 |
*** <!-- mtx_matrix_t matrix; --> |
596 |
*** <!-- int32 row,col; --> |
597 |
*** <!-- real64 *currowvec, *curcolvec; --> |
598 |
*** <!-- mtx_range_t *colrng,*rowrng; --> |
599 |
*** <!-- boolean transpose; --> |
600 |
*** |
601 |
*** <!-- mtx_row_dot_full_cur_vec: --> |
602 |
*** Compute the dot product of the row given with the cur col indexed |
603 |
*** currowvec over the colrng given (colrng being the cur indexed |
604 |
*** limits as usual.)<br><br> |
605 |
*** |
606 |
*** <!-- mtx_col_dot_full_cur_vec: --> |
607 |
*** <!-- Replace row <--> column above. --> |
608 |
-$- Returns 0.0 from a bad matrix.<br><br> |
609 |
*** |
610 |
! ! Transpose is currently not implemented. A warning will be issued. |
611 |
! ! When someone finds a use and can explain what the transpose versions |
612 |
! ! of these functions do in terms of permutations, it will be coded.<br><br> |
613 |
*** |
614 |
*** Note: This pair of operators is slightly less expensive than |
615 |
*** the mtx_*_dot_full_org_vec pair. |
616 |
**/ |
617 |
|
618 |
/* NOTE(review): parameter names corrected (was row/colrng) to match the |
 * column-dot semantics and the non-custom mtx_col_dot_full_org_vec(). */ |
extern real64 mtx_col_dot_full_org_custom_vec(mtx_matrix_t matrix1, |
620 |
                                              mtx_matrix_t matrix2, |
621 |
                                              int32 col, |
622 |
                                              real64 *orgvec, |
623 |
                                              mtx_range_t *rowrng, |
624 |
                                              boolean transpose); |
624 |
/**< See mtx_row_dot_full_org_custom_vec(), switching row & column references. */ |
625 |
/* NOTE(review): parameter names corrected (was col/rowrng) to match the |
 * row-dot semantics and the non-custom mtx_row_dot_full_org_vec(). */ |
extern real64 mtx_row_dot_full_org_custom_vec(mtx_matrix_t matrix1, |
626 |
                                              mtx_matrix_t matrix2, |
627 |
                                              int32 row, |
628 |
                                              real64 *orgvec, |
629 |
                                              mtx_range_t *colrng, |
630 |
                                              boolean transpose); |
631 |
/**< |
632 |
<!-- -$- sum = mtx_row_dot_full_org_custom_vec --> |
633 |
<!-- -$- (matrix1,matrix2,row,orgvec,colrng,transpose)--> |
634 |
<!-- -$- sum = mtx_col_dot_full_org_custom_vec --> |
635 |
<!-- -$- (matrix1,matrix2,col,orgvec,rowrng,transpose)--> |
636 |
*** <!-- real64 sum; --> |
637 |
*** <!-- mtx_matrix_t matrix1; --> |
638 |
*** <!-- mtx_matrix_t matrix2; --> |
639 |
*** <!-- int32 row,col; --> |
640 |
*** <!-- real64 *orgvec; --> |
641 |
*** <!-- mtx_range_t *colrng,*rowrng; --> |
642 |
*** <!-- boolean transpose; --> |
643 |
*** |
644 |
*** <!-- mtx_row_dot_full_org_vec: --> |
645 |
*** <pre> |
646 |
*** For transpose==FALSE: |
647 |
*** Compute the dot product of the row given with the org_col indexed |
648 |
*** orgvec (wrt matrix2) over the colrng given (colrng being the cur |
649 |
*** indexed limits of matrix1.) |
650 |
*** i.e. SUM( mtx(row,col_cur) * |
651 |
*** orgvec[mtx_col_to_org(mtx2,col_cur)] ) |
652 |
*** for all inrange(col_cur). |
653 |
*** |
654 |
*** For transpose==TRUE: |
655 |
*** Compute the dot product of the row given with the org_row indexed |
656 |
*** orgvec (wrt matrix2) over the colrng given (colrng being the cur |
657 |
*** indexed limits of matrix1.) |
658 |
*** i.e. SUM( mtx(row,col_cur) * |
659 |
*** orgvec[mtx_row_to_org(mtx2,col_cur)] ) |
660 |
*** for all inrange(col_cur). |
661 |
*** </pre> |
662 |
*** <!-- mtx_col_dot_full_org_custom_vec: --> |
663 |
*** <!-- Replace row <--> col above. --> |
664 |
-$- Returns 0.0 from a bad matrix. |
665 |
**/ |
666 |
|
667 |
extern void mtx_org_vec_add_col(mtx_matrix_t mtx, |
668 |
real64 *tvec, |
669 |
int32 scol, |
670 |
real64 factor, |
671 |
mtx_range_t *rowrng, |
672 |
boolean transpose); |
673 |
/**< See mtx_cur_vec_add_row(), switching row & column references. */ |
674 |
extern void mtx_org_vec_add_row(mtx_matrix_t mtx, |
675 |
real64 *tvec, |
676 |
int32 srow, |
677 |
real64 factor, |
678 |
mtx_range_t *colrng, |
679 |
boolean transpose); |
680 |
/**< See mtx_cur_vec_add_row(). */ |
681 |
extern void mtx_cur_vec_add_col(mtx_matrix_t mtx, |
682 |
real64 *tvec, |
683 |
int32 scol, |
684 |
real64 factor, |
685 |
mtx_range_t *rowrng, |
686 |
boolean transpose); |
687 |
/**< See mtx_cur_vec_add_row(), switching row & column references. */ |
688 |
extern void mtx_cur_vec_add_row(mtx_matrix_t mtx, |
689 |
real64 *tvec, |
690 |
int32 srow, |
691 |
real64 factor, |
692 |
mtx_range_t *colrng, |
693 |
boolean transpose); |
694 |
/**< |
695 |
<!-- -$- mtx_org_vec_add_row(matrix,tvec,srow,factor,colrng,transpose) --> |
696 |
<!-- -$- mtx_org_vec_add_col(matrix,tvec,scol,factor,rowrng,transpose) --> |
697 |
<!-- -$- mtx_cur_vec_add_row(matrix,tvec,srow,factor,colrng,transpose) --> |
698 |
<!-- -$- mtx_cur_vec_add_col(matrix,tvec,scol,factor,rowrng,transpose) --> |
699 |
*** <!-- mtx_matrix_t matrix; --> |
700 |
*** <!-- int32 srow,scol; --> |
701 |
*** <!-- real64 *tvec; --> |
702 |
*** <!-- real64 factor; --> |
703 |
*** <!-- mtx_range_t *colrng,*rowrng; --> |
704 |
*** <!-- boolean transpose; --> |
705 |
*** |
706 |
*** <!-- mtx_org/cur_vec_add_row: --> |
707 |
*** Adds multiple factor of srow to tvec for those columns in colrng. |
708 |
*** tvec is org/cur col indexed if transpose==FALSE. |
709 |
*** i.e. this is just adding rows. |
710 |
*** tvec is org/cur row indexed if transpose==TRUE. |
711 |
*** orgvec[mtx_row_to_org(col)]+=factor*element(srow,col) |
712 |
! ! curvec[???]+=factor*element(srow,col) |
713 |
***<br><br> |
714 |
*** <!-- mtx_org/cur_vec_add_col: --> |
715 |
*** <!-- Reverse row <-->col in above. --> |
716 |
*** Since switching row and column is hard for the transpose, here it is: |
717 |
*** orgvec[mtx_col_to_org(row)]+=factor*element(row,scol) |
718 |
! ! curvec[???]+=factor*element(row,scol) |
719 |
*** <br><br> |
720 |
*** Notes: It is faster to use this with mtx_ALL_COLS/ROWS where |
721 |
*** possible. |
722 |
*** Use transpose==TRUE here if you would use transpose==TRUE |
723 |
*** for dotting the row/col with the same vector.<br><br> |
724 |
! ! Warning: |
725 |
! ! Like mtx_row/col_dot_full_cur_vec, |
726 |
! ! the transpose==TRUE flavors of mtx_cur_vec_add_row/col |
727 |
! ! are NOT implemented. Nobody has found a use for them and nobody |
728 |
! ! has yet cooked up what they mean in permutation terms.<br><br> |
729 |
*** |
730 |
-$- Does nothing to a bad matrix. |
731 |
**/ |
732 |
|
733 |
#endif /* include guard */ |