/*
 *  mtx2: Ascend Sparse Matrix Package
 *  by Benjamin Andrew Allan
 *  Derived from mtx by Karl Michael Westerberg
 *  Created: 5/3/90
 *  Version: $Revision: 1.9 $
 *  Version control file: $RCSfile: mtx_query.h,v $
 *  Date last modified: $Date: 1997/07/18 12:15:12 $
 *  Last modified by: $Author: mthomas $
 *
 *  This file is part of the SLV solver.
 *
 *  Copyright (C) 1996 Benjamin Andrew Allan
 *
 *  The SLV solver is free software; you can redistribute
 *  it and/or modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2 of the
 *  License, or (at your option) any later version.
 *
 *  The SLV solver is distributed in hope that it will be
 *  useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  the program; if not, write to the Free Software Foundation, Inc., 675
 *  Mass Ave, Cambridge, MA 02139 USA. Check the file named COPYING.
 *  COPYING is found in ../compiler.
 */
#ifndef __MTX_QUERY_H_SEEN__
#define __MTX_QUERY_H_SEEN__
/* requires #include "mtx.h" */

extern real64 mtx_next_in_row(mtx_matrix_t, mtx_coord_t *, mtx_range_t *);
extern real64 mtx_next_in_col(mtx_matrix_t, mtx_coord_t *, mtx_range_t *);
/**
 -$- value = mtx_next_in_row(matrix,coord,colrng)
 -$- value = mtx_next_in_col(matrix,coord,rowrng)
 *** real64 value;
 *** mtx_matrix_t matrix;
 *** mtx_coord_t *coord;
 *** mtx_range_t *colrng,*rowrng;
 ***
 *** mtx_next_in_row():
 *** Searches for the next non-zero in the given row whose column index
 *** lies in the given range, and returns its value. Coord->col is set to
 *** the column in which it was found. To find the first non-zero in the
 *** row, set coord->col = mtx_FIRST. Coord->col will equal mtx_LAST and
 *** 0.0 will be returned if there is no next non-zero. The row (and mtx!)
 *** searched is always the one specified when mtx_next_in_row was most
 *** recently called with a coord such that coord->col == mtx_FIRST.
 *** If colrng->low > colrng->high, coord->col is set to mtx_LAST, which
 *** gives correct behaviour for mtx/vector processing loops.
 *** If coord->row is not in the matrix (< 0 || >= mtx order) on an initial
 *** call (with coord->col == mtx_FIRST), 0.0 is returned and coord->col is
 *** set to mtx_LAST.
 ***
 *** mtx_next_in_col():
 *** Switch row <--> column in the discussion above.
 ***
 *** Example of canonical usage:
 ***   nz.col = col;          (* the col to be traversed *)
 ***   nz.row = mtx_FIRST;    (* initializer, which must be used *)
 ***   while( value = mtx_next_in_col(mtx,&nz,rowrng),
 ***          nz.row != mtx_LAST ) { (* your thing here *) }
 *** Note that the logic test of the while is ONLY nz.row != mtx_LAST.
 *** C lets you do whatever you choose before the comma.
 ***
 -$- Returns FALSE from a bad matrix.
 ***
 *** NOTE: The notion of "next non-zero" is arbitrary, but will not change
 *** until the next destructive operation on the matrix is performed.
 *** Neither the row/col number nor the rel/var index will
 *** behave monotonically while traversing a col/row. This means there
 *** is nothing you can do with the range to shrink the search space
 *** based on the last row/col returned. Basic matrix operations, when
 *** properly thought about in a general sparse context, do not care
 *** about the order in which the elements of vectors are processed.
 ***
 *** WARNINGS:
 !1! - You may NOT nest mtx_next_in_col within mtx_next_in_col
 ! ! or mtx_next_in_row within mtx_next_in_row. We have yet to find a
 ! ! process suggestive of doing so that was not better handled with
 ! ! a for loop. This is a global constraint. Violation results in garbage.
 !2! - You may NOT safely perform operation mtx_del_zr_in_col while traversing
 ! ! a column unless the value just returned from the previous next_in_col is
 ! ! nonzero or the coord.row last returned is mtx_LAST. Also, you may NOT
 ! ! safely perform operation mtx_del_zr_in_row on coord.row unless the
 ! ! value just returned from the previous next_in_col is nonzero.
 ! ! The proscription here is for mtx_next_in_ and mtx_del_zr_in_ operating
 ! ! on the same matrix. mtx_del_zr_in_ is safe to call if the mtx_next_in_
 ! ! driving it is happening on a different matrix.
 !3! - Warning 2 applies with row <--> column transposed for next_in_row.
 ! ! Violation of 2 or 3 results, sooner or later, in a memory fault.
 *** You CAN nest mtx_next_in_col within mtx_next_in_row or vice versa,
 *** however.
 **/
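
/*
 *  A minimal usage sketch, assuming only the declarations above and the
 *  mtx_coord_t/mtx_range_t members documented in mtx.h: sum the incidences
 *  of one row that lie in colrng. The helper name and variables are
 *  illustrative, not part of the mtx API.
 *
 *    static real64 sum_row_incidences(mtx_matrix_t mtx, int32 row,
 *                                     mtx_range_t *colrng)
 *    {
 *      mtx_coord_t nz;
 *      real64 value, sum = 0.0;
 *      nz.row = row;
 *      nz.col = mtx_FIRST;                 (* required initializer *)
 *      while( value = mtx_next_in_row(mtx,&nz,colrng),
 *             nz.col != mtx_LAST ) {
 *        sum += value;                     (* nz.col is the cur column *)
 *      }
 *      return sum;
 *    }
 */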

extern real64 mtx_row_max(mtx_matrix_t, mtx_coord_t *,
                          mtx_range_t *, real64 *);
extern real64 mtx_col_max(mtx_matrix_t, mtx_coord_t *,
                          mtx_range_t *, real64 *);
/**
 -$- value = mtx_row_max(matrix,coord,colrng,signval)
 -$- value = mtx_col_max(matrix,coord,rowrng,signval)
 *** real64 value, *signval;
 *** mtx_matrix_t matrix;
 *** mtx_coord_t *coord;
 *** mtx_range_t *colrng,*rowrng;
 ***
 *** mtx_row_max():
 *** Searches for the element in the given row, with column index in the
 *** given column range, which has the largest absolute value. The user
 *** should set coord->row to the desired row to search, and this function
 *** will set coord->col to the column index where the maximum was found
 *** (or mtx_NONE if no non-zero was found in that range). The absolute
 *** value of that element is returned. If the pointer signval is not
 *** NULL, the real64 pointed to will be stuffed with the signed
 *** value of the maximum sized element.
 *** In the event of ties, the element with the lowest current column
 *** index wins.
 ***
 *** mtx_col_max():
 *** Replace row <--> column above.
 ***
 *** (mtx_row_min() and mtx_col_min() are declared and documented below;
 *** they take an additional minval argument.)
 ***
 -$- Returns -1.0 from a bad matrix.
 **/
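
/*
 *  A minimal usage sketch, assuming only the declarations above: find the
 *  magnitude and signed value of the largest element of row r within
 *  colrng. r, mtx and colrng are assumed to be supplied by the caller.
 *
 *    mtx_coord_t where;
 *    real64 maxabs, signedval;
 *    where.row = r;                        (* row to be searched *)
 *    maxabs = mtx_row_max(mtx,&where,colrng,&signedval);
 *    if( where.col == mtx_NONE ) {
 *      (* no incidence was found in colrng *)
 *    }
 */
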
extern real64 mtx_row_min(mtx_matrix_t, mtx_coord_t *,
                          mtx_range_t *, real64 *, real64);
extern real64 mtx_col_min(mtx_matrix_t, mtx_coord_t *,
                          mtx_range_t *, real64 *, real64);
/**
 -$- value = mtx_row_min(matrix,coord,colrng,signval,minval)
 -$- value = mtx_col_min(matrix,coord,rowrng,signval,minval)
 *** real64 value, *signval, minval;
 *** mtx_matrix_t matrix;
 *** mtx_coord_t *coord;
 *** mtx_range_t *colrng,*rowrng;
 ***
 *** These functions are basically defined as their max counterparts
 *** above, with the appropriate swapping of max <--> min.
 *** They only look at the nonzero elements.
 *** The min functions will only find numbers between minval
 *** and 1e50. If no such number is found, signval will be set to zero
 *** and the value returned will be one.
 **/
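
/*
 *  A minimal usage sketch, assuming only the declarations above: find the
 *  smallest nonzero magnitude in row r within colrng, ignoring anything
 *  smaller than an illustrative threshold of 1e-8.
 *
 *    mtx_coord_t where;
 *    real64 minabs, signedval;
 *    where.row = r;
 *    minabs = mtx_row_min(mtx,&where,colrng,&signedval,1e-8);
 *    if( signedval == 0.0 ) {
 *      (* nothing in colrng had magnitude between 1e-8 and 1e50 *)
 *    }
 */
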
extern real64 mtx_get_pivot_col(mtx_matrix_t, mtx_coord_t *,
                                mtx_range_t *, real64 *,
                                real64, real64);
extern real64 mtx_get_pivot_row(mtx_matrix_t, mtx_coord_t *,
                                mtx_range_t *, real64 *,
                                real64, real64);
/**
 -$- value = mtx_get_pivot_col(matrix,coord,colrng,signval,tol,eps)
 -$- value = mtx_get_pivot_row(matrix,coord,rowrng,signval,tol,eps)
 *** real64 value, *signval;
 *** mtx_matrix_t matrix;
 *** mtx_coord_t *coord;
 *** mtx_range_t *colrng,*rowrng;
 *** real64 tol, eps;
 ***
 *** This implements efficiently the standard sparse modification
 *** of LU partial pivot selection.
 ***
 *** mtx_get_pivot_col():
 *** Searches for the leftmost element in the colrng of the given row
 *** which passes the sparse partial pivoting criteria:
 ***   1) |aij| >= eps,
 ***   2) |aij| >= tol * max_abs_element_value_in_colrng.
 *** The absolute value of the passing element is returned, or 0.0 if
 *** there are no entries that pass criterion 1. The user
 *** should set coord->row to the desired row to search. This function
 *** will set coord->col to the column index where the result was found
 *** (or mtx_NONE if nothing acceptable was found in that range). The
 *** absolute value of that element is also returned. If the pointer
 *** signval is not NULL, the real64 pointed to will be stuffed with the
 *** signed value of the selected element.
 *** This function is faster when mtx_ALL_COLS can be used for colrng.
 ***
 *** mtx_get_pivot_row():
 *** Replace row <--> column above.
 ***
 -$- Returns -1.0 from a bad matrix.
 **/
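
/*
 *  A minimal usage sketch, assuming only the declarations above: pick a
 *  pivot in row r using an illustrative relative threshold tol = 0.1 and
 *  absolute threshold eps = 1e-30.
 *
 *    mtx_coord_t pivot;
 *    real64 pivmag, pivval;
 *    pivot.row = r;
 *    pivmag = mtx_get_pivot_col(mtx,&pivot,colrng,&pivval,0.1,1e-30);
 *    if( pivot.col == mtx_NONE ) {
 *      (* no element of row r in colrng passed the criteria *)
 *    }
 */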

extern int32 mtx_nonzeros_in_row(mtx_matrix_t, int32, mtx_range_t *);
extern int32 mtx_nonzeros_in_col(mtx_matrix_t, int32, mtx_range_t *);
extern int32 mtx_nonzeros_in_region(mtx_matrix_t, mtx_region_t *);
extern int32 mtx_numbers_in_row(mtx_matrix_t, int32, mtx_range_t *);
extern int32 mtx_numbers_in_col(mtx_matrix_t, int32, mtx_range_t *);
extern int32 mtx_numbers_in_region(mtx_matrix_t, mtx_region_t *);
/**
 -$- count = mtx_nonzeros_in_row(matrix,row,colrng)
 -$- count = mtx_nonzeros_in_col(matrix,col,rowrng)
 -$- count = mtx_nonzeros_in_region(matrix,reg)
 -$- count = mtx_numbers_in_row(matrix,row,colrng)
 -$- count = mtx_numbers_in_col(matrix,col,rowrng)
 -$- count = mtx_numbers_in_region(matrix,reg)
 *** int32 count;
 *** mtx_matrix_t matrix;
 *** int32 row,col;
 *** mtx_range_t *colrng,*rowrng;
 *** mtx_region_t *reg;
 ***
 *** mtx_nonzeros_in_row():
 *** Counts the number of incidences in the given row whose column index
 *** lies in the given column range.
 ***
 *** mtx_nonzeros_in_col():
 *** Replace row <--> column above.
 ***
 *** mtx_nonzeros_in_region():
 *** Counts the number of incidences in the given region.
 ***
 *** mtx_numbers_in_row():
 *** Counts the non-zero values in the given row whose column index
 *** lies in the given column range.
 ***
 *** mtx_numbers_in_col():
 *** Replace row <--> column above.
 ***
 *** mtx_numbers_in_region():
 *** Counts the non-zero values in the given region.
 ***
 -$- Returns -1 from a bad matrix.
 **/
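
/*
 *  A minimal usage sketch, assuming only the declarations above,
 *  contrasting the two counts: nonzeros counts incidences, numbers
 *  counts incidences whose stored value is not 0.0.
 *
 *    if( mtx_nonzeros_in_row(mtx,row,colrng) == 0 ) {
 *      (* the row is structurally empty in colrng *)
 *    }
 *    if( mtx_numbers_in_row(mtx,row,colrng) == 0 ) {
 *      (* incidences may exist in colrng, but all hold 0.0 *)
 *    }
 */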

/***********************************************************************\
  mtx vector operation routines
  None of these routines care about master/slave status.
\***********************************************************************/

/*****************************************************************\
  Dense vector operations, rather analogous to the mtx_value suite.
  These are tools for data motion. No dense-dense arithmetic operators
  are provided, as these are best left to the user to design or steal
  from elsewhere, e.g. the BLAS. The (de)allocation of dense vectors is
  the user's job, as is ensuring that the dense vectors used are
  large enough to accommodate operations in the range of the given
  mtx_range_t. mtx->order is a safe size to use if you can't think
  of something else.
\*****************************************************************/

extern void mtx_org_row_vec(mtx_matrix_t, int32, real64 *, mtx_range_t *);
extern void mtx_org_col_vec(mtx_matrix_t, int32, real64 *, mtx_range_t *);
extern void mtx_cur_row_vec(mtx_matrix_t, int32, real64 *, mtx_range_t *);
extern void mtx_cur_col_vec(mtx_matrix_t, int32, real64 *, mtx_range_t *);
/**
 -$- mtx_org_row_vec(mtx,row,vec,colrng)
 -$- mtx_org_col_vec(mtx,col,vec,rowrng)
 -$- mtx_cur_row_vec(mtx,row,vec,colrng)
 -$- mtx_cur_col_vec(mtx,col,vec,rowrng)
 *** mtx_matrix_t mtx;
 *** int32 row,col;
 *** real64 *vec;
 *** mtx_range_t *colrng,*rowrng;
 ***
 *** mtx_org/cur_row_vec:
 *** The user is expected to supply the vec; we cannot check it.
 *** Copies the mtx nonzeros currently within colrng INTO array vec which is
 *** indexed by org/cur column number. Does not affect other
 *** entries of vec in or outside the range. In particular, vec
 *** is NOT zeroed within the range unless there is a matrix element
 *** with value zero at that location.
 ***
 *** mtx_org/cur_col_vec:
 *** Switch row <--> col in above.
 ***
 *** Notes: It is faster to call this with mtx_ALL_COLS/ROWS when
 *** the row/col of interest is known to have incidence exclusively in
 *** the range of interest.
 ***
 -$- Fetches nothing from a bad matrix.
 **/
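
/*
 *  A minimal usage sketch, assuming colrng is an ordinary range (not
 *  mtx_ALL_COLS) and vec has at least mtx order entries: gather row r
 *  into a cur-col indexed dense array. Because the call does not zero
 *  vec, clear the range first if a full dense image of the row is wanted.
 *
 *    int32 i;
 *    for( i = colrng->low ; i <= colrng->high ; i++ ) {
 *      vec[i] = 0.0;
 *    }
 *    mtx_cur_row_vec(mtx,r,vec,colrng);    (* vec[col_cur] = a(r,col_cur) *)
 */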

/*****************************************************************\
  Sparse vector operations, rather analogous to the mtx_value suite.
  These are tools for data motion. No arithmetic operators
  are provided as yet. The deallocation of sparse vectors is
  the user's job. See the notes at the top of this header for the
  data semantics of a sparse vector.
  The user may construct a sparse vector.
  Calls which return a sparse vector may create or use and return
  a user supplied sparse vector. These calls are at liberty to
  reallocate the data memory if that supplied is insufficient to
  hold the data. The len and cap values of the vector will be reset
  as appropriate.

  Functions do not create a sparse unless it says in their header
  that mtx_CREATE_SPARSE is a valid argument.
\*****************************************************************/

extern mtx_sparse_t *mtx_org_row_sparse(mtx_matrix_t, int32,
                                        mtx_sparse_t * const, mtx_range_t *,
                                        int);
extern mtx_sparse_t *mtx_org_col_sparse(mtx_matrix_t, int32,
                                        mtx_sparse_t * const, mtx_range_t *,
                                        int);
extern mtx_sparse_t *mtx_cur_row_sparse(mtx_matrix_t, int32,
                                        mtx_sparse_t * const, mtx_range_t *,
                                        int);
extern mtx_sparse_t *mtx_cur_col_sparse(mtx_matrix_t, int32,
                                        mtx_sparse_t * const, mtx_range_t *,
                                        int);
/**
 -$- sparse = mtx_org_row_sparse(mtx,row,sparse,colrng,zeroes)
 -$- sparse = mtx_org_col_sparse(mtx,col,sparse,rowrng,zeroes)
 -$- sparse = mtx_cur_row_sparse(mtx,row,sparse,colrng,zeroes)
 -$- sparse = mtx_cur_col_sparse(mtx,col,sparse,rowrng,zeroes)
 *** mtx_matrix_t mtx;
 *** int32 row,col;
 *** mtx_sparse_t *sparse;
 *** mtx_range_t *colrng,*rowrng;
 *** int zeroes;
 ***
 *** The user must supply the sparse. It will not be enlarged.
 *** mtx_CREATE_SPARSE is not a valid argument.
 *** If the capacity of the sparse given is insufficient, we will
 *** copy as much data as will fit into sparse and return NULL.
 *** User beware!
 ***
 *** mtx_org/cur_row_sparse:
 *** Copies the mtx nonzeros currently within colrng to the sparse,
 *** indexing by org/cur column number. Nonzeros with value 0.0 WILL
 *** be included in the sparse iff zeroes is mtx_SOFT_ZEROES.
 *** sparse->len will be set accordingly.
 ***
 *** mtx_org/cur_col_sparse:
 *** Switch row <--> col in above.
 ***
 *** Notes: It is faster to call this with mtx_ALL_COLS/ROWS when
 *** the row/col of interest is known to have incidence exclusively in
 *** the range of interest.
 ***
 -$- Fetches nothing from a bad matrix.
 **/
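
/*
 *  A minimal usage sketch, assuming the caller has already built a
 *  mtx_sparse_t (sp below) with enough capacity, laid out as described
 *  in mtx.h: gather row r, keeping explicitly stored zeros.
 *
 *    if( mtx_org_row_sparse(mtx,r,sp,colrng,mtx_SOFT_ZEROES) == NULL ) {
 *      (* sp was too small; only part of the row was copied *)
 *    }
 */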

extern void mtx_zr_org_vec_using_row(mtx_matrix_t, int32,
                                     real64 *, mtx_range_t *);
extern void mtx_zr_org_vec_using_col(mtx_matrix_t, int32,
                                     real64 *, mtx_range_t *);
extern void mtx_zr_cur_vec_using_row(mtx_matrix_t, int32,
                                     real64 *, mtx_range_t *);
extern void mtx_zr_cur_vec_using_col(mtx_matrix_t, int32,
                                     real64 *, mtx_range_t *);
/**
 -$- mtx_zr_org_vec_using_row(mtx,row,vec,colrng)
 -$- mtx_zr_org_vec_using_col(mtx,col,vec,rowrng)
 -$- mtx_zr_cur_vec_using_row(mtx,row,vec,colrng)
 -$- mtx_zr_cur_vec_using_col(mtx,col,vec,rowrng)
 *** mtx_matrix_t mtx;
 *** int32 row,col;
 *** real64 *vec;
 *** mtx_range_t *colrng,*rowrng;
 ***
 *** mtx_zr_org/cur_vec_using_row:
 *** Sets the values of vec (indexed by org/cur row) corresponding to
 *** incidences in colrng to 0.0.
 ***
 *** mtx_zr_org/cur_vec_using_col:
 *** Switch row <--> col in above.
 ***
 *** Notes: It is faster to call this with mtx_ALL_COLS/ROWS when
 *** practical, and the org flavor is faster than the cur flavor.
 ***
 -$- Does nothing given a bad matrix.
 **/
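
/*
 *  A minimal usage sketch for the gather/use/clear pattern, assuming (as
 *  for mtx_org_row_vec above) that vec is indexed by org column number
 *  for the using_row flavor; check mtx.h if in doubt about the indexing.
 *
 *    mtx_org_row_vec(mtx,r,vec,colrng);          (* gather row r *)
 *    (* ... work with vec ... *)
 *    mtx_zr_org_vec_using_row(mtx,r,vec,colrng); (* clear only what row r touched *)
 */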

extern real64 mtx_sum_sqrs_in_row(mtx_matrix_t, int32, const mtx_range_t *);
extern real64 mtx_sum_sqrs_in_col(mtx_matrix_t, int32, const mtx_range_t *);
extern real64 mtx_sum_abs_in_row(mtx_matrix_t, int32, const mtx_range_t *);
extern real64 mtx_sum_abs_in_col(mtx_matrix_t, int32, const mtx_range_t *);
/**
 -$- sum = mtx_sum_sqrs_in_row(matrix,row,colrng)
 -$- sum = mtx_sum_sqrs_in_col(matrix,col,rowrng)
 -$- sum = mtx_sum_abs_in_row(matrix,row,colrng)
 -$- sum = mtx_sum_abs_in_col(matrix,col,rowrng)
 *** real64 sum;
 *** mtx_matrix_t matrix;
 *** int32 row,col;
 *** mtx_range_t *colrng,*rowrng;
 ***
 *** mtx_sum_sqrs_in_row():
 *** Compute sum of squares of non-zeros in the given row whose column index
 *** lies in the given column range.
 ***
 *** mtx_sum_abs_in_row():
 *** Compute sum of absolute values of non-zeros in the
 *** given row whose column index lies in the given column range.
 ***
 *** mtx_sum_sqrs_in_col():
 *** Replace row <--> column above.
 ***
 *** mtx_sum_abs_in_col():
 *** Replace row <--> column above.
 ***
 -$- Returns 0.0 from a bad matrix.
 **/
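
/*
 *  A minimal usage sketch, assuming only the declarations above and
 *  <math.h>: the 2-norm and 1-norm of row r restricted to colrng.
 *
 *    real64 norm2, norm1;
 *    norm2 = sqrt(mtx_sum_sqrs_in_row(mtx,r,colrng));
 *    norm1 = mtx_sum_abs_in_row(mtx,r,colrng);
 */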

extern real64 mtx_row_dot_full_org_vec(mtx_matrix_t, int32,
                                       real64 *, mtx_range_t *,
                                       boolean);
extern real64 mtx_col_dot_full_org_vec(mtx_matrix_t, int32,
                                       real64 *, mtx_range_t *,
                                       boolean);
/**
 -$- sum = mtx_row_dot_full_org_vec(matrix,row,orgvec,colrng,transpose)
 -$- sum = mtx_col_dot_full_org_vec(matrix,col,orgvec,rowrng,transpose)
 *** real64 sum;
 *** mtx_matrix_t matrix;
 *** int32 row,col;
 *** real64 *orgvec;
 *** mtx_range_t *colrng,*rowrng;
 *** boolean transpose;
 ***
 *** mtx_row_dot_full_org_vec:
 ***
 *** For transpose==FALSE:
 *** Compute the dot product of the row given with the org_col indexed
 *** orgvec over the colrng given (colrng being the cur indexed
 *** limits as usual), i.e.
 ***   SUM( mtx(row,col_cur) * orgvec[mtx_col_to_org(mtx,col_cur)] )
 ***   for all inrange(col_cur).
 ***
 *** For transpose==TRUE:
 *** Compute the dot product of the row given with the org_row indexed
 *** orgvec over the colrng given (colrng being the cur indexed
 *** limits as usual), i.e.
 ***   SUM( mtx(row,col_cur) * orgvec[mtx_row_to_org(mtx,col_cur)] )
 ***   for all inrange(col_cur).
 ***
 *** mtx_col_dot_full_org_vec:
 *** Replace row <--> col above.
 ***
 -$- Returns 0.0 from a bad matrix.
 **/
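
/*
 *  A minimal usage sketch, assuming orgvec is a dense array indexed by
 *  org column number and at least mtx order long: the ordinary
 *  (transpose==FALSE) row dot product restricted to colrng.
 *
 *    real64 dot;
 *    dot = mtx_row_dot_full_org_vec(mtx,r,orgvec,colrng,FALSE);
 */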

extern real64 mtx_row_dot_full_cur_vec(mtx_matrix_t, int32,
                                       real64 *, mtx_range_t *,
                                       boolean);
extern real64 mtx_col_dot_full_cur_vec(mtx_matrix_t, int32,
                                       real64 *, mtx_range_t *,
                                       boolean);
/**
 -$- sum = mtx_row_dot_full_cur_vec(matrix,row,currowvec,colrng,transpose)
 -$- sum = mtx_col_dot_full_cur_vec(matrix,col,curcolvec,rowrng,transpose)
 *** real64 sum;
 *** mtx_matrix_t matrix;
 *** int32 row,col;
 *** real64 *currowvec, *curcolvec;
 *** mtx_range_t *colrng,*rowrng;
 *** boolean transpose;
 ***
 ! ! Transpose is currently not implemented. A warning will be issued.
 ! ! When someone finds a use and can explain what the transpose versions
 ! ! of these functions do in terms of permutations, it will be coded.
 ***
 *** mtx_row_dot_full_cur_vec:
 *** Compute the dot product of the row given with the cur col indexed
 *** currowvec over the colrng given (colrng being the cur indexed
 *** limits as usual).
 ***
 *** mtx_col_dot_full_cur_vec:
 *** Replace row <--> column above.
 -$- Returns 0.0 from a bad matrix.
 ***
 *** Note: This pair of operators is slightly less expensive than
 *** the mtx_*_dot_full_org_vec pair.
 **/

extern real64 mtx_row_dot_full_org_custom_vec(mtx_matrix_t,
                                              mtx_matrix_t, int32,
                                              real64 *, mtx_range_t *,
                                              boolean);
extern real64 mtx_col_dot_full_org_custom_vec(mtx_matrix_t,
                                              mtx_matrix_t, int32,
                                              real64 *, mtx_range_t *,
                                              boolean);
/**
 -$- sum =
 -$-   mtx_row_dot_full_org_custom_vec(matrix1,matrix2,row,orgvec,colrng,transpose)
 -$- sum =
 -$-   mtx_col_dot_full_org_custom_vec(matrix1,matrix2,col,orgvec,rowrng,transpose)
 *** real64 sum;
 *** mtx_matrix_t matrix1;
 *** mtx_matrix_t matrix2;
 *** int32 row,col;
 *** real64 *orgvec;
 *** mtx_range_t *colrng,*rowrng;
 *** boolean transpose;
 ***
 *** mtx_row_dot_full_org_custom_vec:
 ***
 *** For transpose==FALSE:
 *** Compute the dot product of the row given with the org_col indexed
 *** orgvec (wrt matrix2) over the colrng given (colrng being the cur
 *** indexed limits of matrix1), i.e.
 ***   SUM( mtx(row,col_cur) * orgvec[mtx_col_to_org(mtx2,col_cur)] )
 ***   for all inrange(col_cur).
 ***
 *** For transpose==TRUE:
 *** Compute the dot product of the row given with the org_row indexed
 *** orgvec (wrt matrix2) over the colrng given (colrng being the cur
 *** indexed limits of matrix1), i.e.
 ***   SUM( mtx(row,col_cur) * orgvec[mtx_row_to_org(mtx2,col_cur)] )
 ***   for all inrange(col_cur).
 ***
 *** mtx_col_dot_full_org_custom_vec:
 *** Replace row <--> col above.
 ***
 -$- Returns 0.0 from a bad matrix.
 **/

extern void mtx_org_vec_add_row(mtx_matrix_t, real64 *,
                                int32, real64,
                                mtx_range_t *, boolean);
extern void mtx_org_vec_add_col(mtx_matrix_t, real64 *,
                                int32, real64,
                                mtx_range_t *, boolean);
extern void mtx_cur_vec_add_row(mtx_matrix_t, real64 *,
                                int32, real64,
                                mtx_range_t *, boolean);
extern void mtx_cur_vec_add_col(mtx_matrix_t, real64 *,
                                int32, real64,
                                mtx_range_t *, boolean);
/**
 -$- mtx_org_vec_add_row(matrix,tvec,srow,factor,colrng,transpose)
 -$- mtx_org_vec_add_col(matrix,tvec,scol,factor,rowrng,transpose)
 -$- mtx_cur_vec_add_row(matrix,tvec,srow,factor,colrng,transpose)
 -$- mtx_cur_vec_add_col(matrix,tvec,scol,factor,rowrng,transpose)
 *** mtx_matrix_t matrix;
 *** int32 srow,scol;
 *** real64 *tvec;
 *** real64 factor;
 *** mtx_range_t *colrng,*rowrng;
 *** boolean transpose;
 ***
 *** mtx_org/cur_vec_add_row:
 *** Adds factor times row srow to tvec for those columns in colrng;
 *** i.e. this is just adding rows.
 *** tvec is org/cur col indexed if transpose==FALSE.
 *** tvec is org/cur row indexed if transpose==TRUE:
 ***   orgvec[mtx_row_to_org(col)] += factor*element(srow,col)
 ! !   curvec[???] += factor*element(srow,col)
 ***
 *** mtx_org/cur_vec_add_col:
 *** Reverse row <--> col in above.
 *** Since this reversal is hard for the transpose, here it is:
 ***   orgvec[mtx_col_to_org(row)] += factor*element(row,scol)
 ! !   curvec[???] += factor*element(row,scol)
 ***
 *** Notes: It is faster to use this with mtx_ALL_COLS/ROWS where
 *** possible.
 *** Use transpose==TRUE here if you would use transpose==TRUE
 *** for dotting the row/col with the same vector.
 ! ! Warning:
 ! ! Like mtx_row/col_dot_full_cur_vec,
 ! ! the transpose==TRUE flavors of mtx_cur_vec_add_row/col
 ! ! are NOT implemented. Nobody has found a use for them and nobody
 ! ! has yet cooked up what they mean in permutation terms.
 ***
 -$- Does nothing to a bad matrix.
 **/
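
/*
 *  A minimal usage sketch, assuming tvec is a dense array indexed by cur
 *  column number and at least mtx order long: accumulate factor times
 *  row srow into tvec over colrng, as in one elimination step.
 *
 *    mtx_cur_vec_add_row(mtx,tvec,srow,factor,colrng,FALSE);
 */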

#endif /* __MTX_QUERY_H_SEEN__ */