1 |
aw0a |
1 |
/* |
2 |
|
|
* anonmerge.c |
3 |
|
|
* by Benjamin Andrew Allan |
4 |
|
|
* Created September 21, 1997 |
5 |
|
|
* Copyright 1997 Carnegie Mellon University. |
6 |
|
|
* Version: $Revision: 1.9 $ |
7 |
|
|
* Version control file: $RCSfile: anonmerg.c,v $ |
8 |
|
|
* Date last modified: $Date: 2000/01/25 02:25:52 $ |
9 |
|
|
* Last modified by: $Author: ballan $ |
10 |
|
|
* |
11 |
|
|
* This file is part of the Ascend Language Interpreter. |
12 |
|
|
* |
13 |
|
|
* The Ascend Language Interpreter is free software; you can redistribute |
14 |
|
|
* it and/or modify it under the terms of the GNU General Public License as |
15 |
|
|
* published by the Free Software Foundation; either version 2 of the |
16 |
|
|
* License, or (at your option) any later version. |
17 |
|
|
* |
18 |
|
|
* The Ascend Language Interpreter is distributed in hope that it will be |
19 |
|
|
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of |
20 |
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
21 |
|
|
* General Public License for more details. |
22 |
|
|
* |
23 |
|
|
* You should have received a copy of the GNU General Public License |
24 |
|
|
* along with the program; if not, write to the Free Software Foundation, |
25 |
|
|
* Inc., 675 Mass Ave, Cambridge, MA 02139 USA. Check the file named |
26 |
|
|
* COPYING. |
27 |
johnpye |
399 |
* |
28 |
johnpye |
770 |
* (further comments moved to anonmerg.h) |
29 |
aw0a |
1 |
*/ |
30 |
|
|
|
31 |
|
|
|
32 |
|
|
#include <limits.h> /* for INT_MAX */ |
33 |
jpye |
2037 |
#include <ascend/utilities/config.h> |
34 |
jpye |
2018 |
#include <ascend/utilities/ascConfig.h> |
35 |
|
|
#include <ascend/utilities/ascMalloc.h> |
36 |
|
|
#include <ascend/utilities/ascPanic.h> |
37 |
|
|
#include <ascend/utilities/ascPrint.h> |
38 |
|
|
#include <ascend/general/pool.h> |
39 |
|
|
#include <ascend/general/list.h> |
40 |
|
|
#include <ascend/general/listio.h> |
41 |
|
|
#include <ascend/general/dstring.h> |
42 |
johnpye |
1210 |
|
43 |
aw0a |
1 |
#if TIMECOMPILER |
44 |
|
|
#include <time.h> |
45 |
jpye |
2018 |
#include <ascend/general/tm_time.h> |
46 |
aw0a |
1 |
#endif |
47 |
johnpye |
1211 |
|
48 |
|
|
|
49 |
johnpye |
399 |
#include "child.h" |
50 |
|
|
#include "type_desc.h" |
51 |
johnpye |
669 |
#include "expr_types.h" |
52 |
johnpye |
399 |
#include "instance_types.h" |
53 |
|
|
#include "instance_name.h" |
54 |
|
|
#include "tmpnum.h" |
55 |
|
|
#include "parentchild.h" |
56 |
|
|
#include "instquery.h" |
57 |
|
|
#include "visitinst.h" |
58 |
|
|
#include "instance_io.h" |
59 |
|
|
#include "visitinst.h" |
60 |
|
|
#include "visitlink.h" |
61 |
|
|
#include "arrayinst.h" /* for arrayvisitlocalleaves only */ |
62 |
|
|
#include "anonmerg.h" |
63 |
aw0a |
1 |
#if AMSTAT |
64 |
|
|
/* need numlistio to write debugging info */ |
65 |
|
|
#define NUMLISTEXPORTIO |
66 |
|
|
#endif |
67 |
johnpye |
399 |
#include "numlist.h" |
68 |
aw0a |
1 |
#if AMSTAT |
69 |
|
|
#undef NUMLISTEXPORTIO |
70 |
|
|
#endif |
71 |
|
|
|
72 |
|
|
#ifndef lint |
73 |
|
|
static CONST char AnonMergeModuleID[] = "$Id: anonmerg.c,v 1.9 2000/01/25 02:25:52 ballan Exp $"; |
74 |
|
|
#endif |
75 |
|
|
|
76 |
|
|
/* if want to compile unused functions, set to 1. |
77 |
|
|
*/ |
78 |
|
|
#define AMUNUSED 0 |
79 |
|
|
|
80 |
|
|
/* print detail timing breakdown of merge detection */ |
81 |
|
|
#define AMBKDN 0 |
82 |
|
|
#define CFI 0 |
83 |
|
|
|
84 |
|
|
/* print node list info */ |
85 |
|
|
#define CHECKN (AMSTAT || 0) |
86 |
|
|
|
87 |
|
|
/* if want to write reachability info as derived, set to 1. */ |
88 |
|
|
#define WRITEREACHABLE 0 |
89 |
|
|
|
90 |
|
|
/* |
91 |
|
|
* Number of buckets in the hash table to use for detecting universals. |
92 |
|
|
* Should bear some relation to the number of formal types in question. |
93 |
|
|
* Must match the mask in AMUHASH (buckets = 2^N, and mask = 2^N -1.) |
94 |
|
|
*/ |
95 |
|
|
#define AMUBUCKETS 1024 |
96 |
|
|
/* |
97 |
|
|
* hash function to put a heap pointer in a bucket. |
98 |
|
|
*/ |
99 |
|
|
#define AMUHASH(p) (((((long) (p))*1103515245) >> 20) & 1023) |
100 |
|
|
|
101 |
|
|
/* |
102 |
|
|
* this is the flag that indicates an instance is one of a kind |
103 |
|
|
* within the scope of merge detection, therefore is effectively |
104 |
|
|
* UNIVERSAL. |
105 |
|
|
*/ |
106 |
|
|
#define AMUFLAG 0x10 |
107 |
|
|
|
108 |
|
|
/* This is the flag in anonflags indicates that an amip is on the instance. |
109 |
|
|
*/ |
110 |
|
|
#define AMIPFLAG 0x1 |
111 |
|
|
|
112 |
|
|
|
113 |
|
|
/* so on each node which has merged descendants that need to be recorded, |
114 |
|
|
* we need a list of lists of paths. |
115 |
|
|
* i->amip->amlist-|-------------|->path1 to d1 ||||| |
116 |
|
|
* | |->path2 to d1 ||||| |
117 |
|
|
* | |
118 |
|
|
* |-------------|->path1 to d2 ||||| |
119 |
|
|
* etc |->path2 to d2 ||||| |
120 |
|
|
* etc |
121 |
|
|
* |
122 |
|
|
* gllist gllists gllist of childnums |
123 |
|
|
* where each path is the gllist of childnumbers that is followed |
124 |
|
|
* to reach the shared instance di. |
125 |
|
|
*/ |
126 |
johnpye |
399 |
/* |
127 |
aw0a |
1 |
*/ |
128 |
|
|
struct AnonMergeIP { /* Hangs off all instances with amipflag=1 */
  struct gl_list_t *amlist;
    /* The list of merges contained in this inst. each element is a
     * gl_list of paths. each path is a gl_list of child numbers
     * followed from this instance to reach the shared descendant.
     */
  GLint ip_number;      /* number of ip node (subgraph); dfbu index +1
                         * into the ipbuf array of AnonMergeIPData. */
  GLint node_number;    /* number of merged node, or 0 if this
                         * node is itself uninteresting. (shared)
                         */
  Numlist_p subgraph;   /* list of all ip'd nodes in subgraph(i) */
  Numlist_p shared;     /* list of merged nodes in subgraph(i) */
  Numlist_p parentslist;/* list of parent nodes of i */
  struct gl_list_t *glp;/* gl list of parent nodes of i. */
  /* number of merges recorded is gllen amlist. */
};
144 |
|
|
|
145 |
|
|
/* The existence of a bucket implies at least one instance of the |
146 |
|
|
* formal type exists. If no more than one exists, then its pointer |
147 |
|
|
* is stored in the bucket. If a second is found, the bucket becomes empty. |
148 |
|
|
* (b->i==NULL). |
149 |
|
|
* When the table is all built, mark anything that has a non-NULL |
150 |
|
|
* instance as locally-universal. |
151 |
|
|
*/ |
152 |
|
|
struct AnonMergeUniBucket {
  CONST struct TypeDescription *d; /* type = hash key */
  struct Instance *i;              /* sole instance seen so far, or NULL
                                    * once a second instance of d is found.
                                    * (could be a list later) */
  struct AnonMergeUniBucket *next; /* chain (collision list) */
  unsigned long indirected;        /* needed for arrays */
};
158 |
|
|
|
159 |
|
|
/* Accumulator passed around during the counting visit over the tree. */
struct AnonMergeVisitInfo {
  struct Instance *root;
  unsigned long nip;        /* instances to get IPs */
  unsigned long nim;        /* instances with multiple nonaliased parents */
  struct AnonMergeUniBucket *t[AMUBUCKETS]; /* hash table for universals */
  unsigned long maxchildren; /* most children of any instance */
};
166 |
|
|
|
167 |
|
|
/* Singly-linked list element holding one subgraph id. */
struct sgelt {
  GLint sg;           /* subgraph number */
  struct sgelt *next;
};
171 |
|
|
|
172 |
|
|
/* Scratch workspace for the merge/blob computation.
 * Array extents noted per field; elements allocated from 'pool'.
 */
struct mdata {
  struct sgelt **header;   /* [maxip] per-ip chain of subgraph elements */
  GLint *colcount;         /* [maxip] */
  GLint *sg2blob;          /* [maxchildren] subgraph -> blob map */
  int *blobcollected;      /* [maxchildren] */
  struct gl_list_t **blob; /* [maxchildren] */
  GLint nextnode_ip;
  GLint sg;
  int maxchildren;
  pool_store_t pool;       /* pool that sgelt nodes come from */
};
183 |
|
|
|
184 |
|
|
|
185 |
|
|
/* |
186 |
|
|
* packet that persists while derived merge data is in use. |
187 |
|
|
*/ |
188 |
|
|
/*
 * packet that persists while derived merge data is in use.
 */
struct AnonMergeIPData {
  struct gl_list_t *oldips;     /* result of puship */
  struct Instance *root;
  struct AnonMergeIP *ipbuf;    /* a mass allocated set of AMIPs */
  unsigned long iplen;          /* number of ips */
  unsigned int ipused;          /* number of slots from buf handed out */
  unsigned int ipback;          /* number of slots from buf returned */
  FILE *fp;                     /* debug/statistics output stream */

  GLint *listhints;             /* listhints[i] --> hint for graph i */
  GLint *node2ip;               /* node2ip[node_number] --> ip_number */

  Numlist_p enlp0, enlp1,enlp2; /* scratch expandable Numlists (shared) */
  struct gl_list_t *nlpgl;      /* gllist of collected numlists */

  Numlist_p senlp0, senlp1,senlp2; /* scratch expandable Numlists (subgraph) */
  struct gl_list_t *snlpgl;     /* gllist of collected numlists */

  Numlist_p *mergegraphs;       /* array of shareds size totgraphs */
  Numlist_p *graphs;            /* array of subgraphs size totgraphs */
  unsigned long *graphchildnum; /* child of i that graph came from */
  int totgraphs;                /* number of children of i with subgraphs */

  Numlist_p *routes;            /* array of subgraphs size totroutes */
  unsigned long *routechildnum; /* child of i that route graph came from */
  int totroutes;                /* number of children containing target */

  unsigned long *finalchildnum; /* children of i that yield merge path */
  int totfinal;                 /* number of paths to target we record */

  struct gl_list_t *scratchpath;     /* scratch childnumber list */
  struct gl_list_t *scratchpathlist; /* scratch list of paths */
  struct gl_list_t *scratchamlist;   /* scratch list of paths */

  int num_mergelists;           /* total number of N-path merges recorded */
  int num_iwithmerge;           /* total number of instances logging merges. */
  int compcount;

  struct mdata md;              /* blob workspace, see struct mdata */
};
228 |
|
|
|
229 |
|
|
/* given a pointer to an AM gl_list, destroys the list and sublists. |
230 |
|
|
* ignores NULL input. |
231 |
|
|
*/ |
232 |
|
|
static |
233 |
|
|
void DestroyMList(struct gl_list_t *aml) |
234 |
|
|
{ |
235 |
|
|
struct gl_list_t *pathlist, *path; |
236 |
|
|
unsigned long c,len,d,dlen; |
237 |
|
|
if (aml != NULL) { |
238 |
|
|
len = gl_length(aml); |
239 |
|
|
/* for each descendant */ |
240 |
|
|
for ( c = 1; c <= len; c++) { |
241 |
|
|
pathlist = (struct gl_list_t *)gl_fetch(aml,c); |
242 |
|
|
dlen = gl_length(pathlist); |
243 |
|
|
/* for each path in list of paths to descendant */ |
244 |
|
|
for ( d = 1; d <= dlen; d++) { |
245 |
|
|
path = (struct gl_list_t *)gl_fetch(pathlist,d); |
246 |
|
|
/* path is just a list of ints, so nothing to do to them */ |
247 |
|
|
gl_destroy(path); |
248 |
|
|
} |
249 |
|
|
gl_destroy(pathlist); |
250 |
|
|
} |
251 |
|
|
gl_destroy(aml); |
252 |
|
|
} |
253 |
|
|
} |
254 |
|
|
|
255 |
|
|
/* |
256 |
|
|
* return an ip from the array of them we are managing |
257 |
|
|
*/ |
258 |
|
|
static |
259 |
|
|
struct AnonMergeIP *AMGetIP(struct AnonMergeIPData *amipd) |
260 |
|
|
{ |
261 |
|
|
struct AnonMergeIP *amip; |
262 |
|
|
assert(amipd!=NULL); |
263 |
|
|
if (amipd->iplen <= amipd->ipused) { |
264 |
johnpye |
1064 |
ASC_PANIC("Too many ips requested"); |
265 |
aw0a |
1 |
gl_write_list(NULL,NULL); /* delete when done. forcing linker */ |
266 |
|
|
return NULL; /* not reached */ |
267 |
|
|
} |
268 |
|
|
amip = &(amipd->ipbuf[amipd->ipused]); |
269 |
|
|
(amipd->ipused)++; |
270 |
|
|
return amip; |
271 |
|
|
} |
272 |
|
|
|
273 |
|
|
/* Returns 1 if instance ch is of set, constant, array of set, or |
274 |
|
|
* array of constant type. The mergedness of these cannot affect |
275 |
|
|
* the meaning of models containing them, since the models' structures |
276 |
|
|
* are dependent on values of them, not identity. |
277 |
|
|
* Likewise returns 1 for relations, logical relations, whens, |
278 |
|
|
* and arrays thereof since these always have 1 parent and couldn't |
279 |
|
|
* be merged in any case. Likewise for dummy instance, which are UNIVERSAL. |
280 |
|
|
*/ |
281 |
johnpye |
399 |
static |
282 |
aw0a |
1 |
int IsIgnorableInstance(struct Instance *ch) |
283 |
|
|
{ |
284 |
|
|
assert(ch!=NULL); |
285 |
|
|
if (InstanceKind(ch)&IERRINST) { |
286 |
johnpye |
1064 |
ASC_PANIC("Called with bad instance"); |
287 |
aw0a |
1 |
return 0; /* not reached */ |
288 |
|
|
} |
289 |
|
|
if (InstanceKind(ch) & (IFUND | ICONS | IDUMB | IWHEN | IRELN | ILRELN)) { |
290 |
|
|
return 1; |
291 |
|
|
} |
292 |
|
|
if (IsArrayInstance(ch) ) { |
293 |
|
|
struct TypeDescription *d; |
294 |
|
|
d = GetArrayBaseType(InstanceTypeDesc(ch)); |
295 |
|
|
if (GetBaseType(d) & ERROR_KIND) { |
296 |
johnpye |
1064 |
ASC_PANIC("Called with bad array instance"); |
297 |
aw0a |
1 |
return 0; /* not reached */ |
298 |
|
|
} |
299 |
|
|
if (GetBaseType(d) & (EQN_KIND | CONSTANT_KIND | DUMB_KIND | SET_KIND)) { |
300 |
|
|
return 1; |
301 |
|
|
} |
302 |
|
|
} |
303 |
|
|
return 0; |
304 |
|
|
} |
305 |
|
|
|
306 |
|
|
/* |
307 |
|
|
* Collects and merges into a cheaply allocated numlist (not expandable) |
308 |
|
|
* all the interesting descendant information from the children of i. |
309 |
|
|
* i itself is in the subgraph of i if i is interesting. |
310 |
|
|
* If nothing interesting is reachable, may return NULL. |
311 |
|
|
* |
312 |
|
|
* This function does not collect as 'reachable' constant and set |
313 |
|
|
* instance types since the 'identity' or lack thereof does not |
314 |
|
|
* change the semantics of these structural parameter instances. |
315 |
johnpye |
399 |
* We don't need to know about their mergedness. |
316 |
aw0a |
1 |
* Because we are numbering only the mergednodes we care about, |
317 |
johnpye |
399 |
* ignoring constants does not fill the maps with holes. |
318 |
aw0a |
1 |
* |
319 |
|
|
* shared is the list described above. This function also collects |
320 |
|
|
* subgraph, the list of both merged and compound nodes reachable |
321 |
|
|
* from i. |
322 |
johnpye |
399 |
* |
323 |
aw0a |
1 |
* We handle each child of i once and double up the other operations. |
324 |
|
|
*/ |
325 |
|
|
/* Collects and merges into a cheaply allocated numlist (not expandable)
 * all the interesting descendant information from the children of i,
 * storing the results in amip->shared and amip->subgraph.
 * Must be applied bottom-up (children already have their amips filled).
 * Also appends this node's ip_number to each real (non-alias) child's
 * parent list (glp), which keeps parent numbers sorted by construction.
 */
static
void CollectSubgraphNumlist(struct Instance *i,
                            struct AnonMergeIPData *amipd,
                            struct AnonMergeIP *amip)
{
  unsigned long c,len, tn;
  struct Instance *ch;
  struct AnonMergeIP *chamip;
  Numlist_p shared;   /* list of shared nodes in the dag */
  Numlist_p subgraph; /* list of shared and compound nodes in dag */
  ChildListPtr cl;

  /* reset the scratch accumulators shared across calls */
  NumpairClearList(amipd->enlp0);
  NumpairClearList(amipd->senlp0);
  gl_reset(amipd->nlpgl);
  gl_reset(amipd->snlpgl);
  len = NumberChildren(i);
  /* child list needed only for MODELs, to detect ALIASES children */
  cl = (InstanceKind(i) == MODEL_INST)?GetChildList(InstanceTypeDesc(i)):NULL;
  for (c = 1; c <= len; c++) {
    ch = InstanceChild(i,c);
    if ( ch != NULL && (GetAnonFlags(ch) & AMIPFLAG) != 0 ) {
      chamip = (struct AnonMergeIP *)GetInterfacePtr(ch);
      /* collect child reachables and also their nums */
      if (chamip != NULL) {
        /* record i as a real parent of ch unless link is an alias */
        if (chamip->glp != NULL &&
            (cl == NULL || ChildAliasing(cl,c)==0)) {
          gl_append_ptr(chamip->glp,(void *)amip->ip_number);
        }
        /* gather child's merged-node list for combining below */
        if (chamip->shared != NULL) {
          gl_append_ptr(amipd->nlpgl, chamip->shared);
        }
        tn = chamip->node_number;
        if (tn != 0L) {
          NumpairAppendList(amipd->enlp0, tn);
        }

        /* gather child's whole-subgraph list likewise */
        if (chamip->subgraph != NULL) {
          gl_append_ptr(amipd->snlpgl, chamip->subgraph);
        }
        tn = chamip->ip_number;
        if (tn != 0L) {
          NumpairAppendList(amipd->senlp0, tn);
        }
      }
    }
  }
  /* i itself belongs in its own lists when numbered */
  if (amip->node_number != 0) {
    NumpairAppendList(amipd->enlp0, amip->node_number);
  }
  if (amip->ip_number != 0) {
    NumpairAppendList(amipd->senlp0, amip->ip_number);
  }
  gl_append_ptr(amipd->nlpgl, amipd->enlp0);
  gl_append_ptr(amipd->snlpgl, amipd->senlp0);
  /* merge all collected lists into compact persistent numlists */
  shared = NumpairCombineLists(amipd->nlpgl, amipd->enlp1, amipd->enlp2);
  subgraph = NumpairCombineLists(amipd->snlpgl, amipd->senlp1, amipd->senlp2);
  NumpairClearList(amipd->enlp0);
  NumpairClearList(amipd->senlp0);
  amip->shared = shared;
  amip->subgraph = subgraph;
}
386 |
|
|
|
387 |
|
|
/* Grabs an ip from the buffer and inits/returns it. |
388 |
|
|
* When used with pushIP, decorates all instances that are marked with |
389 |
johnpye |
399 |
* AMIPFLAG which should only be models/arrays and shared variables. |
390 |
aw0a |
1 |
* As an interesting side effect, we collect the list of mergednodes in the |
391 |
|
|
* subgraph of i (in numlist_p form). Should be applied bottom-up. |
392 |
|
|
* We also add i's number to the lists of parents in its children. |
393 |
|
|
* By dfbu, parent numbers will be added in increasing order, possibly |
394 |
|
|
* with the same parent appearing twice consecutively in the child's |
395 |
|
|
* parent list. Logging numbers here allows us to avoid sorting the |
396 |
|
|
* numbers later before creating a persistent numlistp. |
397 |
|
|
* Actually, to reduce work, we subcontract numbering the children |
398 |
|
|
* to CollectSubgraphNumlist. |
399 |
|
|
*/ |
400 |
|
|
static |
401 |
|
|
struct AnonMergeIP *AnonMergeCreateIP(struct Instance *i, |
402 |
|
|
struct AnonMergeIPData *amipd) |
403 |
|
|
{ |
404 |
|
|
struct AnonMergeIP *amip; |
405 |
|
|
|
406 |
|
|
assert(i!=NULL); |
407 |
|
|
|
408 |
|
|
amip = NULL; |
409 |
|
|
|
410 |
|
|
if (GetAnonFlags(i) & AMIPFLAG) { |
411 |
|
|
|
412 |
|
|
amip = AMGetIP(amipd); |
413 |
|
|
assert(amip!=NULL); |
414 |
|
|
amip->amlist = NULL; |
415 |
|
|
amip->subgraph = NULL; |
416 |
|
|
amip->glp = gl_create(NumberParents(i)+2); |
417 |
|
|
amip->parentslist = NULL; |
418 |
|
|
|
419 |
johnpye |
399 |
amip->node_number = (int)GetTmpNum(i); |
420 |
aw0a |
1 |
amip->ip_number = amipd->ipused; /* array loc in ipbuf +1 */ |
421 |
|
|
gl_append_ptr(amip->glp,(void *)amip->ip_number); |
422 |
|
|
assert(amip->node_number >= 0); |
423 |
|
|
/* Node number is the dfbu numbering of shared instances, |
424 |
|
|
* the dfbu mergenode number in tmpnum of i. |
425 |
|
|
*/ |
426 |
|
|
SetTmpNum(i,0L); |
427 |
|
|
|
428 |
|
|
CollectSubgraphNumlist(i, amipd, amip); |
429 |
|
|
} |
430 |
|
|
return amip; |
431 |
|
|
} |
432 |
|
|
|
433 |
|
|
/* Hunts up node 'target' in the descendants of child 'start' of i. |
434 |
|
|
* Resets content of scratch on entry. |
435 |
|
|
* Fills path 'scratch' with childnumbers that can be followed to |
436 |
|
|
* get to target. Scratch should not be NULL. |
437 |
|
|
* Could return target instance for free if needed. |
438 |
|
|
* This function cannot fail if the shared lists in the amip are |
439 |
|
|
* correct. |
440 |
|
|
* Scratch result should be copied. |
441 |
|
|
*/ |
442 |
|
|
/* Hunts up node 'target' in the descendants of child 'start' of i.
 * Resets content of scratch on entry, then fills it with the child
 * numbers followed (left to right, depth first) to reach target.
 * Scratch should not be NULL and its result should be copied.
 * Cannot fail if the amip 'shared' lists are correct: at each level
 * some child's shared list must contain target.
 */
static
void AnonMergeFindPath(struct Instance *i, int target,
                       unsigned long start, struct gl_list_t *scratch)
{
#define AMFPDEBUG 0
  unsigned long c;
  struct Instance *ch = NULL; /* unnec init to avoid warning */
  struct AnonMergeIP *amip;
  int notfound, notchild;
  ChildListPtr cl;
#if AMFPDEBUG
  int first = 1;
#endif

  assert(scratch != NULL);
  assert(InstanceChild(i,start) != NULL);

  /* start out knowing the start child, so we don't search much on 1st one */
  gl_reset(scratch);
  c = start;
  notfound = 1;
  /* ch will always hit InstanceChild assignment before being used
   * when this function is called with a correct start value.
   */
  while (notfound) { /* entered at least once */
    notchild = 1;
    /* alias detection needs the MODEL child list; arrays have none */
    if (InstanceKind(i) == MODEL_INST) {
      cl = GetChildList(InstanceTypeDesc(i));
    } else {
      cl = NULL;
    }
    while (notchild) { /* entered at least once */
      /* must succeed on first trip if start is correct */
      /* hunt for next child of i which has target as a merged descendant */
      ch = InstanceChild(i,c);
      if (ch != NULL &&
          (GetAnonFlags(ch) & AMIPFLAG) != 0 &&
          (cl == NULL || ChildAliasing(cl,c) == 0)) {
        amip = (struct AnonMergeIP *)GetInterfacePtr(ch);
        assert(amip != NULL);
        if (NumpairNumberInList(amip->shared,(int)target)) { /*nnl*/
          notchild = 0;
          /* descend; done entirely when ch itself is the target node */
          if (amip->node_number == target) {
            notfound = 0;
          }
          gl_append_ptr(scratch,(void *)c);
        }
      } /* else child can't be it. go to next. */
#if AMFPDEBUG
      if (first) {
        first = 0;
        if (notchild) {
          FPRINTF(ASCERR,"AMFindPath missed on given child %lu\n",start);
        }
      }
#endif
      c++;
    }
    /* descend to child found. hunt for next left to right. */
    c = 1;
    i = ch; /* so when at last located, i becomes target instance. */
  }
  return;
}
506 |
|
|
|
507 |
|
|
static |
508 |
|
|
void AnonWritePath(FILE *fp, struct Instance *i, struct gl_list_t *path) |
509 |
|
|
{ |
510 |
|
|
#define AWPDB 0 /* debugging for this function */ |
511 |
|
|
struct Instance *root; |
512 |
johnpye |
399 |
unsigned long c,len, cn; |
513 |
aw0a |
1 |
#if AWPDB |
514 |
|
|
int ip,im; |
515 |
|
|
struct AnonMergeIP *amip; |
516 |
|
|
#endif |
517 |
|
|
struct InstanceName name; |
518 |
|
|
|
519 |
|
|
assert(i != NULL); |
520 |
|
|
assert(path != NULL); |
521 |
|
|
|
522 |
|
|
len = gl_length(path); |
523 |
|
|
for (c = 1; c <= len; c++) { |
524 |
|
|
root = i; |
525 |
|
|
cn = (unsigned long)gl_fetch(path,c); |
526 |
|
|
i = InstanceChild(root,cn); |
527 |
|
|
#if AWPDB |
528 |
|
|
if (GetAnonFlags(i)&AMIPFLAG) { |
529 |
|
|
amip = (struct AnonMergeIP *)GetInterfacePtr(i); |
530 |
|
|
assert(amip != NULL); |
531 |
|
|
ip = amip->ip_number; |
532 |
|
|
im = amip->node_number; |
533 |
|
|
} else { |
534 |
|
|
im = ip = -1; |
535 |
|
|
} |
536 |
|
|
FPRINTF(fp,"(%d){%d}",ip,im); |
537 |
|
|
#endif |
538 |
johnpye |
399 |
name = ChildName(root,cn); |
539 |
aw0a |
1 |
switch (InstanceNameType(name)){ |
540 |
|
|
case StrName: |
541 |
|
|
if (c>1) { |
542 |
|
|
PUTC('.',fp); |
543 |
|
|
} |
544 |
|
|
FPRINTF(fp,SCP(InstanceNameStr(name))); |
545 |
|
|
break; |
546 |
|
|
case IntArrayIndex: |
547 |
|
|
FPRINTF(fp,"[%ld]",InstanceIntIndex(name)); |
548 |
|
|
break; |
549 |
|
|
case StrArrayIndex: |
550 |
|
|
FPRINTF(fp,"['%s']",SCP(InstanceStrIndex(name))); |
551 |
|
|
break; |
552 |
|
|
} |
553 |
|
|
} |
554 |
|
|
FPRINTF(fp,"\n"); |
555 |
|
|
} |
556 |
|
|
|
557 |
|
|
/* Writes a merge list (gl_list of path-lists rooted at instance i) to
 * fp for debugging: for each merge bush, a header naming the target via
 * its first path, then every alternate path. Tolerates NULL aml.
 */
static
void WriteMList(FILE *fp, struct gl_list_t *aml, struct Instance *i)
{
  struct gl_list_t *pathlist, *path;
  unsigned long c,len;
  unsigned long d,dlen;

  if (aml !=NULL) {
    len = gl_length(aml);
    for (c = 1; c <= len; c++) { /* over each merge bush */
      pathlist = (struct gl_list_t *)gl_fetch(aml,c);
      assert(pathlist != NULL);
      dlen = gl_length(pathlist);
      assert(dlen != 0);
      FPRINTF(fp,"%lu ALTERNATE PATHS to ",dlen);
      path = (struct gl_list_t *)gl_fetch(pathlist,1);
      /* bug fix: asserted pathlist (already checked) instead of path */
      assert(path != NULL);
      AnonWritePath(fp,i,path);
      /* write the list of paths */
      FPRINTF(fp," //\n");
      for (d = 1; d <= dlen; d++) {
        path = (struct gl_list_t *)gl_fetch(pathlist,d);
        /* removed unused local 'elen' (gl_length(path) was discarded) */
        AnonWritePath(fp,i,path);
      }
    }
  }
}
586 |
|
|
|
587 |
|
|
|
588 |
|
|
#if AMSTAT
/* Statistics visitor: for each flagged instance that recorded merges,
 * writes the merge count, the instance name and its merge list to
 * amipd->fp.
 */
static
void AnonMergeLogIP(struct Instance *i, struct AnonMergeIPData *amipd)
{
  struct AnonMergeIP *amip;

  if ((GetAnonFlags(i)&AMIPFLAG) == 0) {
    return;
  }
  amip = (struct AnonMergeIP *)GetInterfacePtr(i);
  if (amip->amlist == NULL) {
    return;
  }
  FPRINTF(amipd->fp,"%lu, ", gl_length(amip->amlist));
  WriteInstanceName(amipd->fp,i,NULL);
  FPRINTF(amipd->fp,"\n");
  WriteMList(amipd->fp,amip->amlist,i);
  FPRINTF(amipd->fp,"\n");
}
#endif
605 |
|
|
/* |
606 |
|
|
* clears lists in amip, if any. |
607 |
|
|
*/ |
608 |
|
|
static |
609 |
|
|
void AnonMergeDestroyIP(struct Instance *i, |
610 |
|
|
struct AnonMergeIP *amip, |
611 |
|
|
struct AnonMergeIPData *amipd) |
612 |
|
|
{ |
613 |
|
|
(void)i; |
614 |
|
|
DestroyMList(amip->amlist); |
615 |
|
|
#if AMSTAT |
616 |
|
|
if (amip->ip_number !=0 || amip->node_number != 0) { |
617 |
|
|
FPRINTF(amipd->fp,"ip# %d mn# %d ",amip->ip_number,amip->node_number); |
618 |
|
|
WriteInstanceName(amipd->fp,i,NULL); |
619 |
|
|
FPRINTF(amipd->fp,"\n"); |
620 |
|
|
} |
621 |
|
|
#endif |
622 |
|
|
amip->amlist = NULL; |
623 |
|
|
if (amip->subgraph != NULL) { |
624 |
|
|
NumpairDestroyList(amip->subgraph); |
625 |
|
|
amip->subgraph = NULL; |
626 |
|
|
} |
627 |
|
|
if (amip->glp != NULL) { |
628 |
|
|
gl_destroy(amip->glp); |
629 |
|
|
amip->glp = NULL; |
630 |
|
|
} |
631 |
|
|
if (amip->parentslist != NULL) { |
632 |
|
|
NumpairDestroyList(amip->parentslist); |
633 |
|
|
amip->parentslist = NULL; |
634 |
|
|
} |
635 |
|
|
if (amip->shared != NULL) { |
636 |
|
|
NumpairDestroyList(amip->shared); |
637 |
|
|
amip->shared = NULL; |
638 |
|
|
} |
639 |
|
|
amipd->ipback++; |
640 |
|
|
} |
641 |
|
|
|
642 |
|
|
|
643 |
|
|
#define THASH 0 |
644 |
|
|
static |
645 |
|
|
struct AnonMergeUniBucket *FindUniBucket(struct TypeDescription *d, |
646 |
|
|
unsigned long indirected, |
647 |
|
|
struct AnonMergeUniBucket **t) |
648 |
|
|
{ |
649 |
|
|
struct AnonMergeUniBucket *result; |
650 |
johnpye |
908 |
int bindex; |
651 |
|
|
bindex = AMUHASH(SCP(GetName(d))); |
652 |
|
|
result = t[bindex]; |
653 |
aw0a |
1 |
while (result != NULL && |
654 |
|
|
( d != result->d || |
655 |
|
|
(indirected != LONG_MAX && indirected != result->indirected) |
656 |
|
|
) |
657 |
|
|
) { |
658 |
|
|
result = result->next; |
659 |
|
|
} |
660 |
|
|
#if THASH |
661 |
|
|
FPRINTF(ASCERR,"FUB:\t%d\tind %lu\t%s\t%lu\n", |
662 |
johnpye |
908 |
bindex,indirected,SCP(GetName(d)),(unsigned long)result); |
663 |
aw0a |
1 |
#endif |
664 |
|
|
return result; |
665 |
|
|
} |
666 |
|
|
|
667 |
|
|
/* |
668 |
|
|
* caller is responsible for setting b->i. |
669 |
|
|
*/ |
670 |
|
|
static |
671 |
|
|
struct AnonMergeUniBucket *AddUniBucket(struct TypeDescription *d, |
672 |
|
|
unsigned long indirected, |
673 |
|
|
struct AnonMergeUniBucket **t) |
674 |
|
|
{ |
675 |
|
|
struct AnonMergeUniBucket *b; |
676 |
johnpye |
908 |
int bindex; |
677 |
johnpye |
709 |
b = ASC_NEW(struct AnonMergeUniBucket); |
678 |
aw0a |
1 |
if (b==NULL) { |
679 |
|
|
return NULL; |
680 |
|
|
} |
681 |
johnpye |
908 |
bindex = AMUHASH(SCP(GetName(d))); |
682 |
|
|
b->next = t[bindex]; |
683 |
|
|
t[bindex] = b; |
684 |
aw0a |
1 |
b->d = d; |
685 |
|
|
b->indirected = indirected; |
686 |
|
|
return b; |
687 |
|
|
} |
688 |
|
|
/* |
689 |
|
|
* Clears the table (free the buckets) and any bucket |
690 |
|
|
* for implying only 1 instance, mark the instance as |
691 |
|
|
* being universal and mark it as having 0 parent references |
692 |
|
|
* because we don't care about its merges. |
693 |
|
|
*/ |
694 |
|
|
static |
695 |
|
|
void AnonMergeMarkUniversals(struct AnonMergeVisitInfo *amvi) |
696 |
|
|
{ |
697 |
|
|
int i; |
698 |
|
|
struct AnonMergeUniBucket **t; |
699 |
|
|
struct AnonMergeUniBucket *b; |
700 |
|
|
t = amvi->t; |
701 |
|
|
for (i = 0; i < AMUBUCKETS; i++) { |
702 |
|
|
while (t[i] != NULL) { |
703 |
|
|
b = t[i]; |
704 |
|
|
t[i] = b->next; |
705 |
|
|
if (b->i != NULL) { |
706 |
|
|
SetAnonFlags(b->i,AMUFLAG); /* this must also catch explicit univs */ |
707 |
|
|
SetTmpNum(b->i,0L); |
708 |
|
|
#if (AMSTAT && 1) |
709 |
|
|
WriteInstanceName(ASCERR,b->i,NULL); |
710 |
|
|
FPRINTF(ASCERR," UNIVERSAL\n"); |
711 |
|
|
#endif |
712 |
|
|
} |
713 |
|
|
ascfree(b); |
714 |
|
|
} |
715 |
|
|
} |
716 |
|
|
} |
717 |
|
|
|
718 |
|
|
/*
 * AnonMergeCountTree(i,amvi);
 * This must be applied bottom up (children visited before parents).
 * Sets the anonflags to 0 for all i.
 * Sets the tmpnum of i to zero.
 * Increment tmpnum in each child of compound i which is not an alias child of
 * i or a constant type. Constants are fully accounted for in their individual
 * anontype, so sharedness does not further affect instance semantics -- at
 * least so far as equations are concerned. We deduct the array alii
 * contributions from the MODEL level where we find the alias statement
 * because it is hard to do it in the array scope.
 *
 * When visit done with this function, every nonconstant instance ends with
 * tmpnum set to the number of parents it is connected to in a non-alias way.
 *
 * Sometimes and incorrectly:
 * Counts total number of children, parents, extra parents, universals,
 * parents of universals;
 * These will be overestimates because the visitinstance
 * functions do not ignore universals, even though we will.
 */
void AnonMergeCountTree(struct Instance *i, struct AnonMergeVisitInfo *amvi)
{
  unsigned long c,len;
  struct AnonMergeUniBucket *b;
  struct Instance *ch;
  ChildListPtr cl;
#if AMSTAT
  unsigned long tmp;
  (void) tmp;  /* statistics scratch; silenced when unused */
#endif

  /* reset per-instance markers used by the later passes */
  SetAnonFlags(i,0);
  SetTmpNum(i,0);

  /* we need to find if all instances are universal or at least of
   * locally unique formal type. Inventory them here.
   */
  b = FindUniBucket(InstanceTypeDesc(i),InstanceIndirected(i),amvi->t);
  if (b == NULL) {
    b = AddUniBucket(InstanceTypeDesc(i),InstanceIndirected(i),amvi->t);
    b->i = i; /* remember first, hopefully only, instance */
  } else {
    b->i = NULL; /* more than 1 instance. not universal */
  }

  if (IsCompoundInstance(i)) {
    /* Mark increment parent link count on interesting children. Elsewhere
     * all compound instances and all the ones that happen to have multiple
     * non-alias parents will get amips. All those with with multiple
     * non-alias parents will eventually get tmpnums reassigned dfbu.
     */
    len = NumberChildren(i);
    /* track the widest child list seen; AMIPDInit sizes buffers from it */
    if (len > amvi->maxchildren) {
      amvi->maxchildren = len;
    }
    if (InstanceKind(i) == MODEL_INST) {
      cl = GetChildList(InstanceTypeDesc(i));
      assert(cl!=NULL);
      for (c = 1; c <= len; c++) {
        if ((ch = InstanceChild(i,c)) != NULL && !IsIgnorableInstance(ch)) {
          /*ignore const/set/reln/dummy/aryofsame ch*/
          if ( !ChildAliasing(cl,c) ) {
            /* record existence of a real link to ch */
            IncrementTmpNum(ch);
          } else {
            /* ALIASES. array possibly. */
            if (IsArrayInstance(ch)) {
              /* Now, in the context of the MODEL, we can spot if a child is
               * an alias array. If it is, then we want to decrement the
               * reference counts on the leaves of the array, but not on the
               * array_insts, unless those leaves happen to be other arrays.
               * It may happen that there are no leaves if the array is
               * defined over a NULL set in some portion. So for example if
               * c[8][9] IS_A real; b[1..2][4..5] ALIASES c;
               * then the instance b has 4 subscripts to the end user,
               * but the 'leaves' of b which we don't want to
               * count as parents of elements c[i] are b[i][j].
               * Go NumberofDereferences(ch) and delete that contribution to
               * the number of parents counts. Add this to arrayinst.c.
               */
              ArrayVisitLocalLeaves(ch,(AVProc)DecrementTmpNum);
            }
          }
        }
      }
    } else {
      assert(IsArrayInstance(i)); /* must be array since not MODEL */
      for (c = 1; c <= len; c++) {
        if ( (ch = InstanceChild(i,c)) != NULL) {
          if (!IsIgnorableInstance(ch)) {
            /* do not care about merges on arrays of constants/sets/relns
             * arrays of same since the anontype includes value and
             * structure doesn't matter if value is the same.
             */
            IncrementTmpNum(ch);
          }
        }
      }
    }
  }
}
820 |
|
|
|
821 |
|
|
/* Sets the tmpnum to mergenode dfbu if tmpnum currently > 1.
 * Sets the tmpnum to 0 if tmpnum currently 1.
 * Does nothing to tmpnum if tmpnum 0.
 * Counts the instance toward the number of ips needed if it is
 * compound or its final tmpnum is not 0.
 * This must be called after universals are marked, else the universal
 * marking may stop on the anonflags.
 * On entry, tmpnum holds the non-alias parent count computed by
 * AnonMergeCountTree; on exit it is either 0 or a unique merged-node id.
 */
static
void AnonMergeLabelNodes(struct Instance *i, struct AnonMergeVisitInfo *amvi)
{
  /* sanity bound: parent counts this large indicate corruption */
  assert(GetTmpNum(i) <10000000);
  switch (GetTmpNum(i)) {
  case 0:
    /* all ignorable stuff should fall in here */
    break;
  case 1:
    assert(IsIgnorableInstance(i)==0);
    /* we're not supposed to be marking ignorable instances tmpnums at all
     * in the net.
     */
    SetTmpNum(i,0L);
    break;
  default:
    /* more than 1 real parent: this is a merged node. assign it the next
     * node number and flag it for an interface pointer. */
    (amvi->nim)++;
    assert(IsIgnorableInstance(i)==0);
    SetTmpNum(i,(unsigned long)amvi->nim);
    assert(GetTmpNum(i) <100000000);
    /* compound or not it gets an IP */
    (amvi->nip)++;
    SetAnonFlags(i,(GetAnonFlags(i) | AMIPFLAG));
    return;
  }
  if (IsCompoundInstance(i)) {
    /* nonmerged instance or uninteresting instance. check compoundness
     * as even uninteresting, unmerged compound instances need a space
     * to hang their subgraph numlist. universals included.
     */
    SetAnonFlags(i,(GetAnonFlags(i) | AMIPFLAG));
    (amvi->nip)++;
  }
}
864 |
|
|
|
865 |
|
|
/*
 * prints out the statistics of academic interest.
 * does not compile (cannot) unless amstat == TRUE.
 * Reports the merged-instance and interface-pointer counts collected
 * during the counting/labeling visits.
 */
#if (AMSTAT)
static
void AMCWriteCounts(struct AnonMergeVisitInfo *amvi)
{
  FPRINTF(ASCERR,"AnonMerge statistics:\n");
  FPRINTF(ASCERR,"\tMerged Instances:\t%lu\n",amvi->nim);
  FPRINTF(ASCERR,"\tIP Instances:\t%lu\n",amvi->nip);
}
#endif /* amstat */
878 |
|
|
|
879 |
|
|
static |
880 |
|
|
void AMVIInit(struct AnonMergeVisitInfo *amvi,struct Instance *root) |
881 |
|
|
{ |
882 |
|
|
int i; |
883 |
|
|
assert(amvi!=NULL); |
884 |
|
|
assert(root!=NULL); |
885 |
|
|
amvi->root = root; |
886 |
|
|
amvi->nip = 0L; |
887 |
|
|
amvi->nim = 0L; |
888 |
|
|
amvi->maxchildren = 0L; |
889 |
|
|
for (i = 0; i < AMUBUCKETS; i++) { |
890 |
|
|
amvi->t[i] = NULL; |
891 |
|
|
} |
892 |
|
|
} |
893 |
|
|
|
894 |
|
|
#define SP_LEN 2 |
895 |
|
|
#if (SIZEOF_VOID_P == 8) |
896 |
|
|
#define SP_WID 252 |
897 |
|
|
#else |
898 |
|
|
#define SP_WID 504 |
899 |
|
|
#endif |
900 |
|
|
/* retune spwid if the size of sgelt changes dramatically */ |
901 |
|
|
#define SP_ELT_SIZE (sizeof(struct sgelt)) |
902 |
|
|
#define SP_MORE_ELTS 1 |
903 |
|
|
/* Number of slots filled if more elements needed. |
904 |
|
|
So if the pool grows, it grows by SP_MORE_ELTS*SP_WID elements at a time. */ |
905 |
|
|
#define SP_MORE_BARS 50 |
906 |
|
|
/* This is the number of pool bar slots to add during expansion. |
907 |
|
|
not all the slots will be filled immediately. */ |
908 |
|
|
|
909 |
|
|
/* Create the pool store used for struct sgelt allocation in md.
 * Panics (fatal) if called while a pool already exists or if the
 * pool cannot be allocated; md->pool must be NULL on entry. */
static
void InitSgeltPool(struct mdata *md) {
  if (md->pool != NULL ) {
    ASC_PANIC("ERROR: InitSgeltPool called twice.\n");
  }
  md->pool =
    pool_create_store(SP_LEN, SP_WID, SP_ELT_SIZE, SP_MORE_ELTS, SP_MORE_BARS);
  if (md->pool == NULL) {
    ASC_PANIC("ERROR: InitSgeltPool unable to allocate pool.\n");
  }
}
920 |
|
|
|
921 |
|
|
static |
922 |
|
|
void DestroySgeltPool(struct mdata *md) { |
923 |
|
|
if (md->pool==NULL) return; |
924 |
|
|
pool_destroy_store(md->pool); |
925 |
|
|
md->pool = NULL; |
926 |
|
|
} |
927 |
|
|
|
928 |
johnpye |
89 |
#if 0
/* UNUSED */
/* Write a usage report for md's sgelt pool to f.
 * Fix: the original printed "SgeltPool is empty" and then fell through
 * to pool_print_store(f,md->pool,0) with md->pool == NULL, which would
 * dereference a null pool; return early instead when there is no pool. */
static
void ReportSgeltPool(FILE *f,struct mdata *md)
{
  if (md->pool==NULL) {
    FPRINTF(f,"SgeltPool is empty\n");
    return; /* nothing further to print; avoid NULL pool dereference */
  }
  FPRINTF(f,"SgeltPool ");
  pool_print_store(f,md->pool,0);
}
#endif
940 |
aw0a |
1 |
|
941 |
|
|
/* create amipd and init all its bits.
 * Allocates the per-run AnonMergeIPData: the node->ip translation table,
 * the ip buffer, scratch numpair/gl lists, the per-child buffers sized by
 * amvi->maxchildren, and the md sparse-matrix workspace. Pushes interface
 * pointers onto the tree (saved in oldips for later restoration) and
 * converts each ip's temporary glp parent list into a compact numpair list.
 * Panics fatally on any allocation failure (the return NULL lines after
 * ASC_PANIC are unreachable belt-and-braces). Returns the ready amipd. */
static
struct AnonMergeIPData *AMIPDInit(struct AnonMergeVisitInfo *amvi)
{
  size_t asize;
  struct AnonMergeIPData *amipd;
  int i,len;
  unsigned long j,dlen;

  amipd = ASC_NEW(struct AnonMergeIPData);
  if (amipd==NULL) {
    ascfree(amipd); /* NOTE(review): frees NULL, a no-op; likely vestigial */
    ASC_PANIC("Insufficent memory for amipd.");
    return NULL;
  }
  amipd->root = amvi->root;
  amipd->iplen = amvi->nip;
  amipd->num_mergelists = 0;
  amipd->num_iwithmerge = 0;
  /* node numbers are 1-based, hence nim+1 slots */
  amipd->node2ip = ASC_NEW_ARRAY(GLint,(amvi->nim+1));
  if (amipd->node2ip == NULL) {
    ASC_PANIC("Insufficent memory for node2ip.");
    return NULL;
  }
  amipd->ipbuf = ASC_NEW_ARRAY(struct AnonMergeIP,amvi->nip);
  if (amipd->ipbuf==NULL) {
    ASC_PANIC("Insufficent memory for ipbuf.");
    return NULL;
  }
  amipd->ipused = 0;
  amipd->ipback = 0;
  amipd->compcount = 1;
  /* expandable scratch numpair lists, initial capacity 100 */
  amipd->enlp0 = NumpairExpandableList(NULL,100);
  amipd->enlp1 = NumpairExpandableList(NULL,100);
  amipd->enlp2 = NumpairExpandableList(NULL,100);
  amipd->nlpgl = gl_create(100);
  if (amipd->enlp0 == NULL ||
      amipd->enlp1 == NULL ||
      amipd->enlp2 == NULL ||
      amipd->nlpgl == NULL) {
    ASC_PANIC("Insufficent memory for scratch nlps.");
    return NULL;
  }
  amipd->senlp0 = NumpairExpandableList(NULL,100);
  amipd->senlp1 = NumpairExpandableList(NULL,100);
  amipd->senlp2 = NumpairExpandableList(NULL,100);
  amipd->snlpgl = gl_create(100);
  if (amipd->senlp0 == NULL ||
      amipd->senlp1 == NULL ||
      amipd->senlp2 == NULL ||
      amipd->snlpgl == NULL) {
    ASC_PANIC("Insufficent memory for scratch snlps.");
    return NULL;
  }
  /* hang AnonMergeIP records on every flagged instance; keep the old
   * interface pointers so they can be restored when we are done. */
  amipd->oldips = PushInterfacePtrs(amvi->root,(IPFunc)AnonMergeCreateIP,
                                    amvi->nip,1, (VOIDPTR)amipd);

  asize = amvi->maxchildren; /* now some buffers of this size */

  amipd->listhints = ASC_NEW_ARRAY(GLint,asize);
  amipd->graphs = ASC_NEW_ARRAY(Numlist_p,asize);
  amipd->mergegraphs = ASC_NEW_ARRAY(Numlist_p,asize);
  amipd->graphchildnum =
    ASC_NEW_ARRAY(unsigned long,asize);
  amipd->routes = ASC_NEW_ARRAY(Numlist_p,asize);
  amipd->routechildnum =
    ASC_NEW_ARRAY(unsigned long,asize);
  amipd->finalchildnum =
    ASC_NEW_ARRAY(unsigned long,asize);
  /* totfinal, totroute, totgraph are all 'in use' counters, not
   * the size of allocation. amvi->maxchildren is the capacity.
   */
  if (amipd->graphs == NULL ||
      amipd->mergegraphs == NULL ||
      amipd->graphchildnum == NULL ||
      amipd->routes == NULL ||
      amipd->routechildnum == NULL ||
      amipd->finalchildnum == NULL) {
    ASC_PANIC("Insufficent memory for subgraph lists.");
    return NULL;
  }
  amipd->scratchpath = gl_create(100);
  amipd->scratchpathlist = gl_create(100);
  amipd->scratchamlist = gl_create(100);
  if (amipd->scratchpath == NULL ||
      amipd->scratchpathlist == NULL ||
      amipd->scratchamlist == NULL) {
    ASC_PANIC("Insufficent memory for scratch lists.");
    return NULL;
  }
  /* setup md: column-indexed sparse matrix workspace */
  amipd->md.header = ASC_NEW_ARRAY(struct sgelt *,amvi->nip+1);
  if (amipd->md.header == NULL) {
    ASC_PANIC("Insufficent memory for md.header.");
    return NULL;
  }
  amipd->md.colcount = ASC_NEW_ARRAY(GLint,amvi->nip+1);
  if (amipd->md.colcount == NULL) {
    ASC_PANIC("Insufficent memory for md.colcount.");
    return NULL;
  }
  amipd->md.sg2blob = ASC_NEW_ARRAY(GLint,asize+1);
  if (amipd->md.sg2blob == NULL) {
    ASC_PANIC("Insufficent memory for md.sg2blob.");
    return NULL;
  }
  amipd->md.blobcollected = ASC_NEW_ARRAY(int,asize+1);
  if (amipd->md.blobcollected == NULL) {
    ASC_PANIC("Insufficent memory for md.blobcollected.");
    return NULL;
  }
  amipd->md.blob = ASC_NEW_ARRAY(struct gl_list_t *,asize+1);
  if (amipd->md.blob == NULL) {
    ASC_PANIC("Insufficent memory for md.blob.");
    return NULL;
  }
  for (i=0;i <= (int)asize;i++) {
    /* as we'll never use most of these, create them small and
     * let them grow.
     */
    amipd->md.blob[i] = gl_create(2);
    if (amipd->md.blob[i] == NULL) {
      ASC_PANIC("Insufficent memory for md.blob data.");
      return NULL;
    }
  }
  amipd->md.maxchildren = asize;
  amipd->md.nextnode_ip = 0;
  amipd->md.sg = 0;
  amipd->md.pool = NULL;
  InitSgeltPool(&(amipd->md));
  /* convert each ip's accumulated glp list of parents into a compact
   * numpair parentslist, then discard the gl_list. */
  len = amipd->ipused;
  for (i=0; i < len; i++) {
    struct gl_list_t *gl;
    gl = amipd->ipbuf[i].glp;
    if (gl != NULL) {
      if ((dlen=gl_length(gl)) > 0) {
        NumpairClearList(amipd->enlp0);
        for (j=1; j <= dlen; j++) {
          NumpairAppendList(amipd->enlp0,(GLint)gl_fetch(gl,j));
        }
        amipd->ipbuf[i].parentslist = NumpairCopyList(amipd->enlp0);
      }
      amipd->ipbuf[i].glp = NULL;
      gl_destroy(gl);
    }
  }
  return amipd;
}
1091 |
|
|
/* deallocate amipd.
 * Tears down everything AMIPDInit built, in an order that mirrors it:
 * flat arrays, the sgelt pool and md workspace, the md.blob gl_lists
 * (indices 0..maxchildren inclusive, matching the allocation loop),
 * the numpair scratch lists, the per-child buffers, the scratch gl_lists,
 * and finally amipd itself. Interface-pointer restoration (oldips) is
 * handled by the caller, not here. */
static
void DestroyAMIPD(struct AnonMergeIPData *amipd) {
  int i;
  ascfree(amipd->ipbuf);
  ascfree(amipd->node2ip);
  ascfree(amipd->listhints);
  DestroySgeltPool(&(amipd->md));
  ascfree(amipd->md.header);
  ascfree(amipd->md.colcount);
  ascfree(amipd->md.sg2blob);
  ascfree(amipd->md.blobcollected);
  /* inclusive bound matches AMIPDInit's "i <= (int)asize" creation loop */
  for (i=0; i <= amipd->md.maxchildren; i++) {
    gl_destroy(amipd->md.blob[i]);
  }
  ascfree(amipd->md.blob);
  gl_destroy(amipd->nlpgl);
  NumpairDestroyList(amipd->enlp0);
  NumpairDestroyList(amipd->enlp1);
  NumpairDestroyList(amipd->enlp2);
  gl_destroy(amipd->snlpgl);
  NumpairDestroyList(amipd->senlp0);
  NumpairDestroyList(amipd->senlp1);
  NumpairDestroyList(amipd->senlp2);
  ascfree(amipd->mergegraphs);
  ascfree(amipd->graphs);
  ascfree(amipd->routes);
  ascfree(amipd->graphchildnum);
  ascfree(amipd->routechildnum);
  ascfree(amipd->finalchildnum);
  gl_destroy(amipd->scratchpath);
  gl_destroy(amipd->scratchpathlist);
  gl_destroy(amipd->scratchamlist);
  ascfree(amipd);
}
1126 |
|
|
|
1127 |
|
|
#if CHECKN
/* Debug dump: for each instance carrying an AMIP, print the lengths of
 * its shared and subgraph numpair lists to amipd->fp. Compiled only
 * when CHECKN is set. */
static
void AMCheckN(struct Instance *i, struct AnonMergeIPData *amipd)
{
  struct AnonMergeIP *amip;
  if ((GetAnonFlags(i) & AMIPFLAG) == 0) {
    return; /* instance has no AMIP; nothing to report */
  } else {
    /* get amip for use in the rest of this function. */
    amip = (struct AnonMergeIP *)GetInterfacePtr(i);
  }
  if (amip->node_number > 0) {
    if (amip->shared != NULL && NumpairListLen(amip->shared)>0) {
      FPRINTF(amipd->fp,"snlist %d len=%d\n", amip->node_number,
              NumpairListLen(amip->shared));
    }
  }
  if (amip->ip_number > 0) {
    if (amip->subgraph != NULL && NumpairListLen(amip->subgraph)>0) {
      FPRINTF(amipd->fp,"iplist %d len=%d\n", amip->ip_number,
              NumpairListLen(amip->subgraph));
    }
  }
}
#endif
1152 |
|
|
|
1153 |
|
|
static |
1154 |
|
|
Numlist_p GetParentsList(int p, struct AnonMergeIPData *amipd) |
1155 |
|
|
{ |
1156 |
|
|
assert(amipd != NULL); |
1157 |
|
|
assert(p>0); |
1158 |
|
|
assert (amipd->ipbuf[p-1].ip_number == p); |
1159 |
|
|
return amipd->ipbuf[p-1].parentslist; |
1160 |
|
|
} |
1161 |
|
|
|
1162 |
|
|
static |
1163 |
|
|
void ZeroArrayEntry(int i, struct mdata *md) |
1164 |
|
|
{ |
1165 |
|
|
md->header[i] = NULL; |
1166 |
|
|
md->colcount[i] = 0; |
1167 |
|
|
} |
1168 |
|
|
|
1169 |
|
|
/*
 * Move content of all blobs indicated in bl into the biggest.
 * renumbers sg2blob of smaller blobs to match bigblob.
 * bl holds blob ids (1-based gl_list of GLint); every sg in each smaller
 * blob is appended to md->blob[bigblob] and re-pointed at bigblob.
 * The emptied smaller blob lists are left as-is (stale but unreferenced).
 */
static
void MergeBlobs(struct mdata *md, int bigblob, struct gl_list_t *bl)
{
  struct gl_list_t *keep, *old;
  unsigned long c,len, blc,bllen;
  GLint oldblob, oldsg;
  keep = md->blob[bigblob];
  for (blc=1,bllen = gl_length(bl); blc <= bllen; blc++) {
    oldblob = (GLint)gl_fetch(bl,blc);
    if (oldblob != bigblob) {
      old = md->blob[oldblob];
      for (c=1, len = gl_length(old); c <= len; c++) {
        oldsg = (GLint)gl_fetch(old,c);
#if 1
        /* check the supposition that we are keeping lists nonoverlapping */
        assert(gl_ptr_search(keep,(VOIDPTR)oldsg,0)==0);
#endif
        gl_append_ptr(keep,(VOIDPTR)oldsg); /* add to new blob */
        md->sg2blob[oldsg] = bigblob; /* reassign sg2blob lookup */
      }
    }
  }
}
1196 |
|
|
|
1197 |
|
|
/* sgelt allocation comes from the pool in md; see InitSgeltPool */
#define ELTMALLOC (struct sgelt *)pool_get_element(md->pool)
/* ELTFREE is not defined, as we use pool reset instead */
#define ELTRESET(md) pool_clear_store((md)->pool)
/*
 * Build a sparse matrix of sorts col indexed by parent ip
 * with 'rows' indexing by sg, the reduced child number of i.
 * there is no rowwise access to this because we don't need it.
 * elts must be pooled or we spend eternity in malloc.
 * Called as an NPLFunc: prepends an element carrying the current md->sg
 * to column 'parent' and bumps that column's count; parents at or below
 * md->nextnode_ip are deliberately ignored.
 */
static
void add_matrix_incidence(int parent, struct mdata *md)
{
  struct sgelt *elt;
  if (parent > md->nextnode_ip) {
#if CFI
    FPRINTF(ASCERR,"mat row %d col %d\n",md->sg,parent);
#endif
    elt = ELTMALLOC;
    elt->sg = md->sg;
    /* push onto the head of the column's chain */
    elt->next = md->header[parent];
    md->header[parent] = elt;
    md->colcount[parent]++;
  } else {
#if (CFI && 0)
    FPRINTF(ASCERR,"ign row %d col %d\n",md->sg,parent);
#endif
  }
}
1225 |
|
|
|
1226 |
|
|
/* can we move some of this into md?
 * Groups the totroutes candidate routes into "blobs" of routes that share
 * some intermediate parent, then keeps exactly one representative route
 * per blob plus every route that reaches its target directly. The kept
 * routes' child numbers are appended to finalchildnum and counted in
 * *totfinal. md supplies the sparse matrix and blob workspace; all the
 * gl_lists and numpair lists passed in are caller-owned scratch. */
static
void CalcFinalIndependentRoutes(struct mdata *md,
                                Numlist_p parentsini,
                                Numlist_p CONST *routes,
                                Numlist_p parentsinchild,
                                struct gl_list_t *blobsincol,
                                struct gl_list_t *newsgincol,
                                unsigned long * CONST finalchildnum,
                                unsigned long * CONST routechildnum,
                                int * CONST totfinal,
                                CONST int totroutes)
{
  unsigned long newsg,blc,bllen;
  struct sgelt *elt;
  GLint blob;
  GLint bigblob;
  unsigned long bigsize;
  GLint p,sg,hint;

  /* init work space */
  NumpairListIterate(parentsini,(NPLFunc)ZeroArrayEntry,(VOIDPTR)md);
  /* collect connection matrix */
  for (sg = 0; sg < totroutes; sg++) {
    md->sg = sg;
    md->sg2blob[sg] = -1;
    md->blobcollected[sg] = 0;
    gl_reset(md->blob[sg]);
    NumpairCalcIntersection(parentsini,routes[sg],parentsinchild);
    NumpairListIterate(parentsinchild,(NPLFunc)add_matrix_incidence,
                       (VOIDPTR)md);
  }
  /* process columns of matrix to group rows into overlapping blobs */
  /* while processing a column, blobcollected indicates if we've
   * already seen that blob in this column.
   */
  p = 0;
  hint = -1;
  /* comma operator: advance p to the next-lower parent number first,
   * then test it against the nextnode_ip cutoff */
  while ( p = NumpairPrevNumber(parentsini,p, &hint),
          p > md->nextnode_ip) {
#if CFI
    FPRINTF(ASCERR,"parent %d count %d\n",p,md->colcount[p]);
#endif
    switch(md->colcount[p]) {
    case 0: /* target appears directly in child list of source */
      /* At least one element of sg2blob will stay -1.
       * We'll need that sg at the end as it is an independent path.
       */
      break;
    case 1:
      sg = md->header[p]->sg;
      if (md->sg2blob[sg] == -1) {
        /* new blob for sg, and new sg for new blob */
        md->sg2blob[sg] = sg;
        gl_append_ptr(md->blob[sg], (VOIDPTR)sg);
      }
      /* else this parent is already seen by some other
       * child of the source.
       */
      break;
    default:
      elt = md->header[p];
      if (elt != NULL) {
        /* must be at least a blob or an unblobbed sg in the column */
        bigblob = -1; /* sg of the biggest seen so far */
        bigsize = 0L;
        gl_reset(blobsincol); /* nonredundant list of blobs seen in col */
        /* each blob is a nonredundant list of sg in the blob */
        gl_reset(newsgincol); /* list of sg new in column not in any blob */
        /* Compute list of existing blobs in col. These must be
         * merged if there are more than 1.
         * Compute list of unblobbed (as yet) parentsini. If no
         * existing blobs, these form a new blob, else
         * we add these to the existing blobs.
         * With this collection method, we never need merge overlapping
         * blobs because we never create a blob that contains any member
         * of an existing blob. At the end of the surrounding loop, we
         * will have only nonoverlapping blobs and direct children (-1)
         * left in matrix.sg2blob. We want to keep the lowest sg of each
         * blob in sg2blob.
         */
        while (elt != NULL) {
          blob = md->sg2blob[elt->sg];
          if (blob == -1) {
            /* never seen this child of source */
            gl_append_ptr(newsgincol,(VOIDPTR)elt->sg);
          } else {
            if (md->blobcollected[blob] == 0) {
              /* never seen this blob, blob must be nonempty */
              assert(gl_length(md->blob[blob]) != 0);
              if (gl_length(md->blob[blob]) > bigsize) {
                /* track which of collected blobs is biggest */
                bigsize = gl_length(md->blob[blob]);
                bigblob = blob;
              }
              gl_append_ptr(blobsincol,(VOIDPTR)blob);
              md->blobcollected[blob] = 1;
            }
            /* else saw blob before. */
          }
          elt = elt->next;
        } /* endwhile */
        /* By construction, last element of newsgincol
         * is also the lowest sg in col because sg are inserted in
         * increasing order at header in the matrix assembly.
         */
        if (gl_length(blobsincol) == 0L) {
          assert(bigblob == -1);
          /* get list index of least sg to use as new blob */
          bllen = (int)gl_length(newsgincol);
#if CFI
          FPRINTF(ASCERR,"No blobs. %lu newsg\n",bllen);
#endif
          assert(bllen > 0);
          /* fails mysteriously when all sg in first blob.
           * Why is blobsincol empty? because someone left blobcollected 1.
           */
          /* new blob */
          blob = (GLint)gl_fetch(newsgincol,bllen); /* new blob number:last */
          assert(md->blob[blob] != NULL);
          assert(gl_length(md->blob[blob]) == 0);
          /* add sg to new blob and assign new blob to those sg */
          for (blc = 1; blc <= bllen; blc++) {
            newsg = (GLint)gl_fetch(newsgincol,blc);
            gl_append_ptr(md->blob[blob],(VOIDPTR)newsg);
            md->sg2blob[newsg] = blob;
          }
        } else {
#if CFI
          FPRINTF(ASCERR,"blobs: %lu.\n",gl_length(blobsincol));
#endif
          assert(bigblob >= 0);
          /* renumber smaller blobs in blobsincol to be members of biggest */
          MergeBlobs(md, bigblob, blobsincol);
          md->blobcollected[bigblob] = 0;
          /* blobcollected for smaller blobs will not be seen again.
           * reinit it for next column for the big blob
           */
          /* add new sg's to bigblob */
          for (blc = 1, bllen = gl_length(newsgincol); blc <= bllen; blc++) {
            newsg = (GLint)gl_fetch(newsgincol,blc);
            gl_append_ptr(md->blob[bigblob],(VOIDPTR)newsg);
            md->sg2blob[newsg] = bigblob;
          }
        }
      } /* end if elt ! NULL */
      break;
    } /* end switch */
  } /* end while */
  for (sg = 0; sg < totroutes; sg++) {
    md->blobcollected[sg] = 0;
  }
  /* note this process and the mergeblobs process both leave lots
   * of old data scattered in their wakes.
   */
  /* now blobcollected indicates we're keeping a route */
  for (sg = 0; sg < totroutes; sg++) {
    if (md->sg2blob[sg] == -1) {
      /* direct route. sg not in blob. collect it. */
      md->blobcollected[sg] = 1;
      finalchildnum[*totfinal] = routechildnum[sg];
      (*totfinal)++;
    } else {
      if (md->blobcollected[md->sg2blob[sg]] == 0) {
        /* sg is leftmost route through blob it is in.
         * collect sg and mark blob collected.
         */
        md->blobcollected[md->sg2blob[sg]] = 1;
        finalchildnum[*totfinal] = routechildnum[sg];
        (*totfinal)++;
      }
    }
  }
  ELTRESET(md);
  /* ok, totfinal and finalchildnum are what they need to be. */
}
1402 |
|
|
|
1403 |
|
|
/* This now called with visittree, not visitnametree, so no |
1404 |
|
|
* path to i is known or needed. |
1405 |
|
|
* All the amipd scratch spaces are assumed clean on entry, |
1406 |
|
|
* so leave them clean on exit and clean them before the |
1407 |
|
|
* first call to this occurs in a related series. |
1408 |
|
|
*/ |
1409 |
|
|
static |
1410 |
|
|
void AnonMergeDetect(struct Instance *i, |
1411 |
|
|
struct AnonMergeIPData *amipd) |
1412 |
|
|
{ |
1413 |
|
|
struct Instance *ch; /* immediate children of i */ |
1414 |
|
|
struct AnonMergeIP *amip, *chamip; |
1415 |
|
|
unsigned long nch,c; |
1416 |
ballan |
2036 |
GLint len; |
1417 |
|
|
GLint nextnode, nextnode_ip; /* index for descendant iteration */ |
1418 |
|
|
GLint nphint; /* clues for iterations */ |
1419 |
|
|
GLint thisnode; /* nodenumber for i */ |
1420 |
|
|
GLint sg; /* iteration */ |
1421 |
aw0a |
1 |
unsigned int aflags; |
1422 |
|
|
Numlist_p mergednodes; /* interesting descendants of i */ |
1423 |
|
|
Numlist_p parents; /* interesting descendants of i */ |
1424 |
johnpye |
399 |
struct gl_list_t *scratchpath, *scratchpathlist, *scratchamlist; |
1425 |
aw0a |
1 |
struct gl_list_t *path, *pathlist; |
1426 |
|
|
ChildListPtr cl; |
1427 |
|
|
/* probably want to make totroutes, totgraphs, totfinal local ints */ |
1428 |
|
|
|
1429 |
johnpye |
399 |
/* Here we take the subgraph map of merged nodes in i |
1430 |
aw0a |
1 |
* and try to find nonintersecting paths to each of the |
1431 |
johnpye |
399 |
* merged nodes starting from each of the immediate |
1432 |
aw0a |
1 |
* children of i. All compound or merged instances have a subgraph |
1433 |
|
|
* map (or NULL if they have no merged descendants of |
1434 |
johnpye |
399 |
* interest) of mergednode numbers. |
1435 |
aw0a |
1 |
*/ |
1436 |
|
|
|
1437 |
|
|
aflags = GetAnonFlags(i); |
1438 |
|
|
if ((aflags & AMIPFLAG) == 0 || /* ignore junk */ |
1439 |
|
|
(aflags & AMUFLAG) != 0 /* ignore UNIVERSALs */) { |
1440 |
|
|
/* skip the maximally uninteresting nodes */ |
1441 |
|
|
return; |
1442 |
|
|
} else { |
1443 |
|
|
/* get amip for use in the rest of this function. */ |
1444 |
|
|
amip = (struct AnonMergeIP *)GetInterfacePtr(i); |
1445 |
|
|
assert(amip!=NULL); |
1446 |
|
|
/* for all nodes set up the translation to ip_number */ |
1447 |
|
|
amipd->node2ip[amip->node_number] = amip->ip_number; |
1448 |
|
|
/* this number is not used in this function call, but in |
1449 |
|
|
* later calls where this node is the merged descendant |
1450 |
|
|
* being accounted for. node2ip[0] is obviously not meaningful. |
1451 |
|
|
*/ |
1452 |
|
|
} |
1453 |
|
|
|
1454 |
|
|
if (IsCompoundInstance(i) && (nch = NumberChildren(i)) > 1) { |
1455 |
|
|
mergednodes = amip->shared; |
1456 |
|
|
if (mergednodes == NULL || NumpairListLen(mergednodes) == 0) { |
1457 |
|
|
/* no merged descendants to account for */ |
1458 |
|
|
return; |
1459 |
|
|
} |
1460 |
|
|
/* get this nodes number so we can ignore i while processing i */ |
1461 |
|
|
thisnode = amip->node_number; |
1462 |
|
|
amipd->totgraphs = 0; |
1463 |
|
|
#if (AMSTAT && 0) |
1464 |
|
|
FPRINTF(ASCERR,"\nroot ip %d ",amip->ip_number); |
1465 |
|
|
WriteInstanceName(ASCERR,i,NULL); |
1466 |
|
|
FPRINTF(ASCERR,"\n"); |
1467 |
|
|
#if (AMSTAT && 0) |
1468 |
|
|
FPRINTF(ASCERR,"descendants:\n"); |
1469 |
|
|
NLPWrite(NULL,amip->subgraph); |
1470 |
|
|
#endif |
1471 |
|
|
#endif |
1472 |
|
|
if (InstanceKind(i)==MODEL_INST) { |
1473 |
|
|
cl = GetChildList(InstanceTypeDesc(i)); |
1474 |
|
|
} else { |
1475 |
|
|
cl = NULL; |
1476 |
|
|
} |
1477 |
|
|
|
1478 |
|
|
for (c = 1; c <= nch; c++) { |
1479 |
|
|
/* lots of children have no merged subgraphs. collect total set from |
1480 |
|
|
* those that do and then iterate only over those children. |
1481 |
|
|
* Also exclude alias children as they will be accounted in |
1482 |
|
|
* the type definition. |
1483 |
|
|
*/ |
1484 |
|
|
ch = InstanceChild(i,c); |
1485 |
johnpye |
399 |
if (ch != NULL && |
1486 |
aw0a |
1 |
(GetAnonFlags(ch)&AMIPFLAG) != 0 && |
1487 |
|
|
(cl == NULL || ChildAliasing(cl,c)==0)) { |
1488 |
|
|
chamip = (struct AnonMergeIP *)GetInterfacePtr(ch); |
1489 |
|
|
assert(chamip != NULL); |
1490 |
|
|
if (chamip->shared != NULL) { |
1491 |
|
|
amipd->graphs[amipd->totgraphs] = chamip->subgraph; |
1492 |
|
|
amipd->mergegraphs[amipd->totgraphs] = chamip->shared; |
1493 |
|
|
amipd->graphchildnum[amipd->totgraphs] = c; /* track origin */ |
1494 |
|
|
amipd->totgraphs++; |
1495 |
|
|
#if (AMSTAT && 0) |
1496 |
johnpye |
399 |
if (chamip->subgraph != NULL && |
1497 |
aw0a |
1 |
NumpairListLen(chamip->subgraph) > 0) { |
1498 |
|
|
FPRINTF(ASCERR,"root ip %d, child number %lu, subgraph:\n", |
1499 |
|
|
amip->ip_number,c); |
1500 |
|
|
#if NLPWRITE |
1501 |
|
|
NLPWrite(ASCERR, chamip->subgraph); |
1502 |
|
|
FPRINTF(ASCERR,"shared\n"); |
1503 |
|
|
NLPWrite(ASCERR, chamip->shared); |
1504 |
|
|
FPRINTF(ASCERR,"\n"); |
1505 |
|
|
#endif |
1506 |
|
|
} /* endif */ |
1507 |
|
|
#endif /*amstat*/ |
1508 |
|
|
} /* endif */ |
1509 |
|
|
} /* endif */ |
1510 |
|
|
} /* endfor children i */ |
1511 |
|
|
|
1512 |
|
|
if (amipd->totgraphs < 2) { |
1513 |
|
|
/* one child has all the merged nodes, so we will never be able |
1514 |
|
|
* to find two independent paths to any merged descendant. quit now. |
1515 |
johnpye |
399 |
*/ |
1516 |
aw0a |
1 |
amipd->totgraphs = 0; |
1517 |
|
|
return; |
1518 |
|
|
} |
1519 |
|
|
|
1520 |
johnpye |
399 |
/* Foreach merged descendant of i, find merges recordable on i. |
1521 |
aw0a |
1 |
* There can never be more than n=totgraphs independent paths. |
1522 |
|
|
* Process in descending node order, |
1523 |
|
|
* though this is not required as what is recorded about one descendant |
1524 |
|
|
* does not affect what is recorded about another. |
1525 |
|
|
* Reverse order may make some set computations cheaper if we were |
1526 |
|
|
* to highly optimize the processing. |
1527 |
|
|
*/ |
1528 |
|
|
scratchpath = amipd->scratchpath; |
1529 |
|
|
scratchpathlist = amipd->scratchpathlist; |
1530 |
johnpye |
399 |
scratchamlist = amipd->scratchamlist; |
1531 |
aw0a |
1 |
gl_reset(scratchamlist); |
1532 |
|
|
nphint = -1; |
1533 |
|
|
len = amipd->totgraphs; |
1534 |
|
|
for (sg = 0; sg < len; sg++) { |
1535 |
|
|
/* With a little care, we could construct totgraphs full of -1 |
1536 |
|
|
* and always leave it in that state. The amount of assignments |
1537 |
|
|
* would be the same, but the iteration would be skipped if we |
1538 |
|
|
* could piggyback the iteration on some other iteration. |
1539 |
|
|
*/ |
1540 |
|
|
amipd->listhints[sg] = -1; |
1541 |
|
|
} |
1542 |
|
|
for (nextnode = NumpairPrevNumber(mergednodes,0,&nphint); |
1543 |
|
|
nextnode > 0; |
1544 |
|
|
nextnode = NumpairPrevNumber(mergednodes,nextnode,&nphint)) { |
1545 |
|
|
amipd->totroutes = 0; |
1546 |
|
|
/* find merges of i's descendants nextnode but not of i itself. */ |
1547 |
|
|
if (nextnode != thisnode) { |
1548 |
|
|
len = amipd->totgraphs; |
1549 |
|
|
/* filter out shared that don't have a route to nextnode in them */ |
1550 |
|
|
nextnode_ip = amipd->node2ip[nextnode]; |
1551 |
|
|
for (sg = 0; sg < len; sg++) { |
1552 |
|
|
if (NumpairNumberInListHintedDecreasing(amipd->mergegraphs[sg], |
1553 |
|
|
nextnode, |
1554 |
|
|
&(amipd->listhints[sg])) |
1555 |
|
|
) { |
1556 |
|
|
/* collect shared that contain a route to nextnode. |
1557 |
|
|
* Because we can do this in a hinted fashion and skip |
1558 |
|
|
* out on extra computing for many easy cases, we |
1559 |
|
|
* do not combine it with the redundancy detection step which |
1560 |
|
|
* costs a bit more. |
1561 |
|
|
*/ |
1562 |
|
|
amipd->routes[amipd->totroutes] = amipd->graphs[sg]; |
1563 |
|
|
amipd->routechildnum[amipd->totroutes] = amipd->graphchildnum[sg]; |
1564 |
|
|
amipd->totroutes++; |
1565 |
|
|
} |
1566 |
|
|
} |
1567 |
|
|
if (amipd->totroutes < 2) { |
1568 |
|
|
/* one child has all the routes, there are not |
1569 |
|
|
* two independent paths to this merged descendant. |
1570 |
|
|
* quit this descendant now. |
1571 |
johnpye |
399 |
*/ |
1572 |
aw0a |
1 |
continue; /* go on to nextnode */ |
1573 |
|
|
} |
1574 |
|
|
#if (AMSTAT && 0) |
1575 |
|
|
FPRINTF(ASCERR,"Routes to merge %d (ip %d): %d\n", nextnode, |
1576 |
|
|
nextnode_ip, amipd->totroutes); |
1577 |
|
|
#endif |
1578 |
|
|
/* filter out redundant routes and redundant merges */ |
1579 |
|
|
/* amipd->enlp0 is a scratch enlp */ |
1580 |
|
|
NumpairClearList(amipd->enlp0); |
1581 |
|
|
|
1582 |
|
|
/* collect parents of nextnode that are in graph of i. |
1583 |
|
|
* Do this by calculating the intersection of subgraph(i) |
1584 |
|
|
* with parents(nextnode). Calculate the parents(nextnode) |
1585 |
|
|
* list once in preprocessing. |
1586 |
|
|
*/ |
1587 |
|
|
parents = GetParentsList(nextnode_ip,amipd); |
1588 |
|
|
assert(parents != NULL); |
1589 |
|
|
#if (AMSTAT && 0) |
1590 |
|
|
FPRINTF(ASCERR,"Parents of ip#%d\n",nextnode_ip); |
1591 |
|
|
NLPWrite(NULL,parents); |
1592 |
|
|
#endif |
1593 |
johnpye |
399 |
NumpairCalcIntersection(amip->subgraph, parents, amipd->enlp0); |
1594 |
aw0a |
1 |
amipd->totfinal = 0; |
1595 |
|
|
#if CFI |
1596 |
|
|
FPRINTF(ASCERR,"Checking merged ip# %d\n",nextnode_ip); |
1597 |
|
|
#endif |
1598 |
|
|
amipd->md.nextnode_ip = nextnode_ip; |
1599 |
|
|
CalcFinalIndependentRoutes( |
1600 |
|
|
&(amipd->md), |
1601 |
|
|
amipd->enlp0, |
1602 |
|
|
amipd->routes, |
1603 |
|
|
amipd->enlp1, |
1604 |
|
|
scratchpathlist, |
1605 |
|
|
scratchpath, |
1606 |
|
|
amipd->finalchildnum, |
1607 |
|
|
amipd->routechildnum, |
1608 |
|
|
&(amipd->totfinal), |
1609 |
|
|
amipd->totroutes |
1610 |
|
|
); |
1611 |
|
|
|
1612 |
|
|
if (amipd->totfinal < 2) { |
1613 |
|
|
/* merges for nextnode are redundant in i. go to next */ |
1614 |
|
|
continue; |
1615 |
|
|
} |
1616 |
|
|
|
1617 |
|
|
/* |
1618 |
|
|
* Ok, so now we have a list of nonintersecting starting points |
1619 |
|
|
* (direct children of i). Follow alphabetic path from each |
1620 |
|
|
* to nextnode and record it. (Intersecting at i and nextnode, |
1621 |
|
|
* but nowhere else in the intervening dag, is ok.) |
1622 |
|
|
*/ |
1623 |
|
|
gl_reset(scratchpathlist); |
1624 |
|
|
#if (AMSTAT && 0) |
1625 |
|
|
FPRINTF(ASCERR,"ip_node %d: merge %d (ip %d) in subgraph %d" |
1626 |
|
|
" independent ways.\n", |
1627 |
|
|
amip->ip_number,nextnode,nextnode_ip,amipd->totfinal); |
1628 |
|
|
#endif /* amstat */ |
1629 |
|
|
for (sg = 0; sg < amipd->totfinal; sg++) { |
1630 |
|
|
gl_reset(scratchpath); |
1631 |
|
|
#if (AMSTAT && 0) |
1632 |
|
|
FPRINTF(ASCERR,"Searching in child %lu.\n",amipd->finalchildnum[sg]); |
1633 |
|
|
#endif /* amstat */ |
1634 |
|
|
AnonMergeFindPath(i,nextnode,amipd->finalchildnum[sg],scratchpath); |
1635 |
|
|
/* save path found to pathlist */ |
1636 |
|
|
path = gl_copy(scratchpath); |
1637 |
|
|
gl_append_ptr(scratchpathlist,path); |
1638 |
|
|
} |
1639 |
|
|
/* save pathlist to mergelist of i */ |
1640 |
|
|
pathlist = gl_copy(scratchpathlist); |
1641 |
|
|
gl_append_ptr(scratchamlist,pathlist); |
1642 |
|
|
} |
1643 |
|
|
} /* endfor nextnode */ |
1644 |
|
|
/* save merges for this node i to IP */ |
1645 |
|
|
assert(amip->amlist == NULL); |
1646 |
|
|
if (gl_length(scratchamlist) > 0) { |
1647 |
|
|
amip->amlist = gl_copy(scratchamlist); |
1648 |
|
|
amipd->num_iwithmerge++; |
1649 |
|
|
amipd->num_mergelists += (int)gl_length(amip->amlist); |
1650 |
|
|
} else { |
1651 |
|
|
amip->amlist = NULL; |
1652 |
|
|
} |
1653 |
|
|
} /* done with this compound instance */ |
1654 |
|
|
} |
1655 |
|
|
|
1656 |
|
|
/* public stuff */ |
1657 |
|
|
/*
 * sanity checker variable.
 * Nonzero while the interface pointers installed by Asc_AnonMergeMarkIPs
 * are current; reset to 0 by Asc_AnonMergeUnmarkIPs. The other public
 * entry points panic when called while this is 0.
 */
static int g_ammarking = 0;
1661 |
|
|
/* |
1662 |
|
|
 * vp = Asc_AnonMergeMarkIPs(root);
1663 |
|
|
* VOIDPTR vp; |
1664 |
|
|
* This function finds merged instances, anon or otherwise, and |
1665 |
|
|
* records the merges at the scope most appropriate. |
1666 |
|
|
* On return, all InterfacePointers in the tree of root are either |
1667 |
|
|
* NULL or point to some data we understand. |
1668 |
|
|
* When done using these IPs, the caller should call |
1669 |
johnpye |
399 |
 * Asc_AnonMergeUnmarkIPs(vp);
1670 |
aw0a |
1 |
* This function uses the push/pop protocol for ips, so ip data |
1671 |
|
|
* of other clients may be lost if Unmark is not called properly. |
1672 |
|
|
* Generally, both functions should be called from the same scope. |
1673 |
|
|
* |
1674 |
|
|
* ! ! Assumes that tmpnums are all 0 on entry, and leaves any |
1675 |
|
|
* it has touched 0 on exit. |
1676 |
|
|
* |
1677 |
johnpye |
399 |
* Does not record recursive merges, i.e. if a,b ARE_THE_SAME, |
1678 |
aw0a |
1 |
* don't record a.i,b.i ARE_THE_SAME. This is simple to detect |
1679 |
|
|
* as a.i,b.i have the same childnumber/instanceparent pair. |
1680 |
|
|
*/ |
1681 |
|
|
VOIDPTR Asc_AnonMergeMarkIPs(struct Instance *root)
{
  struct AnonMergeVisitInfo amvi; /* no pointers inside, so no malloc/free*/
  struct AnonMergeIPData *amipd;  /* heap workspace; returned to caller as the handle */

  g_ammarking = 1; /* sanity flag tested by Unmark/WriteList/Cmp entry points */
  /* init before estimate of ips needed */
  AMVIInit(&amvi,root);

  /* Pass 1: zero anon flags (universal), mark number of
   * real parents to children in tmpnums, and census the formal types
   * for detecting anonymously universal instances.
   */
#if (AMSTAT || AMBKDN)
  FPRINTF(ASCERR,"%ld init amvi done\n",clock());
#endif
  SilentVisitInstanceTreeTwo(root,(VisitTwoProc)AnonMergeCountTree,1,0,&amvi);

#if (AMSTAT || AMBKDN)
  FPRINTF(ASCERR,"%ld counttree done\n",clock());
#endif
  /* mark the instances which have only one instance as 'locally' universal
   * in anonflags and set tmpnum to 0 for same.
   */
  AnonMergeMarkUniversals(&amvi);
#if (AMSTAT || AMBKDN)
  FPRINTF(ASCERR,"%ld mark universals done\n",clock());
#endif

  /* Pass 2: set the tmpnum of anything with a 1 tmpnum (1 parent) to 0.
   * Set the tmpnums of anything with > 1 parent to the merged node
   * number. Note that this forces us to use tmpnum instead of
   * amip->nodenum, since the amips are not created until AMIPDInit below
   * (NOTE(review): original comment here was garbled; verify intent).
   * amvi.nip is number of ips we'll need and amvi.nim is the number of
   * shared instances for which we need to account the merges.
   */
  SilentVisitInstanceTreeTwo(root,(VisitTwoProc)AnonMergeLabelNodes,
                             1,0,&amvi);
  /* Nodes are tmpnumbered 1..mergednodes for nodes of interest, not all
   * instances. If we were cpu fascists, we would merge this step with the
   * next one quite carefully.
   */
#if (AMSTAT || AMBKDN)
  FPRINTF(ASCERR,"%ld label nodes done\n",clock());
#endif

#if (AMSTAT)
  AMCWriteCounts(&amvi);
#endif /*amstat*/

  /* create AMIPs and other temporary memory */
  amipd = AMIPDInit(&amvi);
  if (amipd == NULL) {
    /* allocation/setup failed; nothing to hand back or to unmark */
    return NULL;
  }

#if (AMSTAT || AMBKDN)
  FPRINTF(ASCERR,"%ld amipdinit done\n",clock());
#endif

#if (CHECKN)
  /* check the numbering for continuity */
  amipd->fp = fopen("/tmp/nodes","w+");
  SilentVisitInstanceTreeTwo(root,(VisitTwoProc)AMCheckN,1,0,amipd);
  fclose(amipd->fp);
#endif
  /* Pass 3: do the magic bottom up (records merges on each amip). */
  SilentVisitInstanceTreeTwo(root,(VisitTwoProc)AnonMergeDetect,1,0,amipd);

#if (AMSTAT || AMBKDN)
  FPRINTF(ASCERR,"%ld mergedetect done\n\n",clock());
#endif
#if AMBKDN
  FPRINTF(ASCERR,"Instances with merges %d\n",amipd->num_iwithmerge);
  FPRINTF(ASCERR,"N-way merges recorded %d\n",amipd->num_mergelists);
  FPRINTF(ASCERR,"Merged instances %ld\n",amvi.nim);
  FPRINTF(ASCERR,"Instances expecting IP %ld\n",amvi.nip);
  FPRINTF(ASCERR,"IPs used %d\n",amipd->ipused);
#endif
  return (VOIDPTR)amipd;
}
1762 |
|
|
|
1763 |
|
|
/* |
1764 |
|
|
* return the comparison of merge lists from two distinct instances |
1765 |
|
|
* first check for same instance (aml's equal), then |
1766 |
|
|
* we must compare until a difference found. |
1767 |
|
|
* This function must yield a partial order, ie a<b && b<c ==> a < c. |
1768 |
|
|
 * We compare at 4 levels, searching for the first difference:
1769 |
|
|
* 1) length of aml (number of merged instances in i) |
1770 |
|
|
* 2) lengths of lists in aml (number of names for each merged descendant) |
1771 |
|
|
* 3) lengths of paths in each pathlist. |
1772 |
|
|
* 4) content of each pair of paths. |
1773 |
|
|
*/ |
1774 |
|
|
static |
1775 |
|
|
int AnonMergeCmpMLists(CONST struct gl_list_t *aml1, |
1776 |
|
|
CONST struct gl_list_t *aml2) |
1777 |
|
|
{ |
1778 |
|
|
unsigned long len1, len2, len3, len4, pl, p; |
1779 |
|
|
struct gl_list_t *pathlist1, *pathlist2, *path1, *path2; |
1780 |
|
|
int cmp = 0; |
1781 |
|
|
|
1782 |
|
|
if (aml1==aml2) { |
1783 |
|
|
/* includes NULL == NULL case. */ |
1784 |
johnpye |
399 |
return 0; |
1785 |
aw0a |
1 |
} |
1786 |
|
|
if (aml1 == NULL) { |
1787 |
|
|
return -1; |
1788 |
|
|
} |
1789 |
|
|
if (aml2 == NULL) { |
1790 |
|
|
return 1; |
1791 |
|
|
} |
1792 |
|
|
assert(aml1!=NULL); |
1793 |
|
|
assert(aml2!=NULL); |
1794 |
|
|
/* check for same number of merged descendants */ |
1795 |
|
|
len1 = gl_length(aml1); |
1796 |
|
|
len2 = gl_length(aml2); |
1797 |
|
|
if (len2 != len1) { |
1798 |
|
|
return (len2 > len1) ? -1 : 1; /* longer is >, right? */ |
1799 |
|
|
} |
1800 |
|
|
/* foreach descendant, check same number of paths found */ |
1801 |
|
|
for (pl = 1; pl <= len1; pl++) { |
1802 |
|
|
pathlist1 = (struct gl_list_t *)gl_fetch(aml1,pl); |
1803 |
|
|
pathlist2 = (struct gl_list_t *)gl_fetch(aml2,pl); |
1804 |
|
|
assert(pathlist1!=NULL); |
1805 |
|
|
assert(pathlist2!=NULL); |
1806 |
|
|
len2 = gl_length(pathlist1); |
1807 |
|
|
len3 = gl_length(pathlist2); |
1808 |
|
|
if (len3 != len2) { |
1809 |
|
|
return (len3 > len2) ? -1 : 1; /* longer is >, right? */ |
1810 |
|
|
} |
1811 |
|
|
} |
1812 |
|
|
/* for each descendant, check that paths of same length were found */ |
1813 |
|
|
for (pl = 1; pl <= len1; pl++) { |
1814 |
|
|
pathlist1 = (struct gl_list_t *)gl_fetch(aml1,pl); |
1815 |
|
|
pathlist2 = (struct gl_list_t *)gl_fetch(aml2,pl); |
1816 |
|
|
len2 = gl_length(pathlist1); |
1817 |
|
|
for (p = 1; p <= len2; p++) { |
1818 |
|
|
path1 = (struct gl_list_t *)gl_fetch(pathlist1,p); |
1819 |
|
|
path2 = (struct gl_list_t *)gl_fetch(pathlist2,p); |
1820 |
|
|
assert(path1!=NULL); |
1821 |
|
|
assert(path2!=NULL); |
1822 |
|
|
len3 = gl_length(path1); |
1823 |
|
|
len4 = gl_length(path2); |
1824 |
|
|
if (len4 != len3) { |
1825 |
|
|
return (len4 > len3) ? -1 : 1; /* longer is >, right? */ |
1826 |
|
|
} |
1827 |
|
|
} |
1828 |
|
|
} |
1829 |
|
|
/* for each descendant, check that paths of same content were found */ |
1830 |
|
|
for (pl = 1; pl <= len1; pl++) { |
1831 |
|
|
pathlist1 = (struct gl_list_t *)gl_fetch(aml1,pl); |
1832 |
|
|
pathlist2 = (struct gl_list_t *)gl_fetch(aml2,pl); |
1833 |
|
|
len2 = gl_length(pathlist1); |
1834 |
|
|
for (p = 1; p <= len2; p++) { |
1835 |
|
|
path1 = (struct gl_list_t *)gl_fetch(pathlist1,p); |
1836 |
|
|
path2 = (struct gl_list_t *)gl_fetch(pathlist2,p); |
1837 |
|
|
cmp = gl_compare_ptrs(path1,path2); |
1838 |
|
|
if (cmp != 0) { |
1839 |
|
|
return cmp; |
1840 |
|
|
} |
1841 |
|
|
} |
1842 |
|
|
} |
1843 |
|
|
|
1844 |
|
|
return 0; |
1845 |
|
|
} |
1846 |
|
|
|
1847 |
|
|
/* |
1848 |
|
|
* Returns the comparison of the merge information stored in two |
1849 |
|
|
 * instances, which must be of the same formal type and have
1850 |
|
|
* children of the same anonymous types. It doesn't make |
1851 |
|
|
* sense to call the function on ATOM-like instances, since they |
1852 |
|
|
* can have no deeper merged structures. |
1853 |
|
|
* UNIVERSAL instances will always return 0. (Think about it.) |
1854 |
|
|
* Objects which are part of UNIVERSAL instances or somehow |
1855 |
|
|
* not supposed to have a list of merges will always return 2, |
1856 |
|
|
* even if compared to themselves! |
1857 |
|
|
* If the comparison is valid, it will return 0,1,-1. |
1858 |
|
|
 * instances without lists should be < instances with lists.
1859 |
|
|
*/ |
1860 |
|
|
/*
 * Compare the merge structures recorded on two distinct instances of the
 * same formal type. Anonymously universal instances compare equal (0).
 * Returns 2 when either instance has no usable merge IP; otherwise the
 * -1/0/1 result of comparing the two merge lists.
 */
int Asc_AnonMergeCmpInstances(CONST struct Instance *i1,
                              CONST struct Instance *i2)
{
  struct AnonMergeIP *ip1, *ip2;
  int aflags1, aflags2;

  assert(i1!=NULL);
  assert(i2!=NULL);
  assert(InstanceTypeDesc(i1) == InstanceTypeDesc(i2));
  assert(i1 != i2);

  /* anonymously universal: all such instances are indistinguishable */
  aflags1 = GetAnonFlags(i1);
  if (aflags1 == (AMUFLAG | AMIPFLAG)) {
    return 0;
  }
  /* either instance without an IP cannot be meaningfully compared */
  aflags2 = GetAnonFlags(i2);
  if ((aflags1 & AMIPFLAG) == 0 || (aflags2 & AMIPFLAG) == 0) {
    return 2;
  }
  ip1 = (struct AnonMergeIP *)GetInterfacePtr(i1);
  ip2 = (struct AnonMergeIP *)GetInterfacePtr(i2);
  if (ip1 == NULL || ip2 == NULL) {
    return 2;
  }
  return AnonMergeCmpMLists(ip1->amlist, ip2->amlist);
}
1885 |
|
|
|
1886 |
|
|
/* |
1887 |
|
|
* frees data structures returned by AnonMergeMarkIPs. |
1888 |
|
|
*/ |
1889 |
|
|
/*
 * Tears down the IP data created by Asc_AnonMergeMarkIPs(root),
 * restoring any interface pointers that were pushed aside.
 * vp must be the handle returned by Asc_AnonMergeMarkIPs.
 */
void Asc_AnonMergeUnmarkIPs(VOIDPTR vp)
{
  struct AnonMergeIPData *amipd;
  /* refuse to run unless a mark pass is currently in effect */
  if (g_ammarking == 0) {
    ASC_PANIC("Called without marks current");
  }
  amipd = (struct AnonMergeIPData *)vp;
#if AMSTAT
  /* statistics build: log every IP to /tmp/amipd before teardown */
  amipd->fp = fopen("/tmp/amipd","w+");
  SilentVisitInstanceTreeTwo(amipd->root,(VisitTwoProc)AnonMergeLogIP,
                             1,0,amipd);
  FPRINTF(amipd->fp,"\n######\n");
#endif
  /* restore saved interface pointers, destroying each AnonMergeIP
   * via AnonMergeDestroyIP as it is popped.
   */
  PopInterfacePtrs(amipd->oldips,
                   (IPDeleteFunc)AnonMergeDestroyIP,
                   (VOIDPTR)amipd);
  /* every IP handed out must have come back */
  assert(amipd->ipused==amipd->ipback);
#if AMSTAT
  fclose(amipd->fp);
#endif
  DestroyAMIPD(amipd);
  g_ammarking = 0; /* marks no longer current */
}
1912 |
|
|
|
1913 |
|
|
void Asc_AnonMergeWriteList(FILE *fp, struct Instance *i) |
1914 |
|
|
{ |
1915 |
|
|
struct AnonMergeIP *amip; |
1916 |
|
|
if (g_ammarking == 0) { |
1917 |
johnpye |
1064 |
ASC_PANIC("Called without marks current"); |
1918 |
aw0a |
1 |
} |
1919 |
|
|
assert(i!= NULL); |
1920 |
|
|
if (IsCompoundInstance(i)==0) { |
1921 |
|
|
FPRINTF(fp,"NONE\n"); |
1922 |
|
|
return; |
1923 |
|
|
} |
1924 |
|
|
if (GetAnonFlags(i)&AMIPFLAG) { |
1925 |
|
|
amip = (struct AnonMergeIP *)GetInterfacePtr(i); |
1926 |
|
|
if (amip->amlist != NULL) { |
1927 |
|
|
WriteMList(fp, amip->amlist, i); |
1928 |
|
|
} |
1929 |
|
|
} |
1930 |
|
|
} |
1931 |
|
|
|
1932 |
|
|
|