
Annotation of /trunk/base/generic/compiler/anonmerg.c



Revision 708
Tue Jun 27 07:34:31 2006 UTC by johnpye
File MIME type: text/x-csrc
File size: 67036 byte(s)
Replaced some references to ascmalloc with ASC_NEW_ARRAY
1 aw0a 1 /*
2     * anonmerge.c
3     * by Benjamin Andrew Allan
4     * Created September 21, 1997
5     * Copyright 1997 Carnegie Mellon University.
6     * Version: $Revision: 1.9 $
7     * Version control file: $RCSfile: anonmerg.c,v $
8     * Date last modified: $Date: 2000/01/25 02:25:52 $
9     * Last modified by: $Author: ballan $
10     *
11     * This file is part of the Ascend Language Interpreter.
12     *
13     * The Ascend Language Interpreter is free software; you can redistribute
14     * it and/or modify it under the terms of the GNU General Public License as
15     * published by the Free Software Foundation; either version 2 of the
16     * License, or (at your option) any later version.
17     *
18     * The Ascend Language Interpreter is distributed in the hope that it will be
19     * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
20     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21     * General Public License for more details.
22     *
23     * You should have received a copy of the GNU General Public License
24     * along with the program; if not, write to the Free Software Foundation,
25     * Inc., 675 Mass Ave, Cambridge, MA 02139 USA. Check the file named
26     * COPYING.
27 johnpye 399 *
28     * The idea is to find and mark all the merges that need to be
29 aw0a 1 * marked to disambiguate the anontype classification scheme
30     * which can be misled into believing two anontypes identical when
31     * one instance has some children merged but the other doesn't.
32 johnpye 399 * Put another way, we want to record the minimum set of merges
33 aw0a 1 * that can account for the identity of all variables/constants
34     * in any relation,logical relation, or when statement.
35     *
36     * Assumptions:
37     * - We will perform this step at a point in the compilation
38     * sequence where the instance structures are set (any time after
39     * phase 1). Instances cannot move while detecting merges.
40     * - The tmpnums and interface pointers are not in use. (To be
41     * safe we will PushInterfacePointers. Tmpnums left 0.)
42     * - Universal instances need not be investigated for merge
43     * properties. That is, when enumerating a namespace, if we
44     * encounter a universal instance, we will skip it and all
45     * its children. We should probably introduce a child-of-universal
46     * bit in the ChildList info and disallow merges/aliases of parts
47     * of universals with the outside in order to prevent graphs of
48     * universal instances from having connections anywhere except at
49     * the top. There are unofficial universals, and we will detect them.
50     *
51     *
52     * Desirables:
53     * - We want to mark instances with a minimal set of merges that accounts
54     * for the deviations in their name space, i.e. we don't want to collect
55     * the merges for both a.b.x and c.b.x if a.b and c.b are merged.
56     * - We want to mark the instance which is the smallest scope containing
57     * both merges. That is, if a.b.c and a.b.d are merged, we want to mark a.b,
58     * not a.
59     *
60     * Problems:
61     * Because there is no simple indexing of parents of an instance
62     * (i.e. I can't travel from a parent p to a child c and easily
63     * know which of the back (parent) links of c I should follow
64     * to return to the parent) we must store paths for navigation
65     * in terms of child number/instance context pairs. Following
66     * back a parent trail requires a search for the child being
67     * left which is unacceptable. This is the same as an io NameNode,
68     * unless we decide to play a reference count game to avoid memory
69     * consumption.
70 johnpye 399 *
71 aw0a 1 * Much of this process is readily explained in terms of a global
72     * list of childnumber/context pairs which maps the entire namespace.
73     * Fortunately we don't need to build this list, we can build parts
74     * of it only, but the overall algorithm becomes obscured.
75     * Because we are mapping the LINK space, rather than the node space,
76     * we have to write our own bizarre set of VisitName functions.
77     * We will have to visit each link between any two objects once.
78     *
79     * These functions maintain all their own data and do not use any
80     * global information, EXCEPT that they use instance InterfacePtrs.
81     * For this reason they are not thread-safe. They also build data
82     * structures with references to instances, so no instance moving
83     * actions should be performed while there are structures from this
84     * module in use.
85     */
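/* A worked example of the marking rules above (hypothetical names, for
 * illustration only): if a.b.c and a.b.d are merged (ARE_THE_SAME)
 * somewhere below an instance a, the merge should be recorded on a.b,
 * the smallest scope containing both names, as a single entry whose two
 * paths are the child-number sequences leading from a.b to c and to d.
 * Recording it higher up on a, or recording it again for every other
 * route down to a.b, would violate the minimality we want.
 */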
86    
87    
88     #include <limits.h> /* for INT_MAX */
89 johnpye 399 #include <utilities/ascConfig.h>
90     #include <utilities/ascMalloc.h>
91     #include <utilities/ascPanic.h>
92     #include <utilities/ascPrint.h>
93     #include <general/pool.h>
94     #include <general/list.h>
95     #include <general/listio.h>
96     #include <general/dstring.h>
97     #include "compiler.h"
98 aw0a 1 #if TIMECOMPILER
99     #include <time.h>
100 johnpye 399 #include <general/tm_time.h>
101 aw0a 1 #endif
102 johnpye 399 #include "fractions.h"
103     #include "dimen.h"
104     #include "child.h"
105     #include "type_desc.h"
106     #include "instance_enum.h"
107 johnpye 669 #include "expr_types.h"
108 johnpye 399 #include "instance_types.h"
109     #include "instance_name.h"
110     #include "tmpnum.h"
111     #include "parentchild.h"
112     #include "instquery.h"
113     #include "visitinst.h"
114     #include "instance_io.h"
115     #include "visitinst.h"
116     #include "visitlink.h"
117     #include "arrayinst.h" /* for arrayvisitlocalleaves only */
118     #include "anonmerg.h"
119 aw0a 1 #if AMSTAT
120     /* need numlistio to write debugging info */
121     #define NUMLISTEXPORTIO
122     #endif
123 johnpye 399 #include "numlist.h"
124 aw0a 1 #if AMSTAT
125     #undef NUMLISTEXPORTIO
126     #endif
127    
128     #ifndef lint
129     static CONST char AnonMergeModuleID[] = "$Id: anonmerg.c,v 1.9 2000/01/25 02:25:52 ballan Exp $";
130     #endif
131    
132     /* if you want to compile unused functions, set to 1.
133     */
134     #define AMUNUSED 0
135    
136     /* print a detailed timing breakdown of merge detection */
137     #define AMBKDN 0
138     #define CFI 0
139    
140     /* print node list info */
141     #define CHECKN (AMSTAT || 0)
142    
143     /* if you want to write reachability info as derived, set to 1. */
144     #define WRITEREACHABLE 0
145    
146     /*
147     * Number of buckets in the hash table to use for detecting universals.
148     * Should bear some relation to the number of formal types in question.
149     * Must match the mask in AMUHASH (buckets = 2^N, and mask = 2^N -1.)
150     */
151     #define AMUBUCKETS 1024
152     /*
153     * hash function to put a heap pointer in a bucket.
154     */
155     #define AMUHASH(p) (((((long) (p))*1103515245) >> 20) & 1023)
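/* Worked example of the bucket/mask relation (illustration only): with
 * AMUBUCKETS = 1024 the mask above must be 1023; if AMUBUCKETS were
 * doubled to 2048, the mask would have to become 2047 (2^11 - 1) or the
 * upper half of the buckets would never be used.
 */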
156    
157     /*
158     * this is the flag that indicates an instance is one of a kind
159     * within the scope of merge detection, therefore is effectively
160     * UNIVERSAL.
161     */
162     #define AMUFLAG 0x10
163    
164     /* This is the flag in anonflags that indicates an amip is on the instance.
165     */
166     #define AMIPFLAG 0x1
167    
168    
169     /* so on each node which has merged descendants that need to be recorded,
170     * we need a list of lists of paths.
171     * i->amip->amlist-|-------------|->path1 to d1 |||||
172     *                 |             |->path2 to d1 |||||
173     *                 |
174     *                 |-------------|->path1 to d2 |||||
175     *  etc            |             |->path2 to d2 |||||
176     *                                  etc
177     *
178     *      gllist          gllists        gllist of childnums
179     * where each path is the gllist of childnumbers that is followed
180     * to reach the shared instance di.
181     */
182 johnpye 399 /*
183 aw0a 1 */
184     struct AnonMergeIP { /* Hangs off all instances with amipflag=1 */
185     struct gl_list_t *amlist;
186     /* The list of merges contained in this inst. each element is a
187     * gl_list of paths. each path is a gl_list of child numbers.
188     */
189     int ip_number; /* number of ip node (subgraph) */
190     int node_number; /* number of merged node, or 0 if this
191     * node is itself uninteresting.
192     * (shared)
193     */
194     Numlist_p subgraph; /* list of all ip'd nodes in subgraph(i) */
195     Numlist_p shared; /* list of merged nodes in subgraph(i) */
196     Numlist_p parentslist;/* list of parent nodes of i */
197     struct gl_list_t *glp;/* gl list of parent nodes of i. */
198     /* number of merges recorded is gllen amlist. */
199     };
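/* A minimal sketch (hypothetical helper, not used by this file) of how a
 * recorded path is replayed: each stored child number is handed to
 * InstanceChild in turn, which is why forward navigation along a path is
 * cheap even though backward (parent) navigation is not.
 */
#if 0 /* illustrative sketch only */
static struct Instance *AMFollowPathSketch(struct Instance *context,
                                           struct gl_list_t *path)
{
  unsigned long c, len;
  len = gl_length(path);
  for (c = 1; c <= len; c++) {
    /* each path element is a child number stored in the pointer slot */
    context = InstanceChild(context,(unsigned long)gl_fetch(path,c));
    assert(context != NULL);
  }
  return context; /* the shared descendant this path records */
}
#endif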
200    
201     /* The existence of a bucket implies at least one instance of the
202     * formal type exists. If no more than one exists, then its pointer
203     * is stored in the bucket. If a second is found, the bucket becomes empty.
204     * (b->i==NULL).
205     * When the table is all built, mark anything that has a non-NULL
206     * instance as locally-universal.
207     */
208     struct AnonMergeUniBucket {
209     CONST struct TypeDescription *d; /* type = hash key */
210     struct Instance *i; /* could be a list later */
211     struct AnonMergeUniBucket *next; /* chain */
212     unsigned long indirected; /* needed for arrays */
213     };
214    
215     struct AnonMergeVisitInfo {
216     struct Instance *root;
217     unsigned long nip; /* instances to get IPs */
218     unsigned long nim; /* instances with multiple nonaliased parents */
219     struct AnonMergeUniBucket *t[AMUBUCKETS]; /* hash table */
220     unsigned long maxchildren; /* most children of any instance */
221     };
222    
223     struct sgelt {
224     int sg;
225     struct sgelt *next;
226     };
227    
228     struct mdata {
229     struct sgelt **header; /* [maxip] */
230     int *colcount; /* [maxip] */
231     int *sg2blob; /* [maxchildren] */
232     int *blobcollected; /* [maxchildren] */
233     struct gl_list_t **blob; /* [maxchildren] */
234     int nextnode_ip;
235     int sg;
236     int maxchildren;
237     pool_store_t pool;
238     };
239    
240    
241     /*
242     * packet that persists while derived merge data is in use.
243     */
244     struct AnonMergeIPData {
245     struct gl_list_t *oldips; /* result of puship */
246     struct Instance *root;
247     struct AnonMergeIP *ipbuf; /* a mass allocated set of AMIPs */
248     unsigned long iplen; /* number of ips */
249     unsigned int ipused; /* number of slots from buf handed out */
250     unsigned int ipback; /* number of slots from buf returned */
251     FILE *fp;
252    
253     int *listhints; /* listhints[i] --> hint for graph i */
254     int *node2ip; /* node2ip[node_number] --> ip_number */
255    
256     Numlist_p enlp0, enlp1,enlp2; /* scratch expandable Numlists */
257     struct gl_list_t *nlpgl; /* gllist of collected numlists */
258    
259     Numlist_p senlp0, senlp1,senlp2; /* scratch expandable Numlists */
260     struct gl_list_t *snlpgl; /* gllist of collected numlists */
261    
262     Numlist_p *mergegraphs; /* array of shareds size totgraphs */
263     Numlist_p *graphs; /* array of subgraphs size totgraphs */
264     unsigned long *graphchildnum; /* child of i that graph came from */
265     int totgraphs; /* number of children of i with subgraphs */
266    
267     Numlist_p *routes; /* array of subgraphs size totroutes */
268     unsigned long *routechildnum; /* child of i that route graph came from */
269     int totroutes; /* number of children containing target */
270    
271     unsigned long *finalchildnum; /* children of i that yield merge path */
272     int totfinal; /* number of paths to target we record */
273    
274     struct gl_list_t *scratchpath; /* scratch childnumber list */
275     struct gl_list_t *scratchpathlist; /* scratch list of paths */
276     struct gl_list_t *scratchamlist; /* scratch list of paths */
277    
278     int num_mergelists; /* total number of N-path merges recorded */
279     int num_iwithmerge; /* total number of instances logging merges. */
280     int compcount;
281    
282     struct mdata md;
283     };
284    
285     /* given a pointer to an AM gl_list, destroys the list and sublists.
286     * ignores NULL input.
287     */
288     static
289     void DestroyMList(struct gl_list_t *aml)
290     {
291     struct gl_list_t *pathlist, *path;
292     unsigned long c,len,d,dlen;
293     if (aml != NULL) {
294     len = gl_length(aml);
295     /* for each descendant */
296     for ( c = 1; c <= len; c++) {
297     pathlist = (struct gl_list_t *)gl_fetch(aml,c);
298     dlen = gl_length(pathlist);
299     /* for each path in list of paths to descendant */
300     for ( d = 1; d <= dlen; d++) {
301     path = (struct gl_list_t *)gl_fetch(pathlist,d);
302     /* path is just a list of ints, so nothing to do to them */
303     gl_destroy(path);
304     }
305     gl_destroy(pathlist);
306     }
307     gl_destroy(aml);
308     }
309     }
310    
311     /*
312     * return an ip from the array of them we are managing
313     */
314     static
315     struct AnonMergeIP *AMGetIP(struct AnonMergeIPData *amipd)
316     {
317     struct AnonMergeIP *amip;
318     assert(amipd!=NULL);
319     if (amipd->iplen <= amipd->ipused) {
320     Asc_Panic(2,"AMGetIP","Too many ips requested");
321     gl_write_list(NULL,NULL); /* delete when done. forcing linker */
322     return NULL; /* not reached */
323     }
324     amip = &(amipd->ipbuf[amipd->ipused]);
325     (amipd->ipused)++;
326     return amip;
327     }
328    
329     /* Returns 1 if instance ch is of set, constant, array of set, or
330     * array of constant type. The mergedness of these cannot affect
331     * the meaning of models containing them, since the models' structures
332     * are dependent on values of them, not identity.
333     * Likewise returns 1 for relations, logical relations, whens,
334     * and arrays thereof since these always have 1 parent and couldn't
335     * be merged in any case. Likewise for dummy instances, which are UNIVERSAL.
336     */
337 johnpye 399 static
338 aw0a 1 int IsIgnorableInstance(struct Instance *ch)
339     {
340     assert(ch!=NULL);
341     if (InstanceKind(ch)&IERRINST) {
342     Asc_Panic(2,"IsIgnorableInstance","Called with bad instance");
343     return 0; /* not reached */
344     }
345     if (InstanceKind(ch) & (IFUND | ICONS | IDUMB | IWHEN | IRELN | ILRELN)) {
346     return 1;
347     }
348     if (IsArrayInstance(ch) ) {
349     struct TypeDescription *d;
350     d = GetArrayBaseType(InstanceTypeDesc(ch));
351     if (GetBaseType(d) & ERROR_KIND) {
352     Asc_Panic(2,"IsIgnorableInstance","Called with bad array instance");
353     return 0; /* not reached */
354     }
355     if (GetBaseType(d) & (EQN_KIND | CONSTANT_KIND | DUMB_KIND | SET_KIND)) {
356     return 1;
357     }
358     }
359     return 0;
360     }
361    
362     /*
363     * Collects and merges into a cheaply allocated numlist (not expandable)
364     * all the interesting descendant information from the children of i.
365     * i itself is in the subgraph of i if i is interesting.
366     * If nothing interesting is reachable, may return NULL.
367     *
368     * This function does not collect as 'reachable' constant and set
369     * instance types since the 'identity' or lack thereof does not
370     * change the semantics of these structural parameter instances.
371 johnpye 399 * We don't need to know about their mergedness.
372 aw0a 1 * Because we are numbering only the mergednodes we care about,
373 johnpye 399 * ignoring constants does not fill the maps with holes.
374 aw0a 1 *
375     * shared is the list described above. This function also collects
376     * subgraph, the list of both merged and compound nodes reachable
377     * from i.
378 johnpye 399 *
379 aw0a 1 * We handle each child of i once and double up the other operations.
380     */
381     static
382     void CollectSubgraphNumlist(struct Instance *i,
383     struct AnonMergeIPData *amipd,
384     struct AnonMergeIP *amip)
385     {
386     unsigned long c,len, tn;
387     struct Instance *ch;
388     struct AnonMergeIP *chamip;
389     Numlist_p shared; /* list of shared nodes in the dag */
390     Numlist_p subgraph; /* list of shared and compound nodes in dag */
391     ChildListPtr cl;
392    
393     NumpairClearList(amipd->enlp0);
394     NumpairClearList(amipd->senlp0);
395     gl_reset(amipd->nlpgl);
396     gl_reset(amipd->snlpgl);
397     len = NumberChildren(i);
398     cl = (InstanceKind(i) == MODEL_INST)?GetChildList(InstanceTypeDesc(i)):NULL;
399     for (c = 1; c <= len; c++) {
400     ch = InstanceChild(i,c);
401     if ( ch != NULL && (GetAnonFlags(ch) & AMIPFLAG) != 0 ) {
402     chamip = (struct AnonMergeIP *)GetInterfacePtr(ch);
403     /* collect child reachables and also their nums */
404     if (chamip != NULL) {
405     if (chamip->glp != NULL &&
406     (cl == NULL || ChildAliasing(cl,c)==0)) {
407     gl_append_ptr(chamip->glp,(void *)amip->ip_number);
408     }
409     if (chamip->shared != NULL) {
410     gl_append_ptr(amipd->nlpgl, chamip->shared);
411     }
412     tn = chamip->node_number;
413     if (tn != 0L) {
414     NumpairAppendList(amipd->enlp0, tn);
415     }
416    
417     if (chamip->subgraph != NULL) {
418     gl_append_ptr(amipd->snlpgl, chamip->subgraph);
419     }
420     tn = chamip->ip_number;
421     if (tn != 0L) {
422     NumpairAppendList(amipd->senlp0, tn);
423     }
424     }
425     }
426     }
427     if (amip->node_number != 0) {
428     NumpairAppendList(amipd->enlp0, amip->node_number);
429     }
430     if (amip->ip_number != 0) {
431     NumpairAppendList(amipd->senlp0, amip->ip_number);
432     }
433     gl_append_ptr(amipd->nlpgl, amipd->enlp0);
434     gl_append_ptr(amipd->snlpgl, amipd->senlp0);
435     shared = NumpairCombineLists(amipd->nlpgl, amipd->enlp1, amipd->enlp2);
436     subgraph = NumpairCombineLists(amipd->snlpgl, amipd->senlp1, amipd->senlp2);
437     NumpairClearList(amipd->enlp0);
438     NumpairClearList(amipd->senlp0);
439     amip->shared = shared;
440     amip->subgraph = subgraph;
441     }
442    
443     /* Grabs an ip from the buffer and inits/returns it.
444     * When used with pushIP, decorates all instances that are marked with
445 johnpye 399 * AMIPFLAG which should only be models/arrays and shared variables.
446 aw0a 1 * As an interesting side effect, we collect the list of mergednodes in the
447     * subgraph of i (in numlist_p form). Should be applied bottom-up.
448     * We also add i's number to the lists of parents in its children.
449     * By dfbu, parent numbers will be added in increasing order, possibly
450     * with the same parent appearing twice consecutively in the child's
451     * parent list. Logging numbers here allows us to avoid sorting the
452     * numbers later before creating a persistent numlistp.
453     * Actually, to reduce work, we subcontract numbering the children
454     * to CollectSubgraphNumlist.
455     */
456     static
457     struct AnonMergeIP *AnonMergeCreateIP(struct Instance *i,
458     struct AnonMergeIPData *amipd)
459     {
460     struct AnonMergeIP *amip;
461    
462     assert(i!=NULL);
463    
464     amip = NULL;
465    
466     if (GetAnonFlags(i) & AMIPFLAG) {
467    
468     amip = AMGetIP(amipd);
469     assert(amip!=NULL);
470     amip->amlist = NULL;
471     amip->subgraph = NULL;
472     amip->glp = gl_create(NumberParents(i)+2);
473     amip->parentslist = NULL;
474    
475 johnpye 399 amip->node_number = (int)GetTmpNum(i);
476 aw0a 1 amip->ip_number = amipd->ipused; /* array loc in ipbuf +1 */
477     gl_append_ptr(amip->glp,(void *)amip->ip_number);
478     assert(amip->node_number >= 0);
479     /* Node number is the dfbu numbering of shared instances,
480     * the dfbu mergenode number in tmpnum of i.
481     */
482     SetTmpNum(i,0L);
483    
484     CollectSubgraphNumlist(i, amipd, amip);
485     }
486     return amip;
487     }
488    
489     /* Hunts up node 'target' in the descendants of child 'start' of i.
490     * Resets content of scratch on entry.
491     * Fills path 'scratch' with childnumbers that can be followed to
492     * get to target. Scratch should not be NULL.
493     * Could return target instance for free if needed.
494     * This function cannot fail if the shared lists in the amip are
495     * correct.
496     * Scratch result should be copied.
497     */
498     static
499     void AnonMergeFindPath(struct Instance *i, int target,
500     unsigned long start, struct gl_list_t *scratch)
501     {
502     #define AMFPDEBUG 0
503     unsigned long c;
504 johnpye 89 struct Instance *ch = NULL; /* unnec init to avoid warning */
505 aw0a 1 struct AnonMergeIP *amip;
506     int notfound, notchild;
507     ChildListPtr cl;
508     #if AMFPDEBUG
509     int first = 1;
510     #endif
511 johnpye 89
512 aw0a 1 assert(scratch != NULL);
513     assert(InstanceChild(i,start) != NULL);
514    
515     /* start out knowing the start child, so we don't search much on 1st one */
516     gl_reset(scratch);
517     c = start;
518     notfound = 1;
519     /* ch will always hit InstanceChild assignment before being used
520     * when this function is called with a correct start value.
521     */
522     while (notfound) { /* entered at least once */
523     notchild = 1;
524     if (InstanceKind(i) == MODEL_INST) {
525     cl = GetChildList(InstanceTypeDesc(i));
526     } else {
527     cl = NULL;
528     }
529     while (notchild) { /* entered at least once */
530     /* must succeed on first trip if start is correct */
531     /* hunt for next child of i which has target as a merged descendant */
532     ch = InstanceChild(i,c);
533 johnpye 399 if (ch != NULL &&
534 aw0a 1 (GetAnonFlags(ch) & AMIPFLAG) != 0 &&
535     (cl == NULL || ChildAliasing(cl,c) == 0)) {
536     amip = (struct AnonMergeIP *)GetInterfacePtr(ch);
537     assert(amip != NULL);
538     if (NumpairNumberInList(amip->shared,(int)target)) { /*nnl*/
539     notchild = 0;
540     if (amip->node_number == target) {
541     notfound = 0;
542     }
543     gl_append_ptr(scratch,(void *)c);
544     }
545     } /* else child can't be it. go to next. */
546     #if AMFPDEBUG
547     if (first) {
548     first = 0;
549     if (notchild) {
550     FPRINTF(ASCERR,"AMFindPath missed on given child %lu\n",start);
551     }
552     }
553     #endif
554     c++;
555     }
556     /* descend to child found. hunt for next left to right. */
557     c = 1;
558     i = ch; /* so when at last located, i becomes target instance. */
559     }
560     return;
561     }
562    
563     static
564     void AnonWritePath(FILE *fp, struct Instance *i, struct gl_list_t *path)
565     {
566     #define AWPDB 0 /* debugging for this function */
567     struct Instance *root;
568 johnpye 399 unsigned long c,len, cn;
569 aw0a 1 #if AWPDB
570     int ip,im;
571     struct AnonMergeIP *amip;
572     #endif
573     struct InstanceName name;
574    
575     assert(i != NULL);
576     assert(path != NULL);
577    
578     len = gl_length(path);
579     for (c = 1; c <= len; c++) {
580     root = i;
581     cn = (unsigned long)gl_fetch(path,c);
582     i = InstanceChild(root,cn);
583     #if AWPDB
584     if (GetAnonFlags(i)&AMIPFLAG) {
585     amip = (struct AnonMergeIP *)GetInterfacePtr(i);
586     assert(amip != NULL);
587     ip = amip->ip_number;
588     im = amip->node_number;
589     } else {
590     im = ip = -1;
591     }
592     FPRINTF(fp,"(%d){%d}",ip,im);
593     #endif
594 johnpye 399 name = ChildName(root,cn);
595 aw0a 1 switch (InstanceNameType(name)){
596     case StrName:
597     if (c>1) {
598     PUTC('.',fp);
599     }
600     FPRINTF(fp,"%s",SCP(InstanceNameStr(name)));
601     break;
602     case IntArrayIndex:
603     FPRINTF(fp,"[%ld]",InstanceIntIndex(name));
604     break;
605     case StrArrayIndex:
606     FPRINTF(fp,"['%s']",SCP(InstanceStrIndex(name)));
607     break;
608     }
609     }
610     FPRINTF(fp,"\n");
611     }
612    
613     static
614     void WriteMList(FILE *fp, struct gl_list_t *aml, struct Instance *i)
615     {
616     struct gl_list_t *pathlist, *path;
617 johnpye 399 unsigned long c,len;
618     unsigned long d,dlen;
619 aw0a 1 unsigned long elen;
620    
621     if (aml !=NULL) {
622     len = gl_length(aml);
623     for (c = 1; c <= len; c++) { /* over each merge bush */
624     pathlist = (struct gl_list_t *)gl_fetch(aml,c);
625     assert(pathlist != NULL);
626     dlen = gl_length(pathlist);
627     assert(dlen != 0);
628     FPRINTF(fp,"%lu ALTERNATE PATHS to ",dlen);
629     path = (struct gl_list_t *)gl_fetch(pathlist,1);
630     assert(path != NULL);
631     AnonWritePath(fp,i,path);
632     /* write the list of paths */
633     FPRINTF(fp," //\n");
634     for (d = 1; d <= dlen; d++) {
635     path = (struct gl_list_t *)gl_fetch(pathlist,d);
636     elen = gl_length(path);
637     AnonWritePath(fp,i,path);
638     }
639     }
640     }
641     }
642    
643    
644     #if AMSTAT
645     static
646     void AnonMergeLogIP(struct Instance *i, struct AnonMergeIPData *amipd)
647     {
648     struct AnonMergeIP *amip;
649     if (GetAnonFlags(i)&AMIPFLAG) {
650     amip = (struct AnonMergeIP *)GetInterfacePtr(i);
651     if (amip->amlist != NULL) {
652     FPRINTF(amipd->fp,"%lu, ", gl_length(amip->amlist));
653     WriteInstanceName(amipd->fp,i,NULL);
654     FPRINTF(amipd->fp,"\n");
655     WriteMList(amipd->fp,amip->amlist,i);
656     FPRINTF(amipd->fp,"\n");
657     }
658     }
659     }
660     #endif
661     /*
662     * clears lists in amip, if any.
663     */
664     static
665     void AnonMergeDestroyIP(struct Instance *i,
666     struct AnonMergeIP *amip,
667     struct AnonMergeIPData *amipd)
668     {
669     (void)i;
670     DestroyMList(amip->amlist);
671     #if AMSTAT
672     if (amip->ip_number !=0 || amip->node_number != 0) {
673     FPRINTF(amipd->fp,"ip# %d mn# %d ",amip->ip_number,amip->node_number);
674     WriteInstanceName(amipd->fp,i,NULL);
675     FPRINTF(amipd->fp,"\n");
676     }
677     #endif
678     amip->amlist = NULL;
679     if (amip->subgraph != NULL) {
680     NumpairDestroyList(amip->subgraph);
681     amip->subgraph = NULL;
682     }
683     if (amip->glp != NULL) {
684     gl_destroy(amip->glp);
685     amip->glp = NULL;
686     }
687     if (amip->parentslist != NULL) {
688     NumpairDestroyList(amip->parentslist);
689     amip->parentslist = NULL;
690     }
691     if (amip->shared != NULL) {
692     NumpairDestroyList(amip->shared);
693     amip->shared = NULL;
694     }
695     amipd->ipback++;
696     }
697    
698    
699     #define THASH 0
700     static
701     struct AnonMergeUniBucket *FindUniBucket(struct TypeDescription *d,
702     unsigned long indirected,
703     struct AnonMergeUniBucket **t)
704     {
705     struct AnonMergeUniBucket *result;
706     int index;
707     index = AMUHASH(SCP(GetName(d)));
708     result = t[index];
709     while (result != NULL &&
710     ( d != result->d ||
711     (indirected != LONG_MAX && indirected != result->indirected)
712     )
713     ) {
714     result = result->next;
715     }
716     #if THASH
717     FPRINTF(ASCERR,"FUB:\t%d\tind %lu\t%s\t%lu\n",
718     index,indirected,SCP(GetName(d)),(unsigned long)result);
719     #endif
720     return result;
721     }
722    
723     /*
724     * caller is responsible for setting b->i.
725     */
726     static
727     struct AnonMergeUniBucket *AddUniBucket(struct TypeDescription *d,
728     unsigned long indirected,
729     struct AnonMergeUniBucket **t)
730     {
731     struct AnonMergeUniBucket *b;
732     int index;
733     b = (struct AnonMergeUniBucket *)ascmalloc(sizeof(struct AnonMergeUniBucket));
734     if (b==NULL) {
735     return NULL;
736     }
737     index = AMUHASH(SCP(GetName(d)));
738     b->next = t[index];
739     t[index] = b;
740     b->d = d;
741     b->indirected = indirected;
742     return b;
743     }
744     /*
745     * Clears the table (freeing the buckets). For any bucket
746     * implying only 1 instance, marks that instance as
747     * being universal and marks it as having 0 parent references,
748     * because we don't care about its merges.
749     */
750     static
751     void AnonMergeMarkUniversals(struct AnonMergeVisitInfo *amvi)
752     {
753     int i;
754     struct AnonMergeUniBucket **t;
755     struct AnonMergeUniBucket *b;
756     t = amvi->t;
757     for (i = 0; i < AMUBUCKETS; i++) {
758     while (t[i] != NULL) {
759     b = t[i];
760     t[i] = b->next;
761     if (b->i != NULL) {
762     SetAnonFlags(b->i,AMUFLAG); /* this must also catch explicit univs */
763     SetTmpNum(b->i,0L);
764     #if (AMSTAT && 1)
765     WriteInstanceName(ASCERR,b->i,NULL);
766     FPRINTF(ASCERR," UNIVERSAL\n");
767     #endif
768     }
769     ascfree(b);
770     }
771     }
772     }
773    
774     /*
775     * AnonMergeCountTree(i,amvi);
776     * This must be applied bottom up.
777     * Sets the anonflags to 0 for all i.
778 johnpye 399 * Sets the tmpnum of i to zero.
779 aw0a 1 * Increment tmpnum in each child of compound i which is not an alias child of
780     * i or a constant type. Constants are fully accounted for in their individual
781     * anontype, so sharedness does not further affect instance semantics -- at
782     * least so far as equations are concerned. We deduct the array alii
783     * contributions from the MODEL level where we find the alias statement
784     * because it is hard to do it in the array scope.
785     *
786     * When the visit with this function is done, every nonconstant instance ends with
787     * tmpnum set to the number of parents it is connected to in a non-alias way.
788     *
789     * Sometimes, and incorrectly, this also
790     * counts the total number of children, parents, extra parents, universals,
791     * and parents of universals;
792     * These will be overestimates because the visitinstance
793     * functions do not ignore universals, even though we will.
794     */
795     void AnonMergeCountTree(struct Instance *i, struct AnonMergeVisitInfo *amvi)
796     {
797     unsigned long c,len;
798     struct AnonMergeUniBucket *b;
799     struct Instance *ch;
800     ChildListPtr cl;
801     #if AMSTAT
802     unsigned long tmp;
803     (void) tmp;
804     #endif
805    
806     SetAnonFlags(i,0);
807     SetTmpNum(i,0);
808    
809     /* we need to find if all instances are universal or at least of
810     * locally unique formal type. Inventory them here.
811     */
812     b = FindUniBucket(InstanceTypeDesc(i),InstanceIndirected(i),amvi->t);
813     if (b == NULL) {
814     b = AddUniBucket(InstanceTypeDesc(i),InstanceIndirected(i),amvi->t);
815     b->i = i; /* remember first, hopefully only, instance */
816     } else {
817     b->i = NULL; /* more than 1 instance. not universal */
818     }
819    
820     if (IsCompoundInstance(i)) {
821     /* Increment the parent link count on interesting children. Elsewhere
822     * all compound instances and all the ones that happen to have multiple
823     * non-alias parents will get amips. All those with multiple
824     * non-alias parents will eventually get tmpnums reassigned dfbu.
825     */
826     len = NumberChildren(i);
827     if (len > amvi->maxchildren) {
828     amvi->maxchildren = len;
829     }
830     if (InstanceKind(i) == MODEL_INST) {
831     cl = GetChildList(InstanceTypeDesc(i));
832     assert(cl!=NULL);
833     for (c = 1; c <= len; c++) {
834     if ((ch = InstanceChild(i,c)) != NULL && !IsIgnorableInstance(ch)) {
835     /*ignore const/set/reln/dummy/aryofsame ch*/
836     if ( !ChildAliasing(cl,c) ) {
837     /* record existence of a real link to ch */
838     IncrementTmpNum(ch);
839     } else {
840     /* ALIASES. array possibly. */
841     if (IsArrayInstance(ch)) {
842     /* Now, in the context of the MODEL, we can spot if a child is
843     * an alias array. If it is, then we want to decrement the
844     * reference counts on the leaves of the array, but not on the
845     * array_insts, unless those leaves happen to be other arrays.
846     * It may happen that there are no leaves if the array is
847 johnpye 399 * defined over a NULL set in some portion. So for example if
848 aw0a 1 * c[8][9] IS_A real; b[1..2][4..5] ALIASES c;
849     * then the instance b has 4 subscripts to the end user,
850     * but the 'leaves' of b which we don't want to
851     * count as parents of elements c[i] are b[i][j].
852     * Go NumberofDereferences(ch) and delete that contribution to
853     * the number of parents counts. Add this to arrayinst.c.
854     */
855     ArrayVisitLocalLeaves(ch,(AVProc)DecrementTmpNum);
856     }
857     }
858     }
859     }
860     } else {
861     assert(IsArrayInstance(i)); /* must be array since not MODEL */
862     for (c = 1; c <= len; c++) {
863     if ( (ch = InstanceChild(i,c)) != NULL) {
864     if (!IsIgnorableInstance(ch)) {
865     /* do not care about merges on arrays of constants/sets/relns
866     * arrays of same since the anontype includes value and
867     * structure doesn't matter if value is the same.
868     */
869     IncrementTmpNum(ch);
870     }
871     }
872     }
873     }
874     }
875     }
876    
877     /* Sets the tmpnum to mergenode dfbu if tmpnum currently > 1.
878     * Sets the tmpnum to 0 if tmpnum currently 1.
879     * Does nothing to tmpnum if tmpnum 0.
880     * Counts the instance toward the number of ips needed if it is
881     * compound or its final tmpnum is not 0.
882     * This must be called after universals are marked, else the universal
883     * marking may stomp on the anonflags.
884     */
885     static
886     void AnonMergeLabelNodes(struct Instance *i, struct AnonMergeVisitInfo *amvi)
887     {
888     assert(GetTmpNum(i) <10000000);
889     switch (GetTmpNum(i)) {
890 johnpye 399 case 0:
891 aw0a 1 /* all ignorable stuff should fall in here */
892     break;
893     case 1:
894     assert(IsIgnorableInstance(i)==0);
895     /* we're not supposed to be marking ignorable instances tmpnums at all
896     * in the net.
897     */
898     SetTmpNum(i,0L);
899     break;
900     default:
901     /* more than 1 real parent */
902     (amvi->nim)++;
903     assert(IsIgnorableInstance(i)==0);
904     SetTmpNum(i,(unsigned long)amvi->nim);
905     assert(GetTmpNum(i) <100000000);
906     /* compound or not it gets an IP */
907     (amvi->nip)++;
908 johnpye 399 SetAnonFlags(i,(GetAnonFlags(i) | AMIPFLAG));
909 aw0a 1 return;
910     }
911     if (IsCompoundInstance(i)) {
912     /* nonmerged instance or uninteresting instance. check compoundness
913     * as even uninteresting, unmerged compound instances need a space
914     * to hang their subgraph numlist. universals included.
915     */
916 johnpye 399 SetAnonFlags(i,(GetAnonFlags(i) | AMIPFLAG));
917 aw0a 1 (amvi->nip)++;
918     }
919     }
920    
921     /*
922     * prints out the statistics of academic interest.
923     * does not (and cannot) compile unless AMSTAT == TRUE.
924     */
925     #if (AMSTAT)
926     static
927     void AMCWriteCounts(struct AnonMergeVisitInfo *amvi)
928     {
929     FPRINTF(ASCERR,"AnonMerge statistics:\n");
930     FPRINTF(ASCERR,"\tMerged Instances:\t%lu\n",amvi->nim);
931     FPRINTF(ASCERR,"\tIP Instances:\t%lu\n",amvi->nip);
932     }
933     #endif /* amstat */
934    
935     static
936     void AMVIInit(struct AnonMergeVisitInfo *amvi,struct Instance *root)
937     {
938     int i;
939     assert(amvi!=NULL);
940     assert(root!=NULL);
941     amvi->root = root;
942     amvi->nip = 0L;
943     amvi->nim = 0L;
944     amvi->maxchildren = 0L;
945     for (i = 0; i < AMUBUCKETS; i++) {
946     amvi->t[i] = NULL;
947     }
948     }
949    
950     #define SP_LEN 2
951     #if (SIZEOF_VOID_P == 8)
952     #define SP_WID 252
953     #else
954     #define SP_WID 504
955     #endif
956     /* retune spwid if the size of sgelt changes dramatically */
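/* Arithmetic behind the widths above (assuming typical struct padding):
 * struct sgelt is 8 bytes with 4-byte pointers and 16 bytes with 8-byte
 * pointers, so both settings keep one pool bar near 504*8 = 252*16 = 4032
 * bytes.
 */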
957     #define SP_ELT_SIZE (sizeof(struct sgelt))
958     #define SP_MORE_ELTS 1
959     /* Number of slots filled if more elements needed.
960     So if the pool grows, it grows by SP_MORE_ELTS*SP_WID elements at a time. */
961     #define SP_MORE_BARS 50
962     /* This is the number of pool bar slots to add during expansion.
963     not all the slots will be filled immediately. */
964    
965     static
966     void InitSgeltPool(struct mdata *md) {
967     if (md->pool != NULL ) {
968     Asc_Panic(2, NULL, "ERROR: InitSgeltPool called twice.\n");
969     }
970     md->pool =
971     pool_create_store(SP_LEN, SP_WID, SP_ELT_SIZE, SP_MORE_ELTS, SP_MORE_BARS);
972     if (md->pool == NULL) {
973     Asc_Panic(2, NULL, "ERROR: InitSgeltPool unable to allocate pool.\n");
974     }
975     }
976    
977     static
978     void DestroySgeltPool(struct mdata *md) {
979     if (md->pool==NULL) return;
980     pool_destroy_store(md->pool);
981     md->pool = NULL;
982     }
983    
984 johnpye 89 #if 0
985     /* UNUSED */
986 aw0a 1 static
987     void ReportSgeltPool(FILE *f,struct mdata *md)
988     {
989     if (md->pool==NULL) {
990     FPRINTF(f,"SgeltPool is empty\n"); return;
991     }
992     FPRINTF(f,"SgeltPool ");
993     pool_print_store(f,md->pool,0);
994     }
995 johnpye 89 #endif
996 aw0a 1
997     /* create amipd and init all its bits */
998     static
999     struct AnonMergeIPData *AMIPDInit(struct AnonMergeVisitInfo *amvi)
1000     {
1001     size_t asize;
1002     struct AnonMergeIPData *amipd;
1003     int i,len;
1004     unsigned long j,dlen;
1005    
1006     amipd = (struct AnonMergeIPData *)ascmalloc(sizeof(struct AnonMergeIPData));
1007     if (amipd==NULL) {
1008     ascfree(amipd);
1009     Asc_Panic(2,"AMIPDInit","Insufficient memory for amipd.");
1010     return NULL;
1011     }
1012     amipd->root = amvi->root;
1013     amipd->iplen = amvi->nip;
1014     amipd->num_mergelists = 0;
1015     amipd->num_iwithmerge = 0;
1016 johnpye 708 amipd->node2ip = ASC_NEW_ARRAY(int,(amvi->nim+1));
1017 aw0a 1 if (amipd->node2ip == NULL) {
1018     Asc_Panic(2,"AMIPDInit","Insufficient memory for node2ip.");
1019     return NULL;
1020     }
1021     amipd->ipbuf = (struct AnonMergeIP *)
1022     ascmalloc(sizeof(struct AnonMergeIP)*amvi->nip);
1023     if (amipd->ipbuf==NULL) {
1024     Asc_Panic(2,"AMIPDInit","Insufficient memory for ipbuf.");
1025     return NULL;
1026     }
1027     amipd->ipused = 0;
1028     amipd->ipback = 0;
1029     amipd->compcount = 1;
1030     amipd->enlp0 = NumpairExpandableList(NULL,100);
1031     amipd->enlp1 = NumpairExpandableList(NULL,100);
1032     amipd->enlp2 = NumpairExpandableList(NULL,100);
1033     amipd->nlpgl = gl_create(100);
1034     if (amipd->enlp0 == NULL ||
1035     amipd->enlp1 == NULL ||
1036     amipd->enlp2 == NULL ||
1037     amipd->nlpgl == NULL) {
1038     Asc_Panic(2,"AMIPDInit","Insufficient memory for scratch nlps.");
1039     return NULL;
1040     }
1041     amipd->senlp0 = NumpairExpandableList(NULL,100);
1042     amipd->senlp1 = NumpairExpandableList(NULL,100);
1043     amipd->senlp2 = NumpairExpandableList(NULL,100);
1044     amipd->snlpgl = gl_create(100);
1045     if (amipd->senlp0 == NULL ||
1046     amipd->senlp1 == NULL ||
1047     amipd->senlp2 == NULL ||
1048     amipd->snlpgl == NULL) {
1049     Asc_Panic(2,"AMIPDInit","Insufficient memory for scratch snlps.");
1050     return NULL;
1051     }
1052     amipd->oldips = PushInterfacePtrs(amvi->root,(IPFunc)AnonMergeCreateIP,
1053     amvi->nip,1, (VOIDPTR)amipd);
1054 johnpye 399
1055 aw0a 1 asize = amvi->maxchildren; /* now some buffers of this size */
1056    
1057 johnpye 669 amipd->listhints = ASC_NEW_ARRAY(int,asize);
1058     amipd->graphs = ASC_NEW_ARRAY(Numlist_p,asize);
1059     amipd->mergegraphs = ASC_NEW_ARRAY(Numlist_p,asize);
1060 johnpye 399 amipd->graphchildnum =
1061 johnpye 669 ASC_NEW_ARRAY(unsigned long,asize);
1062     amipd->routes = ASC_NEW_ARRAY(Numlist_p,asize);
1063 johnpye 399 amipd->routechildnum =
1064 johnpye 669 ASC_NEW_ARRAY(unsigned long,asize);
1065 johnpye 399 amipd->finalchildnum =
1066 johnpye 669 ASC_NEW_ARRAY(unsigned long,asize);
1067 aw0a 1 /* totfinal, totroute, totgraph are all 'in use' counters, not
1068     * the size of allocation. amvi->maxchildren is the capacity.
1069     */
1070     if (amipd->graphs == NULL ||
1071     amipd->mergegraphs == NULL ||
1072     amipd->graphchildnum == NULL ||
1073     amipd->routes == NULL ||
1074     amipd->routechildnum == NULL ||
1075     amipd->finalchildnum == NULL) {
1076     Asc_Panic(2,"AMIPDInit","Insufficient memory for subgraph lists.");
1077     return NULL;
1078     }
1079     amipd->scratchpath = gl_create(100);
1080     amipd->scratchpathlist = gl_create(100);
1081     amipd->scratchamlist = gl_create(100);
1082     if (amipd->scratchpath == NULL ||
1083     amipd->scratchpathlist == NULL ||
1084     amipd->scratchamlist == NULL) {
1085     Asc_Panic(2,"AMIPDInit","Insufficient memory for scratch lists.");
1086     return NULL;
1087     }
1088     /* setup md */
1089     amipd->md.header = (struct sgelt **)
1090     ascmalloc(sizeof(struct sgelt *)*(amvi->nip+1));
1091     if (amipd->md.header == NULL) {
1092     Asc_Panic(2,"AMIPDInit","Insufficient memory for md.header.");
1093     return NULL;
1094     }
1095 johnpye 708 amipd->md.colcount = ASC_NEW_ARRAY(int,(amvi->nip+1));
1096 aw0a 1 if (amipd->md.colcount == NULL) {
1097     Asc_Panic(2,"AMIPDInit","Insufficient memory for md.colcount.");
1098     return NULL;
1099     }
1100 johnpye 708 amipd->md.sg2blob = ASC_NEW_ARRAY(int,(asize+1));
1101 aw0a 1 if (amipd->md.sg2blob == NULL) {
1102     Asc_Panic(2,"AMIPDInit","Insufficient memory for md.sg2blob.");
1103     return NULL;
1104     }
1105 johnpye 708 amipd->md.blobcollected = ASC_NEW_ARRAY(int,(asize+1));
1106 aw0a 1 if (amipd->md.blobcollected == NULL) {
1107     Asc_Panic(2,"AMIPDInit","Insufficient memory for md.blobcollected.");
1108     return NULL;
1109     }
1110     amipd->md.blob = (struct gl_list_t **)
1111     ascmalloc(sizeof(struct gl_list_t *)*(asize+1));
1112     if (amipd->md.blob == NULL) {
1113     Asc_Panic(2,"AMIPDInit","Insufficient memory for md.blob.");
1114     return NULL;
1115     }
1116 johnpye 89 for (i=0;i <= (int)asize;i++) {
1117 aw0a 1 /* as we'll never use most of these, create them small and
1118     * let them grow.
1119     */
1120     amipd->md.blob[i] = gl_create(2);
1121     if (amipd->md.blob[i] == NULL) {
1122     Asc_Panic(2,"AMIPDInit","Insufficient memory for md.blob data.");
1123     return NULL;
1124     }
1125     }
1126     amipd->md.maxchildren = asize;
1127     amipd->md.nextnode_ip = 0;
1128     amipd->md.sg = 0;
1129     amipd->md.pool = NULL;
1130     InitSgeltPool(&(amipd->md));
1131     /* */
1132     len = amipd->ipused;
1133     for (i=0; i < len; i++) {
1134     struct gl_list_t *gl;
1135     gl = amipd->ipbuf[i].glp;
1136     if (gl != NULL) {
1137     if ((dlen=gl_length(gl)) > 0) {
1138     NumpairClearList(amipd->enlp0);
1139     for (j=1; j <= dlen; j++) {
1140     NumpairAppendList(amipd->enlp0,(int)gl_fetch(gl,j));
1141     }
1142     amipd->ipbuf[i].parentslist = NumpairCopyList(amipd->enlp0);
1143     }
1144     amipd->ipbuf[i].glp = NULL;
1145     gl_destroy(gl);
1146     }
1147     }
1148     return amipd;
1149     }
1150     /* deallocate amipd */
1151     static
1152     void DestroyAMIPD(struct AnonMergeIPData *amipd) {
1153     int i;
1154     ascfree(amipd->ipbuf);
1155     ascfree(amipd->node2ip);
1156     ascfree(amipd->listhints);
1157     DestroySgeltPool(&(amipd->md));
1158     ascfree(amipd->md.header);
1159     ascfree(amipd->md.colcount);
1160     ascfree(amipd->md.sg2blob);
1161     ascfree(amipd->md.blobcollected);
1162     for (i=0; i <= amipd->md.maxchildren; i++) {
1163     gl_destroy(amipd->md.blob[i]);
1164     }
1165     ascfree(amipd->md.blob);
1166     gl_destroy(amipd->nlpgl);
1167     NumpairDestroyList(amipd->enlp0);
1168     NumpairDestroyList(amipd->enlp1);
1169     NumpairDestroyList(amipd->enlp2);
1170     gl_destroy(amipd->snlpgl);
1171     NumpairDestroyList(amipd->senlp0);
1172     NumpairDestroyList(amipd->senlp1);
1173     NumpairDestroyList(amipd->senlp2);
1174     ascfree(amipd->mergegraphs);
1175     ascfree(amipd->graphs);
1176     ascfree(amipd->routes);
1177     ascfree(amipd->graphchildnum);
1178     ascfree(amipd->routechildnum);
1179     ascfree(amipd->finalchildnum);
1180     gl_destroy(amipd->scratchpath);
1181     gl_destroy(amipd->scratchpathlist);
1182     gl_destroy(amipd->scratchamlist);
1183     ascfree(amipd);
1184     }
1185    
1186     #if CHECKN
1187     static
1188     void AMCheckN(struct Instance *i, struct AnonMergeIPData *amipd)
1189     {
1190     struct AnonMergeIP *amip;
1191     if ((GetAnonFlags(i) & AMIPFLAG) == 0) {
1192     return;
1193     } else {
1194     /* get amip for use in the rest of this function. */
1195     amip = (struct AnonMergeIP *)GetInterfacePtr(i);
1196     }
1197     if (amip->node_number > 0) {
1198     if (amip->shared != NULL && NumpairListLen(amip->shared)>0) {
1199     FPRINTF(amipd->fp,"snlist %d len=%d\n", amip->node_number,
1200     NumpairListLen(amip->shared));
1201     }
1202     }
1203     if (amip->ip_number > 0) {
1204     if (amip->subgraph != NULL && NumpairListLen(amip->subgraph)>0) {
1205     FPRINTF(amipd->fp,"iplist %d len=%d\n", amip->ip_number,
1206     NumpairListLen(amip->subgraph));
1207     }
1208     }
1209     }
1210     #endif
1211    
1212     static
1213     Numlist_p GetParentsList(int p, struct AnonMergeIPData *amipd)
1214     {
1215     assert(amipd != NULL);
1216     assert(p>0);
1217     assert (amipd->ipbuf[p-1].ip_number == p);
1218     return amipd->ipbuf[p-1].parentslist;
1219     }
1220    
1221     static
1222     void ZeroArrayEntry(int i, struct mdata *md)
1223     {
1224     md->header[i] = NULL;
1225     md->colcount[i] = 0;
1226     }
1227    
1228     /*
1229     * Move content of all blobs indicated in bl into the biggest.
1230     * renumbers sg2blob of smaller blobs to match bigblob.
1231     */
1232     static
1233     void MergeBlobs(struct mdata *md, int bigblob, struct gl_list_t *bl)
1234     {
1235     struct gl_list_t *keep, *old;
1236     unsigned long c,len, blc,bllen;
1237     int oldblob, oldsg;
1238     keep = md->blob[bigblob];
1239     for (blc=1,bllen = gl_length(bl); blc <= bllen; blc++) {
1240     oldblob = (int)gl_fetch(bl,blc);
1241     if (oldblob != bigblob) {
1242     old = md->blob[oldblob];
1243     for (c=1, len = gl_length(old); c <= len; c++) {
1244     oldsg = (int)gl_fetch(old,c);
1245 johnpye 399 #if 1
1246 aw0a 1 /* check the supposition that we are keeping lists nonoverlapping */
1247     assert(gl_ptr_search(keep,(VOIDPTR)oldsg,0)==0);
1248     #endif
1249     gl_append_ptr(keep,(VOIDPTR)oldsg); /* add to new blob */
1250     md->sg2blob[oldsg] = bigblob; /* reassign sg2blob lookup */
1251     }
1252     }
1253     }
1254     }
1255    
1256     #define ELTMALLOC (struct sgelt *)pool_get_element(md->pool)
1257     /* ELTFREE is not defined, as we use pool reset instead */
1258     #define ELTRESET(md) pool_clear_store((md)->pool)
1259     /*
1260     * Build a sparse matrix of sorts col indexed by parent ip
1261     * with 'rows' indexing by sg, the reduced child number of i.
1262     * there is no rowwise access to this because we don't need it.
1263     * elts must be pooled or we spend eternity in malloc.
1264     */
1265     static
1266     void add_matrix_incidence(int parent, struct mdata *md)
1267     {
1268     struct sgelt *elt;
1269     if (parent > md->nextnode_ip) {
1270     #if CFI
1271     FPRINTF(ASCERR,"mat row %d col %d\n",md->sg,parent);
1272     #endif
1273     elt = ELTMALLOC;
1274     elt->sg = md->sg;
1275     elt->next = md->header[parent];
1276     md->header[parent] = elt;
1277     md->colcount[parent]++;
1278 johnpye 399 } else {
1279 aw0a 1 #if (CFI && 0)
1280     FPRINTF(ASCERR,"ign row %d col %d\n",md->sg,parent);
1281     #endif
1282     }
1283     }
1284    
1285     /* can we move some of this into md? */
1286     static
1287 johnpye 399 void CalcFinalIndependentRoutes(struct mdata *md,
1288 aw0a 1 Numlist_p parentsini,
1289     Numlist_p CONST *routes,
1290     Numlist_p parentsinchild,
1291     struct gl_list_t *blobsincol,
1292     struct gl_list_t *newsgincol,
1293     unsigned long * CONST finalchildnum,
1294     unsigned long * CONST routechildnum,
1295     int * CONST totfinal,
1296     CONST int totroutes)
1297     {
1298     unsigned long newsg,blc,bllen;
1299     struct sgelt *elt;
1300     int blob;
1301     int bigblob;
1302     unsigned long bigsize;
1303     int p,sg,hint;
1304    
1305     /* init work space */
1306     NumpairListIterate(parentsini,(NPLFunc)ZeroArrayEntry,(VOIDPTR)md);
1307     /* collect connection matrix */
1308     for (sg = 0; sg < totroutes; sg++) {
1309     md->sg = sg;
1310     md->sg2blob[sg] = -1;
1311     md->blobcollected[sg] = 0;
1312     gl_reset(md->blob[sg]);
1313     NumpairCalcIntersection(parentsini,routes[sg],parentsinchild);
1314     NumpairListIterate(parentsinchild,(NPLFunc)add_matrix_incidence,
1315     (VOIDPTR)md);
1316     }
1317     /* process columns of matrix to group rows into overlapping blobs */
1318     /* while processing a column, blobcollected indicates if we've
1319     * already seen that blob in this column.
1320     */
1321     p = 0;
1322     hint = -1;
1323     while ( p = NumpairPrevNumber(parentsini,p, &hint),
1324     p > md->nextnode_ip) {
1325     #if CFI
1326     FPRINTF(ASCERR,"parent %d count %d\n",p,md->colcount[p]);
1327     #endif
1328     switch(md->colcount[p]) {
1329     case 0: /* target appears directly in child list of source */
1330     /* At least one element of sg2blob will stay -1.
1331     * We'll need that sg at the end as it is an independent path.
1332     */
1333     break;
1334     case 1:
1335     sg = md->header[p]->sg;
1336     if (md->sg2blob[sg] == -1) {
1337     /* new blob for sg, and new sg for new blob */
1338     md->sg2blob[sg] = sg;
1339     gl_append_ptr(md->blob[sg], (VOIDPTR)sg);
1340     }
1341     /* else this parent is already seen by some other
1342     * child of the source.
1343     */
1344     break;
1345     default:
1346     elt = md->header[p];
1347     if (elt != NULL) {
1348     /* must be at least a blob or an unblobbed sg in the column */
1349     bigblob = -1; /* sg of the biggest seen so far */
1350     bigsize = 0L;
1351     gl_reset(blobsincol); /* nonredundant list of blobs seen in col */
1352     /* each blob is a nonredundant list of sg in the blob */
1353     gl_reset(newsgincol); /* list of sg new in column not in any blob */
1354     /* Compute list of existing blobs in col. These must be
1355     * merged if there are more than 1.
1356     * Compute list of unblobbed (as yet) parentsini. If no
1357     * existing blobs, these form a new blob, else
1358     * we add these to the existing blobs.
1359     * With this collection method, we never need merge overlapping
1360     * blobs because we never create a blob that contains any member
1361     * of an existing blob. At the end of the surrounding loop, we
1362     * will have only nonoverlapping blobs and direct children (-1)
1363     * left in matrix.sg2blob. We want to keep the lowest sg of each
1364     * blob in sg2blob.
1365     */
1366     while (elt != NULL) {
1367     blob = md->sg2blob[elt->sg];
1368     if (blob == -1) {
1369     /* never seen this child of source */
1370     gl_append_ptr(newsgincol,(VOIDPTR)elt->sg);
1371     } else {
1372     if (md->blobcollected[blob] == 0) {
1373     /* never seen this blob, blob must be nonempty */
1374     assert(gl_length(md->blob[blob]) != 0);
1375     if (gl_length(md->blob[blob]) > bigsize) {
1376     /* track which of collected blobs is biggest */
1377     bigsize = gl_length(md->blob[blob]);
1378     bigblob = blob;
1379     }
1380     gl_append_ptr(blobsincol,(VOIDPTR)blob);
1381     md->blobcollected[blob] = 1;
1382 johnpye 399 }
1383 aw0a 1 /* else saw blob before. */
1384     }
1385     elt = elt->next;
1386     } /* endwhile */
1387     /* By construction, last element of newsgincol
1388     * is also the lowest sg in col because sg are inserted in
1389     * increasing order at header in the matrix assembly.
1390     */
1391 johnpye 399 if (gl_length(blobsincol) == 0L) {
1392 aw0a 1 assert(bigblob == -1);
1393     /* get list index of least sg to use as new blob */
1394 johnpye 399 bllen = (int)gl_length(newsgincol);
1395 aw0a 1 #if CFI
1396     FPRINTF(ASCERR,"No blobs. %lu newsg\n",bllen);
1397     #endif
1398 johnpye 399 assert(bllen > 0);
1399 aw0a 1 /* fails mysteriously when all sg in first blob.
1400     * Why is blobsincol empty? because someone left blobcollected 1.
1401     */
1402     /* new blob */
1403     blob = (int)gl_fetch(newsgincol,bllen); /* new blob number:last */
1404     assert(md->blob[blob] != NULL);
1405     assert(gl_length(md->blob[blob]) == 0);
1406     /* add sg to new blob and assign new blob to those sg */
1407     for (blc = 1; blc <= bllen; blc++) {
1408     newsg = (int)gl_fetch(newsgincol,blc);
1409     gl_append_ptr(md->blob[blob],(VOIDPTR)newsg);
1410     md->sg2blob[newsg] = blob;
1411     }
1412     } else {
1413     #if CFI
1414     FPRINTF(ASCERR,"blobs: %lu.\n",gl_length(blobsincol));
1415     #endif
1416     assert(bigblob >= 0);
1417     /* renumber smaller blobs in blobsincol to be members of biggest */
1418     MergeBlobs(md, bigblob, blobsincol);
1419     md->blobcollected[bigblob] = 0;
1420     /* blobcollected for smaller blobs will not be seen again.
1421     * reinit it for next column for the big blob
1422     */
1423     /* add new sg's to bigblob */
1424     for (blc = 1, bllen = gl_length(newsgincol); blc <= bllen; blc++) {
1425     newsg = (int)gl_fetch(newsgincol,blc);
1426     gl_append_ptr(md->blob[bigblob],(VOIDPTR)newsg);
1427     md->sg2blob[newsg] = bigblob;
1428     }
1429     }
1430     } /* end if elt ! NULL */
1431     break;
1432     } /* end switch */
1433     } /* end while */
1434     for (sg = 0; sg < totroutes; sg++) {
1435     md->blobcollected[sg] = 0;
1436     }
1437     /* note this process and the mergeblobs process both leave lots
1438     * of old data scattered in their wakes.
1439     */
1440     /* now blobcollected indicates we're keeping a route */
1441     for (sg = 0; sg < totroutes; sg++) {
1442     if (md->sg2blob[sg] == -1) {
1443     /* direct route. sg not in blob. collect it. */
1444     md->blobcollected[sg] = 1;
1445     finalchildnum[*totfinal] = routechildnum[sg];
1446     (*totfinal)++;
1447     } else {
1448     if (md->blobcollected[md->sg2blob[sg]] == 0) {
1449     /* sg is leftmost route through blob it is in.
1450 johnpye 399 * collect sg and mark blob collected.
1451 aw0a 1 */
1452     md->blobcollected[md->sg2blob[sg]] = 1;
1453     finalchildnum[*totfinal] = routechildnum[sg];
1454     (*totfinal)++;
1455     }
1456     }
1457     }
1458     ELTRESET(md);
1459     /* ok, totfinal and finalchildnum are what they need to be. */
1460     }
1461    
1462     /* This is now called with visittree, not visitnametree, so no
1463     * path to i is known or needed.
1464     * All the amipd scratch spaces are assumed clean on entry,
1465     * so leave them clean on exit and clean them before the
1466     * first call to this occurs in a related series.
1467     */
1468     static
1469     void AnonMergeDetect(struct Instance *i,
1470     struct AnonMergeIPData *amipd)
1471     {
1472     struct Instance *ch; /* immediate children of i */
1473     struct AnonMergeIP *amip, *chamip;
1474     unsigned long nch,c;
1475     int len;
1476     int nextnode, nextnode_ip; /* index for descendant iteration */
1477     int nphint; /* clues for iterations */
1478     int thisnode; /* nodenumber for i */
1479     int sg; /* iteration */
1480     unsigned int aflags;
1481     Numlist_p mergednodes; /* interesting descendants of i */
1482     Numlist_p parents; /* interesting descendants of i */
1483 johnpye 399 struct gl_list_t *scratchpath, *scratchpathlist, *scratchamlist;
1484 aw0a 1 struct gl_list_t *path, *pathlist;
1485     ChildListPtr cl;
1486     /* probably want to make totroutes, totgraphs, totfinal local ints */
1487    
1488 johnpye 399 /* Here we take the subgraph map of merged nodes in i
1489 aw0a 1 * and try to find nonintersecting paths to each of the
1490 johnpye 399 * merged nodes starting from each of the immediate
1491 aw0a 1 * children of i. All compound or merged instances have a subgraph
1492     * map (or NULL if they have no merged descendants of
1493 johnpye 399 * interest) of mergednode numbers.
1494 aw0a 1 */
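    /* A sketch of the idea (the names b, c, x are illustrative, not from
     * any particular model): if i has children b and c, and i.b.x and
     * i.c.x are one and the same merged instance x, then the shared maps
     * of both b and c contain x's node number. That gives two candidate
     * routes to x from i, and if the routes prove independent the merge
     * is recorded on i as a pair of paths through b and through c.
     */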
1495    
1496     aflags = GetAnonFlags(i);
1497     if ((aflags & AMIPFLAG) == 0 || /* ignore junk */
1498     (aflags & AMUFLAG) != 0 /* ignore UNIVERSALs */) {
1499     /* skip the maximally uninteresting nodes */
1500     return;
1501     } else {
1502     /* get amip for use in the rest of this function. */
1503     amip = (struct AnonMergeIP *)GetInterfacePtr(i);
1504     assert(amip!=NULL);
1505     /* for all nodes set up the translation to ip_number */
1506     amipd->node2ip[amip->node_number] = amip->ip_number;
1507     /* this number is not used in this function call, but in
1508     * later calls where this node is the merged descendant
1509     * being accounted for. node2ip[0] is obviously not meaningful.
1510     */
1511     }
1512    
1513     if (IsCompoundInstance(i) && (nch = NumberChildren(i)) > 1) {
1514     mergednodes = amip->shared;
1515     if (mergednodes == NULL || NumpairListLen(mergednodes) == 0) {
1516     /* no merged descendants to account for */
1517     return;
1518     }
1519     /* get this node's number so we can ignore i while processing i */
1520     thisnode = amip->node_number;
1521     amipd->totgraphs = 0;
1522     #if (AMSTAT && 0)
1523     FPRINTF(ASCERR,"\nroot ip %d ",amip->ip_number);
1524     WriteInstanceName(ASCERR,i,NULL);
1525     FPRINTF(ASCERR,"\n");
1526     #if (AMSTAT && 0)
1527     FPRINTF(ASCERR,"descendants:\n");
1528     NLPWrite(NULL,amip->subgraph);
1529     #endif
1530     #endif
1531     if (InstanceKind(i)==MODEL_INST) {
1532     cl = GetChildList(InstanceTypeDesc(i));
1533     } else {
1534     cl = NULL;
1535     }
1536    
1537     for (c = 1; c <= nch; c++) {
1538     /* lots of children have no merged subgraphs. collect total set from
1539     * those that do and then iterate only over those children.
1540     * Also exclude alias children as they will be accounted in
1541     * the type definition.
1542     */
1543     ch = InstanceChild(i,c);
1544 johnpye 399 if (ch != NULL &&
1545 aw0a 1 (GetAnonFlags(ch)&AMIPFLAG) != 0 &&
1546     (cl == NULL || ChildAliasing(cl,c)==0)) {
1547     chamip = (struct AnonMergeIP *)GetInterfacePtr(ch);
1548     assert(chamip != NULL);
1549     if (chamip->shared != NULL) {
1550     amipd->graphs[amipd->totgraphs] = chamip->subgraph;
1551     amipd->mergegraphs[amipd->totgraphs] = chamip->shared;
1552     amipd->graphchildnum[amipd->totgraphs] = c; /* track origin */
1553     amipd->totgraphs++;
1554     #if (AMSTAT && 0)
1555 johnpye 399 if (chamip->subgraph != NULL &&
1556 aw0a 1 NumpairListLen(chamip->subgraph) > 0) {
1557     FPRINTF(ASCERR,"root ip %d, child number %lu, subgraph:\n",
1558     amip->ip_number,c);
1559     #if NLPWRITE
1560     NLPWrite(ASCERR, chamip->subgraph);
1561     FPRINTF(ASCERR,"shared\n");
1562     NLPWrite(ASCERR, chamip->shared);
1563     FPRINTF(ASCERR,"\n");
1564     #endif
1565     } /* endif */
1566     #endif /*amstat*/
1567     } /* endif */
1568     } /* endif */
1569     } /* endfor children i */
1570    
1571     if (amipd->totgraphs < 2) {
1572     /* one child has all the merged nodes, so we will never be able
1573     * to find two independent paths to any merged descendant. quit now.
1574 johnpye 399 */
1575 aw0a 1 amipd->totgraphs = 0;
1576     return;
1577     }
1578    
1579 johnpye 399 /* Foreach merged descendant of i, find merges recordable on i.
1580 aw0a 1 * There can never be more than n=totgraphs independent paths.
1581     * Process in descending node order,
1582     * though this is not required as what is recorded about one descendant
1583     * does not affect what is recorded about another.
1584     * Reverse order may make some set computations cheaper if we were
1585     * to highly optimize the processing.
1586     */
1587     scratchpath = amipd->scratchpath;
1588     scratchpathlist = amipd->scratchpathlist;
1589 johnpye 399 scratchamlist = amipd->scratchamlist;
1590 aw0a 1 gl_reset(scratchamlist);
1591     nphint = -1;
1592     len = amipd->totgraphs;
1593     for (sg = 0; sg < len; sg++) {
1594     /* With a little care, we could construct totgraphs full of -1
1595     * and always leave it in that state. The amount of assignments
1596     * would be the same, but the iteration would be skipped if we
1597     * could piggyback the iteration on some other iteration.
1598     */
1599     amipd->listhints[sg] = -1;
1600     }
1601     for (nextnode = NumpairPrevNumber(mergednodes,0,&nphint);
1602     nextnode > 0;
1603     nextnode = NumpairPrevNumber(mergednodes,nextnode,&nphint)) {
1604     amipd->totroutes = 0;
1605     /* find merges of i's descendant nextnode but not of i itself. */
1606     if (nextnode != thisnode) {
1607     len = amipd->totgraphs;
1608     /* filter out shared that don't have a route to nextnode in them */
1609     nextnode_ip = amipd->node2ip[nextnode];
1610     for (sg = 0; sg < len; sg++) {
1611     if (NumpairNumberInListHintedDecreasing(amipd->mergegraphs[sg],
1612     nextnode,
1613     &(amipd->listhints[sg]))
1614     ) {
1615     /* collect shared that contain a route to nextnode.
1616     * Because we can do this in a hinted fashion and skip
1617     * out on extra computing for many easy cases, we
1618     * do not combine it with the redundancy detection step which
1619     * costs a bit more.
1620     */
1621     amipd->routes[amipd->totroutes] = amipd->graphs[sg];
1622     amipd->routechildnum[amipd->totroutes] = amipd->graphchildnum[sg];
1623     amipd->totroutes++;
1624     }
1625     }
1626     if (amipd->totroutes < 2) {
1627     /* one child has all the routes, there are not
1628     * two independent paths to this merged descendant.
1629     * quit this descendant now.
1630 johnpye 399 */
1631 aw0a 1 continue; /* go on to nextnode */
1632     }
1633     #if (AMSTAT && 0)
1634     FPRINTF(ASCERR,"Routes to merge %d (ip %d): %d\n", nextnode,
1635     nextnode_ip, amipd->totroutes);
1636     #endif
1637     /* filter out redundant routes and redundant merges */
1638     /* amipd->enlp0 is a scratch enlp */
1639     NumpairClearList(amipd->enlp0);
1640    
1641     /* collect parents of nextnode that are in graph of i.
1642     * Do this by calculating the intersection of subgraph(i)
1643     * with parents(nextnode). Calculate the parents(nextnode)
1644     * list once in preprocessing.
1645     */
1646     parents = GetParentsList(nextnode_ip,amipd);
1647     assert(parents != NULL);
1648     #if (AMSTAT && 0)
1649     FPRINTF(ASCERR,"Parents of ip#%d\n",nextnode_ip);
1650     NLPWrite(NULL,parents);
1651     #endif
1652 johnpye 399 NumpairCalcIntersection(amip->subgraph, parents, amipd->enlp0);
1653 aw0a 1 amipd->totfinal = 0;
1654     #if CFI
1655     FPRINTF(ASCERR,"Checking merged ip# %d\n",nextnode_ip);
1656     #endif
1657     amipd->md.nextnode_ip = nextnode_ip;
1658     CalcFinalIndependentRoutes(
1659     &(amipd->md),
1660     amipd->enlp0,
1661     amipd->routes,
1662     amipd->enlp1,
1663     scratchpathlist,
1664     scratchpath,
1665     amipd->finalchildnum,
1666     amipd->routechildnum,
1667     &(amipd->totfinal),
1668     amipd->totroutes
1669     );
1670    
1671     if (amipd->totfinal < 2) {
1672     /* merges for nextnode are redundant in i. go to next */
1673     continue;
1674     }
1675    
1676     /*
1677     * Ok, so now we have a list of nonintersecting starting points
1678     * (direct children of i). Follow alphabetic path from each
1679     * to nextnode and record it. (Intersecting at i and nextnode,
1680     * but nowhere else in the intervening dag, is ok.)
1681     */
1682     gl_reset(scratchpathlist);
1683     #if (AMSTAT && 0)
1684     FPRINTF(ASCERR,"ip_node %d: merge %d (ip %d) in subgraph %d"
1685     " independent ways.\n",
1686     amip->ip_number,nextnode,nextnode_ip,amipd->totfinal);
1687     #endif /* amstat */
1688     for (sg = 0; sg < amipd->totfinal; sg++) {
1689     gl_reset(scratchpath);
1690     #if (AMSTAT && 0)
1691     FPRINTF(ASCERR,"Searching in child %lu.\n",amipd->finalchildnum[sg]);
1692     #endif /* amstat */
1693     AnonMergeFindPath(i,nextnode,amipd->finalchildnum[sg],scratchpath);
1694     /* save path found to pathlist */
1695     path = gl_copy(scratchpath);
1696     gl_append_ptr(scratchpathlist,path);
1697     }
1698     /* save pathlist to mergelist of i */
1699     pathlist = gl_copy(scratchpathlist);
1700     gl_append_ptr(scratchamlist,pathlist);
1701     }
1702     } /* endfor nextnode */
1703     /* save merges for this node i to IP */
1704     assert(amip->amlist == NULL);
1705     if (gl_length(scratchamlist) > 0) {
1706     amip->amlist = gl_copy(scratchamlist);
1707     amipd->num_iwithmerge++;
1708     amipd->num_mergelists += (int)gl_length(amip->amlist);
1709     } else {
1710     amip->amlist = NULL;
1711     }
1712     } /* done with this compound instance */
1713     }
1714    
1715     /* public stuff */
1716     /*
1717     * sanity checker variable.
1718     */
1719     static int g_ammarking = 0;
1720     /*
1721     * vp = Asc_AnonMergeMarkIPs(root);
1722     * VOIDPTR vp;
1723     * This function finds merged instances, anon or otherwise, and
1724     * records the merges at the scope most appropriate.
1725     * On return, all InterfacePointers in the tree of root are either
1726     * NULL or point to some data we understand.
1727     * When done using these IPs, the caller should call
1728 johnpye 399 * Asc_AnonMergeUnmarkIPs(vp);
1729 aw0a 1 * This function uses the push/pop protocol for ips, so ip data
1730     * of other clients may be lost if Unmark is not called properly.
1731     * Generally, both functions should be called from the same scope.
1732     *
1733     * ! ! Assumes that tmpnums are all 0 on entry, and leaves any
1734     * it has touched 0 on exit.
1735     *
1736 johnpye 399 * Does not record recursive merges, i.e. if a,b ARE_THE_SAME,
1737 aw0a 1 * don't record a.i,b.i ARE_THE_SAME. This is simple to detect
1738     * as a.i,b.i have the same childnumber/instanceparent pair.
1739     */
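/* A minimal calling sketch (illustrative only; 'root' stands for whatever
 * instance tree the caller wants classified, and i1, i2 for two instances
 * of the same formal type chosen by the caller):
 *
 *   VOIDPTR vp = Asc_AnonMergeMarkIPs(root);
 *   if (vp != NULL) {
 *     int cmp = Asc_AnonMergeCmpInstances(i1, i2);
 *     ... use cmp, Asc_AnonMergeWriteList(fp, i1), etc. ...
 *     Asc_AnonMergeUnmarkIPs(vp);
 *   }
 *
 * Mark and Unmark are paired in the same scope, as recommended above.
 */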
1740     VOIDPTR Asc_AnonMergeMarkIPs(struct Instance *root)
1741     {
1742     struct AnonMergeVisitInfo amvi; /* no pointers inside, so no malloc/free*/
1743     struct AnonMergeIPData *amipd;
1744    
1745     g_ammarking = 1;
1746     /* init before estimate of ips needed */
1747     AMVIInit(&amvi,root);
1748    
1749     /* zero anon flags (universal), mark number of
1750     * real parents to children in tmpnums, and census the formal types
1751     * for detecting anonymously universal instances.
1752     */
1753     #if (AMSTAT || AMBKDN)
1754     FPRINTF(ASCERR,"%ld init amvi done\n",clock());
1755     #endif
1756     SilentVisitInstanceTreeTwo(root,(VisitTwoProc)AnonMergeCountTree,1,0,&amvi);
1757    
1758     #if (AMSTAT || AMBKDN)
1759     FPRINTF(ASCERR,"%ld counttree done\n",clock());
1760     #endif
1761     /* mark the instances whose formal type has only one instance as 'locally' universal
1762     * in anonflags and set tmpnum to 0 for same.
1763     */
1764     AnonMergeMarkUniversals(&amvi);
1765     #if (AMSTAT || AMBKDN)
1766     FPRINTF(ASCERR,"%ld mark universals done\n",clock());
1767     #endif
1768    
1769     /* Set the tmpnum of anything with a 1 tmpnum(1 parent) to 0 tmpnum.
1770     * Set the tmpnums of anything with > 1 parent to the merged node
1771     * number. note that this forces us to use tmpnum instead of amip->nodenum
1772     * since there is an amip on any compound instance and any shared instance.
1773     * amvi.nip is number of ips we'll need and amvi.nim is the number of
1774     * shared instances for which we need to account the merges.
1775     */
1776     SilentVisitInstanceTreeTwo(root,(VisitTwoProc)AnonMergeLabelNodes,
1777     1,0,&amvi);
1778     /* Nodes are tmpnumbered 1..mergednodes for nodes of interest, not all
1779     * instances. If we were cpu fascists, we would merge this step with the
1780     * next one quite carefully.
1781     */
1782     #if (AMSTAT || AMBKDN)
1783     FPRINTF(ASCERR,"%ld label nodes done\n",clock());
1784     #endif
1785    
1786     #if (AMSTAT)
1787     AMCWriteCounts(&amvi);
1788     #endif /*amstat*/
1789    
1790     /* create AMIPs and other temporary memory */
1791     amipd = AMIPDInit(&amvi);
1792     if (amipd == NULL) {
1793     return NULL;
1794     }
1795    
1796     #if (AMSTAT || AMBKDN)
1797     FPRINTF(ASCERR,"%ld amipdinit done\n",clock());
1798     #endif
1799    
1800     #if (CHECKN)
1801     /* check the numbering for continuity */
1802     amipd->fp = fopen("/tmp/nodes","w+");
1803     SilentVisitInstanceTreeTwo(root,(VisitTwoProc)AMCheckN,1,0,amipd);
1804     fclose(amipd->fp);
1805     #endif
1806     /* do the magic bottom up. */
1807     SilentVisitInstanceTreeTwo(root,(VisitTwoProc)AnonMergeDetect,1,0,amipd);
1808 johnpye 399
1809 aw0a 1 #if (AMSTAT || AMBKDN)
1810     FPRINTF(ASCERR,"%ld mergedetect done\n\n",clock());
1811     #endif
1812     #if AMBKDN
1813     FPRINTF(ASCERR,"Instances with merges %d\n",amipd->num_iwithmerge);
1814     FPRINTF(ASCERR,"N-way merges recorded %d\n",amipd->num_mergelists);
1815     FPRINTF(ASCERR,"Merged instances %ld\n",amvi.nim);
1816     FPRINTF(ASCERR,"Instances expecting IP %ld\n",amvi.nip);
1817     FPRINTF(ASCERR,"IPs used %d\n",amipd->ipused);
1818     #endif
1819     return (VOIDPTR)amipd;
1820     }
1821    
1822     /*
1823     * return the comparison of merge lists from two distinct instances
1824     * first check for same instance (aml's equal), then
1825     * we must compare until a difference found.
1826     * This function must yield a partial order, ie a<b && b<c ==> a < c.
1827     * We compare at 4 levels, searching for the first difference:
1828     * 1) length of aml (number of merged instances in i)
1829     * 2) lengths of lists in aml (number of names for each merged descendant)
1830     * 3) lengths of paths in each pathlist.
1831     * 4) content of each pair of paths.
1832     */
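/* An illustrative case (not from any real model): two lists that each
 * record 3 merged descendants, with pathlist lengths 2,2,1 and matching
 * path lengths, tie through levels 1-3; the order is then decided at
 * level 4 by the first pair of paths whose contents differ, via
 * gl_compare_ptrs below.
 */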
1833     static
1834     int AnonMergeCmpMLists(CONST struct gl_list_t *aml1,
1835     CONST struct gl_list_t *aml2)
1836     {
1837     unsigned long len1, len2, len3, len4, pl, p;
1838     struct gl_list_t *pathlist1, *pathlist2, *path1, *path2;
1839     int cmp = 0;
1840    
1841     if (aml1==aml2) {
1842     /* includes NULL == NULL case. */
1843 johnpye 399 return 0;
1844 aw0a 1 }
1845     if (aml1 == NULL) {
1846     return -1;
1847     }
1848     if (aml2 == NULL) {
1849     return 1;
1850     }
1851     assert(aml1!=NULL);
1852     assert(aml2!=NULL);
1853     /* check for same number of merged descendants */
1854     len1 = gl_length(aml1);
1855     len2 = gl_length(aml2);
1856     if (len2 != len1) {
1857     return (len2 > len1) ? -1 : 1; /* the longer list compares greater */
1858     }
1859     /* foreach descendant, check same number of paths found */
1860     for (pl = 1; pl <= len1; pl++) {
1861     pathlist1 = (struct gl_list_t *)gl_fetch(aml1,pl);
1862     pathlist2 = (struct gl_list_t *)gl_fetch(aml2,pl);
1863     assert(pathlist1!=NULL);
1864     assert(pathlist2!=NULL);
1865     len2 = gl_length(pathlist1);
1866     len3 = gl_length(pathlist2);
1867     if (len3 != len2) {
1868     return (len3 > len2) ? -1 : 1; /* the longer list compares greater */
1869     }
1870     }
1871     /* for each descendant, check that paths of same length were found */
1872     for (pl = 1; pl <= len1; pl++) {
1873     pathlist1 = (struct gl_list_t *)gl_fetch(aml1,pl);
1874     pathlist2 = (struct gl_list_t *)gl_fetch(aml2,pl);
1875     len2 = gl_length(pathlist1);
1876     for (p = 1; p <= len2; p++) {
1877     path1 = (struct gl_list_t *)gl_fetch(pathlist1,p);
1878     path2 = (struct gl_list_t *)gl_fetch(pathlist2,p);
1879     assert(path1!=NULL);
1880     assert(path2!=NULL);
1881     len3 = gl_length(path1);
1882     len4 = gl_length(path2);
1883     if (len4 != len3) {
1884     return (len4 > len3) ? -1 : 1; /* the longer list compares greater */
1885     }
1886     }
1887     }
1888     /* for each descendant, check that paths of same content were found */
1889     for (pl = 1; pl <= len1; pl++) {
1890     pathlist1 = (struct gl_list_t *)gl_fetch(aml1,pl);
1891     pathlist2 = (struct gl_list_t *)gl_fetch(aml2,pl);
1892     len2 = gl_length(pathlist1);
1893     for (p = 1; p <= len2; p++) {
1894     path1 = (struct gl_list_t *)gl_fetch(pathlist1,p);
1895     path2 = (struct gl_list_t *)gl_fetch(pathlist2,p);
1896     cmp = gl_compare_ptrs(path1,path2);
1897     if (cmp != 0) {
1898     return cmp;
1899     }
1900     }
1901     }
1902    
1903     return 0;
1904     }
1905    
1906     /*
1907     * Returns the comparison of the merge information stored in two
1908     * instances, which must be of the same formal type and have
1909     * children of the same anonymous types. It doesn't make
1910     * sense to call the function on ATOM-like instances, since they
1911     * can have no deeper merged structures.
1912     * UNIVERSAL instances will always return 0. (Think about it.)
1913     * Objects which are part of UNIVERSAL instances or somehow
1914     * not supposed to have a list of merges will always return 2,
1915     * even if compared to themselves!
1916     * If the comparison is valid, it will return 0,1,-1.
1917     * instances without lists should be < instances with lists.
1918     */
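/* Caller-side sketch (a hypothetical caller, shown only to spell out the
 * return convention described above):
 *
 *   int cmp = Asc_AnonMergeCmpInstances(i1, i2);
 *   if (cmp == 2) {
 *     ... no meaningful comparison (UNIVERSAL parts or missing IP data) ...
 *   } else if (cmp == 0) {
 *     ... merge structures indistinguishable ...
 *   } else {
 *     ... cmp is -1 or 1: a consistent ordering of the two merge lists ...
 *   }
 */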
1919     int Asc_AnonMergeCmpInstances(CONST struct Instance *i1,
1920     CONST struct Instance *i2)
1921     {
1922     struct AnonMergeIP *amip1, *amip2;
1923     int cmp, flags1, flags2;
1924     assert(i1!=NULL);
1925     assert(i2!=NULL);
1926     assert(InstanceTypeDesc(i1) == InstanceTypeDesc(i2));
1927     assert(i1 != i2);
1928     flags1 = GetAnonFlags(i1);
1929     if (flags1 == (AMUFLAG | AMIPFLAG)) {
1930     return 0;
1931     }
1932     flags2 = GetAnonFlags(i2);
1933     if ((flags1 & AMIPFLAG) == 0 || (flags2 & AMIPFLAG) == 0 ) {
1934     return 2;
1935     }
1936     amip1 = (struct AnonMergeIP *)GetInterfacePtr(i1);
1937     amip2 = (struct AnonMergeIP *)GetInterfacePtr(i2);
1938     if (amip1 == NULL || amip2 == NULL) {
1939     return 2;
1940     }
1941     cmp = AnonMergeCmpMLists(amip1->amlist, amip2->amlist);
1942     return cmp;
1943     }
1944    
1945     /*
1946     * frees data structures returned by Asc_AnonMergeMarkIPs.
1947     */
1948     void Asc_AnonMergeUnmarkIPs(VOIDPTR vp)
1949     {
1950     struct AnonMergeIPData *amipd;
1951     if (g_ammarking == 0) {
1952     Asc_Panic(2,"Asc_AnonMergeUnmarkIPs","Called without marks current");
1953     }
1954     amipd = (struct AnonMergeIPData *)vp;
1955     #if AMSTAT
1956     amipd->fp = fopen("/tmp/amipd","w+");
1957     SilentVisitInstanceTreeTwo(amipd->root,(VisitTwoProc)AnonMergeLogIP,
1958     1,0,amipd);
1959     FPRINTF(amipd->fp,"\n######\n");
1960     #endif
1961     PopInterfacePtrs(amipd->oldips,
1962     (IPDeleteFunc)AnonMergeDestroyIP,
1963     (VOIDPTR)amipd);
1964     assert(amipd->ipused==amipd->ipback);
1965     #if AMSTAT
1966     fclose(amipd->fp);
1967     #endif
1968     DestroyAMIPD(amipd);
1969     g_ammarking = 0;
1970     }
1971    
1972     void Asc_AnonMergeWriteList(FILE *fp, struct Instance *i)
1973     {
1974     struct AnonMergeIP *amip;
1975     if (g_ammarking == 0) {
1976     Asc_Panic(2,"Asc_AnonMergeWriteList","Called without marks current");
1977     }
1978     assert(i!= NULL);
1979     if (IsCompoundInstance(i)==0) {
1980     FPRINTF(fp,"NONE\n");
1981     return;
1982     }
1983     if (GetAnonFlags(i)&AMIPFLAG) {
1984     amip = (struct AnonMergeIP *)GetInterfacePtr(i);
1985     if (amip->amlist != NULL) {
1986     WriteMList(fp, amip->amlist, i);
1987     }
1988     }
1989     }
1990    
1991    
