/[ascend]/trunk/ascend/general/list.c
ViewVC logotype

Contents of /trunk/ascend/general/list.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2697 - (show annotations) (download) (as text)
Thu Mar 7 00:51:07 2013 UTC (9 years, 4 months ago) by jpye
File MIME type: text/x-csrc
File size: 34206 byte(s)
reformatting only
1 /* ASCEND modelling environment
2 Copyright (C) 1990, 1993, 1994 Thomas Guthrie Epperly
3 Copyright (C) 2006 Carnegie Mellon University
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2, or (at your option)
8 any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <http://www.gnu.org/licenses/>.
17 *//* @file
18 Implementation of List Module
19 *//*
20 Tom Epperly
21 Last in CVS $Revision: 1.3 $ $Date: 1998/01/06 12:06:55 $ $Author: ballan $
22 */
23
24 #include <stdarg.h>
25 #include "platform.h"
26 #include <ascend/utilities/error.h>
27 #include "panic.h"
28 #include "ascMalloc.h"
29 #include "list.h"
30 #if LISTUSESPOOL
31 #include "pool.h"
32 #endif
33 #include "mathmacros.h"
34
35 #define address(a,b) (((unsigned long)(a) + ((b)*sizeof(VOIDPTR))-sizeof(VOIDPTR)))
36
37 #define GL_LENGTH(l) ((l)->length)
38 #define LOWCAPACITY 2
39 /* Shortest list we will allow. It must not be 0! */
40 static const unsigned long MIN_INCREMENT = 8;
41 /* Minimum number of elements to increase when expanding list. */
42
43 /*
44 * Be a bit more informative when dying on a list range error.
45 * No printf we know of returns -2, so the print will always be
46 * just before assertion acts to handle the range error.
47 */
48 #define ASSERTRANGE(list,pos,n) \
49 asc_assert((pos >= 1) && (pos <= GL_LENGTH(list)));
50
51 #ifndef MOD_REALLOC
52 #define REALLOCDEBUG FALSE
53 #else
54 #define REALLOCDEBUG TRUE
55 #endif
56 /* if REALLOCDEBUG TRUE, then we use our own pricey version of realloc
57 * instead of the standard library version.
58 * This is to test for a bug in purify/os interactions.
59 */
60 /*
61 * We have modified gl_destroy to recycle lists of up to a certain size
62 * MAXRECYCLESIZE.
63 * The data structure for holding the recycled lists is a vector
64 * of pointers to gl_list_t. Item 1 in a recycled list is the pointer
65 * to the next recycled list, which may be NULL. This is a change
66 * from Tom's initial implementation which used a dense array to store
67 * pointers to lists.
68 *
69 * The number of recycled lists of a particular
70 * capacity i to be allowed is found in the AllowedContents[i].
71 *
72 * It is intended that RecycledContents[i] is the current count of
73 * lists of capacity i. It is a coincidence of implementation that
74 * when RecycledContents[i] <=0 RecycledList[i]==NULL.
75 *
76 * If LISTRECYCLERDEBUG,
77 * HighWaterMark[i] is the most of capacity i ever recycled.
78 * ListsDestroyed[i] is likewise lists not recycled because full.
79 */
80 #define LISTRECYCLERDEBUG 0
81 #define MAXRECYCLESIZE 500 /* the maximum list size to be recycled */
82 #define MAXRECYCLESMALLITEMS 300/* the maximum number of size < LARGEITEM */
83 #define MAXRECYCLELARGEITEMS 25 /* the maximum number of each large size */
84 #define MAXRECYCLESEARCH 2 /* how much larger a list will be searched for */
85 /* search performance. 0 and 2 are best values, 2 slightly better. alphaosf2 */
86 #define LARGEITEM 21 /* smallest large item */
87 /* note: baa, 3/8/96 LARGEITEM and MAXRECYCLELARGEITEMS not in use yet */
88 static struct gl_list_t *RecycledList[MAXRECYCLESIZE+1];
89 static int RecycledContents[MAXRECYCLESIZE+1];
90 static int AllowedContents[MAXRECYCLESIZE+1];
91 /*
92 * It is assumed that these arrays are initialized to zero according to the
93 * rules given in "The C Programming Language", Brian W. Kernighan and Dennis
94 * M. Richie, second edition.
95 */
96 #if LISTRECYCLERDEBUG
97 static int HighWaterMark[MAXRECYCLESIZE+1];
98 /* most ever in recycle list of size i */
99 static int ListsDestroyed[MAXRECYCLESIZE+1];
100 /* number of lists destroyed because Allowed was exceeded of size i */
101
102 static int ListsDestroyedTotal = 0;
103 /* number of lists destroyed because Allowed was exceeded all sizes */
104 static int NewListsUsed = 0;
105 /* calls to gl_create that required malloc */
106 static int RecycledListsUsed = 0;
107 /* calls to gl_create that used recycle */
108 #endif
109
110 /* If this function is not called, no recycling will take place
111 * because all the Alloweds will be at default of 0.
112 * This initializes the AllowedContents array, and nothing else.
113 * General limits are set based on MAXRECYCLE*ITEMS.
114 * Specific ones are done after and may be tuned.
115 */
116 void gl_init(void)
117 {
118 int i;
119 if (AllowedContents[0] &&
120 AllowedContents[0] != MAXRECYCLESMALLITEMS) {
121 ERROR_REPORTER_HERE(ASC_PROG_ERR,"gl_init recalled after data corrupted!\n");
122 return;
123 /* somebody called us twice, with intervening nonsense. punt */
124 }
125 for (i = LOWCAPACITY; i < LARGEITEM; i++) {
126 AllowedContents[i] = MAXRECYCLESMALLITEMS;
127 }
128 for (i = LARGEITEM; i <= MAXRECYCLESIZE; i++) {
129 AllowedContents[i] = MAXRECYCLELARGEITEMS;
130 }
131 #define LARGESYSTEM 1
132 #if LARGESYSTEM
133 AllowedContents[2] = 40000;
134 AllowedContents[3] = 20000;
135 AllowedContents[4] = 8000;
136 AllowedContents[5] = 2000;
137 AllowedContents[6] = 2000;
138 AllowedContents[7] = 5000;
139 AllowedContents[8] = 1000;
140 #else
141 AllowedContents[2] = 10000;
142 AllowedContents[3] = 5000;
143 AllowedContents[4] = 5000;
144 AllowedContents[7] = 5000;
145 #endif
146 AllowedContents[23] = 50;
147 }
148
149 #if LISTUSESPOOL
150 static pool_store_t g_list_head_pool = NULL;
151 /* global for our memory manager */
152 #define POOL_ALLOCHEAD (struct gl_list_t *)(pool_get_element(g_list_head_pool))
153 /* get a token. Token is the size of the struct struct gl_list_t */
154 #define POOL_FREEHEAD(p) (pool_free_element(g_list_head_pool,((void *)p)))
155 /* return a struct gl_list_t */
156
157 #define GLP_LEN 10
158 #ifdef __alpha
159 #define GLP_WID 72
160 #else
161 #define GLP_WID 127
162 #endif
163 /* retune rpwid if the size of pending_t changes dramatically */
164 #define GLP_ELT_SIZE (sizeof(struct gl_list_t))
165 #define GLP_MORE_ELTS 10
166 /*
167 * Number of slots filled if more elements needed.
168 * So if the pool grows, it grows by GLP_MORE_ELTS*GLP_WID elements at a time.
169 */
170 #define GLP_MORE_BARS 500
171 /* This is the number of pool bar slots to add during expansion.
172 not all the slots will be filled immediately. */
173 #else
174
175 /* we default back to malloc/free if LISTUSESPOOL == FALSE */
176 #define POOL_ALLOCHEAD ((struct gl_list_t*)ascmalloc(sizeof(struct gl_list_t)))
177 #define POOL_FREEHEAD(p) ascfree((char *)(p))
178
179 #endif
180
181
182 /* This function is called at compiler startup time and destroy at shutdown. */
/* Create the global pool from which list heads are drawn.
 * Called once at startup; panics if called twice or if the pool store
 * cannot be allocated.  A build without LISTUSESPOOL just logs an error. */
void gl_init_pool(void){
#if LISTUSESPOOL
	if (g_list_head_pool != NULL) {
		ASC_PANIC("ERROR: gl_init_pool called twice.\n");
	}
	g_list_head_pool = pool_create_store(GLP_LEN, GLP_WID, GLP_ELT_SIZE,
		GLP_MORE_ELTS, GLP_MORE_BARS);
	if (g_list_head_pool == NULL) {
		ASC_PANIC("ERROR: gl_init_pool unable to allocate pool.\n");
	}
#else
	ERROR_REPORTER_HERE(ASC_PROG_ERR,"list.[ch] built without pooling of overheads\n");
#endif
}
197
198
/* Tear down the list-head pool created by gl_init_pool.
 * The recycler is emptied first so no recycled heads still point into
 * the store being destroyed.  Safe to call when no pool exists. */
void gl_destroy_pool(void){
#if LISTUSESPOOL
	if (g_list_head_pool==NULL) return;
	gl_emptyrecycler(); /* deallocate data in recycled lists, zero RecycledContents[] */
	pool_clear_store(g_list_head_pool);
	pool_destroy_store(g_list_head_pool);
	g_list_head_pool = NULL;
#else
	ERROR_REPORTER_HERE(ASC_PROG_ERR,"list.[ch] built without pooling of overheads\n");
#endif
}
210
211
212 int gl_pool_initialized(void){
213 #if LISTUSESPOOL
214 return (g_list_head_pool == NULL) ? FALSE : TRUE;
215 #else
216 ERROR_REPORTER_HERE(ASC_PROG_ERR,"list.[ch] built without pooling of overheads\n");
217 return TRUE;
218 #endif
219 }
220
221
/* Print a summary of the list-head pool to stream f.
 * Bug fix: the original fell through after printing "empty" and passed
 * the NULL pool to pool_print_store; now it returns early instead. */
void gl_report_pool(FILE *f){
#if LISTUSESPOOL
	if (g_list_head_pool==NULL) {
		FPRINTF(f,"ListHeadPool is empty\n");
		return; /* nothing to hand to pool_print_store */
	}
	FPRINTF(f,"ListHeadPool ");
	pool_print_store(f,g_list_head_pool,0);
#else
	FPRINTF(f,"list.[ch] built without pooling of overheads\n");
#endif
}
232
233
234 /*
235 * Guess of the capacity of the list. If the
236 * list capacity needs to be expanded it will
237 * be. It is good to be close though
238 */
/* Create a list able to hold at least `capacity` pointers without
 * growing.  A recycled head of capacity in
 * [capacity, capacity+MAXRECYCLESEARCH] is reused when available;
 * otherwise a fresh head and data array are allocated.
 * Returns NULL only when allocation fails. */
struct gl_list_t *gl_create(unsigned long int capacity){
	struct gl_list_t *new;
	unsigned long i;
	/* never build a list smaller than LOWCAPACITY */
	i = capacity = MAX((unsigned long)LOWCAPACITY,capacity);
	/* hunt the recycler for a list of this capacity or slightly larger */
	while(i <= capacity+MAXRECYCLESEARCH &&
	      i <= MAXRECYCLESIZE){
		asc_assert(RecycledContents[i] >= 0 &&
			RecycledContents[i] <= AllowedContents[i]);
		if (RecycledContents[i] > 0){
#if LISTRECYCLERDEBUG
			RecycledListsUsed++;
#endif
			new = RecycledList[i]; /* fetch last recycled list */
			asc_assert(new != NULL);
			RecycledList[i] = (struct gl_list_t *)new->data[0]; /* point to next */
			--RecycledContents[i]; /* reduce list count for this size */
			new->length = 0;
			new->flags = (unsigned int)(gsf_SORTED | gsf_EXPANDABLE);
#ifndef NDEBUG
			/* scrub the recycler's LIFO link so it cannot leak out as data */
			if (i>0) new->data[0]=NULL;
#endif
			return new;
		}
		i++;
	}
	/* recycler miss: allocate a fresh head (pool or malloc per build) */
	/*FPRINTF(ASCERR,"ABOUT TO ALLOCHEAD\n");*/
	new=POOL_ALLOCHEAD;
	/*FPRINTF(ASCERR,"GL_CREATE: COULDN'T ALLOC SIZE %d, new = %p\n",sizeof(struct gl_list_t),POOL_ALLOCHEAD);*/
	if (new != NULL) {
		new->length = 0;
#if LISTRECYCLERDEBUG
		NewListsUsed++;
#endif
#ifndef NDEBUG
		/* debug builds zero the data array to make stale reads obvious */
		new->data = ASC_NEW_ARRAY_CLEAR(VOIDPTR,capacity);
#else
		new->data = (VOIDPTR *)ascmalloc((unsigned)capacity*sizeof(VOIDPTR));
#endif
		new->capacity = capacity;
		new->flags = (unsigned int)(gsf_SORTED | gsf_EXPANDABLE);
		return new;
	}
	else {
		ERROR_REPORTER_HERE(ASC_PROG_ERR,"UNABLE TO ALLOCATE MEMORY FOR LIST\n");
		return NULL;
	}
}
286
287
/* ascfree() every stored element, then recycle or destroy the list
 * itself.  Use only when the list owns its pointers; gl_destroy is the
 * variant that leaves the elements alone.  Heads whose capacity still
 * has recycler quota are pushed onto the per-capacity LIFO (data[0]
 * becomes the link); otherwise head and data array are released. */
void gl_free_and_destroy(struct gl_list_t *list){
	unsigned long c;
	if (list == NULL) return;
#if LISTUSESPOOL
	AssertMemory(list);
#else
	AssertAllocatedMemory(list,sizeof(struct gl_list_t));
#endif
	for(c=0;c<GL_LENGTH(list);c++) {
		AssertContainedMemory(list->data[c],0);
		ascfree(list->data[c]);
	}
#ifndef NDEBUG
	/* debug builds scrub the data array so dangling pointers are obvious */
	ascbzero(list->data,sizeof(VOIDPTR)*list->capacity);
#endif
	c = list->capacity; /* gonna use it a lot */
	if (c <= MAXRECYCLESIZE && RecycledContents[c] < AllowedContents[c]) {
		asc_assert(RecycledContents[c] >= 0);
		/* push list into LIFO list of lists, and bump the count. */
		list->data[0] = RecycledList[c];
		RecycledList[c] = list;
		RecycledContents[c]++;
#if LISTRECYCLERDEBUG
		if (RecycledContents[c] > HighWaterMark[c]) {
			HighWaterMark[c] = RecycledContents[c];
		}
#endif
	}else{
#if LISTRECYCLERDEBUG
		if (c<=MAXRECYCLESIZE) {
			ListsDestroyed[c]++;
		}
#endif
		ascfree(list->data);
		list->data = NULL;
		list->capacity = list->length = 0;
		POOL_FREEHEAD(list);
	}
}
327
328
/* Recycle or destroy the list head and its data array.  The POINTED-TO
 * elements are NOT freed -- use gl_free_and_destroy for that.  Heads
 * whose capacity still has recycler quota are pushed onto the
 * per-capacity LIFO (data[0] becomes the link); otherwise everything
 * is released. */
void gl_destroy(struct gl_list_t *list){
	unsigned long c;
	if (list == NULL) return;
#if LISTUSESPOOL
	AssertMemory(list);
#else
	AssertAllocatedMemory(list,sizeof(struct gl_list_t));
#endif
	c = list->capacity; /* gonna use it a lot */
	if (c <= MAXRECYCLESIZE && RecycledContents[c] < AllowedContents[c]) {
		asc_assert(RecycledContents[c] >= 0);
		/* push list into LIFO list of lists, and bump the count. */
		list->data[0] = RecycledList[c];
		RecycledList[c] = list;
		RecycledContents[c]++;
#if LISTRECYCLERDEBUG
		if (RecycledContents[c] > HighWaterMark[c]) {
			HighWaterMark[c] = RecycledContents[c];
		}
#endif
	} else {
#if LISTRECYCLERDEBUG
		if (c<=MAXRECYCLESIZE) {
			ListsDestroyed[c]++;
		}
#endif
#ifndef NDEBUG
		/* scrub first slot in debug builds before the array is freed */
		if (list->capacity>0) list->data[0]=NULL;
#endif
		ascfree(list->data);
		list->data = NULL;
		list->capacity = list->length = 0;
		POOL_FREEHEAD(list);
	}
}
364
365
366 VOIDPTR gl_fetchF(CONST struct gl_list_t *list, unsigned long int pos){
367 asc_assert(NULL != list);
368 ASSERTRANGE(list,pos,"gl_fetchF");
369 return list->data[pos-1];
370 }
371
372
373 #define SORTED_OFF(l) (l)->flags &= (~gsf_SORTED)
374 #define EXPAND_OFF(l) (l)->flags &= (~gsf_EXPANDABLE)
375
376
377 /* for internal use only, where we can verify that STUFF is correct
378 * and it is SOMEONE else job to maintain the integrity of
379 * flags.sorted and list-> length.
380 */
381 #ifdef NDEBUG
382 #define GL_STORE(list,pos,ptr) ((list)->data[(pos)-1] = (ptr))
383 #else
384 #define GL_STORE(list,pos,ptr) gl_store((list),(pos),(ptr))
385 #endif
386 void gl_store(struct gl_list_t *list, unsigned long int pos, VOIDPTR ptr){
387 asc_assert(NULL != list);
388 if (GL_LENGTH(list)>1) SORTED_OFF(list);
389 ASSERTRANGE(list,pos,"gl_store");
390 list->data[pos-1] = ptr;
391 }
392
393
394 #if REALLOCDEBUG
395 /* this we know to not leak if MOD_REALLOC is defined */
396 #define DATAREALLOC(l,incr) \
397 ascreallocPURE((char *)(l)->data, \
398 (size_t)(((l)->capacity-(incr))*sizeof(VOIDPTR)), \
399 (size_t)((l)->capacity*sizeof(VOIDPTR)))
400 #else
401 /* this we think might leak under hpux, sunos4. more like purify lies. */
402 #define DATAREALLOC(l,incr) \
403 ascrealloc((l)->data,(unsigned)((l)->capacity*sizeof(VOIDPTR)))
404 #endif
405 /* actually, when all said and done, a static structure in solver/analyze.c
406 * was being left out of the cleanup process at shutdown.
407 */
408
409
/* Grow list->data by ~50% (at least MIN_INCREMENT slots).
 * On realloc failure the old data block is kept (realloc leaves it
 * valid) and the capacity is rolled back, so the list stays usable. */
static void gl_expand_list(struct gl_list_t *list){
	VOIDPTR *tmp;
	unsigned long increment;
	increment = (list->capacity*50)/100; /* 10% is too small. try 50% baa */
	increment = MAX(MIN_INCREMENT,increment);
	/* capacity must grow BEFORE the realloc: DATAREALLOC reads it */
	list->capacity += increment;
	tmp = (VOIDPTR *) DATAREALLOC(list,increment);

	if (tmp==NULL) {
		ERROR_REPORTER_HERE(ASC_PROG_ERR,"gl_expand_list: memory allocation failed");
		list->capacity -= increment; /* undo the speculative growth */
	} else {
		list->data = tmp;
	}
}
425
426
427 /* changes only the capacity of the list, and possibly data pointer */
428 static void gl_expand_list_by(struct gl_list_t *list,unsigned long addlen)
429 {
430 if (!addlen) return;
431 addlen = MAX(MIN_INCREMENT,addlen); /* expand by at least 8 */
432 list->capacity += addlen;
433 list->data = (VOIDPTR *)DATAREALLOC(list,addlen);
434
435 if (list->data==NULL)ERROR_REPORTER_HERE(ASC_PROG_ERR,"gl_expand_list_by: memory allocation failed\n");
436 asc_assert(list->data!=NULL);
437 }
438
439
440 void gl_append_ptr(struct gl_list_t *list, VOIDPTR ptr){
441 asc_assert((NULL != list) && (0 != gl_expandable(list)));
442 if (list->length > 0) SORTED_OFF(list);
443 if (++(list->length) > list->capacity) /* expand list capacity*/
444 gl_expand_list(list);
445 list->data[list->length-1] = ptr;
446 }
447
448
449 void gl_append_list(struct gl_list_t *extendlist, struct gl_list_t *list){
450 register unsigned long c,len,oldlen,newlen;
451
452 asc_assert((NULL != extendlist) &&
453 (0 != gl_expandable(extendlist)) &&
454 (NULL != list));
455 if (extendlist->length >0 || list->length > 0) {
456 SORTED_OFF(extendlist);
457 }
458 newlen = extendlist->length + list->length;
459 if (newlen > extendlist->capacity) {
460 gl_expand_list_by(extendlist,newlen - extendlist->capacity);
461 }
462 len = list->length;
463 oldlen = extendlist->length;
464 extendlist->length += len;
465 for (c=0;c<len;c++) {
466 extendlist->data[oldlen] = list->data[c];
467 oldlen++;
468 }
469 }
470
471
472 void gl_fast_append_ptr(struct gl_list_t *list, VOIDPTR ptr){
473 asc_assert(NULL != list);
474 SORTED_OFF(list);
475 list->data[list->length] = ptr;
476 ++(list->length);
477 }
478
479
480 unsigned long gl_safe_length(CONST struct gl_list_t *list){
481 if (list !=NULL) {
482 return list->length;
483 } else {
484 return 0L;
485 }
486 }
487
488
489 unsigned long gl_lengthF(CONST struct gl_list_t *list){
490 asc_assert(NULL != list);
491 return list->length;
492 }
493
494
495 unsigned long gl_capacity(CONST struct gl_list_t *list){
496 if (list==NULL) return 0;
497 return list->capacity;
498 }
499
500
501 int gl_sorted(CONST struct gl_list_t *list){
502 asc_assert(NULL != list);
503 return (int)(list->flags & gsf_SORTED);
504 }
505
506
#if LISTIMPLEMENTED
/* I believe this function is ok */
/**
	This function will choose a pivot. It can actually return any
	number between and including upper and lower. However, the goal
	is to return a pivot that will not cause Quick Sort to have
	O(n^2) behavior. This returns the value rather than the address
	of the pivot.

	The heuristic I am choosing is to pick the middle of three test
	sites which are upper,lower, and (upper+lower)/2.
*/
/* NOTE(review): dead code unless LISTIMPLEMENTED is defined.  The
 * relational comparisons of gl_fetch results compare raw pointer
 * values, which is only meaningful when elements live in one array --
 * confirm before ever enabling this. */
static void *gl_choose_ptrpivot(struct gl_list_t *list
	, unsigned long int lower, unsigned long int upper
)
{
	unsigned long middle;
	middle = (upper+lower)/2;
	if(gl_fetch(list,middle) > gl_fetch(list,lower)) {
		/* middle > lower */
		if(gl_fetch(list,upper) > gl_fetch(list,middle)) {
			/* middle > lower && upper > middle */
			return gl_fetch(list,middle);
		}else{
			/* middle > lower && middle >= upper */
			if(gl_fetch(list,upper) > gl_fetch(list,lower)) {
				/* middle > lower && middle >= upper && upper > lower */
				return gl_fetch(list,upper);
			}else{
				/* middle > lower && middle >= upper && lower >= upper */
				return gl_fetch(list,lower);
			}
		}
	}else{
		/* lower >= middle */
		if(gl_fetch(list,upper) > gl_fetch(list,lower)) {
			/* lower >= middle && upper > lower */
			return gl_fetch(list,lower);
		}else{
			/* lower >= middle && lower >= upper */
			if(gl_fetch(list,middle) > gl_fetch(list,upper)) {
				/* lower >= middle && lower >= upper && middle > upper */
				return gl_fetch(list,middle);
			}else{
				/* lower >= middle && lower >= upper && upper >= middle */
				return gl_fetch(list,upper);
			}
		}
	}
}
#endif
558
559 /**
560 This function will choose a pivot. It can actually return any
561 number between and including upper and lower. However, the goal
562 is to return a pivot that will not cause Quick Sort to have
563 O(n^2) behavior.
564
565 The heuristic I am choosing is to pick the middle of three test
566 sites which are upper,lower, and (upper+lower)/2.
567 */
/* Median-of-three pivot selection for gl_qsort: returns the 1-based
 * INDEX of the median of list[lower], list[middle], list[upper]
 * under func (not the element itself). */
static unsigned long gl_choose_pivot(struct gl_list_t *list
	, unsigned long int lower, unsigned long int upper, CmpFunc func
){
	unsigned long middle;
	middle = (upper+lower)/2;
	if((*func)(gl_fetch(list,middle),gl_fetch(list,lower)) > 0) {
		/* middle > lower */
		if((*func)(gl_fetch(list,upper),gl_fetch(list,middle)) > 0) {
			/* middle > lower && upper > middle */
			return middle;
		}else{
			/* middle > lower && middle >= upper */
			if ((*func)(gl_fetch(list,upper),gl_fetch(list,lower)) > 0) {
				/* middle > lower && middle >= upper && upper > lower */
				return upper;
			}else{
				/* middle > lower && middle >= upper && lower >= upper */
				return lower;
			}
		}
	}else{
		/* lower >= middle */
		if ((*func)(gl_fetch(list,upper),gl_fetch(list,lower)) > 0) {
			/* lower >= middle && upper > lower */
			return lower;
		}else{
			/* lower >= middle && lower >= upper */
			if ((*func)(gl_fetch(list,middle),gl_fetch(list,upper)) > 0) {
				/* lower >= middle && lower >= upper && middle > upper */
				return middle;
			}else{
				/* lower >= middle && lower >= upper && upper >= middle */
				return upper;
			}
		}
	}
}
605
606
607 /* do not export. makes rosy assumptions. callers of this function
608 * are responsible for making sure list.sorted is set appropriately.
609 */
610 static void gl_swap(struct gl_list_t *list
611 , unsigned long int i, unsigned long int j
612 ){
613 VOIDPTR temp;
614 temp = gl_fetch(list,i);
615 GL_STORE(list,i,gl_fetch(list,j));
616 GL_STORE(list,j,temp);
617 }
618
619
#if LISTIMPLEMENTED
/* this function is probably ok */
/* NOTE(review): dead code unless LISTIMPLEMENTED is defined.  The
 * relational pointer comparisons below are only meaningful for
 * elements within one array -- confirm before enabling. */
void gl_upsort(struct gl_list_t *list,
		unsigned long int lower,
		unsigned long int upper)
{
	register unsigned long i,j;
	VOIDPTR pivot_element;
	j = upper;
	i = lower;
	pivot_element = gl_choose_ptrpivot(list,lower,upper);
	do {
		/* partition by raw pointer value, ascending */
		while (pivot_element > gl_fetch(list,i)) i += 1;
		while (gl_fetch(list,j) > pivot_element) j -= 1;
		if (i <= j) {
			register VOIDPTR temp;
			temp = gl_fetch(list,i);
			GL_STORE(list,i,gl_fetch(list,j));
			GL_STORE(list,j,temp);
			i++; j--;
		}
	} while(i <= j);
	if (lower < j) gl_upsort(list,lower,j);
	if (i < upper) gl_upsort(list,i,upper);
}

/* this function is not ok */
/* NOTE(review): references `func`, which is not declared or passed in
 * -- this will not compile if LISTIMPLEMENTED is ever enabled. */
void gl_downsort(struct gl_list_t *list,
		unsigned long int lower,
		unsigned long int upper)
{
	unsigned long pivot,i,j;
	VOIDPTR pivot_element;
	j = upper;
	i = lower;
	pivot_element = gl_choose_ptrpivot(list,lower,upper);
	do {
		while ((*func)(pivot_element,gl_fetch(list,i))>0) i += 1;
		while ((*func)(gl_fetch(list,j),pivot_element)>0) j -= 1;
		if (i <= j) {
			gl_swap(list,i++,j--);
		}
	} while(i <= j);
	if (lower < j) gl_downsort(list,lower,j);
	if (i<upper) gl_downsort(list,i,upper);
}
#endif
667
/* Recursive quicksort (Hoare partition) of list[lower..upper],
 * 1-based, ordered by func.  The pivot is chosen by median-of-three
 * to avoid the O(n^2) already-sorted case. */
static
void gl_qsort(struct gl_list_t *list, unsigned long int lower
	, unsigned long int upper, CmpFunc func
){
	unsigned long pivot,i,j;
	VOIDPTR pivot_element;
	j = upper;
	i = lower;
	pivot = gl_choose_pivot(list,lower,upper,func);
	pivot_element = gl_fetch(list,pivot);
	do{
		/* scan inward past elements already on the correct side */
		while((*func)(pivot_element,gl_fetch(list,i))>0) i += 1;
		while((*func)(gl_fetch(list,j),pivot_element)>0) j -= 1;
		if(i <= j){
			gl_swap(list,i++,j--);
		}
	}while(i <= j);
	/* recurse on both partitions; empty partitions are skipped */
	if(lower < j) gl_qsort(list,lower,j,func);
	if(i<upper)gl_qsort(list,i,upper,func);
}
688
689
#if LISTIMPLEMENTED
/* ok once its children are ok */
/* NOTE(review): dead code unless LISTIMPLEMENTED is defined. */
void gl_ptr_sort(struct gl_list_t *list, int increasing)
{
	if (GL_LENGTH(list) > 1) {
		if (increasing) {
			gl_upsort(list,1,GL_LENGTH(list));
		} else {
			gl_downsort(list,1,GL_LENGTH(list));
		}
	}
	list->flags |= gsf_SORTED;
}
#endif


#if LISTIMPLEMENTED
/* broken */
/* NOTE(review): unnamed parameters in a definition are not valid C;
 * dead code unless LISTIMPLEMENTED is defined. */
void gl_insert_ptr_sorted(struct gl_list_t *,VOIDPTR,int){}
#endif
710
711
712 void gl_sort(struct gl_list_t *list, CmpFunc func){
713 asc_assert((NULL != list) && (NULL != func));
714 if (GL_LENGTH(list) > 1) {
715 gl_qsort(list,1,GL_LENGTH(list),func);
716 }
717 list->flags |= gsf_SORTED;
718 }
719
720
/* Insert ptr into a sorted, expandable list so it stays sorted per
 * func.  The slot is found by binary search and the tail is shifted up
 * one place.  Calling this on an unsorted list warns, appends, and
 * re-sorts the whole list instead. */
void gl_insert_sorted(struct gl_list_t *list
	, VOIDPTR ptr, CmpFunc func
){
	int comparison;
	register unsigned long lower,upper,search=0L;

	asc_assert((NULL != list) &&
	           (0 != gl_expandable(list)) &&
	           (NULL != func));
	if(list->flags & gsf_SORTED) {
		if(GL_LENGTH(list)==0) {
			/* a one-element list is trivially sorted */
			gl_append_ptr(list,ptr);
			list->flags |= gsf_SORTED;
			return;
		}
		/* grow length by one NOW; the new last slot is scratch space and
		 * is excluded from the binary search below */
		if(++list->length > list->capacity) /* expand list capacity */
			gl_expand_list(list);
		lower = 1;
		upper = GL_LENGTH(list)-1; /* i.e. the pre-insert length */
		while (lower<=upper) {
			search = ( lower + upper) >> 1; /* divide by two */
			comparison = (*func)(gl_fetch(list,search),ptr);
			if (comparison==0) {
				/* equal element found: insert immediately after it */
				/* make room */
				for(lower=GL_LENGTH(list);lower>search;lower--)
					GL_STORE(list,lower,gl_fetch(list,lower-1));
				/* store it */
				GL_STORE(list,search+1,ptr);
				list->flags |= gsf_SORTED;
				return;
			}
			if (comparison < 0) lower = search + 1;
			else upper = search - 1;
		}
		/* no equal element: `search` holds the last probe; insert before
		 * or after it depending on the final comparison */
		if ((*func)(gl_fetch(list,search),ptr) > 0) {
			for(lower=GL_LENGTH(list);lower>search;lower--) {
				GL_STORE(list,lower,gl_fetch(list,lower-1));
			}
			GL_STORE(list,search,ptr);
		}else{
			for(lower=GL_LENGTH(list);lower>(search+1);lower--) {
				GL_STORE(list,lower,gl_fetch(list,lower-1));
			}
			GL_STORE(list,search+1,ptr);
		}
		/* GL_STORE clears the sorted flag; restore it */
		list->flags |= gsf_SORTED;
	}else{
		ERROR_REPORTER_HERE(ASC_PROG_WARNING,"gl_insert_sorted called on unsorted list -- sorting list now.");
		gl_append_ptr(list,ptr);
		gl_sort(list,func);
	}
}
773
774
/* Apply func to every element in position order.
 * Release builds touch data[] directly for speed; debug builds go
 * through gl_fetch so each access is range-checked. */
void gl_iterate(struct gl_list_t *list, void (*func) (VOIDPTR)){
#ifdef NDEBUG
	register unsigned long length,counter;
	length = GL_LENGTH(list);
	for (counter=0;counter<length;counter++)
		(*func)(list->data[counter]);
#else
	unsigned long length,counter;
	asc_assert(NULL != list);
	asc_assert(NULL != func);
	length = GL_LENGTH(list);
	for (counter=1;counter<=length;counter++)
		(*func)(gl_fetch(list,counter));
#endif
}
790
791
792 /*
793 this will probably need casts to compile or an intermediate
794 void *array variable
795 this function is on the critical path in the compiler, or
796 should be, so it is highly tweaked here.
797 */
/* Locate `match` by pointer identity; returns its 1-based position or
 * 0 when absent.  Sorted lists longer than SHORTSEARCH are binary
 * searched by raw pointer value (`increasing` gives the assumed
 * direction of the ordering); everything else is scanned linearly. */
unsigned long gl_ptr_search(CONST struct gl_list_t *list
	, CONST VOIDPTR match, int increasing
){
#define SHORTSEARCH 5
/* if list is short, use linear instead of binary search.
   SHORTSEARCH is the minimum size for a binary search.
   SHORTSEARCH must be >=0 */
	register unsigned long lower,upper,search;

	asc_assert(NULL != list);
	if(!GL_LENGTH(list))return 0;
	if((list->flags & gsf_SORTED)
#if (SHORTSEARCH > 0)
		&& (upper = GL_LENGTH(list)-1) > SHORTSEARCH /* assignment intended */
#endif
	){ /* use binary search */
		register long comparison;
		lower = 0;
#if (SHORTSEARCH <= 0)
		upper = GL_LENGTH(list)-1;
#endif
		if(increasing){
			while(lower <= upper) {
				search = (lower+upper) / 2;
				comparison =
					((char *)list->data[search]-(char *)match); /* pointer difference */
				if(comparison==0){
					return (search + 1);
				}else if(comparison < 0){
					lower = search + 1;
				}else if(search > 0){
					upper = search - 1;
				}else{
					/* search==0 would make `upper` wrap on decrement: not found */
					return 0;
				}
			}
		}else{
			while(lower <= upper){
				search = (lower+upper) / 2;
				comparison =
					((char *)match-(char *)list->data[search]); /* pointer difference */
				if(comparison==0){
					return (search + 1);
				}else if (comparison < 0){
					lower = search + 1;
				}else if (search > 0){
					upper = search - 1;
				}else{
					/* search==0 would make `upper` wrap on decrement: not found */
					return 0;
				}
			}
		}
	}else{ /* use linear search */
		upper = GL_LENGTH(list);
		if(increasing){
			for(search=0; search < upper; search++) {
				if (list->data[search]==match) return search+1;
			}
		}else{
			/* the test could be written 'search < upper' with unsigned assumption,
			   but someday we may want to make list length signed and move on. */
			/* I removed the 'search >= 0 &&' part of the test because indeed it's
			   tautological with the current unsigned integers -- JP */
			for(search=upper-1; /* search >= 0 && */ search < upper; search--){
				if (list->data[search]==match) return search+1;
			}
		}
	}
	return 0; /* search failed */
}
868
869
/* Find `match` using comparison function func; returns the 1-based
 * position or 0 when absent.  A sorted list is binary searched (func
 * receives the list element first, `match` second); an unsorted list
 * is scanned from the front. */
unsigned long gl_search(CONST struct gl_list_t *list
	,CONST VOIDPTR match, CmpFunc func
){
	register unsigned long lower,upper,search;
#ifdef __alpha
	long comparison; /* NOTE(review): wider type on alpha -- presumably ABI-related */
#else
	int comparison;
#endif
	asc_assert((NULL != list) && (NULL != func));
	if (list->flags & gsf_SORTED) { /* use binary search */
		lower = 1;
		upper = GL_LENGTH(list);
		while (lower <= upper) {
			search = (lower+upper) / 2;
			comparison = (*func)(gl_fetch(list,search),match);
			if (comparison==0) return search;
			if (comparison < 0) lower = search + 1;
			else upper = search -1;
		}
	}else{ /* use linear search */
		upper = GL_LENGTH(list);
		for(search=0; search < upper; search++) {
			if ((*func)(list->data[search],match)==0) return search+1;
		}
	}
	return 0; /* search failed */
}
898
899
900 unsigned long gl_search_reverse(CONST struct gl_list_t *list
901 ,CONST VOIDPTR match, CmpFunc func
902 ){
903 register unsigned long search;
904
905 asc_assert((NULL != list) && (NULL != func));
906 if(list->flags & gsf_SORTED) { /* use binary search */
907 return gl_search(list, match, func);
908 }else{ /* use linear search */
909 /* since search is unsigned, the FOR loop cannot go to
910 * zero, since 0-- is ULONG_MAX. Must do zero separately.
911 */
912 for(search = (GL_LENGTH(list)-1); search > 0; search-- ) {
913 if((*func)(list->data[search],match)==0) return search+1;
914 }
915 search = 0;
916 if((*func)(list->data[search],match)==0) return search+1;
917 }
918 return 0; /* search failed */
919 }
920
921
922 int gl_unique_list(CONST struct gl_list_t *list){
923 unsigned long i,j,len,lm1;
924 VOIDPTR e;
925 VOIDPTR *d;
926
927 if (list==NULL || GL_LENGTH(list)<2) {
928 return 1;
929 }
930 len = GL_LENGTH(list);
931 lm1 = len-1;
932 d = list->data;
933 for (i=0; i<lm1; i++) {
934 e = d[i];
935 for (j=i+1; j < len; j++) {
936 if (d[j]==e) {
937 return 0;
938 }
939 }
940 }
941 return 1;
942 }
943
944
945 int gl_empty(CONST struct gl_list_t *list){
946 asc_assert(NULL != list);
947 return(GL_LENGTH(list)==0);
948 }
949
950
/* Remove the element at 1-based position pos, shifting the tail down
 * one slot.  When dispose is true the stored pointer itself is
 * ascfree()'d.  pos==0 is silently ignored (see note below).  The
 * sorted flag is restored afterwards: deleting cannot unsort a list. */
void gl_delete(struct gl_list_t *list, unsigned long int pos, int dispose){
	unsigned long c,length;
	int sorted;
	VOIDPTR ptr;

	asc_assert(NULL != list);
	/* should be ASSERTRANGE(list,pos,"gl_delete");
	   The check for (pos > 0) below is suspicious, though.
	   It suggests this function may be called with pos == 0. */
	asc_assert(pos <= gl_length(list));

	if (pos > 0) {
		/* remember sortedness: GL_STORE below will clear the flag */
		sorted = (int)(list->flags & gsf_SORTED);
		if (dispose) {
			/* the list owns the element: free it */
			ptr = gl_fetch(list,pos);
			ascfree(ptr);
		}
		length = GL_LENGTH(list);
#ifdef NDEBUG
		/* the copyleft the remainder of the array. easy-eye version */
		for (c = pos; c < length; c++) {
			list->data[c-1] = list->data[c];
		}
#else
		/* shift the remainder of the array using the macros */
		for (c=pos+1;c<=length;c++) {
			GL_STORE(list,c-1,gl_fetch(list,c));
		}
#endif
		list->length--;
		if (sorted) {
			/* restore the pre-delete sorted state */
			list->flags |= gsf_SORTED;
		}else{
			SORTED_OFF(list);
		}
	}
}
988
989
990 void gl_reverse(struct gl_list_t *list){
991 VOIDPTR *tmpdata;
992 unsigned long c,len;
993
994 if (list==NULL || list->length <2L) return;
995 tmpdata = ASC_NEW_ARRAY(VOIDPTR,(list->capacity));
996 if (tmpdata==NULL) {
997 ASC_PANIC("gl_reverse out of memory. Bye!\n");
998 }
999 len = list->length;
1000 for (c=0;c < len;c++) {
1001 tmpdata[len - (c+1)] = list->data[c];
1002 }
1003 ascfree(list->data);
1004 list->data = tmpdata;
1005 }
1006
1007
1008 void gl_reset(struct gl_list_t *list){
1009 asc_assert(NULL != list);
1010 list->flags = (unsigned int)(gsf_SORTED | gsf_EXPANDABLE);
1011 list->length = 0;
1012 }
1013
1014
1015 struct gl_list_t *gl_copy(CONST struct gl_list_t *list){
1016 struct gl_list_t *new;
1017 unsigned long counter,length;
1018
1019 asc_assert(NULL != list);
1020
1021 length = GL_LENGTH(list);
1022 new = gl_create(length);
1023 for (counter = 1;counter <= length;counter++)
1024 gl_append_ptr(new,gl_fetch(list,counter));
1025 new->flags = list->flags;
1026 return new;
1027 }
1028
1029 struct gl_list_t *gl_concat(CONST struct gl_list_t *list1
1030 , CONST struct gl_list_t *list2
1031 ){
1032 struct gl_list_t *new;
1033 unsigned long counter,length1,length2;
1034
1035 asc_assert ((NULL != list1) && (NULL != list2));
1036
1037 length1 = GL_LENGTH(list1);
1038 length2 = GL_LENGTH(list2);
1039 new = gl_create(length1+length2);
1040 for(counter = 1;counter<= length1; counter++) {
1041 gl_append_ptr(new,gl_fetch(list1,counter));
1042 }
1043 for(counter = 1;counter<= length2; counter++) {
1044 gl_append_ptr(new,gl_fetch(list2,counter));
1045 }
1046 return new;
1047 }
1048
1049 int gl_compare_ptrs(CONST struct gl_list_t *l1, CONST struct gl_list_t *l2){
1050 unsigned long len,c;
1051 register unsigned long i1,i2;
1052 if (l1==NULL || l2 == NULL) {
1053 ASC_PANIC("Called with NULL input");
1054 }
1055 len = MIN(GL_LENGTH(l1),GL_LENGTH(l2));
1056 for(c=0;c < len; c++) {
1057 i1 = (asc_intptr_t)(l1->data[c]);
1058 i2 = (asc_intptr_t)(l2->data[c]);
1059 if(i1<i2) {
1060 return -1;
1061 }
1062 if(i1>i2) {
1063 return 1;
1064 }
1065 }
1066 return (GL_LENGTH(l1) == GL_LENGTH(l2)) ?
1067 0 :
1068 (( GL_LENGTH(l2) > len) ? -1 : 1);
1069 }
1070
1071
1072 void gl_set_sorted(struct gl_list_t *list, int TRUE_or_FALSE){
1073 asc_assert(NULL != list);
1074 if (FALSE == TRUE_or_FALSE)
1075 list->flags &= ~gsf_SORTED;
1076 else
1077 list->flags |= gsf_SORTED;
1078 }
1079
1080
1081 int gl_expandable(struct gl_list_t *list){
1082 asc_assert(NULL != list);
1083 return (int)(list->flags & gsf_EXPANDABLE);
1084 }
1085
1086
1087 void gl_set_expandable(struct gl_list_t *list, int TRUE_or_FALSE){
1088 asc_assert(NULL != list);
1089 if (FALSE == TRUE_or_FALSE)
1090 list->flags &= ~gsf_EXPANDABLE;
1091 else
1092 list->flags |= gsf_EXPANDABLE;
1093 }
1094
1095
1096 VOIDPTR *gl_fetchaddr(CONST struct gl_list_t *list, unsigned long int pos){
1097 asc_assert(NULL != list);
1098 ASSERTRANGE(list,pos,"gl_fetchaddr");
1099 return (VOIDPTR *)&(list->data[pos-1]);
1100 }
1101
1102
1103 void gl_emptyrecycler(){
1104 int i;
1105 #if LISTRECYCLERDEBUG
1106 unsigned long bytecount = 0;
1107 #endif
1108 struct gl_list_t *list;
1109
1110 #if LISTRECYCLERDEBUG
1111 PRINTF("LIST RECYCLER SUMMARY\n");
1112 PRINTF("=====================\n");
1113 PRINTF("New lists created : %d\n", NewListsUsed);
1114 PRINTF("Recycled lists used: %d\n", RecycledListsUsed);
1115 ListsDestroyedTotal = 0;
1116 for (i=0;i <=MAXRECYCLESIZE; i++) {
1117 ListsDestroyedTotal += ListsDestroyed[i];
1118 }
1119 PRINTF("Lists destroyed : %d\n", ListsDestroyedTotal);
1120 PRINTF("Size\tAllowed\tCurr.\tMax\tDestroyed\n");
1121 #endif
1122 for(i = 0 ; i <= MAXRECYCLESIZE ; i++){
1123 #if LISTRECYCLERDEBUG
1124 if (HighWaterMark[i] >0) {
1125 PRINTF("%d\t%d\t%d\t%d\t%d\n",i,AllowedContents[i], RecycledContents[i],
1126 HighWaterMark[i], ListsDestroyed[i]);
1127 }
1128 bytecount += sizeof(struct gl_list_t)*RecycledContents[i];
1129 bytecount += sizeof(void *)*i*RecycledContents[i];
1130 #endif
1131 while (RecycledContents[i] > 0){
1132 /* pop a list off the LIFO list */
1133 list = RecycledList[i];
1134 RecycledList[i] = (struct gl_list_t *)list->data[0];
1135 #ifndef NDEBUG
1136 list->data[0] = NULL;
1137 #endif
1138 --RecycledContents[i];
1139 ascfree(list->data);
1140 #ifndef NDEBUG
1141 list->data = NULL;
1142 list->capacity = list->length = 0;
1143 #endif
1144 POOL_FREEHEAD(list);
1145 }
1146 asc_assert(RecycledList[i]==NULL); /* someone forgot to empty the last one */
1147 }
1148 #if LISTRECYCLERDEBUG
1149 bytecount += MAXRECYCLESIZE*sizeof(void *);
1150 PRINTF("Total bytes:\t%lu\n",bytecount);
1151 #endif
1152 }
1153
1154
1155 void gl_reportrecycler(FILE *fp){
1156 int i;
1157 unsigned long bytecount = 0;
1158 #if !LISTRECYCLERDEBUG
1159 (void) fp;
1160 #endif
1161
1162 #if LISTRECYCLERDEBUG
1163 asc_assert(NULL != fp);
1164 FPRINTF(fp,"LIST RECYCLER SUMMARY\n");
1165 FPRINTF(fp,"=====================\n");
1166 FPRINTF(fp,"New lists created : %d\n", NewListsUsed);
1167 FPRINTF(fp,"Recycled lists used: %d\n", RecycledListsUsed);
1168 for (i=0;i <=MAXRECYCLESIZE; i++) {
1169 ListsDestroyedTotal += ListsDestroyed[i];
1170 }
1171 FPRINTF(fp,"Lists destroyed : %d\n", ListsDestroyedTotal);
1172 FPRINTF(fp,"Size\tAllowed\tCurr.\tMax\tDestroyed\n");
1173 #endif
1174
1175 for(i = 0 ; i <= MAXRECYCLESIZE ; i++){
1176 #if LISTRECYCLERDEBUG
1177 if (HighWaterMark[i] >0) {
1178 FPRINTF(fp,"%d\t%d\t%d\t%d\t%d\n",i,AllowedContents[i],
1179 RecycledContents[i], HighWaterMark[i], ListsDestroyed[i]);
1180 bytecount += sizeof(struct gl_list_t)*RecycledContents[i];
1181 bytecount += sizeof(void *)*i*RecycledContents[i];
1182 }
1183 #else
1184 bytecount += sizeof(struct gl_list_t)*RecycledContents[i];
1185 bytecount += sizeof(void *)*RecycledContents[i]*i;
1186 #endif
1187 }
1188 #if LISTRECYCLERDEBUG
1189 bytecount += MAXRECYCLESIZE*sizeof(void *);
1190 FPRINTF(fp,"Total bytes:\t%lu\n",bytecount);
1191 #endif
1192 }
1193

john.pye@anu.edu.au
ViewVC Help
Powered by ViewVC 1.1.22