1 |
/* ASCEND modelling environment |
2 |
Copyright (C) 1990, 1993, 1994 Thomas Guthrie Epperly |
3 |
Copyright (C) 2006 Carnegie Mellon University |
4 |
|
5 |
This program is free software; you can redistribute it and/or modify |
6 |
it under the terms of the GNU General Public License as published by |
7 |
the Free Software Foundation; either version 2, or (at your option) |
8 |
any later version. |
9 |
|
10 |
This program is distributed in the hope that it will be useful, |
11 |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 |
GNU General Public License for more details. |
14 |
|
15 |
You should have received a copy of the GNU General Public License |
16 |
along with this program; if not, write to the Free Software |
17 |
Foundation, Inc., 59 Temple Place - Suite 330, |
18 |
Boston, MA 02111-1307, USA. |
19 |
*//* @file |
20 |
Implementation of List Module |
21 |
*//* |
22 |
Tom Epperly |
23 |
Last in CVS $Revision: 1.3 $ $Date: 1998/01/06 12:06:55 $ $Author: ballan $ |
24 |
*/ |
25 |
|
26 |
#include <stdarg.h> |
27 |
#include <ascend/utilities/ascConfig.h> |
28 |
#include <ascend/utilities/error.h> |
29 |
#include <ascend/utilities/ascPanic.h> |
30 |
#include <ascend/utilities/ascMalloc.h> |
31 |
#include "list.h" |
32 |
#if LISTUSESPOOL |
33 |
#include "pool.h" |
34 |
#endif |
35 |
#include "mathmacros.h" |
36 |
|
37 |
/* Byte address of (1-based) element b of list a's data vector.
 * NOTE(review): round-trips a pointer through unsigned long -- assumes
 * sizeof(unsigned long) >= sizeof(void*); confirm on LLP64 targets.
 * Appears unused in the visible portion of this file. */
#define address(a,b) (((unsigned long)(a) + ((b)*sizeof(VOIDPTR))-sizeof(VOIDPTR)))

/* Number of elements currently stored (NOT the allocated capacity). */
#define GL_LENGTH(l) ((l)->length)
#define LOWCAPACITY 2
/* Shortest list we will allow. It must not be 0! */
static const unsigned long MIN_INCREMENT = 8;
/* Minimum number of elements to increase when expanding list. */

/*
 * Be a bit more informative when dying on a list range error.
 * No printf we know of returns -2, so the print will always be
 * just before assertion acts to handle the range error.
 * NOTE(review): the comment above is stale -- the current expansion is a
 * bare assert and the name argument 'n' is unused; kept for caller
 * compatibility.
 */
#define ASSERTRANGE(list,pos,n) \
  asc_assert((pos >= 1) && (pos <= GL_LENGTH(list)));
52 |
|
53 |
/* REALLOCDEBUG selects the realloc implementation used by DATAREALLOC
 * below: TRUE (when MOD_REALLOC is defined) uses our own pricey version
 * of realloc instead of the standard library version.
 * This is to test for a bug in purify/os interactions. */
#ifndef MOD_REALLOC
#define REALLOCDEBUG FALSE
#else
#define REALLOCDEBUG TRUE
#endif
62 |
/*
 * We have modified gl_destroy to recycle lists of up to a certain size
 * MAXRECYCLESIZE.
 * The data structure for holding the recycled lists is a vector
 * of pointers to gl_list_t. Item 1 in a recycled list is the pointer
 * to the next recycled list, which may be NULL. This is a change
 * from Tom's initial implementation which used a dense array to store
 * pointers to lists.
 *
 * The number of recycled lists of a particular
 * capacity i to be allowed is found in the AllowedContents[i].
 *
 * It is intended that RecycledContents[i] is the current count of
 * lists of capacity i. It is a coincidence of implementation that
 * when RecycledContents[i] <=0 RecycledList[i]==NULL.
 *
 * If LISTRECYCLERDEBUG,
 * HighWaterMark[i] is the most of capacity i ever recycled.
 * ListsDestroyed[i] is likewise lists not recycled because full.
 */
#define LISTRECYCLERDEBUG 0
#define MAXRECYCLESIZE 500      /* the maximum list size to be recycled */
#define MAXRECYCLESMALLITEMS 300/* the maximum number of size < LARGEITEM */
#define MAXRECYCLELARGEITEMS 25 /* the maximum number of each large size */
#define MAXRECYCLESEARCH 2      /* how much larger a list will be searched for */
/* search performance. 0 and 2 are best values, 2 slightly better. alphaosf2 */
#define LARGEITEM 21            /* smallest large item */
/* note: baa, 3/8/96 LARGEITEM and MAXRECYCLELARGEITEMS not in use yet */

/* head of the LIFO chain of recycled lists, per capacity */
static struct gl_list_t *RecycledList[MAXRECYCLESIZE+1];
/* current number of recycled lists of capacity i */
static int RecycledContents[MAXRECYCLESIZE+1];
/* maximum number of recycled lists of capacity i we will hold */
static int AllowedContents[MAXRECYCLESIZE+1];
/*
 * It is assumed that these arrays are initialized to zero according to the
 * rules given in "The C Programming Language", Brian W. Kernighan and Dennis
 * M. Richie, second edition.
 */
98 |
/* Optional recycler instrumentation; compiled out when LISTRECYCLERDEBUG==0. */
#if LISTRECYCLERDEBUG
static int HighWaterMark[MAXRECYCLESIZE+1];
/* most ever in recycle list of size i */
static int ListsDestroyed[MAXRECYCLESIZE+1];
/* number of lists destroyed because Allowed was exceeded of size i */

static int ListsDestroyedTotal = 0;
/* number of lists destroyed because Allowed was exceeded all sizes */
static int NewListsUsed = 0;
/* calls to gl_create that required malloc */
static int RecycledListsUsed = 0;
/* calls to gl_create that used recycle */
#endif
111 |
|
112 |
/* If this function is not called, no recycling will take place |
113 |
* because all the Alloweds will be at default of 0. |
114 |
* This initializes the AllowedContents array, and nothing else. |
115 |
* General limits are set based on MAXRECYCLE*ITEMS. |
116 |
* Specific ones are done after and may be tuned. |
117 |
*/ |
118 |
void gl_init(void) |
119 |
{ |
120 |
int i; |
121 |
if (AllowedContents[0] && |
122 |
AllowedContents[0] != MAXRECYCLESMALLITEMS) { |
123 |
ERROR_REPORTER_HERE(ASC_PROG_ERR,"gl_init recalled after data corrupted!\n"); |
124 |
return; |
125 |
/* somebody called us twice, with intervening nonsense. punt */ |
126 |
} |
127 |
for (i = LOWCAPACITY; i < LARGEITEM; i++) { |
128 |
AllowedContents[i] = MAXRECYCLESMALLITEMS; |
129 |
} |
130 |
for (i = LARGEITEM; i <= MAXRECYCLESIZE; i++) { |
131 |
AllowedContents[i] = MAXRECYCLELARGEITEMS; |
132 |
} |
133 |
#define LARGESYSTEM 1 |
134 |
#if LARGESYSTEM |
135 |
AllowedContents[2] = 40000; |
136 |
AllowedContents[3] = 20000; |
137 |
AllowedContents[4] = 8000; |
138 |
AllowedContents[5] = 2000; |
139 |
AllowedContents[6] = 2000; |
140 |
AllowedContents[7] = 5000; |
141 |
AllowedContents[8] = 1000; |
142 |
#else |
143 |
AllowedContents[2] = 10000; |
144 |
AllowedContents[3] = 5000; |
145 |
AllowedContents[4] = 5000; |
146 |
AllowedContents[7] = 5000; |
147 |
#endif |
148 |
AllowedContents[23] = 50; |
149 |
} |
150 |
|
151 |
/* List-head allocation strategy: pooled when LISTUSESPOOL, else malloc. */
#if LISTUSESPOOL
static pool_store_t g_list_head_pool = NULL;
/* global for our memory manager */
#define POOL_ALLOCHEAD (struct gl_list_t *)(pool_get_element(g_list_head_pool))
/* get a token. Token is the size of the struct struct gl_list_t */
#define POOL_FREEHEAD(p) (pool_free_element(g_list_head_pool,((void *)p)))
/* return a struct gl_list_t */

#define GLP_LEN 10
#ifdef __alpha
#define GLP_WID 72
#else
#define GLP_WID 127
#endif
/* retune rpwid if the size of pending_t changes dramatically */
#define GLP_ELT_SIZE (sizeof(struct gl_list_t))
#define GLP_MORE_ELTS 10
/*
 * Number of slots filled if more elements needed.
 * So if the pool grows, it grows by GLP_MORE_ELTS*GLP_WID elements at a time.
 */
#define GLP_MORE_BARS 500
/* This is the number of pool bar slots to add during expansion.
   not all the slots will be filled immediately. */
#else

/* we default back to malloc/free if LISTUSESPOOL == FALSE */
#define POOL_ALLOCHEAD ((struct gl_list_t*)ascmalloc(sizeof(struct gl_list_t)))
#define POOL_FREEHEAD(p) ascfree((char *)(p))

#endif
182 |
|
183 |
/* This function is called at compiler startup time and destroy at shutdown. */ |
184 |
void gl_init_pool(void) { |
185 |
#if LISTUSESPOOL |
186 |
if (g_list_head_pool != NULL) { |
187 |
ASC_PANIC("ERROR: gl_init_pool called twice.\n"); |
188 |
} |
189 |
g_list_head_pool = pool_create_store(GLP_LEN, GLP_WID, GLP_ELT_SIZE, |
190 |
GLP_MORE_ELTS, GLP_MORE_BARS); |
191 |
if (g_list_head_pool == NULL) { |
192 |
ASC_PANIC("ERROR: gl_init_pool unable to allocate pool.\n"); |
193 |
} |
194 |
#else |
195 |
ERROR_REPORTER_HERE(ASC_PROG_ERR,"list.[ch] built without pooling of overheads\n"); |
196 |
#endif |
197 |
} |
198 |
|
199 |
void gl_destroy_pool(void) { |
200 |
#if LISTUSESPOOL |
201 |
if (g_list_head_pool==NULL) return; |
202 |
gl_emptyrecycler(); /* deallocate data in recycled lists, zero RecycledContents[] */ |
203 |
pool_clear_store(g_list_head_pool); |
204 |
pool_destroy_store(g_list_head_pool); |
205 |
g_list_head_pool = NULL; |
206 |
#else |
207 |
ERROR_REPORTER_HERE(ASC_PROG_ERR,"list.[ch] built without pooling of overheads\n"); |
208 |
#endif |
209 |
} |
210 |
|
211 |
|
212 |
int gl_pool_initialized(void) |
213 |
{ |
214 |
#if LISTUSESPOOL |
215 |
return (g_list_head_pool == NULL) ? FALSE : TRUE; |
216 |
#else |
217 |
ERROR_REPORTER_HERE(ASC_PROG_ERR,"list.[ch] built without pooling of overheads\n"); |
218 |
return TRUE; |
219 |
#endif |
220 |
} |
221 |
|
222 |
|
223 |
/* Print a one-line status report of the list-head pool to f.
 * BUG FIX: previously, when the pool was NULL, the "empty" message was
 * printed but control FELL THROUGH and passed the NULL pool to
 * pool_print_store(); now we return after reporting the empty pool. */
void gl_report_pool(FILE *f)
{
#if LISTUSESPOOL
  if (g_list_head_pool==NULL) {
    FPRINTF(f,"ListHeadPool is empty\n");
    return;
  }
  FPRINTF(f,"ListHeadPool ");
  pool_print_store(f,g_list_head_pool,0);
#else
  FPRINTF(f,"list.[ch] built without pooling of overheads\n");
#endif
}
234 |
|
235 |
/*
 * Create a list able to hold at least 'capacity' elements (a guess; the
 * list expands on demand, but a close guess avoids reallocation).
 * Tries the recycler first, then allocates a fresh head + data vector.
 * Returns NULL only when fresh allocation fails.
 */
struct gl_list_t *gl_create(unsigned long int capacity)
{
  struct gl_list_t *new;
  unsigned long i;
  /* never build a list smaller than LOWCAPACITY */
  i = capacity = MAX((unsigned long)LOWCAPACITY,capacity);
  /* scan the recycler at capacities [capacity .. capacity+MAXRECYCLESEARCH]
   * (bounded by MAXRECYCLESIZE) for a reusable list head */
  while(i <= capacity+MAXRECYCLESEARCH &&
        i <= MAXRECYCLESIZE){
    asc_assert(RecycledContents[i] >= 0 &&
               RecycledContents[i] <= AllowedContents[i]);
    if (RecycledContents[i] > 0){
#if LISTRECYCLERDEBUG
      RecycledListsUsed++;
#endif
      new = RecycledList[i]; /* fetch last recycled list */
      asc_assert(new != NULL);
      /* data[0] of a recycled head links to the next recycled head */
      RecycledList[i] = (struct gl_list_t *)new->data[0]; /* point to next */
      --RecycledContents[i]; /* reduce list count for this size */
      new->length = 0;
      /* an empty list is trivially sorted, and starts expandable */
      new->flags = (unsigned int)(gsf_SORTED | gsf_EXPANDABLE);
#ifndef NDEBUG
      /* scrub the recycler link so stale pointers can't leak out */
      if (i>0) new->data[0]=NULL;
#endif
      return new;
    }
    i++;
  }
  /* nothing recyclable at a usable size: allocate fresh */
  new=POOL_ALLOCHEAD;
  if (new != NULL) {
    new->length = 0;
#if LISTRECYCLERDEBUG
    NewListsUsed++;
#endif
#ifndef NDEBUG
    /* debug builds zero the data vector to catch use of unset slots */
    new->data = ASC_NEW_ARRAY_CLEAR(VOIDPTR,capacity);
#else
    new->data = (VOIDPTR *)ascmalloc((unsigned)capacity*sizeof(VOIDPTR));
#endif
    new->capacity = capacity;
    new->flags = (unsigned int)(gsf_SORTED | gsf_EXPANDABLE);
    return new;
  }
  else {
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"UNABLE TO ALLOCATE MEMORY FOR LIST\n");
    return NULL;
  }
}
288 |
|
289 |
/* Destroy the list AND ascfree() every stored element pointer.
 * The list head (and, when recycled, its data vector) may be cached in
 * the recycler for reuse by gl_create(). Compare gl_destroy(), which
 * leaves the elements alone. NULL list is a no-op. */
void gl_free_and_destroy(struct gl_list_t *list)
{
  unsigned long c;
  if (list == NULL) return;
#if LISTUSESPOOL
  AssertMemory(list);
#else
  AssertAllocatedMemory(list,sizeof(struct gl_list_t));
#endif
  /* elements are owned by the list here: free each one */
  for(c=0;c<GL_LENGTH(list);c++) {
    AssertContainedMemory(list->data[c],0);
    ascfree(list->data[c]);
  }
#ifndef NDEBUG
  /* scrub freed pointers in debug builds to catch use-after-destroy */
  ascbzero(list->data,sizeof(VOIDPTR)*list->capacity);
#endif
  c = list->capacity; /* gonna use it a lot */
  if (c <= MAXRECYCLESIZE && RecycledContents[c] < AllowedContents[c]) {
    asc_assert(RecycledContents[c] >= 0);
    /* push list into LIFO list of lists, and bump the count. */
    list->data[0] = RecycledList[c];
    RecycledList[c] = list;
    RecycledContents[c]++;
#if LISTRECYCLERDEBUG
    if (RecycledContents[c] > HighWaterMark[c]) {
      HighWaterMark[c] = RecycledContents[c];
    }
#endif
  } else{
    /* recycler full (or list too big): really deallocate */
#if LISTRECYCLERDEBUG
    if (c<=MAXRECYCLESIZE) {
      ListsDestroyed[c]++;
    }
#endif
    ascfree(list->data);
    list->data = NULL;
    list->capacity = list->length = 0;
    POOL_FREEHEAD(list);
  }
}
329 |
|
330 |
/* Destroy the list WITHOUT freeing the stored element pointers (the
 * caller retains ownership of the elements). The list head may be cached
 * in the recycler for reuse by gl_create(). Compare gl_free_and_destroy().
 * NULL list is a no-op. */
void gl_destroy(struct gl_list_t *list)
{
  unsigned long c;
  if (list == NULL) return;
#if LISTUSESPOOL
  AssertMemory(list);
#else
  AssertAllocatedMemory(list,sizeof(struct gl_list_t));
#endif
  c = list->capacity; /* gonna use it a lot */
  if (c <= MAXRECYCLESIZE && RecycledContents[c] < AllowedContents[c]) {
    asc_assert(RecycledContents[c] >= 0);
    /* push list into LIFO list of lists, and bump the count. */
    list->data[0] = RecycledList[c];
    RecycledList[c] = list;
    RecycledContents[c]++;
#if LISTRECYCLERDEBUG
    if (RecycledContents[c] > HighWaterMark[c]) {
      HighWaterMark[c] = RecycledContents[c];
    }
#endif
  } else {
    /* recycler full (or list too big): really deallocate the head */
#if LISTRECYCLERDEBUG
    if (c<=MAXRECYCLESIZE) {
      ListsDestroyed[c]++;
    }
#endif
#ifndef NDEBUG
    /* scrub the first slot in debug builds */
    if (list->capacity>0) list->data[0]=NULL;
#endif
    ascfree(list->data);
    list->data = NULL;
    list->capacity = list->length = 0;
    POOL_FREEHEAD(list);
  }
}
366 |
|
367 |
/* Range-checked fetch of the element at 1-based position pos.
 * This is the function form behind the gl_fetch debug macro. */
VOIDPTR gl_fetchF(CONST struct gl_list_t *list, unsigned long int pos)
{
  asc_assert(list != NULL);
  ASSERTRANGE(list, pos, "gl_fetchF");
  return list->data[pos - 1];
}
372 |
|
373 |
/* Clear the sorted / expandable flag bits on list l. */
#define SORTED_OFF(l) (l)->flags &= (~gsf_SORTED)
#define EXPAND_OFF(l) (l)->flags &= (~gsf_EXPANDABLE)

/* for internal use only, where we can verify that STUFF is correct
 * and it is SOMEONE else job to maintain the integrity of
 * flags.sorted and list-> length.
 * Debug builds route through gl_store() for its assertions; release
 * builds store directly. */
#ifdef NDEBUG
#define GL_STORE(list,pos,ptr) ((list)->data[(pos)-1] = (ptr))
#else
#define GL_STORE(list,pos,ptr) gl_store((list),(pos),(ptr))
#endif
385 |
/* Range-checked store of ptr at 1-based position pos. Storing into a
 * list of more than one element clears its sorted flag, since we cannot
 * know the new element preserves the ordering. */
void gl_store(struct gl_list_t *list, unsigned long int pos, VOIDPTR ptr)
{
  asc_assert(list != NULL);
  if (GL_LENGTH(list) > 1) {
    SORTED_OFF(list);
  }
  ASSERTRANGE(list, pos, "gl_store");
  list->data[pos - 1] = ptr;
}
392 |
|
393 |
/* Grow l's data vector to the (already updated) l->capacity.
 * NOTE: callers must bump l->capacity by incr BEFORE invoking this;
 * 'incr' is only used by the debug variant to recover the old size. */
#if REALLOCDEBUG
/* this we know to not leak if MOD_REALLOC is defined */
#define DATAREALLOC(l,incr) \
  ascreallocPURE((char *)(l)->data, \
                 (size_t)(((l)->capacity-(incr))*sizeof(VOIDPTR)), \
                 (size_t)((l)->capacity*sizeof(VOIDPTR)))
#else
/* this we think might leak under hpux, sunos4. more like purify lies. */
#define DATAREALLOC(l,incr) \
  ascrealloc((l)->data,(unsigned)((l)->capacity*sizeof(VOIDPTR)))
#endif
/* actually, when all said and done, a static structure in solver/analyze.c
 * was being left out of the cleanup process at shutdown.
 */
407 |
|
408 |
static void gl_expand_list(struct gl_list_t *list) |
409 |
{ |
410 |
VOIDPTR *tmp; |
411 |
unsigned long increment; |
412 |
increment = (list->capacity*50)/100; /* 10% is too small. try 50% baa */ |
413 |
increment = MAX(MIN_INCREMENT,increment); |
414 |
list->capacity += increment; |
415 |
tmp = (VOIDPTR *) DATAREALLOC(list,increment); |
416 |
|
417 |
if (tmp==NULL) { |
418 |
ERROR_REPORTER_HERE(ASC_PROG_ERR,"gl_expand_list: memory allocation failed"); |
419 |
list->capacity -= increment; |
420 |
} else { |
421 |
list->data = tmp; |
422 |
} |
423 |
} |
424 |
|
425 |
/* changes only the capacity of the list, and possibly data pointer */ |
426 |
static void gl_expand_list_by(struct gl_list_t *list,unsigned long addlen) |
427 |
{ |
428 |
if (!addlen) return; |
429 |
addlen = MAX(MIN_INCREMENT,addlen); /* expand by at least 8 */ |
430 |
list->capacity += addlen; |
431 |
list->data = (VOIDPTR *)DATAREALLOC(list,addlen); |
432 |
|
433 |
if (list->data==NULL)ERROR_REPORTER_HERE(ASC_PROG_ERR,"gl_expand_list_by: memory allocation failed\n"); |
434 |
asc_assert(list->data!=NULL); |
435 |
} |
436 |
|
437 |
/* Append ptr at the end of the list, expanding capacity if needed.
 * Appending to a non-empty list clears the sorted flag.
 * The list must be expandable. */
void gl_append_ptr(struct gl_list_t *list, VOIDPTR ptr)
{
  asc_assert(list != NULL && gl_expandable(list) != 0);
  if (list->length > 0) {
    SORTED_OFF(list);
  }
  ++(list->length);
  if (list->length > list->capacity) { /* expand list capacity */
    gl_expand_list(list);
  }
  list->data[list->length - 1] = ptr;
}
445 |
|
446 |
void gl_append_list(struct gl_list_t *extendlist, struct gl_list_t *list) |
447 |
{ |
448 |
register unsigned long c,len,oldlen,newlen; |
449 |
|
450 |
asc_assert((NULL != extendlist) && |
451 |
(0 != gl_expandable(extendlist)) && |
452 |
(NULL != list)); |
453 |
if (extendlist->length >0 || list->length > 0) { |
454 |
SORTED_OFF(extendlist); |
455 |
} |
456 |
newlen = extendlist->length + list->length; |
457 |
if (newlen > extendlist->capacity) { |
458 |
gl_expand_list_by(extendlist,newlen - extendlist->capacity); |
459 |
} |
460 |
len = list->length; |
461 |
oldlen = extendlist->length; |
462 |
extendlist->length += len; |
463 |
for (c=0;c<len;c++) { |
464 |
extendlist->data[oldlen] = list->data[c]; |
465 |
oldlen++; |
466 |
} |
467 |
} |
468 |
|
469 |
/* Append ptr WITHOUT any capacity check -- the caller guarantees there is
 * room (length < capacity). Always clears the sorted flag. */
void gl_fast_append_ptr(struct gl_list_t *list, VOIDPTR ptr)
{
  asc_assert(list != NULL);
  SORTED_OFF(list);
  list->data[list->length] = ptr;
  list->length += 1;
}
476 |
|
477 |
/* NULL-tolerant length query: a NULL list reports length 0. */
unsigned long gl_safe_length(CONST struct gl_list_t *list)
{
  return (list == NULL) ? 0L : list->length;
}
485 |
/* Length query (function form behind the gl_length debug macro).
 * Asserts on NULL; use gl_safe_length for NULL tolerance. */
unsigned long gl_lengthF(CONST struct gl_list_t *list)
{
  asc_assert(list != NULL);
  return GL_LENGTH(list);
}
490 |
|
491 |
/* Allocated capacity of the list; 0 for a NULL list. */
unsigned long gl_capacity(CONST struct gl_list_t *list)
{
  return (list == NULL) ? 0 : list->capacity;
}
496 |
|
497 |
|
498 |
/* Nonzero iff the list's sorted flag is set.
 * Returns the raw gsf_SORTED bit, not necessarily 1. */
int gl_sorted(CONST struct gl_list_t *list)
{
  asc_assert(list != NULL);
  return (int)(list->flags & gsf_SORTED);
}
503 |
|
504 |
/* Compiled out: LISTIMPLEMENTED is not defined in the visible code. */
#if LISTIMPLEMENTED
/* I believe this function is ok */
static void *gl_choose_ptrpivot(struct gl_list_t *list,
                                unsigned long int lower,
                                unsigned long int upper)
/*********************************************************************\
 * This function will choose a pivot. It can actually return any
 * number between and including upper and lower. However, the goal
 * is to return a pivot that will not cause Quick Sort to have
 * O(n^2) behavior. This returns the value rather than the address
 * of the pivot.
 *
 * The heuristic I am choosing is to pick the middle of three test
 * sites which are upper,lower, and (upper+lower)/2.
 *
 * NOTE(review): compares element POINTERS with relational operators,
 * which is only defined for pointers into the same array -- confirm
 * intent before ever enabling this code.
\*********************************************************************/
{
  unsigned long middle;
  middle = (upper+lower)/2;
  if (gl_fetch(list,middle) > gl_fetch(list,lower)) {
    /* middle > lower */
    if (gl_fetch(list,upper) > gl_fetch(list,middle)) {
      /* middle > lower && upper > middle */
      return gl_fetch(list,middle);
    }
    else {
      /* middle > lower && middle >= upper */
      if (gl_fetch(list,upper) > gl_fetch(list,lower)) {
        /* middle > lower && middle >= upper && upper > lower */
        return gl_fetch(list,upper);
      }
      else {
        /* middle > lower && middle >= upper && lower >= upper */
        return gl_fetch(list,lower);
      }
    }
  }
  else {
    /* lower >= middle */
    if (gl_fetch(list,upper) > gl_fetch(list,lower)) {
      /* lower >= middle && upper > lower */
      return gl_fetch(list,lower);
    }
    else {
      /* lower >= middle && lower >= upper */
      if (gl_fetch(list,middle) > gl_fetch(list,upper)) {
        /* lower >= middle && lower >= upper && middle > upper */
        return gl_fetch(list,middle);
      }
      else {
        /* lower >= middle && lower >= upper && upper >= middle */
        return gl_fetch(list,upper);
      }
    }
  }
}
#endif
560 |
|
561 |
static
unsigned long gl_choose_pivot(struct gl_list_t *list,
                              unsigned long int lower,
                              unsigned long int upper,
                              CmpFunc func)
/*
 * This function will choose a pivot. It can actually return any
 * number between and including upper and lower. However, the goal
 * is to return a pivot that will not cause Quick Sort to have
 * O(n^2) behavior.
 *
 * The heuristic I am choosing is to pick the middle of three test
 * sites which are upper,lower, and (upper+lower)/2.
 * (Median-of-three, returning the POSITION of the median, decided by
 * the caller-supplied comparison function.)
 */
{
  unsigned long middle;
  middle = (upper+lower)/2;
  if ((*func)(gl_fetch(list,middle),gl_fetch(list,lower)) > 0) {
    /* middle > lower */
    if ((*func)(gl_fetch(list,upper),gl_fetch(list,middle)) > 0) {
      /* middle > lower && upper > middle */
      return middle;
    }
    else {
      /* middle > lower && middle >= upper */
      if ((*func)(gl_fetch(list,upper),gl_fetch(list,lower)) > 0) {
        /* middle > lower && middle >= upper && upper > lower */
        return upper;
      }
      else {
        /* middle > lower && middle >= upper && lower >= upper */
        return lower;
      }
    }
  }
  else {
    /* lower >= middle */
    if ((*func)(gl_fetch(list,upper),gl_fetch(list,lower)) > 0) {
      /* lower >= middle && upper > lower */
      return lower;
    }
    else {
      /* lower >= middle && lower >= upper */
      if ((*func)(gl_fetch(list,middle),gl_fetch(list,upper)) > 0) {
        /* lower >= middle && lower >= upper && middle > upper */
        return middle;
      }
      else {
        /* lower >= middle && lower >= upper && upper >= middle */
        return upper;
      }
    }
  }
}
615 |
|
616 |
/* do not export. makes rosy assumptions. callers of this function |
617 |
* are responsible for making sure list.sorted is set appropriately. |
618 |
*/ |
619 |
static void gl_swap(struct gl_list_t *list, |
620 |
unsigned long int i, unsigned long int j) |
621 |
{ |
622 |
VOIDPTR temp; |
623 |
temp = gl_fetch(list,i); |
624 |
GL_STORE(list,i,gl_fetch(list,j)); |
625 |
GL_STORE(list,j,temp); |
626 |
} |
627 |
|
628 |
/* Compiled out: LISTIMPLEMENTED is not defined in the visible code. */
#if LISTIMPLEMENTED
/* this function is probably ok */
void gl_upsort(struct gl_list_t *list,
               unsigned long int lower,
               unsigned long int upper)
{
  register unsigned long i,j;
  VOIDPTR pivot_element;
  j = upper;
  i = lower;
  pivot_element = gl_choose_ptrpivot(list,lower,upper);
  do {
    /* partition around the pivot by raw pointer comparison */
    while (pivot_element > gl_fetch(list,i)) i += 1;
    while (gl_fetch(list,j) > pivot_element) j -= 1;
    if (i <= j) {
      register VOIDPTR temp;
      temp = gl_fetch(list,i);
      GL_STORE(list,i,gl_fetch(list,j));
      GL_STORE(list,j,temp);
      i++; j--;
    }
  } while(i <= j);
  if (lower < j) gl_upsort(list,lower,j);
  if (i < upper) gl_upsort(list,i,upper);
}

/* this function is not ok
 * NOTE(review): references 'func', which is not a parameter -- this
 * would not compile if LISTIMPLEMENTED were enabled. */
void gl_downsort(struct gl_list_t *list,
                 unsigned long int lower,
                 unsigned long int upper)
{
  unsigned long pivot,i,j;
  VOIDPTR pivot_element;
  j = upper;
  i = lower;
  pivot_element = gl_choose_ptrpivot(list,lower,upper);
  do {
    while ((*func)(pivot_element,gl_fetch(list,i))>0) i += 1;
    while ((*func)(gl_fetch(list,j),pivot_element)>0) j -= 1;
    if (i <= j) {
      gl_swap(list,i++,j--);
    }
  } while(i <= j);
  if (lower < j) gl_downsort(list,lower,j);
  if (i<upper) gl_downsort(list,i,upper);
}
#endif
675 |
|
676 |
/* Recursive quicksort of positions [lower..upper] using median-of-three
 * pivot selection and the caller's comparison function. */
static
void gl_qsort(struct gl_list_t *list,
              unsigned long int lower,
              unsigned long int upper,
              CmpFunc func)
{
  unsigned long pivot,i,j;
  VOIDPTR pivot_element;
  j = upper;
  i = lower;
  pivot = gl_choose_pivot(list,lower,upper,func);
  /* cache the pivot VALUE: the element at 'pivot' moves during partition */
  pivot_element = gl_fetch(list,pivot);
  do {
    /* advance from both ends until an out-of-place pair is found */
    while ((*func)(pivot_element,gl_fetch(list,i))>0) i += 1;
    while ((*func)(gl_fetch(list,j),pivot_element)>0) j -= 1;
    if (i <= j) {
      gl_swap(list,i++,j--);
    }
  } while(i <= j);
  /* recurse into both partitions (skipping empty ones) */
  if (lower < j) gl_qsort(list,lower,j,func);
  if (i<upper) gl_qsort(list,i,upper,func);
}
698 |
|
699 |
/* Compiled out: LISTIMPLEMENTED is not defined in the visible code. */
#if LISTIMPLEMENTED
/* ok once its children are ok */
void gl_ptr_sort(struct gl_list_t *list, int increasing)
{
  if (GL_LENGTH(list) > 1) {
    if (increasing) {
      gl_upsort(list,1,GL_LENGTH(list));
    } else {
      gl_downsort(list,1,GL_LENGTH(list));
    }
  }
  list->flags |= gsf_SORTED;
}
#endif
713 |
|
714 |
/* Compiled out: LISTIMPLEMENTED is not defined in the visible code.
 * NOTE(review): unnamed parameters and an empty body -- a stub; the
 * author's own comment marks it broken. */
#if LISTIMPLEMENTED
/* broken */
void gl_insert_ptr_sorted(struct gl_list_t *,VOIDPTR,int)
{
}
#endif
720 |
|
721 |
/* Sort the whole list with quicksort using func, then mark it sorted.
 * A list of 0 or 1 elements is marked sorted without any work. */
void gl_sort(struct gl_list_t *list, CmpFunc func)
{
  asc_assert(list != NULL && func != NULL);
  if (GL_LENGTH(list) > 1) {
    gl_qsort(list, 1, GL_LENGTH(list), func);
  }
  list->flags |= gsf_SORTED;
}
729 |
|
730 |
/* Insert ptr into a sorted, expandable list at the position that keeps
 * it sorted (binary search for the insertion point). If the list is not
 * flagged sorted, reports an error, appends, and re-sorts the whole list. */
void gl_insert_sorted(struct gl_list_t *list,
                      VOIDPTR ptr, CmpFunc func)
{
  int comparison;
  register unsigned long lower,upper,search=0L;

  asc_assert((NULL != list) &&
             (0 != gl_expandable(list)) &&
             (NULL != func));
  if (list->flags & gsf_SORTED) {
    if (GL_LENGTH(list)==0) {
      /* trivially insert into an empty list; appending clears the
       * sorted flag, so restore it */
      gl_append_ptr(list,ptr);
      list->flags |= gsf_SORTED;
      return;
    }
    /* reserve the new slot up front; old elements occupy 1..length-1 */
    if (++list->length > list->capacity) /* expand list capacity */
      gl_expand_list(list);
    lower = 1;
    upper = GL_LENGTH(list)-1;
    /* binary search over the old elements for the insertion point */
    while (lower<=upper) {
      search = ( lower + upper) >> 1; /* divide by two */
      comparison = (*func)(gl_fetch(list,search),ptr);
      if (comparison==0) {
        /* equal element found: insert immediately after it. make room
         * (lower is reused as a shift cursor here) */
        for(lower=GL_LENGTH(list);lower>search;lower--)
          GL_STORE(list,lower,gl_fetch(list,lower-1));
        /* store it */
        GL_STORE(list,search+1,ptr);
        list->flags |= gsf_SORTED;
        return;
      }
      if (comparison < 0) lower = search + 1;
      else upper = search - 1;
    }
    /* no equal element: 'search' is adjacent to the insertion point;
     * decide which side of it ptr belongs on, shift, and store */
    if ((*func)(gl_fetch(list,search),ptr) > 0) {
      for(lower=GL_LENGTH(list);lower>search;lower--) {
        GL_STORE(list,lower,gl_fetch(list,lower-1));
      }
      GL_STORE(list,search,ptr);
    }
    else {
      for(lower=GL_LENGTH(list);lower>(search+1);lower--) {
        GL_STORE(list,lower,gl_fetch(list,lower-1));
      }
      GL_STORE(list,search+1,ptr);
    }
    /* GL_STORE clears the sorted flag (via gl_store in debug); restore it */
    list->flags |= gsf_SORTED;
  }
  else {
    ERROR_REPORTER_HERE(ASC_PROG_ERR,"gl_insert_sorted called on unsorted list -- sorting list now.");
    gl_append_ptr(list,ptr);
    gl_sort(list,func);
  }
}
784 |
|
785 |
void gl_iterate(struct gl_list_t *list, void (*func) (VOIDPTR)) |
786 |
{ |
787 |
#ifdef NDEBUG |
788 |
register unsigned long length,counter; |
789 |
length = GL_LENGTH(list); |
790 |
for (counter=0;counter<length;counter++) |
791 |
(*func)(list->data[counter]); |
792 |
#else |
793 |
unsigned long length,counter; |
794 |
asc_assert(NULL != list); |
795 |
asc_assert(NULL != func); |
796 |
length = GL_LENGTH(list); |
797 |
for (counter=1;counter<=length;counter++) |
798 |
(*func)(gl_fetch(list,counter)); |
799 |
#endif |
800 |
} |
801 |
|
802 |
/* Search for 'match' by pointer identity and return its 1-based position,
 * or 0 if absent. A list flagged sorted (and longer than SHORTSEARCH) is
 * binary-searched, assuming it was sorted by address in the direction
 * given by 'increasing'; otherwise a linear scan is used.
 * this function is on the critical path in the compiler, or
 * should be, so it is highly tweaked here.
 */
unsigned long gl_ptr_search(CONST struct gl_list_t *list,
                            CONST VOIDPTR match, int increasing)
{
#define SHORTSEARCH 5
/* if list is short, use linear instead of binary search.
 * SHORTSEARCH is the minimum size for a binary search.
 * SHORTSEARCH must be >=0
 */
  register unsigned long lower,upper,search;

  asc_assert(NULL != list);
  if (!GL_LENGTH(list)) return 0;
  if ( (list->flags & gsf_SORTED)
#if (SHORTSEARCH > 0)
      /* side effect: seeds 'upper' for the binary search below */
      && (upper = GL_LENGTH(list)-1) > SHORTSEARCH
#endif
     ) { /* use binary search */
    register long comparison;
    lower = 0;
#if (SHORTSEARCH <= 0)
    upper = GL_LENGTH(list)-1;
#endif
    if (increasing) {
      while (lower <= upper) {
        search = (lower+upper) / 2;
        comparison =
          ((char *)list->data[search]-(char *)match); /* pointer difference */
        if (comparison==0) {
          return (search + 1);
        }
        else if (comparison < 0) {
          lower = search + 1;
        }
        else if (search > 0) {
          upper = search - 1;
        }
        else {
          /* search==0 and element > match: would underflow upper; absent */
          return 0;
        }
      }
    } else {
      while (lower <= upper) {
        search = (lower+upper) / 2;
        comparison =
          ((char *)match-(char *)list->data[search]); /* pointer difference */
        if (comparison==0) {
          return (search + 1);
        }
        else if (comparison < 0) {
          lower = search + 1;
        }
        else if (search > 0) {
          upper = search - 1;
        }
        else {
          return 0;
        }
      }
    }
  }
  else { /* use linear search */
    upper = GL_LENGTH(list);
    if (increasing) {
      for(search=0; search < upper; search++) {
        if (list->data[search]==match) return search+1;
      }
    } else {
      /* the test could be written 'search < upper' with unsigned assumption,
         but someday we may want to make list length signed and move on.
         (the 'search < upper' clause also stops the 0-- wraparound) */
      for(search=upper-1; search >= 0 && search < upper; search--) {
        if (list->data[search]==match) return search+1;
      }
    }
  }
  return 0; /* search failed */
}
883 |
|
884 |
/* Search for 'match' using the comparison function func and return its
 * 1-based position, or 0 if absent. Sorted lists are binary-searched;
 * unsorted lists are scanned linearly. */
unsigned long gl_search(CONST struct gl_list_t *list,
                        CONST VOIDPTR match, CmpFunc func)
{
  register unsigned long low, high, probe;
#ifdef __alpha
  long cmp;
#else
  int cmp;
#endif
  asc_assert(list != NULL && func != NULL);
  if (list->flags & gsf_SORTED) { /* use binary search */
    low = 1;
    high = GL_LENGTH(list);
    while (low <= high) {
      probe = (low + high) / 2;
      cmp = (*func)(gl_fetch(list, probe), match);
      if (cmp == 0) {
        return probe;
      }
      if (cmp < 0) {
        low = probe + 1;
      } else {
        high = probe - 1;
      }
    }
  } else { /* use linear search */
    high = GL_LENGTH(list);
    for (probe = 0; probe < high; probe++) {
      if ((*func)(list->data[probe], match) == 0) {
        return probe + 1;
      }
    }
  }
  return 0; /* search failed */
}
913 |
|
914 |
|
915 |
/**
	Search a list back-to-front for an element comparing equal to match
	under func; falls back to gl_search (binary search) when the list is
	flagged sorted.  Returns the 1-based position, or 0 if not found.
*/
unsigned long gl_search_reverse(CONST struct gl_list_t *list,
		CONST VOIDPTR match, CmpFunc func)
{
  register unsigned long search;

  asc_assert((NULL != list) && (NULL != func));
  if (list->flags & gsf_SORTED) { /* use binary search */
    return gl_search(list, match, func);
  }
  /* BUG FIX: the original version unconditionally probed data[0] (and,
     for an empty list, started the loop at (0UL - 1) == ULONG_MAX),
     reading far past the end of the array.  Guard the empty case. */
  if (GL_LENGTH(list) == 0) return 0;
  /* linear search, back to front.  Since search is unsigned, the for
     loop cannot count down to zero (0-- wraps to ULONG_MAX), so index
     zero is handled separately after the loop. */
  for (search = GL_LENGTH(list)-1; search > 0; search--) {
    if ((*func)(list->data[search],match)==0) return search+1;
  }
  if ((*func)(list->data[0],match)==0) return 1;
  return 0; /* search failed */
}
935 |
|
936 |
|
937 |
/**
	Return 1 if no two entries of the list hold the same pointer value,
	0 otherwise.  NULL or lists shorter than 2 are trivially unique.
	O(n^2) pairwise comparison.
*/
int gl_unique_list(CONST struct gl_list_t *list)
{
  unsigned long outer,inner,count;
  VOIDPTR candidate;
  VOIDPTR *vec;

  if (list==NULL || GL_LENGTH(list) < 2) {
    return 1;
  }
  count = GL_LENGTH(list);
  vec = list->data;
  for (outer = 0; outer+1 < count; outer++) {
    candidate = vec[outer];
    for (inner = outer+1; inner < count; inner++) {
      if (vec[inner] == candidate) {
        return 0; /* duplicate found */
      }
    }
  }
  return 1;
}
959 |
|
960 |
/** TRUE (nonzero) iff the list currently holds no elements. */
int gl_empty(CONST struct gl_list_t *list)
{
  asc_assert(NULL != list);
  return (0 == GL_LENGTH(list));
}
965 |
|
966 |
/**
	Remove the element at 1-based position pos from the list, shifting
	the tail of the array down by one.  If dispose is nonzero, the stored
	pointer is passed to ascfree() first (ownership is assumed).
	A pos of 0 is silently ignored (see note below).  Removing an element
	preserves the relative order of the rest, so the sorted flag is kept
	as it was on entry.
*/
void gl_delete(struct gl_list_t *list, unsigned long int pos, int dispose)
{
  unsigned long c,length;
  int sorted;
  VOIDPTR ptr;

  asc_assert(NULL != list);
  /* should be ASSERTRANGE(list,pos,"gl_delete");
     The check for (pos > 0) below is suspicious, though.
     It suggests this function may be called with pos == 0. */
  asc_assert(pos <= gl_length(list));

  if (pos > 0) {
    /* remember sortedness before the macros below can clobber it */
    sorted = (int)(list->flags & gsf_SORTED);
    if (dispose) {
      /* caller asked us to free the stored pointer itself */
      ptr = gl_fetch(list,pos);
      ascfree(ptr);
    }
    length = GL_LENGTH(list);
#ifdef NDEBUG
    /* release build: shift the remainder of the array down directly.
       (0-based data[] indexing; element pos lives at data[pos-1]) */
    for (c = pos; c < length; c++) {
      list->data[c-1] = list->data[c];
    }
#else
    /* debug build: shift via the checked macros (1-based positions).
       NOTE: GL_STORE may clear the sorted flag; restored below. */
    for (c=pos+1;c<=length;c++) {
      GL_STORE(list,c-1,gl_fetch(list,c));
    }
#endif
    list->length--;
    if (sorted) {
      /* deletion cannot unsort a sorted list; reassert the flag */
      list->flags |= gsf_SORTED;
    } else {
      SORTED_OFF(list);
    }
  }
}
1004 |
|
1005 |
void gl_reverse(struct gl_list_t *list) |
1006 |
{ |
1007 |
VOIDPTR *tmpdata; |
1008 |
unsigned long c,len; |
1009 |
|
1010 |
if (list==NULL || list->length <2L) return; |
1011 |
tmpdata = ASC_NEW_ARRAY(VOIDPTR,(list->capacity)); |
1012 |
if (tmpdata==NULL) { |
1013 |
ASC_PANIC("gl_reverse out of memory. Bye!\n"); |
1014 |
} |
1015 |
len = list->length; |
1016 |
for (c=0;c < len;c++) { |
1017 |
tmpdata[len - (c+1)] = list->data[c]; |
1018 |
} |
1019 |
ascfree(list->data); |
1020 |
list->data = tmpdata; |
1021 |
} |
1022 |
|
1023 |
void gl_reset(struct gl_list_t *list) |
1024 |
{ |
1025 |
asc_assert(NULL != list); |
1026 |
list->flags = (unsigned int)(gsf_SORTED | gsf_EXPANDABLE); |
1027 |
list->length = 0; |
1028 |
} |
1029 |
|
1030 |
|
1031 |
/**
	Return a freshly created list holding the same pointers in the same
	order as the input, with the input's flags copied across.
	This is a shallow copy: the pointed-to objects are shared.
	(Local renamed from `new` -- a C++ keyword -- for header friendliness.)
*/
struct gl_list_t *gl_copy(CONST struct gl_list_t *list)
{
  struct gl_list_t *copy;
  unsigned long i,n;

  asc_assert(NULL != list);

  n = GL_LENGTH(list);
  copy = gl_create(n);
  for (i = 1; i <= n; i++) {
    gl_append_ptr(copy,gl_fetch(list,i));
  }
  copy->flags = list->flags;
  return copy;
}
1045 |
|
1046 |
/**
	Return a new list holding the elements of list1 followed by the
	elements of list2 (shallow copies; pointers are shared).  The result
	carries gl_create's default flags, not those of either input.
*/
struct gl_list_t *gl_concat(CONST struct gl_list_t *list1,
		CONST struct gl_list_t *list2)
{
  struct gl_list_t *result;
  unsigned long i,len1,len2;

  asc_assert((NULL != list1) && (NULL != list2));

  len1 = GL_LENGTH(list1);
  len2 = GL_LENGTH(list2);
  result = gl_create(len1+len2);
  for (i = 1; i <= len1; i++) {
    gl_append_ptr(result,gl_fetch(list1,i));
  }
  for (i = 1; i <= len2; i++) {
    gl_append_ptr(result,gl_fetch(list2,i));
  }
  return result;
}
1065 |
|
1066 |
/**
	Lexicographic comparison of two lists by the numeric values of their
	stored pointers.  Returns -1/0/1 in strcmp style; a shorter list that
	is a prefix of the longer compares as less.  Panics on NULL input.
*/
int gl_compare_ptrs(CONST struct gl_list_t *l1, CONST struct gl_list_t *l2)
{
  unsigned long common,i;
  register unsigned long a,b;

  if (l1==NULL || l2 == NULL) {
    ASC_PANIC("Called with NULL input");
  }
  common = MIN(GL_LENGTH(l1),GL_LENGTH(l2));
  for (i = 0; i < common; i++) {
    /* NOTE(review): pointers are compared via (unsigned long) casts; on
       LLP64 platforms (e.g. 64-bit Windows) unsigned long is 32 bits and
       this truncates -- confirm whether uintptr_t should be used here. */
    a = (unsigned long)(l1->data[i]);
    b = (unsigned long)(l2->data[i]);
    if (a != b) {
      return (a < b) ? -1 : 1;
    }
  }
  if (GL_LENGTH(l1) == GL_LENGTH(l2)) {
    return 0;
  }
  return (GL_LENGTH(l2) > common) ? -1 : 1;
}
1088 |
|
1089 |
void gl_set_sorted(struct gl_list_t *list, int TRUE_or_FALSE) |
1090 |
{ |
1091 |
asc_assert(NULL != list); |
1092 |
if (FALSE == TRUE_or_FALSE) |
1093 |
list->flags &= ~gsf_SORTED; |
1094 |
else |
1095 |
list->flags |= gsf_SORTED; |
1096 |
} |
1097 |
|
1098 |
int gl_expandable(struct gl_list_t *list) |
1099 |
{ |
1100 |
asc_assert(NULL != list); |
1101 |
return (int)(list->flags & gsf_EXPANDABLE); |
1102 |
} |
1103 |
|
1104 |
void gl_set_expandable(struct gl_list_t *list, int TRUE_or_FALSE) |
1105 |
{ |
1106 |
asc_assert(NULL != list); |
1107 |
if (FALSE == TRUE_or_FALSE) |
1108 |
list->flags &= ~gsf_EXPANDABLE; |
1109 |
else |
1110 |
list->flags |= gsf_EXPANDABLE; |
1111 |
} |
1112 |
|
1113 |
|
1114 |
/**
	Return the address of the data slot holding element pos (1-based).
	The returned pointer is into the list's own array and is invalidated
	by any operation that grows or reallocates the list.
*/
VOIDPTR *gl_fetchaddr(CONST struct gl_list_t *list,
		unsigned long int pos)
{
  asc_assert(NULL != list);
  ASSERTRANGE(list,pos,"gl_fetchaddr");
  return (VOIDPTR *)(list->data + (pos-1));
}
1121 |
|
1122 |
|
1123 |
/**
	Free every list head and data array held in the recycler bins,
	returning the memory to the allocator/pool.  When LISTRECYCLERDEBUG
	is on, first prints a usage summary of the recycler to stdout.
*/
void gl_emptyrecycler()
{
  int i;
#if LISTRECYCLERDEBUG
  unsigned long bytecount = 0; /* bytes held by the recycler, for report */
#endif
  struct gl_list_t *list;

#if LISTRECYCLERDEBUG
  PRINTF("LIST RECYCLER SUMMARY\n");
  PRINTF("=====================\n");
  PRINTF("New lists created : %d\n", NewListsUsed);
  PRINTF("Recycled lists used: %d\n", RecycledListsUsed);
  ListsDestroyedTotal = 0;
  for (i=0;i <=MAXRECYCLESIZE; i++) {
    ListsDestroyedTotal += ListsDestroyed[i];
  }
  PRINTF("Lists destroyed : %d\n", ListsDestroyedTotal);
  PRINTF("Size\tAllowed\tCurr.\tMax\tDestroyed\n");
#endif
  /* one bin per capacity, 0..MAXRECYCLESIZE */
  for(i = 0 ; i <= MAXRECYCLESIZE ; i++){
#if LISTRECYCLERDEBUG
    if (HighWaterMark[i] >0) {
      PRINTF("%d\t%d\t%d\t%d\t%d\n",i,AllowedContents[i], RecycledContents[i],
             HighWaterMark[i], ListsDestroyed[i]);
    }
    /* tally memory about to be released from this bin */
    bytecount += sizeof(struct gl_list_t)*RecycledContents[i];
    bytecount += sizeof(void *)*i*RecycledContents[i];
#endif
    while (RecycledContents[i] > 0){
      /* pop a list off the LIFO list; data[0] links to the next head */
      list = RecycledList[i];
      RecycledList[i] = (struct gl_list_t *)list->data[0];
#ifndef NDEBUG
      list->data[0] = NULL; /* scrub the link in debug builds */
#endif
      --RecycledContents[i];
      ascfree(list->data);
#ifndef NDEBUG
      /* poison the freed head so stale use is caught in debug builds */
      list->data = NULL;
      list->capacity = list->length = 0;
#endif
      POOL_FREEHEAD(list);
    }
    asc_assert(RecycledList[i]==NULL); /* someone forgot to empty the last one */
  }
#if LISTRECYCLERDEBUG
  bytecount += MAXRECYCLESIZE*sizeof(void *);
  PRINTF("Total bytes:\t%lu\n",bytecount);
#endif
}
1174 |
|
1175 |
/**
	Report the recycler's current holdings to fp without freeing anything.
	With LISTRECYCLERDEBUG on, prints the full usage summary (fp must not
	be NULL); otherwise only tallies bytes held and prints nothing.
	NOTE(review): in the non-debug build bytecount is computed but never
	output, and fp is unused -- confirm this is intentional.
*/
void gl_reportrecycler(FILE *fp)
{
  int i;
  unsigned long bytecount = 0;
#if !LISTRECYCLERDEBUG
  (void) fp; /* silence unused-parameter warning in non-debug builds */
#endif

#if LISTRECYCLERDEBUG
  asc_assert(NULL != fp);
  FPRINTF(fp,"LIST RECYCLER SUMMARY\n");
  FPRINTF(fp,"=====================\n");
  FPRINTF(fp,"New lists created : %d\n", NewListsUsed);
  FPRINTF(fp,"Recycled lists used: %d\n", RecycledListsUsed);
  for (i=0;i <=MAXRECYCLESIZE; i++) {
    ListsDestroyedTotal += ListsDestroyed[i];
  }
  FPRINTF(fp,"Lists destroyed : %d\n", ListsDestroyedTotal);
  FPRINTF(fp,"Size\tAllowed\tCurr.\tMax\tDestroyed\n");
#endif

  /* one bin per capacity, 0..MAXRECYCLESIZE */
  for(i = 0 ; i <= MAXRECYCLESIZE ; i++){
#if LISTRECYCLERDEBUG
    if (HighWaterMark[i] >0) {
      FPRINTF(fp,"%d\t%d\t%d\t%d\t%d\n",i,AllowedContents[i],
              RecycledContents[i], HighWaterMark[i], ListsDestroyed[i]);
      /* bytes held by this bin: list heads plus their data arrays */
      bytecount += sizeof(struct gl_list_t)*RecycledContents[i];
      bytecount += sizeof(void *)*i*RecycledContents[i];
    }
#else
    bytecount += sizeof(struct gl_list_t)*RecycledContents[i];
    bytecount += sizeof(void *)*RecycledContents[i]*i;
#endif
  }
#if LISTRECYCLERDEBUG
  bytecount += MAXRECYCLESIZE*sizeof(void *);
  FPRINTF(fp,"Total bytes:\t%lu\n",bytecount);
#endif
}