1 |
/* |
2 |
* Implementation |
3 |
* of List Module |
4 |
* Tom Epperly |
5 |
* Part of Ascend |
6 |
* Version: $Revision: 1.3 $ |
7 |
* Version control file: $RCSfile: list.c,v $ |
8 |
* Date last modified: $Date: 1998/01/06 12:06:55 $ |
9 |
* Last modified by: $Author: ballan $ |
10 |
* |
11 |
* This file is part of the Ascend Language Interpreter. |
12 |
* |
13 |
* Copyright (C) 1990, 1993, 1994 Thomas Guthrie Epperly |
14 |
* |
15 |
* The Ascend Language Interpreter is free software; you can redistribute |
16 |
* it and/or modify it under the terms of the GNU General Public License |
17 |
* as published by the Free Software Foundation; either version 2 of the |
18 |
* License, or (at your option) any later version. |
19 |
* |
20 |
* The Ascend Language Interpreter is distributed in hope that it will be |
21 |
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of |
22 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
23 |
* General Public License for more details. |
24 |
* |
25 |
* You should have received a copy of the GNU General Public License |
26 |
* along with the program; if not, write to the Free Software Foundation, |
27 |
* Inc., 675 Mass Ave, Cambridge, MA 02139 USA. Check the file named |
28 |
* COPYING. |
29 |
* |
30 |
*/ |
31 |
|
32 |
#include <stdarg.h> |
33 |
#include "utilities/ascConfig.h" |
34 |
#include "utilities/ascMalloc.h" |
35 |
#include "utilities/ascPanic.h" |
36 |
#include "general/list.h" |
37 |
#if LISTUSESPOOL |
38 |
#include "general/pool.h" |
39 |
#endif |
40 |
|
41 |
#ifndef lint |
42 |
static CONST char ListID[] = "$Id: list.c,v 1.3 1998/01/06 12:06:55 ballan Exp $"; |
43 |
#endif |
44 |
|
45 |
#ifndef MAX |
46 |
#define MAX(a,b) (((a)>(b))?(a):(b)) |
47 |
#endif |
48 |
#define address(a,b) \ |
49 |
(((unsigned long)(a) + ((b)*sizeof(VOIDPTR))-sizeof(VOIDPTR))) |
50 |
#define GL_LENGTH(l) ((l)->length) |
51 |
#define LOWCAPACITY 2 |
52 |
/* Shortest list we will allow. It must not be 0! */ |
53 |
static const unsigned long MIN_INCREMENT = 8; |
54 |
/* Minimum number of elements to increase when expanding list. */ |
55 |
|
56 |
/* |
57 |
* Be a bit more informative when dying on a list range error. |
58 |
* No printf we know of returns -2, so the print will always be |
59 |
* just before assertion acts to handle the range error. |
60 |
*/ |
61 |
#define ASSERTRANGE(list,pos,n) \ |
62 |
asc_assert(((pos >= 1) && (pos <= GL_LENGTH(list))) || \ |
63 |
(PRINTF("%s called with illegal length %lu\n",n,pos) == -2)); |
64 |
|
65 |
#ifndef MOD_REALLOC |
66 |
#define REALLOCDEBUG FALSE |
67 |
#else |
68 |
#define REALLOCDEBUG TRUE |
69 |
#endif |
70 |
/* if REALLOCDEBUG TRUE, then we use our own pricey version of realloc |
71 |
* instead of the standard library version. |
72 |
* This is to test for a bug in purify/os interactions. |
73 |
*/ |
74 |
/* |
75 |
* We have modified gl_destroy to recycle lists of up to a certain size |
76 |
* MAXRECYCLESIZE. |
77 |
* The data structure for holding the recycled lists is a vector |
78 |
* of pointers to gl_list_t. Item 1 in a recycled list is the pointer |
79 |
* to the next recycled list, which may be NULL. This is a change |
80 |
* from Tom's initial implementation which used a dense array to store |
81 |
* pointers to lists. |
82 |
* |
83 |
* The number of recycled lists of a particular |
84 |
* capacity i to be allowed is found in the AllowedContents[i]. |
85 |
* |
86 |
* It is intended that RecycledContents[i] is the current count of |
87 |
* lists of capacity i. It is a coincidence of implementation that |
88 |
* when RecycledContents[i] <=0 RecycledList[i]==NULL. |
89 |
* |
90 |
* If LISTRECYCLERDEBUG, |
91 |
* HighWaterMark[i] is the most of capacity i ever recycled. |
92 |
* ListsDestroyed[i] is likewise lists not recycled because full. |
93 |
*/ |
94 |
#define LISTRECYCLERDEBUG 0 |
95 |
#define MAXRECYCLESIZE 500 /* the maximum list size to be recycled */ |
96 |
#define MAXRECYCLESMALLITEMS 300/* the maximum number of size < LARGEITEM */ |
97 |
#define MAXRECYCLELARGEITEMS 25 /* the maximum number of each large size */ |
98 |
#define MAXRECYCLESEARCH 2 /* how much larger a list will be searched for */ |
99 |
/* search performance. 0 and 2 are best values, 2 slightly better. alphaosf2 */ |
100 |
#define LARGEITEM 21 /* smallest large item */ |
101 |
/* note: baa, 3/8/96 LARGEITEM and MAXRECYCLELARGEITEMS not in use yet */ |
102 |
static struct gl_list_t *RecycledList[MAXRECYCLESIZE+1]; |
103 |
static int RecycledContents[MAXRECYCLESIZE+1]; |
104 |
static int AllowedContents[MAXRECYCLESIZE+1]; |
105 |
/* |
106 |
* It is assumed that these arrays are initialized to zero according to the |
107 |
* rules given in "The C Programming Language", Brian W. Kernighan and Dennis |
108 |
* M. Richie, second edition. |
109 |
*/ |
110 |
#if LISTRECYCLERDEBUG |
111 |
static int HighWaterMark[MAXRECYCLESIZE+1]; |
112 |
/* most ever in recycle list of size i */ |
113 |
static int ListsDestroyed[MAXRECYCLESIZE+1]; |
114 |
/* number of lists destroyed because Allowed was exceeded of size i */ |
115 |
|
116 |
static int ListsDestroyedTotal = 0; |
117 |
/* number of lists destroyed because Allowed was exceeded all sizes */ |
118 |
static int NewListsUsed = 0; |
119 |
/* calls to gl_create that required malloc */ |
120 |
static int RecycledListsUsed = 0; |
121 |
/* calls to gl_create that used recycle */ |
122 |
#endif |
123 |
|
124 |
/* If this function is not called, no recycling will take place |
125 |
* because all the Alloweds will be at default of 0. |
126 |
* This initializes the AllowedContents array, and nothing else. |
127 |
* General limits are set based on MAXRECYCLE*ITEMS. |
128 |
* Specific ones are done after and may be tuned. |
129 |
*/ |
130 |
/*
 * Enable list recycling by filling in the AllowedContents[] quota table.
 * If this is never called, all quotas stay 0 (C static initialization)
 * and gl_destroy()/gl_free_and_destroy() free lists instead of caching them.
 * Safe to call more than once only if the table is still in a state this
 * function could have produced; otherwise it refuses and returns.
 */
void gl_init(void)
{
  int i;
  /* AllowedContents[0] is never set by this function's loops (they start
   * at LOWCAPACITY), so a nonzero slot 0 that isn't the sentinel value
   * means somebody scribbled on the table since the last call. */
  if (AllowedContents[0] &&
      AllowedContents[0] != MAXRECYCLESMALLITEMS) {
    PRINTF("gl_init recalled after data corrupted!\n");
    return;
    /* somebody called us twice, with intervening nonsense. punt */
  }
  /* general quotas: small capacities get the big per-size quota... */
  for (i = LOWCAPACITY; i < LARGEITEM; i++) {
    AllowedContents[i] = MAXRECYCLESMALLITEMS;
  }
  /* ...and large capacities get the small per-size quota. */
  for (i = LARGEITEM; i <= MAXRECYCLESIZE; i++) {
    AllowedContents[i] = MAXRECYCLELARGEITEMS;
  }
/* hand-tuned overrides for the most common small capacities follow. */
#define LARGESYSTEM 1
#if LARGESYSTEM
  AllowedContents[2] = 40000;
  AllowedContents[3] = 20000;
  AllowedContents[4] = 8000;
  AllowedContents[5] = 2000;
  AllowedContents[6] = 2000;
  AllowedContents[7] = 5000;
  AllowedContents[8] = 1000;
#else
  AllowedContents[2] = 10000;
  AllowedContents[3] = 5000;
  AllowedContents[4] = 5000;
  AllowedContents[7] = 5000;
#endif
  AllowedContents[23] = 50;
}
162 |
|
163 |
#if LISTUSESPOOL |
164 |
static pool_store_t g_list_head_pool = NULL; |
165 |
/* global for our memory manager */ |
166 |
#define POOL_ALLOCHEAD (struct gl_list_t *)(pool_get_element(g_list_head_pool)) |
167 |
/* get a token. Token is the size of the struct struct gl_list_t */ |
168 |
#define POOL_FREEHEAD(p) (pool_free_element(g_list_head_pool,((void *)p))) |
169 |
/* return a struct gl_list_t */ |
170 |
|
171 |
#define GLP_LEN 10 |
172 |
#ifdef __alpha |
173 |
#define GLP_WID 72 |
174 |
#else |
175 |
#define GLP_WID 127 |
176 |
#endif |
177 |
/* retune rpwid if the size of pending_t changes dramatically */ |
178 |
#define GLP_ELT_SIZE (sizeof(struct gl_list_t)) |
179 |
#define GLP_MORE_ELTS 10 |
180 |
/* |
181 |
* Number of slots filled if more elements needed. |
182 |
* So if the pool grows, it grows by GLP_MORE_ELTS*GLP_WID elements at a time. |
183 |
*/ |
184 |
#define GLP_MORE_BARS 500 |
185 |
/* This is the number of pool bar slots to add during expansion. |
186 |
not all the slots will be filled immediately. */ |
187 |
#else |
188 |
|
189 |
/* we default back to malloc/free if LISTUSESPOOL == FALSE */ |
190 |
#define POOL_ALLOCHEAD ((struct gl_list_t*)ascmalloc(sizeof(struct gl_list_t))) |
191 |
#define POOL_FREEHEAD(p) ascfree((char *)(p)) |
192 |
|
193 |
#endif |
194 |
|
195 |
/* This function is called at compiler startup time and destroy at shutdown. */ |
196 |
/*
 * Create the global pool from which list headers are allocated.
 * Must be called exactly once (at compiler startup) before any
 * gl_create(); panics if called twice or if the pool cannot be made.
 * No-op (with a warning) when built with LISTUSESPOOL == FALSE.
 */
void gl_init_pool(void) {
#if LISTUSESPOOL
  if (g_list_head_pool != NULL) {
    Asc_Panic(2, NULL, "ERROR: gl_init_pool called twice.\n");
  }
  g_list_head_pool = pool_create_store(GLP_LEN, GLP_WID, GLP_ELT_SIZE,
    GLP_MORE_ELTS, GLP_MORE_BARS);
  if (g_list_head_pool == NULL) {
    Asc_Panic(2, NULL, "ERROR: gl_init_pool unable to allocate pool.\n");
  }
#else
  PRINTF("list.[ch] built without pooling of overheads\n");
#endif
}
210 |
|
211 |
/*
 * Tear down the list-header pool at shutdown.  Recycled lists hold
 * data arrays allocated outside the pool, so the recycler must be
 * emptied first or those arrays would leak.  Idempotent: returns
 * quietly if the pool was never created or is already destroyed.
 */
void gl_destroy_pool(void) {
#if LISTUSESPOOL
  if (g_list_head_pool==NULL) return;
  gl_emptyrecycler();    /* deallocate data in recycled lists, zero RecycledContents[] */
  pool_clear_store(g_list_head_pool);
  pool_destroy_store(g_list_head_pool);
  g_list_head_pool = NULL;
#else
  PRINTF("list.[ch] built without pooling of overheads\n");
#endif
}
222 |
|
223 |
|
224 |
int gl_pool_initialized(void) |
225 |
{ |
226 |
#if LISTUSESPOOL |
227 |
return (g_list_head_pool == NULL) ? FALSE : TRUE; |
228 |
#else |
229 |
PRINTF("list.[ch] built without pooling of overheads\n"); |
230 |
return TRUE; |
231 |
#endif |
232 |
} |
233 |
|
234 |
|
235 |
/*
 * Print a usage report for the list-header pool to stream f.
 *
 * Bug fix: the original fell through after printing "ListHeadPool is
 * empty" and still called pool_print_store(f, NULL, 0) with a NULL
 * pool pointer.  We now return immediately when there is no pool.
 */
void gl_report_pool(FILE *f)
{
#if LISTUSESPOOL
  if (g_list_head_pool==NULL) {
    FPRINTF(f,"ListHeadPool is empty\n");
    return;                     /* nothing to print a report about */
  }
  FPRINTF(f,"ListHeadPool ");
  pool_print_store(f,g_list_head_pool,0);
#else
  FPRINTF(f,"list.[ch] built without pooling of overheads\n");
#endif
}
246 |
|
247 |
/* |
248 |
* Guess of the capacity of the list. If the |
249 |
* list capacity needs to be expanded it will |
250 |
* be. It is good to be close though |
251 |
*/ |
252 |
/*
 * Create a list able to hold at least `capacity` elements (the list
 * grows on demand, so the argument is only a sizing hint).
 * Prefers to reuse a recycled list of capacity in
 * [capacity .. capacity+MAXRECYCLESEARCH]; otherwise allocates fresh.
 * Returns NULL only when allocation of a new header fails.
 */
struct gl_list_t *gl_create(unsigned long int capacity)
{
  struct gl_list_t *new;
  unsigned long i;
  /* never build a list smaller than LOWCAPACITY */
  i = capacity = MAX((unsigned long)LOWCAPACITY,capacity);
  /* scan the recycle bins from the requested size upward */
  while(i <= capacity+MAXRECYCLESEARCH &&
        i <= MAXRECYCLESIZE){
    asc_assert(RecycledContents[i] >= 0 &&
               RecycledContents[i] <= AllowedContents[i]);
    if (RecycledContents[i] > 0){
#if LISTRECYCLERDEBUG
      RecycledListsUsed++;
#endif
      new = RecycledList[i]; /* fetch last recycled list */
      asc_assert(new != NULL);
      /* data[0] of a recycled list links to the next recycled list (LIFO) */
      RecycledList[i] = (struct gl_list_t *)new->data[0]; /* point to next */
      --RecycledContents[i]; /* reduce list count for this size */
      new->length = 0;
      new->flags = (unsigned int)(gsf_SORTED | gsf_EXPANDABLE);
#ifndef NDEBUG
      /* scrub the chain link so no stale pointer leaks to the client */
      if (i>0) new->data[0]=NULL;
#endif
      return new;
    }
    i++;
  }
  /* no recycled list available: allocate header and data array */
  /*FPRINTF(ASCERR,"ABOUT TO ALLOCHEAD\n");*/
  new=POOL_ALLOCHEAD;
  /*FPRINTF(ASCERR,"GL_CREATE: COULDN'T ALLOC SIZE %d, new = %p\n",sizeof(struct gl_list_t),POOL_ALLOCHEAD);*/
  if (new != NULL) {
    new->length = 0;
#if LISTRECYCLERDEBUG
    NewListsUsed++;
#endif
#ifndef NDEBUG
    /* debug builds zero the slots so reads of unset entries are obvious */
    new->data = (VOIDPTR *)asccalloc((unsigned)capacity,sizeof(VOIDPTR));
#else
    new->data = (VOIDPTR *)ascmalloc((unsigned)capacity*sizeof(VOIDPTR));
#endif
    new->capacity = capacity;
    new->flags = (unsigned int)(gsf_SORTED | gsf_EXPANDABLE);
    return new;
  }
  else {
    FPRINTF(ASCERR,"UNABLE TO ALLOCATE MEMORY FOR LIST\n");
    return NULL;
  }
}
300 |
|
301 |
/*
 * ascfree() every element pointer stored in the list, then destroy the
 * list itself.  Small-capacity lists are pushed onto the recycler's
 * LIFO chain (data[0] becomes the chain link) instead of being freed,
 * provided the per-size quota in AllowedContents[] is not exhausted.
 * NULL list is a quiet no-op.
 */
void gl_free_and_destroy(struct gl_list_t *list)
{
  unsigned long c;
  if (list == NULL) return;
  AssertAllocatedMemory(list,sizeof(struct gl_list_t));
  for(c=0;c<GL_LENGTH(list);c++) {
    AssertContainedMemory(list->data[c],4);
    ascfree(list->data[c]);
  }
#ifndef NDEBUG
  /* debug builds scrub the whole data array to expose use-after-destroy */
  ascbzero(list->data,sizeof(VOIDPTR)*list->capacity);
#endif
  c = list->capacity; /* gonna use it a lot */
  if (c <= MAXRECYCLESIZE && RecycledContents[c] < AllowedContents[c]) {
    asc_assert(RecycledContents[c] >= 0);
    /* push list into LIFO list of lists, and bump the count. */
    list->data[0] = RecycledList[c];
    RecycledList[c] = list;
    RecycledContents[c]++;
#if LISTRECYCLERDEBUG
    if (RecycledContents[c] > HighWaterMark[c]) {
      HighWaterMark[c] = RecycledContents[c];
    }
#endif
  } else{
#if LISTRECYCLERDEBUG
    if (c<=MAXRECYCLESIZE) {
      ListsDestroyed[c]++;
    }
#endif
    /* recycle bin full or list too large: genuinely release everything */
    ascfree(list->data);
    list->data = NULL;
    list->capacity = list->length = 0;
    POOL_FREEHEAD(list);
  }
}
337 |
|
338 |
/*
 * Destroy the list header and data array WITHOUT freeing the stored
 * element pointers (the caller keeps ownership of the elements).
 * Like gl_free_and_destroy(), small lists are pushed onto the
 * recycler's LIFO chain rather than freed.  NULL list is a no-op.
 */
void gl_destroy(struct gl_list_t *list)
{
  unsigned long c;
  if (list == NULL) return;
  AssertAllocatedMemory(list,sizeof(struct gl_list_t));
  c = list->capacity; /* gonna use it a lot */
  if (c <= MAXRECYCLESIZE && RecycledContents[c] < AllowedContents[c]) {
    asc_assert(RecycledContents[c] >= 0);
    /* push list into LIFO list of lists, and bump the count. */
    list->data[0] = RecycledList[c];
    RecycledList[c] = list;
    RecycledContents[c]++;
#if LISTRECYCLERDEBUG
    if (RecycledContents[c] > HighWaterMark[c]) {
      HighWaterMark[c] = RecycledContents[c];
    }
#endif
  } else {
#if LISTRECYCLERDEBUG
    if (c<=MAXRECYCLESIZE) {
      ListsDestroyed[c]++;
    }
#endif
#ifndef NDEBUG
    /* scrub slot 0 (the would-be chain link) before freeing, debug only */
    if (list->capacity>0) list->data[0]=NULL;
#endif
    ascfree(list->data);
    list->data = NULL;
    list->capacity = list->length = 0;
    POOL_FREEHEAD(list);
  }
}
370 |
|
371 |
/*
 * Checked fetch of the element at 1-based position `pos`.
 * Asserts on a NULL list or an out-of-range position.
 */
VOIDPTR gl_fetchF(CONST struct gl_list_t *list, unsigned long int pos)
{
  asc_assert(NULL != list);
  ASSERTRANGE(list,pos,"gl_fetchF");
  /* external positions are 1-based; internal storage is 0-based */
  return *(list->data + (pos - 1));
}
377 |
|
378 |
#define SORTED_OFF(l) (l)->flags &= (~gsf_SORTED) |
379 |
#define EXPAND_OFF(l) (l)->flags &= (~gsf_EXPANDABLE) |
380 |
|
381 |
/* for internal use only, where we can verify that STUFF is correct |
382 |
* and it is SOMEONE else job to maintain the integrity of |
383 |
* flags.sorted and list-> length. |
384 |
*/ |
385 |
#ifdef NDEBUG |
386 |
#define GL_STORE(list,pos,ptr) ((list)->data[(pos)-1] = (ptr)) |
387 |
#else |
388 |
#define GL_STORE(list,pos,ptr) gl_store((list),(pos),(ptr)) |
389 |
#endif |
390 |
/*
 * Checked store of `ptr` at 1-based position `pos`.
 * Overwriting an element of a multi-element list may break its
 * ordering, so the sorted flag is cleared in that case.
 */
void gl_store(struct gl_list_t *list, unsigned long int pos, VOIDPTR ptr)
{
  asc_assert(NULL != list);
  if (GL_LENGTH(list) > 1) {
    SORTED_OFF(list);   /* a 0/1-element list stays trivially sorted */
  }
  ASSERTRANGE(list,pos,"gl_store");
  list->data[pos - 1] = ptr;
}
397 |
|
398 |
#if REALLOCDEBUG |
399 |
/* this we know to not leak if MOD_REALLOC is defined */ |
400 |
#define DATAREALLOC(l,incr) \ |
401 |
ascreallocPURE((char *)(l)->data, \ |
402 |
(size_t)(((l)->capacity-(incr))*sizeof(VOIDPTR)), \ |
403 |
(size_t)((l)->capacity*sizeof(VOIDPTR))) |
404 |
#else |
405 |
/* this we think might leak under hpux, sunos4. more like purify lies. */ |
406 |
#define DATAREALLOC(l,incr) \ |
407 |
ascrealloc((l)->data,(unsigned)((l)->capacity*sizeof(VOIDPTR))) |
408 |
#endif |
409 |
/* actually, when all said and done, a static structure in solver/analyze.c |
410 |
* was being left out of the cleanup process at shutdown. |
411 |
*/ |
412 |
|
413 |
static void gl_expand_list(struct gl_list_t *list) |
414 |
{ |
415 |
VOIDPTR *tmp; |
416 |
unsigned long increment; |
417 |
increment = (list->capacity*50)/100; /* 10% is too small. try 50% baa */ |
418 |
increment = MAX(MIN_INCREMENT,increment); |
419 |
list->capacity += increment; |
420 |
tmp = (VOIDPTR *) DATAREALLOC(list,increment); |
421 |
|
422 |
if (tmp==NULL) { |
423 |
PRINTF("gl_expand_list: memory allocation failed\n"); |
424 |
list->capacity -= increment; |
425 |
} else { |
426 |
list->data = tmp; |
427 |
} |
428 |
} |
429 |
|
430 |
/* changes only the capacity of the list, and possibly data pointer */ |
431 |
static void gl_expand_list_by(struct gl_list_t *list,unsigned long addlen) |
432 |
{ |
433 |
if (!addlen) return; |
434 |
addlen = MAX(MIN_INCREMENT,addlen); /* expand by at least 8 */ |
435 |
list->capacity += addlen; |
436 |
list->data = (VOIDPTR *)DATAREALLOC(list,addlen); |
437 |
|
438 |
if (list->data==NULL) PRINTF("gl_expand_list_by: memory allocation failed\n"); |
439 |
asc_assert(list->data!=NULL); |
440 |
} |
441 |
|
442 |
/*
 * Append `ptr` to the end of an expandable list, growing the data
 * array if needed.  Appending to a non-empty list clears the sorted
 * flag, since the new tail may violate the ordering.
 */
void gl_append_ptr(struct gl_list_t *list, VOIDPTR ptr)
{
  unsigned long newlen;

  asc_assert((NULL != list) && (0 != gl_expandable(list)));
  if (list->length > 0) {
    SORTED_OFF(list);
  }
  newlen = list->length + 1;
  list->length = newlen;
  if (newlen > list->capacity) {   /* expand list capacity */
    gl_expand_list(list);
  }
  list->data[newlen - 1] = ptr;
}
450 |
|
451 |
void gl_append_list(struct gl_list_t *extendlist, struct gl_list_t *list) |
452 |
{ |
453 |
register unsigned long c,len,oldlen,newlen; |
454 |
|
455 |
asc_assert((NULL != extendlist) && |
456 |
(0 != gl_expandable(extendlist)) && |
457 |
(NULL != list)); |
458 |
if (extendlist->length >0 || list->length > 0) { |
459 |
SORTED_OFF(extendlist); |
460 |
} |
461 |
newlen = extendlist->length + list->length; |
462 |
if (newlen > extendlist->capacity) { |
463 |
gl_expand_list_by(extendlist,newlen - extendlist->capacity); |
464 |
} |
465 |
len = list->length; |
466 |
oldlen = extendlist->length; |
467 |
extendlist->length += len; |
468 |
for (c=0;c<len;c++) { |
469 |
extendlist->data[oldlen] = list->data[c]; |
470 |
oldlen++; |
471 |
} |
472 |
} |
473 |
|
474 |
/*
 * Append `ptr` WITHOUT any capacity check: the caller must guarantee
 * that list->length < list->capacity before calling, or the write is
 * out of bounds.  Always clears the sorted flag.  Use gl_append_ptr()
 * unless the caller has pre-sized the list.
 */
void gl_fast_append_ptr(struct gl_list_t *list, VOIDPTR ptr)
{
  asc_assert(NULL != list);
  SORTED_OFF(list);
  list->data[list->length] = ptr;  /* no expansion here by design */
  ++(list->length);
}
481 |
|
482 |
/*
 * Length of the list, tolerating NULL: a NULL list reports length 0
 * instead of asserting (contrast gl_lengthF).
 */
unsigned long gl_safe_length(CONST struct gl_list_t *list)
{
  if (list == NULL) {
    return 0L;
  }
  return list->length;
}
490 |
/*
 * Checked length accessor (function form of gl_length); asserts on a
 * NULL list.
 */
unsigned long gl_lengthF(CONST struct gl_list_t *list)
{
  asc_assert(list != NULL);
  return GL_LENGTH(list);
}
495 |
|
496 |
/*
 * Current allocated capacity of the list; a NULL list reports 0.
 */
unsigned long gl_capacity(CONST struct gl_list_t *list)
{
  return (list == NULL) ? 0 : list->capacity;
}
501 |
|
502 |
|
503 |
/*
 * Nonzero when the list's sorted flag is set.  Asserts on NULL.
 * Note the return is the raw masked flag bit, not normalized to 0/1.
 */
int gl_sorted(CONST struct gl_list_t *list)
{
  asc_assert(list != NULL);
  return (int)(list->flags & gsf_SORTED);
}
508 |
|
509 |
#if LISTIMPLEMENTED
/* I believe this function is ok */
/* NOTE(review): this whole region is compiled out (LISTIMPLEMENTED).
 * It compares element pointers with relational operators, which is
 * only well-defined for pointers into the same array — presumably the
 * intended use is sorting by address; confirm before enabling. */
static void *gl_choose_ptrpivot(struct gl_list_t *list,
                                unsigned long int lower,
                                unsigned long int upper)
/*********************************************************************\
 * This function will choose a pivot. It can actually return any
 * number between and including upper and lower. However, the goal
 * is to return a pivot that will not cause Quick Sort to have
 * O(n^2) behavior. This returns the value rather than the address
 * of the pivot.
 *
 * The heuristic I am choosing is to pick the middle of three test
 * sites which are upper,lower, and (upper+lower)/2.
\*********************************************************************/
{
  unsigned long middle;
  middle = (upper+lower)/2;
  if (gl_fetch(list,middle) > gl_fetch(list,lower)) {
    /* middle > lower */
    if (gl_fetch(list,upper) > gl_fetch(list,middle)) {
      /* middle > lower && upper > middle */
      return gl_fetch(list,middle);
    }
    else {
      /* middle > lower && middle >= upper */
      if (gl_fetch(list,upper) > gl_fetch(list,lower)) {
        /* middle > lower && middle >= upper && upper > lower */
        return gl_fetch(list,upper);
      }
      else {
        /* middle > lower && middle >= upper && lower >= upper */
        return gl_fetch(list,lower);
      }
    }
  }
  else {
    /* lower >= middle */
    if (gl_fetch(list,upper) > gl_fetch(list,lower)) {
      /* lower >= middle && upper > lower */
      return gl_fetch(list,lower);
    }
    else {
      /* lower >= middle && lower >= upper */
      if (gl_fetch(list,middle) > gl_fetch(list,upper)) {
        /* lower >= middle && lower >= upper && middle > upper */
        return gl_fetch(list,middle);
      }
      else {
        /* lower >= middle && lower >= upper && upper >= middle */
        return gl_fetch(list,upper);
      }
    }
  }
}
#endif
565 |
|
566 |
static
unsigned long gl_choose_pivot(struct gl_list_t *list,
                              unsigned long int lower,
                              unsigned long int upper,
                              CmpFunc func)
/*
 * This function will choose a pivot. It can actually return any
 * number between and including upper and lower. However, the goal
 * is to return a pivot that will not cause Quick Sort to have
 * O(n^2) behavior.
 *
 * The heuristic I am choosing is to pick the middle of three test
 * sites which are upper,lower, and (upper+lower)/2.
 * (median-of-three; returns the chosen POSITION, 1-based, not the value)
 */
{
  unsigned long middle;
  middle = (upper+lower)/2;
  if ((*func)(gl_fetch(list,middle),gl_fetch(list,lower)) > 0) {
    /* middle > lower */
    if ((*func)(gl_fetch(list,upper),gl_fetch(list,middle)) > 0) {
      /* middle > lower && upper > middle */
      return middle;
    }
    else {
      /* middle > lower && middle >= upper */
      if ((*func)(gl_fetch(list,upper),gl_fetch(list,lower)) > 0) {
        /* middle > lower && middle >= upper && upper > lower */
        return upper;
      }
      else {
        /* middle > lower && middle >= upper && lower >= upper */
        return lower;
      }
    }
  }
  else {
    /* lower >= middle */
    if ((*func)(gl_fetch(list,upper),gl_fetch(list,lower)) > 0) {
      /* lower >= middle && upper > lower */
      return lower;
    }
    else {
      /* lower >= middle && lower >= upper */
      if ((*func)(gl_fetch(list,middle),gl_fetch(list,upper)) > 0) {
        /* lower >= middle && lower >= upper && middle > upper */
        return middle;
      }
      else {
        /* lower >= middle && lower >= upper && upper >= middle */
        return upper;
      }
    }
  }
}
620 |
|
621 |
/* do not export. makes rosy assumptions. callers of this function |
622 |
* are responsible for making sure list.sorted is set appropriately. |
623 |
*/ |
624 |
static void gl_swap(struct gl_list_t *list, |
625 |
unsigned long int i, unsigned long int j) |
626 |
{ |
627 |
VOIDPTR temp; |
628 |
temp = gl_fetch(list,i); |
629 |
GL_STORE(list,i,gl_fetch(list,j)); |
630 |
GL_STORE(list,j,temp); |
631 |
} |
632 |
|
633 |
#if LISTIMPLEMENTED
/* this function is probably ok */
/* NOTE(review): compiled out (LISTIMPLEMENTED).  Quicksort of the list
 * by raw pointer value, ascending.  Relational comparison of unrelated
 * element pointers is implementation-defined at best — confirm intent
 * before enabling. */
void gl_upsort(struct gl_list_t *list,
               unsigned long int lower,
               unsigned long int upper)
{
  register unsigned long i,j;
  VOIDPTR pivot_element;
  j = upper;
  i = lower;
  pivot_element = gl_choose_ptrpivot(list,lower,upper);
  do {
    while (pivot_element > gl_fetch(list,i)) i += 1;
    while (gl_fetch(list,j) > pivot_element) j -= 1;
    if (i <= j) {
      register VOIDPTR temp;
      temp = gl_fetch(list,i);
      GL_STORE(list,i,gl_fetch(list,j));
      GL_STORE(list,j,temp);
      i++; j--;
    }
  } while(i <= j);
  if (lower < j) gl_upsort(list,lower,j);
  if (i < upper) gl_upsort(list,i,upper);
}

/* this function is not ok */
/* NOTE(review): references `func`, which is not declared anywhere in
 * this function — this would not compile if LISTIMPLEMENTED were
 * enabled.  Matches the author's own "not ok" assessment. */
void gl_downsort(struct gl_list_t *list,
                 unsigned long int lower,
                 unsigned long int upper)
{
  unsigned long pivot,i,j;
  VOIDPTR pivot_element;
  j = upper;
  i = lower;
  pivot_element = gl_choose_ptrpivot(list,lower,upper);
  do {
    while ((*func)(pivot_element,gl_fetch(list,i))>0) i += 1;
    while ((*func)(gl_fetch(list,j),pivot_element)>0) j -= 1;
    if (i <= j) {
      gl_swap(list,i++,j--);
    }
  } while(i <= j);
  if (lower < j) gl_downsort(list,lower,j);
  if (i<upper) gl_downsort(list,i,upper);
}
#endif
680 |
|
681 |
/*
 * Recursive quicksort of positions [lower..upper] (1-based) ordered
 * by `func` (strcmp-style: negative/zero/positive).  Uses
 * median-of-three pivot selection to avoid O(n^2) on presorted input.
 */
static
void gl_qsort(struct gl_list_t *list,
              unsigned long int lower,
              unsigned long int upper,
              CmpFunc func)
{
  unsigned long pivot,i,j;
  VOIDPTR pivot_element;
  j = upper;
  i = lower;
  pivot = gl_choose_pivot(list,lower,upper,func);
  pivot_element = gl_fetch(list,pivot);
  do {
    /* scan inward from both ends for an out-of-place pair, swap it */
    while ((*func)(pivot_element,gl_fetch(list,i))>0) i += 1;
    while ((*func)(gl_fetch(list,j),pivot_element)>0) j -= 1;
    if (i <= j) {
      gl_swap(list,i++,j--);
    }
  } while(i <= j);
  /* recurse into whichever partitions still hold more than one element */
  if (lower < j) gl_qsort(list,lower,j,func);
  if (i<upper) gl_qsort(list,i,upper,func);
}
703 |
|
704 |
#if LISTIMPLEMENTED
/* ok once its children are ok */
/* NOTE(review): compiled out (LISTIMPLEMENTED).  Sorts by raw pointer
 * value; depends on gl_upsort/gl_downsort, which are themselves flagged
 * as unfinished above. */
void gl_ptr_sort(struct gl_list_t *list, int increasing)
{
  if (GL_LENGTH(list) > 1) {
    if (increasing) {
      gl_upsort(list,1,GL_LENGTH(list));
    } else {
      gl_downsort(list,1,GL_LENGTH(list));
    }
  }
  list->flags |= gsf_SORTED;
}
#endif
718 |
|
719 |
#if LISTIMPLEMENTED
/* broken */
/* NOTE(review): compiled out (LISTIMPLEMENTED).  Unimplemented stub;
 * the parameter list has no parameter names, so this definition would
 * not compile if the region were enabled. */
void gl_insert_ptr_sorted(struct gl_list_t *,VOIDPTR,int)
{
}
#endif
725 |
|
726 |
/*
 * Sort the whole list by `func` (strcmp-style comparator) and set the
 * sorted flag.  Lists of length 0 or 1 are trivially sorted, so only
 * longer lists invoke the quicksort.
 */
void gl_sort(struct gl_list_t *list, CmpFunc func)
{
  unsigned long nitems;

  asc_assert((NULL != list) && (NULL != func));
  nitems = GL_LENGTH(list);
  if (nitems > 1) {
    gl_qsort(list, 1, nitems, func);
  }
  list->flags |= gsf_SORTED;
}
734 |
|
735 |
/*
 * Insert `ptr` into a sorted, expandable list at the position chosen
 * by binary search with comparator `func`, keeping the list sorted.
 * If the list's sorted flag is off, falls back to append-then-sort
 * (with a warning).  Equal elements are inserted after the match.
 */
void gl_insert_sorted(struct gl_list_t *list,
                      VOIDPTR ptr, CmpFunc func)
{
  int comparison;
  register unsigned long lower,upper,search=0L;

  asc_assert((NULL != list) &&
             (0 != gl_expandable(list)) &&
             (NULL != func));
  if (list->flags & gsf_SORTED) {
    if (GL_LENGTH(list)==0) {
      /* gl_append_ptr clears the sorted flag; restore it for a 1-list */
      gl_append_ptr(list,ptr);
      list->flags |= gsf_SORTED;
      return;
    }
    /* grow length first so the shift loops below have a free tail slot */
    if (++list->length > list->capacity) /* expand list capacity */
      gl_expand_list(list);
    lower = 1;
    upper = GL_LENGTH(list)-1;  /* old last element (length was bumped) */
    while (lower<=upper) {
      search = ( lower + upper) >> 1; /* divide by two */
      comparison = (*func)(gl_fetch(list,search),ptr);
      if (comparison==0) {
        /* make room */
        for(lower=GL_LENGTH(list);lower>search;lower--)
          GL_STORE(list,lower,gl_fetch(list,lower-1));
        /* store it */
        GL_STORE(list,search+1,ptr);
        list->flags |= gsf_SORTED;
        return;
      }
      if (comparison < 0) lower = search + 1;
      else upper = search - 1;
    }
    /* no exact match: `search` is the last probe; insert before or
     * after it depending on which side ptr falls */
    if ((*func)(gl_fetch(list,search),ptr) > 0) {
      for(lower=GL_LENGTH(list);lower>search;lower--) {
        GL_STORE(list,lower,gl_fetch(list,lower-1));
      }
      GL_STORE(list,search,ptr);
    }
    else {
      for(lower=GL_LENGTH(list);lower>(search+1);lower--) {
        GL_STORE(list,lower,gl_fetch(list,lower-1));
      }
      GL_STORE(list,search+1,ptr);
    }
    /* GL_STORE clears the sorted flag; we preserved order, so restore it */
    list->flags |= gsf_SORTED;
  }
  else {
    PRINTF(
      "Warning gl_insert_sorted called on unsorted list.\nSorting list.\n");
    gl_append_ptr(list,ptr);
    gl_sort(list,func);
  }
}
790 |
|
791 |
/*
 * Apply `func` to every element of the list, in order.
 * Release builds iterate the raw data array for speed; debug builds
 * assert the arguments and go through the checked gl_fetch accessor.
 */
void gl_iterate(struct gl_list_t *list, void (*func) (VOIDPTR))
{
#ifdef NDEBUG
  register unsigned long length,counter;
  length = GL_LENGTH(list);
  for (counter=0;counter<length;counter++)
    (*func)(list->data[counter]);
#else
  unsigned long length,counter;
  asc_assert(NULL != list);
  asc_assert(NULL != func);
  length = GL_LENGTH(list);
  for (counter=1;counter<=length;counter++)
    (*func)(gl_fetch(list,counter));
#endif
}
807 |
|
808 |
/* this will probably need casts to compile or an intermediate |
809 |
* void *array variable |
810 |
* this function is on the critical path in the compiler, or |
811 |
* should be, so it is highly tweaked here. |
812 |
*/ |
813 |
/*
 * Find `match` in the list by pointer identity and return its 1-based
 * position, or 0 if absent.  On a sorted list longer than SHORTSEARCH
 * a binary search over pointer values is used; `increasing` tells
 * which direction the pointers were sorted in.  Otherwise linear scan.
 * (Per the author: on the compiler's critical path, highly tweaked.)
 */
unsigned long gl_ptr_search(CONST struct gl_list_t *list,
                            CONST VOIDPTR match, int increasing)
{
#define SHORTSEARCH 5
/* if list is short, use linear instead of binary search.
 * SHORTSEARCH is the minimum size for a binary search.
 * SHORTSEARCH must be >=0
 */
  register unsigned long lower,upper,search;

  asc_assert(NULL != list);
  if (!GL_LENGTH(list)) return 0;
  /* NOTE: the condition below deliberately assigns `upper` as a side
   * effect when SHORTSEARCH > 0, saving a second GL_LENGTH read. */
  if ( (list->flags & gsf_SORTED)
#if (SHORTSEARCH > 0)
      && (upper = GL_LENGTH(list)-1) > SHORTSEARCH
#endif
     ) { /* use binary search */
    register long comparison;
    lower = 0;
#if (SHORTSEARCH <= 0)
    upper = GL_LENGTH(list)-1;
#endif
    if (increasing) {
      while (lower <= upper) {
        search = (lower+upper) / 2;
        comparison =
          ((char *)list->data[search]-(char *)match); /* pointer difference */
        if (comparison==0) {
          return (search + 1);
        }
        else if (comparison < 0) {
          lower = search + 1;
        }
        else if (search > 0) {
          /* guard: upper is unsigned, search-1 at 0 would wrap */
          upper = search - 1;
        }
        else {
          return 0;
        }
      }
    } else {
      while (lower <= upper) {
        search = (lower+upper) / 2;
        comparison =
          ((char *)match-(char *)list->data[search]); /* pointer difference */
        if (comparison==0) {
          return (search + 1);
        }
        else if (comparison < 0) {
          lower = search + 1;
        }
        else if (search > 0) {
          upper = search - 1;
        }
        else {
          return 0;
        }
      }
    }
  }
  else { /* use linear search */
    upper = GL_LENGTH(list);
    for(search=0; search < upper; search++) {
      if (list->data[search]==match) return search+1;
    }
  }
  return 0; /* search failed */
}
881 |
|
882 |
/*
 * Find the first position (1-based) whose element compares equal to
 * `match` under `func`; 0 if not found.  Sorted lists get a binary
 * search over 1-based positions; unsorted lists get a linear scan.
 */
unsigned long gl_search(CONST struct gl_list_t *list,
                        CONST VOIDPTR match, CmpFunc func)
{
  register unsigned long lower,upper,search;
#ifdef __alpha
  long comparison;   /* word-sized compare result on alpha */
#else
  int comparison;
#endif
  asc_assert((NULL != list) && (NULL != func));
  if (list->flags & gsf_SORTED) { /* use binary search */
    lower = 1;
    upper = GL_LENGTH(list);  /* empty list: loop never entered */
    while (lower <= upper) {
      search = (lower+upper) / 2;
      comparison = (*func)(gl_fetch(list,search),match);
      if (comparison==0) return search;
      if (comparison < 0) lower = search + 1;
      else upper = search -1;
    }
  }
  else { /* use linear search */
    upper = GL_LENGTH(list);
    for(search=0; search < upper; search++) {
      if ((*func)(list->data[search],match)==0) return search+1;
    }
  }
  return 0; /* search failed */
}
911 |
|
912 |
|
913 |
/*
 * Locate match in list, scanning from the last element backward.
 * Returns the 1-based position of a matching element, or 0 when no
 * element matches.  Lists flagged gsf_SORTED are simply delegated
 * to gl_search (binary search); others get a reverse linear scan.
 *
 * Bug fix: the previous scan started at GL_LENGTH(list)-1, which on
 * an empty list wraps to ULONG_MAX (unsigned underflow) and then
 * reads far out of bounds; it also probed data[0] unconditionally
 * after the loop, another out-of-bounds read for empty lists.  The
 * loop below iterates a 1-based position downward, so an empty list
 * performs no element access at all.
 */
unsigned long gl_search_reverse(CONST struct gl_list_t *list,
		CONST VOIDPTR match, CmpFunc func)
{
  register unsigned long pos;

  asc_assert((NULL != list) && (NULL != func));
  if (list->flags & gsf_SORTED) {	/* use binary search */
    return gl_search(list, match, func);
  }
  /* use linear search, last element first.  pos is 1-based, so the
   * unsigned countdown terminates cleanly at 0.
   */
  for (pos = GL_LENGTH(list); pos > 0; pos--) {
    if ((*func)(list->data[pos-1],match) == 0) {
      return pos;
    }
  }
  return 0;	/* search failed */
}
933 |
|
934 |
|
935 |
/*
 * Return 1 when no two entries of list hold the same pointer value,
 * 0 otherwise.  A NULL list or a list with fewer than two entries is
 * trivially unique.  O(n^2) pairwise comparison of the raw pointers.
 */
int gl_unique_list(CONST struct gl_list_t *list)
{
  unsigned long outer, inner, count;
  VOIDPTR probe;
  VOIDPTR *vec;

  if (list == NULL || GL_LENGTH(list) < 2) {
    return 1;
  }
  count = GL_LENGTH(list);
  vec = list->data;
  for (outer = 0; outer + 1 < count; outer++) {
    probe = vec[outer];
    for (inner = outer + 1; inner < count; inner++) {
      if (vec[inner] == probe) {
        return 0;	/* duplicate pointer found */
      }
    }
  }
  return 1;
}
957 |
|
958 |
/* Return nonzero when list contains no elements. */
int gl_empty(CONST struct gl_list_t *list)
{
  asc_assert(NULL != list);
  return (0 == GL_LENGTH(list));
}
963 |
|
964 |
/*
 * Remove the element at 1-based position pos from list, shifting the
 * tail of the array down by one.  When dispose is nonzero the stored
 * pointer is released with ascfree() first; otherwise the caller
 * retains ownership of it.  pos == 0 is silently ignored.
 */
void gl_delete(struct gl_list_t *list, unsigned long int pos, int dispose)
{
  unsigned long c,length;
  int sorted;
  VOIDPTR ptr;

  asc_assert(NULL != list);
  /* should be ASSERTRANGE(list,pos,"gl_delete");
     The check for (pos > 0) below is suspicious, though.
     It suggests this function may be called with pos == 0. */
  asc_assert(pos <= gl_length(list));

  if (pos > 0) {
    /* remember sortedness: deleting one element cannot unsort a
       sorted list, so the flag is restored after the shift. */
    sorted = (int)(list->flags & gsf_SORTED);
    if (dispose) {
      /* release the element's storage before it is overwritten */
      ptr = gl_fetch(list,pos);
      ascfree(ptr);
    }
    length = GL_LENGTH(list);
#ifdef NDEBUG
    /* release build: shift the tail left by one using raw 0-based
       indexing (no range checks) */
    for (c = pos; c < length; c++) {
      list->data[c-1] = list->data[c];
    }
#else
    /* debug build: shift via the range-checked 1-based macros */
    for (c=pos+1;c<=length;c++) {
      GL_STORE(list,c-1,gl_fetch(list,c));
    }
#endif
    list->length--;
    if (sorted) {
      list->flags |= gsf_SORTED;
    } else {
      /* NOTE(review): the restore dance above suggests GL_STORE may
         clear gsf_SORTED as a side effect — confirm against list.h */
      SORTED_OFF(list);
    }
  }
}
1002 |
|
1003 |
void gl_reverse(struct gl_list_t *list) |
1004 |
{ |
1005 |
VOIDPTR *tmpdata; |
1006 |
unsigned long c,len; |
1007 |
|
1008 |
if (list==NULL || list->length <2L) return; |
1009 |
tmpdata = (VOIDPTR *)ascmalloc(list->capacity*sizeof(VOIDPTR)); |
1010 |
if (tmpdata==NULL) { |
1011 |
Asc_Panic(2, NULL, "gl_reverse out of memory. Bye!\n"); |
1012 |
} |
1013 |
len = list->length; |
1014 |
for (c=0;c < len;c++) { |
1015 |
tmpdata[len - (c+1)] = list->data[c]; |
1016 |
} |
1017 |
ascfree(list->data); |
1018 |
list->data = tmpdata; |
1019 |
} |
1020 |
|
1021 |
void gl_reset(struct gl_list_t *list) |
1022 |
{ |
1023 |
asc_assert(NULL != list); |
1024 |
list->flags = (unsigned int)(gsf_SORTED | gsf_EXPANDABLE); |
1025 |
list->length = 0; |
1026 |
} |
1027 |
|
1028 |
|
1029 |
/*
 * Return a new list holding the same pointers as list, in the same
 * order, with the same flags.  The pointed-to data is shared, not
 * duplicated.  (Local renamed from `new`, a C++ keyword.)
 */
struct gl_list_t *gl_copy(CONST struct gl_list_t *list)
{
  struct gl_list_t *dup;
  unsigned long pos, len;

  asc_assert(NULL != list);

  len = GL_LENGTH(list);
  dup = gl_create(len);
  for (pos = 1; pos <= len; pos++) {
    gl_append_ptr(dup, gl_fetch(list, pos));
  }
  dup->flags = list->flags;
  return dup;
}
1043 |
|
1044 |
/*
 * Return a new list holding the contents of list1 followed by the
 * contents of list2 (pointers are shared, not duplicated).  Neither
 * input is modified.
 */
struct gl_list_t *gl_concat(CONST struct gl_list_t *list1,
		CONST struct gl_list_t *list2)
{
  struct gl_list_t *result;
  unsigned long pos, len1, len2;

  asc_assert ((NULL != list1) && (NULL != list2));

  len1 = GL_LENGTH(list1);
  len2 = GL_LENGTH(list2);
  result = gl_create(len1 + len2);
  for (pos = 1; pos <= len1; pos++) {
    gl_append_ptr(result, gl_fetch(list1, pos));
  }
  for (pos = 1; pos <= len2; pos++) {
    gl_append_ptr(result, gl_fetch(list2, pos));
  }
  return result;
}
1063 |
|
1064 |
/*
 * Lexicographic comparison of two lists by raw pointer value.
 * Returns -1/0/1 in strcmp style.  Elements are compared pairwise
 * over the shared prefix; if that prefix is equal, the shorter list
 * orders first.  Panics on NULL input.
 */
int gl_compare_ptrs(CONST struct gl_list_t *l1, CONST struct gl_list_t *l2)
{
  unsigned long shared,idx;
  register unsigned long a,b;
  if (l1==NULL || l2 == NULL) {
    Asc_Panic(2,"gl_compare_ptrs","Called with NULL input");
  }
  shared = MIN(GL_LENGTH(l1),GL_LENGTH(l2));
  for (idx = 0; idx < shared; idx++) {
    /* pointers compared as integers; order is consistent, if
       otherwise meaningless */
    a = (unsigned long)(l1->data[idx]);
    b = (unsigned long)(l2->data[idx]);
    if (a != b) {
      return (a < b) ? -1 : 1;
    }
  }
  if (GL_LENGTH(l1) == GL_LENGTH(l2)) {
    return 0;
  }
  return (GL_LENGTH(l2) > shared) ? -1 : 1;
}
1086 |
|
1087 |
void gl_set_sorted(struct gl_list_t *list, int TRUE_or_FALSE) |
1088 |
{ |
1089 |
asc_assert(NULL != list); |
1090 |
if (FALSE == TRUE_or_FALSE) |
1091 |
list->flags &= ~gsf_SORTED; |
1092 |
else |
1093 |
list->flags |= gsf_SORTED; |
1094 |
} |
1095 |
|
1096 |
int gl_expandable(struct gl_list_t *list) |
1097 |
{ |
1098 |
asc_assert(NULL != list); |
1099 |
return (int)(list->flags & gsf_EXPANDABLE); |
1100 |
} |
1101 |
|
1102 |
void gl_set_expandable(struct gl_list_t *list, int TRUE_or_FALSE) |
1103 |
{ |
1104 |
asc_assert(NULL != list); |
1105 |
if (FALSE == TRUE_or_FALSE) |
1106 |
list->flags &= ~gsf_EXPANDABLE; |
1107 |
else |
1108 |
list->flags |= gsf_EXPANDABLE; |
1109 |
} |
1110 |
|
1111 |
|
1112 |
/*
 * Return the address of the slot holding element pos (1-based).
 * Range-checked via ASSERTRANGE in debug builds.  Writing through
 * the returned pointer changes the stored element directly.
 */
VOIDPTR *gl_fetchaddr(CONST struct gl_list_t *list,
		unsigned long int pos)
{
  asc_assert(NULL != list);
  ASSERTRANGE(list,pos,"gl_fetchaddr");
  return (VOIDPTR *)(list->data + (pos - 1));
}
1119 |
|
1120 |
|
1121 |
/*
 * Flush the list recycler: for every size class, destroy every
 * cached list, releasing its data array with ascfree() and its
 * header via POOL_FREEHEAD.  When LISTRECYCLERDEBUG is enabled, a
 * usage summary is printed (via PRINTF) before flushing.
 */
void gl_emptyrecycler()
{
  int i;
#if LISTRECYCLERDEBUG
  unsigned long bytecount = 0;  /* bytes held by cached lists */
#endif
  struct gl_list_t *list;

#if LISTRECYCLERDEBUG
  PRINTF("LIST RECYCLER SUMMARY\n");
  PRINTF("=====================\n");
  PRINTF("New lists created : %d\n", NewListsUsed);
  PRINTF("Recycled lists used: %d\n", RecycledListsUsed);
  ListsDestroyedTotal = 0;
  for (i=0;i <=MAXRECYCLESIZE; i++) {
    ListsDestroyedTotal += ListsDestroyed[i];
  }
  PRINTF("Lists destroyed : %d\n", ListsDestroyedTotal);
  PRINTF("Size\tAllowed\tCurr.\tMax\tDestroyed\n");
#endif
  for(i = 0 ; i <= MAXRECYCLESIZE ; i++){
#if LISTRECYCLERDEBUG
    if (HighWaterMark[i] >0) {
      PRINTF("%d\t%d\t%d\t%d\t%d\n",i,AllowedContents[i], RecycledContents[i],
             HighWaterMark[i], ListsDestroyed[i]);
    }
    bytecount += sizeof(struct gl_list_t)*RecycledContents[i];
    bytecount += sizeof(void *)*i*RecycledContents[i];
#endif
    while (RecycledContents[i] > 0){
      /* pop a list off the LIFO list; data[0] of a cached list
         links to the next cached list of the same size */
      list = RecycledList[i];
      RecycledList[i] = (struct gl_list_t *)list->data[0];
#ifndef NDEBUG
      list->data[0] = NULL;
#endif
      --RecycledContents[i];
      ascfree(list->data);
#ifndef NDEBUG
      /* poison the header in debug builds to catch use-after-free */
      list->data = NULL;
      list->capacity = list->length = 0;
#endif
      POOL_FREEHEAD(list);
    }
    asc_assert(RecycledList[i]==NULL); /* someone forgot to empty the last one */
  }
#if LISTRECYCLERDEBUG
  bytecount += MAXRECYCLESIZE*sizeof(void *);
  PRINTF("Total bytes:\t%lu\n",bytecount);
#endif
}
1172 |
|
1173 |
void gl_reportrecycler(FILE *fp) |
1174 |
{ |
1175 |
int i; |
1176 |
unsigned long bytecount = 0; |
1177 |
#if !LISTRECYCLERDEBUG |
1178 |
(void) fp; |
1179 |
#endif |
1180 |
|
1181 |
#if LISTRECYCLERDEBUG |
1182 |
asc_assert(NULL != fp); |
1183 |
FPRINTF(fp,"LIST RECYCLER SUMMARY\n"); |
1184 |
FPRINTF(fp,"=====================\n"); |
1185 |
FPRINTF(fp,"New lists created : %d\n", NewListsUsed); |
1186 |
FPRINTF(fp,"Recycled lists used: %d\n", RecycledListsUsed); |
1187 |
for (i=0;i <=MAXRECYCLESIZE; i++) { |
1188 |
ListsDestroyedTotal += ListsDestroyed[i]; |
1189 |
} |
1190 |
FPRINTF(fp,"Lists destroyed : %d\n", ListsDestroyedTotal); |
1191 |
FPRINTF(fp,"Size\tAllowed\tCurr.\tMax\tDestroyed\n"); |
1192 |
#endif |
1193 |
|
1194 |
for(i = 0 ; i <= MAXRECYCLESIZE ; i++){ |
1195 |
#if LISTRECYCLERDEBUG |
1196 |
if (HighWaterMark[i] >0) { |
1197 |
FPRINTF(fp,"%d\t%d\t%d\t%d\t%d\n",i,AllowedContents[i], |
1198 |
RecycledContents[i], HighWaterMark[i], ListsDestroyed[i]); |
1199 |
bytecount += sizeof(struct gl_list_t)*RecycledContents[i]; |
1200 |
bytecount += sizeof(void *)*i*RecycledContents[i]; |
1201 |
} |
1202 |
#else |
1203 |
bytecount += sizeof(struct gl_list_t)*RecycledContents[i]; |
1204 |
bytecount += sizeof(void *)*RecycledContents[i]*i; |
1205 |
#endif |
1206 |
} |
1207 |
#if LISTRECYCLERDEBUG |
1208 |
bytecount += MAXRECYCLESIZE*sizeof(void *); |
1209 |
FPRINTF(fp,"Total bytes:\t%lu\n",bytecount); |
1210 |
#endif |
1211 |
} |