/*
 *  Memory module
 *  by Karl Westerberg, Ben Allan
 *  Created: 6/90
 *  Version: $Revision: 1.5 $
 *  Version control file: $RCSfile: mem.h,v $
 *  Date last modified: $Date: 1997/07/18 12:04:22 $
 *  Last modified by: $Author: mthomas $
 *
 *  This file is part of the Ascend Language Interpreter.
 *
 *  Copyright (C) 1997 Carnegie Mellon University
 *
 *  The Ascend Language Interpreter is free software; you can redistribute
 *  it and/or modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2 of the
 *  License, or (at your option) any later version.
 *
 *  The Ascend Language Interpreter is distributed in hope that it will be
 *  useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with the program; if not, write to the Free Software Foundation,
 *  Inc., 675 Mass Ave, Cambridge, MA 02139 USA.  Check the file named
 *  COPYING.  COPYING is in ../compiler.
 */

/** @file
 *  Memory module.
 *  <pre>
 *  Contents:     Memory module
 *
 *  Authors:      Karl Westerberg
 *                Benjamin Andrew Allan
 *
 *  Dates:        06/90 - original version
 *                08/94 - cleanup with bzero, bcopy, and corrected casting.
 *                        BAA, JJZ
 *                05/95 - add nearly generic element allocation routines.
 *                        BAA
 *
 *  Description:  It is convenient for pointers and ints to be the same
 *                size, and most C compilers support this.  However for
 *                many machines (particularly ones with 16-bit words),
 *                there is more RAM available than can be accessed by a
 *                single int, so standard C pointers may only be able to
 *                point to a limited region of memory.  The matter is
 *                further complicated by the fact that the pointer to
 *                address conversion rules may be different for pointers
 *                to functions (code) than for pointers to other objects
 *                (data).  This module attempts to partially circumvent
 *                these obstacles, by allowing the user to address memory
 *                with long ints.
 *
 *                As you can see, the above was written in the bad old
 *                days pre-ANSI/STDC.
 *                This file now serves to isolate bzero, bcopy, etc
 *                from solver code should they become nonstandard.
 *
 *                The generic element memory manager is yet another
 *                implementation of a fixed byte size malloc system
 *                that is more efficient than most standard mallocs.  This
 *                is possible because we make the fixed size assumption.
 *                The idea is clearly not original.
 *                For best results, you must understand your memory
 *                usage pattern and tune the mem_create_store() parameters
 *                accordingly.  See mem_create_store() for more information.
 *
 *  Detailed Description of Memory Manager:
 *
 *  The following definitions provide a generic and reasonably efficient memory
 *  allocation system for situations where many many objects of the same size
 *  need to be "allocated and deallocated" rapidly and stored efficiently:
 *  that is to say when normal malloc is too slow and expensive.
 *  This scheme does not play reference count games.  Instead any elements that
 *  have been created and then used and subsequently "freed" go on a list
 *  and are handed out again at the next opportunity.  The list is run LIFO.
 *  The list is associated with the mem_store_t.
 *
 *  There is one restriction on the elements: they will be a multiple of the
 *  size of a pointer, even if you specify otherwise.  If this is too large,
 *  write your own allocator.  Think about how your elements align.  On many
 *  architectures doubles should only be stored at addresses that are
 *  multiples of 8 bytes.
 *  There is the implicit restriction on a store that it must contain
 *  no more than MAXINT elements.  If this is too small, write your own
 *  allocator.
 *
 *  Even More Details
 *
 *  If you specify an element size that is not a nice multiple of your machine
 *  word length, you are *very* likely to get data alignment (bus) errors.
 *  E.g., if your element is not a convenient multiple of
 *  sizeof(double) in size, double data are likely to die.
 *
 *  The allocation scheme looks like so:
 *  [struct mem_store_header | ]
 *            __________________|
 *            |
 *            V
 *   pool _____________________________________________________
 *       | b | b | b | b | b | b | b | b | b | b | b | b | b |
 *        -----------------------------------------------------
 *  where each b is a pointer to an array of size 'width' of elements.
 *
 *  The size characteristics of this scheme are tunable at run time so that
 *  it can be scaled well when the number of elements and likely amount
 *  of expansion required are known.
 *
 *  Requires: #include <stdio.h>
 *            #include "utilities/ascConfig.h"
 *  </pre>
 *  @todo Do we need to maintain the seemingly duplicate memory utilities and
 *        pooled allocators of utilities/ascMalloc.c and utilities/mem.c?  These
 *        look like they could be consolidated into a single module.
 *
 */
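
/*
 *  Example (editor's illustrative sketch, not part of the original header):
 *  a typical create/get/free/destroy cycle for fixed-size elements.  The
 *  struct 'point3' and all parameter values below are hypothetical; tune
 *  them to your own usage pattern.
 *
 *      struct point3 {double x, y, z;};
 *      mem_store_t ms;
 *      struct point3 *p;
 *
 *      ms = mem_create_store(100, 84, sizeof(struct point3), 100, 10);
 *      if (ms != NULL) {
 *        p = (struct point3 *)mem_get_element(ms);   get a raw, uninitialized element
 *        if (p != NULL) {
 *          p->x = p->y = p->z = 0.0;                 caller initializes it
 *          mem_free_element(ms, (void *)p);          hand it back for LIFO reuse
 *        }
 *        mem_destroy_store(ms);                      release all memory in the store
 *      }
 */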

#ifndef mem_NULL

#define mem_NULL NULL
#define mem_code_NULL NULL
#define mem_address(ptr) ((long)(ptr))
#define mem_code_address(ptr) ((long)(ptr))

#define mem_move_cast(from,too,nbytes) \
  mem_move((POINTER)(from),(POINTER)(too),(size_t)(nbytes))
/**<
 *  Copies nbytes of data from memory location from to memory location too.
 *  The memory regions can be overlapping.
 *
 *  @param from   Pointer to memory to copy from.
 *  @param too    Pointer to memory to receive copy.
 *  @param nbytes The number of bytes to copy (size_t).
 *  @return No return value.
 *  @see mem_move()
 */

extern void mem_move(POINTER from, POINTER too, size_t nbytes);
/**<
 *  Implementation function for mem_move_cast().  Do not call this
 *  function directly - use mem_move_cast() instead.
 */

#define mem_copy_cast(from,too,nbytes) \
  mem_move_disjoint((POINTER)(from),(POINTER)(too),(size_t)(nbytes))
/**<
 *  Copies nbytes of data from memory location from to memory location too.
 *  The memory regions can NOT be overlapping.
 *
 *  @param from   Pointer to memory to copy from.
 *  @param too    Pointer to memory to receive copy.
 *  @param nbytes The number of bytes to copy (size_t).
 *  @return No return value.
 *  @see mem_move_disjoint()
 */

extern void mem_move_disjoint(POINTER from, POINTER too, size_t nbytes);
/**<
 *  Implementation function for mem_copy_cast().  Do not call this
 *  function directly - use mem_copy_cast() instead.
 */
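
/*
 *  Example (editor's illustrative sketch): the two copy macros differ only in
 *  whether the source and destination regions may overlap.  The arrays 'a'
 *  and 'b' are hypothetical.
 *
 *      double a[10], b[10];
 *      mem_copy_cast(a, b, 10*sizeof(double));      disjoint copy of a into b
 *      mem_move_cast(a, a+1, 9*sizeof(double));     overlapping shift within a
 */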

#define mem_repl_byte_cast(too,byte,nbytes) \
  mem_repl_byte((POINTER)(too),(unsigned)(byte),(size_t)(nbytes))
/**<
 *  Replaces nbytes of data at memory location too with byte.
 *
 *  @param too    Pointer to start of block to be modified.
 *  @param byte   The character to write (unsigned int).
 *  @param nbytes The number of bytes to modify (size_t).
 *  @return No return value.
 *  @see mem_repl_byte()
 */

extern void mem_repl_byte(POINTER too, unsigned byte, size_t nbytes);
/**<
 *  Implementation function for mem_repl_byte_cast().  Do not call this
 *  function directly - use mem_repl_byte_cast() instead.
 */

#define mem_zero_byte_cast(too,byte,nbytes) \
  mem_zero_byte((POINTER)(too),(unsigned)(byte),(size_t)(nbytes))
/**<
 *  Zeroes nbytes of data at memory location too.
 *  byte is ignored - it is a placeholder for mem_repl_byte()
 *  substitutability.
 *
 *  @param too    Pointer to start of block to be modified.
 *  @param byte   Ignored (unsigned).
 *  @param nbytes The number of bytes to zero (size_t).
 *  @return No return value.
 *  @see mem_zero_byte()
 */

extern void mem_zero_byte(POINTER too, unsigned byte, size_t nbytes);
/**<
 *  Implementation function for mem_zero_byte_cast().  Do not call this
 *  function directly - use mem_zero_byte_cast() instead.
 */

#define mem_repl_word_cast(too,word,nwords) \
  mem_repl_word((POINTER)(too),(unsigned)(word),(size_t)(nwords))
/**<
 *  Replaces nwords of data at memory location too with word.
 *
 *  @param too    Pointer to start of block to be modified.
 *  @param word   The word to write (unsigned).
 *  @param nwords The number of words to modify (size_t).
 *  @return No return value.
 *  @see mem_repl_word()
 */

extern void mem_repl_word(POINTER too, unsigned word, size_t nwords);
/**<
 *  Implementation function for mem_repl_word_cast().  Do not call this
 *  function directly - use mem_repl_word_cast() instead.
 */
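
/*
 *  Example (editor's illustrative sketch): filling and zeroing buffers with
 *  the replace/zero macros.  'buf' and 'words' are hypothetical; note that
 *  mem_repl_word_cast() takes a count in words, not bytes.
 *
 *      char buf[256];
 *      unsigned words[64];
 *      mem_repl_byte_cast(buf, 0xff, sizeof(buf));    set every byte of buf to 0xff
 *      mem_zero_byte_cast(buf, 0, sizeof(buf));       zero buf (second argument is ignored)
 *      mem_repl_word_cast(words, 42u, 64);            set all 64 words to 42
 */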

/* the following are pretty much a monument to Karl. */
#if 0
extern int mem_get_byte(long from);               /**< Returns the byte located at from. */
#endif
extern unsigned char mem_get_byte(long from);     /**< Returns the byte located at from. */
extern int mem_get_int(long from);                /**< Returns the int located at from. */
extern long mem_get_long(long from);              /**< Returns the long located at from. */
extern double mem_get_float(long from);           /**< Returns the float located at from. */
extern double mem_get_double(long from);          /**< Returns the double located at from. */
extern void mem_set_byte(long from, int b);       /**< Sets the byte located at from. */
extern void mem_set_int(long from, int i);        /**< Sets the int located at from. */
extern void mem_set_long(long from, long l);      /**< Sets the long located at from. */
extern void mem_set_float(long from, double f);   /**< Sets the float located at from. */
extern void mem_set_double(long from, double d);  /**< Sets the double located at from. */

#define mem_get_unsigned(from) ((unsigned)mem_get_int(from))
/**< Returns the unsigned located at from. */
#define mem_set_unsigned(from,u) mem_set_int(from,(int)u)
/**< Sets the unsigned located at from. */

/*---------------------------------------------------------------------------
  The following definitions provide a generic and reasonably efficient memory
  allocation system for situations where many many objects of the same size
  need to be "allocated and deallocated" rapidly and stored efficiently:
  that is to say when normal malloc is too slow and expensive.
  This scheme does not play reference count games.  Instead any elements that
  have been created and then used and subsequently "freed" go on a list
  and are handed out again at the next opportunity.  The list is run LIFO.
  The list is associated with the mem_store_t.

  There is one restriction on the elements: they will be a multiple of the
  size of a pointer, even if you specify otherwise.  If this is too large,
  write your own allocator.
  There is the implicit restriction on a store that it must contain
  no more than MAXINT elements.  If this is too small, write your own allocator.

  Note for the intelligent:
  If you specify an element size that is not a nice multiple of your machine
  word length, you are *very* likely to get data alignment (bus) errors.
  E.g., if your element is not a convenient multiple of sizeof(double) in size,
  double data are likely to die.

  The allocation scheme looks like so:
  [struct mem_store_header | ]
            __________________|
            |
            V
   pool _____________________________________________________
       | b | b | b | b | b | b | b | b | b | b | b | b | b |
        -----------------------------------------------------
  where each b is a pointer to an array of size 'width' of elements.

  The size characteristics of this scheme are tunable at run time so that
  it can be scaled well when the number of elements and likely amount
  of expansion required are known.
---------------------------------------------------------------------------*/

typedef struct mem_store_header *mem_store_t;
/**<
 *  The token for this memory system.  Internal details of the
 *  implementation are private.  Do not dereference or free this
 *  pointer.
 */

/**
 *  Memory statistics data structure.
 *  This is the reporting structure for a mem_store_header query.
 */
struct mem_statistics {
  double m_eff;        /**< bytes in use / bytes allocated */
  double m_recycle;    /**< avg reuses per element */
  int elt_total;       /**< current elements existing in store */
  int elt_taken;       /**< fresh elements handed out */
  int elt_inuse;       /**< elements the user currently has */
  int elt_onlist;      /**< elements awaiting reuse */
  int elt_size;        /**< bytes/element, as mem sees it */
  int str_len;         /**< length of active pool. */
  int str_wid;         /**< elements/pointer in pool. */
};

extern void mem_get_stats(struct mem_statistics *m_stats, mem_store_t ms);
/**<
 *  Get statistics about a memory store.
 *  Stuffs the user's interface structure, m_stats, with info
 *  derived from the ms given.
 *  If mem_LIGHTENING (see below) is TRUE, no statistics
 *  except elt_total, elt_taken, elt_onlist, and elt_size are
 *  available.
 *
 *  @param m_stats Pointer to a mem_statistics struct to receive
 *                 the info.  If m_stats is NULL, an error message
 *                 is printed and the function returns.
 *  @param ms      Pointer to the memory store to query.
 */
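
/*
 *  Example (editor's illustrative sketch): querying a store and reporting a
 *  few of its statistics.  Assumes 'ms' is a valid mem_store_t and that
 *  mem_LIGHTENING is FALSE so all fields are filled in.
 *
 *      struct mem_statistics s;
 *      mem_get_stats(&s, ms);
 *      printf("efficiency %.2f, %d of %d elements in use\n",
 *             s.m_eff, s.elt_inuse, s.elt_total);
 */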

extern mem_store_t mem_create_store(int length, int width, size_t eltsize,
                                    int deltalen, int deltapool);
/**<
 *  Creates and returns a new memory store.  The returned mem_store_t
 *  contains all the necessary accounting information, but in particular
 *  the eltsize is fixed at creation.  All elements requested from ms
 *  will be pointers to eltsize bytes of memory.
 *  Returns NULL if a store of the requested length*width*eltsize
 *  cannot be initially allocated.<br><br>
 *
 *  The user may request more than length*width elements from the store:
 *  this will cause it to grow.  It will grow (internally) in chunks of
 *  deltalen*width elements.  The pool vector above grows in chunks of
 *  deltapool, the extra pointers in it being NULL until needed.<br><br>
 *
 *  @param length    The initial number of width*eltsize blocks that the
 *                   new store will contain.  If length < 1, an error
 *                   message is printed and NULL is returned.
 *
 *  @param width     Number of elements in each block.
 *                   Width should (for some architectures) be such that
 *                   width*eltsize = 2^n - 32 for some fairly large n
 *                   (heuristic: n = 9..13).  Widths that are too large may
 *                   be prone to causing excess page faults, though the
 *                   process cpu time reported by clock() can be much
 *                   better for extremely large sizes.  If width < 1, an
 *                   error message is printed and NULL is returned.<br><br>
 *                   Widths that are too small will result in an excessive
 *                   number of pool expansions, which may severely limit
 *                   performance on some VM systems.  See deltapool below
 *                   about pool expansions.<br><br>
 *                   If you know something about the page size of your
 *                   architecture, fiddling with width may help you reduce
 *                   your page fault or cache miss count in some uses.
 *
 *  @param eltsize   Element size maintained by the pool.
 *                   For maximum efficiency, eltsize should be an integer
 *                   multiple of sizeof(void *).  If it is not, elts will be
 *                   padded so that this is the case.  This is to avoid
 *                   pointer data misalignment.  This restriction may or
 *                   may not help avoid alignment problems with items inside
 *                   the user's element structure.
 *
 *  @param deltalen  Number of additional pointers in the pool that will be
 *                   allocated when more elements are needed than are
 *                   available internally.  deltalen must be at least 1 or
 *                   creation of the new pool will fail.
 *
 *  @param deltapool Size change of the pool array when expanded.  It should
 *                   be as large as you are willing to tolerate.  The pool
 *                   array starts out completely filled (all pointers allocated).
 *                   When the pool needs more pointers it gets them in chunks
 *                   of at least deltapool.  These additional pointers will not
 *                   automatically have elements allocated to them; rather,
 *                   they will be initialized to NULL and filled in only as the
 *                   chunks of deltalen*width elements are required.
 *
 *  @return A pointer to the newly created store, or NULL if an error occurred.
 */
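
/*
 *  Example (editor's illustrative sketch): picking width from the heuristic
 *  width*eltsize = 2^n - 32.  The element size and all numbers here are
 *  hypothetical.  For a 24-byte element (already a multiple of sizeof(void *)
 *  on common 32- and 64-bit systems) and n = 11:
 *
 *      2^11 - 32 = 2016,  and  2016 / 24 = 84,  so choose width = 84
 *
 *      ms = mem_create_store(10, 84, 24, 10, 10);    10*84 elements initially
 */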

extern void *mem_get_element(mem_store_t ms);
/**<
 *  Get a usable element from the pool.
 *  Returns a void pointer to a blob of memory of the eltsize
 *  set when ms was created.  You must cast it appropriately.
 *  The blob data is not initialized in any particular way.
 *
 *  @param ms The store from which to retrieve an element.
 *            If ms is NULL, then an error message is printed
 *            and NULL is returned.
 *  @return A pointer to the usable element, or NULL iff ms is NULL or
 *          store growth is required and the operating system is unable
 *          to allocate the required memory.
 */

extern void mem_get_element_list(mem_store_t ms, int len, void **ellist);
/**<
 *  NOT IMPLEMENTED.
 *
 *  Takes the pointer array, ellist, of length len provided by the user
 *  and fills it with pointers to elements from the store.
 *  There is not necessarily any relation (memory map wise) between the
 *  locations pointed to by successive entries in the ellist returned.
 *  Ellist should point to an array with enough space for len pointers.
 *  Returns NULL in ellist[0] iff store growth is required and the operating
 *  system is unable to allocate the required memory.<br><br>
 *
 *  The user is reminded that if he knows how many elements he needs
 *  ahead of time, he is probably better off mallocing the array himself.
 *
 *  @todo Implement utilities/mem.c:mem_get_element_list() or remove
 *        it from mem.h.
 */

#define mem_DEBUG FALSE
/**<
 *  Flag controlling extra checking of the pool management routines.
 *  Setting mem_DEBUG to TRUE causes the mem_store routines to do
 *  some RATHER expensive checking.  It should be set to FALSE.
 */
#define mem_LIGHTENING FALSE
/**<
 *  Flag controlling extent of internal sanity checking.
 *  Setting mem_LIGHTENING to TRUE causes mem_store routines to assume
 *  the user is perfect: i.e. no sanity checks are at all necessary and most
 *  internal accounting can be disabled.  No one with an ounce of sanity
 *  would ever set this flag to TRUE unless the code using the
 *  mem module was proven bug free.  It makes the allocator smaller
 *  and faster, though, by ~15%.<br><br>
 *
 *  This flag exists to make it easy to test the theory that the
 *  accounting overhead in this code is not of significant cost.
 *  Below 1e5 elements it really isn't bad enough to justify the
 *  assumption that the user is perfect.
 */

extern void mem_free_element(mem_store_t ms, void *eltpointer);
/**<
 *  Releases an element back to the store.
 *  If you return the same pointer twice, we will have
 *  no qualms about returning it to you twice.  We won't necessarily
 *  return it to you twice, though.<br><br>
 *
 *  If mem_DEBUG is TRUE, eltpointer will be checked for belonging
 *  to ms.  If you call mem_free_element() with a pointer the ms does
 *  not recognize, it will not be freed and a message will be
 *  sent to ASCERR.<br><br>
 *
 *  If mem_DEBUG is FALSE, eltpointer will be assumed to belong
 *  with the ms in question.  The implications of handing
 *  mem_free_element() an element of the wrong size or from the
 *  wrong ms (bearing in mind the LIFO reuse of elements) should be
 *  obvious.  If they are not, stop using these routines.<br><br>
 *
 *  If at any time the number of elements freed exceeds the number
 *  handed out, we will whine (unless mem_LIGHTENING).  If ms is
 *  NULL, an error message is printed and the function returns.
 *  If eltpointer is NULL, we will ignore it completely.
 *
 *  @param ms          The memory store to modify.
 *  @param eltpointer  The element to return to the pool.
 *  @return No return value.
 */
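
/*
 *  Example (editor's illustrative sketch): because freed elements are reused
 *  LIFO, the pointer most recently handed back is typically the next one
 *  handed out again.  'ms' and 'p' are hypothetical.
 *
 *      void *p = mem_get_element(ms);
 *      mem_free_element(ms, p);       p may be handed out again by the next get
 *      mem_free_element(ms, NULL);    a NULL eltpointer is silently ignored
 */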

extern void mem_clear_store(mem_store_t ms);
/**<
 *  Clears the books in ms.  That is, we reset the ms to think
 *  that __all__ elements are freshly available and have never
 *  been handed out.  If ms is NULL, an error message is printed
 *  and the function returns.<br><br>
 *
 *  If mem_DEBUG is TRUE, it first verifies that all elements have
 *  been mem_freed and whines if not.
 *  Get and free calls will be balanced to see if spurious elements
 *  have been handed in.  (This is a heuristic check.)
 *  The clear process will cause any spurious pointers that were
 *  turned in via mem_free_element() to be forgotten about.<br><br>
 *
 *  Clearing a store is not necessary before mem_destroy_store().
 *  Recycling is faster from the recycle list than from a cleared store, by ~2%.
 *  Clear is provided for users who want to obtain elements with a higher
 *  probability of successive elements being near each other.
 *
 *  @param ms The memory store to clear.
 *  @return No return value.
 */

extern void mem_destroy_store(mem_store_t ms);
/**<
 *  Deallocates everything associated with the ms.
 *  If mem_DEBUG is TRUE, it first verifies that all elements
 *  have been mem_freed and whines if not.
 *  If mem_DEBUG is FALSE, it just nukes everything unconditionally.
 *  If ms is NULL, an error message is printed and the function
 *  returns.
 *
 *  @param ms The memory store to destroy.
 */

extern void mem_print_store(FILE *fp, mem_store_t ms, unsigned detail);
/**<
 *  Prints statistics about a mem_store_t to the file stream given.
 *  Which stats get printed depends on detail.
 *  - If detail is 0, displays just summary statistics.
 *  - If detail is 1, displays just internal statistics.
 *  - If detail is >1, displays both.
 *
 *  @param fp     The open file stream on which to print the report.
 *  @param ms     The memory store on which to report.
 *  @param detail The level of detail to print:
 *                0 = summary, 1 = internal stats, >1 = both.
 */

extern size_t mem_sizeof_store(mem_store_t ms);
/**<
 *  Retrieves the current total byte usage of the store.
 *  Returns 0 if an invalid pool store is specified.
 *
 *  @param ms The memory store to query.
 *  @return The total bytes currently used by the memory store.
 */
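
/*
 *  Example (editor's illustrative sketch): reporting on a store.  'ms' is a
 *  hypothetical valid mem_store_t and the report goes to stdout.
 *
 *      mem_print_store(stdout, ms, 2);      summary plus internal statistics
 *      printf("store currently occupies %lu bytes\n",
 *             (unsigned long)mem_sizeof_store(ms));
 */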

#endif  /* mem_NULL */