Bug Summary

File: subcmd/unitools/store.c
Warning: line 705, column 23
Dereference of null pointer

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name store.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/home/kfp/aldor/aldor/aldor/subcmd/unitools -fcoverage-compilation-dir=/home/kfp/aldor/aldor/aldor/subcmd/unitools -resource-dir /usr/local/lib/clang/18 -D PACKAGE_NAME="aldor" -D PACKAGE_TARNAME="aldor" -D PACKAGE_VERSION="1.4.0" -D PACKAGE_STRING="aldor 1.4.0" -D PACKAGE_BUGREPORT="aldor@xinutec.org" -D PACKAGE_URL="" -D PACKAGE="aldor" -D VERSION="1.4.0" -D YYTEXT_POINTER=1 -D HAVE_STDIO_H=1 -D HAVE_STDLIB_H=1 -D HAVE_STRING_H=1 -D HAVE_INTTYPES_H=1 -D HAVE_STDINT_H=1 -D HAVE_STRINGS_H=1 -D HAVE_SYS_STAT_H=1 -D HAVE_SYS_TYPES_H=1 -D HAVE_UNISTD_H=1 -D STDC_HEADERS=1 -D HAVE_LIBREADLINE=1 -D HAVE_READLINE_READLINE_H=1 -D HAVE_READLINE_HISTORY=1 -D HAVE_READLINE_HISTORY_H=1 -D USE_GLOOP_SHELL=1 -D GENERATOR_COROUTINES=0 -D HAVE_DLFCN_H=1 -D LT_OBJDIR=".libs/" -I . -I ./../../src -I ../../src -internal-isystem /usr/local/lib/clang/18/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -ferror-limit 19 -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2026-01-15-223856-845667-1 -x c store.c
1/*****************************************************************************
2 *
3 * store.c: Storage management.
4 *
5 * Copyright (c) 1990-2007 Aldor Software Organization Ltd (Aldor.org).
6 *
7 ****************************************************************************/
8
9/*
10 * Select one of:
11 * STO_USE_BTREE B-Tree based quick fit.
12 * STO_USE_ONCE Don't look back.
13 * STO_USE_MALLOC Based on malloc/free.
14 *
15 *
16 * For STO_USE_BTREE, select
17 * any of:
18 * STO_TALLY Keep storage use statistics.
19 * STO_LONGJMP Error handler can return when StoErr_CantBuild occurs.
20 * STO_WATCH Trace allocations and deallocations.
21 * STO_CERTIFY Trace tests controlling if statements, etc.
22 * USE_MEMORY_CLIMATE To over-ride the setting of stoAlloc codes.
23 * one of:
24 * STO_NEW_JUNK Newly allocated store contains any old junk.
25 * STO_NEW_AAAA Newly allocated store contains 0xAAAAAA...
26 * STO_NEW_ZERO Newly allocated store contains zeros.
27 * and one of:
28 * STO_FREE_JUNK Free storage contains any old junk.
29 * STO_FREE_DDDD Free storage contains 0xDDDDDD...
30 *
31 * For debugging support define STO_DEBUG_DISPLAY. This will cause the
32 * garbage collector to check for the GC_DETAIL and GC_CLASSIFY environment
33 * variables when the first GC takes place.
34 *
35 * The value of GC_DETAIL is used to control the output of stoShow(). The
36 * existence of GC_CLASSIFY determines whether or not tables showing the
37 * classification of pointers are displayed.
38 *
39 * By default blacklisting is not enabled since the current implementation
40 * doesn't have much effect (for good or ill). Define STO_CAN_BLACKLIST to
41 * enable it and then set the GC_BLACKLIST environment variable to actually
42 * use it at runtime.
43 *
 44 * To speed up the computation of quantum indices in fixed-size sections
 45 * whose size is not an integral power of two, we use division via a lookup
46 * table. Enable this by defining STO_DIVISION_BY_LOOKUP and suffer a
47 * penalty of a 48K table in static data.
48 */
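A minimal illustration (not part of store.c) of how the switches described above combine; the particular selection below is a hypothetical example that picks one allocator plus one option from each group:

/* Hypothetical build configuration, e.g. supplied as -D flags or defined
 * before this file is compiled: B-tree allocator with usage statistics,
 * 0xAA fill for freshly allocated store, 0xDD fill for freed store.	*/
#define STO_USE_BTREE
#define STO_TALLY
#define STO_NEW_AAAA
#define STO_FREE_DDDD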
49
50#include "debug.h"
51#include "opsys.h"
52#include "store.h"
53#include "timer.h"
54
55/*
 56 * If no other allocator is specified, use the B-Tree based one by default.
57 */
58#if !defined(STO_USE_BTREE)&& !defined(STO_USE_ONCE)&& !defined(STO_USE_MALLOC) && !defined(STO_USE_BOEHM)
59# if __APPLE__
60# define STO_USE_ONCE 1
61# elif 0 && !defined(FOAM_RTS)
62# define STO_USE_MALLOC
63# else
64# define STO_USE_BTREE
65# endif
66#endif
67
68#include "axlgen0.h"
69#include "btree.h"
70#include "memclim.h"
71#include "util.h"
72
73/* this will create a debug version of GC that should be safer */
74#undef NDEBUG
75
76/*
77 * User programs can make limited checks on pointers to ensure
78 * that they do not attempt to write to areas of memory that
79 * are writable. Although we could give the user a much more
80 * precise result (such as this object is a free object on the
81 * heap) we choose not to.
82 */
 83#define POINTER_IS_INVALID	(-1)
 84#define POINTER_IS_UNKNOWN	( 0)
 85#define POINTER_IS_VALID	( 1)
86
87
88/*===========================================================================*/
89
90#ifdef STO_USE_BTREE
91
92#if defined(OS_WIN32)
93#include <windows.h>
94#endif
95
96
97/*
98 * B-Tree based allocator.
99 *
100 *
101 * At the coarsest level, memory has "page" granularity.
102 * Memory is used in contiguous sequences of pages, called "sections".
103 *
104 * The pages of this memory manager need not bear any relation to those
105 * of the operating system. If the definitions coincide, however, then
 106 * virtual memory performance should be better.
107 *
108 * Some pages contain pieces which must all be the same size. These pieces
109 * are called "fixed" pieces. Other pages do not dictate the size of the
110 * pieces they contain. These pieces are called "mixed". There are also
111 * other types of pages for foreign inclusions and structures used by the
112 * allocator. A page map keeps track of how each page is used.
113 *
 114 * A section of one or more pages is laid out as follows:
115 * +-----------------------+--- - - - - - - - ---+-----------------------+
116 * | first page | | last page |
117 * +----++++ - ++++-----+--+--+ - - - - - - - +--+--+--+--+ - - +--+--+--+
118 * |head| info | gap | data |
119 * +----++++ - ++++-----+--+--+ - - - - - - - +--+--+--+--+ - - +--+--+--+
120 * Note that the data quanta are aligned at the last page boundary.
121 *
122 * Keeping info separate from data requires less space but more time.
123 * Space is saved since no padding is needed to align the data pointer.
124 * Finding the info for a piece takes more time, though.
125 *
126 * Compared to having a single tag table, keeping a tag table per section
127 * requires more memory access for tag lookup. But a single tag table must be
128 * relocated when memory grows and will be much larger if pages are sparse.
129 *
130 * The fixed pieces which are not in use are kept in free lists, one for each
131 * fixed size.
132 *
133 * The handling of free mixed pieces is somewhat more complicated.
134 * The complications are introduced by the competing criteria that requests
135 * be satisfied quickly, and that storage be used as efficiently as possible.
136 *
 137 * To use storage efficiently, fragmentation must be avoided. Three steps
138 * toward achieving this goal are
139 * (1) when a mixed piece is freed, it is merged with any adjacent mixed free
140 * pieces to form a larger piece,
141 * (2) requests are satisfied using a "best fit" strategy, and
142 * (3) among best fits, the one with the lowest address is used.
143 *
144 * List-based implementations of best fit are slow so to achieve the speed
145 * objective, the free pieces are sorted by size in a B-tree.
146 * The keys into the B-tree are the sizes and the entries are doubly linked
147 * lists of same-size pieces.
148 *
149 * Using doubly linked lists means that the tree need be modified only when
150 * a new size is introduced or removed. This benefit is enhanced by rounding
151 * all mixed pieces up to a multiple of some size.
152 */
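A minimal sketch (not part of store.c) of the free-piece index just described: a search tree keyed by piece size, whose entries are doubly linked lists of equal-sized free pieces. The names below are hypothetical and only illustrate the shape of the structure.

/* One tree entry per distinct free size; the tree itself is keyed by size. */
struct FreePiece {
	struct FreePiece *prev, *next;	/* siblings of exactly this size      */
};
struct SizeClass {
	unsigned long	  size;		/* B-tree key			      */
	struct FreePiece *pieces;	/* doubly linked list for that size   */
};
/* Best fit: take the smallest key >= the request; among equal fits the
 * lowest address wins.  The tree changes only when a size class appears or
 * disappears, since same-size pieces merely join or leave the list.	      */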
153
154/*
155 * To do:
156 * -- When freeing the last piece in a section, return sect to free page list.
157 * -- Robustify btreeX functions to check for 0 return from newX.
158 * -- Audits for BTree node and DLL node pages
159 * -- Sort DLLs by address
160 * -- Think about using SLLs instead of DLLs
161 * -- Use osFree when possible
162 * -- Ask for only as many pages as needed at once.
163 * -- Modify mixedFrontier to be truly busy.
164 * -- If adjacent mixed pieces are gc-ed, merge before adding to BTree.
165 * -- Merge returned free pieces to mixed frontier, if possible.
166 * -- Increase qmSize for sections with larger pieces.
167 * -- Encode run lengths in PgInfo to speed up sectFor.
168 * -- Relocation based on pointer certainty.
169 * -- Remove setjmps for stoAllocInner_ErrorCatch by ensuring enough to start.
170 */
171
172/*****************************************************************************
173 *
174 * :: Parameters
175 *
176 *****************************************************************************/
177
178#define LgPgSize	12	/* Log[2](PgSize). pagesTest ok for 3..15. */
179
180#define PgSize		(1L<<LgPgSize)	/* Granularity for OS request. */
181
182#define PgGroup		16	/* Get this many pages at once from OS. */
183
184#define FixedSizePgGroup 1	/* This many pages at once for fxmem. */
185
186#define FixedSizeCount	(sizeof(fixedSize)/sizeof(fixedSize[0]))
187				/* Number of different fixed small sizes. */
188
189#define FixedSizeMax	(32*sizeof(Pointer))
190				/* Maximum fixed small size. */
191
192#define STO_DIVISION_BY_LOOKUP	1
193#define STO_SECT_AND_PAGE_OF	1
194
195static Length fixedSize[] = {
196 1 * sizeof(Pointer),
197 2 * sizeof(Pointer),
198 3 * sizeof(Pointer),
199 4 * sizeof(Pointer),
200 6 * sizeof(Pointer),
201 8 * sizeof(Pointer),
202 10 * sizeof(Pointer),
203 12 * sizeof(Pointer),
204 16 * sizeof(Pointer),
205 20 * sizeof(Pointer),
206 24 * sizeof(Pointer),
 207	FixedSizeMax
208}; /* Fixed sizes for small allocations. */
209
210static int fixedSizeLog[] = {
211 0, 1, -1, 2, -2, 3, -3, -4, 4, -5, -6, 5
212 /*-1,-1,-1, -1,-1, -1, -1,-1, -1, -1, -1*/
213};
214
215
216#ifdef FOAM_RTS
217/*
218 * Ensure that these names agree with the CensusType enumeration in
219 * foam_c.h otherwise the results of stoTakeCensus will be meaningless.
220 */
221static char *censusName[] = {
222 "Unknown",
223 "Bogus",
224 "BInt",
225 "DFlo",
226 "Closure",
227 "Record",
228 "RawRecord",
229 "DynFormat",
230 "Char[]",
231 "Bool[]",
232 "Byte[]",
233 "HInt[]",
234 "SInt[]",
235 "SFlo[]",
236 "DFlo[]",
237 "Word[]",
238 "Ptr[]",
239 "Trailing[]",
240 "EnvInfo",
241 "EnvLevel",
242 "Fluid",
243 "GlobalInfo",
244 "SaveState",
245 "Total",
246};
247#else
248/*
249 * Ensure that these names agree with the OB_* definitions in axlgen.h
250 * and axlobs.h otherwise the results of stoTakeCensus will be meaningless.
251 */
252static char *censusName[] = {
253 "Unknown",
254 "Bogus",
255 "BInt",
256 "BTree",
257 "Bitv",
258 "Buffer",
259 "List",
260 "String",
261 "Symbol",
262 "Table",
263 "DNF",
264 "CCode",
265 "SExpr",
266 "CoMsg",
267 "SrcLine",
268 "Token",
269 "Doc",
270 "AbSyn",
271 "AbBind",
272 "Stab",
273 "Syme",
274 "TForm",
275 "TPoss",
276 "TConst",
277 "TQual",
278 "Foam",
279 "Lib",
280 "Archive",
281 "Total",
282};
283#endif
284
285
286/* We can't access the CensusType enumeration so we guess the limit */
287#define STO_CENSUS_LIMIT	(sizeof(censusName)/sizeof(censusName[0]))
288static ULong censusBefore[STO_CENSUS_LIMIT];
289static ULong censusAfter[STO_CENSUS_LIMIT];
290static ULong censusMemBefore[STO_CENSUS_LIMIT];
291static ULong censusMemAfter[STO_CENSUS_LIMIT];
292
293
294#ifdef STO_DIVISION_BY_LOOKUP
295static short stoDivTable[6][PgSize];
296#endif
297
298
299static UShort fixedSizeFor [FixedSizeMax+1];
300				/* Look up size to use. */
301static UByte fixedSizeIndexFor[FixedSizeMax+1];
302 /* Index into size table. */
303
304#define MixedBTreeT		16	/* Min #branches for btree interior node. */
305#define MixedSizePgGroup	2	/* At least this many pgs at once for mxmem. */
306#define MixedSizeQuantum	FixedSizeMax
307					/* All mixed sizes are a multiple of this. */
308
309/* - flo-po banished coz we don't want to risk exceptions in the runtime*/
310/* double GcEffectiveFactor = 0.3; Get more pages unless this fraction free. */
311long GcEffectiveFactorNum=3;
312long GcEffectiveFactorDen=10;
313/* double GcGrowthFactor = 1.4; Grow the heap by this much if a Gc fails.*/
314long GcGrowthFactorNum=14;
315long GcGrowthFactorDen=10;
316long GcMinGrowth = (512*1024)/PgSize;	/* Grow by a minimum of 0.5meg */
317
318long GcFrugalFactorNum = 70;
319long GcFrugalFactorDen = 100;
320
321
322/* Set GC_FRUGAL to enable more reticent heap growth */
323static Bool GcIsFrugal = false;
324
325
326/* Object information.
327 * Maybe consider using bitmap for these...
328 * Note that in reality we only have 32 objects (28 of which are taken
329 * by the compiler for internal data structures).
330 */
331#define OB_MAX	256
332
333/*****************************************************************************/
334/*** These MUST be the same as their cousins in foam_c.h ***/
335/*****************************************************************************/
336typedef Pointer (*StoFiFun)();
337
338typedef struct _StoFiProg
339{
340 StoFiFun fun;
341 StoFiFun fcall;
342 Pointer progInfo;
343 Pointer data;
344} *StoFiProg;
345
346typedef struct _StoFiClos
347{
348 Pointer env;
349 StoFiProg prog;
350} *StoFiClos;
351
352#define stoFiCFun(t, fn)	(*((t (*)())(fn)->prog->fun))
353#define stoFiCCall2(t,fn,a,b)	(stoFiCFun(t,fn)((fn)->env,a,b))
354/*****************************************************************************/
355
356static char		stoObRegistered[OB_MAX];
357static char		stoObNoInternalPtrs[OB_MAX];
358static StoFiClos	stoObAldorTracer[OB_MAX];
359static StoTraceFun	stoObCTracer[OB_MAX];
360
361/*****************************************************************************
362 *
363 * :: Controls
364 *
365 *****************************************************************************/
366
367static Bool	gcLevel      = StoCtl_GcLevel_Automatic;
368static FILE	*gcTraceFile = 0;
369static Bool	stoMustWash  = true;
370static Bool	stoMustTag   = true;
371
372static Bool	markingStats = false;
373
374#ifdef STO_DEBUG_DISPLAY
375# define stoDebug	true
376#else
377# define stoDebug	false
378#endif
379
380/*****************************************************************************
381 *
382 * :: Benchmarks
383 *
384 *****************************************************************************/
385
386static struct tmTimer gcTimer;
387
388/*****************************************************************************
389 *
390 * :: Types
391 *
392 *****************************************************************************/
393
394enum PgKind {
395 PgFree, /* Page is available. */
396 PgBusyFirst, /* First of related busy pages. */
397 PgBusyFollow, /* Subsequent related busy pages. */
398 PgPgMap, /* Page used by pgMap. */
399 PgBTree, /* Page used by free tree as node. */
400 PgDLL, /* Page used by free tree carrier. */
401 PgForeign /* Page is not ours. */
402};
403
404typedef char	Page[PgSize];	/* Type for striding across pages. */
405typedef UByte PgInfo; /* Type for compact page map info. */
406typedef UByte QmInfo; /* Type for compact quantum info. */
407
408typedef enum PgKind PgKind; /* Page is available, busy, etc. */
409
410typedef struct Section Section; /* A section of busy pages. */
411typedef struct FxMem FxMem; /* Ptr to fixed pcs of a given size. */
412typedef struct MxMem MxMem; /* Ptr to mixed pcs of a given size. */
413
414
415/*****************************************************************************
416 *
417 * :: Forward Declarations
418 *
419 *****************************************************************************/
420
421local MostAlignedType *stoDefaultError (int errnum);
422static StoErrorFun stoError = stoDefaultError;
423
424local Bool stoNeedsMoreHeadroom(int);
425local Length sectQmCount(Length, Length);
426
427extern void stoTakeCensus(AInt);
428
429/*****************************************************************************
430 *
431 * :: Conditional Code
432 *
433 *****************************************************************************/
434
435/*
436 * Set debugging configuration, if appropriate.
437 */
438#ifndef NDEBUG
439# define STO_TALLY
440
441# if !defined(STO_NEW_JUNK) && !defined(STO_NEW_ZERO)
442# define STO_NEW_AAAA
443# endif
444
445# if !defined(STO_FREE_JUNK)
446# define STO_FREE_DDDD
447# endif
448#endif
449
450
451/*
452 * Conditional code to fill and test memory.
453 */
454#if defined(STO_NEW_ZERO)
455# define STO_NEW_CHAR	0x00
456#endif
457#if defined(STO_NEW_AAAA)
458# define STO_NEW_CHAR	0xAA
459#endif
460#if defined(STO_FREE_DDDD)
461# define STO_FREE_CHAR	0xDD
462#endif
463
464#if defined(STO_NEW_CHAR)
465# define newFill(s,n)	 (stoMustWash ? memlset (s,STO_NEW_CHAR,n) : 0)
466#else
467# define newFill(s,n)	 Nothing
468#endif
469
470#if defined(STO_FREE_CHAR)
471# define freeFill(s,n)	 (stoMustWash ? memlset (s,STO_FREE_CHAR,n) : 0)
472# define freeAssert(s,n) (stoMustWash ? memltest(s,STO_FREE_CHAR,n) : 0)
473#else
474# define freeFill(s,n)	 Nothing
475# define freeAssert(s,n) Nothing
476#endif
477
478
479/*
480 * Conditional code to keep track of storage.
481 */
482#define STO_TALLY
483#ifdef STO_TALLY
484# define stoTally(expr)	(expr)
485#else
486# define stoTally(expr)	Nothing
487#endif
488
489
490/*
491 * Conditional code to control tracing.
492 */
493
494
495#ifdef STO_WATCH
496# define stoWatch(s,p,n,f) stoWatchReally(s,p,n,f)
497#else
498# define stoWatch(s,p,n,f) Nothing
499#endif
500
501#ifdef STO_CERTIFY
502# define stoCertify(n,c) stoCertifyReally((n), (c) ? true : false)
503#else
504# define stoCertify(n,c) (c)
505#endif
506
507#define IF(c)		if (stoCertify(__LINE__,c))
508#define WHILE(c)	while (stoCertify(__LINE__,c))
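An illustrative expansion (not from store.c) of the STO_CERTIFY instrumentation above: every IF/WHILE condition is routed through stoCertifyReally, which logs the line number and the truth value before handing the value back to the ordinary if or while.

/* With STO_CERTIFY defined,
 *	IF (p != 0) usePointer(p);
 * preprocesses to roughly
 *	if (stoCertifyReally(__LINE__, (p != 0) ? true : false)) usePointer(p);
 * and each evaluation appends something like " 1504-T" or " 1504-F" to stderr.
 */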
509
510/*****************************************************************************
511 *
512 * :: Storage manager state
513 *
514 *****************************************************************************/
515
516static int stoIsInit = 0; /* Is the storage manager initialized? */
517
518static char *heapStart, /* Start of allocator area. */
519 *heapEnd; /* Past end of allocator area. */
520
521static PgInfo *pgMap; /* Info for each page we care about. */
522static Length pgMapSize; /* Number of pages we care about. */
523static Length pgMapUses; /* Space occupied by page map. */
524
525static FxMem	*fixedPieces[FixedSizeCount];
526static BTree mixedPieces;
527static MxMem *mixedFrontier;
528
529ULong stoBytesOwn; /* Total owned by allocator. */
530ULong stoBytesAlloc; /* Total ever allocated. */
531ULong stoBytesFree; /* Total ever freed. */
532ULong stoBytesGc; /* Total ever garbage collected. */
533ULong stoBytesBlack; /* Total blacklisted */
534ULong stoPiecesGc[STO_CODE_LIMIT];	/* # this time, by code. */
535
536
537/*
538 * freeFixedPieces[i] is the number of free pieces in fixedPieces[i]
539 * busyFixedPieces[i] is the number of busy pieces of that size.
540 * freeMixedBytes is the number of bytes held in mixed-sized free-tree.
541 * busyMixedBytes is the number of bytes held in mixed-size busy pieces.
542 */
543static ULong freeFixedPieces[FixedSizeCount];
544static ULong busyFixedPieces[FixedSizeCount];
545static ULong freeMixedBytes, busyMixedBytes;
546
547/****************************************************************************
548 *
549 * :: Debugging
550 *
551 ***************************************************************************/
552
553#define stoWatchAlloc(p,n) stoWatch("Alloc ",(p), (n), true)
554#define stoWatchFree(p) stoWatch("Free ",(p), 0, true)
555#define stoWatchFrontier(p) Nothing
556#define stoWatchMarkFrom(p) stoWatch("Mark fr ",(p), 0, false)
557#define stoWatchMarkTo(p) stoWatch("Mark to ",(p), 0, false)
558
559
560static int stoWatchCount = 0;
561
562int
563stoWatchReally(String title, Pointer p, ULong n, Bool audit)
564{
565 const char *fmt = n ? "[[%4d %s: %p (%lu)]]\n" : "[[%4d %s: %p]]\n";
566
567 fprintf(osStderr, fmt, stoWatchCount, title, p, n);
568 fflush (osStderr);
569
570 if (audit) {
571 stoAudit();
572 }
573
574 stoWatchCount++;
575 return 0;
576}
577
578Bool
579stoCertifyReally(int loc, Bool flag)
580{
581 fprintf(osStderr, " %d-%s", loc, flag ? "T" : "F");
582 return flag;
583}
584
585TmTimer
586stoGcTimer(void)
587{
588 return &gcTimer;
589}
590
591/****************************************************************************
592 *
593 * :: Basic allocation
594 *
595 ***************************************************************************/
596
597/*
598 * Obtain an aligned area from the operating system.
599 */
600local Pointer
601byteGetIfCan(Length alignment, ULong nbytes, ULong *pnbytesGot)
602{
603 Pointer p;
604
605 osAllocAlignHint(alignment);
606
607 nbytes += alignment;
608 p = osAlloc(&nbytes);
609
610 if (p != 0) {
611		freeFill(p, nbytes);
612 stoBytesOwn += nbytes;
613 }
614
615 /*
616 * Round up appropriately.
617 */
618 if (p != 0) {
619 long plong, rem;
620
621		plong = ptrToLong(p);
622		rem = plong % alignment;
623		plong += (rem == 0) ? 0 : alignment - rem;
624		nbytes-= rem;
625
626		p = ptrFrLong(plong);
627 }
628
629 if (pnbytesGot) *pnbytesGot = nbytes;
630
631 return p;
632}
633
634
635
636/*****************************************************************************
637 *
638 * :: Page map
639 *
640 * Modifies globals: pgMap, pgMapSize, pgMapUses
641 * Uses globals: heapStart, heapEnd
642 *
643 ****************************************************************************/
644
645# define PgMask		~(PgSize-1)
646#ifdef STO_SECT_AND_PAGE_OF
647# define pgOf(p)	(ptrOff(heapStart, (ptrDiff(p, heapStart) & PgMask)))
648#endif
649# define pgAt(n)	((Page *)ptrOff(heapStart, (n) * PgSize))
650# define pgNo(p)	(ptrDiff((char *)(p), heapStart) >> LgPgSize)
651# define pgLen(nb)	((Length) (((nb) + PgSize - 1) >> LgPgSize))
652# define isInHeap(p)	(ptrLE(heapStart,(p)) && ptrLT((p),heapEnd))
653
654
655local Page *	pagesFind(Length npages);
656local void	pagesPut (Page *, Length npages);
657
658/*
659 * Obtain and free storage for page map.
660 */
661local PgInfo *
662pgmapAlloc(Length npgMapUses)
663{
664	PgInfo *npgMap = 0;
665	if (pgMap)
666		npgMap = (PgInfo *) pagesFind(npgMapUses);
667	if (!npgMap)
668		npgMap = (PgInfo *) byteGetIfCan(PgSize,PgSize*npgMapUses,NULL);
669	return npgMap;
670}
671
672local void
673pgmapFree(PgInfo *opgMap, Length opgMapUses)
674{
675	char	*b, *e;
676	b = (char *) opgMap;
677	e = b + PgSize * opgMapUses - 1;
678
679	if (isInHeap(b) && isInHeap(e))
680		pagesPut((Page *) opgMap, opgMapUses);
681}
682
683/*
684 * Create the initial page map.
685 */
686local void
687pgmapInit(void)
688{
689 pgMap = 0;
690
691 pgMap = pgmapAlloc(1);
692 if (!pgMap) return;
693
694 pgMapUses = 1;
695 pgMapSize = 1;
696}
697
698/*
699 * Update the page map for count pages starting at p.
700 */
701local void
702pgmapMod(Page *p, Length count, PgKind kind)
703{
704	PgInfo	*m = pgMap + pgNo(p);
705	while (count--) *m++ = kind;

	Analyzer path (final steps):
	36. Loop condition is true.  Entering loop body
	37. Null pointer value stored to 'm'
	38. Dereference of null pointer

706}
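The earlier steps of the analyzer's path are not reproduced here, but the surrounding code shows how 'm' can become null: pgMap stays 0 when its allocation fails (pgmapAlloc may return 0, for example in pgmapInit), and pgmapMod adds pgNo(p) to it without a check. A minimal defensive sketch, purely illustrative and with a hypothetical name, could make the invariant explicit:

local void
pgmapModChecked(Page *p, Length count, PgKind kind)
{
	PgInfo	*m;

	if (pgMap == 0) return;		/* or: assert(pgMap != 0); */
	m = pgMap + pgNo(p);
	while (count--) *m++ = kind;
}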
707
708/*
709 * Search for the first free page sequence long enough. Failure is -1.
710 */
711int pgmapFoundFreeLastTime = 0;
712
713local int
714pgmapFindFree(Length count)
715{
716 int i, i0, iL;
717
718 /* look after previous find */
719 for (i = pgmapFoundFreeLastTime ; i < pgMapSize; i++)
720 if (pgMap[i] == PgFree) {
721 i0 = i;
722 iL = i + count;
723 if (iL > pgMapSize) return -1;
724 for ( ; i < iL; i++) {
725 if (pgMap[i] != PgFree) break;
726 }
727 if (i == iL) {
728 pgmapFoundFreeLastTime = i0;
729 return i0;
730 }
731 }
732 /* now look at the bits we missed */
733 for (i = 0 ; i < pgmapFoundFreeLastTime ; i++)
734 if (pgMap[i] == PgFree) {
735 i0 = i;
736 iL = i + count;
737 if (iL > pgMapSize) return -1;
738 for ( ; i < iL; i++) {
739 if (pgMap[i] != PgFree) break;
740 }
741 if (i == iL) {
742 pgmapFoundFreeLastTime = i0;
743 return i0;
744 }
745 }
746 pgmapFoundFreeLastTime =0 ;
747 return -1;
748}
749
750/*
751 * Count the number of free pages.
752 */
753local int
754pgmapCountFree(void)
755{
756 int i, nfree;
757
758 nfree = 0;
759 for (i = 0; i < pgMapSize; i++) if (pgMap[i] == PgFree) nfree++;
760 return nfree;
761}
762
763/*
764 * Count the number of non-foreign pages.
765 */
766local int
767pgmapCountDomestic(void)
768{
769 int i, ndom;
770
771 ndom = 0;
772 for (i = 0; i < pgMapSize; i++) if (pgMap[i] != PgForeign) ndom++;
773 return ndom;
774}
775
776/*
777 * Guarantee page map is large enough.
778 */
779local int
780pgmapNeed(Length count)
781{
782	PgInfo	*opgMap = pgMap, *npgMap;
783	Length	opgMapUses = pgMapUses, npgMapUses = pgLen(count);
784
785 if (opgMapUses >= npgMapUses) return 1;
786
787 npgMap = pgmapAlloc(npgMapUses);
788 if (!npgMap) return 0;
789
790 memcpy(npgMap, opgMap, pgMapSize * sizeof(PgInfo));
791
792 pgMap = (PgInfo*) npgMap;
793 pgMapUses = npgMapUses;
794
795 pgmapFree(opgMap, opgMapUses);
796
797 return 1;
798}
799
800/*
801 * Slide page map to insert extra pages at beginning.
802 */
803local int
804pgmapSlide(Length count, PgKind kind)
805{
806 PgInfo *oend, *nend;
807 Length i;
808
809 if (count == 0) return 1;
810
811 if (!pgmapNeed(pgMapSize + count)) return 0;
812
813 oend = pgMap + pgMapSize;
814 nend = oend + count;
815
816 for (i = 0; i < pgMapSize; i++) *--nend = *--oend;
817 for (i = 0; i < count; i++) *--nend = kind;
818
819 pgMapSize += count;
820
821 return 1;
822}
823
824/*
825 * Extend page map with extra pages at the end.
826 */
827local int
828pgmapExtend(Length count, PgKind kind)
829{
830 PgInfo *oend;
831 Length i;
832
833 if (!pgmapNeed(pgMapSize + count)) return 0;
834
835 oend = pgMap + pgMapSize;
836
837 for (i = 0; i < count; i++) *oend++ = kind;
838
839 pgMapSize += count;
840
841 return 1;
842}
843
844
845/*****************************************************************************
846 *
847 * :: Heap page management
848 *
849 * Modifies globals: heapStart, heapEnd
850 *
851 ****************************************************************************/
852
853/*
854 * Add a stretch of free pages.
855 * The page map and heap limits are updated accordingly.
856 * A return value of 1 indicates success; 0 indicates failure.
857 *
858 * The page map may or may not include info about its own pages.
859 * If it does, then they may be noted as PgForeign or PgPgMap.
860 */
861local int
862pagesAdd(Length nBest, Length nMin)
863{
864 ULong nbytesGot, nRequest, nForeign;
865 char *b, *e;
866 int ok;
867
868	/* Request ideal amount; back off until minimum. */
869	nRequest = (nBest > PgGroup) ? nBest : PgGroup;
870	for (;;) {
871		b = (char *) byteGetIfCan(PgSize, nRequest*PgSize, &nbytesGot);
872		if (nbytesGot % PgSize != 0) nbytesGot -= nbytesGot % PgSize;
873
874		e = (char *) ptrOff(b,nbytesGot);
875		if (b) break;
876		if (nRequest <= nMin) return 0;
877		nRequest = (nRequest >= 2*nMin) ? nRequest / 2 : nRequest - 1;
878	}
879
880	nRequest = nbytesGot >> LgPgSize;
881	nForeign = ptrGE(b, heapEnd)   ? pgLen(ptrDiff(b, heapEnd))
882		 : ptrLE(e, heapStart) ? pgLen(ptrDiff(heapStart, e))
883		 : 0 ;
884
885 /* Update page map. */
886	if (ptrGE(b, heapEnd)) {
887 /* New store is above. */
888 heapEnd = e;
889 ok = pgmapExtend(nRequest + nForeign, PgForeign);
890 if (!ok) return 0;
891 }
892	else if (ptrLE(e, heapStart)) {
893 /* New store is below. */
894 heapStart = b;
895 ok = pgmapSlide(nRequest + nForeign, PgForeign);
896 if (!ok) return 0;
897 }
898 else {
899 /* New store is in middle (formerly foreign). */
900 }
901
902 /* Add pages to heap. */
903 pagesPut((Page *) b, nRequest);
904
905 /* If heap now includes page map, indicate it. */
906	b = (char *) pgMap;
907	e = (char *) ptrOff(b, pgMapUses * PgSize);
908	if (isInHeap(b) && isInHeap(ptrOff(e,-1)))
909 pgmapMod((Page *) pgMap, pgMapUses, PgPgMap);
910
911 return 1;
912}
913
914local Page *
915pagesFind(Length npages)
916{
917	int i = pgmapFindFree(npages);
918/*	if (osGetEnv("GC_FINDFREE")!=NULL) { fprintf(osStderr, "Page Found %ld\n",i);}; */
919	return (i == -1) ? 0 : pgAt(i);
920}
921
922local Page *
923pagesGet(Length nMin)
924{
925	Page	*p;
926	Bool	addAnyway = false;
927 Length nBest = nMin;
928
929 p = pagesFind(nMin);
930
931 if (p) {
932 if (nMin > 0) pgmapMod(p, 1, PgBusyFirst);
933 if (nMin > 1) pgmapMod(p+1, nMin-1, PgBusyFollow);
934 }
935	else if (gcLevel == StoCtl_GcLevel_Automatic) {
936 int tot, free0, free1;
937 int gceLhs, gceRhs;
938
939 tot = pgmapCountDomestic();
940 free0 = pgmapCountFree();
941 stoGc();
942 free1 = pgmapCountFree();
943
944
945 /* Are there sufficient free pages for this request? */
946 addAnyway = (nMin > free1);
947
948
949 /* If there are enough, is the headroom big enough? */
950 if (!addAnyway)
951 {
952 /* Frugal or normal heap growth? */
953 if (GcIsFrugal)
954 addAnyway = stoNeedsMoreHeadroom(free1);
955 else
956 {
957 gceLhs = GcEffectiveFactorNum * tot;
958 gceRhs = GcEffectiveFactorDen * free1;
959 addAnyway = (gceLhs > gceRhs);
960 }
961 }
962
963 if (addAnyway &&
964 (GcGrowthFactorNum * tot) > (GcGrowthFactorDen*(tot + nMin)))
965 {
966 nBest = (Length) ((GcGrowthFactorNum - GcGrowthFactorDen ) * tot /GcGrowthFactorDen + 1);
967 nBest = nBest < GcMinGrowth ? GcMinGrowth : nBest;
968 }
969
970 p = pagesFind(nMin);
971 if (p) {
972 if (nMin > 0) pgmapMod(p, 1, PgBusyFirst);
973 if (nMin > 1) pgmapMod(p+1, nMin-1, PgBusyFollow);
974 }
975
976 if (gcTraceFile) {
977			fprintf(gcTraceFile, " [GC: %s enough, %d/%d -> %d/%d for " LENGTH_FMT "]\n",
978 p ? "!!! Got" : "... Not", free0, tot, free1, tot, nMin);
979
980 if (addAnyway) {
981 fprintf(gcTraceFile, "\n [GC: Growing heap (not enough %s)]\n",
982 (nMin > free1) ? "free pages" : "headroom");
983 }
984 }
985#ifdef FOAM_RTS
986 if (markingStats) {
987			fprintf(osStderr, " [GC: %s enough, %d/%d -> %d/%d for " LENGTH_FMT "]\n",
988 p ? "!!! Got" : "... Not", free0, tot, free1, tot, nMin);
989
990 if (addAnyway) {
991 fprintf(osStderr, "\n [GC: Growing heap (not enough %s)]\n",
992 (nMin > free1) ? "free pages" : "headroom");
993 }
994 }
995#endif
996 }
997 if (!p || addAnyway)
998 pagesAdd(nBest, nMin);
999 if (!p) {
1000 p = pagesFind(nMin);
1001 if (p) {
1002 if (nMin > 0) pgmapMod(p, 1, PgBusyFirst);
1003 if (nMin > 1) pgmapMod(p+1, nMin-1, PgBusyFollow);
1004 }
1005 }
1006
1007 return p;
1008}
1009
1010local void
1011pagesPut(Page *pg, Length count)
1012{
1013	pgmapMod (pg, count, PgFree);
1014	freeFill(pg, count*PgSize);
1015}
1016
1017/*****************************************************************************
1018 *
1019 * :: Section manipulation
1020 *
1021 ****************************************************************************/
1022
1023struct Section {
1024	short	pgCount;	/* Number of pages in section. */
1025	short	qmLog;		/* base 2 log of size, if integral */
1026#ifdef STO_DIVISION_BY_LOOKUP
1027	UByte	qmDiv;		/* division table if !qmLog */
1028#endif
1029	short	qmSize;		/* Alloc quantum. */
1030	UByte	qmSizeIndex;	/* Index of quantum in size table. */
1031	BPack(Bool)	isFixed;	/* Alloc of same or different sizes? */
1032
1033	FxMem	*free;		/* Section free list (for stoTune). */
1034	ULong	qmCount;	/* Number of quanta of data. */
1035	Pointer	data;		/* Pointer to start of data. */
1036	QmInfo	info[NARY];	/* Per quantum information. */
1037};
1038
1039#define SectionHeadSize	fullsizeof(Section, 0, QmInfo)
1040
1041/*
1042 * Macros for maintaining the info array:
1043 * bits 0-4: object code
1044 * bit 5: mark bit
1045 * bits 6-7: kind of page
1046 */
1047#define QmKindMask	0xC0
1048#define QmFollow	0x00
1049#define QmFreeFirst	0x40
1050#define QmBusyFirst	0x80
1051#define QmBlacklisted	0xC0
1052
1053#define QmMarkMask	0x20
1054#define QmCodeMask	0x1F
1055
1056#define QmInfoMake(kind, code)	(((kind) & QmKindMask) | ((code) & QmCodeMask))
1057#define QmInfoMake0(kind)	(kind)
1058#define QmInfoKind(info)	((info) & QmKindMask)
1059#define QmInfoCode(info)	((info) & QmCodeMask)
1060#define QmInfoMark(info)	((info) & QmMarkMask)
1061
1062#define QmInfoSetKind(info,kind) ((info) = \
1063	((info) & (~QmKindMask & 0xFF)) | ((kind) & QmKindMask))
1064#define QmInfoSetCode(info,code) ((info) = \
1065	((info) & (~QmCodeMask & 0xFF)) | ((code) & QmCodeMask))
1066#define QmInfoSetMark(info)	((info) |= QmMarkMask)
1067#define QmInfoClearMark(info)	((info) &= (~QmMarkMask & 0xFF))
1068
1069#define QmIsPtrFree(info)	(stoObNoInternalPtrs[QmInfoCode(info)])
1070#define QmAldorTraced(info)	(stoObAldorTracer[QmInfoCode(info)])
1071#define QmCTraced(info)		(stoObCTracer[QmInfoCode(info)])
1072
1073
1074/*
1075 * Find the section into which p points. 0 indicates error. If you know
1076 * that the section is the first (eg for fixed-size pages) then sectOf(p)
1077 * is the most efficient method to use rather than sectAt(pgNo(p)).
1078 */
1079#ifdef STO_SECT_AND_PAGE_OF
1080#define sectOf(p)	((Section *)pgOf(p))
1081#else
1082#define sectOf(p)	sectFor(p)
1083#endif
1084#define sectAt(n)	((Section *) pgAt(n))
1085#define sectFor(p)	(pgMap[pgNo(p)]==PgBusyFirst ? sectAt(pgNo(p)) :_sectFor(p))
1086
1087/*
1088 * Given a pointer and a section, find the index into the info array.
1089 */
1090#define qmNo(p,sect)	(ptrDiff((char*)(p),(char*)((sect)->data))/(sect)->qmSize)
1091
1092#define qmLogNo(p, sect) (ptrDiff((char*)(p),(char*)((sect)->data)) >> (sect)->qmLog)
1093
1094
1095/* For sections with fixed-sized pages, this is more efficient: */
1096#ifdef STO_DIVISION_BY_LOOKUP
1097#define qmDivNo(p, sect) stoDivTable[(sect)->qmDiv][(ptrDiff((char*)(p),(char*)((sect)->data)))]
1098#endif
1099
1100
1101/*
1102 * How many quanta will fit in a section with this many pages?
1103 */
1104local Length
1105sectQmCount(Length pageCount, Length qmSize)
1106{
1107	return (pageCount * PgSize - SectionHeadSize)/(qmSize + sizeof(QmInfo));
1108}
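A worked example (illustrative only; the exact header size depends on the host's struct layout): with PgSize = 4096, one-byte QmInfo entries, a section header of roughly 46 bytes on a typical 64-bit build, and qmSize = 256 (32 eight-byte pointers), a one-page section yields

/*	sectQmCount(1, 256)  ~=  (1*4096 - 46) / (256 + 1)  =  15 quanta,
 *	with the leftover bytes forming the "gap" between the info array and
 *	the data, which is aligned against the end of the last page.	*/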
1109
1110/*
1111 * Initialize the stretch of pages.
1112 */
1113local Section *
1114sectPrepare(Page *p, Length npages, Length sz, int isFixed)
1115{
1116 Section *x = (Section *) p;
1117 Length szixix, i, nq;
1118 int lgWordSize;
1119
1120 lgWordSize = 0;
1121 for (i = sizeof(Pointer); i > 1; i = i>>1) lgWordSize++;
1122	assert(npages < (1<<16));
1123	szixix = (sz <= FixedSizeMax) ? sz : 0;
1124 x->pgCount = npages;
1125 x->qmSize = sz;
1126 x->qmSizeIndex = fixedSizeIndexFor[szixix];
1127#ifdef STO_DIVISION_BY_LOOKUP
1128 x->qmLog = fixedSizeLog[x->qmSizeIndex] < 0
1129 ? 0 : fixedSizeLog[x->qmSizeIndex] + lgWordSize;
1130 x->qmDiv = x->qmLog ? 0 : -(fixedSizeLog[x->qmSizeIndex]+1);
1131#else
1132 x->qmLog = fixedSizeLog[x->qmSizeIndex] < 0
1133 ? 0 : fixedSizeLog[x->qmSizeIndex] + lgWordSize;
1134#endif
1135 x->isFixed = isFixed;
1136 x->qmCount = nq = sectQmCount(npages, sz);
1137	x->data	       = ptrOff((char *) p, npages * PgSize - nq * sz);
1138
1139 /*
1140 * Fixed section is a whole bunch of little pieces.
1141 * Mixed section contains one big one.
1142 */
1143 if (stoMustTag) {
1144		x->info[0] = QmInfoMake0(QmFreeFirst);
1145		if (isFixed)
1146			for (i = 1; i < nq; i++)
1147				x->info[i] = QmInfoMake0(QmFreeFirst);
1148		else
1149			for (i = 1; i < nq; i++)
1150				x->info[i] = QmInfoMake0(QmFollow);
1151 }
1152 return x;
1153}
1154
1155local Section *
1156_sectFor(Pointer p)
1157{
1158	Length	pi;
1159
1160	if (!isInHeap(p))
1161		return 0;
1162	pi = pgNo(p);
1163	while (pgMap[pi] == PgBusyFollow)
1164		pi--;
1165	if (pgMap[pi] == PgBusyFirst)
1166		return sectAt(pi);
1167	return 0;
1168}
1169
1170/*****************************************************************************
1171 *
1172 * :: Fixed piece management
1173 *
1174 ****************************************************************************/
1175
1176struct FxMem {
1177 struct FxMem *next;
1178};
1179
1180# define fxmemCleanHead(pc)		\
1181	freeFill ((UByte *)(pc), sizeof(FxMem))
1182# define fxmemCleanBody(pc, sz)		\
1183	freeFill ((UByte *)(pc) + sizeof(FxMem), (sz) - sizeof(FxMem))
1184# define fxmemAssertCleanBody(pc, sz)	\
1185	freeAssert((UByte *)(pc) + sizeof(FxMem), (sz) - sizeof(FxMem))
1186
1187
1188localstatic FxMem *
1189piecesGetFixed(Length nbytes)
1190{
1191 Page *pages;
1192 Section *sect;
1193 FxMem *pieces, *pieces0;
1194 Length sz, i, npages, npieces;
1195
1196 npages = FixedSizePgGroup1;
1197 sz = fixedSizeFor[nbytes];
1198 pages = pagesGet(npages);
1199 if (!pages) return 0;
1200
1201 sect = sectPrepare(pages, npages, sz, true1);
1202
1203 npieces = sect->qmCount;
1204 pieces0 = (FxMem *) sect->data;
1205
1206 for (pieces = pieces0, i = 1; i < npieces; i++, pieces = pieces->next)
1207 pieces->next = (FxMem *) ptrOff((char *) pieces, sz)((Pointer)((char *)((char *) pieces) + (sz)));
1208 pieces->next = fixedPieces[fixedSizeIndexFor[nbytes]];
1209
1210 return pieces0;
1211}
1212
1213/*****************************************************************************
1214 *
1215 * :: Mixed piece management
1216 *
1217 ****************************************************************************/
1218
1219typedef struct MxMemDLL {
1220 ULong nbytes;
1221 MxMem *pieces;
1222} MxMemDLL;
1223
1224typedef union MxMemU {
1225 struct {
1226 MxMem *linkA, *linkB;
1227 MxMemDLL *dll;
1228 } free;
1229 struct {
1230 MostAlignedType data;
1231 } busy;
1232} MxMemU;
1233
1234
1235struct MxMem {
1236 ULong nbytesPrev;
1237 ULong nbytesThis;
1238	BPack(Bool)	isFree;
1239	BPack(Bool)	isFirst;
1240	BPack(Bool)	isLast;
1241 Section *sect;
1242
1243 /* Gap */
1244 union MxMemU body;
1245};
1246
1247#define MxMemHeadSize	(sizeof(MxMem) - sizeof(MxMemU))
1248
1249#define mxmemNext(p)	((p)->isLast ? 0 : (MxMem *)ptrOff((char *)(p), (long)(p)->nbytesThis))
1250#define mxmemPrev(p)	((p)->isFirst? 0 : (MxMem *)ptrOff((char *)(p),-(long)(p)->nbytesPrev))
1251
1252
1253# define mxmemCleanHead0(pc)		\
1254	freeFill ((UByte *)(pc), sizeof(MxMem))
1255# define mxmemCleanHead(pc)		\
1256	freeFill ((UByte *)(pc) + MxMemHeadSize, sizeof(MxMem) - MxMemHeadSize)
1257# define mxmemCleanBody(pc, sz)		\
1258	freeFill ((UByte *)(pc) + sizeof(MxMem), (sz) - sizeof(MxMem))
1259# define mxmemAssertCleanBody(pc, sz)	\
1260	freeAssert((UByte *)(pc) + sizeof(MxMem), (sz) - sizeof(MxMem))
1261
1262
1263#ifdef STO_LONGJMP
1264static jmp_buf stoAllocInner_ErrorCatch;
1265static Pointer stoAllocInner_ErrorValue;
1266#endif
1267
1268local FxMem *
1269stoAllocInner(ULong nbytes, PgKind pgkind)
1270{
1271 int i, npcs;
1272 ULong npages;
1273 Page *pages;
1274 FxMem *pcs0, *pcs;
1275
1276 npages = 1;
1277	assert(nbytes <= npages*PgSize);
1278	npcs = (npages*PgSize)/nbytes;
1279 pages = pagesGet(npages);
1280
1281	if (pages == 0) {
1282#ifdef STO_LONGJMP
1283		stoAllocInner_ErrorValue = (*stoError)(StoErr_CantBuild);
1284		longjmp(stoAllocInner_ErrorCatch, int0);
1285#else
1286		(*stoError)(StoErr_CantBuild);
1287		NotReached(;);
1288#endif
1289	}
1290
1291 pgmapMod(pages, npages, pgkind);
1292
1293 pcs = pcs0 = (FxMem *) pages;
1294 for (i = 1; i < npcs; i++)
1295		pcs = pcs->next = (FxMem *) ptrCanon((char *) pcs+nbytes);
1296 pcs->next = 0;
1297 return pcs0;
1298}
1299
1300static FxMem *btreeNodes = 0;
1301static FxMem *mxmemDLLs = 0;
1302
1303BTree
1304mxmemAllocBTree(ULong nbytes)
1305{
1306 BTree b;
1307
1308 if (!btreeNodes) btreeNodes = stoAllocInner(nbytes, PgBTree);
1309 b = (BTree) btreeNodes;
1310 btreeNodes = btreeNodes->next;
1311 return b;
1312}
1313
1314void
1315mxmemFreeBTree(BTree b)
1316{
1317 FxMem *p = (FxMem *) b;
1318 p->next = btreeNodes;
1319 btreeNodes = p;
1320}
1321
1322MxMemDLL *
1323mxmemAllocDLL(void)
1324{
1325 MxMemDLL *p;
1326
1327 if (!mxmemDLLs) mxmemDLLs = stoAllocInner((ULong) sizeof(*p), PgDLL);
1328 p = (MxMemDLL *) mxmemDLLs;
1329 mxmemDLLs = mxmemDLLs->next;
1330 return p;
1331}
1332
1333void
1334mxmemFreeDLL(MxMemDLL *m)
1335{
1336 FxMem *p = (FxMem *) m;
1337 p->next = mxmemDLLs;
1338 mxmemDLLs = p;
1339}
1340
1341# define mxmemUnlinkFromBTree(mi, pbt)	{ \
1342	MxMemDLL *dll = mxmemUnlink(mi); \
1343	IF(!dll->pieces) { \
1344		btreeDeleteX((pbt), dll->nbytes, NULL, mxmemFreeBTree); \
1345		mxmemFreeDLL(dll); \
1346	} \
1347	}
1348
1349local MxMemDLL *
1350mxmemUnlink(MxMem *mi)
1351{
1352	MxMemDLL *dll = mi->body.free.dll;
1353	MxMem	 *A  = mi->body.free.linkA,
1354		 *B  = mi->body.free.linkB;
1355
1356	/* Remove from doubly linked list. */
1357	IF (A) A->body.free.linkB = B;
1358	IF (B) B->body.free.linkA = A;
1359
1360	/* Adjust pointer to the list. Make null if no siblings. */
1361	IF (ptrEQ(mi, dll->pieces))
1362		dll->pieces = A ? A : B;
1363
1364	return dll;
1365}
1366
1367/*
1368 * Link a mixed-size piece into the B-tree.
1369 */
1370local void
1371mxmemLink(MxMem *mi)
1372{
1373 BTree bnode;
1374 int bix;
1375
1376	mi    = (MxMem *) ptrCanon(mi);		/* For < comp */
1377	bnode = btreeSearchEQ(mixedPieces, mi->nbytesThis, &bix);
1378
1379	IF (bnode) {
1380		MxMemDLL *dll = (MxMemDLL *) btreeElt(bnode, bix);
1381		MxMem	 *u, *v;
1382
1383		/*!! Sorting by address can give bad paging with these DLLs. */
1384		u = dll->pieces;
1385		v = u->body.free.linkA;
1386		WHILE (mi > u && v) { u = v; v = u->body.free.linkA; }
1387
1388		mi->body.free.dll   = dll;
1389		mi->body.free.linkA = v;
1390		mi->body.free.linkB = u;
1391		u->body.free.linkA  = mi;
1392		IF (v) v->body.free.linkB = mi;
1393 }
1394 else {
1395#if 1
1396 MxMemDLL *dll = mxmemAllocDLL();
1397
1398 dll->nbytes = mi->nbytesThis;
1399 dll->pieces = mi;
1400 btreeInsertX(&mixedPieces,
1401 (BTreeKey) dll->nbytes, (BTreeElt) dll,
1402 mxmemAllocBTree);
1403
1404 mi->body.free.dll = dll;
1405 mi->body.free.linkA = 0;
1406 mi->body.free.linkB = 0;
1407#else
1408 MxMemDLL *dll;
1409 mi->body.free.linkA = 0;
1410 mi->body.free.linkB = 0;
1411 dll = mxmemAllocDLL();
1412
1413 dll->nbytes = mi->nbytesThis;
1414 dll->pieces = mi;
1415 btreeInsertX(&mixedPieces,
1416 (BTreeKey) dll->nbytes, (BTreeElt) dll,
1417 mxmemAllocBTree);
1418
1419 mi->body.free.dll = dll;
1420
1421#endif
1422 }
1423}
1424
1425/*
1426 * Merge two adjacent pieces.
1427 */
1428local void
1429mxmemMerge(MxMem *curr, MxMem *next)
1430{
1431	MxMem	*N = mxmemNext(next);
1432
1433	curr->nbytesThis += next->nbytesThis;
1434	curr->isLast	  = next->isLast;
1435	IF (N) N->nbytesPrev = curr->nbytesThis;
1436
1437	if (stoMustTag) {
1438		QmInfo *pqm = next->sect->info + qmNo(next,next->sect);
1439		QmInfoSetKind(*pqm, QmFollow);
1440	}
1441	mxmemCleanHead0(next);
1442}
1443
1444/*
1445 * Return the remainder, where curr keeps only nbytes.
1446 * nbytes must be a multiple of MixedSizeQuantum.
1447 */
1448local MxMem *
1449mxmemSplit(MxMem *curr, ULong nbytes)
1450{
1451	MxMem	*r = (MxMem *) ptrOff((char *)curr, nbytes);
1452	MxMem	*N = mxmemNext(curr);
1453
1454	r->isFree	= curr->isFree;
1455	r->isFirst	= false;
1456	r->isLast	= curr->isLast;
1457	r->nbytesThis	= curr->nbytesThis - nbytes;
1458	r->nbytesPrev	= nbytes;
1459	r->sect		= curr->sect;
1460
1461	curr->isLast	 = false;
1462	curr->nbytesThis = nbytes;
1463
1464	IF (N) N->nbytesPrev = r->nbytesThis;
1465
1466	if (stoMustTag) {
1467		QmInfo *pqm = curr->sect->info + qmNo(r,curr->sect);
1468		QmInfoSetKind(*pqm, QmFreeFirst);
1469	}
1470	return r;
1471}
1472
1473local MxMemDLL *
1474mxmemShow(MxMemDLL *dll)
1475{
1476 int n;
1477 MxMem *t;
1478
1479 /* Make t point to A-most piece. */
1480 t = dll->pieces;
1481 while (t->body.free.linkA)
1482 t = t->body.free.linkA;
1483 /* Count the pieces */
1484 for (n = 0; t; n++) {
1485 if (t->nbytesThis != dll->nbytes)
1486 fprintf(osStderr, "(%ld)", t->nbytesThis);
1487 t = t->body.free.linkB;
1488 }
1489 fprintf(osStderr," %dx%ld", n, dll->nbytes);
1490
1491 return dll;
1492}
1493
1494void
1495piecePutMixed(MxMem *mi)
1496{
1497	MxMem	*prev, *next;
1498
1499	if (!mixedPieces) mixedPieces = btreeNewX(MixedBTreeT, mxmemAllocBTree);
1500
1501
1502	/* 1. Determine which adjacent pieces will be merged. */
1503	prev = mxmemPrev(mi);
1504	IF (prev && !prev->isFree) prev = 0;
1505	next = mxmemNext(mi);
1506	IF (next && !next->isFree) next = 0;
1507
1508	/* 2. Remove those pieces from the piece tree and merge. */
1509	IF (next) {
1510		mxmemUnlinkFromBTree(next, &mixedPieces);
1511		mxmemMerge(mi, next);
1512	}
1513
1514	IF (prev) {
1515		mxmemUnlinkFromBTree(prev, &mixedPieces);
1516		mxmemMerge(prev, mi);
1517		mi = prev;
1518	}
1519
1520	/* 3. Add piece to piece tree. */
1521	mxmemLink(mi);
1522
1523	/* 4. We're free */
1524	mi->isFree = true;
1525
1526}
1527
1528/*
1529 * Get the best piece with size at least nbytes from the mixed piece pool.
1530 * nbytes is guaranteed to be a multiple of MixedSizeQuantum.
1531 *
1532 * Keeping mixedFrontier out of mixedPieces btree avoids one btreeDelete
1533 * and one btreeInsert each time it is used.
1534 */
1535
1536#define shdSplit1(nhas, nreq)	((nhas) > (nreq) + MixedSizeQuantum)
1537#define shdSplit2(nhas,nreq)	((nhas) > (nreq) + MixedSizeQuantum)
1538#define shdBe1(nhas,nreq)	(nreq)
1539#define shdBe2(nhas,nreq)	(nreq)
1540
1541MxMem *
1542pieceGetMixed(ULong nbytes)
1543{
1544 BTree bnode, bnode1;
1545 int bix, bix1;
1546 MxMem *mi, *mt, *tmp;
1547 MxMemDLL *dll;
1548 Bool is1;
1549 ULong mn;
1550
1551 if (!mixedPieces) mixedPieces = btreeNewX(MixedBTreeT16, mxmemAllocBTree);
1552
1553
1554 /* First check in mixed-size pieces free tree. */
1555 bnode = btreeSearchGE(mixedPieces, nbytes, &bix);
1556 IF (bnode)if ((bnode)) {
1557 mi = ((MxMemDLL *) btreeElt(bnode, bix)((bnode)->part[bix].entry))->pieces;
1558 dll = mxmemUnlink(mi);
1559 is1 = !dll->pieces;
1560 mi->isFree = false((int) 0);
1561
1562 mn = mi->nbytesThis;
1563 IF (shdSplit1(mn, nbytes))if ((((mn) > (nbytes) + (32*sizeof(Pointer))))) {
1564 ULong r = mi->nbytesThis - nbytes;
1565
1566 mt = mxmemSplit(mi,shdBe1(mn, nbytes)(nbytes));
1567 mt->isFree = true1;
1568 bnode1 = !is1 ? 0 : btreeSearchGE(mixedPieces,r,&bix1);
1569
1570 IF (!is1)if ((!is1)) {
1571 piecePutMixed(mt);
1572 }
1573 else IF (bnode1 != bnode || bix1 != bix)if ((bnode1 != bnode || bix1 != bix)) {
1574 btreeDeleteX(&mixedPieces, dll->nbytes, NULL((void*)0),
1575 mxmemFreeBTree);
1576 mxmemFreeDLL(dll);
1577 piecePutMixed(mt);
1578 }
1579 else {
1580 /* Reuse btree entry. */
1581 btreeKey(bnode1, bix1)((bnode1)->part[bix1].key) = r;
1582 dll->nbytes = r;
1583 dll->pieces = mt;
1584 mt->body.free.linkA = mt->body.free.linkB = 0;
1585 mt->body.free.dll = dll;
1586 }
1587 }
1588 else IF (is1)if ((is1)) {
1589 btreeDeleteX(&mixedPieces, dll->nbytes, NULL((void*)0),
1590 mxmemFreeBTree);
1591 mxmemFreeDLL(dll);
1592 }
1593 }
1594 else {
1595 /* no piece in mixed-size free tree is big enough. */
1596		IF (mixedFrontier && mixedFrontier->nbytesThis < nbytes) {
1597			/* Frontier piece is too small. Throw it away. */
1598 tmp = mixedFrontier;
1599 mixedFrontier = 0;
1600 piecePutMixed(tmp);
1601 stoWatchFrontier(tmp);
1602 /* (Actually, this could have lead to a merge). */
1603 }
1604
1605 IF (!mixedFrontier)if ((!mixedFrontier)) {
1606 /* Need a new frontier piece to satisfy request. */
1607 Length npages;
1608 ULong nq, nb;
1609 Page *pages;
1610 Section *sect;
1611
1612 nq = QUO_ROUND_UP(nbytes, MixedSizeQuantum)((nbytes) % ((32*sizeof(Pointer))) ? (nbytes)/((32*sizeof(Pointer
))) + 1 : (nbytes)/((32*sizeof(Pointer))))
;
1613 nb = SectionHeadSize(sizeof(Section) + (0) * sizeof(QmInfo) - 10 * sizeof(QmInfo)
)
+
1614 nq * (sizeof(QmInfo) + MixedSizeQuantum(32*sizeof(Pointer)));
1615 npages = QUO_ROUND_UP(nb, PgSize)((nb) % ((1L<<12)) ? (nb)/((1L<<12)) + 1 : (nb)/(
(1L<<12)))
;
1616
1617 IF (npages < MixedSizePgGroup)if ((npages < 2))
1618 npages = MixedSizePgGroup2;
1619
1620 pages = pagesGet(npages);
1621 if (!pages) return 0;
1622
1623 sect = sectPrepare(pages, npages,
1624 (Length) MixedSizeQuantum(32*sizeof(Pointer)), false((int) 0));
1625
1626 mt = (MxMem *) sect->data;
1627 mt->isFree = false((int) 0); /* so free won't merge+unlink */
1628 mt->isFirst = true1;
1629 mt->isLast = true1;
1630 mt->nbytesPrev = 0;
1631 mt->nbytesThis = sect->qmCount * MixedSizeQuantum(32*sizeof(Pointer));
1632 mt->sect = sect;
1633
1634 mixedFrontier = mt;
1635 stoWatchFrontier(mixedFrontier);
1636 }
1637
1638 mi = mixedFrontier;
1639 mi->isFree = false((int) 0);
1640
1641 mn = mixedFrontier->nbytesThis;
1642
1643 IF (shdSplit2(mn, nbytes))if ((((mn) > (nbytes) + (32*sizeof(Pointer)))))
1644 mixedFrontier = mxmemSplit(mi, shdBe2(mn, nbytes)(nbytes));
1645 else
1646 mixedFrontier = 0;
1647
1648 stoWatchFrontier(mixedFrontier);
1649 }
1650 return mi;
1651}
1652
1653/****************************************************************************
1654 *
1655 * :: Garbage collector
1656 *
1657 ***************************************************************************/
1658
1659/*
1660 * This draft assumes that all words can be pointers.
1661 * The type code could be used to avoid following things which are known
1662 * not to be pointers.
1663 *
1664 * Pointers into the middle of structures are properly recognized.
1665 */
1666
1667localstatic void stoGcMarkAndSweep (void);
1668localstatic int stoGcMark (void);
1669localstatic int stoGcMarkRange (Pointer *lo, Pointer *hi, int check);
1670localstatic int stoGcSweep (void);
1671localstatic int stoGcSweepFixed (Section *);
1672localstatic int stoGcSweepMixed (Section *);
1673
1674static int stoGcMarkedFree;
1675
1676localstatic Bool
1677stoNeedsMoreHeadroom(int npgFree)
1678{
1679 int i;
1680 ULong gceLhs, gceRhs;
1681 ULong nFree, nBusy, nPer;
1682 ULong nSpare, nAvail, nTotal;
1683
1684
1685 /* Check the headroom for each size of fixed-size pieces */
1686 for (i = 0; i < FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])); i++)
1687 {
1688 /* Compute free/busy/spare counts */
1689 nFree = freeFixedPieces[i];
1690 nBusy = busyFixedPieces[i];
1691 nPer = sectQmCount(1L, fixedSize[i]);
1692 nSpare = nPer*npgFree;
1693 nAvail = nFree + nSpare;
1694 nTotal = nBusy + nAvail;
1695
1696
1697 /* Headroom ratios (don't want any FPE's) */
1698 gceLhs = GcEffectiveFactorNum*nTotal;
1699 gceRhs = GcEffectiveFactorDen*nAvail;
1700
1701
1702 /* Stop if not enough headroom */
1703 if (gceLhs > gceRhs) return true1;
1704 }
1705
1706
1707 /* Compute mixed-size usage */
1708 nSpare = npgFree*(PgSize(1L<<12) - MxMemHeadSize(sizeof(MxMem) - sizeof(MxMemU)));
1709 nAvail = nSpare + freeMixedBytes;
1710 nTotal = nAvail + busyMixedBytes;
1711
1712
1713 /* Headroom ratios */
1714 gceLhs = GcEffectiveFactorNum*nTotal;
1715 gceRhs = GcEffectiveFactorDen*nAvail;
1716
1717
1718 /* Do we have enough mixed-sized headroom? */
1719 return (gceLhs > gceRhs);
1720}
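
The test gceLhs > gceRhs is the cross-multiplied form of nAvail/nTotal < GcEffectiveFactorNum/GcEffectiveFactorDen; multiplying through keeps the comparison in integers and avoids dividing by an nAvail that may be zero (the FPE the comments mention). A small illustration with made-up factor values (the 1/4 here is hypothetical, not the value configured elsewhere in this file):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical factor values, for illustration only. */
    static const unsigned long Num = 1, Den = 4;   /* want >= 1/4 available */

    static bool needsMoreHeadroom(unsigned long nTotal, unsigned long nAvail)
    {
        /* Same as nAvail/nTotal < Num/Den, but safe when nAvail is 0. */
        return Num * nTotal > Den * nAvail;
    }

    int main(void)
    {
        printf("%d\n", needsMoreHeadroom(1000, 300));  /* 0: 300/1000 >= 1/4 */
        printf("%d\n", needsMoreHeadroom(1000, 200));  /* 1: 200/1000 <  1/4 */
        return 0;
    }
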
1721
1722/*
1723 * Entry point to garbage collector.
1724 * Use setjmp to force registers onto stack, then mark from roots.
1725 */
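
The setjmp call is there only for its side effect: it copies the callee-saved registers into a jmp_buf that lives in the current stack frame, so pointers held only in registers become visible to the conservative stack scan. The same trick in isolation (scanStack is a hypothetical stand-in for the root scan below):

    #include <setjmp.h>

    extern void scanStack(void);   /* hypothetical conservative stack scan */

    void collect(void)
    {
        jmp_buf regs;
        (void) setjmp(regs);  /* value unused: we only want the registers spilled  */
        scanStack();          /* regs now sits in a frame the stack scan will walk */
    }
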
1726
1727localstatic void
1728stoGcMarkAndSweep(void)
1729{
1730 jmp_buf jb;
1731 int nm, ns;
1732
1733 setjmp(jb)_setjmp (jb);
1734
1735 if (gcTraceFile) {
1736 fprintf(gcTraceFile, " [GC: ");
1737 fflush(gcTraceFile);
1738 }
1739#ifdef FOAM_RTS
1740 if (markingStats) {
1741 fprintf(osStderr, " [GC: ");
1742 fflush(osStderr);
1743 }
1744#endif
1745
1746
1747#ifdef STO_LONGJMP
1748 if (!setjmp(stoAllocInner_ErrorCatch)_setjmp (stoAllocInner_ErrorCatch)) return;
1749#endif
1750
1751 nm = stoGcMark();
1752 ns = stoGcSweep();
1753
1754 if (gcTraceFile) {
1755 fprintf(gcTraceFile, "marked %d (+ %d free), swept %d.]\n",
1756 nm, stoGcMarkedFree, ns);
1757 }
1758#ifdef FOAM_RTS
1759 if (markingStats) fprintf(osStderr, "marked %d (+ %d free), swept %d.]\n",
1760 nm, stoGcMarkedFree, ns);
1761#endif
1762}
1763
1764
1765/* Pointer classification: DO NOT change the order of these defines! */
1766#define MEM_HEAP0 0
1767#define MEM_IDATA1 1
1768#define MEM_STACK2 2
1769#define MEM_DDATA3 3
1770#define MEM_MAX_TYPE(3 +1) (MEM_DDATA3+1)
1771
1772#define PTR_INTO_HEAP0 0
1773#define PTR_INTO_BUSY1 1
1774#define PTR_INTO_DATA2 2
1775#define PTR_INTO_NEW3 3
1776#define PTR_INTO_FREE4 4
1777#define PTR_MAX_TYPE(4 +1) (PTR_INTO_FREE4+1)
1778
1779static int stoMarkArea, stoMarkChildren;
1780static long stoMarkCount[2*MEM_MAX_TYPE(3 +1)][PTR_MAX_TYPE(4 +1)];
1781
1782static char *
1783stoMarkCountArea(int i)
1784{
1785 if (i >= MEM_MAX_TYPE(3 +1))
1786 {
1787 switch (i - MEM_MAX_TYPE(3 +1))
1788 {
1789 case MEM_HEAP0 : return "(*) Heap";
1790 case MEM_IDATA1 : return "(*) Idata";
1791 case MEM_DDATA3 : return "(*) Ddata";
1792 case MEM_STACK2 : return "(*) Stack";
1793 default : return "(*) ????";
1794 }
1795 }
1796 else
1797 {
1798 switch (i)
1799 {
1800 case MEM_HEAP0 : return " Heap";
1801 case MEM_IDATA1 : return " Idata";
1802 case MEM_DDATA3 : return " Ddata";
1803 case MEM_STACK2 : return " Stack";
1804 default : return " ????";
1805 }
1806 }
1807}
1808
1809static char *
1810stoMarkCountTest(int i)
1811{
1812 switch (i)
1813 {
1814 case PTR_INTO_HEAP0 : return "Heap";
1815 case PTR_INTO_BUSY1 : return "Busy";
1816 case PTR_INTO_DATA2 : return "Data";
1817 case PTR_INTO_NEW3 : return " New";
1818 case PTR_INTO_FREE4 : return "Free";
1819 default : return "????";
1820 }
1821}
1822
1823/*
1824 * Mark the pieces currently in use.
1825 * Use the stack, static data, and foreign dynamic data as roots.
1826 * Return the number of busy pieces marked.
1827 */
1828
1829localstatic int
1830stoGcMark(void)
1831{
1832 struct osMemMap **mm;
1833 int n;
1834 int i, j;
1835 static int doClassify = -1;
1836
1837 mm = osMemMap(OSMEM_STACK0x04 | OSMEM_IDATA0x01 | OSMEM_DDATA0x02);
1838 if (!mm) return 0;
1839
1840 /* Pointer classification only available in debug version */
1841 if (DEBUG(sto)((int) 0)) {
1842 if (doClassify == -1)
1843 doClassify = (osGetEnv("GC_CLASSIFY") != NULL((void*)0));
1844
1845 if (doClassify)
1846 {
1847 /* Clear the classification tables */
1848 for (i = 0;i < (2*MEM_MAX_TYPE(3 +1));i++)
1849 for (j = 0;j < PTR_MAX_TYPE(4 +1);j++)
1850 stoMarkCount[i][j] = 0;
1851 }
1852 }
1853
1854 stoGcMarkedFree = 0;
1855 n = 0;
1856
1857 for ( ; (*mm)->use != OSMEM_END0x00; mm++) {
1858 if (DEBUG(sto)((int) 0)) {
1859 /* Memory type being traced (for classification) */
1860 stoMarkArea = MEM_HEAP0;
1861 stoMarkChildren = 0;
1862 }
1863
1864 switch ((*mm)->use) {
1865 case OSMEM_STACK0x04:
1866 if (DEBUG(sto)((int) 0)) {stoMarkArea++;}
1867 /* Fall through */
1868 case OSMEM_IDATA0x01:
1869 if (DEBUG(sto)((int) 0)) {stoMarkArea++;}
1870 /*
1871 * Top-level call to stoGcMarkRange requires
1872 * page check under Win98.
1873 */
1874 n += stoGcMarkRange((Pointer *)((*mm)->lo),
1875 (Pointer *)((*mm)->hi), 1);
1876
1877			/* Some compilers, e.g. Watcom, define 1-byte-aligned
1878			 * pointers even when packing structures with 4-byte alignment.
1879 */
1880#if defined(CC_one_byte_aligned_pointers)
1881 {
1882 int i;
1883 for (i = 1; i < sizeof(char*); i++)
1884 n += stoGcMarkRange(
1885 (Pointer *)ptrOff((*mm)->lo, i)((Pointer)((char *)((*mm)->lo) + (i))),
1886 (Pointer *)((*mm)->hi), 1);
1887 }
1888#endif /* CC_one_byte_aligned_pointers */
1889
1890 break;
1891 case OSMEM_DDATA0x02: {
1892#if defined(OS_WIN32)
1893 /*
1894 * We have to be careful - this segment may contain
1895 * just part of the heap.
1896 */
1897 char *p;
1898 int i, inbottom = 0, intop = 0;
1899
1900 if (DEBUG(sto)((int) 0)) {stoMarkArea = MEM_DDATA3;}
1901
1902 /* Scan from (*mm)->lo to heapStart? */
1903 if ((Pointer)heapStart >= (*mm)->lo &&
1904 (Pointer)heapStart < (*mm)->hi)
1905 {
1906 inbottom = 1;
1907 n += stoGcMarkRange((Pointer *)((*mm)->lo),
1908 (Pointer *)(heapStart), 1);
1909 }
1910
1911 /* Scan from heapEnd to (*mm)->hi? */
1912 if ( (Pointer)heapEnd >= (*mm)->lo &&
1913 (Pointer)heapEnd < (*mm)->hi)
1914 {
1915 intop = 1;
1916 n += stoGcMarkRange((Pointer *)(heapEnd),
1917 (Pointer *)((*mm)->hi), 1);
1918 }
1919
1920 /* If the heap is in the segment, scan foreign pages */
1921 if (inbottom || intop)
1922 {
1923 /* Compute start and finish page */
1924 int first = inbottom ? 0 : pgNo((Pointer)(*mm)->lo)(((long)((char *)((char *)((Pointer)(*mm)->lo)) - (char *)
(heapStart))) >> 12)
;
1925 int last = intop ? pgMapSize : pgNo((Pointer)(*mm)->hi)(((long)((char *)((char *)((Pointer)(*mm)->hi)) - (char *)
(heapStart))) >> 12)
;
1926
1927 /* Scan foreign pages in heap in the segment */
1928 for (i = first; i < last; i++) {
1929 if (pgMap[i] != PgForeign) continue;
1930 p = (char *) pgAt(i)((Page *)((Pointer)((char *)(heapStart) + ((i) * (1L<<12
)))))
;
1931 n += stoGcMarkRange((Pointer *)(p),
1932 (Pointer *)(p+PgSize(1L<<12)), 1);
1933 }
1934 }
1935 else {
1936 /* Scan the whole segment */
1937 n += stoGcMarkRange((Pointer *)((*mm)->lo),
1938 (Pointer *)((*mm)->hi), 1);
1939 }
1940#else
1941 /*
1942 * Assume that if heapStart lies within this
1943 * segment then heapEnd does also.
1944 */
1945 int i;
1946 char *p;
1947
1948 if (DEBUG(sto)((int) 0)) {stoMarkArea = MEM_DDATA3;}
1949 if ( (Pointer) heapStart >= (*mm)->lo &&
1950 (Pointer) heapStart < (*mm)-> hi)
1951 {
1952 n += stoGcMarkRange((Pointer *)((*mm)->lo),
1953 (Pointer *)(heapStart), 1);
1954 n += stoGcMarkRange((Pointer *)(heapEnd),
1955 (Pointer *)((*mm)->hi),1 );
1956
1957#if defined(OS_MAC_OSX)
1958 /*
1959 DGC:
1960 On Mac OS X the heap is not necessarily contiguous.
1961 There may be PgForeign pages, but there may also be
1962 gaps in VM. Avoid trying to mark non-existent memory.
1963
1964 I suspect that this may be a bug for other systems
1965 too, and the fix should work for any system, but
1966 I'll leave it OS_MAC_OSX specific for now.
1967 */
1968 int mm_hi_pgNo = pgNo((*mm)->hi)(((long)((char *)((char *)((*mm)->hi)) - (char *)(heapStart
))) >> 12)
;
1969 int iMax = (mm_hi_pgNo<pgMapSize) ? mm_hi_pgNo : pgMapSize ;
1970 for (i = 0; i < iMax; i++) {
1971 p = (char *) pgAt(i)((Page *)((Pointer)((char *)(heapStart) + ((i) * (1L<<12
)))))
;
1972 n += stoGcMarkRange((Pointer *)(p),
1973 (Pointer *)(p+PgSize(1L<<12)), 1 );
1974 }
1975#else /* !defined(OS_MAC_OSX) */
1976 for (i = 0; i < pgMapSize; i++) {
1977 if (pgMap[i] != PgForeign) continue;
1978 p = (char *) pgAt(i)((Page *)((Pointer)((char *)(heapStart) + ((i) * (1L<<12
)))))
;
1979 n += stoGcMarkRange((Pointer *)(p),
1980 (Pointer *)(p+PgSize(1L<<12)), 1 );
1981 }
1982#endif /* defined(OS_MAC_OSX) */
1983 }
1984 else {
1985 n += stoGcMarkRange((Pointer *)((*mm)->lo),
1986 (Pointer *)((*mm)->hi), 1);
1987 }
1988#endif
1989 break;
1990
1991 }
1992 default:
1993 assert(false)do { if (!(((int) 0))) _do_assert(("false"),"store.c",1993); }
while (0)
; /*mem type is silly*/
1994 break;
1995 }
1996 }
1997
1998 /* Emit pointer classification table? */
1999 if (DEBUG(sto)((int) 0)) {
2000 if (doClassify)
2001 {
2002 /* Column headings */
2003 fprintf(osStderr, "\n ");
2004 for (j = 0;j < PTR_MAX_TYPE(4 +1);j++)
2005 fprintf(osStderr,"%10s ",stoMarkCountTest(j));
2006
2007
2008 /* Table contents */
2009 fprintf(osStderr, "\n");
2010 for (i = 0;i < (2*MEM_MAX_TYPE(3 +1));i++)
2011 {
2012 fprintf(osStderr, "%s: ", stoMarkCountArea(i));
2013 for (j = 0;j < PTR_MAX_TYPE(4 +1);j++)
2014 {
2015 long c = stoMarkCount[i][j];
2016 fprintf(osStderr,"%10ld ", c);
2017 }
2018 fprintf(osStderr, "\n");
2019 }
2020
2021 /* Leave a gap below the table */
2022 fprintf(osStderr, "\n\n");
2023 }
2024 }
2025
2026 return n;
2027}
2028
2029static int lvl;
2030localstatic int stoGcMarkRange1(Pointer *lo, Pointer *hi, int check);
2031localstatic int
2032stoGcMarkRange(Pointer *lo, Pointer *hi, int check)
2033{
2034 lvl++;
2035 if (DEBUG(sto)((int) 0) && lvl % 3000 == 0) {
2036 printf("Deep stack %d\n", lvl);
2037 }
2038
2039 int ret = stoGcMarkRange1(lo, hi, check);
2040 lvl--;
2041 return ret;
2042}
2043
2044localstatic int
2045stoGcMarkRange1(Pointer *lo, Pointer *hi, int check)
2046{
2047 Pointer p, *pp, *plo, *phi, *hi0;
2048 static int pgno, qmno;
2049 static PgInfo pgtag;
2050 static QmInfo qmtag;
2051 static Section *sect;
2052 int n = 0;
2053 int oldStoMarkArea;
2054#if defined(OS_WIN32)
2055 MEMORY_BASIC_INFORMATION minfo;
2056 int access;
2057#endif
2058
2059#ifdef STO_CAN_BLACKLIST
2060 static int canBlacklist = -1;
2061
2062 if (canBlacklist == -1)
2063 canBlacklist = (osGetEnv("GC_BLACKLIST") != NULL((void*)0));
2064#endif
2065
2066#if defined(OS_WIN32)
2067 /*
2068 * Under Win98 and probably Win95 too, pages that were accessible
2069 * during the call to osMemMap may now be inaccessible. Some pages
2070 * are decommitted, some are released and some have their access
2071 * permissions changed. Without this check we get a segfault or GPF.
2072 *
2073 * We daren't put this code in a procedure because the pages might
2074 * be on the stack and be unmapped on return.
2075 *
2076 * ASSUMES that if the first page is still valid, the rest are ...
2077 */
2078 if (!check) goto PagesOkay; /* Page range known to be safe (in heap) */
2079
2080 if (VirtualQuery((void *)lo, &minfo, sizeof(minfo)) != sizeof(minfo))
2081 return n; /* Query failed => don't risk scanning the page */
2082
2083 /* First check the page type */
2084 switch (minfo.State)
2085 {
2086 case MEM_COMMIT:
2087 /* These pages are okay. */
2088 break;
2089 case MEM_FREE: /* FALL THROUGH */
2090 (void)fprintf(stderrstderr, "*** committed -> free\n");
2091 (void)fflush(stderrstderr);
2092 return n;
2093 case MEM_RESERVE: /* FALL THROUGH */
2094 /* These pages are definitely not okay. */
2095#if 0
2096 (void)fprintf(stderrstderr, "*** committed -> reserved\n");
2097 (void)fflush(stderrstderr);
2098#endif
2099 return n;
2100#if 0
2101 case MEM_MAPPED:
2102 /* These pages might be okay. */
2103 break;
2104 case MEM_PRIVATE:
2105 /* These pages might be okay. */
2106 break;
2107#endif
2108 default:
2109 (void)fprintf(stderrstderr, "*** Unknown page type\n");
2110 (void)fflush(stderrstderr);
2111 /* Unknown page type: might be okay. */
2112 break;
2113 }
2114
2115 /*
2116 * Check the state of the page ignoring guard and cache status. We
2117 * want to skip any page that we cannot read from or write to. If we
2118	 * were paranoid then we would skip only pages we cannot read.
2119 */
2120 access = minfo.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
2121 switch (access)
2122 {
2123 case PAGE_NOACCESS:
2124 /* Cannot scan this page even if it holds pointers */
2125 (void)fprintf(stderrstderr, "*** read/write -> no access\n");
2126 (void)fflush(stderrstderr);
2127 return n;
2128 case PAGE_READONLY:
2129 case PAGE_EXECUTE:
2130 case PAGE_EXECUTE_READ:
2131 /* Hope it doesn't contain pointers: try the next region. */
2132 (void)fprintf(stderrstderr, "*** read/write -> read/exec\n");
2133 (void)fflush(stderrstderr);
2134 return n;
2135 case PAGE_READWRITE:
2136 case PAGE_WRITECOPY:
2137 case PAGE_EXECUTE_READWRITE:
2138 case PAGE_EXECUTE_WRITECOPY:
2139 case PAGE_WRITECOMBINE:
2140 /* Might contain pointers so scan them. */
2141 break;
2142 default:
2143 /* Ignore this region in case it is something nasty. */
2144 (void)fprintf(stderrstderr, "*** read/write -> ??? [%d]\n", access);
2145 (void)fflush(stderrstderr);
2146 return n;
2147 }
2148
2149 /* We get here if the page check passed or wasn't needed */
2150PagesOkay: {}
2151#endif
2152
2153 /* Pointer classification */
2154 if (DEBUG(sto)((int) 0)) {oldStoMarkArea = stoMarkArea;}
2155
2156 stoWatchMarkFrom(lo);
2157 stoWatchMarkTo (hi);
2158
2159 TailRecursion:
2160 hi = (Pointer *) ptrCanon(hi)((Pointer)(hi));
2161 hi0 = (Pointer *) ptrOff(((char *) hi), 1 - sizeof(char *))((Pointer)((char *)(((char *) hi)) + (1 - sizeof(char *))));
2162
2163 for (pp = lo; ptrLT(pp, hi0)(((Pointer)(pp)) < ((Pointer)(hi0)));
2164 pp = (Pointer *) ptrOff((char *) pp, alignof(Pointer))((Pointer)((char *)((char *) pp) + ((sizeof(struct {char a; Pointer
b;}) - sizeof(Pointer)))))
) {
2165 p = *pp;
2166
2167 /* Verify pointer is into heap. */
2168 if (!isInHeap(p)((((Pointer)(heapStart)) <= ((Pointer)((p)))) && (
((Pointer)((p))) < ((Pointer)(heapEnd))))
) continue;
2169 if (DEBUG(sto)((int) 0)) {stoMarkCount[stoMarkArea][PTR_INTO_HEAP0]++;}
2170
2171
2172 /* Verify pointer is to busy page. */
2173 pgno = pgNo(p)(((long)((char *)((char *)(p)) - (char *)(heapStart))) >>
12)
;
2174 pgtag = pgMap[pgno];
2175 if (pgtag != PgBusyFirst && pgtag != PgBusyFollow) continue;
2176 if (DEBUG(sto)((int) 0)) {
2177 stoMarkCount[stoMarkArea][PTR_INTO_HEAP0]--;
2178 stoMarkCount[stoMarkArea][PTR_INTO_BUSY1]++;
2179 }
2180
2181
2182 /* Grab section header. */
2183 while (pgtag == PgBusyFollow) pgtag = pgMap[--pgno];
2184 sect = sectAt(pgno)((Section *) ((Page *)((Pointer)((char *)(heapStart) + ((pgno
) * (1L<<12))))))
;
2185
2186 /* Verify pointed-to quantum. */
2187 if (ptrLT(p, sect->data)(((Pointer)(p)) < ((Pointer)(sect->data)))) continue;
2188 if (DEBUG(sto)((int) 0)) {
2189 stoMarkCount[stoMarkArea][PTR_INTO_BUSY1]--;
2190 stoMarkCount[stoMarkArea][PTR_INTO_DATA2]++;
2191 }
2192
2193
2194#ifdef STO_DIVISION_BY_LOOKUP1
2195 /* If interior of object, point to beginning. */
2196 /* Use shift operations where possible */
2197 if (sect->isFixed) {
2198 if (sect->qmLog) {
2199 qmno = qmLogNo(p, sect)(((long)((char *)((char*)(p)) - (char *)((char*)((sect)->data
)))) >> (sect)->qmLog)
;
2200 p = ptrOff((char *) sect->data, qmno << sect->qmLog)((Pointer)((char *)((char *) sect->data) + (qmno << sect
->qmLog)))
;
2201 }
2202 else {
2203 qmno = qmDivNo(p, sect)stoDivTable[(sect)->qmDiv][(((long)((char *)((char*)(p)) -
(char *)((char*)((sect)->data)))))]
;
2204 assert(qmno == qmNo(p, sect))do { if (!(qmno == (((long)((char *)((char*)(p)) - (char *)((
char*)((sect)->data))))/(sect)->qmSize))) _do_assert(("qmno == qmNo(p, sect)"
),"store.c",2204); } while (0)
;
2205 p = ptrOff((char *) sect->data, qmno * sect->qmSize)((Pointer)((char *)((char *) sect->data) + (qmno * sect->
qmSize)))
;
2206 }
2207 }
2208 else {
2209 qmno = qmNo(p, sect)(((long)((char *)((char*)(p)) - (char *)((char*)((sect)->data
))))/(sect)->qmSize)
;
2210 p = ptrOff((char *) sect->data, qmno * sect->qmSize)((Pointer)((char *)((char *) sect->data) + (qmno * sect->
qmSize)))
;
2211 }
2212#else
2213 /* If interior of object, point to beginning. */
2214 /* Use shift operations where possible */
2215 if (sect->isFixed && sect->qmLog) {
2216 qmno = qmLogNo(p, sect)(((long)((char *)((char*)(p)) - (char *)((char*)((sect)->data
)))) >> (sect)->qmLog)
;
2217 p = ptrOff((char *) sect->data, qmno << sect->qmLog)((Pointer)((char *)((char *) sect->data) + (qmno << sect
->qmLog)))
;
2218 }
2219 else {
2220 qmno = qmNo(p, sect)(((long)((char *)((char*)(p)) - (char *)((char*)((sect)->data
))))/(sect)->qmSize)
;
2221 /* If interior of object, point to beginning. */
2222 p = ptrOff((char *) sect->data, qmno * sect->qmSize)((Pointer)((char *)((char *) sect->data) + (qmno * sect->
qmSize)))
;
2223 }
2224#endif
2225
2226 /* Verify not already marked. */
2227 qmtag = sect->info[qmno];
2228 if (QmInfoMark(qmtag)((qmtag) & 0x20)) continue;
2229 if (DEBUG(sto)((int) 0)) {
2230 stoMarkCount[stoMarkArea][PTR_INTO_DATA2]--;
2231 stoMarkCount[stoMarkArea][PTR_INTO_NEW3]++;
2232 }
2233
2234
2235 /* Add mark bits quanta in piece. Determine data bounds. */
2236 if (sect->isFixed) {
2237 QmInfoSetMark(sect->info[qmno])((sect->info[qmno]) |= 0x20);
2238 plo = (Pointer *) p;
2239 phi = (Pointer *) ptrOff((char *) plo, sect->qmSize)((Pointer)((char *)((char *) plo) + (sect->qmSize)));
2240 }
2241 else {
2242 MxMem *pc;
2243 int sz, nq, qi;
2244
2245 while (QmInfoKind(qmtag)((qmtag) & 0xC0) == QmFollow0x00)
2246 qmtag = sect->info[--qmno];
2247
2248 pc = (MxMem *)ptrOff((char *)sect->data,((Pointer)((char *)((char *)sect->data) + (qmno*sect->qmSize
)))
2249 qmno*sect->qmSize)((Pointer)((char *)((char *)sect->data) + (qmno*sect->qmSize
)))
;
2250 sz = pc->nbytesThis;
2251 nq = sz/sect->qmSize;
2252
2253 for (qi = qmno; qi < qmno + nq; qi++)
2254 QmInfoSetMark(sect->info[qi])((sect->info[qi]) |= 0x20);
2255
2256 sz -= MxMemHeadSize(sizeof(MxMem) - sizeof(MxMemU));
2257 plo = (Pointer *) (&pc->body.busy.data);
2258 phi = (Pointer *) ptrOff((char *) plo, sz)((Pointer)((char *)((char *) plo) + (sz)));
2259 }
2260
2261 /* Verify piece is in use. */
2262 if (QmInfoKind(qmtag)((qmtag) & 0xC0) == QmFreeFirst0x40) {
2263 if (DEBUG(sto)((int) 0)) {
2264 stoMarkCount[stoMarkArea][PTR_INTO_NEW3]--;
2265 stoMarkCount[stoMarkArea][PTR_INTO_FREE4]++;
2266 }
2267 stoGcMarkedFree++;
2268
2269#ifdef STO_CAN_BLACKLIST
2270 /* We only blacklist fixed-sized pages currently */
2271 if (canBlacklist && sect->isFixed)
2272 sect->info[qmno] = QmInfoMake0(QmBlacklisted)(0xC0);
2273#endif
2274 continue;
2275 }
2276
2277#ifdef STO_CAN_BLACKLIST
2278 if (QmInfoKind(qmtag)((qmtag) & 0xC0) == QmBlacklisted0xC0) continue;
2279#endif
2280 n++;
2281
2282 /* Check that this contains pointers
2283 * This test should use a bit in the tag, not the code.
2284 */
2285 if (QmIsPtrFree(qmtag)(stoObNoInternalPtrs[((qmtag) & 0x1F)])) continue;
2286
2287
2288#if STO_USER_CAN_TRACE
2289 /*
2290 * Has the creator of this object requested
2291 * permission to trace it?
2292 */
2293 if (QmAldorTraced(qmtag)(stoObAldorTracer[((qmtag) & 0x1F)]))
2294 {
2295 /* Invoke the creator's tracer */
2296 n += (int)stoFiCCall2(int, QmAldorTraced(qmtag),((*((int (*)())((stoObAldorTracer[((qmtag) & 0x1F)]))->
prog->fun))(((stoObAldorTracer[((qmtag) & 0x1F)]))->
env,plo,phi))
2297 plo, phi)((*((int (*)())((stoObAldorTracer[((qmtag) & 0x1F)]))->
prog->fun))(((stoObAldorTracer[((qmtag) & 0x1F)]))->
env,plo,phi))
;
2298 continue;
2299 }
2300 if (QmCTraced(qmtag)(stoObCTracer[((qmtag) & 0x1F)]))
2301 {
2302 /* Invoke the creator's tracer */
2303 n += (QmCTraced(qmtag)(stoObCTracer[((qmtag) & 0x1F)]))(plo, phi);
2304 continue;
2305 }
2306#endif
2307
2308 /* Mark descendants. */
2309 if (ptrEQ(pp, hi-1)( ((Pointer)((Pointer)(pp)) == (Pointer)((Pointer)(hi-1))))) {
2310 if (DEBUG(sto)((int) 0)) {
2311 if (stoMarkArea < MEM_MAX_TYPE(3 +1)) {
2312 /* Now marking within heap */
2313 oldStoMarkArea = stoMarkArea;
2314 stoMarkArea += MEM_MAX_TYPE(3 +1);
2315 }
2316 }
2317
2318 lo = plo;
2319 hi = phi;
2320 goto TailRecursion;
2321 }
2322
2323 /* Pointer classification */
2324 if (DEBUG(sto)((int) 0)) {
2325 if (stoMarkArea < MEM_MAX_TYPE(3 +1)) {
2326 /* Now marking within heap */
2327 oldStoMarkArea = stoMarkArea;
2328 stoMarkArea += MEM_MAX_TYPE(3 +1);
2329 }
2330 }
2331
2332 n += stoGcMarkRange(plo, phi, (int) 0);
2333
2334 /* Pointer classification */
2335 if (DEBUG(sto)((int) 0)) {stoMarkArea = oldStoMarkArea;}
2336 }
2337
2338 /* Pointer classification */
2339 if (DEBUG(sto)((int) 0)) {stoMarkArea = oldStoMarkArea;}
2340 return n;
2341}
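
For each candidate word p, the marker above rebases interior pointers: the page map yields the owning section, the quantum index comes from a shift (power-of-two quantum sizes) or a division, and the index is turned back into the quantum's start address. A compact sketch of that normalization for the power-of-two case (Sect and its fields are illustrative, not the Section layout used here):

    #include <stdint.h>

    /* Illustrative section descriptor: qmLog is log2 of the quantum size. */
    typedef struct {
        char    *data;     /* first byte of data managed by the section */
        unsigned qmLog;    /* e.g. 5 for 32-byte quanta                 */
    } Sect;

    /* Round an interior pointer down to the start of its quantum.
     * With qmLog == 5, any p in [data+64, data+96) maps to data+64. */
    static void *objectStart(const Sect *s, const void *p)
    {
        uintptr_t off  = (uintptr_t)((const char *)p - s->data);
        uintptr_t qmno = off >> s->qmLog;         /* quantum index       */
        return s->data + (qmno << s->qmLog);      /* base of the quantum */
    }
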
2342
2343/*
2344 * Collect unmarked busy pieces.
2345 * The small piece free lists are reconstructed, sorted by section.
2346 */
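
Both the marker above and the sweeps below work on the per-quantum QmInfo tags, which (as the expanded macros in this listing show) pack three fields into one byte: the top two bits give the kind, 0x20 is the GC mark bit, and the low five bits hold the allocation code. A small restatement of that layout, with a stand-in typedef:

    #include <stdio.h>

    typedef unsigned char QmInfo;

    #define QM_KIND(t)   ((t) & 0xC0)  /* 0x00 Follow, 0x40 FreeFirst,     */
                                       /* 0x80 BusyFirst, 0xC0 Blacklisted */
    #define QM_MARK(t)   ((t) & 0x20)  /* set while the collector marks    */
    #define QM_CODE(t)   ((t) & 0x1F)  /* allocation code used for tallies */

    int main(void)
    {
        QmInfo t = 0x80 | 0x20 | 0x07;   /* busy, marked, code 7 */
        printf("kind=%02x mark=%d code=%d\n",
               QM_KIND(t), !!QM_MARK(t), QM_CODE(t));  /* kind=80 mark=1 code=7 */
        return 0;
    }
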
2347
2348static FxMem fixedHeadStruct[FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0]))], *fixedTail[FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0]))];
2349static ULong freeFixedStruct[FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0]))];
2350static ULong busyFixedStruct[FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0]))];
2351
2352localstatic int
2353stoGcSweep(void)
2354{
2355 int cd, ix, pgno, pgcount, swept, got;
2356 Section *sect;
2357
2358#ifdef USE_MEMORY_CLIMATE
2359 initMemoryClimateHistogram();
2360#endif
2361
2362 for (cd = 0; cd < STO_CODE_LIMIT32; cd++)
2363 stoPiecesGc[cd] = 0; /* Tally by code of pieces swept. */
2364
2365 /* Reset the fixed-sized free lists and counters. */
2366 for (ix = 0; ix < FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])); ix++)
2367 {
2368 fixedTail[ix] = &fixedHeadStruct[ix];
2369
2370 freeFixedStruct[ix] = 0L;
2371 busyFixedStruct[ix] = 0L;
2372 }
2373
2374
2375 /* Reset the mixed-sized store counting */
2376 freeMixedBytes = 0L;
2377 busyMixedBytes = 0L;
2378
2379
2380 /* Check each section in the heap */
2381 for (swept = 0, pgno = 0; pgno < pgMapSize; pgno += pgcount)
2382 {
2383 if (pgMap[pgno] != PgBusyFirst) { pgcount = 1; continue; }
2384
2385 sect = sectAt(pgno)((Section *) ((Page *)((Pointer)((char *)(heapStart) + ((pgno
) * (1L<<12))))))
;
2386 pgcount = sect->pgCount;
2387
2388 if (sect->isFixed)
2389 {
2390 got = stoGcSweepFixed(sect);
2391 swept += got;
2392 }
2393 else
2394 swept += stoGcSweepMixed(sect);
2395 }
2396
2397 for (ix = 0; ix < FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])); ix++) {
2398 fixedTail[ix]->next = 0;
2399 fixedPieces[ix] = fixedHeadStruct[ix].next;
2400
2401 freeFixedPieces[ix] = freeFixedStruct[ix];
2402 busyFixedPieces[ix] = busyFixedStruct[ix];
2403 }
2404#ifdef USE_MEMORY_CLIMATE
2405 finiMemoryClimateHistogram();
2406 showMemoryClimateHistogram(stdoutstdout);
2407#endif
2408 return swept;
2409}
2410
2411localstatic int
2412stoGcSweepFixed(Section *sect)
2413{
2414 int qmcount, qmsize, qmsizeix, qmno, qmbusy, mark, swept;
2415 QmInfo qmtag;
2416 char *data;
2417 FxMem *pc, *nfixedTail;
2418 ULong nfixedFree;
2419
2420 data = (char *) sect->data;
2421 qmcount = sect->qmCount;
2422 qmsize = sect->qmSize;
2423 qmsizeix = sect->qmSizeIndex;
2424 swept = 0;
2425 qmbusy = 0;
2426 nfixedTail = fixedTail[qmsizeix];
2427 nfixedFree = 0L;
2428
2429 for (qmno = 0; qmno < qmcount; qmno++) {
2430 qmtag = sect->info[qmno];
2431 mark = QmInfoMark(qmtag)((qmtag) & 0x20);
2432
2433 if (QmInfoKind(qmtag)((qmtag) & 0xC0) == QmBusyFirst0x80) {
2434 if (mark) {
2435 QmInfoClearMark(sect->info[qmno])((sect->info[qmno]) &= (~0x20 & 0xFF));
2436#ifdef USE_MEMORY_CLIMATE
2437 incrMemoryClimateHistogram(
2438 QmInfoCode(sect->info[qmno])((sect->info[qmno]) & 0x1F), (int) 0, 1
2439 );
2440#endif
2441 qmbusy++;
2442 }
2443 else {
2444 pc = (FxMem *)(data+qmno*qmsize);
2445 stoPiecesGc[ QmInfoCode(sect->info[qmno])((sect->info[qmno]) & 0x1F) ]++;
2446#ifdef USE_MEMORY_CLIMATE
2447 incrMemoryClimateHistogram(
2448 QmInfoCode(sect->info[qmno])((sect->info[qmno]) & 0x1F), 1, 1
2449 );
2450#endif
2451 sect->info[qmno] = QmInfoMake0(QmFreeFirst)(0x40);
2452 fxmemCleanBody(pc, qmsize)(stoMustWash ? memlset ((UByte *)(pc) + sizeof(FxMem),0xDD,(qmsize
) - sizeof(FxMem)) : 0)
;
2453 stoTally(stoBytesGc += qmsize)(stoBytesGc += qmsize);
2454 nfixedTail->next = pc;
2455 nfixedTail = pc;
2456 swept++;
2457 }
2458 }
2459 else {
2460 if (mark) QmInfoClearMark(sect->info[qmno])((sect->info[qmno]) &= (~0x20 & 0xFF));
2461
2462#ifdef STO_CAN_BLACKLIST
2463 /* Skip blacklisted pieces */
2464 if (QmInfoKind(qmtag)((qmtag) & 0xC0) == QmBlacklisted0xC0) {
2465 stoTally(stoBytesBlack += qmsize)(stoBytesBlack += qmsize);
2466 continue;
2467 }
2468#endif
2469
2470 pc = (FxMem *)(data+qmno*qmsize);
2471 nfixedTail->next = pc;
2472 nfixedTail = pc;
2473 nfixedFree++;
2474 }
2475 }
2476
2477 if (qmbusy)
2478 {
2479 fixedTail[qmsizeix] = nfixedTail;
2480 freeFixedStruct[qmsizeix] += (nfixedFree + swept);
2481 busyFixedStruct[qmsizeix] += qmbusy;
2482 }
2483 else
2484 pagesPut((Page *) sect, sect->pgCount);
2485 return swept;
2486}
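
stoGcSweepFixed rebuilds each free list through a per-size dummy head (fixedHeadStruct) plus a tail pointer, so appending a swept or already-free piece never has to special-case an empty list; the real head is read back from the dummy's next field in stoGcSweep. The same pattern in isolation (Node and keep are stand-ins for FxMem and the busy/free test):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct Node { struct Node *next; bool keep; } Node;

    /* Rebuild a list from the kept elements of src, preserving their order. */
    Node *rebuild(Node *src)
    {
        Node  head;           /* dummy head: removes the empty-list case */
        Node *tail = &head;   /* always the last node appended so far    */
        Node *p, *next;

        for (p = src; p; p = next) {
            next = p->next;
            if (!p->keep) continue;
            tail->next = p;
            tail = p;
        }
        tail->next = NULL;
        return head.next;     /* real head, possibly NULL */
    }
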
2487
2488localstatic int
2489stoGcSweepMixed(Section *sect)
2490{
2491 int qmcount, qmsize, qmno, qi, nq, sz, mark, swept, qmbusy;
2492 QmInfo qmtag;
2493 MxMem *pc;
2494 char *data;
2495 Pointer pstart, pend, pfront;
2496 ULong nMixedFree = freeMixedBytes;
2497 ULong nMixedBusy = busyMixedBytes;
2498
2499 data = (char *) sect->data;
2500 qmsize = sect->qmSize;
2501 qmcount = sect->qmCount;
2502
2503 swept = 0;
2504 qmbusy = 0;
2505
2506 for (qmno = 0; qmno < qmcount; qmno += nq) {
2507 pc = (MxMem *)(data + qmno*qmsize);
2508 sz = pc->nbytesThis;
2509 nq = sz/sect->qmSize;
2510 qmtag = sect->info[qmno];
2511 mark = QmInfoMark(qmtag)((qmtag) & 0x20);
2512
2513 if (!mark && QmInfoKind(qmtag)((qmtag) & 0xC0) == QmBusyFirst0x80) {
2514 MxMem *npc = mxmemNext(pc)((pc)->isLast ? 0 : (MxMem *)((Pointer)((char *)((char *)(
pc)) + ((long)(pc)->nbytesThis))))
;
2515
2516 /* Two cases, depending on whether might merge. */
2517 if (npc == 0 || !npc->isFree) {
2518
2519 /* Free piece. */
2520 stoPiecesGc[ QmInfoCode(sect->info[qmno])((sect->info[qmno]) & 0x1F) ]++;
2521#ifdef USE_MEMORY_CLIMATE
2522 incrMemoryClimateHistogram(
2523 QmInfoCode(sect->info[qmno])((sect->info[qmno]) & 0x1F), 1, 1
2524 );
2525#endif
2526 sect->info[qmno] = QmInfoMake0(QmFreeFirst)(0x40);
2527 mxmemCleanBody(pc, sz)(stoMustWash ? memlset ((UByte *)(pc) + sizeof(MxMem),0xDD,(sz
) - sizeof(MxMem)) : 0)
;
2528 stoTally(stoBytesGc += sz-MxMemHeadSize)(stoBytesGc += sz-(sizeof(MxMem) - sizeof(MxMemU)));
2529 piecePutMixed(pc);
2530 swept++;
2531 }
2532 else {
2533 /* Save info, in case of merge. */
2534 Length nqmno = qmno + nq;
2535 ULong nsz = npc->nbytesThis;
2536 Length nnq = nsz/sect->qmSize;
2537 QmInfo nqmtag = sect->info[nqmno];
2538
2539 /* Free piece. */
2540 stoPiecesGc[ QmInfoCode(sect->info[qmno])((sect->info[qmno]) & 0x1F) ]++;
2541#ifdef USE_MEMORY_CLIMATE
2542 incrMemoryClimateHistogram(
2543 QmInfoCode(sect->info[qmno])((sect->info[qmno]) & 0x1F), 1, 1
2544 );
2545#endif
2546 sect->info[qmno] = QmInfoMake0(QmFreeFirst)(0x40);
2547 mxmemCleanBody(pc, sz)(stoMustWash ? memlset ((UByte *)(pc) + sizeof(MxMem),0xDD,(sz
) - sizeof(MxMem)) : 0)
;
2548 stoTally(stoBytesGc += sz-MxMemHeadSize)(stoBytesGc += sz-(sizeof(MxMem) - sizeof(MxMemU)));
2549 piecePutMixed(pc);
2550 swept++;
2551
2552 /* Handle if next piece was merged. */
2553 if (QmInfoKind(sect->info[nqmno])((sect->info[nqmno]) & 0xC0)==QmFollow0x00) {
2554 if (QmInfoMark(nqmtag)((nqmtag) & 0x20)) {
2555 int N = nqmno+nnq;
2556 for (qi = nqmno; qi < N; qi++)
2557 QmInfoClearMark(((sect->info[qi]) &= (~0x20 & 0xFF))
2558 sect->info[qi])((sect->info[qi]) &= (~0x20 & 0xFF));
2559 }
2560 nq += nnq;
2561 }
2562 }
2563 nMixedFree += sz;
2564 }
2565 else if (mark) {
2566#ifdef USE_MEMORY_CLIMATE
2567 incrMemoryClimateHistogram(
2568 QmInfoCode(sect->info[qmno])((sect->info[qmno]) & 0x1F), (int) 0, 1
2569 );
2570#endif
2571 for (qi = qmno; qi < qmno+nq; qi++)
2572 QmInfoClearMark(sect->info[qi])((sect->info[qi]) &= (~0x20 & 0xFF));
2573
2574 if (QmInfoKind(qmtag)((qmtag) & 0xC0) == QmBusyFirst0x80)
2575 qmbusy += nq;
2576 nMixedBusy += sz;
2577 }
2578 }
2579
2580 /* Return pages if no busy pieces & sect does not contain frontier. */
2581 pstart = ptrCanon((char *) sect)((Pointer)((char *) sect));
2582 pend = ptrCanon((char *) sect + sect->pgCount * PgSize)((Pointer)((char *) sect + sect->pgCount * (1L<<12))
)
;
2583 pfront = ptrCanon((char *) mixedFrontier)((Pointer)((char *) mixedFrontier));
2584
2585 if (!qmbusy && (pfront < pstart || pend <= pfront)) {
2586 mxmemUnlinkFromBTree((MxMem *) data, &mixedPieces){ MxMemDLL *dll = mxmemUnlink((MxMem *) data); if ((!dll->
pieces)) { btreeDeleteX((&mixedPieces), dll->nbytes, (
(void*)0), mxmemFreeBTree); mxmemFreeDLL(dll); } }
;
2587 pagesPut((Page *) sect, sect->pgCount);
2588 }
2589 else
2590 {
2591 /* Note these changes in busy/free counts */
2592 freeMixedBytes = nMixedFree;
2593 busyMixedBytes = nMixedBusy;
2594 }
2595 return swept;
2596}
2597
2598
2599/****************************************************************************
2600 *
2601 * :: Audit code
2602 *
2603 ***************************************************************************/
2604
2605#ifndef NDEBUG
2606
2607localstatic void stoAuditHeapLocation (void);
2608localstatic void stoAuditMapPages (void);
2609
2610localstatic long * stoAuditFixedSizeSections(void);
2611localstatic long * stoAuditFixedSizePieces (void);
2612
2613localstatic long stoAuditMixedSizeSections(void);
2614localstatic long stoAuditMixedSizePieces (void);
2615
2616static long nMixed, nSizes;
2617
2618/*
2619 * Main entry into the audit code.
2620 */
2621localstatic void
2622stoAuditAll(void)
2623{
2624 long i, n1, n2, *a1, *a2;
2625
2626 nMixed = 0, nSizes = 0;
2627
2628 stoAuditHeapLocation();
2629 stoAuditMapPages();
2630
2631 a1 = stoAuditFixedSizeSections();
2632 a2 = stoAuditFixedSizePieces();
2633 for (i = 0; i < FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])); i++) assert(a1[i] == a2[i])do { if (!(a1[i] == a2[i])) _do_assert(("a1[i] == a2[i]"),"store.c"
,2633); } while (0)
;
2634
2635 n1 = stoAuditMixedSizeSections();
2636 n2 = stoAuditMixedSizePieces();
2637
2638 assert(n1 == n2)do { if (!(n1 == n2)) _do_assert(("n1 == n2"),"store.c",2638)
; } while (0)
;
2639}
2640
2641/*
2642 * Verify heap alignment and range.
2643 */
2644localstatic void
2645stoAuditHeapLocation(void)
2646{
2647 /* Verify heap is page-aligned. */
2648 assert(ptrToLong((Pointer) heapStart) % PgSize == 0)do { if (!(((long) ((Pointer) heapStart)) % (1L<<12) ==
0)) _do_assert(("ptrToLong((Pointer) heapStart) % PgSize == 0"
),"store.c",2648); } while (0)
;
2649 assert(ptrToLong((Pointer) heapEnd) % PgSize == 0)do { if (!(((long) ((Pointer) heapEnd)) % (1L<<12) == 0
)) _do_assert(("ptrToLong((Pointer) heapEnd) % PgSize == 0"),
"store.c",2649); } while (0)
;
2650}
2651
2652/*
2653 * Verify the page map is self-consistent.
2654 */
2655localstatic void
2656stoAuditMapPages(void)
2657{
2658 Length i, good, pgcount;
2659
2660 /* Verify page map location. */
2661 pgcount = QUO_ROUND_UP(pgMapSize * sizeof(PgInfo), PgSize)((pgMapSize * sizeof(PgInfo)) % ((1L<<12)) ? (pgMapSize
* sizeof(PgInfo))/((1L<<12)) + 1 : (pgMapSize * sizeof
(PgInfo))/((1L<<12)))
;
2662 assert(ptrToLong((Pointer) pgMap) % PgSize == 0)do { if (!(((long) ((Pointer) pgMap)) % (1L<<12) == 0))
_do_assert(("ptrToLong((Pointer) pgMap) % PgSize == 0"),"store.c"
,2662); } while (0)
;
2663 assert(pgMapUses == pgcount)do { if (!(pgMapUses == pgcount)) _do_assert(("pgMapUses == pgcount"
),"store.c",2663); } while (0)
;
2664
2665 /* Verify map contains only good entries. */
2666 good = 0;
2667 for (i = 0; i < pgMapSize; i++) {
2668 switch (pgMap[i]) {
2669 case PgFree: case PgBusyFirst: case PgBusyFollow:
2670 case PgPgMap: case PgBTree: case PgDLL:
2671 case PgForeign:
2672 good++;
2673 }
2674 }
2675 assert(good == pgMapSize)do { if (!(good == pgMapSize)) _do_assert(("good == pgMapSize"
),"store.c",2675); } while (0)
;
2676}
2677
2678/*
2679 * Verify the pages for sections of fixed size pieces.
2680 */
2681localstatic long *
2682stoAuditFixedSizeSections(void)
2683{
2684 static long fxpCount[FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0]))];
2685
2686 int pgno, pgcount, qmno, qmcount, sz, ix;
2687 QmInfo qmtag;
2688 Section *sect;
2689 char *data;
2690
2691 for (ix = 0; ix < FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])); ix++)
2692 fxpCount[ix] = 0;
2693
2694 for (pgno = 0; pgno < pgMapSize; pgno += pgcount) {
2695 if (pgMap[pgno] != PgBusyFirst) { pgcount = 1; continue; }
2696
2697 /* Get section information. */
2698 sect = sectAt(pgno)((Section *) ((Page *)((Pointer)((char *)(heapStart) + ((pgno
) * (1L<<12))))))
;
2699 pgcount = sect->pgCount;
2700 if (!sect->isFixed) continue;
2701
2702 data = (char *) sect->data;
2703 qmcount = sect->qmCount;
2704 ix = sect->qmSizeIndex;
2705 sz = sect->qmSize;
2706
2707 /* Verify section consistency. */
2708 assert(sz == fixedSize[ix])do { if (!(sz == fixedSize[ix])) _do_assert(("sz == fixedSize[ix]"
),"store.c",2708); } while (0)
;
2709 assert(ptrEQ(data + qmcount*sz, (char *)sect + pgcount * PgSize))do { if (!(( ((Pointer)((Pointer)(data + qmcount*sz)) == (Pointer
)((Pointer)((char *)sect + pgcount * (1L<<12))))))) _do_assert
(("ptrEQ(data + qmcount*sz, (char *)sect + pgcount * PgSize)"
),"store.c",2709); } while (0)
;
2710 assert(pgno + pgcount <= pgMapSize)do { if (!(pgno + pgcount <= pgMapSize)) _do_assert(("pgno + pgcount <= pgMapSize"
),"store.c",2710); } while (0)
;
2711
2712 /* Verify data alignment. */
2713 if (sz < alignof(MostAlignedType)(sizeof(struct {char a; MostAlignedType b;}) - sizeof(MostAlignedType
))
) {
2714 assert((long) data % sz == 0)do { if (!((long) data % sz == 0)) _do_assert(("(long) data % sz == 0"
),"store.c",2714); } while (0)
;
2715 }
2716 else {
2717 assert((long) data % alignof(MostAlignedType) == 0)do { if (!((long) data % (sizeof(struct {char a; MostAlignedType
b;}) - sizeof(MostAlignedType)) == 0)) _do_assert(("(long) data % alignof(MostAlignedType) == 0"
),"store.c",2717); } while (0)
;
2718 assert(sz % alignof(MostAlignedType) == 0)do { if (!(sz % (sizeof(struct {char a; MostAlignedType b;}) -
sizeof(MostAlignedType)) == 0)) _do_assert(("sz % alignof(MostAlignedType) == 0"
),"store.c",2718); } while (0)
;
2719 }
2720
2721 /* Scan data. */
2722 for (qmno = 0; qmno < qmcount; qmno++) {
2723 qmtag = sect->info[qmno];
2724 if (QmInfoKind(qmtag)((qmtag) & 0xC0) == QmFreeFirst0x40) {
2725 fxmemAssertCleanBody(data + qmno*sz, sz)(stoMustWash ? memltest((UByte *)(data + qmno*sz) + sizeof(FxMem
),0xDD,(sz) - sizeof(FxMem)) : 0)
;
2726 fxpCount[ix]++;
2727 }
2728 else if (QmInfoKind(qmtag)((qmtag) & 0xC0) == QmBlacklisted0xC0)
2729 /* Do nothing */;
2730 else
2731 assert(QmInfoKind(qmtag) == QmBusyFirst)do { if (!(((qmtag) & 0xC0) == 0x80)) _do_assert(("QmInfoKind(qmtag) == QmBusyFirst"
),"store.c",2731); } while (0)
;
2732 assert(!QmInfoMark(qmtag))do { if (!(!((qmtag) & 0x20))) _do_assert(("!QmInfoMark(qmtag)"
),"store.c",2732); } while (0)
;
2733 }
2734 }
2735
2736 return fxpCount;
2737}
2738
2739/*
2740 * Verify the recorded fixed size pieces.
2741 */
2742localstatic long *
2743stoAuditFixedSizePieces(void)
2744{
2745 static long fxpCount[FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0]))];
2746
2747 int pgno, qmno, sz, ix;
2748 PgInfo pgtag;
2749 QmInfo qmtag;
2750 Section *sect;
2751 FxMem *pc;
2752 char *data;
2753
2754 /* Verify pieces in free lists. */
2755 for (ix = 0; ix < FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])); ix++) {
2756 fxpCount[ix] = 0;
2757 sz = fixedSize[ix];
2758 for (pc = fixedPieces[ix]; pc; pc = pc->next) {
2759 pgno = pgNo(pc)(((long)((char *)((char *)(pc)) - (char *)(heapStart))) >>
12)
;
2760 pgtag = pgMap[pgno];
2761 sect = sectFor(pc)(pgMap[(((long)((char *)((char *)(pc)) - (char *)(heapStart))
) >> 12)]==PgBusyFirst ? ((Section *) ((Page *)((Pointer
)((char *)(heapStart) + (((((long)((char *)((char *)(pc)) - (
char *)(heapStart))) >> 12)) * (1L<<12)))))):_sectFor
(pc))
;
2762 assert(sect != 0)do { if (!(sect != 0)) _do_assert(("sect != 0"),"store.c",2762
); } while (0)
;
2763
2764 data = (char *) sect->data;
2765#ifdef STO_DIVISION_BY_LOOKUP1
2766 /* TTT */
2767 qmno = (sect->qmLog) ? qmLogNo(pc, sect)(((long)((char *)((char*)(pc)) - (char *)((char*)((sect)->
data)))) >> (sect)->qmLog)
: qmDivNo(pc, sect)stoDivTable[(sect)->qmDiv][(((long)((char *)((char*)(pc)) -
(char *)((char*)((sect)->data)))))]
;
2768 assert(qmno == qmNo(pc, sect))do { if (!(qmno == (((long)((char *)((char*)(pc)) - (char *)(
(char*)((sect)->data))))/(sect)->qmSize))) _do_assert((
"qmno == qmNo(pc, sect)"),"store.c",2768); } while (0)
;
2769#else
2770 qmno = qmNo(pc, sect)(((long)((char *)((char*)(pc)) - (char *)((char*)((sect)->
data))))/(sect)->qmSize)
;
2771#endif
2772
2773 qmtag = sect->info[qmno];
2774
2775 /* Verify kind of page. */
2776 assert(0 <= pgno && pgno < pgMapSize)do { if (!(0 <= pgno && pgno < pgMapSize)) _do_assert
(("0 <= pgno && pgno < pgMapSize"),"store.c",2776
); } while (0)
;
2777 assert(pgtag == PgBusyFirst || pgtag == PgBusyFollow)do { if (!(pgtag == PgBusyFirst || pgtag == PgBusyFollow)) _do_assert
(("pgtag == PgBusyFirst || pgtag == PgBusyFollow"),"store.c",
2777); } while (0)
;
2778
2779
2780 /* Verify kind of section. */
2781 assert(sect->isFixed)do { if (!(sect->isFixed)) _do_assert(("sect->isFixed")
,"store.c",2781); } while (0)
;
2782 assert(sect->qmSize == sz && sect->qmSizeIndex == ix)do { if (!(sect->qmSize == sz && sect->qmSizeIndex
== ix)) _do_assert(("sect->qmSize == sz && sect->qmSizeIndex == ix"
),"store.c",2782); } while (0)
;
2783
2784
2785 /* Verify alignment in section. */
2786 assert(ptrLE(data, pc))do { if (!((((Pointer)(data)) <= ((Pointer)(pc))))) _do_assert
(("ptrLE(data, pc)"),"store.c",2786); } while (0)
;
2787 assert(ptrDiff((char *) pc, data) % sz == 0)do { if (!(((long)((char *)((char *) pc) - (char *)(data))) %
sz == 0)) _do_assert(("ptrDiff((char *) pc, data) % sz == 0"
),"store.c",2787); } while (0)
;
2788 assert(0 <= qmno && qmno < sect->qmCount)do { if (!(0 <= qmno && qmno < sect->qmCount
)) _do_assert(("0 <= qmno && qmno < sect->qmCount"
),"store.c",2788); } while (0)
;
2789
2790 /* Verify piece is free. */
2791 assert(QmInfoKind(qmtag) == QmFreeFirst)do { if (!(((qmtag) & 0xC0) == 0x40)) _do_assert(("QmInfoKind(qmtag) == QmFreeFirst"
),"store.c",2791); } while (0)
;
2792
2793 fxpCount[ix]++;
2794 }
2795 }
2796 return fxpCount;
2797}
2798
2799/*
2800 * Verify the pages for sections of mixed size pieces.
2801 */
2802localstatic long
2803stoAuditMixedSizeSections(void)
2804{
2805 int pgno, pgcount, qmno, qmcount, qmsize, sz, nq, i, cd;
2806 long mxpBytes;
2807 QmInfo qmtag;
2808 Section *sect;
2809 MxMem *pc;
2810 char *data;
2811
2812 mxpBytes = 0;
2813
2814 /* Verify pages with mixed size pieces. */
2815 for (pgno = 0; pgno < pgMapSize; pgno += pgcount) {
2816 if (pgMap[pgno] != PgBusyFirst) { pgcount = 1; continue; }
2817
2818 /* Get section information. */
2819 sect = sectAt(pgno)((Section *) ((Page *)((Pointer)((char *)(heapStart) + ((pgno
) * (1L<<12))))))
;
2820 pgcount = sect->pgCount;
2821 if (sect->isFixed) continue;
2822
2823 data = (char *) sect->data;
2824 qmcount = sect->qmCount;
2825 qmsize = sect->qmSize;
2826
2827 /* Verify section consistency. */
2828 assert(qmsize % MixedSizeQuantum == 0)do { if (!(qmsize % (32*sizeof(Pointer)) == 0)) _do_assert(("qmsize % MixedSizeQuantum == 0"
),"store.c",2828); } while (0)
;
2829 assert(ptrEQ(ptrOff(data, qmcount*(long)qmsize),ptrOff((char *)sect, pgcount*PgSize)))do { if (!(( ((Pointer)((Pointer)(((Pointer)((char *)(data) +
(qmcount*(long)qmsize))))) == (Pointer)((Pointer)(((Pointer)
((char *)((char *)sect) + (pgcount*(1L<<12)))))))))) _do_assert
(("ptrEQ(ptrOff(data, qmcount*(long)qmsize),ptrOff((char *)sect, pgcount*PgSize))"
),"store.c",2829); } while (0)
;
2830 assert(pgno + pgcount <= pgMapSize)do { if (!(pgno + pgcount <= pgMapSize)) _do_assert(("pgno + pgcount <= pgMapSize"
),"store.c",2830); } while (0)
;
2831
2832 for (i = pgno+1; i < pgno + pgcount; i++)
2833 assert(pgMap[i] == PgBusyFollow)do { if (!(pgMap[i] == PgBusyFollow)) _do_assert(("pgMap[i] == PgBusyFollow"
),"store.c",2833); } while (0)
;
2834
2835 if (pgno + pgcount < pgMapSize) {
2836 assert(pgMap[pgno + pgcount] != PgBusyFollow)do { if (!(pgMap[pgno + pgcount] != PgBusyFollow)) _do_assert
(("pgMap[pgno + pgcount] != PgBusyFollow"),"store.c",2836); }
while (0)
;
2837 }
2838
2839 /* Verify data alignment. */
2840 assert(ptrToLong(data) % alignof(MostAlignedType) == 0)do { if (!(((long) (data)) % (sizeof(struct {char a; MostAlignedType
b;}) - sizeof(MostAlignedType)) == 0)) _do_assert(("ptrToLong(data) % alignof(MostAlignedType) == 0"
),"store.c",2840); } while (0)
;
2841 assert(qmsize % alignof(MostAlignedType) == 0)do { if (!(qmsize % (sizeof(struct {char a; MostAlignedType b
;}) - sizeof(MostAlignedType)) == 0)) _do_assert(("qmsize % alignof(MostAlignedType) == 0"
),"store.c",2841); } while (0)
;
2842
2843 /* Scan data. */
2844 for (qmno = 0; qmno < qmcount; qmno += nq) {
2845 pc = (MxMem *)ptrOff(data, qmno*(long)qmsize)((Pointer)((char *)(data) + (qmno*(long)qmsize)));
2846 sz = pc->nbytesThis;
2847 nq = sz/qmsize;
2848
2849 /* Verify context. */
2850 assert(sz > 1)do { if (!(sz > 1)) _do_assert(("sz > 1"),"store.c",2850
); } while (0)
; /* sz == 1 => fixed */
2851 assert(ptrEQ(pc->sect, sect))do { if (!(( ((Pointer)((Pointer)(pc->sect)) == (Pointer)(
(Pointer)(sect)))))) _do_assert(("ptrEQ(pc->sect, sect)"),
"store.c",2851); } while (0)
;
2852
2853 /* Verify boundary conditions. */
2854 assert((pc->isFirst) == (qmno == 0))do { if (!((pc->isFirst) == (qmno == 0))) _do_assert(("(pc->isFirst) == (qmno == 0)"
),"store.c",2854); } while (0)
;
2855 assert((pc->isLast) == (qmno+nq == qmcount))do { if (!((pc->isLast) == (qmno+nq == qmcount))) _do_assert
(("(pc->isLast) == (qmno+nq == qmcount)"),"store.c",2855);
} while (0)
;
2856
2857 /* Verify neighbours. */
2858 assert(qmno + nq <= qmcount)do { if (!(qmno + nq <= qmcount)) _do_assert(("qmno + nq <= qmcount"
),"store.c",2858); } while (0)
;
2859 assert(pc->nbytesThis % qmsize == 0)do { if (!(pc->nbytesThis % qmsize == 0)) _do_assert(("pc->nbytesThis % qmsize == 0"
),"store.c",2859); } while (0)
;
2860 assert(pc->nbytesPrev % qmsize == 0)do { if (!(pc->nbytesPrev % qmsize == 0)) _do_assert(("pc->nbytesPrev % qmsize == 0"
),"store.c",2860); } while (0)
;
2861
2862 if (!pc->isFirst) {
2863 i = qmno - pc->nbytesPrev/qmsize;
2864 assert(0 <= i && i <= qmcount)do { if (!(0 <= i && i <= qmcount)) _do_assert(
("0 <= i && i <= qmcount"),"store.c",2864); } while
(0)
;
2865
2866 qmtag = sect->info[i];
2867 cd = QmInfoKind(qmtag)((qmtag) & 0xC0);
2868 assert(cd == QmFreeFirst || cd == QmBusyFirst)do { if (!(cd == 0x40 || cd == 0x80)) _do_assert(("cd == QmFreeFirst || cd == QmBusyFirst"
),"store.c",2868); } while (0)
;
2869 }
2870 if (!pc->isLast) {
2871 i = qmno + pc->nbytesThis/qmsize;
2872 assert(0 <= i && i <= qmcount)do { if (!(0 <= i && i <= qmcount)) _do_assert(
("0 <= i && i <= qmcount"),"store.c",2872); } while
(0)
;
2873
2874 qmtag = sect->info[i];
2875 cd = QmInfoKind(qmtag)((qmtag) & 0xC0);
2876 assert(cd == QmFreeFirst || cd == QmBusyFirst)do { if (!(cd == 0x40 || cd == 0x80)) _do_assert(("cd == QmFreeFirst || cd == QmBusyFirst"
),"store.c",2876); } while (0)
;
2877 }
2878
2879 /* Verify tags. */
2880 qmtag = sect->info[qmno];
2881 cd = QmInfoKind(qmtag)((qmtag) & 0xC0);
2882 assert(!QmInfoMark(qmtag))do { if (!(!((qmtag) & 0x20))) _do_assert(("!QmInfoMark(qmtag)"
),"store.c",2882); } while (0)
;
2883
2884 if (pc->isFree) {
2885 mxmemAssertCleanBody(pc, pc->nbytesThis)(stoMustWash ? memltest((UByte *)(pc) + sizeof(MxMem),0xDD,(pc
->nbytesThis) - sizeof(MxMem)) : 0)
;
2886 assert(cd == QmFreeFirst)do { if (!(cd == 0x40)) _do_assert(("cd == QmFreeFirst"),"store.c"
,2886); } while (0)
;
2887 }
2888 else if (ptrEQ(pc, mixedFrontier)( ((Pointer)((Pointer)(pc)) == (Pointer)((Pointer)(mixedFrontier
))))
) {
2889 mxmemAssertCleanBody(pc, pc->nbytesThis)(stoMustWash ? memltest((UByte *)(pc) + sizeof(MxMem),0xDD,(pc
->nbytesThis) - sizeof(MxMem)) : 0)
;
2890 assert(cd == QmFreeFirst)do { if (!(cd == 0x40)) _do_assert(("cd == QmFreeFirst"),"store.c"
,2890); } while (0)
;
2891 }
2892 else
2893 assert(cd == QmBusyFirst)do { if (!(cd == 0x80)) _do_assert(("cd == QmBusyFirst"),"store.c"
,2893); } while (0)
;
2894
2895 for (i = qmno+1; i < qmno+nq; i++) {
2896 qmtag = sect->info[i];
2897 cd = QmInfoKind(qmtag)((qmtag) & 0xC0);
2898 assert(!QmInfoMark(qmtag))do { if (!(!((qmtag) & 0x20))) _do_assert(("!QmInfoMark(qmtag)"
),"store.c",2898); } while (0)
;
2899 assert(cd == QmFollow)do { if (!(cd == 0x00)) _do_assert(("cd == QmFollow"),"store.c"
,2899); } while (0)
;
2900 }
2901 if (qmno+nq < qmcount) {
2902 qmtag = sect->info[qmno+nq];
2903 cd = QmInfoKind(qmtag)((qmtag) & 0xC0);
2904 assert(cd != QmFollow)do { if (!(cd != 0x00)) _do_assert(("cd != QmFollow"),"store.c"
,2904); } while (0)
;
2905 }
2906
2907 if (pc->isFree) mxpBytes += sz;
2908 }
2909 }
2910
2911 return mxpBytes;
2912}
2913
2914/*
2915 * Verify the recorded mixed size pieces.
2916 */
2917localstatic long stoAuditBTree (BTree bt, BTreeKey *pLoBd, BTreeKey *pHiBd);
2918localstatic long stoAuditDLL (Length l, MxMemDLL *dll);
2919
2920localstatic long
2921stoAuditMixedSizePieces(void)
2922{
2923 return stoAuditBTree(mixedPieces, NULL((void*)0), NULL((void*)0));
2924}
2925
2926localstatic long
2927stoAuditBTree(BTree bt, BTreeKey *pLoBd, BTreeKey *pHiBd)
2928{
2929 BTreePart xp, x0, xN;
2930 int isRoot = !pLoBd && !pHiBd;
2931 int n, pgno;
2932 long mxpBytes;
2933
2934 if (isRoot && !bt) return 0;
2935
2936 /* Node info. */
2937 assert(bt != 0)do { if (!(bt != 0)) _do_assert(("bt != 0"),"store.c",2937); }
while (0)
;
2938 assert(bt->t == MixedBTreeT)do { if (!(bt->t == 16)) _do_assert(("bt->t == MixedBTreeT"
),"store.c",2938); } while (0)
;
2939 n = bt->nKeys;
2940 x0 = bt->part;
2941 xN = bt->part + n;
2942
2943 /* Verify bt points to a node entirely within a B-tree node page. */
2944 pgno = pgNo(bt)(((long)((char *)((char *)(bt)) - (char *)(heapStart))) >>
12)
;
2945 assert(0 <= pgno && pgno < pgMapSize)do { if (!(0 <= pgno && pgno < pgMapSize)) _do_assert
(("0 <= pgno && pgno < pgMapSize"),"store.c",2945
); } while (0)
;
2946 assert(pgMap[pgno] == PgBTree)do { if (!(pgMap[pgno] == PgBTree)) _do_assert(("pgMap[pgno] == PgBTree"
),"store.c",2946); } while (0)
;
2947
2948 pgno = pgNo((char *)bt + btreeNodeSize(bt->t) - 1)(((long)((char *)((char *)((char *)bt + (sizeof(struct btree)
+ (2*(bt->t)) * sizeof(struct btreePart) - 10 * sizeof(struct
btreePart)) - 1)) - (char *)(heapStart))) >> 12)
;
2949 assert(0 <= pgno && pgno < pgMapSize)do { if (!(0 <= pgno && pgno < pgMapSize)) _do_assert
(("0 <= pgno && pgno < pgMapSize"),"store.c",2949
); } while (0)
;
2950 assert(pgMap[pgno] == PgBTree)do { if (!(pgMap[pgno] == PgBTree)) _do_assert(("pgMap[pgno] == PgBTree"
),"store.c",2950); } while (0)
;
2951
2952 /* Verify bt has legal number of keys. */
2953 assert((isRoot ? 0 : bt->t-1) <= n && n <= 2*bt->t-1)do { if (!((isRoot ? 0 : bt->t-1) <= n && n <=
2*bt->t-1)) _do_assert(("(isRoot ? 0 : bt->t-1) <= n && n <= 2*bt->t-1"
),"store.c",2953); } while (0)
;
2954
2955 /* Verify order of keys: Note duplicate keys are not allowed here. */
2956 if (pLoBd) { assert(*pLoBd < x0->key)do { if (!(*pLoBd < x0->key)) _do_assert(("*pLoBd < x0->key"
),"store.c",2956); } while (0)
; }
2957 for (xp = x0+1; xp < xN; xp++) assert((xp-1)->key < xp->key)do { if (!((xp-1)->key < xp->key)) _do_assert(("(xp-1)->key < xp->key"
),"store.c",2957); } while (0)
;
2958 if (pHiBd) { assert((xN-1)->key < *pHiBd)do { if (!((xN-1)->key < *pHiBd)) _do_assert(("(xN-1)->key < *pHiBd"
),"store.c",2958); } while (0)
; }
2959
2960 /* Check descendants. */
2961 mxpBytes = 0;
2962 for (xp = x0; xp < xN; xp++)
2963 mxpBytes += stoAuditDLL((Length)xp->key,(MxMemDLL *)xp->entry);
2964 if (!bt->isLeaf) {
2965 mxpBytes += stoAuditBTree(x0->branch, pLoBd, &x0->key);
2966 for (xp = x0+1; xp < xN; xp++)
2967 mxpBytes +=
2968 stoAuditBTree(xp->branch, &(xp-1)->key, &xp->key);
2969 mxpBytes += stoAuditBTree(xN->branch, &(xN-1)->key, pHiBd);
2970 }
2971 return mxpBytes;
2972}
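
stoAuditBTree verifies key ordering by threading an open interval (pLoBd, pHiBd) through the recursion: every key in a node must lie strictly between the bounds inherited from its ancestors, and each child is checked against the adjacent keys as its new bounds. The same idea reduced to an ordinary binary search tree (a sketch, not the B-tree walked here):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct BST { long key; struct BST *left, *right; } BST;

    /* Keys must be strictly increasing; lo/hi are exclusive bounds or NULL. */
    static bool ordered(const BST *t, const long *lo, const long *hi)
    {
        if (!t) return true;
        if (lo && !(*lo < t->key)) return false;
        if (hi && !(t->key < *hi)) return false;
        return ordered(t->left, lo, &t->key) && ordered(t->right, &t->key, hi);
    }

    /* Top-level call: no bounds yet, just like the isRoot case above. */
    bool bstOrdered(const BST *t) { return ordered(t, NULL, NULL); }
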
2973
2974localstatic long
2975stoAuditDLL(Length nbytes, MxMemDLL *dll)
2976{
2977 int pgno;
2978 long mxpBytes;
2979 MxMem *P0, *P, *A, *B;
2980
2981 nSizes++;
2982 /* Verify dll points into DLL page. */
2983 assert(dll != 0)do { if (!(dll != 0)) _do_assert(("dll != 0"),"store.c",2983)
; } while (0)
;
2984 pgno = pgNo(dll)(((long)((char *)((char *)(dll)) - (char *)(heapStart))) >>
12)
;
2985 assert(0 <= pgno && pgno < pgMapSize)do { if (!(0 <= pgno && pgno < pgMapSize)) _do_assert
(("0 <= pgno && pgno < pgMapSize"),"store.c",2985
); } while (0)
;
2986 assert(pgMap[pgno] == PgDLL)do { if (!(pgMap[pgno] == PgDLL)) _do_assert(("pgMap[pgno] == PgDLL"
),"store.c",2986); } while (0)
;
2987
2988 /* Verify node. */
2989 assert(dll->nbytes == nbytes)do { if (!(dll->nbytes == nbytes)) _do_assert(("dll->nbytes == nbytes"
),"store.c",2989); } while (0)
;
2990 assert(dll->pieces != 0)do { if (!(dll->pieces != 0)) _do_assert(("dll->pieces != 0"
),"store.c",2990); } while (0)
;
2991
2992 /* Verify center of DLL. */
2993 P = P0 = dll->pieces;
2994 assert(P->isFree)do { if (!(P->isFree)) _do_assert(("P->isFree"),"store.c"
,2994); } while (0)
;
2995 assert(P->nbytesThis == nbytes)do { if (!(P->nbytesThis == nbytes)) _do_assert(("P->nbytesThis == nbytes"
),"store.c",2995); } while (0)
;
2996 assert(P->body.free.dll == dll)do { if (!(P->body.free.dll == dll)) _do_assert(("P->body.free.dll == dll"
),"store.c",2996); } while (0)
;
2997 A = P->body.free.linkA; if (A) { assert(A->body.free.linkB == P)do { if (!(A->body.free.linkB == P)) _do_assert(("A->body.free.linkB == P"
),"store.c",2997); } while (0)
; }
2998	A = P->body.free.linkB; if (A) { assert(A->body.free.linkA == P)do { if (!(A->body.free.linkA == P)) _do_assert(("A->body.free.linkA == P"
),"store.c",2998); } while (0)
; }
2999 mxpBytes = nbytes;
3000 nMixed++;
3001
3002 /* Verify A direction of DLL. */
3003 for (P = P0->body.free.linkA; P; P = A) {
3004 assert(P->isFree)do { if (!(P->isFree)) _do_assert(("P->isFree"),"store.c"
,3004); } while (0)
;
3005 assert(P->nbytesThis == nbytes)do { if (!(P->nbytesThis == nbytes)) _do_assert(("P->nbytesThis == nbytes"
),"store.c",3005); } while (0)
;
3006 assert(P->body.free.dll == dll)do { if (!(P->body.free.dll == dll)) _do_assert(("P->body.free.dll == dll"
),"store.c",3006); } while (0)
;
3007 A = P->body.free.linkA;
3008 if (A) { assert(A->body.free.linkB == P)do { if (!(A->body.free.linkB == P)) _do_assert(("A->body.free.linkB == P"
),"store.c",3008); } while (0)
; }
3009 mxpBytes += nbytes;
3010 nMixed++;
3011 }
3012
3013 /* Verify B direction of DLL. */
3014 for (P = P0->body.free.linkB; P; P = B) {
3015 assert(P->isFree)do { if (!(P->isFree)) _do_assert(("P->isFree"),"store.c"
,3015); } while (0)
;
3016 assert(P->nbytesThis == nbytes)do { if (!(P->nbytesThis == nbytes)) _do_assert(("P->nbytesThis == nbytes"
),"store.c",3016); } while (0)
;
3017 assert(P->body.free.dll == dll)do { if (!(P->body.free.dll == dll)) _do_assert(("P->body.free.dll == dll"
),"store.c",3017); } while (0)
;
3018 B = P->body.free.linkB;
3019		if (B) { assert(B->body.free.linkA == P)do { if (!(B->body.free.linkA == P)) _do_assert(("B->body.free.linkA == P"
),"store.c",3019); } while (0)
; }
3020 mxpBytes += nbytes;
3021 nMixed++;
3022 }
3023 return mxpBytes;
3024}
3025
3026#endif
3027
3028long
3029stoShowMixedSizeSections(void)
3030{
3031 int pgno, pgcount, qmno, qmcount, qmsize, sz, nq, i, cd;
3032 QmInfo qmtag;
3033 Section *sect;
3034 MxMem *pc;
3035 char *data;
3036 Bool anyMarked = false((int) 0);
3037
3038 fprintf(osStderr, "- - - -\n");
3039 for (pgno = 0; pgno < pgMapSize; pgno += pgcount) {
3040 if (pgMap[pgno] != PgBusyFirst) { pgcount = 1; continue; }
3041
3042 /* Get section information. */
3043 sect = sectAt(pgno)((Section *) ((Page *)((Pointer)((char *)(heapStart) + ((pgno
) * (1L<<12))))))
;
3044 pgcount = sect->pgCount;
3045 data = (char *) sect->data;
3046 qmcount = sect->qmCount;
3047 qmsize = sect->qmSize;
3048
3049 if (sect->isFixed) {
3050 fprintf(osStderr,"(:::Fx %d[%d:%d:%d]:::)\n",
3051 qmsize,pgno,pgcount,qmcount);
3052 continue;
3053 }
3054 else {
3055 fprintf(osStderr,"(:::Mx %d[%d:%d:%d]\n",
3056 qmsize,pgno,pgcount,qmcount);
3057 }
3058
3059
3060 for (qmno = 0; qmno < qmcount; qmno += nq) {
3061 pc = (MxMem *)ptrOff(data, qmno*(long)qmsize)((Pointer)((char *)(data) + (qmno*(long)qmsize)));
3062 sz = pc->nbytesThis;
3063 nq = sz/qmsize;
3064
3065 if (pc->isFirst) fprintf(osStderr, "<<\n");
3066
3067 if (!pc->isFirst)
3068 fprintf(osStderr, "(%ld)\n",
3069 pc->nbytesPrev/qmsize);
3070 fprintf(osStderr, "%p=(%d){", (void*) pc, nq);
3071 if (pc->isFree)
3072 fprintf(osStderr, "F/");
3073 else
3074 fprintf(osStderr, "B/");
3075
3076 if (ptrEQ(pc, mixedFrontier)( ((Pointer)((Pointer)(pc)) == (Pointer)((Pointer)(mixedFrontier
))))
)
3077 fprintf(osStderr, "<frontier>");
3078
3079 for (i = qmno; i < qmno+nq; i++) {
3080 qmtag = sect->info[i];
3081 cd = QmInfoKind(qmtag)((qmtag) & 0xC0);
3082
3083 if (QmInfoMark(qmtag)((qmtag) & 0x20)) {
3084 fprintf(osStderr, "<MARK>");
3085 anyMarked = true1;
3086 }
3087 switch (cd) {
3088 case QmFreeFirst0x40:
3089 fprintf(osStderr, "f"); break;
3090 case QmBusyFirst0x80:
3091 fprintf(osStderr, "b"); break;
3092 case QmFollow0x00:
3093 fprintf(osStderr, "."); break;
3094 case QmBlacklisted0xC0:
3095 fprintf(osStderr, "."); break;
3096 default:
3097 fprintf(osStderr, "=[%x]", cd); break;
3098 }
3099 }
3100 fprintf(osStderr, "}");
3101 if (pc->isLast) { fprintf(osStderr, "\n>>"); }
3102 if (nq == 0) { fprintf(osStderr, "BUG!!\n"); exit(int0((int) 0)); }
3103
3104 }
3105 fprintf(osStderr, "\n:::)\n");
3106 }
3107 fprintf(osStderr, "- - - -\n");
3108 if (anyMarked) exit(int0((int) 0));
3109 return 0;
3110}
3111
3112/****************************************************************************
3113 *
3114 * :: Tuning
3115 *
3116 ***************************************************************************/
3117
3118/*
3119 * Sort free lists into ascending address order.
3120 * This does not use QmInfo so it can be used even without tags.
3121 */
3122localstatic void
3123stoRebuildFreeLists(void)
3124{
3125 Section *sect;
3126 int ix, pgno, pgcount;
3127 FxMem *pc, *hd, *fp;
3128
3129 /* Initialize each section's free list to 0. */
3130 for (pgno = 0; pgno < pgMapSize; pgno += pgcount) {
3131 if (pgMap[pgno] != PgBusyFirst)
3132 pgcount = 1;
3133 else {
3134 sect = sectAt(pgno);
3135 sect->free = 0;
3136 pgcount = sect->pgCount;
3137 }
3138 }
3139
3140 /* Organize pieces from the free lists into the per-section lists. */
3141 for (ix = 0; ix < FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])); ix++) {
3142 pc = fixedPieces[ix];
3143 fixedPieces[ix] = 0;
3144 while (pc) {
3145 hd = pc;
3146 pc = pc->next;
3147 sect = sectFor((Pointer) hd);
3148 hd->next = sect->free;
3149 sect->free = hd;
3150 }
3151 }
3152 /* Join the per-section lists to make the new free lists. */
3153 for (pgno = pgMapSize-1; pgno >= 0; pgno--) {
3154 if (pgMap[pgno] != PgBusyFirst) continue;
3155
3156 sect = sectAt(pgno);
3157 if (!sect->isFixed) continue;
3158
3159 ix = sect->qmSizeIndex;
3160 pc = fixedPieces[ix];
3161 fp = sect->free;
3162
3163 while (fp) {
3164 hd = fp;
3165 fp = fp->next;
3166 hd->next = pc;
3167 pc = hd;
3168 }
3169 fixedPieces[ix] = pc;
3170 }
3171}
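/*
 * Illustrative sketch, not part of store.c: the rebuild above empties each
 * global fixed-size free list into per-section lists and then re-joins the
 * sections from the highest page number down, so the rebuilt list comes out
 * in ascending address order.  The same idea on a hypothetical singly linked
 * list (Node, bucket[] and nbuckets are made-up names):
 *
 *     typedef struct node { struct node *next; } Node;
 *
 *     Node *rebuilt = 0;
 *     for (int b = nbuckets - 1; b >= 0; b--) {   // highest bucket first
 *         Node *n = bucket[b];
 *         while (n) {                             // push each node on the head,
 *             Node *next = n->next;               // so bucket 0 ends up in front
 *             n->next = rebuilt;
 *             rebuilt = n;
 *             n = next;
 *         }
 *     }
 */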
3172
3173/****************************************************************************
3174 *
3175 * :: Externally visible operations
3176 *
3177 ***************************************************************************/
3178
3179
3180localstatic int
3181stoInit(void)
3182{
3183 StoInfoObj info;
3184 int i, j, sz0, sz;
3185 gcTraceFile = 0;
3186
3187#ifdef USE_MEMORY_CLIMATE
3188 limitNumberOfMemoryClimates(QmCodeMask0x1F);
3189#endif
3190
3191 if (osGetEnv("GC_FRUGAL")) GcIsFrugal = true1;
21
Assuming the condition is false
22
Taking false branch
3192
3193 if (GcIsFrugal)
23
Assuming 'GcIsFrugal' is 0
24
Taking false branch
3194 {
3195 GcEffectiveFactorNum = GcFrugalFactorNum;
3196 GcEffectiveFactorDen = GcFrugalFactorDen;
3197 }
3198
3199 if (osGetEnv("GC_GEFN")) GcEffectiveFactorNum = atoi(osGetEnv("GC_GEFN"));
25
Assuming the condition is false
26
Taking false branch
3200 if (osGetEnv("GC_GEFD")) GcEffectiveFactorDen = atoi(osGetEnv("GC_GEFD"));
27
Assuming the condition is false
28
Taking false branch
3201
3202 if (osGetEnv("GC_GGFN")) GcGrowthFactorNum = atoi(osGetEnv("GC_GGFN"));
29
Assuming the condition is false
30
Taking false branch
3203 if (osGetEnv("GC_GGFD")) GcGrowthFactorDen = atoi(osGetEnv("GC_GGFD"));
31
Assuming the condition is false
32
Taking false branch
3204
3205 if (osGetEnv("GC_DETAIL")) markingStats = true1;
33
Assuming the condition is false
34
Taking false branch
3206
3207
3208 /* Initialize memory mapper. */
3209 osMemMap(int0((int) 0));
3210
3211 /* Statistics */
3212 stoBytesOwn = 0;
3213 stoBytesAlloc = 0;
3214 stoBytesFree = 0;
3215 stoBytesGc = 0;
3216 stoBytesBlack = 0;
3217
3218 /* Page map */
3219 pgmapInit();
3220
3221 /* Heap */
3222 heapStart = (char *) ptrOff((char *) pgMap, long0)((Pointer)((char *)((char *) pgMap) + (((long) 0))));
3223 heapEnd = (char *) ptrOff((char *) pgMap, pgMapUses*PgSize);
3224 pgmapMod((Page *) pgMap, pgMapUses, PgPgMap);
35
Calling 'pgmapMod'
3225
3226 /* Fixed pieces */
3227 for (sz0 = 0, i = 0; i < FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])); sz0 = sz+1, i++) {
3228 sz = fixedSize[i];
3229 for (j = sz0; j <= sz; j++) {
3230 fixedSizeFor [j] = sz;
3231 fixedSizeIndexFor[j] = i;
3232 }
3233 }
3234
3235 /* Free lists and fixed-sized store counting */
3236 for (i = 0; i < FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])); i++) {
3237 fixedPieces[i] = 0;
3238 freeFixedPieces[i] = 0L;
3239 busyFixedPieces[i] = 0L;
3240 }
3241
3242
3243 /* Mixed-sized store counting */
3244 freeMixedBytes = 0L;
3245 busyMixedBytes = 0L;
3246
3247
3248#ifdef STO_DIVISION_BY_LOOKUP1
3249 /* Initialise division lookup-table */
3250 for (i = 0; i < FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])); i++)
3251 {
3252 int table = -(fixedSizeLog[i]+1);
3253 int size = fixedSize[i];
3254
3255
3256 /* Skip sizes which don't require division */
3257 if (fixedSizeLog[i] >= 0) continue;
3258
3259
3260 /* Compute the divisions */
3261 for (j = 0;j < PgSize(1L<<12);j++)
3262 stoDivTable[table][j] = j / size;
3263 }
3264#endif
3265
3266
3267#ifndef FOAM_RTS
3268 /* All objects contain pointers */
3269 info.hasPtrs = 1;
3270 for (i = 0;i < STO_CENSUS_LIMIT(sizeof(censusName)/sizeof(censusName[0])); i++)
3271 {
3272 info.code = i;
3273 stoRegister(&info);
3274 }
3275#endif
3276
3277
3278 /* Mixed pieces */
3279 mixedPieces = 0;
3280 mixedFrontier = 0;
3281 stoWatchFrontier(mixedFrontier);
3282
3283 /* User traceable objects */
3284 for (i = 0;i < OB_MAX256;i++) stoObAldorTracer[i] = (StoFiClos)0;
3285 for (i = 0;i < OB_MAX256;i++) stoObCTracer[i] = (StoTraceFun)0;
3286
3287 stoIsInit = pgMap != 0;
3288
3289 gcTimer.time = 0;
3290 gcTimer.live = 0;
3291
3292 return stoIsInit;
3293}
3294
3295
3296/*
3297 * Added specifically for use with LIP big integers
3298 */
3299MostAlignedType *
3300stoCAlloc(unsigned code, ULong nbytes)
3301{
3302 MostAlignedType *result = stoAlloc(code, nbytes);
3303 memlset(result, 0x00, nbytes);
3304 return result;
3305}
3306
3307
3308MostAlignedType *
3309stoAlloc(unsigned code, ULong nbytes)
3310{
3311 Pointer p;
3312 Section *sect;
3313 Length qi;
3314 MostAlignedType *ap;
3315
3316 if (!stoIsInit && !stoInit())
19
Assuming 'stoIsInit' is 0
20
Calling 'stoInit'
3317 return (*stoError)(StoErr_CantBuild3);
3318
3319#ifdef USE_MEMORY_CLIMATE
3320 code = getMemoryClimate();
3321#endif
3322 if (nbytes > 100*1024*1024) {
3323 bug("stoAlloc: Too large");
3324 }
3325 if (nbytes==0) return NULL((void*)0); /* TTT */
3326 if (nbytes <= FixedSizeMax(32*sizeof(Pointer)))
3327 {
3328 FxMem *pc;
3329 int si;
3330
3331 si = fixedSizeIndexFor[nbytes];
3332 pc = fixedPieces[si];
3333
3334 if (!pc) {
3335 pc = piecesGetFixed(nbytes);
3336 if (!pc) return (*stoError)(StoErr_OutOfMemory1);
3337 }
3338
3339 fixedPieces[si] = pc->next;
3340 p = (Pointer)pc;
3341
3342 if (stoMustTag)
3343 {
3344 /* This is much more efficient than sectFor() */
3345 sect = sectOf(p);
3346
3347
3348#ifdef STO_DIVISION_BY_LOOKUP1
3349 /* Compute the quantum index */
3350 qi = (sect->qmLog) ? qmLogNo(p, sect) : /* TTT */
3351 /* Use division-by-lookup-table */ qmDivNo(p, sect);
3352
3353 assert(qi == qmNo(p, sect));
3354
3355#else
3356 /* Compute the quantum index */
3357 if (sect->qmLog)
3358 qi = qmLogNo(p, sect);
3359 else
3360 qi = qmNo(p, sect);
3361#endif
3362
3363
3364 /* Update the info for this quantum */
3365 sect->info[qi] = QmInfoMake(QmBusyFirst, code)(((0x80) & 0xC0) | ((code) & 0x1F));
3366 }
3367
3368 stoTally(stoBytesAlloc += fixedSize[si])(stoBytesAlloc += fixedSize[si]);
3369 newFill (p, fixedSize[si])(stoMustWash ? memlset (p,0xAA,fixedSize[si]) : 0);
3370 }
3371 else
3372 {
3373 MxMem *pc;
3374 ULong nb;
3375
3376#ifdef STO_LONGJMP
3377 if (setjmp(stoAllocInner_ErrorCatch)_setjmp (stoAllocInner_ErrorCatch)) {
3378 p = ptrCanon(stoAllocInner_ErrorValue)((Pointer)(stoAllocInner_ErrorValue));
3379 return (MostAlignedType *) p;
3380 }
3381#endif
3382 nb = ROUND_UP(nbytes + MxMemHeadSize, MixedSizeQuantum);
3383 pc = pieceGetMixed(nb);
3384
3385 if (!pc) return (*stoError)(StoErr_OutOfMemory1);
3386
3387 p = (Pointer) (&pc->body.busy.data);
3388
3389 if (stoMustTag) {
3390 sect = sectFor((Pointer) pc);
3391 qi = qmNo(pc, sect);
3392 sect->info[qi] = QmInfoMake(QmBusyFirst, code)(((0x80) & 0xC0) | ((code) & 0x1F));
3393 }
3394
3395 stoTally(stoBytesAlloc += pc->nbytesThis - MxMemHeadSize);
3396 newFill (p, pc->nbytesThis - MxMemHeadSize);
3397 }
3398
3399
3400 /* Canonicalise the pointer and return it */
3401 ap = (MostAlignedType *) ptrCanon(p)((Pointer)(p));
3402 stoWatchAlloc(ap, nbytes);
3403 return ap;
3404}
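/*
 * Illustrative sketch, not part of store.c: typical client usage of stoAlloc()
 * above.  Requests of at most FixedSizeMax bytes come off the per-size free
 * lists; anything larger goes through the mixed-size path and carries an MxMem
 * header in front of the data it returns.  The code value and sizes here are
 * made-up examples.
 *
 *     MostAlignedType *small = stoAlloc((unsigned) 0, 48);   // fixed-size path
 *     MostAlignedType *large = stoAlloc((unsigned) 0, 4096); // mixed-size path
 *     ...
 *     stoFree((Pointer) large);
 *     stoFree((Pointer) small);
 */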
3405
3406
3407void
3408stoFree(Pointer p)
3409{
3410 int si;
3411 Length qi;
3412 Section *sect;
3413
3414 if (p==0) return; /* TTT */
3415 if (!stoIsInit || !isInHeap(p) || (sect = sectFor(p)) == 0) {
3416 (*stoError)(StoErr_FreeBad4);
3417 return;
3418 }
3419 stoWatchFree(p);
3420
3421 if (sect->isFixed) {
3422 FxMem *pc = (FxMem *) p;
3423 si = sect->qmSizeIndex;
3424 if (stoMustTag) {
3425#ifdef STO_DIVISION_BY_LOOKUP1
3426 qi = (sect->qmLog) ? qmLogNo(p, sect) : qmDivNo(p, sect); /* TTT */
3427 assert(qi == qmNo(p, sect));
3428#else
3429 if (sect->qmLog)
3430 qi = qmLogNo(p, sect);
3431 else
3432 qi = qmNo(p, sect);
3433#endif
3434 if (QmInfoKind(sect->info[qi])((sect->info[qi]) & 0xC0) != QmBusyFirst0x80)
3435 (*stoError)(StoErr_FreeBad4);
3436 sect->info[qi] = QmInfoMake0(QmFreeFirst)(0x40);
3437 }
3438 stoTally(stoBytesFree += fixedSize[si])(stoBytesFree += fixedSize[si]);
3439 fxmemCleanBody(pc, fixedSize[si]);
3440 pc->next = fixedPieces[si];
3441 fixedPieces[si] = pc;
3442 }
3443 else {
3444 MxMem *pc = (MxMem *) ptrOff((char *) p, -(long)MxMemHeadSize);
3445#ifdef STO_LONGJMP
3446 if (setjmp(stoAllocInner_ErrorCatch)_setjmp (stoAllocInner_ErrorCatch))
3447 return;
3448#endif
3449 if (stoMustTag) {
3450 qi = qmNo(p, sect);
3451 if (QmInfoKind(sect->info[qi])((sect->info[qi]) & 0xC0) != QmBusyFirst0x80)
3452 (*stoError)(StoErr_FreeBad4);
3453 sect->info[qi] = QmInfoMake0(QmFreeFirst)(0x40);
3454 }
3455 stoTally(stoBytesFree += pc->nbytesThis - MxMemHeadSize);
3456 mxmemCleanBody(pc, pc->nbytesThis);
3457 piecePutMixed(pc);
3458 }
3459}
3460
3461/*
3462 * Return the actual size of the piece pointed to.
3463 */
3464ULong
3465stoSize(Pointer p)
3466{
3467 Section *sect;
3468
3469 if (!stoIsInit || !isInHeap(p) || (sect = sectFor(p)) == 0) {
3470 (*stoError)(StoErr_UsedNonalloc2);
3471 return 0;
3472 }
3473 if (sect->isFixed)
3474 return sect->qmSize;
3475 else {
3476 MxMem *pc = (MxMem *) ((char *) p - MxMemHeadSize(sizeof(MxMem) - sizeof(MxMemU)));
3477 return pc->nbytesThis - MxMemHeadSize(sizeof(MxMem) - sizeof(MxMemU));
3478 }
3479}
3480
3481unsigned
3482stoCode(Pointer p)
3483{
3484 if (stoMustTag) {
3485 Length qi;
3486 Section *sect;
3487
3488 if (!stoIsInit || !isInHeap(p) || (sect = sectFor(p)) == 0)
3489 return 0;
3490 qi = qmNo(p, sect);
3491 if (QmInfoKind(sect->info[qi])((sect->info[qi]) & 0xC0) == QmBusyFirst0x80)
3492 return QmInfoCode(sect->info[qi])((sect->info[qi]) & 0x1F);
3493 }
3494 return 0;
3495}
3496
3497
3498MostAlignedType *
3499stoRecode(Pointer p, unsigned code)
3500{
3501 if (stoMustTag) {
3502 Section *sect;
3503 Length qi;
3504
3505 if (!stoIsInit || !isInHeap(p) || (sect = sectFor(p)) == 0)
3506 return (*stoError)(StoErr_UsedNonalloc2);
3507
3508 qi = qmNo(p, sect);
3509 QmInfoSetCode(sect->info[qi], code);
3510 }
3511 return (MostAlignedType *) p;
3512}
3513
3514
3515MostAlignedType *
3516stoResize(Pointer p, ULong nbytes)
3517{
3518 Section *sect;
3519 Pointer np;
3520 int oc = 0;
3521 ULong nsz, osz;
3522
3523 if (!stoIsInit || !isInHeap(p) || (sect = sectFor(p)) == 0)
1
Assuming 'stoIsInit' is not equal to 0
2
Assuming 'heapStart' is <= 'p'
3
Assuming 'p' is < 'heapEnd'
4
Assuming the condition is false
5
'?' condition is false
6
Assuming the condition is false
7
Taking false branch
3524 return (*stoError)(StoErr_UsedNonalloc2);
3525
3526 /* true size of old piece */
3527 if (sect->isFixed)
8
Assuming field 'isFixed' is 0
9
Taking false branch
3528 osz = sect->qmSize;
3529 else {
3530 MxMem *pc = (MxMem *) ((char *) p - MxMemHeadSize(sizeof(MxMem) - sizeof(MxMemU)));
3531 osz = pc->nbytesThis - MxMemHeadSize(sizeof(MxMem) - sizeof(MxMemU));
3532 }
3533
3534 /* true size of new piece */
3535 if (nbytes <= FixedSizeMax(32*sizeof(Pointer)))
10
Assuming the condition is false
3536 nsz = fixedSizeFor[nbytes];
3537 else {
3538 nsz = ROUND_UP(nbytes + MxMemHeadSize, MixedSizeQuantum);
11
Taking false branch
12
Assuming the condition is false
13
'?' condition is false
3539 nsz -= MxMemHeadSize(sizeof(MxMem) - sizeof(MxMemU));
3540 }
3541
3542 if (osz == nsz) return (MostAlignedType *) ptrCanon(p)((Pointer)(p));
14
Assuming 'osz' is not equal to 'nsz'
15
Taking false branch
3543 if (stoMustTag) oc = QmInfoCode(sect->info[qmNo(p, sect)]);
16
Assuming 'stoMustTag' is 0
17
Taking false branch
3544 np = (Pointer) stoAlloc(oc, nbytes);
18
Calling 'stoAlloc'
3545 memcpy(np, p, MIN(nbytes, osz)((nbytes) < (osz) ? (nbytes) : (osz)));
3546 stoFree(p);
3547
3548 return (MostAlignedType *) ptrCanon(np)((Pointer)(np));
3549}
3550
3551int
3552stoIsPointer(Pointer p)
3553{
3554 if (stoMustTag) {
3555 Section *sect;
3556 Length qi;
3557
3558 if (!stoIsInit || !isInHeap(p) || (sect = sectFor(p)) == 0)
3559 return 0;
3560 qi = qmNo(p, sect);
3561 return QmInfoKind(sect->info[qi])((sect->info[qi]) & 0xC0) == QmBusyFirst0x80;
3562 }
3563 else {
3564 if (!stoIsInit || !isInHeap(p)) return 0;
3565 return 1;
3566 }
3567}
3568
3569#define STO_SHOW_PAGES0x00000001 0x00000001
3570#define STO_SHOW_MEMORY0x00000002 0x00000002
3571#define STO_SHOW_OVERHEAD0x00000004 0x00000004
3572#define STO_SHOW_RESERVED0x00000008 0x00000008
3573#define STO_SHOW_FIXED0x00000010 0x00000010
3574#define STO_SHOW_MIXED0x00000020 0x00000020
3575#define STO_SHOW_PAGEMAP0x00000040 0x00000040
3576#define STO_SHOW_PAGEKEY0x00000080 0x00000080
3577#define STO_SHOW_MEMMAP0x00000100 0x00000100
3578#define STO_SHOW_SHOW0x00000200 0x00000200
3579#define STO_SHOW_CENSUS0x00000400 0x00000400
3580#define STO_SHOW_USAGE0x00000800 0x00000800
3581#define STO_SHOW_ALL((-1) & ~0x00000080 & ~0x00000200) ((-1) & ~STO_SHOW_PAGEKEY0x00000080 & ~STO_SHOW_SHOW0x00000200)
3582#define STO_SHOW_BEFORE_MASK(~(0x00000400 | 0x00000800)) (~(STO_SHOW_CENSUS0x00000400 | STO_SHOW_USAGE0x00000800))
3583
3584int
3585stoShowArgs(char *detail)
3586{
3587 int r = 0x00;
3588
3589
3590 /* NULL argument means no details */
3591 if (!detail) return 0;
3592
3593
3594 /* Treat the argument as a list of words */
3595 while (*detail)
3596 {
3597 char *w;
3598 int l;
3599
3600 /* Skip any leading whitespace or commas */
3601 while (*detail && (isspace(*detail) || (*detail == ',')))
3602 detail++;
3603
3604
3605 /* Note the starting position of the word */
3606 if (*detail) w = detail;
3607 else break; /* At EOS - stop */
3608
3609
3610 /* Locate the end of the word */
3611 while (*detail && !isspace(*detail) && (*detail != ','))
3612 detail++;
3613
3614
3615 /* How long is the word? */
3616 l = detail - w;
3617
3618 /* Do we recognise it? */
3619 if (!strncmp(w,"pages",l)) r |= STO_SHOW_PAGES0x00000001;
3620 else if (!strncmp(w,"memory",l)) r |= STO_SHOW_MEMORY0x00000002;
3621 else if (!strncmp(w,"overhead",l)) r |= STO_SHOW_OVERHEAD0x00000004;
3622 else if (!strncmp(w,"reserved",l)) r |= STO_SHOW_RESERVED0x00000008;
3623 else if (!strncmp(w,"fixed",l)) r |= STO_SHOW_FIXED0x00000010;
3624 else if (!strncmp(w,"mixed",l)) r |= STO_SHOW_MIXED0x00000020;
3625 else if (!strncmp(w,"pagemap",l)) r |= STO_SHOW_PAGEMAP0x00000040;
3626 else if (!strncmp(w,"pagekey",l)) r |= STO_SHOW_PAGEKEY0x00000080;
3627 else if (!strncmp(w,"memmap",l)) r |= STO_SHOW_MEMMAP0x00000100;
3628 else if (!strncmp(w,"census",l)) r |= STO_SHOW_CENSUS0x00000400;
3629 else if (!strncmp(w,"usage",l)) r |= STO_SHOW_USAGE0x00000800;
3630 else if (!strncmp(w,"all",l)) r |= STO_SHOW_ALL((-1) & ~0x00000080 & ~0x00000200);
3631 else if (!strncmp(w,"show",l))
3632 {
3633 (void)fprintf(osStderr,
3634 "\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n\n",
3635 "Possible values are a space or comma separated list of:",
3636 " all: select all options (except pagekey and show)",
3637 " census: display object counts/sizes by type",
3638 " fixed: classification and count of free fixed-size pages",
3639 " memmap: display the stack, heap, idata and ddata limits",
3640 " memory: amount of store allocated, owned etc",
3641 " mixed: classification and count of free mixed-size pages",
3642 " overhead: memory overhead of the storage manager",
3643 " pagekey: display the symbols used in the pagemap display",
3644 " pagemap: tabular display of the page map (see also pagekey)",
3645 " pages: classification and count of store pages",
3646 " reserved: amount of free memory in reserve",
3647 " show: display this message",
3648 " usage: fixed-sized pages free/busy usage");
3649 }
3650 }
3651 fflush(osStderr);
3652
3653
3654 /* Return the flags */
3655 return r;
3656}
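/*
 * Illustrative sketch, not part of store.c: stoShowArgs() turns a space- or
 * comma-separated word list into a mask of STO_SHOW_* flags, as stoGc() does
 * with the GC_DETAIL environment variable.  The variable's value here is just
 * an example.
 *
 *     int detail = stoShowArgs(osGetEnv("GC_DETAIL"));  // e.g. "pages,census"
 *     if (detail & STO_SHOW_PAGES)  { ... }
 *     if (detail & STO_SHOW_CENSUS) { ... }
 */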
3657
3658
3659void
3660stoGc(void)
3661{
3662 static int doShow = -1;
3663
3664 if (doShow == -1)
3665 doShow = stoShowArgs(osGetEnv("GC_DETAIL"));
3666 if (stoMustTag) {
3667 static Bool inGc = false((int) 0);
3668 if (inGc) return;
3669 tmStart(stoGcTimer());
3670 if (DEBUG(sto)((int) 0)) {
3671 if (doShow) {
3672 /* Census taking is special */
3673 if (doShow & STO_SHOW_CENSUS0x00000400)
3674 stoTakeCensus(1);
3675
3676
3677 /* Some things not shown before GC */
3678 stoShowDetail(doShow & STO_SHOW_BEFORE_MASK(~(0x00000400 | 0x00000800)));
3679 }
3680 }
3681 inGc = true1;
3682 stoGcMarkAndSweep();
3683 inGc = false((int) 0);
3684 if (DEBUG(sto)((int) 0)) {
3685 if (doShow) {
3686 /* Census taking is special */
3687 if (doShow & STO_SHOW_CENSUS0x00000400)
3688 stoTakeCensus((AInt)0);
3689
3690
3691 /* Allowed to show census this time */
3692 stoShowDetail(doShow);
3693 }
3694 }
3695 tmStop(stoGcTimer());
3696 }
3697}
3698
3699void
3700stoTune(void)
3701{
3702 stoRebuildFreeLists();
3703}
3704
3705void
3706stoShow(void)
3707{
3708 stoShowDetail(STO_SHOW_ALL((-1) & ~0x00000080 & ~0x00000200));
3709}
3710
3711
3712localstatic ULong
3713stoTakeFixedCensus(Section *sect, ULong *hist, ULong *mem)
3714{
3715 Length i;
3716 ULong nobjs = 0;
3717 ULong size = sect->qmSize;
3718 int info;
3719
3720
3721 /* Count all the objects in this section */
3722 for (i = 0; i < sect->qmCount; i++)
3723 {
3724 /* Skip irrelevant quanta */
3725 if (QmInfoKind(sect->info[i])((sect->info[i]) & 0xC0) != QmBusyFirst0x80)
3726 continue;
3727
3728
3729 /* Remember the object type */
3730 info = QmInfoCode(sect->info[i])((sect->info[i]) & 0x1F);
3731
3732
3733 /* Increment census slot for this type */
3734 hist[info]++;
3735
3736
3737 /* Increase storage count */
3738 mem[info] += size;
3739
3740
3741 /* Found another object */
3742 nobjs++;
3743 }
3744
3745
3746 /* Return the number of objects counted. */
3747 return nobjs;
3748}
3749
3750
3751localstatic ULong
3752stoTakeMixedCensus(Section *sect, ULong *hist, ULong *mem)
3753{
3754 Length i;
3755 ULong nobjs = 0;
3756 ULong size;
3757 int kind;
3758 int info;
3759 MxMem *pc;
3760
3761
3762 /* Count all the objects in this section */
3763 for (i = 0; i < sect->qmCount; i++)
3764 {
3765 /* Get the type of quanta */
3766 kind = QmInfoKind(sect->info[i])((sect->info[i]) & 0xC0);
3767
3768
3769 /* Skip irrelevant quanta */
3770 if (kind != QmBusyFirst0x80)
3771 continue;
3772
3773
3774 /* Compute the address of the piece */
3775 pc = (MxMem *)ptrOff((char *)sect->data, i*sect->qmSize);
3776
3777
3778 /* Read the size of the data (may cover several quanta) */
3779 size = pc->nbytesThis;
3780
3781
3782 /* Remember the object type */
3783 info = QmInfoCode(sect->info[i])((sect->info[i]) & 0x1F);
3784
3785
3786 /* Increment census slot for this type */
3787 hist[info]++;
3788
3789
3790 /* Increase storage count */
3791 mem[info] += size;
3792
3793
3794 /* Found another object */
3795 nobjs++;
3796 }
3797
3798
3799 /* Return the number of objects counted. */
3800 return nobjs;
3801}
3802
3803
3804/*
3805 * This function can be used to display a table of information about
3806 * different types of objects which are present in the store. The
3807 * number of objects before and after garbage collection is computed
3808 * along with the amount of memory they use.
3809 */
3810void
3811stoTakeCensus(AInt before)
3812{
3813 Length i;
3814 ULong nobjs = 0L;
3815 ULong *hist, *mem;
3816
3817
3818 /* Which table are we filling? */
3819 hist = before ? censusBefore : censusAfter;
3820 mem = before ? censusMemBefore : censusMemAfter;
3821
3822
3823 /* Reset the histograms */
3824 for (i = 0;i < STO_CENSUS_LIMIT(sizeof(censusName)/sizeof(censusName[0])); i++)
3825 hist[i] = mem[i] = 0L;
3826
3827
3828 /* Scan the heap counting number and size of objects */
3829 for (i = 0; i < pgMapSize; i++)
3830 {
3831 if (pgMap[i] == PgBusyFirst)
3832 {
3833 Section *sect = sectAt(i);
3834
3835
3836 /* Census depends on section type */
3837 if (sect->isFixed)
3838 nobjs += stoTakeFixedCensus(sect, hist, mem);
3839 else
3840 nobjs += stoTakeMixedCensus(sect, hist, mem);
3841 }
3842 }
3843
3844
3845 /* Record the total number of objects found */
3846 hist[STO_CENSUS_LIMIT(sizeof(censusName)/sizeof(censusName[0])) - 1] = nobjs;
3847}
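/*
 * Illustrative sketch, not part of store.c: the intended pattern is one census
 * immediately before a collection and one immediately after, so that the Total,
 * Live and Swept columns can be derived per object code.  This mirrors what
 * stoGc() does when census output has been requested:
 *
 *     stoTakeCensus(1);               // fill censusBefore / censusMemBefore
 *     stoGcMarkAndSweep();
 *     stoTakeCensus((AInt) 0);        // fill censusAfter / censusMemAfter
 *     stoShowDetail(STO_SHOW_CENSUS); // print the table
 */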
3848
3849static String stoCensusDivider = "---------------------------------------------------------------------------------";
3850
3851static String stoCensusBlank = " ";
3852
3853localstatic void
3854stoShowCensusBar(int hires)
3855{
3856 String line = stoCensusDivider;
3857 String blank = stoCensusBlank;
3858
3859
3860 /* Decide what sort of bar to draw */
3861 if (hires)
3862 {
3863 /* Field name */
3864 (void)fprintf(osStderr, "+-");
3865 (void)fprintf(osStderr, "%10.10s-+-", line);
3866
3867
3868 /* Object counts: total, live and swept */
3869 (void)fprintf(osStderr, "%9.9s-+-", line);
3870 (void)fprintf(osStderr, "%9.9s-+-", line);
3871 (void)fprintf(osStderr, "%9.9s-+-", line);
3872
3873
3874 /* Object sizes: total, live and swept */
3875 (void)fprintf(osStderr, "%6.6s-+-", line);
3876 (void)fprintf(osStderr, "%6.6s-+-", line);
3877 (void)fprintf(osStderr, "%6.6s-+", line);
3878 }
3879 else
3880 {
3881 /* Field name */
3882 (void)fprintf(osStderr, " ");
3883 (void)fprintf(osStderr, "%10.10s +-", blank);
3884
3885
3886 /* Object counts: total, live and swept */
3887 (void)fprintf(osStderr, "%33.33s-+-", line);
3888
3889
3890 /* Object sizes: total, live and swept */
3891 (void)fprintf(osStderr, "%23.23s--+", line);
3892 }
3893
3894
3895 /* Finish the line */
3896 (void)fprintf(osStderr, "\n");
3897}
3898
3899
3900localstatic void
3901stoShowCensusTitle(void)
3902{
3903 /* Start the title */
3904 stoShowCensusBar(int0((int) 0));
3905
3906
3907 /* Field name */
3908 (void)fprintf(osStderr, " ");
3909 (void)fprintf(osStderr, "%-10.10s | ", "");
3910
3911
3912 /* Object counts: total, live and swept */
3913 (void)fprintf(osStderr, "%-32.32s | ", " Number of objects");
3914
3915
3916 /* Object sizes: total, live and swept */
3917 (void)fprintf(osStderr, "%-23.23s |", " Memory used (K)");
3918
3919
3920 /* Finish the line */
3921 (void)fprintf(osStderr, "\n");
3922
3923
3924 /* Start the title */
3925 stoShowCensusBar(1);
3926
3927
3928 /* Field name */
3929 (void)fprintf(osStderr, "| ");
3930 (void)fprintf(osStderr, "%-10.10s | ", " Type");
3931
3932
3933 /* Object counts: total, live and swept */
3934 (void)fprintf(osStderr, "%-9.9s | ", " Total");
3935 (void)fprintf(osStderr, "%-9.9s | ", " Live");
3936 (void)fprintf(osStderr, "%-9.9s | ", " Swept");
3937
3938
3939 /* Object sizes: total, live and swept */
3940 (void)fprintf(osStderr, "%-6.6s | ", " Total");
3941 (void)fprintf(osStderr, "%-6.6s | ", " Live");
3942 (void)fprintf(osStderr, "%-6.6s |", " Swept");
3943
3944
3945 /* Finish the line */
3946 (void)fprintf(osStderr, "\n");
3947 stoShowCensusBar(1);
3948}
3949
3950
3951localstatic void
3952stoShowCensus(AInt width)
3953{
3954 Length i;
3955 ULong tmem = 0L;
3956 ULong lmem = 0L;
3957
3958
3959 /* Compute the total store usages */
3960 for (i = 0;(i < STO_CENSUS_LIMIT(sizeof(censusName)/sizeof(censusName[0])) - 1); i++)
3961 {
3962 tmem += censusMemBefore[i];
3963 lmem += censusMemAfter[i];
3964 }
3965
3966
3967 /* Shove it in the total slot */
3968 censusMemBefore[STO_CENSUS_LIMIT(sizeof(censusName)/sizeof(censusName[0])) - 1] = tmem;
3969 censusMemAfter[STO_CENSUS_LIMIT(sizeof(censusName)/sizeof(censusName[0])) - 1] = lmem;
3970
3971
3972 /* Display a nice title bar */
3973 stoShowCensusTitle();
3974
3975
3976 /* Now show the bars of the histogram with titles */
3977 for (i = 0;i < STO_CENSUS_LIMIT(sizeof(censusName)/sizeof(censusName[0])); i++)
3978 {
3979 ULong tsize = censusMemBefore[i] / 1024;
3980 ULong lsize = censusMemAfter[i] / 1024;
3981 ULong tobjs = censusBefore[i];
3982 ULong lobjs = censusAfter[i];
3983 int last = (i == (STO_CENSUS_LIMIT(sizeof(censusName)/sizeof(censusName[0])) - 1));
3984
3985
3986 /* The last line shows the totals */
3987 if (last) stoShowCensusBar(1);
3988
3989
3990 /* Field name */
3991 (void)fprintf(osStderr, "| ");
3992 (void)fprintf(osStderr, "%10.10s | ", censusName[i]);
3993
3994
3995 /* Object counts: total, live and swept */
3996 (void)fprintf(osStderr, "%9ld | ", tobjs);
3997 (void)fprintf(osStderr, "%9ld | ", lobjs);
3998 (void)fprintf(osStderr, "%9ld | ", tobjs - lobjs);
3999
4000
4001 /* Object sizes: total, live and swept */
4002 (void)fprintf(osStderr, "%6ld | ", tsize);
4003 (void)fprintf(osStderr, "%6ld | ", lsize);
4004 (void)fprintf(osStderr, "%6ld |", tsize - lsize);
4005
4006
4007 /* Finish the row */
4008 (void)fprintf(osStderr, "\n");
4009 }
4010
4011
4012 /* Finish with the bottom line */
4013 stoShowCensusBar(1);
4014 (void)fprintf(osStderr, "\n\n");
4015}
4016
4017
4018void
4019stoShowDetail(int stoDetail)
4020{
4021 struct osMemMap **mm;
4022 Length i;
4023 ULong npgBusy=0, npgFree=0, npgStoMan=0, npgForeign=0, npgOther=0;
4024 ULong pgOHead, pcOHead, avPc;
4025 ULong frN[FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0]))];
4026 String sep = stoCensusDivider;
4027
4028
4029 /*
4030 * If we are showing some output other than the census
4031 * then we provide a simple separator to help the user.
4032 */
4033 if (stoDetail & ~STO_SHOW_CENSUS0x00000400)
4034 (void)fprintf(osStderr, "+%76.76s\n", sep);
4035
4036
4037 /* Classify and count the number of pages in the store */
4038 for (i = 0; i < pgMapSize; i++)
4039 {
4040 switch (pgMap[i])
4041 {
4042 case PgBusyFirst: /* Fall through */
4043 case PgBusyFollow: npgBusy++; break;
4044 case PgFree: npgFree++; break;
4045 case PgPgMap: /* Fall through */
4046 case PgBTree: /* Fall through */
4047 case PgDLL: npgStoMan++; break;
4048 case PgForeign: npgForeign++; break;
4049 default: npgOther++; break;
4050 }
4051 }
4052
4053
4054 /* Show the number of different pages in the store */
4055 if (stoDetail & STO_SHOW_PAGES0x00000001)
4056 {
4057 fprintf(osStderr, "| %-11s %ld: %ld busy, %ld free, ",
4058 "Pages:", (long) pgMapSize, (long) npgBusy, (long) npgFree);
4059 fprintf(osStderr, "%ld overhead, %ld foreign, %ld other\n",
4060 npgStoMan, npgForeign, npgOther);
4061 }
4062
4063
4064 /* Show how much store has been allocated/freed/owned */
4065 if (stoDetail & STO_SHOW_MEMORY0x00000002)
4066 {
4067 fprintf(osStderr, "| %-11s %ld alloc, %ld freed, %ld owned, ",
4068 "KBytes:",
4069 (stoBytesAlloc + 512)/1024,
4070 (stoBytesFree + 512)/1024,
4071 (stoBytesOwn + 512)/1024);
4072 fprintf(osStderr, "%ld blacklisted",
4073 (stoBytesBlack + 512)/1024);
4074 fprintf(osStderr, "\n");
4075 }
4076
4077
4078 /* Compute the overheads imposed by the storage manager */
4079 pgOHead = pgMapSize * SectionHeadSize
4080 + npgStoMan * PgSize;
4081 pcOHead = 0;
4082 for (i = 0; i < pgMapSize; i++)
4083 if (pgMap[i] == PgBusyFirst) {
4084 Section *sect = sectAt(i);
4085 pcOHead += sect->qmCount * sizeof(QmInfo);
4086 }
4087
4088
4089 /* Show how much overhead the storage manager is using */
4090 if (stoDetail & STO_SHOW_OVERHEAD0x00000004)
4091 {
4092 fprintf(osStderr, "| %-11s (%ld pg bytes + %ld pc bytes)/",
4093 "Overhead:", pgOHead, pcOHead);
4094 fprintf(osStderr, "%ld owned => %d%%\n",
4095 stoBytesOwn,
4096 (int)((100*pgOHead + 100*pcOHead)/stoBytesOwn));
4097 }
4098
4099
4100 /* How much storage is being held in reserve? */
4101 avPc = 0;
4102 for (i = 0; i < FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])); i++) {
4103 FxMem *pc;
4104 Length sz = fixedSize[i];
4105 frN[i] = 0;
4106 for (pc = fixedPieces[i]; pc; pc = pc->next) {
4107 frN[i] ++;
4108 avPc += sz;
4109 }
4110 }
4111
4112
4113 /* Show the amount of store in reserve */
4114 if (stoDetail & STO_SHOW_RESERVED0x00000008)
4115 {
4116 fprintf(osStderr, "| %-11s %ld in raw pgs, ",
4117 "Reserve:", npgFree * PgSize(1L<<12));
4118 fprintf(osStderr, "%ld in free lists (equiv %ld pgs)\n",
4119 avPc, QUO_ROUND_UP(avPc, PgSize));
4120 }
4121
4122
4123 /* Show the details of fixed-size pages */
4124 if (stoDetail & STO_SHOW_FIXED0x00000010)
4125 {
4126 int wrote = 0;
4127
4128 fprintf(osStderr, "| %-11s %d\n", "FixedSizes:", (int) FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])));
4129 fprintf(osStderr, "| Fixed-size free pieces:\n");
4130 wrote = fprintf(osStderr, "| ");
4131 for (i = 0; i < FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])); i++)
4132 {
4133 if (frN[i])
4134 {
4135 wrote += fprintf(osStderr, " %ldx%d",
4136 frN[i], (int) fixedSize[i]);
4137 if (wrote > 70)
4138 {
4139 /* Wrap long lines */
4140 (void)fprintf(osStderr, "\n");
4141 wrote = fprintf(osStderr, "| ");
4142 }
4143 }
4144 }
4145 fprintf(osStderr, "\n");
4146 }
4147
4148
4149 /* Show the usage counts for fixed-size pages */
4150 if (stoDetail & STO_SHOW_USAGE0x00000800)
4151 {
4152 ULong tot;
4153 ULong spareMixedBytes, availMixedBytes, totalMixedBytes;
4154 ULong nMixedFreeK, nMixedBusyK, nMixedSpareK;
4155 double gceff, gcnum, gcden;
4156 double headroom;
4157
4158
4159 /* Total number of pages under our control */
4160 tot = npgBusy + npgFree + npgStoMan + npgOther;
4161
4162
4163 /* How much headroom do we have? */
4164 if (tot > 0)
4165 headroom = (double)npgFree/(double)tot;
4166 else
4167 headroom = 1.0;
4168
4169
4170 /* How much headroom do we need? */
4171 gcnum = (double)GcEffectiveFactorNum;
4172 gcden = (double)GcEffectiveFactorDen;
4173 gceff = gcnum / gcden;
4174
4175
4176 /* Neatly tabulate the results */
4177 (void)fprintf(osStderr, "| %-11s %.1f%% (%s = %.1f%%, %s)\n",
4178 "Headroom:", 100.0*headroom, "gcEff", 100.0*gceff,
4179 (headroom < gceff) ? "too low" : "okay");
4180 (void)fprintf(osStderr, "| Fixed-size page usage:\n");
4181 (void)fprintf(osStderr, "| ");
4182 (void)fprintf(osStderr, "%4s: ", "Size");
4183 (void)fprintf(osStderr, "%8s ", "Free");
4184 (void)fprintf(osStderr, "%8s ", "Busy");
4185 (void)fprintf(osStderr, "%8s ", "Spare");
4186 (void)fprintf(osStderr, "%7s", "HeadRm");
4187 (void)fprintf(osStderr, "\n");
4188
4189 for (i = 0; i < FixedSizeCount(sizeof(fixedSize)/sizeof(fixedSize[0])); i++)
4190 {
4191 ULong fxFree = freeFixedPieces[i];
4192 ULong fxBusy = busyFixedPieces[i];
4193 ULong fxPer = sectQmCount(1L, fixedSize[i]);
4194 ULong fxSpare = fxPer*npgFree;
4195 ULong fxAvail = fxFree + fxSpare;
4196 ULong fxTotal = fxBusy + fxAvail;
4197 double headrm;
4198
4199 if (fxTotal > 0)
4200 headrm = (double)fxAvail/(double)fxTotal;
4201 else
4202 headrm = 1.0;
4203
4204 (void)fprintf(osStderr, "| ");
4205 (void)fprintf(osStderr, "%4ld: ", (long) fixedSize[i]);
4206 (void)fprintf(osStderr, "%8lu ", fxFree);
4207 (void)fprintf(osStderr, "%8lu ", fxBusy);
4208 (void)fprintf(osStderr, "%8lu ", fxSpare);
4209 (void)fprintf(osStderr, "%6.1f%%", 100.0*headrm);
4210 (void)fprintf(osStderr, "\n");
4211 }
4212
4213
4214 /* Compute mixed-size usage */
4215 spareMixedBytes = npgFree*(PgSize(1L<<12) - MxMemHeadSize(sizeof(MxMem) - sizeof(MxMemU)));
4216 availMixedBytes = freeMixedBytes + spareMixedBytes;
4217 totalMixedBytes = availMixedBytes + busyMixedBytes;
4218
4219
4220 /* Convert bytes into K for readability */
4221 nMixedFreeK = freeMixedBytes / 1024;
4222 nMixedBusyK = busyMixedBytes / 1024;
4223 nMixedSpareK = spareMixedBytes / 1024;
4224
4225
4226 /* Compute headroom */
4227 if (totalMixedBytes > 0)
4228 {
4229 double num = (double)availMixedBytes;
4230 double den = (double)totalMixedBytes;
4231 headroom = num/den;
4232 }
4233 else
4234 headroom = 1.0;
4235
4236
4237 /* Show mixed-size usage. */
4238 (void)fprintf(osStderr, "| Mixed-size page usage: ");
4239 (void)fprintf(osStderr, "%luK free, ", nMixedFreeK);
4240 (void)fprintf(osStderr, "%luK busy, ", nMixedBusyK);
4241 (void)fprintf(osStderr, "%luK spare, ", nMixedSpareK);
4242 (void)fprintf(osStderr, "%.1f%% headroom\n", 100.0*headroom);
4243 }
4244
4245
4246 /* Show details about mixed-size pages */
4247 if (stoDetail & STO_SHOW_MIXED0x00000020)
4248 {
4249 fprintf(osStderr, "| Mixed-size free pieces:\n| ");
4250 btreeNMap((BTreeEltFun) mxmemShow, mixedPieces);
4251 putc('\n',osStderr);
4252 }
4253
4254
4255 /* Display the whole store at page resolution */
4256 if (stoDetail & STO_SHOW_PAGEMAP0x00000040)
4257 {
4258 fprintf(osStderr, "| %-11s [%ld pages]:",
4259 "Page map:", (long) pgMapSize);
4260 for (i = 0; i < pgMapSize; i++)
4261 {
4262 if (i % 64 == 0)
4263 {
4264 fprintf(osStderr, "\n|%5ld-%5ldK ",
4265 (i) * PgSize(1L<<12)/1024,
4266 (i+63) * PgSize(1L<<12)/1024);
4267 }
4268
4269 switch (pgMap[i])
4270 {
4271 case PgFree: putc('+',osStderr); break;
4272 case PgBusyFirst:
4273 {
4274 Section *sect = sectAt(i);
4275
4276 if (!sect->isFixed)
4277 putc('m',osStderr);
4278 else if (sect->qmSizeIndex < 10)
4279 putc('0' + sect->qmSizeIndex,osStderr);
4280 else if (sect->qmSizeIndex < 16)
4281 putc('a' + sect->qmSizeIndex - 10,osStderr);
4282 else
4283 putc('>',osStderr);
4284 break;
4285 }
4286 case PgBusyFollow: putc('.',osStderr); break;
4287 case PgForeign: putc('/',osStderr); break;
4288 case PgPgMap: putc('G',osStderr); break;
4289 case PgBTree: putc('T',osStderr); break;
4290 case PgDLL: putc('L',osStderr); break;
4291 default: putc('?',osStderr); break;
4292 }
4293 }
4294
4295 putc('\n',osStderr);
4296 }
4297
4298
4299 /* Key to the symbols used in the pagemap display */
4300 if (stoDetail & STO_SHOW_PAGEKEY0x00000080)
4301 {
4302 fprintf(osStderr, "| %-11s\n", "Symbols:");
4303 fprintf(osStderr, "| %8s : %s\n",
4304 "+", "free page");
4305 fprintf(osStderr, "| %8s : %s\n",
4306 "[0-9]", "fixed-size pages by piece size");
4307 fprintf(osStderr, "| %8s : %s\n",
4308 "[a-f]", "fixed-size pages by piece size");
4309 fprintf(osStderr, "| %8s : %s\n",
4310 ">", "fixed-size pages with large pieces");
4311 fprintf(osStderr, "| %8s : %s\n",
4312 "m", "mixed-size page");
4313 fprintf(osStderr, "| %8s : %s\n",
4314 ".", "mixed-size pages for rest of large object");
4315 fprintf(osStderr, "| %8s : %s\n",
4316 "/", "foreign (not owned by us)");
4317 fprintf(osStderr, "| %8s : %s\n",
4318 "G", "holds the page map");
4319 fprintf(osStderr, "| %8s : %s\n",
4320 "T", "btree storage (mixed-page housekeeping)");
4321 fprintf(osStderr, "| %8s : %s\n",
4322 "L", "dll storage (mixed-page housekeeping)");
4323 fprintf(osStderr, "| %8s : %s\n",
4324 "?", "unknown page type");
4325 }
4326
4327
4328 /* Display the memory map of this process */
4329 if (stoDetail & STO_SHOW_MEMMAP0x00000100)
4330 {
4331 fprintf(osStderr, "| Heap....... [%p..%p) %ldK\n",
4332 (void *) heapStart, (void *) heapEnd,
4333 ptrDiff(heapEnd, heapStart)((long)((char *)(heapEnd) - (char *)(heapStart)))/1024);
4334
4335 for (mm=osMemMap(OSMEM_DDATA0x02); (*mm)->use != OSMEM_END0x00; mm++)
4336 fprintf(osStderr, "| Dyn memory: [%p..%p) %ldK\n",
4337 (*mm)->lo, (*mm)->hi,
4338 ptrDiff((*mm)->hi, (*mm)->lo)((long)((char *)((*mm)->hi) - (char *)((*mm)->lo)))/1024);
4339 for (mm=osMemMap(OSMEM_IDATA0x01); (*mm)->use != OSMEM_END0x00; mm++)
4340 fprintf(osStderr, "| Init data: [%p..%p) %ldK\n",
4341 (*mm)->lo, (*mm)->hi,
4342 ptrDiff((*mm)->hi, (*mm)->lo)((long)((char *)((*mm)->hi) - (char *)((*mm)->lo)))/1024);
4343 for (mm=osMemMap(OSMEM_STACK0x04); (*mm)->use != OSMEM_END0x00; mm++)
4344 fprintf(osStderr, "| Stack: [%p..%p) %ldbytes\n",
4345 (*mm)->lo, (*mm)->hi,
4346 ptrDiff((*mm)->hi, (*mm)->lo)((long)((char *)((*mm)->hi) - (char *)((*mm)->lo))));
4347 }
4348
4349
4350 /*
4351 * If we are showing some output other than the census
4352 * then we provide another separator to help the user.
4353 */
4354 if (stoDetail & ~STO_SHOW_CENSUS0x00000400)
4355 (void)fprintf(osStderr, "+%76.76s\n\n\n", sep);
4356
4357
4358 /* Census of object types */
4359 if (stoDetail & STO_SHOW_CENSUS0x00000400)
4360 stoShowCensus(20);
4361}
4362
4363
4364/*
4365 * Test consistency of storage management structures.
4366 */
4367void
4368stoAudit(void)
4369{
4370#ifndef NDEBUG
4371 stoAuditAll();
4372#endif
4373}
4374
4375
4376/*
4377 * Control storage manager behaviour.
4378 */
4379
4380int
4381stoCtl(int cmd, ...)
4382{
4383 va_list argp;
4384 int rc;
4385
4386 va_start(argp, cmd)__builtin_va_start(argp, cmd);
4387 rc = 0;
4388
4389 switch (cmd) {
4390 case StoCtl_GcLevel1:
4391 if (gcLevel != StoCtl_GcLevel_Never0)
4392 gcLevel = va_arg(argp, int)__builtin_va_arg(argp, int);
4393 if (gcLevel == StoCtl_GcLevel_Never0)
4394 stoMustTag = false((int) 0);
4395 break;
4396 case StoCtl_GcFile2:
4397 gcTraceFile = va_arg(argp, FILE *)__builtin_va_arg(argp, FILE *);
4398 break;
4399 case StoCtl_Wash3:
4400 stoMustWash = va_arg(argp, Bool)__builtin_va_arg(argp, Bool);
4401 break;
4402 default:
4403 rc = -1;
4404 }
4405
4406 va_end(argp)__builtin_va_end(argp);
4407 return rc;
4408}
4409
4410
4411void
4412stoRegister(StoInfo info)
4413{
4414 if (info->code >= OB_MAX256)
4415 bug("Too many object types");
4416
4417 stoObRegistered[info->code] = 1;
4418 stoObNoInternalPtrs[info->code] = !info->hasPtrs;
4419}
4420
4421
4422/*
4423 * Classify the specified pointer. This code is very shaky and needs
4424 * to be checked more thoroughly. Do pointer comparisons work here?
4425 */
4426int
4427stoWritablePointer(Pointer p)
4428{
4429#if defined(OS_WIN32)
4430 /* Win32 gives us a function specifically for this task */
4431 return IsBadWritePtr(p, sizeof(long)) ? POINTER_IS_INVALID(-1) : POINTER_IS_VALID( 1);
4432#else
4433 struct osMemMap **mm;
4434 int n;
4435
4436 /*
4437 * Check if the target is in the heap. Unless the
4438 * pointer is invalid then this is the most likely
4439 * place to find the target object. We assume that
4440 * all areas of the heap are writable.
4441 */
4442 if (isInHeap(p)) return POINTER_IS_VALID( 1);
4443
4444
4445 /* Get the memory mappings */
4446 mm = osMemMap(OSMEM_STACK0x04 | OSMEM_IDATA0x01 | OSMEM_DDATA0x02);
4447
4448
4449 /* Can we classify this pointer at all? */
4450 if (!mm) return POINTER_IS_UNKNOWN( 0);
4451
4452
4453 /*
4454 * Scan the whole memory map to find out where the
4455 * target of this pointer lies. We assume that the
4456 * stack, static and dynamic areas are all writable.
4457 */
4458 for (n = 0 ; (*mm)->use != OSMEM_END0x00; mm++) {
4459 switch ((*mm)->use) {
4460 case OSMEM_STACK0x04:
4461 /* Is the object in this segment? */
4462 if (p < (Pointer)((*mm)->lo)) break;
4463 if (p > (Pointer)((*mm)->hi)) break;
4464 return POINTER_IS_VALID( 1);
4465 case OSMEM_IDATA0x01:
4466 /* Is the object in this segment? */
4467 if (p < (Pointer)((*mm)->lo)) break;
4468 if (p > (Pointer)((*mm)->hi)) break;
4469 return POINTER_IS_VALID( 1);
4470 case OSMEM_DDATA0x02:
4471 /* Is the object in this segment? */
4472 if (p < (Pointer)((*mm)->lo)) break;
4473 if (p > (Pointer)((*mm)->hi)) break;
4474 return POINTER_IS_VALID( 1);
4475 break;
4476 default:
4477 assert(false); /*mem type is silly*/
4478 break;
4479 }
4480 }
4481
4482
4483 /* Can't find that pointer - assume invalid */
4484 return POINTER_IS_INVALID(-1);
4485#endif
4486}
4487
4488
4489/*
4490 * This is a somewhat dodgy function and is really only
4491 * here for experimentation. It allows an Aldor domain
4492 * to provide a call-back for marking itself during GC.
4493 * Unless values from the domain occupy a lot of storage
4494 * space with very few pointers it is extremely unlikely
4495 * that this call back will be efficient. Not only that
4496 * but the user must be extremely careful not to miss a
4497 * potential pointer.
4498 */
4499void
4500stoSetAldorTracer(int code, Pointer clos)
4501{
4502 if (code >= OB_MAX256)
4503 bug("Too many object types");
4504
4505 if (!stoObRegistered[code])
4506 bug("Object type not registered");
4507
4508 stoObAldorTracer[code] = (StoFiClos)clos;
4509}
4510
4511
4512/*
4513 * This function performs exactly the same job as
4514 * stoSetAldorTracer but is designed to be
4515 * called from C.
4516 */
4517void
4518stoSetTracer(int code, StoTraceFun fun)
4519{
4520 if (code >= OB_MAX256)
4521 bug("Too many object types");
4522
4523 if (!stoObRegistered[code])
4524 bug("Object type not registered");
4525
4526 stoObCTracer[code] = fun;
4527}
4528
4529
4530/*
4531 * Given a pointer, try to mark the object pointed to.
4532 * IMPORTANT: the code here must perform exactly the same
4533 * checks as the code in stoGcMarkRange(). We
4534 * don't do blacklisting though.
4535 */
4536int
4537stoMarkObject(Pointer p)
4538{
4539 Pointer *plo, *phi;
4540 static int pgno, qmno;
4541 static PgInfo pgtag;
4542 static QmInfo qmtag;
4543 static Section *sect;
4544 int n = 0;
4545 int oldStoMarkArea = stoMarkArea;
4546
4547
4548 /* Verify pointer is into heap. */
4549 if (!isInHeap(p)) return 0;
4550 if (DEBUG(sto)((int) 0)) {stoMarkCount[stoMarkArea][PTR_INTO_HEAP0]++;}
4551
4552
4553 /* Verify pointer is to busy page. */
4554 pgno = pgNo(p);
4555 pgtag = pgMap[pgno];
4556 if (pgtag != PgBusyFirst && pgtag != PgBusyFollow) return 0;
4557 if (DEBUG(sto)((int) 0)) {
4558 stoMarkCount[stoMarkArea][PTR_INTO_HEAP0]--;
4559 stoMarkCount[stoMarkArea][PTR_INTO_BUSY1]++;
4560 }
4561
4562
4563 /* Grab section header. */
4564 while (pgtag == PgBusyFollow) pgtag = pgMap[--pgno];
4565 sect = sectAt(pgno);
4566
4567
4568 /* Verify pointed-to quantum. */
4569 if (ptrLT(p, sect->data)(((Pointer)(p)) < ((Pointer)(sect->data)))) return 0;
4570 if (DEBUG(sto)((int) 0)) {
4571 stoMarkCount[stoMarkArea][PTR_INTO_BUSY1]--;
4572 stoMarkCount[stoMarkArea][PTR_INTO_DATA2]++;
4573 }
4574
4575
4576#ifdef STO_DIVISION_BY_LOOKUP1
4577 /* If interior of object, point to beginning. */
4578 /* Use shift operations where possible */
4579 if (sect->isFixed)
4580 {
4581 qmno = (sect->qmLog) ? qmLogNo(p, sect) : qmDivNo(p, sect); /* TTT */
4582 assert(qmno == qmNo(p, sect));
4583 p = (sect->qmLog) ? ptrOff((char *) sect->data, qmno << sect->qmLog) :
4584 ptrOff((char *) sect->data, qmno * sect->qmSize);
4585 }
4586 else
4587 {
4588 qmno = qmNo(p, sect);
4589 p = ptrOff((char *) sect->data, qmno * sect->qmSize);
4590 }
4591#else
4592 /* If interior of object, point to beginning. */
4593 /* Use shift operations where possible */
4594 if (sect->isFixed && sect->qmLog) {
4595 qmno = qmLogNo(p, sect);
4596 p = ptrOff((char *) sect->data, qmno << sect->qmLog);
4597 }
4598 else {
4599 qmno = qmNo(p, sect);
4600 /* If interior of object, point to beginning. */
4601 p = ptrOff((char *) sect->data, qmno * sect->qmSize);
4602 }
4603#endif
4604
4605
4606 /* Verify not already marked. */
4607 qmtag = sect->info[qmno];
4608 if (QmInfoMark(qmtag)((qmtag) & 0x20)) return 0;
4609 if (DEBUG(sto)((int) 0)) {
4610 stoMarkCount[stoMarkArea][PTR_INTO_DATA2]--;
4611 stoMarkCount[stoMarkArea][PTR_INTO_NEW3]++;
4612 }
4613
4614
4615 /* Add mark bits quanta in piece. Determine data bounds. */
4616 if (sect->isFixed) {
4617 QmInfoSetMark(sect->info[qmno])((sect->info[qmno]) |= 0x20);
4618 plo = (Pointer *) p;
4619 phi = (Pointer *) ptrOff((char *) plo, sect->qmSize)((Pointer)((char *)((char *) plo) + (sect->qmSize)));
4620 }
4621 else {
4622 MxMem *pc;
4623 int sz, nq, qi;
4624
4625 while (QmInfoKind(qmtag)((qmtag) & 0xC0) == QmFollow0x00)
4626 qmtag = sect->info[--qmno];
4627
4628 pc = (MxMem *)ptrOff((char *)sect->data,
4629 qmno*sect->qmSize);
4630 sz = pc->nbytesThis;
4631 nq = sz/sect->qmSize;
4632
4633 for (qi = qmno; qi < qmno + nq; qi++)
4634 QmInfoSetMark(sect->info[qi])((sect->info[qi]) |= 0x20);
4635
4636 sz -= MxMemHeadSize(sizeof(MxMem) - sizeof(MxMemU));
4637 plo = (Pointer *) (&pc->body.busy.data);
4638 phi = (Pointer *) ptrOff((char *) plo, sz)((Pointer)((char *)((char *) plo) + (sz)));
4639 }
4640
4641
4642 /* Verify piece is in use. */
4643 if (QmInfoKind(qmtag)((qmtag) & 0xC0) == QmFreeFirst0x40) {
4644 if (DEBUG(sto)((int) 0)) {
4645 stoMarkCount[stoMarkArea][PTR_INTO_NEW3]--;
4646 stoMarkCount[stoMarkArea][PTR_INTO_FREE4]++;
4647 }
4648 stoGcMarkedFree++;
4649 return 0;
4650 }
4651
4652
4653 /* Check that this contains pointers
4654 * This test should use a bit in the tag, not the code.
4655 */
4656 if (QmIsPtrFree(qmtag)(stoObNoInternalPtrs[((qmtag) & 0x1F)])) return 0;
4657
4658
4659 /* We have marked this piece */
4660 n++;
4661
4662
4663#if STO_USER_CAN_TRACE
4664 /*
4665 * Has the creator of this object requested
4666 * permission to trace it?
4667 */
4668 if (QmAldorTraced(qmtag)(stoObAldorTracer[((qmtag) & 0x1F)]))
4669 {
4670 /* Invoke the creator's tracer */
4671 n += (int)stoFiCCall2(int, QmAldorTraced(qmtag), plo, phi);
4672 return n;
4673 }
4674 if (QmCTraced(qmtag)(stoObCTracer[((qmtag) & 0x1F)]))
4675 {
4676 /* Invoke the creator's tracer */
4677 n += (QmCTraced(qmtag)(stoObCTracer[((qmtag) & 0x1F)]))(plo, phi);
4678 return n;
4679 }
4680#endif
4681
4682
4683 /* Pointer classification */
4684 if (DEBUG(sto)((int) 0)) {
4685 if (stoMarkArea < MEM_MAX_TYPE(3 +1)) {
4686 /* Now marking within heap */
4687 oldStoMarkArea = stoMarkArea;
4688 stoMarkArea += MEM_MAX_TYPE(3 +1);
4689 }
4690 }
4691
4692
4693 /* Count the number of children marked */
4694 n += stoGcMarkRange(plo, phi, (int) 0);
4695
4696
4697 /* Pointer classification */
4698 if (DEBUG(sto)((int) 0)) {stoMarkArea = oldStoMarkArea;}
4699
4700 return n;
4701}
4702
4703
4704/*
4705 * Exception handling code.
4706 */
4707
4708localstatic MostAlignedType *
4709stoDefaultError(int errnum)
4710{
4711 String msg;
4712
4713 switch (errnum) {
4714 case StoErr_OutOfMemory1: return 0;
4715 case StoErr_UsedNonalloc2: msg = "using non-allocated space"; break;
4716 case StoErr_CantBuild3: msg = "can't build internal structure"; break;
4717 case StoErr_FreeBad4: msg = "attempt to free unknown space"; break;
4718 default: msg = "unexpected"; break;
4719 }
4720 fprintf(osStderr, "Storage allocation error (%s).\n", msg);
4721 exitFailure();
4722 NotReached(return 0);
4723}
4724
4725StoErrorFun
4726stoSetHandler(StoErrorFun f)
4727{
4728 StoErrorFun oldf = stoError;
4729 stoError = f ? f : (StoErrorFun) stoDefaultError;
4730 return oldf;
4731}
4732
4733/*===========================================================================*/
4734
4735#else /* not STO_USE_BTREE */
4736
4737/*
4738 * Alternate storage management schemes are based on three functions:
4739 * stoMore, stoLess, and stoMoreOrLess.
4740 */
4741
4742#ifdef STO_USE_MALLOC
4743extern Pointer malloc (Length);
4744extern void free (Pointer);
4745
4746# define stoMore(sz) ((Pointer) malloc(sz))
4747# define stoMoreOrLess(p,osz,nsz) ((Pointer) realloc(p, nsz))
4748# define stoLess(p) free(p)
4749#endif
4750
4751#ifdef STO_USE_BOEHM
4752/* uncomment this */
4753#include <gc/gc.h>
4754
4755# define stoMore(sz) ((Pointer) GC_malloc(sz))
4756# define stoMoreOrLess(p,osz,nsz) ((Pointer) GC_realloc(p,nsz))
4757# define stoLess(p) GC_free(p)
4758#endif
4759
4760
4761
4762#ifdef STO_USE_ONCE
4763localstatic Pointer
4764stoMore(Length sz)
4765{
4766 static long stoC = 0;
4767 static char * stoV;
4768 Pointer r;
4769
4770 sz = ROUND_UP(sz, alignof(MostAlignedType));
4771 if (sz > stoC) {
4772 ULong L = MAX(sz, ((long) 1) << 15);
4773 Pointer P = osAlloc(&L);
4774 if (!P) return 0;
4775 stoC = L; stoV = (char *) P;
4776 }
4777 r = (Pointer) stoV; stoC -= sz; stoV += sz;
4778 return r;
4779}
4780localstatic Pointer
4781stoMoreOrLess(Pointer p, Length osz, Length nsz)
4782{
4783 Pointer r;
4784
4785 if (osz >= nsz) return p;
4786 r = stoMore(nsz);
4787 if (r) memmove(r, p, osz);
4788 return r;
4789}
4790# define stoLess(p) ((void) (p))
4791#endif
4792
4793/*
4794 * The remainder is common to all alternate implementations.
4795 */
4796
4797
4798static Bool stoMustTag = false((int) 0);
4799
4800ULong stoBytesOwn = 0; /* Total owned by allocator. */
4801ULong stoBytesAlloc = 0; /* Total ever allocated. */
4802ULong stoBytesFree = 0; /* Total ever freed. */
4803ULong stoBytesGc = 0; /* Total ever garbage collected. */
4804ULong stoPiecesGc[STO_CODE_LIMIT32]; /* # this time, by code. */
4805
4806#ifdef STO_TALLY
4807# define stoTally(expr)(expr) (expr)
4808#else
4809# define stoTally(expr)(expr) Nothing
4810#endif
4811
4812struct stoHdr {
4813 char code;
4814 unsigned short magic;
4815 Length size;
4816 MostAlignedType data;
4817};
4818
4819# define STO_MAGIC 0xFE20 /* Any unlikely 2 byte sequence. */
4820
4821# define sizeofHdr (sizeof(struct stoHdr) - sizeof(MostAlignedType))
4822# define stoPtrToHdr(p) ((struct stoHdr *)((char *) p - sizeofHdr))
4823# define stoHdrToPtr(h) ((Pointer) &((h)->data))
4824
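/*
 * Illustrative sketch, not part of store.c: the alternate allocators hide a
 * small stoHdr in front of every piece.  The caller sees &hdr->data, and
 * stoPtrToHdr()/stoHdrToPtr() simply step back and forth over sizeofHdr bytes:
 *
 *     struct stoHdr *h = (struct stoHdr *) stoMore(size + sizeofHdr);
 *     Pointer        p = stoHdrToPtr(h);   // pointer handed to the caller
 *     assert(stoPtrToHdr(p) == h);         // round trip recovers the header
 */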
4825localstatic MostAlignedType * stoDefaultError (int errnum);
4826static StoErrorFun stoError = stoDefaultError;
4827
4828MostAlignedType *
4829stoAlloc(unsigned code, ULong size)
4830{
4831 Length sz = size + sizeofHdr;
4832 Pointer p;
4833 struct stoHdr *q;
4834
4835 if (sz == 0) sz = 1; /* For malloc. */
4836
4837 p = stoMore(sz);
4838 if (!p) return (*stoError)(StoErr_OutOfMemory1);
4839
4840 q = (struct stoHdr *) p;
4841 p = stoHdrToPtr(q);
4842
4843 if (stoMustTag) {
4844 q->code = code;
4845 q->magic = STO_MAGIC;
4846 }
4847 stoTally(stoBytesAlloc += size)(stoBytesAlloc += size);
4848 q->size = sz;
4849 return (MostAlignedType *) p;
4850}
4851
4852void
4853stoFree(Pointer p)
4854{
4855 if (p==NULL((void*)0))
4856 return;
4857 stoTally(stoBytesFree += stoPtrToHdr(p)->size)(stoBytesFree += stoPtrToHdr(p)->size);
4858 stoLess(stoPtrToHdr(p));
4859}
4860
4861ULong
4862stoSize(Pointer p)
4863{
4864 return stoPtrToHdr(p)->size - sizeofHdr;
4865}
4866
4867unsigned
4868stoCode(Pointer p)
4869{
4870 int c = 0;
4871 if (stoMustTag)
4872 c = stoPtrToHdr(p)->code;
4873 return c;
4874}
4875
4876MostAlignedType *
4877stoResize(Pointer p0, ULong size)
4878{
4879 struct stoHdr *q0, *q1;
4880 Length nsz = size + sizeofHdr;
4881
4882 q0 = stoPtrToHdr(p0);
4883 q1 = (struct stoHdr *) stoMoreOrLess(q0, q0->size, nsz);
4884 if (!q1) return (*stoError)(StoErr_OutOfMemory1);
4885 q1->size = nsz;
4886
4887 return (MostAlignedType *) stoHdrToPtr(q1);
4888}
4889
4890MostAlignedType *
4891stoRecode(Pointer p, unsigned code)
4892{
4893 if (stoMustTag) stoPtrToHdr(p)->code = code;
4894 return (MostAlignedType *) p;
4895}
4896
4897Bool
4898stoIsPointer(Pointer p)
4899{
4900 int isMagic = 1;
4901 if (!p) return 0;
4902 if (stoMustTag) isMagic = stoPtrToHdr(p)->magic == STO_MAGIC;
4903 return isMagic;
4904}
4905
4906void stoGc (void) { }
4907void stoTune (void) { }
4908int stoShowArgs (char *detail) { return 0; }
4909void stoShow (void) { }
4910void stoShowDetail (int x) { }
4911void stoAudit (void) { }
4912void stoRegister (StoInfo info) { }
4913void stoSetAldorTracer (int code, Pointer clos) {}
4914void stoSetTracer (int code, StoTraceFun fun) {}
4915int stoMarkObject (Pointer p) { return 0; }
4916int stoWritablePointer (Pointer p) { return POINTER_IS_UNKNOWN( 0); }
4917int stoCtl (int cmd, ...) { return 0; }
4918
4919static struct tmTimer gcTimer;
4920TmTimer stoGcTimer (void) { return &gcTimer; }
4921
4922
4923/*
4924 * Exception handling code.
4925 */
4926localstatic MostAlignedType *
4927stoDefaultError(int errnum)
4928{
4929 String msg;
4930 switch (errnum) {
4931 case StoErr_OutOfMemory1:
4932 return 0;
4933 case StoErr_UsedNonalloc2:
4934 msg = "using non-allocated space";
4935 break;
4936 case StoErr_CantBuild3:
4937 msg = "can't build internal structure";
4938 break;
4939 default:
4940 msg = "unexpected";
4941 break;
4942 }
4943 fprintf(osStderr, "Storage allocation error (%s).\n", msg);
4944 exitFailure();
4945 NotReached(return 0);
4946}
4947
4948StoErrorFun
4949stoSetHandler(StoErrorFun f)
4950{
4951 StoErrorFun oldf = stoError;
4952 stoError = f ? f : (StoErrorFun) stoDefaultError;
4953 return oldf;
4954}
4955
4956#endif /* !STO_USE_BTREE */