/**********************************************************************
 * File:        memblk.c  (Formerly memblock.c)
 * Description: Enhanced instrumented memory allocator implemented as a class.
 * Author:					Ray Smith
 * Created:					Tue Jan 21 17:13:39 GMT 1992
 *
 * (C) Copyright 1992, Hewlett-Packard Ltd.
 ** Licensed under the Apache License, Version 2.0 (the "License");
 ** you may not use this file except in compliance with the License.
 ** You may obtain a copy of the License at
 ** http://www.apache.org/licenses/LICENSE-2.0
 ** Unless required by applicable law or agreed to in writing, software
 ** distributed under the License is distributed on an "AS IS" BASIS,
 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ** See the License for the specific language governing permissions and
 ** limitations under the License.
 *
 **********************************************************************/

#include          "mfcpch.h"     //precompiled headers
#include          <stdlib.h>
#include          <string.h>
#include          "stderr.h"
#include          "memryerr.h"
#include          "hashfn.h"
#include          "tprintf.h"
#include          "memry.h"
#include          "memblk.h"
#ifdef __UNIX__
#include          <signal.h>
#endif

class UWREC
{
  public:
    unsigned cur_frsize;         //frame size
    unsigned cursp;              //stack
    unsigned currls;             //pc space
    unsigned currlo;             //pc offset
    unsigned curdp;              //data pointer
    unsigned toprp;              //rp
    unsigned topmrp;             //mrp
    unsigned topsr0;             //sr0
    unsigned topsr4;             //sr4
    unsigned r3;                 //gr3
    unsigned cur_r19;            //gr19
};

MEMUNION *free_block = NULL;     //head of freelist

#define EXTERN

EXTERN MEM_ALLOCATOR big_mem;
EXTERN MEM_ALLOCATOR main_mem;
                                 //heads of freelists
EXTERN MEMUNION *free_structs[MAX_STRUCTS];
                                 //number issued
EXTERN inT32 structs_in_use[MAX_STRUCTS];
                                 //number issued
EXTERN inT32 blocks_in_use[MAX_STRUCTS];
                                 //head of block lists
EXTERN MEMUNION *struct_blocks[MAX_STRUCTS];
EXTERN const char *owner_names[MAX_STRUCTS][MAX_CLASSES];
EXTERN inT32 owner_counts[MAX_STRUCTS][MAX_CLASSES];
                                 //no of names
EXTERN inT16 name_counts[MAX_STRUCTS];
EXTERN inT32 free_struct_blocks; //no of free blocks

EXTERN INT_VAR (mem_mallocdepth, 0, "Malloc stack depth to trace");
EXTERN INT_VAR (mem_mallocbits, 8, "Log 2 of hash table size");
EXTERN INT_VAR (mem_freedepth, 0, "Free stack depth to trace");
EXTERN INT_VAR (mem_freebits, 8, "Log 2 of hash table size");
EXTERN INT_VAR (mem_countbuckets, 16, "No of buckets for histogram");
EXTERN INT_VAR (mem_checkfreq, 0, "Calls to alloc_mem between owner counts");

/**********************************************************************
 * MEM_ALLOCATOR::init
 *
 * Initialize a memory allocator.
 **********************************************************************/

void
MEM_ALLOCATOR::init (            //initialize
void *(*ext_malloc) (inT32),     //external source
void (*ext_free) (void *),       //external free
inT32 firstsize,                 //size of first block
inT32 lastsize,                  //size of last block
inT32 maxchunk                   //biggest request
) {
  blockcount = 0;
  malloc_serial = 0;
  topblock = NULL;
  currblock = NULL;
  callers = NULL;
  malloc = ext_malloc;
  free = ext_free;
  maxsize = lastsize;
  biggestblock = maxchunk;
  totalmem = 0;
  memsize = firstsize;
  malloc_div_ratio = 1;
  malloc_minor_serial = 0;
  malloc_auto_count = 0;
  call_bits = 0;
  entries = 0;
}


/**********************************************************************
 * MEM_ALLOCATOR::hash_caller
 *
 * Generate a hash code for a caller, and set up the tables if necessary.
 **********************************************************************/

uinT16 MEM_ALLOCATOR::hash_caller(            //get hash code
                                  void *addr  //return address
                                 ) {
  inT32 index;                   //index to table
  inT32 initial_hash;            //initial index

  if (callers == NULL)
    init_callers();  //setup table
                                 //get hash code
  initial_hash = hash (call_bits, &addr, sizeof (addr));
  if (initial_hash == 0)
    initial_hash = 1;
  index = initial_hash;
  if (callers[index].caller != NULL && callers[index].caller != addr) {
    do {
      index++;
      if (index >= entries)
        index = 1;
    }
    while (callers[index].caller != NULL
      && callers[index].caller != addr && index != initial_hash);
    if (index == initial_hash)
      index = 0;
  }
  if (callers[index].caller == NULL) {
    if (index != 0)
      callers[index].caller = addr;
    if (callers[index].free_list == NULL)
                                 //setup free table
      callers[index].init_freeers ();
  }
  return (uinT16) index;
}
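
/**********************************************************************
 * Note on the caller table probed above: it holds 1 << mem_mallocbits
 * entries and is searched by linear probing from the initial hash,
 * wrapping from the last entry back to entry 1.  Entry 0 is never
 * keyed by an address; it serves as the shared overflow slot once the
 * table is full.
 **********************************************************************/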


/**********************************************************************
 * MALLOC_CALL::count_freeer
 *
 * Generate a hash code for a freeer, and set up the tables if necessary.
 * Then count the call.
 **********************************************************************/

void MALLOC_CALL::count_freeer(            //count calls to free
                               void *addr  //return address
                              ) {
  inT32 entries;                 //entries in table
  inT32 index;                   //index to table
  inT32 initial_hash;            //initial index

  if (free_list == NULL)
    init_freeers();  //setup table
  entries = 1 << free_bits;
                                 //get hash code
  initial_hash = hash (free_bits, &addr, sizeof (addr));
  if (initial_hash == 0)
    initial_hash = 1;
  index = initial_hash;
  if (free_list[index].freeer != NULL && free_list[index].freeer != addr) {
    do {
      index++;
      if (index >= entries)
        index = 1;
    }
    while (free_list[index].freeer != NULL
      && free_list[index].freeer != addr && index != initial_hash);
    if (index == initial_hash)
      index = 0;
  }
  if (free_list[index].freeer == NULL && index != 0) {
    free_list[index].freeer = addr;
  }
  free_list[index].count++;      //count them
}


/**********************************************************************
 * MEM_ALLOCATOR::init_callers
 *
 * Initialize the callers hash table.
 **********************************************************************/

void MEM_ALLOCATOR::init_callers() {  //setup hash table
  inT32 depth = mem_mallocdepth;

  mem_mallocdepth.set_value (0); //can't register it
  call_bits = mem_mallocbits;
  entries = 1 << call_bits;
                                 //make an array
  callers = new MALLOC_CALL[entries];
  mem_mallocdepth.set_value (depth);
}


/**********************************************************************
 * MALLOC_CALL::init_freeers
 *
 * Initialize the freeers hash table.
 **********************************************************************/

void MALLOC_CALL::init_freeers() {  //setup hash table
  inT32 entries;                 //entries in table
  inT32 depth = mem_mallocdepth;

  mem_mallocdepth.set_value (0); //can't register it
  free_bits = mem_freebits;
  entries = 1 << free_bits;
                                 //make an array
  free_list = new FREE_CALL[entries];
  mem_mallocdepth.set_value (depth);
}


/**********************************************************************
 * MEM_ALLOCATOR::reduce_counts
 *
 * Divide all ages by 2 to get a log use of counts.
 **********************************************************************/

void MEM_ALLOCATOR::reduce_counts() {  //divide by 2
  MEMBLOCK *block;               //current block
  MEMUNION *chunk;               //current chunk
  inT32 chunksize;               //size of chunk
  inT32 blockindex;              //index of block

  check_mem ("Reducing counts", JUSTCHECKS);
  for (blockindex = 0; blockindex < blockcount; blockindex++) {
                                 //current block
    block = &memblocks[blockindex];
                                 //scan all chunks
    for (chunk = block->blockstart; chunk != block->blockend; chunk += chunksize) {
      chunksize = chunk->size;   //size of chunk
      if (chunksize < 0)
        chunksize = -chunksize;  //absolute size
      chunk->age /= 2;           //divide ages
    }
  }
}


/**********************************************************************
 * MEM_ALLOCATOR::display_counts
 *
 * Send counts of outstanding blocks to stderr.
 **********************************************************************/

void MEM_ALLOCATOR::display_counts() {  //count up
  MEMBLOCK *block;               //current block
  MEMUNION *chunk;               //current chunk
  inT32 chunksize;               //size of chunk
  inT32 blockindex;              //index of block
  inT32 buckets;                 //required buckets
  inT32 bucketsize;              //no in each bucket
  inT32 callindex;               //index to callers
  inT32 freeindex;               //index to freeers
  inT32 freeentries;             //table size
  inT32 totalchunks;             //total chunk counts
  inT32 totalspace;              //total mem space
  inT32 totalpchunks;            //permanent chunks
  inT32 totalpspace;             //permanent space
  inT32 totalfrees;              //total free calls

  if (callers == NULL)
    return;                      //can't do anything
  check_mem ("Displaying counts", JUSTCHECKS);
  buckets = mem_countbuckets;
  bucketsize = (malloc_serial - 1) / buckets + 1;
  tprintf ("\nEach bucket covers %g counts.\n",
    (double) bucketsize * malloc_div_ratio);
  for (callindex = 0; callindex < entries; callindex++) {
    if (callers[callindex].free_list != NULL) {
      callers[callindex].counts =
        (inT32 *) malloc (buckets * 4 * sizeof (inT32));
      memset (callers[callindex].counts, 0,
        (size_t) (buckets * 4 * sizeof (inT32)));
    }
  }
  for (blockindex = 0; blockindex < blockcount; blockindex++) {
                                 //current block
    block = &memblocks[blockindex];
                                 //scan all chunks
    for (chunk = block->blockstart; chunk != block->topchunk; chunk += chunksize) {
      chunksize = chunk->size;   //size of chunk
      if (chunksize < 0) {
        chunksize = -chunksize;  //absolute size
        callindex = chunk->owner;
        if (callers[callindex].counts != NULL) {
          callers[callindex].counts[chunk->age / bucketsize * 4]++;
          callers[callindex].counts[chunk->age / bucketsize * 4 +
            1] += chunksize;
        }
      }
    }
                                 //scan all chunks
    for (; chunk != block->blockend; chunk += chunksize) {
      chunksize = chunk->size;   //size of chunk
      if (chunksize < 0) {
        chunksize = -chunksize;  //absolute size
        callindex = chunk->owner;
        if (callers[callindex].counts != NULL) {
          callers[callindex].counts[chunk->age / bucketsize * 4 +
            2]++;
          callers[callindex].counts[chunk->age / bucketsize * 4 +
            3] += chunksize;
        }
      }
    }
  }
  for (callindex = 0; callindex < entries; callindex++) {
    if (callers[callindex].counts != NULL) {
      for (totalspace = 0, totalchunks = 0, totalpspace =
        0, totalpchunks = 0, freeindex = 0; freeindex < buckets;
      freeindex++) {
        totalchunks += callers[callindex].counts[freeindex * 4];
        totalspace += callers[callindex].counts[freeindex * 4 + 1];
        totalpchunks += callers[callindex].counts[freeindex * 4 + 2];
        totalpspace += callers[callindex].counts[freeindex * 4 + 3];
      }
      freeentries = 1 << callers[callindex].free_bits;
      for (totalfrees = 0, freeindex = 0; freeindex < freeentries;
        freeindex++)
      totalfrees += callers[callindex].free_list[freeindex].count;
      if (totalspace != 0 || totalfrees != 0) {
        tprintf ("alloc_mem at %d : total held=%d(%d), frees=%d.\n",
          callers[callindex].caller,
          totalchunks, totalspace * sizeof (MEMUNION),
          totalfrees);
      }
      if (totalspace > 0) {
        for (freeindex = 0; freeindex < buckets; freeindex++) {
          tprintf ("%d(%d) ",
            callers[callindex].counts[freeindex * 4],
            callers[callindex].counts[freeindex * 4 +
            1] * sizeof (MEMUNION));
        }
        tprintf ("\n");
      }
      if (totalfrees != 0) {
        tprintf ("Calls to free : ");
        for (freeindex = 0; freeindex < freeentries; freeindex++) {
          if (callers[callindex].free_list[freeindex].count != 0)
            tprintf ("%d : %d ",
              callers[callindex].free_list[freeindex].freeer,
              callers[callindex].free_list[freeindex].count);
        }
        tprintf ("\n");
      }
      if (totalpspace != 0) {
        tprintf ("alloc_mem_p at %d : total held=%d(%d).\n",
          callers[callindex].caller,
          totalpchunks, totalpspace * sizeof (MEMUNION));
        for (freeindex = 0; freeindex < buckets; freeindex++) {
          tprintf ("%d(%d) ",
            callers[callindex].counts[freeindex * 4 + 2],
            callers[callindex].counts[freeindex * 4 +
            3] * sizeof (MEMUNION));
        }
        tprintf ("\n");
      }
      free (callers[callindex].counts);
      callers[callindex].counts = NULL;
    }
  }
}
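
/**********************************************************************
 * Note on the histogram built above: counts[] holds mem_countbuckets
 * groups of four inT32s per caller.  Within each age bucket, slot 0 is
 * the number of live temporary (alloc_mem) chunks, slot 1 their total
 * size, slot 2 the number of live permanent (alloc_mem_p) chunks and
 * slot 3 their total size, all measured in MEMUNIONs.
 **********************************************************************/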


/**********************************************************************
 * MEM_ALLOCATOR::check
 *
 * Check consistency of all memory controlled by this allocator.
 **********************************************************************/

void MEM_ALLOCATOR::check(                     //check consistency
                          const char *string,  //context message
                          inT8 level           //level of check
                         ) {
  MEMBLOCK *block;               //current block
  MEMUNION *chunk;               //current chunk
  MEMUNION *prevchunk;           //previous chunk
  inT32 chunksize;               //size of chunk
  inT32 usedcount;               //no of used chunks
  inT32 usedsize;                //size of used chunks
  inT32 freecount;               //no of free chunks
  inT32 freesize;                //size of free chunks
  inT32 biggest;                 //biggest free chunk
  inT32 totusedcount;            //no of used chunks
  inT32 totusedsize;             //size of used chunks
  inT32 totfreecount;            //no of free chunks
  inT32 totfreesize;             //size of free chunks
  inT32 totbiggest;              //biggest free chunk
  inT32 totblocksize;            //total size of blocks
  inT32 chunkindex;              //index of chunk
  inT32 blockindex;              //index of block

  if (level >= MEMCHECKS)
    tprintf ("\nMEM_ALLOCATOR::check:at '%s'\n", string);
  totusedcount = 0;              //grand totals
  totusedsize = 0;
  totfreecount = 0;
  totfreesize = 0;
  totbiggest = 0;
  totblocksize = 0;
  for (blockindex = 0; blockindex < blockcount; blockindex++) {
                                 //current block
    block = &memblocks[blockindex];
    if (level >= MEMCHECKS)
      tprintf ("Block %d:0x%x-0x%x, size=%d, top=0x%x, l=%d, u=%d\n",
        blockindex, block->blockstart, block->blockend,
        (block->blockend - block->blockstart) * sizeof (MEMUNION),
        block->topchunk, block->lowerspace, block->upperspace);
    usedcount = usedsize = 0;    //zero counters
    freecount = freesize = 0;    //zero counters
    biggest = 0;
                                 //scan all chunks
    for (chunkindex = 0, prevchunk = NULL, chunk = block->blockstart; chunk != block->blockend; chunkindex++, chunk += chunksize) {
      chunksize = chunk->size;   //size of chunk
      if (chunksize < 0)
        chunksize = -chunksize;  //absolute size
      if (level >= FULLMEMCHECKS) {
        tprintf ("%5d=%8d%c caller=%d, age=%d ", (int) chunkindex,
          chunksize * sizeof (MEMUNION),
          chunk->size < 0 ? 'U' : 'F', chunk->owner, chunk->age);
        if (chunkindex % 5 == 4)
          tprintf ("\n");
      }
                                 //illegal sizes
      if (chunksize == 0 || chunk->size == -1
                                 //out of bounds
        || chunk + chunksize - block->blockstart <= 0 || block->blockend - (chunk + chunksize) < 0)
        BADMEMCHUNKS.error ("check_mem", ABORT,
          "Block=%p, Prev chunk=%p, Chunk=%p, Size=%x",
          block, prevchunk, chunk,
          (int) chunk->size);

      else if (chunk->size < 0) {
        usedcount++;             //used block
        usedsize += chunksize;
      }
      else {
        freecount++;             //free block
        freesize += chunksize;
        if (chunksize > biggest)
          biggest = chunksize;
      }
      prevchunk = chunk;
    }
    if (level >= MEMCHECKS) {
      if (level >= FULLMEMCHECKS)
        tprintf ("\n");
      tprintf ("%d chunks in use, total size=%d bytes\n",
        (int) usedcount, usedsize * sizeof (MEMUNION));
      tprintf ("%d chunks free, total size=%d bytes\n",
        (int) freecount, freesize * sizeof (MEMUNION));
      tprintf ("Largest free fragment=%d bytes\n",
        biggest * sizeof (MEMUNION));
    }
    totusedcount += usedcount;   //grand totals
    totusedsize += usedsize;
    totfreecount += freecount;
    totfreesize += freesize;
    if (biggest > totbiggest)
      totbiggest = biggest;
    totblocksize += block->blockend - block->blockstart;
  }
  if (level >= MEMCHECKS) {
    tprintf ("%d total blocks in use, total size=%d bytes\n",
      blockcount, totblocksize * sizeof (MEMUNION));
    tprintf ("%d total chunks in use, total size=%d bytes\n",
      (int) totusedcount, totusedsize * sizeof (MEMUNION));
    tprintf ("%d total chunks free, total size=%d bytes\n",
      (int) totfreecount, totfreesize * sizeof (MEMUNION));
    tprintf ("Largest free fragment=%d bytes\n",
      totbiggest * sizeof (MEMUNION));
  }
  if (level >= MEMCHECKS)
    display_counts();
}
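
/**********************************************************************
 * Note on check levels (inferred from the comparisons above and the
 * calls made elsewhere in this file): JUSTCHECKS only validates the
 * chunk chains, MEMCHECKS additionally prints per-block and grand
 * totals plus the caller histogram, and FULLMEMCHECKS also dumps every
 * chunk.  A typical call is:
 *   check_mem ("context message", MEMCHECKS);
 **********************************************************************/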


/**********************************************************************
 * MEM_ALLOCATOR::alloc_p
 *
 * Allocate permanent space which will never be returned.
 * This space is allocated from the top end of a memory block to
 * avoid the fragmentation which would result from alternate use
 * of alloc_mem for permanent and temporary blocks.
 **********************************************************************/

void *MEM_ALLOCATOR::alloc_p(              //permanent space
                             inT32 count,  //block size to allocate
                             void *caller  //ptr to caller
                            ) {
  MEMBLOCK *block;               //current block
  MEMUNION *chunk;               //current chunk

  if (count < 1 || count > biggestblock)
                                 //request too big
    MEMTOOBIG.error ("alloc_mem_p", ABORT, "%d", (int) count);

  count += sizeof (MEMUNION) - 1;//round up to word
  count /= sizeof (MEMUNION);
  count++;                       //and add one
  if (topblock == NULL) {
    topblock = new_block (count);//get first block
    currblock = topblock;
    if (topblock == NULL) {
      check_mem ("alloc_mem_p returning NULL", MEMCHECKS);
      return NULL;
    }
  }
  block = topblock;              //current block
  do {
    chunk = block->topchunk;
    if (chunk->size < count)
      block = block->next;       //try next block
  }
                                 //until all tried
  while (chunk->size < count && block != topblock);
  if (chunk->size < count) {     //still no good
    chunk = (MEMUNION *) alloc ((count - 1) * sizeof (MEMUNION), caller);
    //try last resort
    if (chunk != NULL)
      return chunk;
    check_mem ("alloc_mem_p returning NULL", MEMCHECKS);
    return NULL;
  }
  block->upperspace -= count;    //less above freechunk
  if (chunk->size > count) {
    chunk->size -= count;
    chunk += chunk->size;
  }
  chunk->size = -count;          //mark as in use
  if (mem_mallocdepth > 0) {
    set_owner(chunk, caller);
  }
  else {
    chunk->owner = 0;
    chunk->age = 0;
  }
  return chunk + 1;              //created chunk
}
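
/**********************************************************************
 * Usage note: within this file, new_struct_block() below obtains its
 * structure blocks through this path via alloc_mem_p:
 *   element = (MEMUNION *) alloc_mem_p (STRUCT_BLOCK_SIZE * sizeof (MEMUNION));
 * Such blocks stay allocated until old_struct_block() releases a batch
 * of spares with free_mem().
 **********************************************************************/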


/**********************************************************************
 * MEM_ALLOCATOR::alloc
 *
 * Return a pointer to a buffer of count bytes aligned for any type.
 **********************************************************************/

void *MEM_ALLOCATOR::alloc(              //get memory
                           inT32 count,  //no of bytes to get
                           void *caller  //ptr to caller
                          ) {
  MEMBLOCK *block;               //current block
  MEMUNION *chunk;               //current chunk
  inT32 chunksize;               //size of free chunk
  MEMUNION *chunkstart;          //start of free chunk

  if (count < 1 || count > biggestblock)
    MEMTOOBIG.error ("alloc_mem", ABORT, "%d", (int) count);
  //request too big

  count += sizeof (MEMUNION) - 1;//round up to word
  count /= sizeof (MEMUNION);
  count++;                       //and add one
  if (currblock == NULL) {
                                 //get first block
    currblock = new_block (count);
    topblock = currblock;
    if (currblock == NULL) {
      check_mem ("alloc_mem returning NULL", MEMCHECKS);
      return NULL;
    }
  }
  block = currblock;             //current block
  if (block->upperspace <= block->lowerspace) {
                                 //restart chunklist
    block->freechunk = block->blockstart;
    block->upperspace += block->lowerspace;
    block->lowerspace = 0;       //correct space counts
  }
  chunk = block->freechunk;      //current free chunk
  if (chunk->size < count) {     //big enough?
    do {
                                 //search for free chunk
      chunk = block->find_chunk (count);
      if (chunk->size < count)
        block = block->next;     //try next block
    }
                                 //until all tried
    while (chunk->size < count && block != currblock);
    if (chunk->size < count) {   //still no good
                                 //get a new block
      currblock = new_block (count);
      topblock = currblock;      //set perms here too
      if (currblock == NULL) {
        check_mem ("alloc_mem returning NULL", MEMCHECKS);
        return NULL;
      }
      block = currblock;
      chunk = block->freechunk;  //bound to be big enough
    }
  }
  chunkstart = chunk;            //start of chunk
  if (chunk == block->topchunk && chunk + count != block->blockend)
    block->topchunk += count;    //top has moved
  block->upperspace -= count;    //upper part used
  chunksize = chunk->size;       //size of free chunk
  chunk->size = -count;          //mark as used
  chunk += count;                //next free space
  totalmem -= count;             //no of free elements
  if (chunksize > count)         //bigger than exact?
                                 //remaining space
    chunk->size = chunksize - count;
  else if (chunk == block->blockend) {
    chunk = block->blockstart;   //restart block
    block->upperspace = block->lowerspace;
    block->lowerspace = 0;       //fix space counts
  }
  block->freechunk = chunk;      //next free block
  if (mem_mallocdepth > 0) {
    set_owner(chunkstart, caller);
  }
  else {
    chunkstart->owner = 0;
    chunkstart->age = 0;
  }
  chunkstart++;                  //start of block
  return chunkstart;             //pointer to block
}
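
/**********************************************************************
 * Usage sketch (illustrative only; it assumes the alloc_mem/free_mem
 * wrappers declared through memry.h route to alloc() and dealloc() on
 * one of the allocators defined above):
 *
 *   inT32 *data = (inT32 *) alloc_mem (100 * sizeof (inT32));
 *   if (data != NULL) {
 *     ...use the buffer...
 *     free_mem(data);            //hand it back to the allocator
 *   }
 **********************************************************************/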


/**********************************************************************
 * MEM_ALLOCATOR::set_owner
 *
 * Set the owner and time stamp of the block and check if needed.
 **********************************************************************/

void MEM_ALLOCATOR::set_owner(                       //set owner
                              MEMUNION *chunkstart,  //chunk to set
                              void *caller           //ptr to caller
                             ) {
  uinT16 callindex;              //hash code

  callindex = hash_caller (caller);
  chunkstart->owner = callindex;
                                 //store evidence
  chunkstart->age = malloc_serial;
  malloc_minor_serial++;
  if (malloc_minor_serial >= malloc_div_ratio) {
    malloc_minor_serial = 0;
    malloc_serial++;             //count calls
    if (malloc_serial == 0) {
                                 //wrap around
      reduce_counts();  //fix serial numbers
      malloc_serial = MAX_INT16 + 1;
                                 //all worth double
      malloc_div_ratio += malloc_div_ratio;
    }
  }
  malloc_auto_count++;
  if (mem_checkfreq > 0 && malloc_auto_count >= (uinT32) mem_checkfreq) {
    malloc_auto_count = 0;
    check_mem ("Auto check", MEMCHECKS);
  }
}
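
/**********************************************************************
 * Note on the age stamps set above: malloc_serial only advances once
 * every malloc_div_ratio allocations.  When it wraps to zero, all
 * recorded ages are halved by reduce_counts(), the serial restarts
 * just above MAX_INT16, and malloc_div_ratio doubles, so from then on
 * each age unit represents twice as many allocations.
 **********************************************************************/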


/**********************************************************************
 * MEM_ALLOCATOR::dealloc
 *
 * Free a block allocated by alloc (or alloc_p).
 * It checks that the pointer is legal and maintains counts of the
 * amount of free memory above and below the current free pointer.
 **********************************************************************/

void MEM_ALLOCATOR::dealloc(                 //free memory
                            void *oldchunk,  //chunk to free
                            void *caller     //ptr to caller
                           ) {
  MEMUNION *chunk;               //current chunk
  MEMBLOCK *block;               //current block

  if (oldchunk == NULL)
    FREENULLPTR.error ("free_mem", ABORT, NULL);
  chunk = (MEMUNION *) oldchunk;
  block = currblock;             //current block
  if (block == NULL)
    NOTMALLOCMEM.error ("free_mem", ABORT, NULL);
  do {
    block = block->next;
  }
                                 //outside the block
  while ((chunk - block->blockstart < 0 || block->blockend - chunk <= 0)
    && block != currblock);

  if (chunk - block->blockstart < 0 || block->blockend - chunk <= 0)
                                 //in no block
    NOTMALLOCMEM.error ("free_mem", ABORT, NULL);

  chunk--;                       //point to size
  if (chunk->size == 0)
                                 //zero size
    FREEILLEGALPTR.error ("free_mem", ABORT, NULL);
  else if (chunk->size > 0)
                                 //already free
    FREEFREEDBLOCK.error ("free_mem", ABORT, NULL);
  chunk->size = -chunk->size;    //mark it free
  if (mem_freedepth > 0 && callers != NULL) {
                                 //count calls
    callers[chunk->owner].count_freeer (caller);
  }
  totalmem += chunk->size;       //total free memory
  if (chunk - block->freechunk < 0)
                                 //extra below
    block->lowerspace += chunk->size;
  else
                                 //extra above
    block->upperspace += chunk->size;
}


/**********************************************************************
 * MEM_ALLOCATOR::new_block
 *
 * Gets a new big block of memory from malloc for use by alloc_mem.
 **********************************************************************/

MEMBLOCK *MEM_ALLOCATOR::new_block(               //get new big block
                                   inT32 minsize  //minimum size
                                  ) {
  MEMBLOCK *newblock;            //new block

  if (blockcount >= MAXBLOCKS) {
                                 //can't have another
    NOMOREBLOCKS.error ("mem_new_block", TESSLOG, NULL);
    return NULL;
  }
  if (mem_checkfreq != 0) {
    tprintf ("\nGetting new block due to request size of %d",
      minsize * sizeof (MEMUNION));
    tprintf (" from %d from %d from %d from %d from %d\n",
      trace_caller (3), trace_caller (4), trace_caller (5),
      trace_caller (6), trace_caller (7));
    check_mem ("Getting new block", MEMCHECKS);
  }
                                 //get a new one
  newblock = &memblocks[blockcount++];
  while (memsize < minsize)
    memsize *= 4;                //go up in sizes
                                 //get a big block
  newblock->blockstart = (MEMUNION *)
    malloc (memsize * sizeof (MEMUNION));
  if (newblock->blockstart == NULL) {
    NOMOREMEM.error ("mem_new_block", TESSLOG, NULL);

    #ifdef __UNIX__
    raise(SIGTTOU);  //hangup for js
    #endif
    return NULL;
  }
                                 //end of block
  newblock->blockend = newblock->blockstart + memsize;
                                 //first free chunk
  newblock->freechunk = newblock->blockstart;
  newblock->topchunk = newblock->blockstart;
  newblock->lowerspace = 0;
  newblock->upperspace = memsize;//amount available
                                 //set pointer
  newblock->freechunk->size = memsize;
  newblock->freechunk->owner = 0;
  newblock->freechunk->age = 0;

  totalmem += memsize;           //total assigned mem

  if (memsize < maxsize)
    memsize *= 4;                //successively bigger
  if (currblock == NULL) {
    newblock->next = newblock;   //first block
  }
  else {
                                 //insert in list
    newblock->next = currblock->next;
    currblock->next = newblock;
  }
  return newblock;               //new block
}
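
/**********************************************************************
 * Note on block sizing (from the code above and init()): the first
 * block holds memsize = firstsize MEMUNIONs, each later block is
 * roughly four times bigger until maxsize (the lastsize given to
 * init()) is reached, and every block is linked into a circular list
 * through MEMBLOCK::next.
 **********************************************************************/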


/**********************************************************************
 * MEMBLOCK::find_chunk
 *
 * Find a chunk within the block which is big enough for the given request.
 **********************************************************************/

MEMUNION *MEMBLOCK::find_chunk(             //find free chunk
                               inT32 count  //size required
                              ) {
  MEMUNION *chunk;               //current chunk
  inT32 chunksize;               //size of free chunk
  MEMUNION *chunkstart;          //start of free chunk
  inT32 spaceshift;              //shift in lowerspace

  if (upperspace <= lowerspace) {
    freechunk = blockstart;      //restart chunklist
    upperspace += lowerspace;
    lowerspace = 0;              //correct space counts
  }
  chunk = freechunk;             //current free chunk
  if (chunk->size < count) {     //big enough?
    spaceshift = 0;
    do {
      while (chunk->size < 0) {  //find free chunk
        chunk -= chunk->size;    //skip forward
        if (chunk == blockend) {
          chunk = blockstart;    //restart block
                                 //gone back to start
          spaceshift = -lowerspace;
        }
        if (chunk == freechunk)
          return chunk;          //gone all round & failed
      }
      chunkstart = chunk;        //start of chunk
      chunksize = chunk->size;
      chunk += chunk->size;
      while (chunk != blockend   //until end
      && chunk->size > 0) {      //or used
        chunksize += chunk->size;//coalesce free blocks
                                 //gone all round
        if (chunk == freechunk) {
                                 //ensure it is at end
          freechunk += chunk->size;
          upperspace -= chunk->size;
          lowerspace += chunk->size;
          spaceshift -= chunk->size;
        }
        if (chunk == topchunk)   //got back to end one
          topchunk = chunkstart; //end one bigger
        chunk += chunk->size;    //get next block
      }
                                 //new big block
      chunkstart->size = chunksize;
      if (chunksize < count)
        spaceshift += chunksize; //skipping free block
      if (chunk == blockend) {
        chunk = blockstart;      //back to start
        if (freechunk == blockend) {
          freechunk = blockstart;//so is freechunk
          upperspace += lowerspace;
          lowerspace = 0;
          spaceshift = 0;
        }
        else
                                 //so is shift
            spaceshift = -lowerspace;
      }
    }
    while (chunksize < count && chunk != freechunk);
    if (chunksize < count)
      return chunk;              //failed
    lowerspace += spaceshift;    //get space counts right
    upperspace -= spaceshift;
    freechunk = chunkstart;
    return chunkstart;           //success
  }
  return chunk;                  //easy
}


#ifdef __UNIX__
/**********************************************************************
 * trace_caller
 *
 * Return the return address of the caller at a given depth back.
 * 0 gives the return address of the caller to trace_caller.
 * S300 ONLY!!
 **********************************************************************/
//#pragma OPTIMIZE OFF                                                  /*force link*/

void *trace_caller(             //trace stack
                   inT32 depth  //depth to trace
                  ) {
  #ifdef hp9000s800

  unsigned sp, pc, rp;           //registers
  UWREC rec1;                    //for unwinder
  UWREC rec2;

  sp = (unsigned) (&depth + 9);
  pc = *(int *) (sp - 20);
  rp = 0;
  get_pcspace(&rec1, pc);
  rec1.cur_frsize = 0xc0;
  rec1.currlo = pc & ~3;
  rec1.curdp = 0;
  rec1.toprp = rp;

  while (depth > 0) {
    if (U_get_previous_frame (&rec1, &rec2))
      return NULL;
    rec1.currlo = rec2.currlo;
    rec1.cur_frsize = rec2.cur_frsize;
    rec1.cursp = rec2.cursp;
    rec1.currls = rec2.currls;
    rec1.curdp = rec2.curdp;
    depth--;
  }
  return (void *) rec1.currlo;
  #else
  void *a6;                      //address register

  a6 = &depth - 2;
  while (depth > 0) {
    a6 = *(void **) a6;          //follow chain
    depth--;
  }
  return *((void **) a6 + 1);
  #endif
}


//#pragma OPTIMIZE ON

#else

// Fake procedure for non-UNIX
void *trace_caller(             //trace stack
                   inT32 depth  //depth to trace
                  ) {
  return NULL;
}
#endif

/**********************************************************************
 * identify_struct_owner
 *
 * Get an index into the table of owners of structures.
 * Implemented very inefficiently, but only a debug tool!
 **********************************************************************/

inT32 identify_struct_owner(                     //get table index
                            inT32 struct_count,  //cell size
                            const char *name     //name of type
                           ) {
  inT32 index;                   //index to structure

  for (index = 0; index < name_counts[struct_count]
    && strcmp (name, owner_names[struct_count][index]); index++);
  if (index < MAX_CLASSES) {
    if (index == name_counts[struct_count]) {
      name_counts[struct_count]++;
      owner_names[struct_count][index] = name;
      owner_counts[struct_count][index] = 0;
    }
  }
  return index;
}


/**********************************************************************
 * check_struct
 *
 * Check a particular structure size for consistency.
 **********************************************************************/

void check_struct(             //check a structure
                  inT8 level,  //print control
                  inT32 count  //no of bytes
                 ) {
  MEMUNION *element;             //current element
  MEMUNION *block;               //current block
  inT32 struct_count;            //no of required structs
  inT32 block_count;             //no of structure blocks
  inT32 free_count;              //size of freelist
  inT32 name_index;              //named holder
  inT32 named_total;             //total held by names

                                 //no of MEMUNIONS-1
  struct_count = (count - 1) / sizeof (MEMUNION);
  if (struct_count < 0 || struct_count >= MAX_STRUCTS)
                                 //request too big
    MEMTOOBIG.error ("check_struct", ABORT, "%d", (int) count);

  free_count = 0;                //size of freelist
                                 //count blocks
  for (block_count = 0, block = struct_blocks[struct_count]; block != NULL; block = block->ptr, block_count++);
  if (block_count > 0) {
                                 //scan freelist
    for (element = free_structs[struct_count]; element != NULL; element = element->ptr)
      free_count++;
    if (level >= MEMCHECKS) {
      tprintf ("No of structs of size %d in use=%d,",
        (int) count, (int) structs_in_use[struct_count]);
      tprintf (" %d free", free_count);
      tprintf (" in %d blocks, total space=%d\n",
        (int) block_count,
        block_count * STRUCT_BLOCK_SIZE * sizeof (MEMUNION));
      for (named_total = 0, name_index = 0;
      name_index < name_counts[struct_count]; name_index++) {
        tprintf ("No held by %s=%d\n",
          owner_names[struct_count][name_index],
          owner_counts[struct_count][name_index]);
        named_total += owner_counts[struct_count][name_index];
      }
      tprintf ("Total held by names=%d\n", named_total);
    }
  }
  if (structs_in_use[struct_count] + free_count
    != block_count * (STRUCT_BLOCK_SIZE / (struct_count + 1) - 1))
    BADSTRUCTCOUNT.error ("check_struct", ABORT, "%d+%d=%d",
      structs_in_use[struct_count], free_count,
      block_count * (STRUCT_BLOCK_SIZE /
      (struct_count + 1) - 1));
}


/**********************************************************************
 * check_structs
 *
 * Reports statistics on each maintained structure type by calling
 * check_struct on each possible size.  Only active structure types
 * are reported.
 **********************************************************************/

void check_structs(            //count in use on structs
                   inT8 level  //print control
                  ) {
  inT8 index;                    //index to structs

  for (index = 1; index <= MAX_STRUCTS; index++)
                                 //check number allocated
    check_struct (level, index * sizeof (MEMUNION));
}


/**********************************************************************
 * new_struct_block
 *
 * Allocate space for a new block of structures.  The space is obtained
 * from alloc_mem_p, and a freelist of such blocks is maintained for when
 * the individual structure types get completely freed.
 **********************************************************************/

void *new_struct_block() {  //allocate memory
  MEMUNION *element;             //current element
  MEMUNION *returnelement;       //return value

  returnelement = free_block;
  if (returnelement == NULL) {
                                 //need a new block
    element =
      (MEMUNION *) alloc_mem_p (STRUCT_BLOCK_SIZE * sizeof (MEMUNION));
    if (element == NULL)
      return NULL;               //can't get more
    returnelement = element;     //going to return 1st
  }
  else {
                                 //new free one
    free_block = returnelement->ptr;
  }
  return returnelement;          //free cell
}


/**********************************************************************
 * old_struct_block
 *
 * Free memory allocated by new_struct_block.  The block is returned
 * to a freelist ready for a new call to new_struct_block.
 * This saves confusion over freeing "permanent" blocks, yet
 * allows them to be recycled for different structures.
 **********************************************************************/

void old_struct_block(                     //free a structure block
                      MEMUNION *deadblock  //block to free
                     ) {
  if (deadblock != NULL) {
    deadblock->ptr = free_block; //add to freelist
    free_block = deadblock;
    free_struct_blocks++;
  }
  if (free_struct_blocks > MAX_FREE_S_BLOCKS) {
    MEMUNION *next_block;        //next in list
    deadblock = free_block;
    do {
      next_block = deadblock->ptr;
      free_mem(deadblock);  //really free it
      deadblock = next_block;
    }
    while (deadblock != NULL);
    free_struct_blocks = 0;
    free_block = NULL;
  }
}
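
/**********************************************************************
 * Recycling sketch (illustrative only; the real callers are the
 * fixed-size structure allocators that sit alongside this code):
 *
 *   MEMUNION *block = (MEMUNION *) new_struct_block ();  //grab a block
 *   if (block != NULL) {
 *     ...carve the block into fixed-size structure cells...
 *     old_struct_block(block);   //park it on the freelist for reuse
 *   }
 **********************************************************************/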