/*                      M E M A L L O C . C
 * BRL-CAD
 *
 * Copyright (c) 2004-2014 United States Government as represented by
 * the U.S. Army Research Laboratory.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * version 2.1 as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this file; see the file named COPYING for more
 * information.
 */
/** @addtogroup librt */
/** @{ */
/** @file librt/memalloc.c
 *
 * Functions -
 *   rt_memalloc           allocate 'size' of memory from a given map
 *   rt_memalloc_nosplit   allocate the smallest adequate free block, unsplit
 *   rt_memget             allocate 'size' of memory from map at 'place'
 *   rt_memget_nosplit     allocate the free block at 'place', unsplit
 *   rt_memfree            return 'size' of memory to map at 'place'
 *   rt_mempurge           free everything on current memory chain
 *   rt_memprint           print a map
 *   rt_memclose           release the buffers held on the freemap queue
 *
 * The structure of the displaylist memory map chains consists of a
 * non-zero count and the base address of that many contiguous units.
 * The addresses are increasing and the list is terminated with the
 * first zero link.
 *
 * rt_memalloc() and rt_memfree() use these tables to allocate
 * displaylist memory.
 *
 * For each memory map there exists a queue (coremap).  There also
 * exists a queue of free buffers which can be enqueued onto either of
 * the previous queues.  Initially all of the buffers are placed on
 * the `freemap' queue.  Whenever a buffer is freed, either because
 * its ends are coalesced in rt_memfree() or because its size drops to
 * zero in rt_memalloc(), the mapping buffer is taken off its queue
 * and returned to the `freemap' queue.
 *
 */
/** @} */

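/*
 * Typical usage (a minimal sketch, not taken from this file; the map
 * head, sizes and addresses below are illustrative only).  The caller
 * owns the struct mem_map * head and seeds the map by "freeing" the
 * address range it wishes to manage:
 *
 *   struct mem_map *map = MAP_NULL;
 *   size_t addr;
 *
 *   rt_memfree(&map, 4096, (off_t)0);     (seed: units 0..4095 are free)
 *   addr = rt_memalloc(&map, 64);         (carve 64 units out of the map)
 *   rt_memfree(&map, 64, (off_t)addr);    (return them when finished)
 *   rt_mempurge(&map);                    (hand the whole map back)
 */
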
#include "common.h"

#include "bio.h"

#include "vmath.h"
#include "raytrace.h"


/* XXX not PARALLEL */
/* Allocation/Free spaces */
static struct mem_map *rt_mem_freemap = MAP_NULL;    /* Freelist of buffers */

/* Flags used by `type' in rt_memfree() */
#define M_TMTCH 00001    /* Top match */
#define M_BMTCH 00002    /* Bottom match */
#define M_TOVFL 00004    /* Top overflow */
#define M_BOVFL 00010    /* Bottom overflow */

size_t
rt_memalloc(struct mem_map **pp, register size_t size)
{
    register struct mem_map *prevp = MAP_NULL;
    register struct mem_map *curp;
    size_t addr;

    if (size == 0)
        return 0L;    /* fail */

    for (curp = *pp; curp; curp = (prevp=curp)->m_nxtp) {
        if (curp->m_size >= size)
            break;
    }

    if (curp == MAP_NULL)
        return 0L;    /* No more space */

    addr = (size_t)curp->m_addr;
    curp->m_addr += (off_t)size;

    /* If the element size goes to zero, put it on the freelist */

    if ((curp->m_size -= size) == 0) {
        if (prevp)
            prevp->m_nxtp = curp->m_nxtp;
        else
            *pp = curp->m_nxtp;           /* Click list down at start */
        curp->m_nxtp = rt_mem_freemap;    /* Link it in */
        rt_mem_freemap = curp;            /* Make it the start */
    }

    return addr;
}

/**
 * Find the smallest free element that can hold 'size' units and
 * remove it from the map whole, without splitting it.  The element is
 * linked onto the internal freemap, so the caller should use (or
 * copy) its m_addr and m_size before making further map calls.
 * Returns MAP_NULL if no element is large enough.
 */
struct mem_map *
rt_memalloc_nosplit(struct mem_map **pp, register size_t size)
{
    register struct mem_map *prevp = MAP_NULL;
    register struct mem_map *curp;
    register struct mem_map *best = MAP_NULL, *best_prevp = MAP_NULL;

    if (size == 0)
        return MAP_NULL;    /* fail */

    for (curp = *pp; curp; curp = (prevp=curp)->m_nxtp) {
        if (curp->m_size < size) continue;
        if (curp->m_size == size) {
            best = curp;
            best_prevp = prevp;
            break;
        }
        /* This element has enough size */
        if (best == MAP_NULL || curp->m_size < best->m_size) {
            best = curp;
            best_prevp = prevp;
        }
    }
    if (!best)
        return MAP_NULL;    /* No space */

    /* Move this element to free list, return it, unsplit */
    if (best_prevp)
        best_prevp->m_nxtp = best->m_nxtp;
    else
        *pp = best->m_nxtp;            /* Click list down at start */
    best->m_nxtp = rt_mem_freemap;     /* Link it in */
    rt_mem_freemap = best;             /* Make it the start */

    return best;
}

size_t
rt_memget(struct mem_map **pp, register size_t size, off_t place)
{
    register struct mem_map *prevp, *curp;
    size_t addr;

    prevp = MAP_NULL;    /* special for first pass through */
    if (size == 0)
        bu_bomb("rt_memget() size==0\n");

    curp = *pp;
    while (curp) {
        /*
         * Assumption: We will always be APPENDING to an existing
         * memory allocation, so we search for a free piece of memory
         * which begins at 'place', without worrying about ones which
         * could begin earlier but be long enough to satisfy this
         * request.
         */
        if (curp->m_addr == place && curp->m_size >= size)
            break;
        curp = (prevp=curp)->m_nxtp;
    }

    if (curp == MAP_NULL)
        return 0L;    /* No space here */

    addr = (size_t)curp->m_addr;
    curp->m_addr += (off_t)size;

    /* If the element size goes to zero, put it on the freelist */
    if ((curp->m_size -= size) == 0) {
        if (prevp)
            prevp->m_nxtp = curp->m_nxtp;
        else
            *pp = curp->m_nxtp;           /* Click list down at start */
        curp->m_nxtp = rt_mem_freemap;    /* Link it in */
        rt_mem_freemap = curp;            /* Make it the start */
    }
    return addr;
}

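/*
 * Append example (a sketch, not from this file; 'base', 'len' and 'n'
 * are hypothetical values describing an existing allocation in 'map').
 * An allocation is grown in place by requesting the units that
 * immediately follow it; rt_memget() returns 0 when no free element
 * begins exactly at the requested place.
 *
 *   if (rt_memget(&map, n, (off_t)(base + len)) != 0)
 *       len += n;    (extended in place)
 */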

/**
 * Returns:
 *   0        Unable to satisfy the request.
 *   <size>   Actual size of the free block, which may be larger than
 *            the requested size.
 *
 * Comments:
 *   The caller is responsible for returning the unused portion (see
 *   the sketch following this function).
 */
size_t
rt_memget_nosplit(struct mem_map **pp, register size_t size, size_t place)
{
    register struct mem_map *prevp, *curp;

    prevp = MAP_NULL;    /* special for first pass through */
    if (size == 0)
        bu_bomb("rt_memget_nosplit() size==0\n");

    curp = *pp;
    while (curp) {
        /*
         * Assumption: We will always be APPENDING to an existing
         * memory allocation, so we search for a free piece of memory
         * which begins at 'place', without worrying about ones which
         * could begin earlier but be long enough to satisfy this
         * request.
         */
        if (curp->m_addr == (off_t)place && curp->m_size >= size) {
            size = curp->m_size;
            /* put this element on the freelist */
            if (prevp)
                prevp->m_nxtp = curp->m_nxtp;
            else
                *pp = curp->m_nxtp;           /* Click list down at start */
            curp->m_nxtp = rt_mem_freemap;    /* Link it in */
            rt_mem_freemap = curp;            /* Make it the start */
            return size;    /* actual size found */
        }
        curp = (prevp=curp)->m_nxtp;
    }

    return 0L;    /* No space found */
}

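/*
 * A sketch (not from this file) of the caller's obligation noted
 * above: if the block found at 'place' is larger than requested, the
 * surplus is handed back to the map with rt_memfree().
 *
 *   size_t got = rt_memget_nosplit(&map, size, place);
 *   if (got > size)
 *       rt_memfree(&map, got - size, (off_t)(place + size));
 */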

/**
 * Return a block of 'size' units starting at 'addr' to the map,
 * coalescing it with any adjacent free elements.
 */
void
rt_memfree(struct mem_map **pp, size_t size, off_t addr)
{
    register int type = 0;
    register struct mem_map *prevp = MAP_NULL;
    register struct mem_map *curp;
    off_t il;
    struct mem_map *tmap;

    if (size == 0)
        return;    /* Nothing to free */

    /* Find the position in the list such that (prevp)<(addr)<(curp) */
    for (curp = *pp; curp; curp = (prevp=curp)->m_nxtp)
        if (addr < curp->m_addr)
            break;

    /* Make up the `type' variable */

    if (prevp) {
        il = prevp->m_addr + (off_t)prevp->m_size;
        if (il > addr)
            type |= M_BOVFL;
        if (il == addr)
            type |= M_BMTCH;
    }
    if (curp) {
        il = addr + (off_t)size;
        if (il > curp->m_addr)
            type |= M_TOVFL;
        if (il == curp->m_addr)
            type |= M_TMTCH;
    }

    if (type & (M_TOVFL|M_BOVFL)) {
        bu_log("rt_memfree(addr=%ld, size=%zu) ERROR type=0%o\n",
               addr, size, type);
        if (prevp)
            bu_log("prevp: m_addr=%ld, m_size=%zu\n",
                   prevp->m_addr, prevp->m_size);
        if (curp)
            bu_log("curp: m_addr=%ld, m_size=%zu\n",
                   curp->m_addr, curp->m_size);
        return;
    }

    /*
     * Now we do the surgery (a worked example follows this function):
     * If there are no matches on the boundaries, we allocate a buffer.
     * If there is one match, we expand the appropriate buffer.
     * If there are two matches, a free buffer is returned.
     */

    switch (type & (M_BMTCH|M_TMTCH)) {
        case M_TMTCH|M_BMTCH:    /* Deallocate top element and expand bottom */
            prevp->m_size += size + curp->m_size;
            prevp->m_nxtp = curp->m_nxtp;
            curp->m_nxtp = rt_mem_freemap;    /* Link into rt_mem_freemap */
            rt_mem_freemap = curp;
            break;

        case M_BMTCH:    /* Expand bottom element */
            prevp->m_size += size;
            break;

        case M_TMTCH:    /* Expand top element downward */
            curp->m_size += size;
            curp->m_addr -= (off_t)size;
            break;

        default:    /* No matches; allocate and insert */
            if ((tmap=rt_mem_freemap) == MAP_NULL)
                BU_ALLOC(tmap, struct mem_map);
            else
                rt_mem_freemap = rt_mem_freemap->m_nxtp;    /* Click one off */

            if (prevp)
                prevp->m_nxtp = tmap;
            else
                *pp = tmap;

            tmap->m_size = size;
            tmap->m_addr = addr;
            tmap->m_nxtp = curp;
    }
}

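/*
 * Coalescing example (a sketch, not from this file; addresses and
 * sizes are illustrative).  Freeing two adjacent ranges leaves a
 * single map element covering both:
 *
 *   rt_memfree(&map, 32, (off_t)0);     (map is {addr=0, size=32})
 *   rt_memfree(&map, 32, (off_t)32);    (map is {addr=0, size=64})
 *
 * The second call takes the M_BMTCH path above and simply expands the
 * existing element; no new buffer is consumed.
 */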

void
rt_mempurge(struct mem_map **pp)
{
    register struct mem_map *prevp = MAP_NULL;
    register struct mem_map *curp;

    if (*pp == MAP_NULL)
        return;

    /* Find the end of the (busy) list */
    for (curp = *pp; curp; curp = (prevp=curp)->m_nxtp)
        ;

    /* Put the whole busy list onto the free list */
    prevp->m_nxtp = rt_mem_freemap;
    rt_mem_freemap = *pp;

    *pp = MAP_NULL;
}


void
rt_memprint(struct mem_map **pp)
{
    register struct mem_map *curp;

    bu_log("rt_memprint(%p): address, length\n", (void *)*pp);
    for (curp = *pp; curp; curp = curp->m_nxtp)
        bu_log(" a=%ld, l=%.5zu\n", curp->m_addr, curp->m_size);
}


/**
 * Release the buffers that have accumulated on the internal freemap
 * queue.
 */
void
rt_memclose(void)
{
    register struct mem_map *mp;

    while ((mp = rt_mem_freemap) != MAP_NULL) {
        rt_mem_freemap = mp->m_nxtp;
        bu_free((char *)mp, "struct mem_map " BU_FLSTR);
    }
}


/*
 * Local Variables:
 * mode: C
 * tab-width: 8
 * indent-tabs-mode: t
 * c-file-style: "stroustrup"
 * End:
 * ex: shiftwidth=4 tabstop=8
 */