JsonCpp project page | JsonCpp home page

json_batchallocator.h
Go to the documentation of this file.
1 // Copyright 2007-2010 Baptiste Lepilleur
2 // Distributed under MIT license, or public domain if desired and
3 // recognized in your jurisdiction.
4 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
5 
6 #ifndef JSONCPP_BATCHALLOCATOR_H_INCLUDED
7 #define JSONCPP_BATCHALLOCATOR_H_INCLUDED
8 
9 #include <stdlib.h>
10 #include <assert.h>
11 
12 #ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
13 
14 namespace Json {
15 
16 /* Fast memory allocator.
17  *
18  * This memory allocator allocates memory for a batch of object (specified by
19  * the page size, the number of object in each page).
20  *
21  * It does not allow the destruction of a single object. All the allocated
22  * objects can be destroyed at once. The memory can be either released or reused
23  * for future allocation.
24  *
25  * The in-place new operator must be used to construct the object using the
26  * pointer returned by allocate.
27  */
template <typename AllocatedType, const unsigned int objectPerAllocation>
class BatchAllocator {
public:
  /// \param objectsPerPage  Number of allocation slots reserved per page.
  ///        Each slot holds objectPerAllocation contiguous AllocatedType
  ///        objects. Must be >= 16.
  BatchAllocator(unsigned int objectsPerPage = 255)
      : freeHead_(0), objectsPerPage_(objectsPerPage) {
    // A released slot is reused to store the free-list link, so each slot
    // must be at least pointer-sized.
    assert(sizeof(AllocatedType) * objectPerAllocation >=
           sizeof(AllocatedType*));
    assert(objectsPerPage >= 16);
    // Dummy empty page so allocate() can always dereference currentBatch_.
    batches_ = allocateBatch(0);
    currentBatch_ = batches_;
  }

  /// Releases every page at once. Destructors of the allocated objects are
  /// NOT invoked; callers must destroy objects explicitly if required.
  ~BatchAllocator() {
    for (BatchInfo* batch = batches_; batch;) {
      BatchInfo* nextBatch = batch->next_;
      free(batch);
      batch = nextBatch;
    }
  }

  /// Returns uninitialized storage for objectPerAllocation objects.
  /// The caller must construct in place (placement new) and must never
  /// pass the pointer to delete/free; use release() instead.
  AllocatedType* allocate() {
    if (freeHead_) // returns node from free list.
    {
      AllocatedType* object = freeHead_;
      freeHead_ = *(AllocatedType**)object; // pop: slot stores next pointer
      return object;
    }
    if (currentBatch_->used_ == currentBatch_->end_) {
      // Current page full: scan remaining pages for one with room.
      currentBatch_ = currentBatch_->next_;
      while (currentBatch_ && currentBatch_->used_ == currentBatch_->end_)
        currentBatch_ = currentBatch_->next_;

      if (!currentBatch_) // no free batch found, allocate a new one
      {
        currentBatch_ = allocateBatch(objectsPerPage_);
        currentBatch_->next_ = batches_; // insert at the head of the list
        batches_ = currentBatch_;
      }
    }
    AllocatedType* allocated = currentBatch_->used_;
    currentBatch_->used_ += objectPerAllocation;
    return allocated;
  }

  /// Returns a slot to the free list for reuse by a later allocate().
  /// The object's destructor is NOT called here.
  void release(AllocatedType* object) {
    assert(object != 0);
    *(AllocatedType**)object = freeHead_; // push onto intrusive free list
    freeHead_ = object;
  }

private:
  struct BatchInfo {
    BatchInfo* next_;
    AllocatedType* used_; // first unallocated object in buffer_
    AllocatedType* end_;  // one past the last object in buffer_
    // Flexible-array idiom: the page is over-allocated so buffer_ really
    // holds objectsPerPage * objectPerAllocation objects (see allocateBatch).
    AllocatedType buffer_[objectPerAllocation];
  };

  // disabled copy constructor and assignment operator.
  BatchAllocator(const BatchAllocator&);
  void operator=(const BatchAllocator&);

  static BatchInfo* allocateBatch(unsigned int objectsPerPage) {
    const unsigned int mallocSize =
        sizeof(BatchInfo) - sizeof(AllocatedType) * objectPerAllocation +
        sizeof(AllocatedType) * objectPerAllocation * objectsPerPage;
    BatchInfo* batch = static_cast<BatchInfo*>(malloc(mallocSize));
    // malloc failure was previously dereferenced unchecked (UB); this file
    // uses asserts (not exceptions) for fatal conditions, so do the same.
    assert(batch != 0);
    batch->next_ = 0;
    batch->used_ = batch->buffer_;
    // BUG FIX: end_ must count objectPerAllocation objects per slot. The old
    // `buffer_ + objectsPerPage` wasted most of each page when
    // objectPerAllocation > 1, and allocate()'s `used_ == end_` test could be
    // stepped over entirely (heap overrun) whenever objectsPerPage was not a
    // multiple of objectPerAllocation, since used_ advances by
    // objectPerAllocation each call.
    batch->end_ = batch->buffer_ + objectsPerPage * objectPerAllocation;
    return batch;
  }

  BatchInfo* batches_;      // head of the list of all pages ever allocated
  BatchInfo* currentBatch_; // page currently being carved up by allocate()
  AllocatedType* freeHead_; // head of intrusive list of released slots
  unsigned int objectsPerPage_;
};
116 
117 } // namespace Json
118 
119 #endif // ifndef JSONCPP_DOC_INCLUDE_IMPLEMENTATION
120 
121 #endif // JSONCPP_BATCHALLOCATOR_H_INCLUDED