
Commit ef74847

Update zone, table and page
1 parent cd4b91f commit ef74847

File tree

5 files changed: 276 additions & 68 deletions

Sources/OpenGraph_SPI/Data/page.cpp

Lines changed: 0 additions & 5 deletions
This file was deleted.

Sources/OpenGraph_SPI/Data/table.cpp

Lines changed: 63 additions & 55 deletions
```diff
@@ -7,6 +7,7 @@
 
 #include "table.hpp"
 #include "page.hpp"
+#include "page.hpp"
 #include "../Util/assert.hpp"
 #include <sys/mman.h>
 #include <malloc/malloc.h>
@@ -21,40 +22,34 @@ uint8_t _shared_table_bytes[sizeof(table) / sizeof(uint8_t)] = {};
 
 malloc_zone_t *_Nullable _malloc_zone;
 
-void table::print() const OG_NOEXCEPT {
-    os_unfair_lock_lock(&_lock);
-    fprintf(stderr, "data::table %p:\n %.2fKB allocated, %.2fKB used, %.2fKB reusable.\n",
-            this,
-            double(_region_capacity - page_size) / 1024.0,
-            double(this->used_pages_num()) / 1024.0,
-            double(_reusable_pages_num) / 1024.0);
-    os_unfair_lock_unlock(&_lock);
+table &table::ensure_shared() {
+    static dispatch_once_t once;
+    dispatch_once_f(&once, nullptr, [](void *_Nullable context){
+        new (_shared_table_bytes) table();
+    });
+    return shared();
 }
 
-//uint64_t table::raw_page_seed(ptr<page> page) {
-//    page.assert_valid();
-//
-//    lock();
-//
-//    uint32_t page_index = (page / page_size) - 1;
-//    uint32_t map_index = page_index / pages_per_map;
-//
-//    uint64_t result = 0;
-//    if (map_index < _page_metadata_maps.size() && _page_metadata_maps[map_index].test(page_index % page_size)) {
-//        auto raw_zone_info = page->zone->info().to_raw_value();
-//        result = raw_zone_info | (1 < 8);
-//    }
-//
-//    unlock();
-//
-//    return result;
-//}
-
-// dealloc_page_locked(OG::data::ptr<OG::data::page>
-
-// alloc_page(OG::data::zone&, unsigned int)
+table &table::shared() { return *reinterpret_cast<data::table *>(&_shared_table_bytes); }
 
-// make_pages_reusable(unsigned int, bool)
+// FIXME
+table::table() {
+    constexpr vm_size_t initial_size = 32 * pages_per_map * page_size;
+    _region_capacity = initial_size;
+    void *region = mmap(nullptr, initial_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+    _region_base = reinterpret_cast<vm_address_t>(region);
+    OGGraphVMRegionBaseAddress = region;
+    if (region == MAP_FAILED) {
+        OG::precondition_failure("memory allocation failure (%u bytes, %u)", _region_capacity, errno);
+    }
+    _data_base = reinterpret_cast<vm_address_t>(region) - page_size;
+    _data_capacity = initial_size + page_size;
+    if (_malloc_zone == nullptr) {
+        malloc_zone_t *zone = malloc_create_zone(0, 0);
+        _malloc_zone = zone;
+        malloc_set_zone_name(zone, "OpenGraph graph data");
+    }
+}
 
 void table::grow_region() OG_NOEXCEPT {
     uint32_t new_size = 4 * _region_capacity;
@@ -91,34 +86,47 @@ void table::grow_region() OG_NOEXCEPT {
     _data_capacity = new_size + page_size;
 }
 
-// FIXME
-table::table() {
-    constexpr vm_size_t initial_size = 32 * pages_per_map * page_size;
-    _region_capacity = initial_size;
-    void *region = mmap(nullptr, initial_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
-    _region_base = reinterpret_cast<vm_address_t>(region);
-    OGGraphVMRegionBaseAddress = region;
-    if (region == MAP_FAILED) {
-        OG::precondition_failure("memory allocation failure (%u bytes, %u)", _region_capacity, errno);
-    }
-    _data_base = reinterpret_cast<vm_address_t>(region) - page_size;
-    _data_capacity = initial_size + page_size;
-    if (_malloc_zone == nullptr) {
-        malloc_zone_t *zone = malloc_create_zone(0, 0);
-        _malloc_zone = zone;
-        malloc_set_zone_name(zone, "OpenGraph graph data");
-    }
+void table::make_pages_reusable(uint32_t page_index, bool flag) OG_NOEXCEPT {
+    precondition_failure("TODO");
 }
 
-table &table::ensure_shared() {
-    static dispatch_once_t once;
-    dispatch_once_f(&once, nullptr, [](void *_Nullable context){
-        new (_shared_table_bytes) table();
-    });
-    return shared();
+ptr<page> table::alloc_page(zone *zone, uint32_t size) OG_NOEXCEPT {
+    precondition_failure("TODO");
 }
 
-table &table::shared() { return *reinterpret_cast<data::table *>(&_shared_table_bytes); }
+void table::dealloc_page_locked(ptr<page> page) OG_NOEXCEPT {
+    precondition_failure("TODO");
+}
+
+uint64_t table::raw_page_seed(ptr<page> page) OG_NOEXCEPT {
+    precondition_failure("TODO");
+//    page.assert_valid();
+//
+//    lock();
+//
+//    uint32_t page_index = (page / page_size) - 1;
+//    uint32_t map_index = page_index / pages_per_map;
+//
+//    uint64_t result = 0;
+//    if (map_index < _page_metadata_maps.size() && _page_metadata_maps[map_index].test(page_index % page_size)) {
+//        auto raw_zone_info = page->zone->info().to_raw_value();
+//        result = raw_zone_info | (1 < 8);
+//    }
+//
+//    unlock();
+//
+//    return result;
+}
+
+void table::print() const OG_NOEXCEPT {
+    os_unfair_lock_lock(&_lock);
+    fprintf(stderr, "data::table %p:\n %.2fKB allocated, %.2fKB used, %.2fKB reusable.\n",
+            this,
+            double(_region_capacity - page_size) / 1024.0,
+            double(this->used_pages_num()) / 1024.0,
+            double(_reusable_pages_num) / 1024.0);
+    os_unfair_lock_unlock(&_lock);
+}
 
 } /* data */
 } /* OG */
```
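
The new `table::ensure_shared()` placement-constructs the shared table into the static `_shared_table_bytes` buffer under a `dispatch_once_f` guard, so `table::shared()` stays a trivial cast of that buffer. Below is a minimal standalone sketch of the same pattern; the `counter` type and the other names in it are illustrative, not part of OpenGraph.

```cpp
// Sketch: dispatch_once_f + placement new into a static byte buffer,
// mirroring the table::ensure_shared()/table::shared() split above.
#include <dispatch/dispatch.h>
#include <cstdint>
#include <cstdio>
#include <new>

namespace {

class counter {
public:
    uint32_t next() { return ++_value; }
private:
    uint32_t _value = 0;
};

// Raw storage for the singleton; nothing is constructed here until
// ensure_shared() runs its once-block.
alignas(counter) uint8_t _shared_counter_bytes[sizeof(counter)] = {};

counter &shared() { return *reinterpret_cast<counter *>(&_shared_counter_bytes); }

counter &ensure_shared() {
    static dispatch_once_t once;
    dispatch_once_f(&once, nullptr, [](void *) {
        new (_shared_counter_bytes) counter();
    });
    return shared();
}

} // namespace

int main() {
    std::printf("%u\n", ensure_shared().next()); // 1
    std::printf("%u\n", shared().next());        // 2
    return 0;
}
```

Splitting the accessor this way lets hot paths call `shared()` without paying for the once-guard, provided something has already called `ensure_shared()`.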

Sources/OpenGraph_SPI/Data/table.hpp

Lines changed: 22 additions & 8 deletions
```diff
@@ -25,9 +25,7 @@ class table {
 public:
     static table &ensure_shared();
     static table &shared();
-
-    table();
-
+
     OG_INLINE OG_CONSTEXPR
     vm_address_t data_base() const OG_NOEXCEPT { return _data_base; };
 
@@ -52,10 +50,25 @@ class table {
     OG_INLINE OG_CONSTEXPR
     uint32_t reusable_pages_num() const OG_NOEXCEPT { return _reusable_pages_num; };
 
-    void print() const OG_NOEXCEPT;
+    OG_INLINE OG_CONSTEXPR
+    uint32_t make_zone_id() {
+        _zones_num += 1;
+        return _zones_num;
+    }
+
+    table();
 
     void grow_region() OG_NOEXCEPT;
 
+    void make_pages_reusable(uint32_t page_index, bool flag) OG_NOEXCEPT;
+
+    ptr<page> alloc_page(zone *zone, uint32_t size) OG_NOEXCEPT;
+
+    void dealloc_page_locked(ptr<page> page) OG_NOEXCEPT;
+
+    uint64_t raw_page_seed(ptr<page> page) OG_NOEXCEPT;
+
+    void print() const OG_NOEXCEPT;
 private:
     /// _region_base - page_size
     vm_address_t _data_base;
@@ -79,11 +92,12 @@ class table {
 
     using remapped_region = std::pair<vm_address_t, int64_t>;
     vector<remapped_region, 0, uint32_t> _remapped_regions = {};
-    //
+
     constexpr static unsigned int pages_per_map = 64;
-    // using page_map_type = std::bitset<pages_per_map>;
-    // vector<page_map_type, 0, uint32_t> _page_maps = {};
-    // vector<page_map_type, 0, uint32_t> _page_metadata_maps = {};
+
+    using page_map_type = std::bitset<pages_per_map>;
+    vector<page_map_type, 0, uint32_t> _page_maps = {};
+    vector<page_map_type, 0, uint32_t> _page_metadata_maps = {};
 
 }; /* table */
 
```
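
The header re-enables the two-level page bookkeeping: `pages_per_map = 64`, one `std::bitset<pages_per_map>` per group of 64 pages, and growable vectors of those bitsets (`_page_maps`, `_page_metadata_maps`). The sketch below walks the same indexing with standard containers in place of the project's own `vector`; the `page_maps` wrapper, its `set`/`test` helpers, and the 4096-byte `page_size` are assumptions for illustration, and it indexes the bitset with `page_index % pages_per_map` (the commented-out `raw_page_seed` in table.cpp uses `page_index % page_size` at that spot).

```cpp
// Sketch of two-level page bookkeeping: one bit per page, 64 bits per map,
// a vector of maps covering the whole region.
#include <bitset>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr unsigned int pages_per_map = 64;
constexpr uint32_t page_size = 4096; // illustrative value only

using page_map_type = std::bitset<pages_per_map>;

struct page_maps {
    std::vector<page_map_type> maps;

    // Mark the page starting at `offset` (a multiple of page_size past the data base).
    void set(uint32_t offset) {
        uint32_t page_index = (offset / page_size) - 1; // same indexing as raw_page_seed
        uint32_t map_index = page_index / pages_per_map;
        if (map_index >= maps.size()) {
            maps.resize(map_index + 1); // new maps start with all bits clear
        }
        maps[map_index].set(page_index % pages_per_map);
    }

    bool test(uint32_t offset) const {
        uint32_t page_index = (offset / page_size) - 1;
        uint32_t map_index = page_index / pages_per_map;
        return map_index < maps.size() && maps[map_index].test(page_index % pages_per_map);
    }
};

int main() {
    page_maps metadata;
    metadata.set(3 * page_size);                       // mark the third page
    std::printf("%d\n", metadata.test(3 * page_size)); // 1
    std::printf("%d\n", metadata.test(5 * page_size)); // 0
    return 0;
}
```
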
Sources/OpenGraph_SPI/Data/zone.cpp

Lines changed: 121 additions & 0 deletions
New file:

```cpp
//
//  zone.cpp
//  OpenGraph_SPI

#include "zone.hpp"
#include "table.hpp"
#include "page.hpp"
#include "../Util/assert.hpp"

namespace OG {
namespace data {

void zone::clear() OG_NOEXCEPT {
    table::shared().lock();
    while (last_page()) {
        auto page = last_page();
        _last_page = page->previous;
        table::shared().dealloc_page_locked(page);
    }
    table::shared().unlock();
}

ptr<void> zone::alloc_slow(uint32_t size, uint32_t alignment_mask) OG_NOEXCEPT {
    if (last_page()) {
        // check if we can use remaining bytes in this page
        ptr<void> next_bytes = last_page() + last_page()->in_use;
        if (next_bytes.page_ptr() == _last_page) {
            ptr<bytes_info> aligned_next_bytes = next_bytes.aligned<bytes_info>();
            int32_t remaining_size = _last_page->total - _last_page->in_use + (next_bytes - aligned_next_bytes);
            if (remaining_size >= sizeof(bytes_info)) {
                bytes_info *remaining_bytes = aligned_next_bytes.get();
                remaining_bytes->next = _free_bytes;
                remaining_bytes->size = remaining_size;
                _free_bytes = aligned_next_bytes;
            }

            // consume this entire page
            _last_page->in_use = _last_page->total;
        }
    }

    ptr<page> new_page;
    if (size <= page_size / 2) {
        new_page = table::shared().alloc_page(this, page_size);
        new_page->previous = _last_page;
        _last_page = new_page;
    } else {
        uint32_t aligned_size = ((sizeof(page) + size) + alignment_mask) & ~alignment_mask;
        new_page = table::shared().alloc_page(this, aligned_size);
        if (_last_page) {
            // It's less likely we will be able to alloc unused bytes from this page,
            // so insert it before the last page.
            new_page->previous = _last_page->previous;
            _last_page->previous = new_page;
        } else {
            _last_page = new_page;
        }
    }

    int32_t aligned_used_bytes = (new_page->in_use + alignment_mask) & ~alignment_mask;

    // Sanity check
    if (aligned_used_bytes + size > new_page->total) {
        precondition_failure("internal error");
    }

    new_page->in_use = aligned_used_bytes + size;
    return new_page + aligned_used_bytes;
};

void zone::print() const OG_NOEXCEPT {
    unsigned long num_pages = 0;
    double pages_total_kb = 0.0;
    double pages_in_use_kb = 0.0;
    if (_last_page) {
        int64_t pages_total = 0;
        int64_t pages_in_use = 0;
        for (auto page = _last_page; page; page = page->previous) {
            num_pages++;
            pages_total += page->total;
            pages_in_use += page->in_use;
        }
        pages_total_kb = pages_total / 1024.0;
        pages_in_use_kb = pages_in_use / 1024.0;
    }

    unsigned long num_free_elements = 0;
    unsigned long free_bytes = 0;
    if (_free_bytes) {
        for (auto bytes = _free_bytes; bytes; bytes = bytes->next) {
            num_free_elements++;
            free_bytes += bytes->size;
        }
    }

    unsigned long num_persistent_buffers = _malloc_buffers.size();
    size_t malloc_total_size = 0;
    for (auto &element : _malloc_buffers) {
        malloc_total_size += malloc_size(element.get());
    }
    double malloc_total_size_kb = malloc_total_size / 1024.0;

    fprintf(stderr, "%-16p %6lu %8.2f %8.2f %6lu %6lu %6lu %8.2f\n",
            this,                   // zone ptr
            num_pages,              // pages
            pages_total_kb,         // total
            pages_in_use_kb,        // in-use
            num_free_elements,      // free
            free_bytes,             // bytes
            num_persistent_buffers, // malloc
            malloc_total_size_kb    // total
    );
}

void zone::print_header() OG_NOEXCEPT {
    fprintf(stderr, "Zones\n%-16s %6s %8s %8s %6s %6s %6s %8s\n",
            "zone ptr", "pages", "total", "in-use", "free", "bytes", "malloc", "total");
}

} /* data */
} /* OG */
```
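
`alloc_slow()` rounds sizes and offsets up with the usual power-of-two idiom, `(x + alignment_mask) & ~alignment_mask`, where `alignment_mask` is the alignment minus one. A few self-contained checks of that arithmetic; the `align_up` helper is illustrative only.

```cpp
// Round-up idiom used twice in alloc_slow(): adding the mask then clearing
// the low bits rounds x up to the next multiple of (alignment_mask + 1),
// assuming the alignment is a power of two.
#include <cassert>
#include <cstdint>

constexpr uint32_t align_up(uint32_t x, uint32_t alignment_mask) {
    return (x + alignment_mask) & ~alignment_mask;
}

int main() {
    static_assert(align_up(13, 0x7) == 16, "13 rounds up to an 8-byte boundary");
    static_assert(align_up(16, 0x7) == 16, "already-aligned values are unchanged");
    static_assert(align_up(1, 0xf) == 16, "16-byte alignment");
    assert(align_up(4097, 0xfff) == 8192); // page-sized alignment
    return 0;
}
```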

Sources/OpenGraph_SPI/Data/zone.hpp

Lines changed: 70 additions & 0 deletions
New file:

```cpp
//
//  zone.hpp
//  OpenGraph_SPI

#ifndef zone_hpp
#define zone_hpp

#include "OGBase.h"
#include "ptr.hpp"

namespace OG {
namespace data {

class zone {
public:
    class info {
    private:
        constexpr static uint32_t zone_id_mask = 0x7fffffff;
        uint32_t _value;
        constexpr info(uint32_t value) : _value(value){};

    public:
        uint32_t zone_id() { return _value & zone_id_mask; };
        info with_zone_id(uint32_t zone_id) const { return info((_value & ~zone_id_mask) | (zone_id & zone_id_mask)); };

        uint32_t to_raw_value() { return _value; };
        static info from_raw_value(uint32_t value) { return info(value); };
    }; /* info */
public:
    zone();

    OG_INLINE OG_CONSTEXPR
    ptr<page> last_page() const OG_NOEXCEPT { return _last_page; };

    OG_INLINE OG_CONSTEXPR
    info info() const OG_NOEXCEPT { return _info; };

    void clear() OG_NOEXCEPT;

    ptr<void> alloc_slow(uint32_t size, uint32_t alignment_mask) OG_NOEXCEPT;

    void *alloc_persistent(size_t size) OG_NOEXCEPT;

    void realloc_bytes(ptr<void> *buffer, uint32_t size, uint32_t new_size, uint32_t alignment_mask) OG_NOEXCEPT;

    // ptr<void> alloc_bytes(uint32_t size, uint32_t alignment_mask);
    ptr<void> alloc_bytes_recycle(uint32_t size, uint32_t alignment_mask) OG_NOEXCEPT;

    // Printing
    void print() const OG_NOEXCEPT;

    void print_header() OG_NOEXCEPT;

    ~zone();
private:
    typedef struct _bytes_info {
        ptr<struct _bytes_info> next;
        uint32_t size;
    } bytes_info;

    vector<std::unique_ptr<void, table::malloc_zone_deleter>, 0, uint32_t> _malloc_buffers;
    ptr<page> _last_page;
    ptr<bytes_info> _free_bytes;
    class info _info;
}; /* zone */

} /* data */
} /* OG */

#endif /* zone_hpp */
```
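
`zone::info` keeps the zone id in the low 31 bits of a 32-bit word (`zone_id_mask = 0x7fffffff`) and reserves the top bit for other state, which `with_zone_id()` preserves when the id changes. The free functions below restate that bit arithmetic outside the class as a quick check; they are illustrative, not part of the header.

```cpp
// Bit packing used by zone::info: low 31 bits = zone id, top bit untouched.
#include <cstdint>

constexpr uint32_t zone_id_mask = 0x7fffffff;

constexpr uint32_t zone_id(uint32_t value) { return value & zone_id_mask; }

constexpr uint32_t with_zone_id(uint32_t value, uint32_t new_id) {
    return (value & ~zone_id_mask) | (new_id & zone_id_mask);
}

int main() {
    constexpr uint32_t raw = 0x80000005; // top bit set, zone id 5
    static_assert(zone_id(raw) == 5, "low 31 bits are the zone id");
    static_assert(with_zone_id(raw, 9) == 0x80000009, "top bit survives an id change");
    return 0;
}
```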
