/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include <aws/common/ring_buffer.h>

#include <aws/common/byte_buf.h>

#ifdef CBMC
#    define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order) \
        dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order); \
        assert(__CPROVER_same_object(dest_ptr, ring_buf->allocation)); \
        assert(aws_ring_buffer_check_atomic_ptr(ring_buf, dest_ptr));
#    define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order) \
        assert(aws_ring_buffer_check_atomic_ptr(ring_buf, src_ptr)); \
        aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order);
#else
#    define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order) \
        dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order);
#    define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order) \
        aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order);
#endif
#define AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, dest_ptr) \
    AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->tail, aws_memory_order_acquire);
#define AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, src_ptr) \
    AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->tail, src_ptr, aws_memory_order_release);
#define AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, dest_ptr) \
    AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->head, aws_memory_order_relaxed);
#define AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, src_ptr) \
    AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->head, src_ptr, aws_memory_order_relaxed);
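
/*
 * Memory-ordering rationale (a reading of the code above, not an authoritative
 * spec): the tail is written by the releasing thread and read by the acquiring
 * thread, so it uses release/acquire pairing to publish completed releases.
 * The head is only read and written by the acquiring thread, so relaxed
 * ordering suffices for it.
 */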
int aws_ring_buffer_init(struct aws_ring_buffer *ring_buf, struct aws_allocator *allocator, size_t size) {
    AWS_PRECONDITION(ring_buf != NULL);
    AWS_PRECONDITION(allocator != NULL);
    AWS_PRECONDITION(size > 0);

    AWS_ZERO_STRUCT(*ring_buf);

    ring_buf->allocation = aws_mem_acquire(allocator, size);

    if (!ring_buf->allocation) {
        return AWS_OP_ERR;
    }

    ring_buf->allocator = allocator;
    aws_atomic_init_ptr(&ring_buf->head, ring_buf->allocation);
    aws_atomic_init_ptr(&ring_buf->tail, ring_buf->allocation);
    ring_buf->allocation_end = ring_buf->allocation + size;

    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
    return AWS_OP_SUCCESS;
}
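
/*
 * Minimal lifecycle sketch (illustrative, not part of the library source):
 *
 *     struct aws_ring_buffer ring;
 *     if (aws_ring_buffer_init(&ring, aws_default_allocator(), 4096)) {
 *         return AWS_OP_ERR;
 *     }
 *     ... acquire and release buffers ...
 *     aws_ring_buffer_clean_up(&ring);
 */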

void aws_ring_buffer_clean_up(struct aws_ring_buffer *ring_buf) {
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
    if (ring_buf->allocation) {
        aws_mem_release(ring_buf->allocator, ring_buf->allocation);
    }

    AWS_ZERO_STRUCT(*ring_buf);
}

int aws_ring_buffer_acquire(struct aws_ring_buffer *ring_buf, size_t requested_size, struct aws_byte_buf *dest) {
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
    AWS_ERROR_PRECONDITION(requested_size != 0);

    uint8_t *tail_cpy;
    uint8_t *head_cpy;
    AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy);
    AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy);

    /* in this branch, no buffers are currently vended: the ring is empty. */
    if (head_cpy == tail_cpy) {
        size_t ring_space = ring_buf->allocation_end - ring_buf->allocation;

        if (requested_size > ring_space) {
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return aws_raise_error(AWS_ERROR_OOM);
        }
        AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
        AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation);
        *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
        AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
        AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
        return AWS_OP_SUCCESS;
    }

    /* as the ring buffer is traversed, execution alternates between the next two branches. */
    /* after N + 1 wraps */
    if (tail_cpy > head_cpy) {
        size_t space = tail_cpy - head_cpy - 1;

        if (space >= requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
        /* after N wraps */
    } else if (tail_cpy < head_cpy) {
        /* prefer the head space for efficiency. */
        if ((size_t)(ring_buf->allocation_end - head_cpy) >= requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }

        if ((size_t)(tail_cpy - ring_buf->allocation) > requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
            *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
    }

    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
    return aws_raise_error(AWS_ERROR_OOM);
}
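
/*
 * Acquire/release sketch (illustrative; buffer contents elided):
 *
 *     struct aws_byte_buf buf;
 *     AWS_ZERO_STRUCT(buf);
 *     if (aws_ring_buffer_acquire(&ring, 256, &buf) == AWS_OP_SUCCESS) {
 *         ... write up to buf.capacity bytes into buf.buffer ...
 *         aws_ring_buffer_release(&ring, &buf);
 *     }
 *
 * Buffers must be released in the order they were acquired, since releasing
 * simply advances the tail past the released region.
 */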

int aws_ring_buffer_acquire_up_to(
    struct aws_ring_buffer *ring_buf,
    size_t minimum_size,
    size_t requested_size,
    struct aws_byte_buf *dest) {
    AWS_PRECONDITION(requested_size >= minimum_size);
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_PRECONDITION(aws_byte_buf_is_valid(dest));

    if (requested_size == 0 || minimum_size == 0 || !ring_buf || !dest) {
        AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
        AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    uint8_t *tail_cpy;
    uint8_t *head_cpy;
    AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy);
    AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy);

    /* in this branch, no buffers are currently vended: the ring is empty. */
    if (head_cpy == tail_cpy) {
        size_t ring_space = ring_buf->allocation_end - ring_buf->allocation;

        size_t allocation_size = ring_space > requested_size ? requested_size : ring_space;

        if (allocation_size < minimum_size) {
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return aws_raise_error(AWS_ERROR_OOM);
        }

        /* go as big as we can; nothing is vended, so this is safe. */
        AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + allocation_size);
        AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation);
        *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, allocation_size);
        AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
        AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
        return AWS_OP_SUCCESS;
    }
    /* as the ring buffer is traversed, execution alternates between the next two branches. */
    /* after N + 1 wraps */
    if (tail_cpy > head_cpy) {
        size_t space = tail_cpy - head_cpy;
        /* space == 0 should be impossible here, since tail_cpy > head_cpy. */
        AWS_ASSERT(space);
        space -= 1;

        size_t returnable_size = space > requested_size ? requested_size : space;

        if (returnable_size >= minimum_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + returnable_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, returnable_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
        /* after N wraps */
    } else if (tail_cpy < head_cpy) {
        size_t head_space = ring_buf->allocation_end - head_cpy;
        size_t tail_space = tail_cpy - ring_buf->allocation;

        /* if the whole requested size can be vended, do so, preferring head space to tail space. */
        if (head_space >= requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }

        if (tail_space > requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
            *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }

        /* now vend as much as possible, once again preferring head space. */
        if (head_space >= minimum_size && head_space >= tail_space) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + head_space);
            *dest = aws_byte_buf_from_empty_array(head_cpy, head_space);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }

        if (tail_space > minimum_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + tail_space - 1);
            *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, tail_space - 1);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
    }

    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
    return aws_raise_error(AWS_ERROR_OOM);
}
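
/*
 * acquire_up_to sketch (illustrative): ask for up to 4 KiB but accept as
 * little as 512 bytes; the vended buffer's capacity says how much was granted.
 *
 *     struct aws_byte_buf buf;
 *     AWS_ZERO_STRUCT(buf);
 *     if (aws_ring_buffer_acquire_up_to(&ring, 512, 4096, &buf) == AWS_OP_SUCCESS) {
 *         ... use up to buf.capacity bytes, somewhere in [512, 4096] ...
 *         aws_ring_buffer_release(&ring, &buf);
 *     }
 */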

static inline bool s_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) {
#ifdef CBMC
    /* only continue if buf points into ring_buffer, because comparing pointers to different objects is undefined
     * (C11 6.5.8) */
    if (!__CPROVER_same_object(buf->buffer, ring_buffer->allocation) ||
        !__CPROVER_same_object(buf->buffer, ring_buffer->allocation_end - 1)) {
        return false;
    }
#endif
    return buf->buffer && ring_buffer->allocation && ring_buffer->allocation_end &&
           buf->buffer >= ring_buffer->allocation && buf->buffer + buf->capacity <= ring_buffer->allocation_end;
}

void aws_ring_buffer_release(struct aws_ring_buffer *ring_buffer, struct aws_byte_buf *buf) {
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer));
    AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
    AWS_PRECONDITION(s_buf_belongs_to_pool(ring_buffer, buf));
    AWS_ATOMIC_STORE_TAIL_PTR(ring_buffer, buf->buffer + buf->capacity);
    AWS_ZERO_STRUCT(*buf);
    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer));
}
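
/*
 * Note that release zeroes *buf: the caller's handle is consumed and must not
 * be released twice or reused after this call.
 */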

bool aws_ring_buffer_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) {
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer));
    AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
    bool rval = s_buf_belongs_to_pool(ring_buffer, buf);
    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer));
    AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
    return rval;
}

/* Ring buffer allocator implementation */
static void *s_ring_buffer_mem_acquire(struct aws_allocator *allocator, size_t size) {
    struct aws_ring_buffer *buffer = allocator->impl;
    struct aws_byte_buf buf;
    AWS_ZERO_STRUCT(buf);
    /* allocate extra space for the size */
    if (aws_ring_buffer_acquire(buffer, size + sizeof(size_t), &buf)) {
        return NULL;
    }
    /* store the size ahead of the allocation */
    *((size_t *)buf.buffer) = buf.capacity;
    return buf.buffer + sizeof(size_t);
}
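
/*
 * Resulting layout of each allocation (editorial diagram):
 *
 *     [ size_t: full vended capacity ][ user bytes ... ]
 *     ^ buf.buffer                    ^ pointer returned to the caller
 *
 * The stashed capacity covers the header itself, so s_ring_buffer_mem_release()
 * can rebuild the exact aws_byte_buf that aws_ring_buffer_acquire() vended.
 */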

static void s_ring_buffer_mem_release(struct aws_allocator *allocator, void *ptr) {
    /* back up to where the size is stored */
    const void *addr = ((uint8_t *)ptr - sizeof(size_t));
    const size_t size = *((size_t *)addr);

    struct aws_byte_buf buf = aws_byte_buf_from_array(addr, size);
    buf.allocator = allocator;

    struct aws_ring_buffer *buffer = allocator->impl;
    aws_ring_buffer_release(buffer, &buf);
}

static void *s_ring_buffer_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
    void *mem = s_ring_buffer_mem_acquire(allocator, num * size);
    if (!mem) {
        return NULL;
    }
    memset(mem, 0, num * size);
    return mem;
}

static void *s_ring_buffer_mem_realloc(struct aws_allocator *allocator, void *ptr, size_t old_size, size_t new_size) {
    (void)allocator;
    (void)ptr;
    (void)old_size;
    (void)new_size;
    AWS_FATAL_ASSERT(!"ring_buffer_allocator does not support realloc, as it breaks allocation ordering");
    return NULL;
}

int aws_ring_buffer_allocator_init(struct aws_allocator *allocator, struct aws_ring_buffer *ring_buffer) {
    if (allocator == NULL || ring_buffer == NULL) {
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    allocator->impl = ring_buffer;
    allocator->mem_acquire = s_ring_buffer_mem_acquire;
    allocator->mem_release = s_ring_buffer_mem_release;
    allocator->mem_calloc = s_ring_buffer_mem_calloc;
    allocator->mem_realloc = s_ring_buffer_mem_realloc;
    return AWS_OP_SUCCESS;
}
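
/*
 * Allocator usage sketch (illustrative): back an aws_allocator with the ring
 * buffer, then use the ordinary aws_mem_* entry points against it. Releases
 * must follow allocation order, matching the underlying ring buffer.
 *
 *     struct aws_allocator ring_alloc;
 *     if (aws_ring_buffer_allocator_init(&ring_alloc, &ring) == AWS_OP_SUCCESS) {
 *         void *mem = aws_mem_acquire(&ring_alloc, 128);
 *         ...
 *         aws_mem_release(&ring_alloc, mem);
 *         aws_ring_buffer_allocator_clean_up(&ring_alloc);
 *     }
 */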

void aws_ring_buffer_allocator_clean_up(struct aws_allocator *allocator) {
    AWS_ZERO_STRUCT(*allocator);
}